BigDong committed
Commit 6c071d2 · verified · 1 Parent(s): 1fe5735

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,98 @@
+ {
+   "_name_or_path": "openbmb/MiniCPM-SALA",
+   "architectures": [
+     "MiniCPMSALAForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "attn_use_rope": false,
+   "auto_map": {
+     "AutoConfig": "configuration_minicpm_sala.MiniCPMSALAConfig",
+     "AutoModel": "modeling_minicpm_sala.MiniCPMSALAModel",
+     "AutoModelForCausalLM": "modeling_minicpm_sala.MiniCPMSALAForCausalLM",
+     "AutoModelForSeq2SeqLM": "modeling_minicpm_sala.MiniCPMSALAForCausalLM",
+     "AutoModelForSequenceClassification": "modeling_minicpm_sala.MiniCPMSALAForSequenceClassification"
+   },
+   "bos_token_id": 1,
+   "eos_token_id": [
+     2,
+     73440
+   ],
+   "pad_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.1,
+   "intermediate_size": 16384,
+   "lightning_head_dim": 128,
+   "lightning_nh": 32,
+   "lightning_nkv": 32,
+   "lightning_scale": "1/sqrt(d)",
+   "lightning_use_rope": true,
+   "max_position_embeddings": 524288,
+   "model_type": "minicpm_sala",
+   "mixer_types": [
+     "minicpm4",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "minicpm4",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "minicpm4",
+     "minicpm4",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "minicpm4",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "lightning-attn",
+     "minicpm4",
+     "minicpm4",
+     "minicpm4"
+   ],
+   "sparse_config": {
+     "kernel_size": 32,
+     "kernel_stride": 16,
+     "init_blocks": 1,
+     "block_size": 64,
+     "window_size": 2048,
+     "topk": 64,
+     "use_nope": false,
+     "dense_len": 8192
+   },
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 2,
+   "qk_norm": true,
+   "rand_init": false,
+   "rms_norm_eps": 1e-06,
+   "torch_dtype": "bfloat16",
+   "dtype": "bfloat16",
+   "transformers_version": "4.46.3",
+   "use_cache": true,
+   "vocab_size": 73448,
+   "rope_theta": 10000.0,
+   "scale_emb": 12,
+   "scale_depth": 1.4,
+   "mup_denominator": 32,
+   "dim_model_base": 256,
+   "tie_word_embeddings": false,
+   "use_output_gate": true,
+   "use_output_norm": true,
+   "attn_use_output_gate": true
+ }
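
Note: the `auto_map` above routes `AutoConfig`/`AutoModel*` to the bundled `configuration_minicpm_sala.py` and `modeling_minicpm_sala.py`, so loading goes through the standard `trust_remote_code` path. A minimal loading sketch (the repo id is taken from `_name_or_path`; the dtype choice mirrors the config and is illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# trust_remote_code=True lets AutoConfig/AutoModelForCausalLM resolve the
# custom classes listed under "auto_map" in config.json.
model = AutoModelForCausalLM.from_pretrained(
    "openbmb/MiniCPM-SALA",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
)
tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM-SALA")
```

The 32-entry `mixer_types` list assigns one token mixer per layer: "minicpm4" (full/sparse attention, governed by `sparse_config`) on layers 0, 9, 16, 17, 22, 29, 30 and 31, and "lightning-attn" (linear attention) everywhere else.
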
configuration_minicpm_sala.py ADDED
@@ -0,0 +1,260 @@
+ # coding=utf-8
+ # Copyright 2025 The OpenBMB Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """MiniCPMSALA model configuration"""
+
+ from typing import List, Optional
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ MINICPMSALA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class MiniCPMSALAConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`MiniCPMSALAModel`]. It is used to
+     instantiate a MiniCPMSALA model according to the specified arguments, defining the model architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the MiniCPMSALA model. Defines the number of different tokens that can be
+             represented by the `inputs_ids` passed when calling [`MiniCPMSALAModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by mean-pooling all the original heads within that group. For more details, check out
+             [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. MiniCPMSALA supports up to
+             524288 tokens.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This
+             value is necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two
+             scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The
+             expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't
+             update `max_position_embeddings` to the expected new maximum. See the following thread for more
+             information on how these scaling strategies behave:
+             https://www.reddit.com/r/LocalMiniCPM/comments/14mrgpr/dynamically_scaled_rope_further_increases/.
+             This is an experimental feature, subject to breaking API changes in future versions.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import MiniCPMSALAModel, MiniCPMSALAConfig
+
+     >>> # Initializing a MiniCPMSALA style configuration
+     >>> configuration = MiniCPMSALAConfig()
+
+     >>> # Initializing a model from the minicpm_sala style configuration
+     >>> model = MiniCPMSALAModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "minicpm_sala"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         pretraining_tp=1,
+         tie_word_embeddings=True,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         scale_emb=1,
+         dim_model_base=1,
+         scale_depth=1,
+         mup_denominator=32,
+         sparse_config=None,
+         mixer_types: List[str] = ["minicpm4"],
+         head_dim: Optional[int] = None,
+         use_output_gate: bool = False,
+         use_output_norm: bool = False,
+         lightning_use_rope: bool = True,
+         lightning_nkv: Optional[int] = None,
+         lightning_nh: Optional[int] = None,
+         qk_norm: bool = False,
+         lightning_head_dim: Optional[int] = None,
+         rand_init: bool = False,
+         train_mlp: bool = True,
+         attn_use_rope: bool = True,
+         lightning_scale: str = "1/sqrt(d)",
+         shift_labels: bool = True,
+         attn_use_output_gate: bool = False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         self.scale_emb = scale_emb
+         self.dim_model_base = dim_model_base
+         self.scale_depth = scale_depth
+         # only used for Eagle Head
+         self.mup_denominator = mup_denominator
+
+         # sparse config
+         self.sparse_config = sparse_config
+
+         self.mixer_types = mixer_types
+         if self.mixer_types is None or len(self.mixer_types) == 0:
+             # Default to minicpm4 (full attention) for all layers
+             self.mixer_types = ["minicpm4"] * self.num_hidden_layers
+         elif len(self.mixer_types) < self.num_hidden_layers:
+             self.mixer_types = (mixer_types * self.num_hidden_layers)[
+                 : self.num_hidden_layers
+             ]
+         elif len(self.mixer_types) == self.num_hidden_layers:
+             self.mixer_types = mixer_types
+         else:
+             raise ValueError(f"Invalid number of mixer types: {len(self.mixer_types)}")
+         assert len(self.mixer_types) == self.num_hidden_layers
+
+         # for Lightning
+         if head_dim is None:
+             head_dim = self.hidden_size // self.num_attention_heads
+         self.head_dim = head_dim
+         self.use_output_norm = use_output_norm
+         self.use_output_gate = use_output_gate
+         self.lightning_use_rope = lightning_use_rope
+         self.qk_norm = qk_norm
+         self.lightning_nkv = (
+             lightning_nkv if lightning_nkv is not None else self.num_key_value_heads
+         )
+         self.lightning_nh = (
+             lightning_nh if lightning_nh is not None else self.num_attention_heads
+         )
+         self.lightning_head_dim = (
+             lightning_head_dim if lightning_head_dim is not None else self.head_dim
+         )
+         self.lightning_scale = lightning_scale
+         self.attn_use_rope = attn_use_rope
+         self.shift_labels = shift_labels
+         self.attn_use_output_gate = attn_use_output_gate
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+         try:
+             import flash_attn
+
+             self._attn_implementation = "flash_attention_2"
+         except ImportError:
+             pass
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if (
+             rope_scaling_factor is None
+             or not isinstance(rope_scaling_factor, float)
+             or rope_scaling_factor <= 1.0
+         ):
+             raise ValueError(
+                 f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}"
+             )
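
Note: the `mixer_types` handling in `__init__` tiles a shorter list across all `num_hidden_layers`. A standalone sketch of that tiling rule (plain Python; the 8-layer example is hypothetical):

```python
def tile_mixer_types(mixer_types, num_hidden_layers):
    # Reproduces the tiling rule from MiniCPMSALAConfig.__init__.
    if not mixer_types:
        return ["minicpm4"] * num_hidden_layers  # default: full attention everywhere
    if len(mixer_types) < num_hidden_layers:
        # Repeat the pattern, then truncate to the layer count.
        return (mixer_types * num_hidden_layers)[:num_hidden_layers]
    if len(mixer_types) == num_hidden_layers:
        return list(mixer_types)
    raise ValueError(f"Invalid number of mixer types: {len(mixer_types)}")

print(tile_mixer_types(["minicpm4", "lightning-attn", "lightning-attn", "lightning-attn"], 8))
# ['minicpm4', 'lightning-attn', 'lightning-attn', 'lightning-attn',
#  'minicpm4', 'lightning-attn', 'lightning-attn', 'lightning-attn']
```

For `_rope_scaling_validation`, a value that passes would look like `{"type": "dynamic", "factor": 2.0}`: exactly two fields, a type of "linear" or "dynamic", and a float factor strictly greater than 1.
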
generation_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": [
+     2,
+     73440
+   ],
+   "pad_token_id": 2,
+   "transformers_version": "4.56.1"
+ }
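
Note: `eos_token_id` is list-valued here, so generation stops at either `</s>` (id 2) or `<|im_end|>` (id 73440). A sketch reusing the `model`/`tokenizer` from the loading example above (prompt and length are illustrative):

```python
inputs = tokenizer("Hello", return_tensors="pt")
# generate() accepts a list of stop ids, matching generation_config.json.
output_ids = model.generate(**inputs, max_new_tokens=64, eos_token_id=[2, 73440])
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```
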
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c78212b9421a7f674a0cbce97cd584884f275f381d576973bc0e6dad33a1b31f
+ size 4968143488
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e93182ef3c7715d3659d297839f2340df869770e6a494f7fc8cce1fdef1eeed
+ size 4874002352
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c9a8cc00265c89b922b3d3020925b0b39cd0ca99ed9dcd951aea6e52f491286
+ size 4874002384
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a34680df5ba413ff78e1b1faa5c3e4b83a18c07fd54b71fc51cbcb1ec0d995ea
+ size 4238305384
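
Note: these four files are Git LFS pointers; the shards themselves are addressed by the sha256 `oid`. A quick integrity check of a downloaded shard against its pointer, using only the standard library (local path assumed):

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so multi-GB shards don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            h.update(block)
    return h.hexdigest()

# Should print c78212b9... for shard 1, matching the pointer's oid line.
print(sha256_of("model-00001-of-00004.safetensors"))
```
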
model.safetensors.index.json ADDED
@@ -0,0 +1,403 @@
+ {
+   "metadata": {
+     "total_parameters": 9477203968,
+     "total_size": 18954407936
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00004-of-00004.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.o_gate.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.o_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.z_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.o_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.z_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.o_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.z_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.o_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.z_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.o_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.z_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.o_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.z_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.o_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.z_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.o_gate.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.self_attn.o_gate.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.o_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.z_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.o_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.z_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.o_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.z_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.o_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.z_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.o_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.z_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.o_gate.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.o_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.z_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.o_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.z_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.o_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.z_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.o_norm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.z_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.o_norm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.z_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.self_attn.o_norm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.28.self_attn.z_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.self_attn.o_gate.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.o_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.z_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.self_attn.o_gate.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.self_attn.o_gate.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.o_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.z_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.o_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.z_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.o_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.z_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.o_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.z_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.o_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.z_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.o_gate.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.norm.weight": "model-00004-of-00004.safetensors"
+   }
+ }
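
Note: `weight_map` pins each tensor to one of the four shards, and the per-layer key sets mirror `mixer_types` in config.json: the full-attention ("minicpm4") layers 0, 9, 16, 17, 22, 29, 30 and 31 carry `self_attn.o_gate`, while the "lightning-attn" layers carry `q_norm`/`k_norm`/`o_norm` and `z_proj`. A sketch that loads a single tensor from the correct shard via the `safetensors` library (local file paths assumed):

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00004.safetensors"

# safe_open memory-maps the shard, so only this tensor is materialized.
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape, tensor.dtype)
```
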
modeling_minicpm_sala.py ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,172 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|im_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<tools>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</tools>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<arguments>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</arguments>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<parameters>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</parameters>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<function",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</function>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<param",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</param>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<im_end>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb74d51116831c3bf65db812c553f94ab0c88dcf97a5bbb37e3504f6d359c530
+ size 1181204
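
Note: with `tokenizer.model`, `tokenizer.json` and the `tokenizer_config.json` below in place, the tokenizer loads as a standard `LlamaTokenizer` and exposes the bundled `chat_template`. A usage sketch (messages are illustrative; `add_generation_prompt` handling is assumed to follow the usual ChatML convention):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("openbmb/MiniCPM-SALA")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# Renders <|im_start|>/<|im_end|> turns per the chat_template in tokenizer_config.json.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```
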
tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "102": {
+ "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "103": {
+ "content": "<tool_response>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "104": {
+ "content": "</tool_response>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "105": {
+ "content": "<tools>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "106": {
+ "content": "</tools>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "107": {
+ "content": "<parameters>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "108": {
+ "content": "</parameters>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "109": {
+ "content": "<arguments>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "110": {
+ "content": "</arguments>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "111": {
+ "content": "<function",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "112": {
+ "content": "</function>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "113": {
+ "content": "<param",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "114": {
+ "content": "</param>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "73440": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "73441": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "73442": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "73443": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "73444": {
+ "content": "<|im_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "73445": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "73446": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "73447": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_end|>",
+ "<|im_start|>",
+ "<tool_call>",
+ "</tool_call>",
+ "<|im_sep|>",
+ "<|fim_prefix|>",
+ "<|fim_middle|>",
+ "<|fim_suffix|>",
+ "<tool_response>",
+ "</tool_response>",
+ "<tools>",
+ "</tools>",
+ "<arguments>",
+ "</arguments>",
+ "<parameters>",
+ "</parameters>",
+ "<function",
+ "</function>",
+ "<param",
+ "</param>"
+ ],
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "legacy": true,
+ "model_max_length": 262144,
+ "pad_token": "</s>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "chat_template": "{%- if tools %}\n {%- set tool_definitions %}\n {{- \"# Tools\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson(ensure_ascii=False) }}\n {%- endfor %}\n {{- '\\n</tools>\\n\\nTool usage guidelines:\\n- You may call zero or more functions. If no function calls are needed, just answer normally and do not include any <function ... </function>.\\n- When calling a function, return an XML object within <function ... </function> using:\\n<function name=\"function-name\"><param name=\"param-name\">param-value</param></function>\\n- param-value may be multi-line. If it contains <, & or newline characters, wrap it in a CDATA block: <param name=\"param-name\"><![CDATA[...multi-line value...]]></param>' }}\n {%- endset %}\n \n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {%- if '<tool_def_sep>' in messages[0].content %}\n {{- messages[0].content.replace('<tool_def_sep>', tool_definitions) }}\n {%- else %}\n {{- messages[0].content + '\\n\\n' + tool_definitions }}\n {%- endif %}\n {%- else %}\n {{- tool_definitions.lstrip() }}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n \n {%- if message.tool_calls %}\n {%- set content_parts = content.split('<tool_sep>') %}\n {%- set processed_content = content_parts[0] %}\n {%- set tool_calls_count = message.tool_calls|length %}\n {%- set tool_sep_count = content_parts|length - 1 %}\n {%- set min_count = [tool_calls_count, tool_sep_count]|min %}\n \n {%- for i in range(1, content_parts|length) %}\n {%- set tool_index = i - 1 %}\n {%- if tool_index < tool_calls_count %}\n {%- set tool_call = message.tool_calls[tool_index] %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {%- set single_tool_xml %}\n {{- '<function name=\"' ~ tool_call.name ~ '\">' }}\n {%- if tool_call.arguments %}\n {%- set args_dict = tool_call.arguments %}\n {%- for param_name, param_value in args_dict.items() %}\n {{- '<param name=\"' ~ param_name ~ '\">' }}\n {%- if param_value is string and ('<' in param_value or '&' in param_value or '\\n' in param_value) %}\n {{- '<![CDATA[' + param_value + ']]>' }}\n {%- else %}\n {{- param_value }}\n {%- endif %}\n {{- '</param>' }}\n {%- endfor %}\n {%- endif %}\n {{- '</function>' }}\n {%- endset %}\n {%- set processed_content = processed_content + single_tool_xml + content_parts[i] %}\n {%- else %}\n {%- set processed_content = processed_content + content_parts[i] %}\n {%- endif %}\n {%- endfor %}\n \n {%- if tool_calls_count > tool_sep_count %}\n {%- for remaining_index in range(tool_sep_count, tool_calls_count) %}\n {%- set tool_call = message.tool_calls[remaining_index] %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {%- set remaining_tool_xml %}\n {{- '<function name=\"' ~ tool_call.name ~ '\">' }}\n {%- if tool_call.arguments %}\n {%- set args_dict = tool_call.arguments %}\n {%- for param_name, param_value in args_dict.items() %}\n {{- '<param name=\"' ~ param_name ~ '\">' }}\n {%- if param_value is string and ('<' in param_value or '&' in param_value or '\\n' in param_value) %}\n {{- '<![CDATA[' + param_value + ']]>' }}\n {%- else %}\n {{- param_value }}\n {%- endif %}\n {{- '</param>' }}\n {%- endfor %}\n {%- endif %}\n {{- '</function>' }}\n {%- endset %}\n {%- set processed_content = processed_content + remaining_tool_xml %}\n {%- endfor %}\n {%- endif %}\n \n {%- set content = processed_content %}\n {%- endif %}\n \n {%- if loop.index0 > ns.last_query_index %}\n {%- if reasoning_content %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n \n {%- if message.tool_calls and not has_tool_sep %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<function name=\"' ~ tool_call.name ~ '\">' }}\n {%- if tool_call.arguments %}\n {%- set args_dict = tool_call.arguments %}\n {%- for param_name, param_value in args_dict.items() %}\n {{- '<param name=\"' ~ param_name ~ '\">' }}\n {%- if param_value is string and ('<' in param_value or '&' in param_value or '\\n' in param_value) %}\n {{- '<![CDATA[' + param_value + ']]>' }}\n {%- else %}\n {{- param_value }}\n {%- endif %}\n {{- '</param>' }}\n {%- endfor %}\n {%- endif %}\n {{- '</function>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {%- if message.content is string %}\n {{- content }}\n {%- else %}\n {{- message.content | tojson(ensure_ascii=False) }}\n {%- endif %}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
+ }
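
The `chat_template` entry closing tokenizer_config.json is a Jinja2 template: it renders ChatML-style turns (`<|im_start|>role ... <|im_end|>`), injects tool signatures into the system turn inside `<tools>...</tools>`, serializes assistant tool calls as `<function name=...><param name=...>` XML (CDATA-wrapping any string value containing `<`, `&`, or a newline), and folds `tool` messages into `<tool_response>` blocks within a user turn. A hedged usage sketch; the weather tool, the messages, and the repo id are illustrative, and the exact rendered string should be confirmed by running it:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("openbmb/MiniCPM-SALA", trust_remote_code=True)

# A made-up tool signature; the template injects it into the system turn.
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

# The template iterates tool_call.arguments with .items(), so arguments
# are passed as a dict, not as a JSON string.
messages = [
    {"role": "user", "content": "What's the weather in Beijing?"},
    {"role": "assistant", "content": "", "tool_calls": [
        {"type": "function",
         "function": {"name": "get_weather", "arguments": {"city": "Beijing"}}},
    ]},
    {"role": "tool", "content": "Sunny, 25 degrees C"},
]

text = tok.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
print(text)
# The output should contain ChatML turns plus tool-call XML of the form
#   <function name="get_weather"><param name="city">Beijing</param></function>
# and the tool result wrapped in <tool_response>...</tool_response>,
# ending with '<|im_start|>assistant\n' from add_generation_prompt.
```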