youngwoo3283 committed on
Commit 74f0d88 · verified · 1 Parent(s): 11f964e

Upload Gemma2ForCausalLM
config.json CHANGED
@@ -1,30 +1,34 @@
 {
-  "_name_or_path": "NCSOFT/Llama-VARCO-8B-Instruct",
+  "_name_or_path": "HumanF-MarkrAI/Gukbap-Gemma2-9B",
   "architectures": [
-    "LlamaForCausalLM"
+    "Gemma2ForCausalLM"
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
-  "bos_token_id": 128000,
-  "eos_token_id": 128001,
-  "head_dim": 128,
-  "hidden_act": "silu",
-  "hidden_size": 4096,
+  "attn_logit_softcapping": 50.0,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "final_logit_softcapping": 30.0,
+  "head_dim": 256,
+  "hidden_act": "gelu_pytorch_tanh",
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 3584,
   "initializer_range": 0.02,
   "intermediate_size": 14336,
   "max_position_embeddings": 8192,
-  "mlp_bias": false,
-  "model_type": "llama",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 32,
+  "model_type": "gemma2",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 42,
   "num_key_value_heads": 8,
-  "pretraining_tp": 1,
-  "rms_norm_eps": 1e-05,
-  "rope_scaling": null,
-  "rope_theta": 500000.0,
-  "tie_word_embeddings": false,
+  "pad_token_id": 0,
+  "query_pre_attn_scalar": 256,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "sliding_window_size": 4096,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.47.1",
   "use_cache": false,
-  "vocab_size": 128256
+  "vocab_size": 256000
 }
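
This config change retargets the checkpoint from the 32-layer Llama base (NCSOFT/Llama-VARCO-8B-Instruct) to the 42-layer Gemma2 base (HumanF-MarkrAI/Gukbap-Gemma2-9B): logit softcapping and sliding-window attention appear, head_dim doubles to 256 while the head count halves to 16, and the vocabulary grows from 128256 to 256000. A minimal sketch of inspecting the new fields, where "path/to/this-repo" is a placeholder for a local clone of this repository:

    from transformers import AutoConfig

    # Load the config shipped in this commit; the path is a placeholder.
    cfg = AutoConfig.from_pretrained("path/to/this-repo")
    assert cfg.model_type == "gemma2"

    # Gemma2-specific fields from the diff above:
    print(cfg.attn_logit_softcapping)   # 50.0: tanh cap on attention logits
    print(cfg.final_logit_softcapping)  # 30.0: tanh cap on LM-head logits
    print(cfg.sliding_window)           # 4096: local-attention window on alternating layers
    print(cfg.query_pre_attn_scalar)    # 256: attention scores are scaled by 1/sqrt(256)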
generation_config.json CHANGED
@@ -1,7 +1,8 @@
 {
   "_from_model_config": true,
-  "bos_token_id": 128000,
-  "eos_token_id": 128001,
-  "transformers_version": "4.47.1",
-  "use_cache": false
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "pad_token_id": 0,
+  "transformers_version": "4.47.1"
 }
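
The generation defaults follow suit: Gemma2's special tokens (bos 2, eos 1, pad 0) replace the Llama ids, the hybrid KV cache is pinned, and the use_cache: false entry is dropped. A sketch of the equivalent object built by hand, for illustration only:

    from transformers import GenerationConfig

    # Mirrors generation_config.json after this commit.
    gen_cfg = GenerationConfig(
        bos_token_id=2,
        eos_token_id=1,
        pad_token_id=0,
        cache_implementation="hybrid",  # alternating sliding-window/global KV caches
    )
    print(gen_cfg)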
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6711bc95ff58a4075c64ce9db7de83be60d92363428da5b65787c0a6cd862d54
-size 4984375544
+oid sha256:98cda5fe114f0c1d6aa3d80560acc704d9b6cd9be6aa1c7b9ee1018f918560fe
+size 4910175800
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dcb2b8285379d120e7dd93f828228d69cbba737eb41551afd726c025939139fa
-size 4892598032
+oid sha256:be0c1504c15f8b6040beaf15c64b3418b89e1ba007e04d078fbcb163cbdc34fe
+size 4958659816
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ed15f183fac35998b8a8d2c53e0c2ea8f27507607e2bfb3243116ca92b918e42
-size 4925299128
+oid sha256:acaa0706ba53fa0cc0c85acae106bc775179b2b64ca1866afd2e1ec8b4e3dacd
+size 4972457408
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46558a95b3da87e00e388cb9e00df9f2aa437427111fa0ae0e7413dafa78216c
-size 1285579432
+oid sha256:f3bbc79b25d3ef3f1ccaf8dac1204d3cf2e7cdb34cf092baec71b4fe4b815517
+size 3677999160
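
Each shard entry above is a Git LFS pointer: oid is the SHA-256 of the stored blob and size its length in bytes, so a download can be checked locally. A small sketch; verify_lfs is a hypothetical helper, not a library call:

    import hashlib
    import os

    def verify_lfs(path: str, expected_oid: str, expected_size: int) -> bool:
        # Cheap check first: the byte length from the pointer's "size" line.
        if os.path.getsize(path) != expected_size:
            return False
        # Then the SHA-256 from the pointer's "oid" line, streamed in 1 MiB chunks.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest() == expected_oid

    # Shard 1 as uploaded in this commit:
    ok = verify_lfs(
        "model-00001-of-00004.safetensors",
        "98cda5fe114f0c1d6aa3d80560acc704d9b6cd9be6aa1c7b9ee1018f918560fe",
        4910175800,
    )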
model.safetensors.index.json CHANGED
@@ -1,15 +1,16 @@
 {
   "metadata": {
-    "total_size": 16087785472
+    "total_size": 18519194624
   },
   "weight_map": {
-    "lm_head.weight": "model-00004-of-00004.safetensors",
     "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
     "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.post_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.pre_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.0.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.0.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.0.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
@@ -27,6 +28,8 @@
     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.post_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.pre_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.1.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.1.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.1.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
@@ -44,6 +47,8 @@
     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.10.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.10.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.10.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -61,6 +66,8 @@
     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.11.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.11.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.11.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -78,6 +85,8 @@
     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.12.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.12.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.12.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -95,6 +104,8 @@
     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.13.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.13.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.13.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -112,6 +123,8 @@
     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.14.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.14.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.14.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -129,6 +142,8 @@
     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.15.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.15.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.15.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -146,6 +161,8 @@
     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.16.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.16.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.16.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -163,6 +180,8 @@
     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.17.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.17.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.17.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.17.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.17.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -180,6 +199,8 @@
     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.18.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.18.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.18.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.18.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.18.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -197,6 +218,8 @@
     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.19.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.19.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -214,6 +237,8 @@
     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.post_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.2.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.2.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.2.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
@@ -231,6 +256,8 @@
     "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.20.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.20.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
@@ -248,6 +275,8 @@
     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -265,6 +294,8 @@
     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.22.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.22.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.22.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -282,6 +313,8 @@
     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.23.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.23.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.23.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -299,6 +332,8 @@
     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.24.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.24.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.24.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -316,6 +351,8 @@
     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.25.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.25.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.25.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -333,6 +370,8 @@
     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.26.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.26.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.26.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.26.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -350,6 +389,8 @@
     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.27.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.27.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.27.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.27.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -367,6 +408,8 @@
     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.28.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.28.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.28.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.28.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.28.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -384,6 +427,8 @@
     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.29.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.29.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.29.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.29.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.29.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -401,6 +446,8 @@
     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.post_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.pre_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.3.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.3.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.3.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
@@ -418,6 +465,8 @@
     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.30.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.30.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.30.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.30.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.30.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -430,11 +479,13 @@
     "model.layers.30.self_attn.v_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.30.self_attn.v_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.30.self_attn.v_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
-    "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
-    "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-    "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
-    "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.31.post_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.31.pre_feedforward_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
@@ -447,11 +498,165 @@
     "model.layers.31.self_attn.v_proj.base_layer.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.v_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.v_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.32.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.32.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.32.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.32.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.32.self_attn.k_proj.base_layer.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.k_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.k_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.o_proj.base_layer.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.o_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.o_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.q_proj.base_layer.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.q_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.q_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.v_proj.base_layer.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.v_proj.lora_A.default.weight": "model-00003-of-00004.safetensors",
+    "model.layers.32.self_attn.v_proj.lora_B.default.weight": "model-00003-of-00004.safetensors",
+    "model.layers.33.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.k_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.k_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.k_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.o_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.o_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.o_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.q_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.q_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.q_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.33.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.k_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.k_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.k_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.o_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.o_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.o_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.q_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.q_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.q_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.34.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.k_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.k_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.k_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.o_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.o_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.o_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.q_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.q_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.q_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.35.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.k_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.k_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.k_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.o_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.o_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.o_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.q_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.q_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.q_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.36.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.k_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.k_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.k_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.o_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.o_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.o_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.q_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.q_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.q_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.37.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.k_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.k_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.k_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.o_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.o_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.o_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.q_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.q_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.q_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.38.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.k_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.k_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.k_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.o_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.o_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.o_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.q_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.q_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.q_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
+    "model.layers.39.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
     "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.post_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.pre_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.4.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.4.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.4.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
@@ -464,11 +669,51 @@
     "model.layers.4.self_attn.v_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.4.self_attn.v_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.4.self_attn.v_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.5.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.5.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.5.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
@@ -486,6 +731,8 @@
     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.6.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.6.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.6.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
@@ -498,11 +745,13 @@
     "model.layers.6.self_attn.v_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.6.self_attn.v_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.6.self_attn.v_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
-    "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
-    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
@@ -515,28 +764,32 @@
     "model.layers.7.self_attn.v_proj.base_layer.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.self_attn.v_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.self_attn.v_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.o_proj.base_layer.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.o_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.o_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.q_proj.base_layer.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.q_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.q_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.v_proj.base_layer.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.v_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.self_attn.v_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
     "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.9.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
     "model.layers.9.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
     "model.layers.9.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
 
650
+ "model.layers.39.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
651
+ "model.layers.39.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
652
+ "model.layers.39.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
653
  "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
654
  "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
655
  "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
656
  "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
657
  "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
658
+ "model.layers.4.post_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
659
+ "model.layers.4.pre_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
660
  "model.layers.4.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
661
  "model.layers.4.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
662
  "model.layers.4.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
 
  "model.layers.4.self_attn.v_proj.base_layer.weight": "model-00001-of-00004.safetensors",
670
  "model.layers.4.self_attn.v_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
671
  "model.layers.4.self_attn.v_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
672
+ "model.layers.40.input_layernorm.weight": "model-00004-of-00004.safetensors",
673
+ "model.layers.40.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
674
+ "model.layers.40.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
675
+ "model.layers.40.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
676
+ "model.layers.40.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
677
+ "model.layers.40.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
678
+ "model.layers.40.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
679
+ "model.layers.40.self_attn.k_proj.base_layer.weight": "model-00004-of-00004.safetensors",
680
+ "model.layers.40.self_attn.k_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
681
+ "model.layers.40.self_attn.k_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
682
+ "model.layers.40.self_attn.o_proj.base_layer.weight": "model-00004-of-00004.safetensors",
683
+ "model.layers.40.self_attn.o_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
684
+ "model.layers.40.self_attn.o_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
685
+ "model.layers.40.self_attn.q_proj.base_layer.weight": "model-00004-of-00004.safetensors",
686
+ "model.layers.40.self_attn.q_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
687
+ "model.layers.40.self_attn.q_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
688
+ "model.layers.40.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
689
+ "model.layers.40.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
690
+ "model.layers.40.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
691
+ "model.layers.41.input_layernorm.weight": "model-00004-of-00004.safetensors",
692
+ "model.layers.41.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
693
+ "model.layers.41.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
694
+ "model.layers.41.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
695
+ "model.layers.41.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
696
+ "model.layers.41.post_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
697
+ "model.layers.41.pre_feedforward_layernorm.weight": "model-00004-of-00004.safetensors",
698
+ "model.layers.41.self_attn.k_proj.base_layer.weight": "model-00004-of-00004.safetensors",
699
+ "model.layers.41.self_attn.k_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
700
+ "model.layers.41.self_attn.k_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
701
+ "model.layers.41.self_attn.o_proj.base_layer.weight": "model-00004-of-00004.safetensors",
702
+ "model.layers.41.self_attn.o_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
703
+ "model.layers.41.self_attn.o_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
704
+ "model.layers.41.self_attn.q_proj.base_layer.weight": "model-00004-of-00004.safetensors",
705
+ "model.layers.41.self_attn.q_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
706
+ "model.layers.41.self_attn.q_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
707
+ "model.layers.41.self_attn.v_proj.base_layer.weight": "model-00004-of-00004.safetensors",
708
+ "model.layers.41.self_attn.v_proj.lora_A.default.weight": "model-00004-of-00004.safetensors",
709
+ "model.layers.41.self_attn.v_proj.lora_B.default.weight": "model-00004-of-00004.safetensors",
710
  "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
711
  "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
712
  "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
713
  "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
714
  "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
715
+ "model.layers.5.post_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
716
+ "model.layers.5.pre_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
717
  "model.layers.5.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
718
  "model.layers.5.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
719
  "model.layers.5.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
 
  "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
732
  "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
733
  "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
734
+ "model.layers.6.post_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
735
+ "model.layers.6.pre_feedforward_layernorm.weight": "model-00001-of-00004.safetensors",
736
  "model.layers.6.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
737
  "model.layers.6.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
738
  "model.layers.6.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
 
  "model.layers.6.self_attn.v_proj.base_layer.weight": "model-00001-of-00004.safetensors",
746
  "model.layers.6.self_attn.v_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
747
  "model.layers.6.self_attn.v_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
748
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
749
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
750
  "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
751
  "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
752
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
753
+ "model.layers.7.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
754
+ "model.layers.7.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
755
  "model.layers.7.self_attn.k_proj.base_layer.weight": "model-00001-of-00004.safetensors",
756
  "model.layers.7.self_attn.k_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
757
  "model.layers.7.self_attn.k_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
 
  "model.layers.7.self_attn.v_proj.base_layer.weight": "model-00001-of-00004.safetensors",
765
  "model.layers.7.self_attn.v_proj.lora_A.default.weight": "model-00001-of-00004.safetensors",
766
  "model.layers.7.self_attn.v_proj.lora_B.default.weight": "model-00001-of-00004.safetensors",
767
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
768
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
769
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
770
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
771
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
772
+ "model.layers.8.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
773
+ "model.layers.8.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
774
+ "model.layers.8.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
775
+ "model.layers.8.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
776
+ "model.layers.8.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
777
+ "model.layers.8.self_attn.o_proj.base_layer.weight": "model-00002-of-00004.safetensors",
778
+ "model.layers.8.self_attn.o_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
779
+ "model.layers.8.self_attn.o_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
780
+ "model.layers.8.self_attn.q_proj.base_layer.weight": "model-00002-of-00004.safetensors",
781
+ "model.layers.8.self_attn.q_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
782
+ "model.layers.8.self_attn.q_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
783
+ "model.layers.8.self_attn.v_proj.base_layer.weight": "model-00002-of-00004.safetensors",
784
+ "model.layers.8.self_attn.v_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
785
+ "model.layers.8.self_attn.v_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",
786
  "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
787
  "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
788
  "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
789
  "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
790
  "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
791
+ "model.layers.9.post_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
792
+ "model.layers.9.pre_feedforward_layernorm.weight": "model-00002-of-00004.safetensors",
793
  "model.layers.9.self_attn.k_proj.base_layer.weight": "model-00002-of-00004.safetensors",
794
  "model.layers.9.self_attn.k_proj.lora_A.default.weight": "model-00002-of-00004.safetensors",
795
  "model.layers.9.self_attn.k_proj.lora_B.default.weight": "model-00002-of-00004.safetensors",