(Trained with Unsloth)
Browse files
- config.json +15 -1
config.json
CHANGED
@@ -53,7 +53,21 @@
| 53 |     "bnb_4bit_use_double_quant": true,
| 54 |     "llm_int8_enable_fp32_cpu_offload": false,
| 55 |     "llm_int8_has_fp16_weight": false,
| 56 | -   "llm_int8_skip_modules": null,
| 57 |     "llm_int8_threshold": 6.0,
| 58 |     "load_in_4bit": true,
| 59 |     "load_in_8bit": false,

| 53 |     "bnb_4bit_use_double_quant": true,
| 54 |     "llm_int8_enable_fp32_cpu_offload": false,
| 55 |     "llm_int8_has_fp16_weight": false,
| 56 | +   "llm_int8_skip_modules": [
| 57 | +     "model.layers.0.self_attn.q_proj",
| 58 | +     "model.layers.0.self_attn.k_proj",
| 59 | +     "model.layers.0.self_attn.v_proj",
| 60 | +     "model.layers.0.self_attn.o_proj",
| 61 | +     "model.layers.2.mlp.gate_proj",
| 62 | +     "model.layers.2.mlp.up_proj",
| 63 | +     "model.layers.2.mlp.down_proj",
| 64 | +     "model.layers.24.self_attn.q_proj",
| 65 | +     "model.layers.24.self_attn.k_proj",
| 66 | +     "model.layers.24.self_attn.v_proj",
| 67 | +     "model.layers.24.self_attn.o_proj",
| 68 | +     "model.layers.27.mlp.up_proj",
| 69 | +     "lm_head"
| 70 | +   ],
| 71 |     "llm_int8_threshold": 6.0,
| 72 |     "load_in_4bit": true,
| 73 |     "load_in_8bit": false,