Fix: revert lm_head to language_model.lm_head (no .model prefix)
Browse files- config.json +1 -1
- hf_quant_config.json +1 -1
config.json
CHANGED
|
@@ -35,7 +35,7 @@
|
|
| 35 |
}
|
| 36 |
},
|
| 37 |
"ignore": [
|
| 38 |
-
"language_model.model.lm_head",
|
| 39 |
"language_model.model.layers.0.self_attn*",
|
| 40 |
"language_model.model.layers.1.self_attn*",
|
| 41 |
"language_model.model.layers.10.self_attn*",
|
|
|
|
| 35 |
}
|
| 36 |
},
|
| 37 |
"ignore": [
|
| 38 |
+
"language_model.lm_head",
|
| 39 |
"language_model.model.layers.0.self_attn*",
|
| 40 |
"language_model.model.layers.1.self_attn*",
|
| 41 |
"language_model.model.layers.10.self_attn*",
|
hf_quant_config.json
CHANGED
|
@@ -8,7 +8,7 @@
|
|
| 8 |
"kv_cache_quant_algo": "FP8",
|
| 9 |
"group_size": 16,
|
| 10 |
"exclude_modules": [
|
| 11 |
-
"language_model.model.lm_head",
|
| 12 |
"language_model.model.layers.0.self_attn*",
|
| 13 |
"language_model.model.layers.1.self_attn*",
|
| 14 |
"language_model.model.layers.10.self_attn*",
|
|
|
|
| 8 |
"kv_cache_quant_algo": "FP8",
|
| 9 |
"group_size": 16,
|
| 10 |
"exclude_modules": [
|
| 11 |
+
"language_model.lm_head",
|
| 12 |
"language_model.model.layers.0.self_attn*",
|
| 13 |
"language_model.model.layers.1.self_attn*",
|
| 14 |
"language_model.model.layers.10.self_attn*",
|