LongCat-Flash-Lite-FP8 / config.json
{
"architectures": [
"LongcatFlashNgramForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"auto_map": {
"AutoConfig": "configuration_longcat_ngram.LongcatFlashNgramConfig",
"AutoModel": "modeling_longcat_ngram.LongcatFlashNgramModel",
"AutoModelForCausalLM": "modeling_longcat_ngram.LongcatFlashNgramForCausalLM"
},
"bos_token_id": 1,
"disable_quant_module": [],
"emb_neighbor_num": 4,
"emb_split_num": 4,
"eos_token_id": 2,
"expert_ffn_hidden_size": 1024,
"ffn_hidden_size": 6144,
"hidden_size": 3072,
"kv_lora_rank": 512,
"max_position_embeddings": 327680,
"mla_scale_kv_lora": true,
"mla_scale_q_lora": true,
"moe_topk": 12,
"n_routed_experts": 256,
"ngram_vocab_size_ratio": 78,
"num_attention_heads": 32,
"num_layers": 14,
"q_lora_rank": 1536,
"qk_nope_head_dim": 128,
"qk_rope_head_dim": 64,
"quantization_config": {
"activation_scheme": "dynamic",
"fmt": "e4m3",
"ignored_layers": [
"lm_head",
"model.embed_tokens",
"model.layers.0.input_layernorm.0",
"model.layers.0.input_layernorm.1",
"model.layers.0.mlp.router.classifier",
"model.layers.0.mlp.router.e_score_correction_bias",
"model.layers.0.post_attention_layernorm.0",
"model.layers.0.post_attention_layernorm.1",
"model.layers.0.self_attn.0.kv_a_layernorm",
"model.layers.0.self_attn.0.q_a_layernorm",
"model.layers.0.self_attn.1.kv_a_layernorm",
"model.layers.0.self_attn.1.q_a_layernorm",
"model.layers.1.input_layernorm.0",
"model.layers.1.input_layernorm.1",
"model.layers.1.mlp.router.classifier",
"model.layers.1.mlp.router.e_score_correction_bias",
"model.layers.1.post_attention_layernorm.0",
"model.layers.1.post_attention_layernorm.1",
"model.layers.1.self_attn.0.kv_a_layernorm",
"model.layers.1.self_attn.0.q_a_layernorm",
"model.layers.1.self_attn.1.kv_a_layernorm",
"model.layers.1.self_attn.1.q_a_layernorm",
"model.layers.10.input_layernorm.0",
"model.layers.10.input_layernorm.1",
"model.layers.10.mlp.router.classifier",
"model.layers.10.mlp.router.e_score_correction_bias",
"model.layers.10.post_attention_layernorm.0",
"model.layers.10.post_attention_layernorm.1",
"model.layers.10.self_attn.0.kv_a_layernorm",
"model.layers.10.self_attn.0.q_a_layernorm",
"model.layers.10.self_attn.1.kv_a_layernorm",
"model.layers.10.self_attn.1.q_a_layernorm",
"model.layers.11.input_layernorm.0",
"model.layers.11.input_layernorm.1",
"model.layers.11.mlp.router.classifier",
"model.layers.11.mlp.router.e_score_correction_bias",
"model.layers.11.post_attention_layernorm.0",
"model.layers.11.post_attention_layernorm.1",
"model.layers.11.self_attn.0.kv_a_layernorm",
"model.layers.11.self_attn.0.q_a_layernorm",
"model.layers.11.self_attn.1.kv_a_layernorm",
"model.layers.11.self_attn.1.q_a_layernorm",
"model.layers.12.input_layernorm.0",
"model.layers.12.input_layernorm.1",
"model.layers.12.mlp.router.classifier",
"model.layers.12.mlp.router.e_score_correction_bias",
"model.layers.12.post_attention_layernorm.0",
"model.layers.12.post_attention_layernorm.1",
"model.layers.12.self_attn.0.kv_a_layernorm",
"model.layers.12.self_attn.0.q_a_layernorm",
"model.layers.12.self_attn.1.kv_a_layernorm",
"model.layers.12.self_attn.1.q_a_layernorm",
"model.layers.13.input_layernorm.0",
"model.layers.13.input_layernorm.1",
"model.layers.13.mlp.router.classifier",
"model.layers.13.mlp.router.e_score_correction_bias",
"model.layers.13.post_attention_layernorm.0",
"model.layers.13.post_attention_layernorm.1",
"model.layers.13.self_attn.0.kv_a_layernorm",
"model.layers.13.self_attn.0.q_a_layernorm",
"model.layers.13.self_attn.1.kv_a_layernorm",
"model.layers.13.self_attn.1.q_a_layernorm",
"model.layers.2.input_layernorm.0",
"model.layers.2.input_layernorm.1",
"model.layers.2.mlp.router.classifier",
"model.layers.2.mlp.router.e_score_correction_bias",
"model.layers.2.post_attention_layernorm.0",
"model.layers.2.post_attention_layernorm.1",
"model.layers.2.self_attn.0.kv_a_layernorm",
"model.layers.2.self_attn.0.q_a_layernorm",
"model.layers.2.self_attn.1.kv_a_layernorm",
"model.layers.2.self_attn.1.q_a_layernorm",
"model.layers.3.input_layernorm.0",
"model.layers.3.input_layernorm.1",
"model.layers.3.mlp.router.classifier",
"model.layers.3.mlp.router.e_score_correction_bias",
"model.layers.3.post_attention_layernorm.0",
"model.layers.3.post_attention_layernorm.1",
"model.layers.3.self_attn.0.kv_a_layernorm",
"model.layers.3.self_attn.0.q_a_layernorm",
"model.layers.3.self_attn.1.kv_a_layernorm",
"model.layers.3.self_attn.1.q_a_layernorm",
"model.layers.4.input_layernorm.0",
"model.layers.4.input_layernorm.1",
"model.layers.4.mlp.router.classifier",
"model.layers.4.mlp.router.e_score_correction_bias",
"model.layers.4.post_attention_layernorm.0",
"model.layers.4.post_attention_layernorm.1",
"model.layers.4.self_attn.0.kv_a_layernorm",
"model.layers.4.self_attn.0.q_a_layernorm",
"model.layers.4.self_attn.1.kv_a_layernorm",
"model.layers.4.self_attn.1.q_a_layernorm",
"model.layers.5.input_layernorm.0",
"model.layers.5.input_layernorm.1",
"model.layers.5.mlp.router.classifier",
"model.layers.5.mlp.router.e_score_correction_bias",
"model.layers.5.post_attention_layernorm.0",
"model.layers.5.post_attention_layernorm.1",
"model.layers.5.self_attn.0.kv_a_layernorm",
"model.layers.5.self_attn.0.q_a_layernorm",
"model.layers.5.self_attn.1.kv_a_layernorm",
"model.layers.5.self_attn.1.q_a_layernorm",
"model.layers.6.input_layernorm.0",
"model.layers.6.input_layernorm.1",
"model.layers.6.mlp.router.classifier",
"model.layers.6.mlp.router.e_score_correction_bias",
"model.layers.6.post_attention_layernorm.0",
"model.layers.6.post_attention_layernorm.1",
"model.layers.6.self_attn.0.kv_a_layernorm",
"model.layers.6.self_attn.0.q_a_layernorm",
"model.layers.6.self_attn.1.kv_a_layernorm",
"model.layers.6.self_attn.1.q_a_layernorm",
"model.layers.7.input_layernorm.0",
"model.layers.7.input_layernorm.1",
"model.layers.7.mlp.router.classifier",
"model.layers.7.mlp.router.e_score_correction_bias",
"model.layers.7.post_attention_layernorm.0",
"model.layers.7.post_attention_layernorm.1",
"model.layers.7.self_attn.0.kv_a_layernorm",
"model.layers.7.self_attn.0.q_a_layernorm",
"model.layers.7.self_attn.1.kv_a_layernorm",
"model.layers.7.self_attn.1.q_a_layernorm",
"model.layers.8.input_layernorm.0",
"model.layers.8.input_layernorm.1",
"model.layers.8.mlp.router.classifier",
"model.layers.8.mlp.router.e_score_correction_bias",
"model.layers.8.post_attention_layernorm.0",
"model.layers.8.post_attention_layernorm.1",
"model.layers.8.self_attn.0.kv_a_layernorm",
"model.layers.8.self_attn.0.q_a_layernorm",
"model.layers.8.self_attn.1.kv_a_layernorm",
"model.layers.8.self_attn.1.q_a_layernorm",
"model.layers.9.input_layernorm.0",
"model.layers.9.input_layernorm.1",
"model.layers.9.mlp.router.classifier",
"model.layers.9.mlp.router.e_score_correction_bias",
"model.layers.9.post_attention_layernorm.0",
"model.layers.9.post_attention_layernorm.1",
"model.layers.9.self_attn.0.kv_a_layernorm",
"model.layers.9.self_attn.0.q_a_layernorm",
"model.layers.9.self_attn.1.kv_a_layernorm",
"model.layers.9.self_attn.1.q_a_layernorm",
"model.mtp.embed_tokens",
"model.mtp.layers.0.enorm.m",
"model.mtp.layers.0.hnorm.m",
"model.mtp.layers.0.input_layernorm",
"model.mtp.layers.0.post_attention_layernorm",
"model.mtp.layers.0.self_attn.kv_a_layernorm",
"model.mtp.layers.0.self_attn.q_a_layernorm",
"model.mtp.norm",
"model.ngram_embeddings.embedders.0",
"model.ngram_embeddings.embedders.1",
"model.ngram_embeddings.embedders.10",
"model.ngram_embeddings.embedders.11",
"model.ngram_embeddings.embedders.2",
"model.ngram_embeddings.embedders.3",
"model.ngram_embeddings.embedders.4",
"model.ngram_embeddings.embedders.5",
"model.ngram_embeddings.embedders.6",
"model.ngram_embeddings.embedders.7",
"model.ngram_embeddings.embedders.8",
"model.ngram_embeddings.embedders.9",
"model.ngram_embeddings.post_projs.0",
"model.ngram_embeddings.post_projs.1",
"model.ngram_embeddings.post_projs.10",
"model.ngram_embeddings.post_projs.11",
"model.ngram_embeddings.post_projs.2",
"model.ngram_embeddings.post_projs.3",
"model.ngram_embeddings.post_projs.4",
"model.ngram_embeddings.post_projs.5",
"model.ngram_embeddings.post_projs.6",
"model.ngram_embeddings.post_projs.7",
"model.ngram_embeddings.post_projs.8",
"model.ngram_embeddings.post_projs.9",
"model.norm"
],
"quant_method": "fp8",
"weight_block_size": [
128,
128
]
},
"rms_norm_eps": 1e-05,
"rope_scaling": {
"beta_fast": 32,
"beta_slow": 1,
"factor": 10,
"mscale": 1,
"mscale_all_dim": 1,
"original_max_position_embeddings": 32768,
"rope_type": "yarn"
},
"rope_theta": 5000000.0,
"routed_scaling_factor": 6.0,
"torch_dtype": "bfloat16",
"transformers_version": "4.57.6",
"use_cache": true,
"v_head_dim": 128,
"vocab_size": 131072,
"zero_expert_num": 128,
"zero_expert_type": "identity"
}
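
The `auto_map` block routes `AutoConfig` / `AutoModelForCausalLM` to custom `LongcatFlashNgram` classes shipped inside the checkpoint, so loading this model requires `trust_remote_code=True`. A minimal loading sketch, assuming the upstream repo id `meituan-longcat/LongCat-Flash-Lite-FP8` that this file was duplicated from:

```python
# Minimal loading sketch. trust_remote_code=True is required because
# auto_map points at configuration_longcat_ngram.py / modeling_longcat_ngram.py
# in the repo rather than at classes built into transformers.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meituan-longcat/LongCat-Flash-Lite-FP8"  # assumed upstream repo id

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype="auto",  # config declares bfloat16 for non-quantized tensors
    device_map="auto",
)
```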
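A few derived quantities implied by the fields above. The formulas follow the usual MLA (multi-head latent attention) and MoE conventions that these field names suggest, and are assumptions rather than something read out of the repo's modeling code; the factor of two per layer reflects the paired `self_attn.0` / `self_attn.1` sub-blocks visible in `ignored_layers`.

```python
# Back-of-the-envelope numbers from the config. The MLA cache formula
# (kv_lora_rank + qk_rope_head_dim latents per token per attention block)
# and the router-candidate count (routed experts + identity "zero" experts)
# are assumptions based on how these fields are conventionally used.
qk_nope_head_dim = 128
qk_rope_head_dim = 64
kv_lora_rank = 512
num_layers = 14
attn_blocks_per_layer = 2   # self_attn.0 and self_attn.1 in ignored_layers
n_routed_experts = 256
zero_expert_num = 128
moe_topk = 12

qk_head_dim = qk_nope_head_dim + qk_rope_head_dim  # 192 per query/key head
kv_latents_per_token = (
    num_layers * attn_blocks_per_layer * (kv_lora_rank + qk_rope_head_dim)
)  # 16128 cached latent values per token
router_candidates = n_routed_experts + zero_expert_num  # 384; moe_topk=12 fire per token

print(qk_head_dim, kv_latents_per_token, router_candidates)
```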
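`"quant_method": "fp8"` with `"weight_block_size": [128, 128]` describes block-wise FP8 (e4m3) weights: each 128x128 tile of a weight matrix shares one scale, while the layers listed in `ignored_layers` (norms, router classifiers, embeddings, the MTP and n-gram modules, `lm_head`) stay unquantized. A hedged dequantization sketch; the scale-tensor layout is an assumption based on the common block-wise FP8 convention, not something this config spells out:

```python
import torch

def dequantize_block_fp8(w_fp8: torch.Tensor, scales: torch.Tensor,
                         block: int = 128) -> torch.Tensor:
    """Rebuild a bfloat16 weight from e4m3 values plus per-block scales.

    w_fp8:  (rows, cols) tensor stored as torch.float8_e4m3fn
    scales: (ceil(rows/block), ceil(cols/block)) float32, one scale per tile
    """
    rows, cols = w_fp8.shape
    # Broadcast each tile's scale over its 128x128 region, trimming the
    # ragged edge when rows/cols are not multiples of the block size.
    s = scales.repeat_interleave(block, dim=0)[:rows]
    s = s.repeat_interleave(block, dim=1)[:, :cols]
    return (w_fp8.to(torch.float32) * s).to(torch.bfloat16)
```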
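The `rope_scaling` block applies a YaRN extension with factor 10 over the original 32768-token pre-training window, which matches the declared `max_position_embeddings` exactly:

```python
# Consistency check on the YaRN context extension declared in rope_scaling.
original_max_position_embeddings = 32768
factor = 10
assert original_max_position_embeddings * factor == 327680  # max_position_embeddings
```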