GLM2NSA / config.json
Commit d97e95b (verified): Changed to correct architecture name for vLLM
{
  "add_bias_linear": false,
  "add_qkv_bias": true,
  "apply_query_key_layer_scaling": true,
  "apply_residual_connection_post_layernorm": false,
  "architectures": [
    "ChatGLM2NSAForCausalLM"
  ],
  "attention_dropout": 0.0,
  "attention_softmax_in_fp32": true,
  "attn_implementation": "nsa",
  "auto_map": {
    "AutoConfig": "configuration_chatglm.ChatGLMConfig",
    "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
    "AutoModelForCausalLM": "modeling_chatglm.ChatGLMForConditionalGeneration",
    "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration",
    "AutoModelForSequenceClassification": "modeling_chatglm.ChatGLMForSequenceClassification"
  },
  "bias_dropout_fusion": true,
  "block_size": 64,
  "classifier_dropout": null,
  "dtype": "bfloat16",
  "eos_token_id": [
    151329,
    151336,
    151338
  ],
  "ffn_hidden_size": 13696,
  "fp32_residual_connection": false,
  "hidden_dropout": 0.0,
  "hidden_size": 4096,
  "init_blocks": 1,
  "kernel_size": 64,
  "kernel_stride": 64,
  "kv_channels": 128,
  "layernorm_epsilon": 1.5625e-07,
  "local_blocks": 2,
  "model_type": "chatglm",
  "multi_query_attention": true,
  "multi_query_group_num": 2,
  "num_attention_heads": 32,
  "num_hidden_layers": 40,
  "num_layers": 40,
  "original_rope": true,
  "pad_token_id": 151329,
  "padded_vocab_size": 151552,
  "post_layer_norm": true,
  "rmsnorm": true,
  "rope_ratio": 500,
  "seq_length": 131072,
  "tie_word_embeddings": false,
  "topk": 16,
  "transformers_version": "4.56.1",
  "use_cache": true,
  "vocab_size": 151552,
  "window_size": 512
}
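
A minimal loading sketch for this checkpoint with transformers: the "auto_map" above routes the Auto classes to the repo's bundled configuration_chatglm.py / modeling_chatglm.py, so trust_remote_code=True is required. The repo id below is a placeholder assumption, not something this file confirms.

    # Minimal sketch. "Maxtimer97/GLM2NSA" is a hypothetical repo id;
    # substitute the actual model path.
    import torch
    from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

    model_id = "Maxtimer97/GLM2NSA"  # placeholder path

    # auto_map points at custom code shipped with the checkpoint, so the
    # Auto classes need trust_remote_code=True to resolve ChatGLMConfig /
    # ChatGLMForConditionalGeneration.
    config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)

    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,  # matches "dtype": "bfloat16" above
        trust_remote_code=True,
    )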
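
Since the commit sets the architecture name for vLLM, here is a hedged offline-inference sketch. It assumes a vLLM build that actually registers an implementation under the ChatGLM2NSAForCausalLM name (vLLM resolves models by the "architectures" entry); the repo id is again a placeholder.

    # Assumes vLLM has (or has been patched with) a ChatGLM2NSAForCausalLM
    # implementation registered under that architecture name.
    from vllm import LLM, SamplingParams

    llm = LLM(
        model="Maxtimer97/GLM2NSA",  # placeholder repo id
        trust_remote_code=True,
        dtype="bfloat16",
    )
    outputs = llm.generate(["Hello"], SamplingParams(max_tokens=32))
    print(outputs[0].outputs[0].text)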