quamba-chat-w4a8 / config.json
{
    "d_model": 2560,
    "d_intermediate": 0,
    "n_layer": 64,
    "vocab_size": 50277,
    "ssm_cfg": {
        "layer": "W4A8QMamba"
    },
    "attn_layer_idx": [],
    "attn_cfg": {},
    "rms_norm": true,
    "residual_in_fp32": true,
    "fused_add_norm": true,
    "pad_vocab_size_multiple": 8,
    "tie_embeddings": false,
    "norm_cfg": {
        "norm": "QRMSNorm"
    },
    "embedding_cfg": {
        "layer": "W4O16Embedding"
    },
    "lm_head_cfg": {
        "layer": "W4A8B16O16Linear"
    }
}
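
A minimal sketch of reading this config with Python's standard json module. The file path is an assumption for illustration, and instantiating the actual model would require the Quamba codebase, which is not shown here; the W4/A8/B16/O16 tags in the layer names are read below as the conventional weight / activation / bias / output bit-widths.

import json

# Load the Quamba W4A8 model config (path is an assumption for illustration).
with open("quamba-chat-w4a8/config.json") as f:
    cfg = json.load(f)

# Core architecture: a 64-layer Mamba backbone with hidden width 2560.
print(cfg["d_model"], cfg["n_layer"])  # 2560 64

# Per-component quantization scheme, read from the nested *_cfg blocks:
#   ssm_cfg.layer       -> "W4A8QMamba"       (4-bit weights, 8-bit activations)
#   norm_cfg.norm       -> "QRMSNorm"         (quantized RMSNorm)
#   embedding_cfg.layer -> "W4O16Embedding"   (4-bit weights, 16-bit outputs)
#   lm_head_cfg.layer   -> "W4A8B16O16Linear" (4-bit W, 8-bit A, 16-bit bias/output)
for block in ("ssm_cfg", "norm_cfg", "embedding_cfg", "lm_head_cfg"):
    print(block, cfg[block])

# Effective vocabulary: vocab_size rounded up to pad_vocab_size_multiple.
vocab = cfg["vocab_size"]
mult = cfg["pad_vocab_size_multiple"]
padded_vocab = ((vocab + mult - 1) // mult) * mult
print(padded_vocab)  # 50280

Note that "attn_layer_idx" is empty and "attn_cfg" is {}, so every layer is a quantized SSM block and no attention layers are interleaved.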