GLM-4.7-PRISM-AWQ / config.json
Commit 93d3f7c: Upload vLLM-compatible repacked AWQ int32 format
{
"architectures": [
"Glm4MoeForCausalLM"
],
"attention_bias": true,
"attention_dropout": 0.0,
"dtype": "bfloat16",
"eos_token_id": [
151329,
151336,
151338
],
"first_k_dense_replace": 3,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 5120,
"initializer_range": 0.02,
"intermediate_size": 12288,
"max_position_embeddings": 202752,
"model_type": "glm4_moe",
"moe_intermediate_size": 1536,
"n_group": 1,
"n_routed_experts": 160,
"n_shared_experts": 1,
"norm_topk_prob": true,
"num_attention_heads": 96,
"num_experts_per_tok": 8,
"num_hidden_layers": 92,
"num_key_value_heads": 8,
"num_nextn_predict_layers": 0,
"pad_token_id": 151329,
"partial_rotary_factor": 0.5,
"quantization_config": {
"bits": 4,
"checkpoint_format": "llm-awq",
"desc_act": false,
"format": "llm-awq",
"group_size": 128,
"lm_head": false,
"meta": {
"act_group_aware": false,
"auto_forward_data_parallel": true,
"damp_auto_increment": 0.01,
"damp_percent": 0.05,
"failsafe": {
"smooth": {
"group_size_threshold": 128,
"k": 2.75,
"type": "mad"
},
"strategy": "rtn",
"threshold": "0.5%"
},
"gc_mode": "interval",
"gptaq": null,
"hessian": {
"chunk_bytes": null,
"chunk_size": null,
"staging_dtype": "float32"
},
"mock_quantization": false,
"mse": 0.0,
"offload_to_disk": true,
"offload_to_disk_path": "/scratch/gptqmodel_offload",
"pack_impl": "cpu",
"quantizer": [
"gptqmodel:5.7.0"
],
"static_groups": false,
"true_sequential": true,
"uri": "https://github.com/modelcloud/gptqmodel",
"vram_strategy": "exclusive",
"wait_for_submodule_finalizers": false
},
"pack_dtype": "int32",
"quant_method": "awq",
"version": "llm-awq",
"zero_point": false
},
"rms_norm_eps": 1e-05,
"rope_scaling": null,
"rope_theta": 1000000,
"routed_scaling_factor": 2.5,
"tie_word_embeddings": false,
"topk_group": 1,
"transformers_version": "4.57.3",
"use_cache": true,
"use_qk_norm": true,
"vocab_size": 151552
}
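The commit message advertises vLLM compatibility for this repacked 4-bit AWQ checkpoint ("quant_method": "awq", group_size 128, int32 pack_dtype). A minimal offline-inference sketch is below; the repo id nullswan/GLM-4.7-PRISM-AWQ is inferred from the page header, and the tensor_parallel_size, max_model_len, prompt, and sampling values are illustrative assumptions rather than tested settings for this model.

# Minimal sketch: load this AWQ checkpoint with vLLM's offline LLM API.
# Assumptions: repo id "nullswan/GLM-4.7-PRISM-AWQ" (inferred from the page header);
# tensor_parallel_size and max_model_len below are placeholders, not verified values.
from vllm import LLM, SamplingParams

llm = LLM(
    model="nullswan/GLM-4.7-PRISM-AWQ",
    quantization="awq",          # matches "quant_method": "awq" in quantization_config
    tensor_parallel_size=8,      # a 92-layer, 160-expert MoE will not fit on a single GPU
    max_model_len=32768,         # well under max_position_embeddings (202752)
    trust_remote_code=True,
)

params = SamplingParams(temperature=0.7, max_tokens=256)
outputs = llm.generate(["Explain AWQ 4-bit quantization in one paragraph."], params)
print(outputs[0].outputs[0].text)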