test03152 / config.json
{
"architectures": [
"MiniCPMSALAForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"attn_use_output_gate": true,
"attn_use_rope": false,
"auto_map": {
"AutoConfig": "configuration_minicpm_sala.MiniCPMSALAConfig",
"AutoModel": "modeling_minicpm_sala.MiniCPMSALAModel",
"AutoModelForCausalLM": "modeling_minicpm_sala.MiniCPMSALAForCausalLM",
"AutoModelForSeq2SeqLM": "modeling_minicpm_sala.MiniCPMSALAForCausalLM",
"AutoModelForSequenceClassification": "modeling_minicpm_sala.MiniCPMSALAForSequenceClassification"
},
"bos_token_id": 1,
"dim_model_base": 256,
"dtype": "bfloat16",
"eos_token_id": [
2,
73440
],
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 4096,
"initializer_range": 0.1,
"intermediate_size": 16384,
"lightning_head_dim": 128,
"lightning_nh": 32,
"lightning_nkv": 32,
"lightning_scale": "1/sqrt(d)",
"lightning_use_rope": true,
"max_position_embeddings": 524288,
"mixer_types": [
"minicpm4",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"minicpm4",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"minicpm4",
"minicpm4",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"minicpm4",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"lightning-attn",
"minicpm4",
"minicpm4",
"minicpm4"
],
"model_type": "minicpm_sala",
"mup_denominator": 32,
"num_attention_heads": 32,
"num_hidden_layers": 32,
"num_key_value_heads": 2,
"pad_token_id": 2,
"pretraining_tp": 1,
"qk_norm": true,
"quantization_config": {
"bits": 4,
"checkpoint_format": "gptq",
"desc_act": false,
"dynamic": {
"+:model\\.model\\.layers\\.0\\..*": {
"bits": 8
}
},
"format": "gptq",
"group_size": 128,
"lm_head": false,
"meta": {
"act_group_aware": true,
"auto_forward_data_parallel": true,
"damp_auto_increment": 0.01,
"damp_percent": 0.05,
"failsafe": {
"smooth": {
"group_size_threshold": 128,
"k": 2.75,
"type": "mad"
},
"strategy": "rtn",
"threshold": "0.5%"
},
"gc_mode": "interval",
"gptaq": null,
"hessian": {
"chunk_bytes": null,
"chunk_size": null,
"staging_dtype": "float32"
},
"mock_quantization": false,
"mse": 0.0,
"offload_to_disk": true,
"offload_to_disk_path": "./gptqmodel_offload/hqdpgrum-rkaakpxx/",
"pack_impl": "cpu",
"quantizer": [
"gptqmodel:5.7.0"
],
"static_groups": false,
"true_sequential": true,
"uri": "https://github.com/modelcloud/gptqmodel",
"vram_strategy": "exclusive",
"wait_for_submodule_finalizers": false
},
"pack_dtype": "int32",
"quant_method": "gptq",
"sym": true
},
"rms_norm_eps": 1e-06,
"rope_scaling": null,
"rope_theta": 10000.0,
"scale_depth": 1.4,
"scale_emb": 12,
"shift_labels": true,
"sparse_config": {
"block_size": 64,
"dense_len": 8192,
"init_blocks": 1,
"kernel_size": 32,
"kernel_stride": 16,
"topk": 64,
"use_nope": false,
"window_size": 2048
},
"tie_word_embeddings": false,
"transformers_version": "4.57.1",
"use_cache": true,
"use_output_gate": true,
"use_output_norm": true,
"vocab_size": 73448
}
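Below is a minimal loading sketch, not taken from the repository itself. It assumes the hub id is "tpoisonooo/test03152" (inferred from the page header and unverified), that the custom modeling code referenced under "auto_map" ships with the repo, and that a GPTQ-capable backend such as the gptqmodel package named in "meta.quantizer" (plus optimum) is installed so transformers can dequantize the 4-bit weights.

```python
# Hedged example: repo id and installed-backend assumptions are noted above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "tpoisonooo/test03152"  # assumption: the actual hub id may differ

tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,       # required: classes come from modeling_minicpm_sala.py via "auto_map"
    torch_dtype=torch.bfloat16,   # matches "dtype": "bfloat16" in this config
    device_map="auto",
)

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```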
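For readers inspecting the hybrid layer layout and the per-layer quantization override, the following self-contained sketch (not from the repo) reads this config.json and summarizes both. The interpretation of the "+:<regex>" key follows GPTQModel's dynamic-rule convention as I understand it, so treat it as illustrative.

```python
import json
from collections import Counter

with open("config.json") as f:
    cfg = json.load(f)

# One mixer entry per transformer layer: 32 entries for num_hidden_layers = 32.
mixers = cfg["mixer_types"]
assert len(mixers) == cfg["num_hidden_layers"]
print(Counter(mixers))  # Counter({'lightning-attn': 24, 'minicpm4': 8})

# Dynamic GPTQ rule: modules matched by the regex (layer 0) are kept at 8 bits;
# all other layers use the top-level 4-bit / group_size 128 setting.
for pattern, override in cfg["quantization_config"]["dynamic"].items():
    print(pattern, "->", override)
```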