{
  "architectures": [
    "MiniCPMSALAForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "attn_use_output_gate": true,
  "attn_use_rope": false,
  "auto_map": {
    "AutoConfig": "configuration_minicpm_sala.MiniCPMSALAConfig",
    "AutoModel": "modeling_minicpm_sala.MiniCPMSALAModel",
    "AutoModelForCausalLM": "modeling_minicpm_sala.MiniCPMSALAForCausalLM",
    "AutoModelForSeq2SeqLM": "modeling_minicpm_sala.MiniCPMSALAForCausalLM",
    "AutoModelForSequenceClassification": "modeling_minicpm_sala.MiniCPMSALAForSequenceClassification"
  },
  "bos_token_id": 1,
  "dim_model_base": 256,
  "dtype": "bfloat16",
  "eos_token_id": [
    2,
    73440
  ],
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.1,
  "intermediate_size": 16384,
  "lightning_head_dim": 128,
  "lightning_nh": 32,
  "lightning_nkv": 32,
  "lightning_scale": "1/sqrt(d)",
  "lightning_use_rope": true,
  "max_position_embeddings": 524288,
  "mixer_types": [
    "minicpm4",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "minicpm4",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "minicpm4",
    "minicpm4",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "minicpm4",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "lightning-attn",
    "minicpm4",
    "minicpm4",
    "minicpm4"
  ],
  "model_type": "minicpm_sala",
  "mup_denominator": 32,
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 2,
  "pad_token_id": 2,
  "pretraining_tp": 1,
  "qk_norm": true,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "scale_depth": 1.4,
  "scale_emb": 12,
  "shift_labels": true,
  "sparse_config": {
    "block_size": 64,
    "dense_len": 8192,
    "init_blocks": 1,
    "kernel_size": 32,
    "kernel_stride": 16,
    "topk": 64,
    "use_nope": false,
    "window_size": 2048
  },
  "tie_word_embeddings": false,
  "transformers_version": "4.57.1",
  "use_cache": true,
  "use_output_gate": true,
  "use_output_norm": true,
  "vocab_size": 73448,
  "quantization_config": {
    "config_groups": {
      "group_0": {
        "input_activations": {
          "dynamic": false,
          "num_bits": 4,
          "type": "float",
          "group_size": 16
        },
        "weights": {
          "dynamic": false,
          "num_bits": 4,
          "type": "float",
          "group_size": 16
        },
        "targets": [
          "Linear"
        ]
      }
    },
    "ignore": [
      "lm_head"
    ],
    "quant_algo": "NVFP4",
    "producer": {
      "name": "modelopt",
      "version": "0.42.0"
    },
    "quant_method": "modelopt"
  }
}