{ "_external_rope_config_kwargs": {}, "architectures": [ "Glm4MoeLiteForCausalLM" ], "attention_bias": false, "attention_dropout": 0.0, "attn_mechanism": "ragged_page_attention_v3", "backend": null, "bits": null, "blocksize_b": 1, "blocksize_k": 128, "blocksize_q": 128, "bos_token_id": 0, "decode_attn_mechanism": null, "dtype": "bfloat16", "easy_method": "train", "eos_token_id": [ 154820, 154827, 154829 ], "fcm_max_ratio": 0.0, "fcm_min_ratio": 0.0, "first_k_dense_replace": 1, "flash_attention_backward_pass_impl": "triton", "freq_max_position_embeddings": 65536, "fsdp_is_ep_bound": true, "gradient_checkpointing": "", "gradient_checkpointing_targets": null, "hardware_abstraction": true, "head_dim": 64, "hidden_act": "silu", "hidden_size": 2048, "initializer_range": 0.02, "intermediate_size": 10240, "kv_cache_quantization_config": null, "kv_cache_sharding_sequence_axis_name": "sp", "kv_lora_rank": 512, "mask_max_position_embeddings": 65536, "max_position_embeddings": 202752, "mlp_layer_types": [ "dense", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse", "sparse" ], "model_type": "glm4_moe_lite", "moe_force_xla_gmm": false, "moe_intermediate_size": 1536, "moe_method": "fused_moe", "moe_tiling_size_batch": 4, "moe_tiling_size_dim": 128, "moe_tiling_size_seqlen": 128, "n_group": 1, "n_routed_experts": 64, "n_shared_experts": 1, "norm_topk_prob": true, "num_attention_heads": 20, "num_experts_per_tok": 4, "num_hidden_layers": 47, "num_key_value_heads": 20, "num_nextn_predict_layers": 1, "operation_configs": null, "pad_token_id": 154820, "pallas_k_block_size": 128, "pallas_m_block_size": 128, "pallas_n_block_size": 128, "partial_rotary_factor": 1.0, "partition_axis": { "attention_dim_axis": null, "attention_kv_dim_axis": null, "batch_axis": [ "fsdp", "dp" ], "bias_head_sequence_axis": null, "bias_key_sequence_axis": null, "data_parallel_axis": "dp", "decode_attention_dim_axis": null, "decode_attention_kv_dim_axis": null, "decode_batch_axis": [ "fsdp", "dp" ], "decode_head_axis": "tp", "decode_key_sequence_axis": "sp", "decode_kv_head_axis": "tp", "decode_query_sequence_axis": null, "expert_axis": "ep", "expert_gate_axis": null, "expert_parallel_axis": "ep", "fully_sharded_data_parallel_axis": "fsdp", "head_axis": "tp", "hidden_state_axis": "tp", "key_sequence_axis": "sp", "kv_head_axis": "tp", "mlp_intermediate_axis": "tp", "query_sequence_axis": "sp", "sequence_axis": "sp", "sequence_parallel_axis": "sp", "tensor_parallel_axis": "tp", "vocab_axis": "tp" }, "platform": null, "precompute_masks": true, "pretraining_tp": 1, "q_lora_rank": 768, "qk_head_dim": 256, "qk_nope_head_dim": 192, "qk_rope_head_dim": 64, "quantization_config": { "dtype": "nf4", "group_size": 128, "jax_native": false }, "rms_norm_eps": 1e-05, "rope_interleave": true, "rope_parameters": { "partial_rotary_factor": 1.0, "rope_theta": 10000.0, "rope_type": "default" }, "rope_theta": 1000000, "routed_scaling_factor": 1.8, "scan_attention_layers": false, "scan_mlp_chunk_size": 1024, "scan_ring_attention": true, "sequence_axis_name": "sp", "sharding_axis_dims": [ 1, 1, 1, -1, 1 ], "sharding_axis_names": [ "dp", "fsdp", "ep", "tp", "sp" ], 
"sharding_dcn_axis_dims": null, "sp_is_ep_bound": true, "tie_word_embeddings": false, "topk_group": 1, "topk_method": "noaux_tc", "transformers_version": "5.0.0", "use_cache": true, "use_expert_tensor_mode": false, "use_ring_of_experts": false, "use_scan_mlp": false, "use_sharded_kv_caching": false, "use_sharding_constraint": false, "v_head_dim": 256, "vocab_size": 154880 }