{
  "model_name": "Qwen3-8B",
  "model_type": "llm-qwen3",
  "vm_cfg": null,
  "mm_cfg": null,
  "lm_cfg": {
    "model_type": "qwen3",
    "data_type": "bfloat16",
    "arch": "qwen",
    "gen": "3",
    "size": "8b",
    "token_cfg": {
      "vocab_size": 151936
    },
    "rope_cfg": {
      "rope_theta": 1000000,
      "rope_local_base_freq": 1000000,
      "rope_scaling": {
        "factor": 1.0,
        "low_freq_factor": 0,
        "high_freq_factor": 0,
        "original_max_position_embeddings": 0,
        "long_factor": null,
        "short_factor": null,
        "rope_type": "default"
      }
    },
    "attn_cfg": {
      "num_attention_heads": 32,
      "num_key_value_heads": 8,
      "head_dim": 128,
      "swa_enable": false,
      "swa_ratio": 0,
      "sliding_window": null,
      "attention_bias": false,
      "attention_dropout": 0.0,
      "query_pre_attn_scalar": 0
    },
    "mlp_cfg": {
      "intermediate_size": 12288,
      "act": "silu",
      "num_layers": 3,
      "mlp_bias": false
    },
    "hidden_size": 4096,
    "num_hidden_layers": 36,
    "max_position_embeddings": 2048,
    "rms_norm_eps": 1e-06,
    "rms_norm_unit_offset": false,
    "layer_norms": [
      "pre_attn",
      "post_attn",
      "qk_norm"
    ],
    "attn_logit_softcapping": null,
    "final_logit_softcapping": null,
    "lm_head_num_splits": 3,
    "lm_head_split_dim": 50656
  },
  "pipeline_cfg": {
    "system_prompt": null,
    "chat_template": null,
    "max_num_tokens": 2048,
    "input_token_group_size": 128,
    "input_token_group_offsets": [
      0,
      128,
      256,
      384,
      512,
      640,
      768,
      896,
      1024,
      1152,
      1280,
      1408,
      1536,
      1664,
      1792,
      1920
    ],
    "future_token_mask_size": 128,
    "return_logits": false,
    "use_strided_kv_cache": false,
    "enable_filter_sharing": true
  },
  "language_model_name": "Qwen3-8B_language"
}