{ "model_name": "LFM2-2.6B", "model_type": "llm-lfm2", "vm_cfg": null, "mm_cfg": null, "lm_cfg": { "model_type": "lfm2", "data_type": "bfloat16", "arch": "lfm", "token_cfg": { "vocab_size": 65536 }, "rope_cfg": { "rope_theta": 1000000.0, "rope_local_base_freq": 10000, "rope_scaling": { "factor": 1.0, "low_freq_factor": 0, "high_freq_factor": 0, "original_max_position_embeddings": 0, "long_factor": null, "short_factor": null, "rope_type": "default", "mrope_section": null, "mrope_interleaved": false } }, "attn_cfg": { "num_attention_heads": 32, "num_key_value_heads": 8, "head_dim": 64, "swa_enable": false, "sliding_window": 0, "attention_bias": false, "attention_dropout": 0.0, "query_pre_attn_scalar": 0 }, "mlp_cfg": { "intermediate_size": 10752, "act": "silu", "num_layers": 3, "mlp_bias": false }, "hidden_size": 2048, "num_hidden_layers": 30, "max_position_embeddings": 2048, "rms_norm_eps": 1e-05, "rms_norm_unit_offset": false, "layer_types": [ "conv", "conv", "full_attention", "conv", "conv", "full_attention", "conv", "conv", "conv", "full_attention", "conv", "conv", "conv", "full_attention", "conv", "conv", "conv", "full_attention", "conv", "conv", "conv", "full_attention", "conv", "conv", "full_attention", "conv", "conv", "full_attention", "conv", "conv" ], "attn_logit_softcapping": null, "final_logit_softcapping": null, "lm_head_num_splits": 1, "lm_head_split_dim": 65536, "conv_L_cache": 3, "conv_bias": false, "lora_cfg": null }, "pipeline_cfg": { "system_prompt": null, "chat_template": null, "max_num_tokens": 2048, "input_token_group_size": 128, "input_token_group_offsets": [ 0, 128, 256, 384, 512, 640, 768, 896, 1024, 1152, 1280, 1408, 1536, 1664, 1792, 1920 ], "future_token_mask_size": 128, "return_logits": true, "use_strided_kv_cache": false, "enable_filter_sharing": false, "quantize_embeddings": false, "split_mlp": true }, "language_model_name": "LFM2-2.6B_language" }