{ "model_type": "llama", "architectures": [ "DeepForCausalLM" ], "version": "0.13.0", "vocab_size": 32000, "hidden_size": 2048, "intermediate_size": 5632, "num_hidden_layers": 24, "num_attention_heads": 16, "num_key_value_heads": 8, "max_position_embeddings": 2048, "rope_theta": 10000.0, "rms_norm_eps": 1e-06, "attention_dropout": 0.0, "hidden_act": "silu", "tie_word_embeddings": true, "initializer_range": 0.02, "pad_token_id": 1, "bos_token_id": 2, "eos_token_id": 0, "use_token_routed_mlp": true, "num_experts": 4, "use_qk_norm": true, "use_sdpa": true, "sliding_window": null, "dynamics_alpha": 0.9, "dynamics_beta": 0.1, "dynamics_gate": 0.5, "dynamics_dt": 0.1, "dynamics_controller_hidden": 64, "parameters": "~1.5B", "innovations": [ "KQV Order (v0.13.0) - industry standard like Qwen, Llama, GPT", "Mu-Guided KQV (INL 2025) - mu biases K, Q, AND V", "Mu-Guided Expert Routing - mu influences MLP expert selection", "Mu Residual Highway - accumulated context across layers", "Token-Routed MLP with mu override", "INL Dynamics with velocity tracking", "Grouped Query Attention (GQA)", "RoPE positional embeddings", "QK Normalization", "Flash Attention (SDPA)" ] }