{ "vocab_size": 1734, "d_model": 512, "n_heads": 8, "n_layers": 10, "n_shared_layers": 2, "d_ff": 2048, "max_seq_len": 192, "dropout": 0.05, "lora_rank": 16, "use_thinking_tokens": true, "n_thinking_steps": 4, "rope_base": 10000, "weight_tying": true }