{ "allow_neg_eigval": false, "architectures": [ "QuasarForCausalLM" ], "attn_mode": "chunk", "bigmac_r": 0.25, "bos_token_id": 2, "conv_bias": false, "conv_size": 4, "d_ff": 11008, "d_model": 4096, "dense_input_layers": 4, "dropout": 0.0, "dtype": "bfloat16", "eos_token_id": 1, "expand_k": 0.5, "expand_v": 1.0, "fuse_cross_entropy": true, "fuse_norm": true, "fuse_swiglu": true, "gated_layers": 2, "gla_mode": "chunk", "gradient_checkpointing": false, "head_dim": 128, "hidden_act": "silu", "hidden_ratio": 4, "hidden_size": 4096, "hybrid_layer_types": [ "quasar", "quasar", "quasar", "quasar", "gla", "gla", "quasar", "quasar", "quasar", "quasar", "gla", "gla", "quasar", "quasar", "quasar", "quasar", "gla", "gla", "quasar", "quasar", "quasar", "quasar", "gla", "gla", "quasar", "quasar", "quasar", "quasar", "gla", "gla", "quasar", "quasar" ], "initializer_range": 0.02, "intermediate_size": 11008, "layer_types": [ "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention", "linear_attention" ], "looped_injection_init": 0.1, "max_position_embeddings": 16384, "max_seq_len": 16384, "memory_dim": 128, "memory_slots": 128, "model_type": "quasar", "moe_aux_loss_coeff": 0.0001, "moe_type": "bigmac", "moe_z_loss_coeff": 0.0001, "n_heads": 32, "n_layers": 32, "norm_eps": 1e-06, "num_attention_heads": 32, "num_heads": 32, "num_hidden_layers": 32, "num_key_value_heads": 32, "num_loops": 1, "num_routed_experts": 56, "num_shared_experts": 1, "num_v_heads": null, "pad_token_id": 0, "quasar_layers": 4, "residual_scale": 0.1, "rms_norm_eps": 1e-06, "rope_theta": 1000000.0, "routed_expert_size": 1024, "shared_expert_size": 2048, "smebu_beta": 0.5, "smebu_kappa": 2.0, "smebu_lambda": 0.002, "tie_word_embeddings": true, "top_k": 8, "transformers_version": "5.7.0", "use_cache": false, "use_gla_first": false, "use_l2warp": false, "use_looped_injection": false, "use_short_conv": true, "vocab_size": 262144 }