{
  "allow_neg_eigval": false,
  "architectures": [
    "QuasarForCausalLM"
  ],
  "attn_mode": "chunk",
  "auto_map": {
    "AutoConfig": "configuration_quasar.QuasarConfig",
    "AutoModelForCausalLM": "modeling_quasar.QuasarForCausalLM"
  },
  "bigmac_r": 0.25,
  "bos_token_id": 1,
  "conv_bias": false,
  "conv_size": 4,
  "d_ff": 4096,
  "d_model": 1536,
  "dense_input_layers": 4,
  "dropout": 0.0,
  "dtype": "bfloat16",
  "eos_token_id": 2,
  "expand_k": 0.5,
  "expand_v": 1.0,
  "fuse_cross_entropy": true,
  "fuse_norm": true,
  "fuse_swiglu": true,
  "gated_layers": 2,
  "gla_mode": "chunk",
  "gradient_checkpointing": false,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_ratio": 4,
  "hidden_size": 1536,
  "hybrid_layer_types": [
    "quasar",
    "quasar",
    "quasar",
    "quasar",
    "gla",
    "gla",
    "quasar",
    "quasar",
    "quasar",
    "quasar",
    "gla",
    "gla",
    "quasar",
    "quasar",
    "quasar",
    "quasar",
    "gla",
    "gla",
    "quasar",
    "quasar",
    "quasar",
    "quasar",
    "gla",
    "gla"
  ],
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_types": [
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention",
    "linear_attention"
  ],
  "looped_injection_init": 0.1,
  "max_position_embeddings": 16384,
  "max_seq_len": 16384,
  "memory_dim": 128,
  "memory_slots": 128,
  "model_type": "quasar",
  "moe_aux_loss_coeff": 0.0001,
  "moe_type": "bigmac",
  "moe_z_loss_coeff": 0.0001,
  "n_heads": 12,
  "n_layers": 24,
  "norm_eps": 1e-06,
  "num_attention_heads": 12,
  "num_heads": 12,
  "num_hidden_layers": 24,
  "num_key_value_heads": 12,
  "num_loops": 1,
  "num_routed_experts": 64,
  "num_shared_experts": 1,
  "num_v_heads": null,
  "pad_token_id": null,
  "quasar_layers": 4,
  "residual_scale": 0.1,
  "rms_norm_eps": 1e-06,
  "rope_theta": 1000000.0,
  "routed_expert_size": 256,
  "shared_expert_size": 3072,
  "smebu_beta": 0.5,
  "smebu_kappa": 2.0,
  "smebu_lambda": 0.002,
  "tie_word_embeddings": false,
  "top_k": 4,
  "transformers_version": "5.7.0",
  "use_cache": true,
  "use_gla_first": false,
  "use_l2warp": false,
  "use_looped_injection": false,
  "use_short_conv": true,
  "vocab_size": 248320
}
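The `hybrid_layer_types` list above is a regular schedule: four `quasar` layers followed by two `gla` layers, repeated four times to cover all 24 layers. A minimal sketch that reconstructs that schedule and checks a few consistency constraints implied by the values above; it assumes only the Python standard library and that this file is saved as `config.json` (the path is illustrative):

```python
import json

# Load the configuration shown above (the filename is an assumption).
with open("config.json") as f:
    cfg = json.load(f)

# The hybrid schedule is four "quasar" layers followed by two "gla" layers,
# repeated until num_hidden_layers (24) is covered: 24 // 6 = 4 repeats.
expected = (["quasar"] * 4 + ["gla"] * 2) * (cfg["num_hidden_layers"] // 6)
assert cfg["hybrid_layer_types"] == expected

# Per-head width times head count equals the model width: 128 * 12 == 1536.
assert cfg["head_dim"] * cfg["num_attention_heads"] == cfg["hidden_size"]

# The legacy-style and Hugging Face-style aliases in this config agree.
assert cfg["n_layers"] == cfg["num_hidden_layers"] == len(cfg["layer_types"])
assert cfg["d_model"] == cfg["hidden_size"]
assert cfg["d_ff"] == cfg["intermediate_size"]

print("config invariants hold")
```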
|
|
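Because `auto_map` points at `configuration_quasar.QuasarConfig` and `modeling_quasar.QuasarForCausalLM`, a checkpoint carrying this config is loaded through the Transformers auto classes with `trust_remote_code=True`. A minimal sketch; the repository id is a placeholder, and the custom `configuration_quasar.py` / `modeling_quasar.py` modules are assumed to ship alongside this config:

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

# Hypothetical repo id; replace with the actual Hub repo or local directory.
repo = "path/to/quasar-checkpoint"

# trust_remote_code lets the auto classes resolve the custom QuasarConfig /
# QuasarForCausalLM classes named in "auto_map" above.
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    config=config,
    torch_dtype=torch.bfloat16,  # matches "dtype": "bfloat16" in the config
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)

inputs = tokenizer("Hello", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```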