gpt-oss-120b / config.json
{
"_external_rope_config_kwargs": {},
"architectures": [
"GptOssForCausalLM"
],
"attention_bias": true,
"attention_dropout": 0.0,
"attn_mechanism": "vanilla",
"backend": null,
"bits": null,
"blocksize_b": 1,
"blocksize_k": 128,
"blocksize_q": 128,
"decode_attn_mechanism": null,
"dtype": "bfloat16",
"easy_method": "train",
"eos_token_id": 200002,
"experts_per_token": 4,
"fcm_max_ratio": 0.0,
"fcm_min_ratio": 0.0,
"flash_attention_backward_pass_impl": "triton",
"fsdp_is_ep_bound": true,
"gradient_checkpointing": "",
"gradient_checkpointing_targets": null,
"hardware_abstraction": false,
"head_dim": 64,
"hidden_act": "silu",
"hidden_size": 2880,
"initial_context_length": 4096,
"initializer_range": 0.02,
"intermediate_size": 2880,
"kv_cache_quantization_config": null,
"kv_cache_sharding_sequence_axis_name": "sp",
"layer_types": [
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"full_attention"
],
"max_position_embeddings": 131072,
"mlp_activations_limit": 7.0,
"model_type": "gpt_oss",
"moe_force_xla_gmm": false,
"moe_method": "fused_moe",
"moe_tiling_size_batch": 4,
"moe_tiling_size_dim": 128,
"moe_tiling_size_seqlen": 128,
"num_attention_heads": 64,
"num_experts_per_tok": 4,
"num_hidden_layers": 36,
"num_key_value_heads": 8,
"num_local_experts": 128,
"operation_configs": null,
"output_router_logits": false,
"pad_token_id": 199999,
"pallas_k_block_size": 128,
"pallas_m_block_size": 128,
"pallas_n_block_size": 128,
"partition_axis": {
"attention_dim_axis": null,
"attention_kv_dim_axis": null,
"batch_axis": [
"fsdp",
"dp"
],
"bias_head_sequence_axis": null,
"bias_key_sequence_axis": null,
"data_parallel_axis": "dp",
"decode_attention_dim_axis": null,
"decode_attention_kv_dim_axis": null,
"decode_batch_axis": [
"fsdp",
"dp"
],
"decode_head_axis": "tp",
"decode_key_sequence_axis": "sp",
"decode_kv_head_axis": "tp",
"decode_query_sequence_axis": null,
"expert_axis": "ep",
"expert_gate_axis": null,
"expert_parallel_axis": "ep",
"fully_sharded_data_parallel_axis": "fsdp",
"head_axis": "tp",
"hidden_state_axis": "tp",
"key_sequence_axis": "sp",
"kv_head_axis": "tp",
"mlp_intermediate_axis": "tp",
"query_sequence_axis": "sp",
"sequence_axis": "sp",
"sequence_parallel_axis": "sp",
"tensor_parallel_axis": "tp",
"vocab_axis": "tp"
},
"platform": null,
"precompute_masks": true,
"pretraining_tp": 1,
"quantization_config": null,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"beta_fast": 32.0,
"beta_slow": 1.0,
"factor": 32.0,
"original_max_position_embeddings": 4096,
"rope_type": "yarn",
"truncate": false
},
"rope_theta": 150000,
"router_aux_loss_coef": 0.9,
"scan_attention_layers": false,
"scan_mlp_chunk_size": 1024,
"scan_ring_attention": true,
"sequence_axis_name": "sp",
"sharding_axis_dims": [
1,
-1,
1,
1,
1
],
"sharding_axis_names": [
"dp",
"fsdp",
"ep",
"tp",
"sp"
],
"sharding_dcn_axis_dims": null,
"sliding_window": 128,
"sp_is_ep_bound": true,
"swiglu_limit": 7.0,
"tie_word_embeddings": false,
"transformers_version": "4.57.3",
"use_cache": true,
"use_expert_tensor_mode": false,
"use_ring_of_experts": false,
"use_scan_mlp": false,
"use_sharded_kv_caching": false,
"use_sharding_constraint": false,
"vocab_size": 201088
}
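
A minimal sketch (not part of the repository) of how a few of the values above fit together, assuming the file is saved locally as "config.json": the 36 entries in "layer_types" alternate sliding-window and full attention, 4 of the 128 local experts are routed per token, and the YaRN "rope_scaling" block extends the 4096-token pretraining context by a factor of 32 to the stated "max_position_embeddings" of 131072.

import json

# Load the config shown above; assumes it was saved as "config.json".
with open("config.json") as f:
    cfg = json.load(f)

# Layer layout: 36 layers alternating sliding-window and full attention.
assert len(cfg["layer_types"]) == cfg["num_hidden_layers"] == 36
assert cfg["layer_types"][0::2] == ["sliding_attention"] * 18
assert cfg["layer_types"][1::2] == ["full_attention"] * 18

# MoE routing: 4 of the 128 local experts are active per token.
print(f'{cfg["num_experts_per_tok"]} of {cfg["num_local_experts"]} experts per token')

# YaRN RoPE scaling: 4096 original positions * factor 32 = 131072 max positions.
rs = cfg["rope_scaling"]
assert rs["rope_type"] == "yarn"
assert rs["original_max_position_embeddings"] * rs["factor"] == cfg["max_position_embeddings"]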