{
  "_external_rope_config_kwargs": {},
  "add_cross_attention": false,
  "architectures": [
    "Qwen3_5ForConditionalGeneration"
  ],
  "attn_mechanism": "ragged_page_attention_v3",
  "backend": null,
  "bits": null,
  "blocksize_b": 1,
  "blocksize_k": 128,
  "blocksize_q": 128,
  "bos_token_id": null,
  "cross_attention_hidden_size": null,
  "decode_attn_mechanism": null,
  "decoder_start_token_id": null,
  "easy_method": "train",
  "eos_token_id": null,
  "fcm_max_ratio": 0.0,
  "fcm_min_ratio": 0.0,
  "flash_attention_backward_pass_impl": "triton",
  "freq_max_position_embeddings": 40960,
  "fsdp_is_ep_bound": true,
  "gradient_checkpointing": "",
  "gradient_checkpointing_targets": null,
  "hardware_abstraction": true,
  "image_token_id": 248056,
  "is_decoder": false,
  "kv_cache_quantization_config": null,
  "kv_cache_sharding_sequence_axis_name": "sp",
  "mask_max_position_embeddings": 40960,
  "max_position_embeddings": null,
  "model_type": "qwen3_5",
  "moe_force_xla_gmm": false,
  "moe_method": "fused_moe",
  "moe_tiling_size_batch": 4,
  "moe_tiling_size_dim": 128,
  "moe_tiling_size_seqlen": 128,
  "operation_configs": null,
  "pad_token_id": null,
  "pallas_k_block_size": 128,
  "pallas_m_block_size": 128,
  "pallas_n_block_size": 128,
  "partition_axis": {
    "attention_dim_axis": null,
    "attention_kv_dim_axis": null,
    "batch_axis": [
      "fsdp",
      "dp"
    ],
    "bias_head_sequence_axis": null,
    "bias_key_sequence_axis": null,
    "data_parallel_axis": "dp",
    "decode_attention_dim_axis": null,
    "decode_attention_kv_dim_axis": null,
    "decode_batch_axis": [
      "fsdp",
      "dp"
    ],
    "decode_head_axis": "tp",
    "decode_key_sequence_axis": "sp",
    "decode_kv_head_axis": "tp",
    "decode_query_sequence_axis": null,
    "expert_axis": "ep",
    "expert_gate_axis": null,
    "expert_parallel_axis": "ep",
    "fully_sharded_data_parallel_axis": "fsdp",
    "head_axis": "tp",
    "hidden_state_axis": "tp",
    "key_sequence_axis": "sp",
    "kv_head_axis": "tp",
    "mlp_intermediate_axis": "tp",
    "query_sequence_axis": "sp",
    "sequence_axis": "sp",
    "sequence_parallel_axis": "sp",
    "tensor_parallel_axis": "tp",
    "vocab_axis": "tp"
  },
  "platform": null,
  "precompute_masks": true,
  "pretraining_tp": 1,
  "qmm_platform_override": null,
  "qmm_tpu_path_override": null,
  "quantization_config": {
    "bits": 4,
    "dtype": "affine",
    "group_size": 128
  },
  "scan_attention_layers": false,
  "scan_mlp_chunk_size": 1024,
  "scan_ring_attention": true,
  "sep_token_id": null,
  "sequence_axis_name": "sp",
  "sharding_axis_dims": [
    1,
    4,
    1,
    -1,
    1
  ],
  "sharding_axis_names": [
    "dp",
    "fsdp",
    "ep",
    "tp",
    "sp"
  ],
  "sharding_dcn_axis_dims": null,
  "sp_is_ep_bound": true,
  "text_config": {
    "_external_rope_config_kwargs": {},
    "add_cross_attention": false,
    "architectures": [
      "Qwen3_5ForConditionalGeneration"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "attn_mechanism": "ragged_page_attention_v3",
    "attn_output_gate": true,
    "backend": null,
    "bits": null,
    "blocksize_b": 1,
    "blocksize_k": 128,
    "blocksize_q": 128,
    "bos_token_id": null,
    "cross_attention_hidden_size": null,
    "decode_attn_mechanism": null,
    "decoder_sparse_step": 1,
    "decoder_start_token_id": null,
    "dtype": "bfloat16",
    "easy_method": "train",
    "eos_token_id": 248044,
    "fcm_max_ratio": 0.0,
    "fcm_min_ratio": 0.0,
    "flash_attention_backward_pass_impl": "triton",
    "freq_max_position_embeddings": 40960,
    "fsdp_is_ep_bound": true,
    "full_attention_interval": 4,
    "gradient_checkpointing": "",
    "gradient_checkpointing_targets": null,
    "hardware_abstraction": true,
    "head_dim": 256,
    "hidden_act": "silu",
    "hidden_size": 5120,
    "initializer_range": 0.02,
    "intermediate_size": 17408,
    "is_decoder": false,
    "kv_cache_quantization_config": null,
    "kv_cache_sharding_sequence_axis_name": "sp",
    "layer_types": [
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention",
      "linear_attention",
      "linear_attention",
      "linear_attention",
      "full_attention"
    ],
    "linear_attention_separate_proj": true,
    "linear_conv_kernel_dim": 4,
    "linear_key_head_dim": 128,
    "linear_num_key_heads": 16,
    "linear_num_value_heads": 48,
    "linear_value_head_dim": 128,
    "mamba_ssm_dtype": "float32",
    "mask_max_position_embeddings": 40960,
    "max_position_embeddings": 262144,
    "mlp_only_layers": [
      0,
      1,
      2,
      3,
      4,
      5,
      6,
      7,
      8,
      9,
      10,
      11,
      12,
      13,
      14,
      15,
      16,
      17,
      18,
      19,
      20,
      21,
      22,
      23,
      24,
      25,
      26,
      27,
      28,
      29,
      30,
      31,
      32,
      33,
      34,
      35,
      36,
      37,
      38,
      39,
      40,
      41,
      42,
      43,
      44,
      45,
      46,
      47,
      48,
      49,
      50,
      51,
      52,
      53,
      54,
      55,
      56,
      57,
      58,
      59,
      60,
      61,
      62,
      63
    ],
    "model_type": "qwen3_5_text",
    "moe_force_xla_gmm": false,
    "moe_intermediate_size": 512,
    "moe_method": "fused_moe",
    "moe_tiling_size_batch": 4,
    "moe_tiling_size_dim": 128,
    "moe_tiling_size_seqlen": 128,
    "mtp_num_hidden_layers": 1,
    "mtp_use_dedicated_embeddings": false,
    "norm_topk_prob": true,
    "num_attention_heads": 24,
    "num_experts": 256,
    "num_experts_per_tok": 8,
    "num_hidden_layers": 64,
    "num_key_value_heads": 4,
    "num_local_experts": 256,
    "operation_configs": null,
    "output_router_logits": false,
    "pad_token_id": null,
    "pallas_k_block_size": 128,
    "pallas_m_block_size": 128,
    "pallas_n_block_size": 128,
    "partial_rotary_factor": 0.25,
    "partition_axis": {
      "attention_dim_axis": null,
      "attention_kv_dim_axis": null,
      "batch_axis": [
        "fsdp",
        "dp"
      ],
      "bias_head_sequence_axis": null,
      "bias_key_sequence_axis": null,
      "data_parallel_axis": "dp",
      "decode_attention_dim_axis": null,
      "decode_attention_kv_dim_axis": null,
      "decode_batch_axis": [
        "fsdp",
        "dp"
      ],
      "decode_head_axis": "tp",
      "decode_key_sequence_axis": "sp",
      "decode_kv_head_axis": "tp",
      "decode_query_sequence_axis": null,
      "expert_axis": "ep",
      "expert_gate_axis": null,
      "expert_parallel_axis": "ep",
      "fully_sharded_data_parallel_axis": "fsdp",
      "head_axis": "tp",
      "hidden_state_axis": "tp",
      "key_sequence_axis": "sp",
      "kv_head_axis": "tp",
      "mlp_intermediate_axis": "tp",
      "query_sequence_axis": "sp",
      "sequence_axis": "sp",
      "sequence_parallel_axis": "sp",
      "tensor_parallel_axis": "tp",
      "vocab_axis": "tp"
    },
    "platform": null,
    "precompute_masks": true,
    "pretraining_tp": 1,
    "qmm_platform_override": null,
    "qmm_tpu_path_override": null,
    "quantization_config": {
      "bits": 4,
      "dtype": "affine",
      "group_size": 128
    },
    "rms_norm_eps": 1e-06,
    "rope_parameters": {
      "mrope_interleaved": true,
      "mrope_section": [
        11,
        11,
        10
      ],
      "partial_rotary_factor": 0.25,
      "rope_theta": 10000000,
      "rope_type": "default",
      "type": "default"
    },
    "rope_theta": 10000000,
    "router_aux_loss_coef": 0.001,
    "scan_attention_layers": false,
    "scan_mlp_chunk_size": 1024,
    "scan_ring_attention": true,
    "sep_token_id": null,
    "sequence_axis_name": "sp",
    "sharding_axis_dims": [
      1,
      4,
      1,
      -1,
      1
    ],
    "sharding_axis_names": [
      "dp",
      "fsdp",
      "ep",
      "tp",
      "sp"
    ],
    "sharding_dcn_axis_dims": null,
    "shared_expert_intermediate_size": 512,
    "sp_is_ep_bound": true,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": false,
    "use_cache": true,
    "use_expert_tensor_mode": false,
    "use_qmm_best_config": false,
    "use_ring_of_experts": false,
    "use_scan_mlp": false,
    "use_sharded_kv_caching": false,
    "use_sharding_constraint": false,
    "vocab_size": 248320
  },
  "tie_encoder_decoder": false,
  "tie_word_embeddings": false,
  "transformers_version": "5.2.0",
  "use_expert_tensor_mode": false,
  "use_qmm_best_config": false,
  "use_ring_of_experts": false,
  "use_scan_mlp": false,
  "use_sharded_kv_caching": false,
  "use_sharding_constraint": false,
  "video_token_id": 248057,
  "vision_config": {
    "_external_rope_config_kwargs": {},
    "add_cross_attention": false,
    "architectures": [
      "Qwen3_5ForConditionalGeneration"
    ],
    "attn_mechanism": "ragged_page_attention_v3",
    "backend": null,
    "bits": null,
    "blocksize_b": 1,
    "blocksize_k": 128,
    "blocksize_q": 128,
    "bos_token_id": null,
    "cross_attention_hidden_size": null,
    "decode_attn_mechanism": null,
    "decoder_start_token_id": null,
    "deepstack_visual_indexes": [],
    "depth": 27,
    "easy_method": "train",
    "embed_dim": 1152,
    "eos_token_id": null,
    "fcm_max_ratio": 0.0,
    "fcm_min_ratio": 0.0,
    "flash_attention_backward_pass_impl": "triton",
    "freq_max_position_embeddings": 40960,
    "fsdp_is_ep_bound": true,
    "gradient_checkpointing": "",
    "gradient_checkpointing_targets": null,
    "hardware_abstraction": true,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "in_channels": 3,
    "initializer_range": 0.02,
    "intermediate_size": 4304,
    "is_decoder": false,
    "kv_cache_quantization_config": null,
    "kv_cache_sharding_sequence_axis_name": "sp",
    "mask_max_position_embeddings": 40960,
    "max_position_embeddings": null,
    "model_type": "qwen3_5",
    "moe_force_xla_gmm": false,
    "moe_method": "fused_moe",
    "moe_tiling_size_batch": 4,
    "moe_tiling_size_dim": 128,
    "moe_tiling_size_seqlen": 128,
    "num_attention_heads": 16,
    "num_heads": 16,
    "num_position_embeddings": 2304,
    "operation_configs": null,
    "out_hidden_size": 5120,
    "pad_token_id": null,
    "pallas_k_block_size": 128,
    "pallas_m_block_size": 128,
    "pallas_n_block_size": 128,
    "partition_axis": {
      "attention_dim_axis": null,
      "attention_kv_dim_axis": null,
      "batch_axis": [
        "fsdp",
        "dp"
      ],
      "bias_head_sequence_axis": null,
      "bias_key_sequence_axis": null,
      "data_parallel_axis": "dp",
      "decode_attention_dim_axis": null,
      "decode_attention_kv_dim_axis": null,
      "decode_batch_axis": [
        "fsdp",
        "dp"
      ],
      "decode_head_axis": "tp",
      "decode_key_sequence_axis": "sp",
      "decode_kv_head_axis": "tp",
      "decode_query_sequence_axis": null,
      "expert_axis": "ep",
      "expert_gate_axis": null,
      "expert_parallel_axis": "ep",
      "fully_sharded_data_parallel_axis": "fsdp",
      "head_axis": "tp",
      "hidden_state_axis": "tp",
      "key_sequence_axis": "sp",
      "kv_head_axis": "tp",
      "mlp_intermediate_axis": "tp",
      "query_sequence_axis": "sp",
      "sequence_axis": "sp",
      "sequence_parallel_axis": "sp",
      "tensor_parallel_axis": "tp",
      "vocab_axis": "tp"
    },
    "patch_size": 16,
    "platform": null,
    "precompute_masks": true,
    "pretraining_tp": 1,
    "qmm_platform_override": null,
    "qmm_tpu_path_override": null,
    "quantization_config": {
      "bits": 4,
      "dtype": "affine",
      "group_size": 128
    },
    "scan_attention_layers": false,
    "scan_mlp_chunk_size": 1024,
    "scan_ring_attention": true,
    "sep_token_id": null,
    "sequence_axis_name": "sp",
    "sharding_axis_dims": [
      1,
      4,
      1,
      -1,
      1
    ],
    "sharding_axis_names": [
      "dp",
      "fsdp",
      "ep",
      "tp",
      "sp"
    ],
    "sharding_dcn_axis_dims": null,
    "sp_is_ep_bound": true,
    "spatial_merge_size": 2,
    "temporal_patch_size": 2,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokens_per_second": 2.0,
    "use_expert_tensor_mode": false,
    "use_qmm_best_config": false,
    "use_ring_of_experts": false,
    "use_scan_mlp": false,
    "use_sharded_kv_caching": false,
    "use_sharding_constraint": false
  },
  "vision_end_token_id": 248054,
  "vision_start_token_id": 248053
}
|
|