{ "_attn_implementation_autoset": false, "add_cross_attention": false, "architectures": [ "DeepseekV32ForCausalLM" ], "attention_bias": false, "attention_dropout": 0.0, "auto_map": { "AutoConfig": "configuration_deepseek_v32.DeepseekV32Config", "AutoModelForCausalLM": "modeling_deepseek_v32.DeepseekV32ForCausalLM" }, "bos_token_id": 0, "cross_attention_hidden_size": null, "decoder_start_token_id": null, "dtype": "float32", "eos_token_id": 1, "ep_size": 1, "finetuning_task": null, "first_k_dense_replace": 3, "hidden_act": "silu", "hidden_size": 512, "index_head_dim": 32, "index_n_heads": 4, "index_topk": 64, "initializer_range": 0.02, "intermediate_size": 256, "is_decoder": false, "kv_lora_rank": 32, "max_position_embeddings": 163840, "model_type": "deepseek_v32", "moe_intermediate_size": 256, "moe_layer_freq": 1, "n_group": 1, "n_routed_experts": 4, "n_shared_experts": 1, "norm_topk_prob": true, "num_attention_heads": 8, "num_experts_per_tok": 2, "num_hidden_layers": 4, "num_key_value_heads": 8, "num_nextn_predict_layers": 0, "pad_token_id": null, "prefix": null, "pruned_heads": {}, "q_lora_rank": 64, "qk_nope_head_dim": 32, "qk_rope_head_dim": 16, "rms_norm_eps": 1e-06, "rope_parameters": { "beta_fast": 32, "beta_slow": 1, "factor": 40, "mscale": 1.0, "mscale_all_dim": 1.0, "original_max_position_embeddings": 4096, "rope_theta": 10000, "rope_type": "yarn", "type": "yarn" }, "rope_theta": 10000, "routed_scaling_factor": 2.5, "scoring_func": "sigmoid", "sep_token_id": null, "task_specific_params": null, "tf_legacy_loss": false, "tie_encoder_decoder": false, "tie_word_embeddings": false, "tokenizer_class": null, "topk_group": 1, "topk_method": "noaux_tc", "torchscript": false, "transformers_version": "5.3.0", "use_bfloat16": false, "use_cache": true, "v_head_dim": 32, "vocab_size": 129280 }