{
  "architectures": [
    "Eagle3Speculator"
  ],
  "speculators_model_type": "eagle3",
  "speculators_version": "0.1.0.dev42",
  "draft_vocab_size": 32000,
  "norm_before_residual": false,
  "target_hidden_size": null,
  "eagle_aux_hidden_state_layer_ids": [
    1,
    45,
    89
  ],
  "transformer_layer_config": {
    "model_type": "llama",
    "vocab_size": 151936,
    "hidden_size": 4096,
    "intermediate_size": 12288,
    "num_hidden_layers": 1,
    "num_attention_heads": 64,
    "num_key_value_heads": 4,
    "head_dim": 128,
    "hidden_act": "silu",
    "max_position_embeddings": 262144,
    "initializer_range": 0.02,
    "rms_norm_eps": 1e-05,
    "pretraining_tp": 1,
    "use_cache": true,
    "rope_theta": 500000.0,
    "rope_scaling": {
      "factor": 8.0,
      "high_freq_factor": 4.0,
      "low_freq_factor": 1.0,
      "original_max_position_embeddings": 8192,
      "rope_type": "llama3"
    },
    "attention_bias": false,
    "attention_dropout": 0.0,
    "mlp_bias": false,
    "tie_word_embeddings": false
  },
  "speculators_config": {
    "algorithm": "eagle3",
    "default_proposal_method": "greedy",
    "proposal_methods": [
      {
        "proposal_type": "greedy",
        "speculative_tokens": 3,
        "accept_tolerance": 0.0
      }
    ],
    "verifier": {
      "name_or_path": "Qwen/Qwen3-235B-A22B-Instruct-2507-FP8",
      "architectures": [
        "Qwen3MoeForCausalLM"
      ]
    }
  },
  "torch_dtype": "bfloat16",
  "_comment": "Eagle3 head based on Llama3 architecture targeting Qwen3 verifier",
  "_conversion_notes": {
    "source": "nm-testing/Mockup-qwen235-eagle3-fp16",
    "architecture_notes": "Eagle3 head uses Llama3 rope_type, targets Qwen3 verifier",
    "vocabulary_notes": "Draft vocab size 32000, target vocab size 151936",
    "auxiliary_layers": "Uses hidden states from verifier layers 3, 47, 91",
    "implementation_note": "May require Eagle3Speculator extensions for aux hidden states"
  }
}
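
A minimal sketch of loading and sanity-checking this config with the Python standard library. The local path "config.json" assumes the file has been downloaded from the repo. Note that the `eagle_aux_hidden_state_layer_ids` (1, 45, 89) and the layer list in `_conversion_notes.auxiliary_layers` (3, 47, 91) differ by a constant offset of 2, which looks like two indexing conventions for the same verifier layers; the sketch surfaces both rather than guessing which is canonical.

```python
import json

# Load the config above; "config.json" is assumed to be a local copy.
with open("config.json") as f:
    cfg = json.load(f)

layer_cfg = cfg["transformer_layer_config"]

# Draft vocab (32000) is a subset of the verifier vocab (151936); Eagle3
# heads typically carry a draft-to-target id mapping to bridge the two.
assert cfg["draft_vocab_size"] < layer_cfg["vocab_size"]

# Surface the aux-layer indexing mismatch between config and notes.
print("config aux layers:", cfg["eagle_aux_hidden_state_layer_ids"])
print("notes say:        ", cfg["_conversion_notes"]["auxiliary_layers"])

# The draft head is a single Llama-style decoder layer.
assert layer_cfg["num_hidden_layers"] == 1
print(f"draft head: {layer_cfg['num_attention_heads']} heads, "
      f"hidden_size={layer_cfg['hidden_size']}, dtype={cfg['torch_dtype']}")
```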
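
The `rope_scaling` block selects the "llama3" extension rule. As a sketch of what those four parameters do, following the rule as implemented in Hugging Face transformers: inverse frequencies with short wavelengths are left untouched, long wavelengths are divided by `factor`, and the band in between is smoothly interpolated. Default arguments below are taken from this config.

```python
import math

def llama3_scaled_inv_freq(head_dim=128, rope_theta=500000.0, factor=8.0,
                           low_freq_factor=1.0, high_freq_factor=4.0,
                           original_max_position_embeddings=8192):
    """Sketch of the 'llama3' RoPE scaling rule with this config's values."""
    low_freq_wavelen = original_max_position_embeddings / low_freq_factor
    high_freq_wavelen = original_max_position_embeddings / high_freq_factor
    scaled = []
    for i in range(0, head_dim, 2):
        inv_freq = 1.0 / (rope_theta ** (i / head_dim))
        wavelen = 2 * math.pi / inv_freq
        if wavelen < high_freq_wavelen:
            scaled.append(inv_freq)           # short wavelengths: unchanged
        elif wavelen > low_freq_wavelen:
            scaled.append(inv_freq / factor)  # long wavelengths: fully scaled
        else:
            # smooth interpolation between the two regimes
            smooth = (original_max_position_embeddings / wavelen
                      - low_freq_factor) / (high_freq_factor - low_freq_factor)
            scaled.append((1 - smooth) * inv_freq / factor + smooth * inv_freq)
    return scaled
```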
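
For serving, a hedged sketch of how an Eagle3 draft head like this one is typically paired with its verifier via vLLM's speculative decoding, assuming a recent vLLM build with EAGLE-3 support. The `speculative_config` keys follow recent vLLM documentation but have changed between releases, so check your installed version; `"<this-repo>"` is a placeholder for this draft model's id, and the `tensor_parallel_size` is an assumption for a 235B FP8 MoE verifier.

```python
from vllm import LLM, SamplingParams

# Hedged sketch: wire the Eagle3 draft to the verifier named in the
# config's speculators_config.verifier block. Interface details may
# differ across vLLM versions.
llm = LLM(
    model="Qwen/Qwen3-235B-A22B-Instruct-2507-FP8",  # verifier from the config
    speculative_config={
        "method": "eagle3",
        "model": "<this-repo>",       # placeholder: the Eagle3 draft head
        "num_speculative_tokens": 3,  # matches speculative_tokens above
    },
    tensor_parallel_size=8,  # assumption: large MoE verifier spans several GPUs
)
out = llm.generate(["Speculative decoding lets a small draft head"],
                   SamplingParams(temperature=0.0, max_tokens=64))
print(out[0].outputs[0].text)
```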