k-l-lambda committed
Commit c6d1483 · Parent: b627654

Add Eagle3 v2 epoch 7 checkpoint (trained on K2.5 API log data)


- model.safetensors: Eagle3 draft model weights (3.9 GB, BF16)
- config.json / config.py: Eagle3SpeculatorConfig (model configuration and its remote-code config class)
- d2t.npy / t2d.npy: draft-to-target and target-to-draft vocabulary mappings (32K draft vocab; loading sketch below)
- Training: 10 epochs, ttt_steps=3, best validation step-0 accuracy 71.4% at epoch 7
- Architecture: 1 DeepSeek-V3 decoder layer + fc + lm_head, trained with the speculators framework
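
A minimal sketch of consuming the vocabulary-mapping arrays once they have been pulled from LFS. It assumes the usual EAGLE-3 convention that d2t.npy holds a per-draft-id offset into the target vocabulary and t2d.npy a boolean membership mask over it; file paths are illustrative.

    import numpy as np

    d2t = np.load("d2t.npy")  # (32000,) offsets, one per draft-token id
    t2d = np.load("t2d.npy")  # (163840,) bool mask over the target vocab

    # Assumed convention: target_id = draft_id + d2t[draft_id]
    draft_ids = np.array([0, 1, 31999])
    target_ids = draft_ids + d2t[draft_ids]

    # Every mapped id should be marked reachable in the target-side mask
    assert t2d[target_ids].all()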

Files changed (5)
  1. config.json +76 -0
  2. config.py +84 -0
  3. d2t.npy +3 -0
  4. model.safetensors +3 -0
  5. t2d.npy +3 -0
config.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "architectures": [
+     "Eagle3DraftModel"
+   ],
+   "auto_map": {
+     "AutoConfig": "config.Eagle3SpeculatorConfig"
+   },
+   "base_model_ep_plan": null,
+   "draft_vocab_size": 32000,
+   "dtype": "float32",
+   "eagle_aux_hidden_state_layer_ids": null,
+   "embed_requires_grad": false,
+   "has_no_defaults_at_init": false,
+   "norm_before_residual": true,
+   "speculators_config": {
+     "algorithm": "eagle3",
+     "default_proposal_method": "greedy",
+     "proposal_methods": [
+       {
+         "accept_tolerance": 0.0,
+         "proposal_type": "greedy",
+         "speculative_tokens": 3,
+         "verifier_accept_k": 1
+       }
+     ],
+     "verifier": {
+       "architectures": [],
+       "name_or_path": "/data/.cache_claude/huggingface/hub/models--moonshotai--Kimi-K2.5/snapshots/54383e83fa343a1331754112fb9e3410c55efa2f"
+     }
+   },
+   "speculators_model_type": "eagle3",
+   "speculators_version": "0.5.0.dev1",
+   "target_hidden_size": null,
+   "transformer_layer_config": {
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "aux_loss_alpha": 0.001,
+     "ep_size": 1,
+     "first_k_dense_replace": 3,
+     "head_dim": null,
+     "hidden_act": "silu",
+     "hidden_size": 7168,
+     "initializer_range": 0.02,
+     "intermediate_size": 18432,
+     "kv_lora_rank": 512,
+     "max_position_embeddings": 262144,
+     "model_type": "deepseek_v3",
+     "moe_intermediate_size": 2048,
+     "moe_layer_freq": 1,
+     "n_group": 8,
+     "n_routed_experts": 256,
+     "n_shared_experts": 1,
+     "norm_topk_prob": true,
+     "num_attention_heads": 64,
+     "num_experts_per_tok": 8,
+     "num_hidden_layers": 1,
+     "num_key_value_heads": 64,
+     "num_nextn_predict_layers": 1,
+     "pretraining_tp": 1,
+     "q_lora_rank": 1536,
+     "qk_nope_head_dim": 128,
+     "qk_rope_head_dim": 64,
+     "rms_norm_eps": 1e-05,
+     "rope_scaling": null,
+     "rope_theta": 10000.0,
+     "routed_scaling_factor": 2.5,
+     "scoring_func": "sigmoid",
+     "seq_aux": true,
+     "topk_group": 4,
+     "topk_method": "noaux_tc",
+     "use_cache": true,
+     "v_head_dim": 128,
+     "vocab_size": 163840
+   },
+   "transformers_version": "4.57.6"
+ }
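
Reading the key speculation parameters back out of this config.json needs only the standard library; a quick sketch (file assumed local):

    import json

    with open("config.json") as f:
        cfg = json.load(f)

    layer = cfg["transformer_layer_config"]
    print(cfg["speculators_model_type"])  # eagle3
    print(cfg["draft_vocab_size"])        # 32000 (draft vocab)
    print(layer["vocab_size"])            # 163840 (Kimi-K2.5 target vocab)
    print(layer["num_hidden_layers"])     # 1 DeepSeek-V3 decoder layer
    print(cfg["speculators_config"]["proposal_methods"][0]["speculative_tokens"])  # 3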
config.py ADDED
@@ -0,0 +1,84 @@
+ from typing import Any, Literal
+
+ from pydantic import Field, field_serializer, field_validator
+ from transformers import AutoConfig, PretrainedConfig
+ from transformers.models.llama.configuration_llama import LlamaConfig
+
+ from speculators import SpeculatorModelConfig
+
+ __all__ = [
+     "Eagle3SpeculatorConfig",
+ ]
+
+
+ @SpeculatorModelConfig.register("eagle3")
+ class Eagle3SpeculatorConfig(SpeculatorModelConfig):
+     """
+     Configuration for EAGLE-3 speculator with vocabulary mapping.
+
+     EAGLE-3 features vocabulary mapping between the draft (32K) and target
+     (163,840-token) vocabularies, enabling speculation over a reduced draft vocabulary.
+
+     :param transformer_layer_config: Configuration for the transformer decoder layer
+     :param draft_vocab_size: Size of draft model vocabulary for speculation
+     :param norm_before_residual: Apply hidden_norm before storing residual
+     """
+
+     speculators_model_type: Literal["eagle3"] = "eagle3"
+     architectures: list[str] = Field(
+         default_factory=lambda: ["Eagle3Speculator"],
+         description="Model architectures that can load these weights",
+     )
+
+     transformer_layer_config: PretrainedConfig = Field(
+         default_factory=LlamaConfig,
+         description="Configuration for the transformer decoder layer",
+     )
+
+     draft_vocab_size: int = Field(
+         default=32000,
+         description="Size of draft model vocabulary for speculation",
+     )
+
+     norm_before_residual: bool = Field(
+         default=False,
+         description="Apply hidden_norm before storing residual",
+     )
+
+     target_hidden_size: int | None = Field(
+         default=None,
+         description="Hidden size of the target model (if different from draft model)",
+     )
+
+     eagle_aux_hidden_state_layer_ids: list[int] | None = Field(
+         default=None,
+         description="Layer IDs of the Eagle auxiliary hidden state layers",
+     )
+
+     embed_requires_grad: bool = Field(
+         default=False,
+         description="Whether embedding layer weights require gradients during training",
+     )
+
+     @property
+     def target_vocab_size(self) -> int:
+         """Get target vocabulary size from transformer config."""
+         return self.transformer_layer_config.vocab_size
+
+     @field_serializer("transformer_layer_config")
+     def serialize_transformer_config(self, value: PretrainedConfig) -> dict:
+         """Serialize transformer config to dict."""
+         return value.to_diff_dict()
+
+     @field_validator("transformer_layer_config", mode="before")
+     @classmethod
+     def validate_transformer_config(cls, value: Any) -> PretrainedConfig:
+         """Validate and convert transformer config."""
+         if isinstance(value, dict):
+             config_class: type[PretrainedConfig] = LlamaConfig
+             if "model_type" in value:
+                 config_class = AutoConfig.for_model(
+                     model_type=value["model_type"]
+                 ).__class__
+             return config_class(**value)
+         return value
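
A short usage sketch for the field validator above: passing transformer_layer_config as a dict with a model_type lets AutoConfig pick the matching config class. This assumes the speculators package is installed; depending on its version, additional fields may be required at init, and the values here are illustrative.

    from config import Eagle3SpeculatorConfig

    cfg = Eagle3SpeculatorConfig(
        transformer_layer_config={"model_type": "deepseek_v3", "vocab_size": 163840},
        draft_vocab_size=32000,
        norm_before_residual=True,
    )
    print(type(cfg.transformer_layer_config).__name__)  # DeepseekV3Config
    print(cfg.target_vocab_size)                        # 163840, via the property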
d2t.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54e8e5f61ab63ccfe4130e66edaba91e768d4cabecfec5b7a00cd7affdb9f934
+ size 256128
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5aff905e131bc3d7d831c71a34102464fad3a22286eb63fcd40b27ce2e708b2b
+ size 4141572048
t2d.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0245607c6103a5263047b06511156326cb081fcac3e3711f92e7b9dd19da9c8
+ size 163968
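
The LFS pointer sizes line up with the shapes implied by config.json, assuming an int64 d2t, a bool t2d, and NumPy's usual 128-byte .npy header: 32000 × 8 + 128 = 256128 bytes for d2t.npy and 163840 × 1 + 128 = 163968 bytes for t2d.npy (and model.safetensors at 4,141,572,048 bytes ≈ 3.86 GiB matches the "3.9 GB" in the commit message). A throwaway check of that arithmetic:

    import numpy as np

    D, T, HDR = 32000, 163840, 128  # draft vocab, target vocab, .npy header bytes
    assert D * np.dtype(np.int64).itemsize + HDR == 256128   # d2t.npy size
    assert T * np.dtype(np.bool_).itemsize + HDR == 163968   # t2d.npy size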