anthonym21 committed on
Commit cbbb431 · verified · 1 Parent(s): ed041f4

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,18 @@
+ ---
+ license: mit
+ tags:
+ - moe
+ - eve-swarm
+ - specialist
+ base_model:
+ - anthonym21/Eve-2-MoE-IT-272M
+ datasets:
+ - b-mc2/sql-create-context
+ ---
+ # Eve-2-MoE-NanoSQL-272M
+ Converts natural language to SQL queries using table context.
+ ## Training
+ - Hardware: NVIDIA H200 SXM
+ - Method: Full Fine-Tuning (FFT)
+ - Samples: 25,000
+ - Time: 4.8 min
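A minimal usage sketch (not part of the model card): it assumes the repo's custom files `configuration_eve.py` and `modeling_eve.py` are importable locally (e.g. from a clone of this repo), and that a plain CREATE TABLE context followed by a question roughly matches the sql-create-context training format; the exact prompt template is not documented here.

```python
# Hypothetical usage sketch; assumes modeling_eve.py / configuration_eve.py are on the
# Python path and that the prompt format below approximates the fine-tuning data.
from transformers import AutoTokenizer
from modeling_eve import DeepSeekMoE

repo = "anthonym21/Eve-2-MoE-NanoSQL-272M"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = DeepSeekMoE.from_pretrained(repo).eval()

prompt = (
    "CREATE TABLE employees (id INT, name TEXT, salary INT)\n"
    "Question: What is the name of the highest paid employee?\n"
    "SQL:"
)
inputs = tokenizer(prompt, return_tensors="pt")
# No kv-cache is implemented, so each generated token recomputes the full prefix.
out = model.generate(inputs["input_ids"], max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```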
config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "architectures": [
+     "DeepSeekMoE"
+   ],
+   "block_size": 2048,
+   "dtype": "bfloat16",
+   "expert_intermediate_size": 1408,
+   "head_dim": 64,
+   "model_type": "eve_moe",
+   "n_embd": 512,
+   "n_head": 8,
+   "n_layer": 12,
+   "num_experts": 8,
+   "rope_theta": 10000.0,
+   "router_aux_loss_coef": 0.01,
+   "shared_expert_intermediate_size": 1408,
+   "top_k": 2,
+   "transformers_version": "5.1.0",
+   "use_cache": false,
+   "use_checkpointing": false,
+   "vocab_size": 50304
+ }
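As a rough sanity check (my arithmetic, not from the repo), these hyperparameters imply roughly 272M parameters, consistent with the model name and the ~544 MB bfloat16 `model.safetensors` below, assuming the module shapes defined in `modeling_eve.py` and a single tied embedding / LM head matrix:

```python
# Back-of-the-envelope parameter count from config.json (tied embedding counted once).
n_embd, n_layer, vocab = 512, 12, 50304
expert_ffn = shared_ffn = 1408
num_experts = 8

attn = 3 * n_embd * n_embd + n_embd * n_embd               # c_attn + c_proj
mlp = 3 * n_embd * expert_ffn                               # w1, w2, c_proj of one expert MLP
moe = n_embd * num_experts + 3 * n_embd * shared_ffn + num_experts * mlp  # router + shared + routed
norms = 2 * n_embd                                          # two RMSNorms per block
per_layer = attn + moe + norms

total = vocab * n_embd + n_layer * per_layer + n_embd       # embedding + blocks + final norm
print(f"{total / 1e6:.1f}M parameters")                     # ≈ 272M
```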
configuration_eve.py ADDED
@@ -0,0 +1,50 @@
+ # configuration_eve.py
+ from __future__ import annotations
+
+ from typing import Any, Optional
+ from transformers import PretrainedConfig
+
+
+ class EveConfig(PretrainedConfig):
+     model_type = "eve_moe"
+     attribute_map = {
+         "num_hidden_layers": "n_layer",
+         "num_attention_heads": "n_head",
+         "hidden_size": "n_embd",
+         "max_position_embeddings": "block_size",
+     }
+
+     def __init__(
+         self,
+         vocab_size: int = 50304,
+         n_layer: int = 12,
+         n_embd: int = 512,
+         n_head: int = 8,
+         head_dim: int = 64,
+         block_size: int = 2048,
+         num_experts: int = 8,
+         top_k: int = 2,
+         expert_intermediate_size: int = 1408,
+         shared_expert_intermediate_size: int = 1408,
+         router_aux_loss_coef: float = 0.01,
+         use_checkpointing: bool = False,
+         rope_theta: float = 10000.0,
+         **kwargs: Any,
+     ):
+         self.vocab_size = vocab_size
+         self.n_layer = n_layer
+         self.n_embd = n_embd
+         self.n_head = n_head
+         self.head_dim = head_dim
+         self.block_size = block_size
+         self.num_experts = num_experts
+         self.top_k = top_k
+         self.expert_intermediate_size = expert_intermediate_size
+         self.shared_expert_intermediate_size = shared_expert_intermediate_size
+         self.router_aux_loss_coef = router_aux_loss_coef
+         self.use_checkpointing = use_checkpointing
+         self.rope_theta = rope_theta
+         super().__init__(**kwargs)
+
+
+ __all__ = ["EveConfig"]
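A small sketch of what `attribute_map` buys: standard Transformers attribute names resolve to the Eve-specific fields, so generic HF tooling can read `hidden_size`, `num_hidden_layers`, and so on. This assumes `configuration_eve.py` is importable locally.

```python
# attribute_map aliasing check (sketch; requires configuration_eve.py on the path).
from configuration_eve import EveConfig

cfg = EveConfig()
print(cfg.hidden_size, cfg.n_embd)           # 512 512   (alias -> n_embd)
print(cfg.num_hidden_layers, cfg.n_layer)    # 12 12     (alias -> n_layer)
print(cfg.max_position_embeddings)           # 2048      (alias -> block_size)
```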
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "output_attentions": false,
+   "output_hidden_states": false,
+   "transformers_version": "5.1.0",
+   "use_cache": false
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ceaabd9a6ee1af431060e16433d7b689ee8290fee3535843d577c8ce694128d8
+ size 543985840
modeling_eve.py ADDED
@@ -0,0 +1,296 @@
+ # modeling_eve.py
+ # Self-contained Eve MoE model definition with training-safe loss, PEFT compatibility,
+ # and Hugging Face generation support.
+ #
+ # Key fixes vs. earlier versions:
+ # - Correct *shifted* causal LM loss (predict token t+1 from position t).
+ # - Returns a proper Transformers ModelOutput (CausalLMOutputWithPast).
+ # - Implements get_input_embeddings / get_output_embeddings for PEFT checkpointing.
+ # - Supports prompt-masked SFT via ignore_index=-100.
+ #
+ # Notes:
+ # - This model does NOT implement kv-cache; generate() will work but be slower.
+ # - Attention masking for padding is not applied (is_causal=True); use right-padding.
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import Optional, Tuple, Any, Dict
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from transformers import PreTrainedModel, PretrainedConfig, GenerationMixin
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+ from configuration_eve import EveConfig
+
+
+ class RMSNorm(nn.Module):
+     def __init__(self, dim: int, eps: float = 1e-5):
+         super().__init__()
+         self.eps = eps
+         self.weight = nn.Parameter(torch.ones(dim))
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) * self.weight
+
+
+ def precompute_rope_freqs(
+     head_dim: int,
+     max_seq_len: int,
+     theta: float = 10000.0,
+     device: Optional[torch.device] = None,
+ ) -> torch.Tensor:
+     """Precompute complex RoPE frequencies as cis values."""
+     freqs = 1.0 / (theta ** (torch.arange(0, head_dim, 2, device=device).float() / head_dim))
+     t = torch.arange(max_seq_len, device=device).float()
+     freqs = torch.outer(t, freqs)  # [T, head_dim/2]
+     return torch.polar(torch.ones_like(freqs), freqs)  # complex64
+
+
+ def apply_rope(x: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
+     """
+     x: [B, H, T, D]
+     freqs_cis: [T, D/2] complex
+     """
+     B, H, T, D = x.shape
+     # [B,H,T,D/2] complex
+     x_complex = torch.view_as_complex(x.float().reshape(B, H, T, D // 2, 2))
+     freqs_cis = freqs_cis[:T].view(1, 1, T, D // 2)
+     x_rotated = x_complex * freqs_cis
+     return torch.view_as_real(x_rotated).reshape(B, H, T, D).type_as(x)
+
+
+ class MLP(nn.Module):
+     def __init__(self, config: EveConfig, intermediate_size: Optional[int] = None):
+         super().__init__()
+         hidden_dim = intermediate_size or config.expert_intermediate_size
+         self.w1 = nn.Linear(config.n_embd, hidden_dim, bias=False)
+         self.w2 = nn.Linear(config.n_embd, hidden_dim, bias=False)
+         self.c_proj = nn.Linear(hidden_dim, config.n_embd, bias=False)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return self.c_proj(F.silu(self.w1(x)) * self.w2(x))
+
+
+ class SharedMoE(nn.Module):
+     """
+     Simple top-k MoE:
+       - One shared expert always applied
+       - N routed experts mixed by router weights
+       - Aux loss encourages balanced expert usage (simple squared-mean heuristic)
+     """
+
+     def __init__(self, config: EveConfig):
+         super().__init__()
+         self.config = config
+         self.top_k = config.top_k
+         self.shared_expert = MLP(config, config.shared_expert_intermediate_size)
+         self.experts = nn.ModuleList([MLP(config) for _ in range(config.num_experts)])
+         self.router = nn.Linear(config.n_embd, config.num_experts, bias=False)
+
+     def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+         B, T, C = x.shape
+         if self.top_k < 1 or self.top_k > self.config.num_experts:
+             raise ValueError(f"Invalid MoE top_k={self.top_k}; must be in [1, {self.config.num_experts}]")
+
+         shared_out = self.shared_expert(x)
+
+         logits = self.router(x)  # [B,T,E]
+         probs = F.softmax(logits, dim=-1)  # [B,T,E]
+         top_k_weights, top_k_indices = torch.topk(probs, self.top_k, dim=-1)  # [B,T,K]
+         top_k_weights = top_k_weights / top_k_weights.sum(dim=-1, keepdim=True)
+
+         # Aux loss: encourage balanced usage across experts
+         flat_probs = probs.view(-1, self.config.num_experts)  # [B*T,E]
+         expert_usage = flat_probs.mean(dim=0)  # [E]
+         aux_loss = torch.sum(expert_usage * expert_usage) * self.config.num_experts
+
+         routed_out = torch.zeros_like(x)
+         flat_x = x.view(-1, C)  # [B*T,C]
+         flat_indices = top_k_indices.view(-1, self.top_k)  # [B*T,K]
+         flat_weights = top_k_weights.view(-1, self.top_k)  # [B*T,K]
+
+         # NOTE: This routing loop is simple but not optimal.
+         for i, expert in enumerate(self.experts):
+             mask = flat_indices == i  # [B*T,K]
+             batch_idx, rank_idx = torch.where(mask)
+             if batch_idx.numel() > 0:
+                 expert_input = flat_x[batch_idx]
+                 expert_output = expert(expert_input)
+                 weight = flat_weights[batch_idx, rank_idx].unsqueeze(-1)
+                 routed_out.view(-1, C).index_add_(0, batch_idx, expert_output * weight)
+
+         return shared_out + routed_out, aux_loss
+
+
+ class CausalSelfAttention(nn.Module):
+     def __init__(self, config: EveConfig):
+         super().__init__()
+         self.n_head = config.n_head
+         self.head_dim = config.head_dim
+         self.n_embd = config.n_embd
+
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
+
+     def forward(self, x: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
+         B, T, C = x.shape
+
+         qkv = self.c_attn(x)
+         q, k, v = qkv.split(self.n_embd, dim=2)
+
+         q = q.view(B, T, self.n_head, self.head_dim).transpose(1, 2)  # [B,H,T,D]
+         k = k.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
+         v = v.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
+
+         q = apply_rope(q, freqs_cis)
+         k = apply_rope(k, freqs_cis)
+
+         y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
+         y = y.transpose(1, 2).contiguous().view(B, T, C)
+         return self.c_proj(y)
+
+
+ class Block(nn.Module):
+     def __init__(self, config: EveConfig):
+         super().__init__()
+         self.ln_1 = RMSNorm(config.n_embd)
+         self.ln_2 = RMSNorm(config.n_embd)
+         self.attn = CausalSelfAttention(config)
+         self.mlp = SharedMoE(config)
+
+     def forward(self, x: torch.Tensor, freqs_cis: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+         x = x + self.attn(self.ln_1(x), freqs_cis)
+         mlp_out, aux_loss = self.mlp(self.ln_2(x))
+         x = x + mlp_out
+         return x, aux_loss
+
+
+ class DeepSeekMoE(PreTrainedModel, GenerationMixin):
+     config_class = EveConfig
+     _tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"}
+
+     # _tied_weights_keys = ["lm_head.weight"]  # <--- Removed to avoid conflict with PreTrainedModel internals
+
+     def __init__(self, config: EveConfig):
+         super().__init__(config)
+         self.config = config
+
+         self.transformer = nn.ModuleDict(
+             dict(
+                 wte=nn.Embedding(config.vocab_size, config.n_embd),
+                 h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+                 ln_f=RMSNorm(config.n_embd),
+             )
+         )
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         # Tie weights (Embedding and LM head share the same base parameter)
+         self.transformer.wte.weight = self.lm_head.weight
+
+         freqs_cis = precompute_rope_freqs(config.head_dim, config.block_size, config.rope_theta)
+         self.register_buffer("freqs_cis", freqs_cis, persistent=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+         # Harden generation_config to avoid invalid configs blocking save_pretrained()
+         if hasattr(self, "generation_config") and self.generation_config is not None:
+             g = self.generation_config
+             # If not sampling, sampling-only knobs must be neutral.
+             if not getattr(g, "do_sample", False):
+                 if getattr(g, "top_k", 0):
+                     g.top_k = None
+                 if getattr(g, "top_p", 1.0) != 1.0:
+                     g.top_p = None
+                 if getattr(g, "temperature", 1.0) != 1.0:
+                     g.temperature = None
+
+     # --- PEFT / HF compatibility hooks ---
+     def get_input_embeddings(self) -> nn.Module:
+         return self.transformer.wte
+
+     def set_input_embeddings(self, value: nn.Module) -> None:
+         self.transformer.wte = value
+
+     def get_output_embeddings(self) -> nn.Module:
+         return self.lm_head
+
+     def set_output_embeddings(self, value: nn.Module) -> None:
+         self.lm_head = value
+
+     # --- Forward ---
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         idx: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,  # accept + ignore
+         labels: Optional[torch.LongTensor] = None,
+         targets: Optional[torch.LongTensor] = None,
+         **kwargs: Any,
+     ) -> CausalLMOutputWithPast:
+         """
+         If labels/targets are provided, computes *shifted* causal LM loss:
+             loss = CE(logits[:, :-1], labels[:, 1:])
+         """
+         if idx is None:
+             if input_ids is None:
+                 raise ValueError("Must provide input_ids or idx.")
+             idx = input_ids
+         if targets is None:
+             targets = labels
+
+         B, T = idx.shape
+         x = self.transformer.wte(idx)
+
+         total_aux_loss: Optional[torch.Tensor] = None
+         freqs_cis = self.freqs_cis.to(x.device)
+
+         for block in self.transformer.h:
+             x, aux_loss = block(x, freqs_cis[:T])
+             total_aux_loss = aux_loss if total_aux_loss is None else (total_aux_loss + aux_loss)
+
+         x = self.transformer.ln_f(x)
+         logits = self.lm_head(x)  # [B,T,V]
+
+         loss = None
+         if targets is not None:
+             # Shift for causal LM
+             if T < 2:
+                 # Nothing to predict; return aux-only if desired
+                 shift_logits = logits[:, :0, :]
+                 shift_labels = targets[:, :0]
+             else:
+                 shift_logits = logits[:, :-1, :].contiguous()
+                 shift_labels = targets[:, 1:].contiguous()
+
+             loss = F.cross_entropy(
+                 shift_logits.view(-1, shift_logits.size(-1)).to(torch.float32),
+                 shift_labels.view(-1),
+                 ignore_index=-100,
+             )
+
+         # Add the router aux loss only when an LM loss was computed; during
+         # generate() labels are absent, so loss stays None instead of erroring.
+         if loss is not None and total_aux_loss is not None and self.config.router_aux_loss_coef:
+             loss = loss + (self.config.router_aux_loss_coef * total_aux_loss)
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=None,
+         )
+
+     # --- Generation ---
+     def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs: Any) -> Dict[str, Any]:
+         # No kv-cache support; always feed full sequence.
+         out = {"input_ids": input_ids}
+         # HF generate() may pass attention_mask; accept it even if we don't apply it.
+         if "attention_mask" in kwargs and kwargs["attention_mask"] is not None:
+             out["attention_mask"] = kwargs["attention_mask"]
+         return out
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "is_local": false,
+   "model_max_length": 1024,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
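One practical note, sketched below: pad, bos, and eos all map to <|endoftext|>, and the model does not apply attention_mask (see the note at the top of modeling_eve.py), so batched prompts should be right-padded.

```python
# Right-padding sketch (assumption drawn from the padding note in modeling_eve.py).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("anthonym21/Eve-2-MoE-NanoSQL-272M")
tok.padding_side = "right"
batch = tok(
    ["CREATE TABLE t (id INT)\nQuestion: How many rows?\nSQL:", "SELECT 1"],
    padding=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["attention_mask"].sum(dim=1))
```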