levossadtchi committed (verified)
Commit 5b0480b · Parent(s): 316e437

Upload folder using huggingface_hub

Files changed (3):
  1. config.json +8 -1
  2. modeling_qed.py +323 -0
  3. vocab.json +0 -0
config.json CHANGED
@@ -14,5 +14,12 @@
   "pad_token_id": 0,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "model_type": "qed"
+  "model_type": "qed",
+  "architectures": [
+    "QEDForCausalLM"
+  ],
+  "auto_map": {
+    "AutoConfig": "modeling_qed.QEDConfig",
+    "AutoModelForCausalLM": "modeling_qed.QEDForCausalLM"
+  }
 }
modeling_qed.py ADDED
@@ -0,0 +1,323 @@
# SPDX-License-Identifier: MIT
# Remote code for Hugging Face Hub (QED / SLLM causal LM).
# Single module so transformers dynamic import does not treat configuration_qed as a pip package.
# Mirrors training-time sllm.model.SLLMForCausalLM weight names for load_state_dict compatibility.
from __future__ import annotations

from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from transformers import PreTrainedModel, PretrainedConfig
from transformers.modeling_outputs import CausalLMOutputWithPast


+ class QEDConfig(PretrainedConfig):
18
+ """Configuration for QED (custom RoPE + SwiGLU decoder-only LM)."""
19
+
20
+ model_type = "qed"
21
+
22
+ def __init__(
23
+ self,
24
+ vocab_size: int = 49_152,
25
+ max_seq_len: int = 8_192,
26
+ d_model: int = 384,
27
+ n_layers: int = 32,
28
+ n_heads: int = 6,
29
+ ffn_hidden_dim: int = 1_024,
30
+ rope_theta: float = 10_000.0,
31
+ rms_norm_eps: float = 1e-5,
32
+ initializer_range: float = 0.02,
33
+ dropout: float = 0.0,
34
+ tie_word_embeddings: bool = True,
35
+ bias: bool = False,
36
+ pad_token_id: int = 0,
37
+ bos_token_id: int = 1,
38
+ eos_token_id: int = 2,
39
+ **kwargs,
40
+ ) -> None:
41
+ self.vocab_size = vocab_size
42
+ self.max_seq_len = max_seq_len
43
+ self.d_model = d_model
44
+ self.n_layers = n_layers
45
+ self.n_heads = n_heads
46
+ self.ffn_hidden_dim = ffn_hidden_dim
47
+ self.rope_theta = rope_theta
48
+ self.rms_norm_eps = rms_norm_eps
49
+ self.initializer_range = initializer_range
50
+ self.dropout = dropout
51
+ self.tie_word_embeddings = tie_word_embeddings
52
+ self.bias = bias
53
+ super().__init__(
54
+ pad_token_id=pad_token_id,
55
+ bos_token_id=bos_token_id,
56
+ eos_token_id=eos_token_id,
57
+ tie_word_embeddings=tie_word_embeddings,
58
+ **kwargs,
59
+ )
60
+
61
+
class RMSNorm(nn.Module):
    """Root-mean-square norm: x * rsqrt(mean(x^2) + eps), scaled by a learned weight."""

    def __init__(self, dim: int, eps: float) -> None:
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        variance = hidden_states.pow(2).mean(dim=-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        return self.weight * hidden_states


def rotate_half(x: torch.Tensor) -> torch.Tensor:
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)


class RotaryEmbedding(nn.Module):
    def __init__(self, dim: int, max_seq_len: int, theta: float) -> None:
        super().__init__()
        # Standard RoPE frequency schedule: theta^(-2i/dim) for i in [0, dim/2).
        inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
        positions = torch.arange(max_seq_len, dtype=torch.float32)
        freqs = torch.outer(positions, inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos(), persistent=False)
        self.register_buffer("sin_cached", emb.sin(), persistent=False)

    def forward(self, x: torch.Tensor, position_ids: torch.Tensor) -> torch.Tensor:
        # cos/sin: (batch, seq, dim) -> (batch, 1, seq, dim) so they broadcast over heads.
        cos = self.cos_cached[position_ids].unsqueeze(1).to(dtype=x.dtype, device=x.device)
        sin = self.sin_cached[position_ids].unsqueeze(1).to(dtype=x.dtype, device=x.device)
        return (x * cos) + (rotate_half(x) * sin)


class CausalSelfAttention(nn.Module):
    def __init__(self, config: QEDConfig) -> None:
        super().__init__()
        if config.d_model % config.n_heads != 0:
            raise ValueError("d_model must be divisible by n_heads.")
        self.n_heads = config.n_heads
        self.head_dim = config.d_model // config.n_heads
        self.scale = self.head_dim**-0.5

        self.q_proj = nn.Linear(config.d_model, config.d_model, bias=config.bias)
        self.k_proj = nn.Linear(config.d_model, config.d_model, bias=config.bias)
        self.v_proj = nn.Linear(config.d_model, config.d_model, bias=config.bias)
        self.o_proj = nn.Linear(config.d_model, config.d_model, bias=config.bias)
        self.rotary = RotaryEmbedding(self.head_dim, config.max_seq_len, config.rope_theta)
        self.dropout = config.dropout

    def _shape(self, x: torch.Tensor) -> torch.Tensor:
        batch_size, seq_len, _ = x.shape
        return x.view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
        query = self._shape(self.q_proj(hidden_states))
        key = self._shape(self.k_proj(hidden_states))
        value = self._shape(self.v_proj(hidden_states))

        query = self.rotary(query, position_ids)
        key = self.rotary(key, position_ids)
        if past_key_value is not None:
            past_key, past_value = past_key_value
            key = torch.cat([past_key, key], dim=-2)
            value = torch.cat([past_value, value], dim=-2)

        next_past_key_value = (key, value) if use_cache else None

        # Pick the cheapest mask that is still correct: with no cache and no real
        # padding, SDPA's fused is_causal path is used, including when the caller
        # passes an all-ones attention_mask.
        attn_mask = None
        is_causal = past_key_value is None
        if attention_mask is not None:
            key_padding_mask = attention_mask[:, None, None, :].to(dtype=torch.bool, device=query.device)
            if not torch.all(key_padding_mask):
                # Real padding present: combine the causal mask with the key-padding mask.
                kv_len = key.size(-2)
                key_padding_mask = key_padding_mask[..., :kv_len]
                query_positions = position_ids[:, None, :, None]
                key_positions = torch.arange(kv_len, device=query.device)[None, None, None, :]
                causal_mask = key_positions <= query_positions
                attn_mask = causal_mask & key_padding_mask
                is_causal = False
        if attn_mask is None and past_key_value is not None:
            # Cached decoding: query length != key length, so build an explicit
            # causal mask from absolute positions instead of using is_causal.
            kv_len = key.size(-2)
            query_positions = position_ids[:, None, :, None]
            key_positions = torch.arange(kv_len, device=query.device)[None, None, None, :]
            attn_mask = key_positions <= query_positions
            is_causal = False

        attn_output = F.scaled_dot_product_attention(
            query,
            key,
            value,
            attn_mask=attn_mask,
            dropout_p=self.dropout if self.training else 0.0,
            is_causal=is_causal,
            scale=self.scale,
        )
        attn_output = attn_output.transpose(1, 2).contiguous().view(hidden_states.shape)
        return self.o_proj(attn_output), next_past_key_value


class SwiGLU(nn.Module):
    def __init__(self, config: QEDConfig) -> None:
        super().__init__()
        self.gate_proj = nn.Linear(config.d_model, config.ffn_hidden_dim, bias=config.bias)
        self.up_proj = nn.Linear(config.d_model, config.ffn_hidden_dim, bias=config.bias)
        self.down_proj = nn.Linear(config.ffn_hidden_dim, config.d_model, bias=config.bias)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.down_proj(F.silu(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))


class TransformerBlock(nn.Module):
    def __init__(self, config: QEDConfig) -> None:
        super().__init__()
        self.input_norm = RMSNorm(config.d_model, config.rms_norm_eps)
        self.attention = CausalSelfAttention(config)
        self.post_attn_norm = RMSNorm(config.d_model, config.rms_norm_eps)
        self.mlp = SwiGLU(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
        attn_output, next_past_key_value = self.attention(
            self.input_norm(hidden_states),
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
        )
        hidden_states = hidden_states + attn_output
        hidden_states = hidden_states + self.mlp(self.post_attn_norm(hidden_states))
        return hidden_states, next_past_key_value


class QEDForCausalLM(PreTrainedModel):
    config_class = QEDConfig
    supports_gradient_checkpointing = False
    _no_split_modules = ["TransformerBlock"]

    def __init__(self, config: QEDConfig) -> None:
        super().__init__(config)
        self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
        self.layers = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
        self.norm = RMSNorm(config.d_model, config.rms_norm_eps)
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=True)

        if config.tie_word_embeddings:
            self.lm_head.weight = self.embed_tokens.weight

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embed_tokens

    def set_input_embeddings(self, value: nn.Module) -> None:
        self.embed_tokens = value

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module) -> None:
        self.lm_head = new_embeddings

    def _tie_weights(self) -> None:
        if self.config.tie_word_embeddings:
            self.lm_head.weight = self.embed_tokens.weight

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[list] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        _ = output_attentions, output_hidden_states
        return_dict = return_dict if return_dict is not None else True
        use_cache = use_cache if use_cache is not None else False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            batch_size, seq_len = input_ids.shape
            hidden_states = self.embed_tokens(input_ids)
        else:
            hidden_states = inputs_embeds
            batch_size, seq_len = hidden_states.shape[:2]

        past_length = 0
        if past_key_values is not None and len(past_key_values) > 0:
            past_length = past_key_values[0][0].size(-2)
        total_seq_len = past_length + seq_len
        if total_seq_len > self.config.max_seq_len:
            raise ValueError(
                f"Input length {total_seq_len} exceeds model context window {self.config.max_seq_len}."
            )

        if position_ids is None:
            position_ids = torch.arange(
                past_length,
                total_seq_len,
                device=hidden_states.device,
            ).unsqueeze(0).expand(batch_size, -1)

        next_past_key_values: list = []
        for layer_index, layer in enumerate(self.layers):
            layer_past = past_key_values[layer_index] if past_key_values is not None else None
            hidden_states, next_past_key_value = layer(
                hidden_states,
                position_ids=position_ids,
                attention_mask=attention_mask,
                past_key_value=layer_past,
                use_cache=use_cache,
            )
            if use_cache and next_past_key_value is not None:
                next_past_key_values.append(next_past_key_value)

        hidden_states = self.norm(hidden_states)
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Standard HF causal-LM convention: logits at position t predict the
            # token at position t + 1, so shift before the cross-entropy.
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = labels[:, 1:].contiguous()
            loss = F.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
                ignore_index=-100,
            )

        if not return_dict:
            out = (logits,)
            if past_key_values is not None or use_cache:
                out = out + (next_past_key_values if use_cache else None,)
            if loss is not None:
                out = (loss,) + out
            return out

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=next_past_key_values if use_cache else None,
            hidden_states=None,
            attentions=None,
        )
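As a quick sanity check of the module above, here is a self-contained smoke test of the prefill-then-decode KV-cache path. It uses a deliberately tiny, made-up configuration (not the published checkpoint's hyperparameters) and greedy argmax sampling, and assumes the classes are importable, e.g. via from modeling_qed import QEDConfig, QEDForCausalLM:

import torch

# Tiny illustrative config; the real checkpoint's sizes live in config.json.
config = QEDConfig(vocab_size=128, max_seq_len=64, d_model=32,
                   n_layers=2, n_heads=4, ffn_hidden_dim=64)
model = QEDForCausalLM(config).eval()

prompt = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    # Prefill: run the whole prompt once and keep the per-layer (key, value) cache.
    out = model(input_ids=prompt, use_cache=True)
    past = out.past_key_values
    next_token = out.logits[:, -1].argmax(dim=-1, keepdim=True)
    # Decode: feed one token at a time; position_ids default to the cache length.
    for _ in range(4):
        out = model(input_ids=next_token, past_key_values=past, use_cache=True)
        past = out.past_key_values
        next_token = out.logits[:, -1].argmax(dim=-1, keepdim=True)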
vocab.json CHANGED
The diff for this file is too large to render. See raw diff