PhysiQuanty committed
Commit 4c9ccfe · 0 Parent(s)

Duplicate from PhysiQuanty/Patenty-Test2-Radix-65536

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,10 @@
+
+ ---
+ # BinaryLLM (HF export)
+
+ Tokenizer-free / base-N model export.
+
+ ## Load
+ ```python
+ from transformers import AutoModelForCausalLM
+ m = AutoModelForCausalLM.from_pretrained("./hf_binaryllm_repo", trust_remote_code=True)
+ ```
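+
+ A minimal round-trip sketch; the local path and the input string below are placeholders:
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tok = AutoTokenizer.from_pretrained("./hf_binaryllm_repo", trust_remote_code=True)
+ m = AutoModelForCausalLM.from_pretrained("./hf_binaryllm_repo", trust_remote_code=True)
+
+ # Text maps directly to base-65536 digits (2 UTF-8 bytes per id); <BOS>/<EOS> are 65536/65537.
+ enc = tok("Hello", return_tensors="pt")
+ out = m(**enc)
+ print(enc["input_ids"].tolist(), out.logits.shape)  # logits cover the 65538-entry vocabulary
+ ```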
SAVE_tokenizer_config_SAVE.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "added_tokens_decoder": {
+ "65536": {
+ "content": "<BOS>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "65537": {
+ "content": "<UNK>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<BOS>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<EOS>",
+ "extra_special_tokens": {},
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<EOS>",
+ "tokenizer_class": "tokenization_binaryllm.BinaryLLMTokenizer",
+ "unk_token": "<UNK>",
+ "auto_map": {
+ "AutoTokenizer": "tokenization_binaryllm.BinaryLLMTokenizer"
+ }
+ }
__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .configuration_binaryllm import BinaryLLMConfig
+ from .modeling_binaryllm import BinaryLLMForCausalLM
+ from .tokenization_binaryllm import BinaryLLMTokenizer
binaryllm_vocab.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "base_vocab_size": 65536,
+ "vocab_size": 65538,
+ "bos_token": "<BOS>",
+ "bos_token_id": 65536,
+ "eos_token": "<EOS>",
+ "eos_token_id": 65537,
+ "unk_token": "<EOS>",
+ "unk_token_id": 65537,
+ "pad_token": "<EOS>",
+ "pad_token_id": 65537
+ }
config.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "model_type": "binaryllm",
+ "architectures": ["BinaryLLMForCausalLM"],
+ "auto_map": {
+ "AutoConfig": "configuration_binaryllm.BinaryLLMConfig",
+ "AutoModelForCausalLM": "modeling_binaryllm.BinaryLLMForCausalLM",
+ "AutoTokenizer": "tokenization_binaryllm.BinaryLLMTokenizer"
+ },
+ "vocab_size": 65538,
+ "bos_token_id": 65536,
+ "eos_token_id": 65537,
+ "pad_token_id": 65537,
+ "hidden_size": 512,
+ "num_hidden_layers": 4,
+ "num_attention_heads": 4,
+ "intermediate_size": 2048,
+ "max_position_embeddings": 2048,
+ "dropout": 0.1,
+ "activation": "gelu",
+ "attn_backend": "auto",
+ "torch_dtype": "float32"
+ }
configuration_binaryllm.py ADDED
@@ -0,0 +1,37 @@
+ from transformers import PretrainedConfig
+
+
+ class BinaryLLMConfig(PretrainedConfig):
+     model_type = "binaryllm"
+
+     def __init__(
+         self,
+         vocab_size: int = 65538,
+         hidden_size: int = 512,
+         num_hidden_layers: int = 4,
+         num_attention_heads: int = 4,
+         intermediate_size: int = 2048,
+         max_position_embeddings: int = 2048,
+         dropout: float = 0.1,
+         activation: str = "gelu",
+         attn_backend: str = "auto",
+         bos_token_id: int = 65536,
+         eos_token_id: int = 65537,
+         pad_token_id: int = 65537,
+         **kwargs,
+     ):
+         self.vocab_size = int(vocab_size)
+         self.hidden_size = int(hidden_size)
+         self.num_hidden_layers = int(num_hidden_layers)
+         self.num_attention_heads = int(num_attention_heads)
+         self.intermediate_size = int(intermediate_size)
+         self.max_position_embeddings = int(max_position_embeddings)
+         self.dropout = float(dropout)
+         self.activation = str(activation)
+         self.attn_backend = str(attn_backend)
+
+         self.bos_token_id = int(bos_token_id)
+         self.eos_token_id = int(eos_token_id)
+         self.pad_token_id = int(pad_token_id)
+
+         super().__init__(**kwargs)
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c7c56c25632fdb81d24a2ee74fe46a050b4faddfba5fe62612974b64ee5d660
+ size 318893256
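As a rough sanity check, the dimensions in config.json account for this file size if the weights are stored in float32 and the output head is kept as a separate (untied) tensor; the breakdown below is a back-of-the-envelope sketch, not a dump of the actual tensor layout:

```python
# Parameter estimate for hidden_size=512, 4 layers, intermediate_size=2048, vocab_size=65538.
emb = 65538 * 512                                # token embedding
head = 65538 * 512                               # output projection (assumed stored untied, no bias)
attn = 4 * (512 * 512 + 512)                     # q/k/v/out projections per layer
ffn = (512 * 2048 + 2048) + (2048 * 512 + 512)   # feed-forward per layer
norms = 2 * 2 * 512                              # two LayerNorms (weight + bias) per layer
total = emb + head + 4 * (attn + ffn + norms) + 2 * 512   # + final LayerNorm
print(total, total * 4)   # ~79.7M parameters, ~318.9 MB in fp32, close to the 318,893,256-byte blob
```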
modeling_binaryllm.py ADDED
@@ -0,0 +1,556 @@
+ import math
+ from dataclasses import dataclass
+ from typing import Optional
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from transformers import PreTrainedModel
+ from transformers.modeling_outputs import CausalLMOutput
+
+ from .configuration_binaryllm import BinaryLLMConfig
+
+ try:
+     import flash_attn_v100_cuda
+     _FLASH_V100_AVAILABLE = True
+ except Exception:
+     flash_attn_v100_cuda = None
+     _FLASH_V100_AVAILABLE = False
+
+
+ class PositionalEncoding(nn.Module):
+     """
+     Sinusoidal positional encoding, stored in fp32,
+     then cast to the dtype of x on every forward pass.
+     """
+
+     def __init__(self, d_model: int, max_len: int) -> None:
+         super().__init__()
+         pe = torch.zeros(max_len, d_model, dtype=torch.float32)
+         position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
+         div_term = torch.exp(
+             torch.arange(0, d_model, 2, dtype=torch.float32)
+             * (-torch.log(torch.tensor(10000.0)) / d_model)
+         )
+         pe[:, 0::2] = torch.sin(position * div_term)
+         pe[:, 1::2] = torch.cos(position * div_term)
+         pe = pe.unsqueeze(0)
+         self.register_buffer("pe", pe, persistent=False)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         t = x.size(1)
+         pe = self.pe[:, :t, :].to(device=x.device, dtype=x.dtype)
+         return x + pe
+
+
+ @dataclass
+ class _InnerCfg:
+     block_size: int
+     embed_dim: int
+     vocab_size: int
+     num_heads: int
+     num_layers: int
+     ff_hidden_dim: int
+     dropout: float
+     layernorm_dim: Optional[int] = None
+     head_dim: Optional[int] = None
+     attn_backend: str = "auto"
+
+
+ class FlashSelfAttentionPortable(nn.Module):
+     def __init__(
+         self,
+         embed_dim: int,
+         num_heads: int,
+         dropout: float = 0.0,
+         causal: bool = True,
+         backend: str = "auto",
+     ) -> None:
+         super().__init__()
+
+         if embed_dim % num_heads != 0:
+             raise ValueError(
+                 f"embed_dim ({embed_dim}) must be divisible by num_heads ({num_heads})"
+             )
+
+         self.embed_dim = embed_dim
+         self.num_heads = num_heads
+         self.head_dim = embed_dim // num_heads
+         self.dropout = float(dropout)
+         self.causal = bool(causal)
+         self.backend = str(backend)
+         self.softmax_scale = self.head_dim ** -0.5
+
+         self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True)
+         self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True)
+         self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True)
+         self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True)
+
+     def _shape_qkv(
+         self,
+         x: torch.Tensor,
+     ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.dtype]:
+         bsz, seqlen, _ = x.shape
+         residual_dtype = x.dtype
+
+         proj_dtype = self.q_proj.weight.dtype
+         if x.dtype != proj_dtype:
+             x = x.to(proj_dtype)
+
+         q = self.q_proj(x)
+         k = self.k_proj(x)
+         v = self.v_proj(x)
+
+         q = q.view(bsz, seqlen, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+         k = k.view(bsz, seqlen, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+         v = v.view(bsz, seqlen, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+         return q, k, v, residual_dtype
+
+     def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
+         bsz, nheads, seqlen, head_dim = x.shape
+         return x.transpose(1, 2).contiguous().view(bsz, seqlen, nheads * head_dim)
+
+     def _can_use_v100_kernel(self, q: torch.Tensor, padding_mask: Optional[torch.Tensor]) -> bool:
+         if not _FLASH_V100_AVAILABLE:
+             return False
+
+         if not q.is_cuda:
+             return False
+
+         if padding_mask is not None and bool(padding_mask.any().item()):
+             return False
+
+         cc = torch.cuda.get_device_capability(q.device)
+         if cc != (7, 0):
+             return False
+
+         hd = q.size(-1)
+         if hd % 2 != 0:
+             return False
+         if hd % 8 != 0:
+             return False
+         if hd > 256:
+             return False
+
+         return True
+
+     def _flash_attn_v100(
+         self,
+         q: torch.Tensor,
+         k: torch.Tensor,
+         v: torch.Tensor,
+     ) -> torch.Tensor:
+         if q.dtype != torch.float16:
+             q = q.to(torch.float16)
+         if k.dtype != torch.float16:
+             k = k.to(torch.float16)
+         if v.dtype != torch.float16:
+             v = v.to(torch.float16)
+
+         result = flash_attn_v100_cuda.fwd(
+             q,
+             k,
+             v,
+             None,
+             None,
+             0.0,
+             self.softmax_scale,
+             self.causal,
+             -1,
+             -1,
+             0.0,
+             False,
+             None,
+         )
+
+         out = result[0]
+         return out
+
+     def _sdpa_attn(
+         self,
+         q: torch.Tensor,
+         k: torch.Tensor,
+         v: torch.Tensor,
+         padding_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         bsz, nheads, tq, _ = q.shape
+         tk = k.size(-2)
+
+         attn_mask = None
+         if padding_mask is not None:
+             key_mask = padding_mask[:, None, None, :].to(device=q.device, dtype=torch.bool)
+             key_mask = key_mask.expand(bsz, nheads, tq, tk)
+             attn_mask = ~key_mask
+
+         dropout_p = self.dropout if self.training else 0.0
+
+         with torch.backends.cuda.sdp_kernel(
+             enable_flash=True,
+             enable_mem_efficient=True,
+             enable_math=True,
+         ):
+             out = F.scaled_dot_product_attention(
+                 q,
+                 k,
+                 v,
+                 attn_mask=attn_mask,
+                 dropout_p=dropout_p,
+                 is_causal=self.causal if attn_mask is None else False,
+                 scale=self.softmax_scale,
+             )
+
+         return out
+
+     def _eager_attn(
+         self,
+         q: torch.Tensor,
+         k: torch.Tensor,
+         v: torch.Tensor,
+         padding_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * self.softmax_scale
+
+         if self.causal:
+             tq = q.size(-2)
+             tk = k.size(-2)
+             causal_mask = torch.triu(
+                 torch.ones(tq, tk, device=scores.device, dtype=torch.bool),
+                 diagonal=1,
+             )
+             scores = scores.masked_fill(causal_mask.unsqueeze(0).unsqueeze(0), float("-inf"))
+
+         if padding_mask is not None:
+             key_mask = padding_mask[:, None, None, :].to(device=scores.device, dtype=torch.bool)
+             scores = scores.masked_fill(key_mask, float("-inf"))
+
+         probs = torch.softmax(scores, dim=-1)
+
+         if self.training and self.dropout > 0.0:
+             probs = F.dropout(probs, p=self.dropout)
+
+         out = torch.matmul(probs, v.float())
+         return out.to(q.dtype)
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         padding_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         q, k, v, residual_dtype = self._shape_qkv(x)
+
+         if padding_mask is not None:
+             padding_mask = padding_mask.to(device=x.device, dtype=torch.bool)
+
+         backend = self.backend
+
+         if backend == "v100":
+             if not self._can_use_v100_kernel(q, padding_mask):
+                 raise RuntimeError(
+                     "backend='v100' was requested but is unavailable "
+                     "(flash_attn_v100_cuda missing, GPU is not sm70/V100, padding is present, "
+                     "or head_dim is incompatible)."
+                 )
+             out = self._flash_attn_v100(q, k, v)
+
+         elif backend == "sdpa":
+             out = self._sdpa_attn(q, k, v, padding_mask=padding_mask)
+
+         elif backend == "eager":
+             out = self._eager_attn(q, k, v, padding_mask=padding_mask)
+
+         elif backend == "auto":
+             if self._can_use_v100_kernel(q, padding_mask):
+                 out = self._flash_attn_v100(q, k, v)
+             else:
+                 out = self._sdpa_attn(q, k, v, padding_mask=padding_mask)
+
+         else:
+             raise ValueError(f"unsupported attention backend: {backend}")
+
+         out = self._merge_heads(out)
+
+         out_proj_dtype = self.out_proj.weight.dtype
+         if out.dtype != out_proj_dtype:
+             out = out.to(out_proj_dtype)
+
+         out = self.out_proj(out)
+
+         if out.dtype != residual_dtype:
+             out = out.to(residual_dtype)
+
+         return out
+
+
+ class FlashTransformerEncoderLayerPortable(nn.Module):
+     def __init__(
+         self,
+         d_model: int,
+         nhead: int,
+         dim_feedforward: int,
+         dropout: float = 0.1,
+         activation: str = "gelu",
+         batch_first: bool = True,
+         attn_backend: str = "auto",
+     ) -> None:
+         super().__init__()
+
+         if not batch_first:
+             raise ValueError("This implementation only supports batch_first=True.")
+
+         self.self_attn = FlashSelfAttentionPortable(
+             embed_dim=d_model,
+             num_heads=nhead,
+             dropout=dropout,
+             causal=True,
+             backend=attn_backend,
+         )
+
+         self.linear1 = nn.Linear(d_model, dim_feedforward)
+         self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+         self.norm1 = nn.LayerNorm(d_model)
+         self.norm2 = nn.LayerNorm(d_model)
+
+         self.dropout = nn.Dropout(dropout)
+         self.dropout1 = nn.Dropout(dropout)
+         self.dropout2 = nn.Dropout(dropout)
+
+         if activation == "gelu":
+             self.activation = F.gelu
+         elif activation == "relu":
+             self.activation = F.relu
+         else:
+             raise ValueError(f"unsupported activation: {activation}")
+
+     def _sa_block(
+         self,
+         x: torch.Tensor,
+         src_key_padding_mask: Optional[torch.Tensor],
+     ) -> torch.Tensor:
+         x = self.self_attn(x, padding_mask=src_key_padding_mask)
+         x = self.dropout1(x)
+         return x
+
+     def _ff_block(self, x: torch.Tensor) -> torch.Tensor:
+         ff_dtype = self.linear1.weight.dtype
+         x_ff = x if x.dtype == ff_dtype else x.to(ff_dtype)
+
+         x_ff = self.linear1(x_ff)
+         x_ff = self.activation(x_ff)
+         x_ff = self.dropout(x_ff)
+         x_ff = self.linear2(x_ff)
+         x_ff = self.dropout2(x_ff)
+
+         if x_ff.dtype != x.dtype:
+             x_ff = x_ff.to(x.dtype)
+
+         return x_ff
+
+     def forward(
+         self,
+         src: torch.Tensor,
+         src_mask: Optional[torch.Tensor] = None,
+         src_key_padding_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         x = src
+         x = self.norm1(x + self._sa_block(x, src_key_padding_mask))
+         x = self.norm2(x + self._ff_block(x))
+         return x
+
+
+ class FlashTransformerEncoderPortable(nn.Module):
+     def __init__(
+         self,
+         encoder_layer: FlashTransformerEncoderLayerPortable,
+         num_layers: int,
+         attn_backend: str = "auto",
+     ) -> None:
+         super().__init__()
+
+         d_model = encoder_layer.norm1.normalized_shape[0]
+         nhead = encoder_layer.self_attn.num_heads
+         dim_feedforward = encoder_layer.linear1.out_features
+         dropout = encoder_layer.dropout.p
+
+         self.layers = nn.ModuleList(
+             [
+                 FlashTransformerEncoderLayerPortable(
+                     d_model=d_model,
+                     nhead=nhead,
+                     dim_feedforward=dim_feedforward,
+                     dropout=dropout,
+                     activation="gelu",
+                     batch_first=True,
+                     attn_backend=attn_backend,
+                 )
+                 for _ in range(num_layers)
+             ]
+         )
+
+     def forward(
+         self,
+         src: torch.Tensor,
+         mask: Optional[torch.Tensor] = None,
+         src_key_padding_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         x = src
+         for layer in self.layers:
+             x = layer(x, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
+         return x
+
+
+ class TinyTransformerLM(nn.Module):
+     def __init__(self, cfg: _InnerCfg) -> None:
+         super().__init__()
+         self.cfg = cfg
+
+         vocab_size = cfg.vocab_size
+         self.tok_embed = nn.Embedding(vocab_size, cfg.embed_dim)
+         self.pos_encoding = PositionalEncoding(cfg.embed_dim, cfg.block_size)
+
+         encoder_layer = FlashTransformerEncoderLayerPortable(
+             d_model=cfg.embed_dim,
+             nhead=cfg.num_heads,
+             dim_feedforward=cfg.ff_hidden_dim,
+             dropout=cfg.dropout,
+             activation="gelu",
+             batch_first=True,
+             attn_backend=cfg.attn_backend,
+         )
+         self.encoder = FlashTransformerEncoderPortable(
+             encoder_layer,
+             num_layers=cfg.num_layers,
+             attn_backend=cfg.attn_backend,
+         )
+
+         ln_dim = cfg.layernorm_dim or cfg.embed_dim
+         head_dim = cfg.head_dim or ln_dim
+
+         self.pre_ln_proj: Optional[nn.Linear] = None
+         if ln_dim != cfg.embed_dim:
+             self.pre_ln_proj = nn.Linear(cfg.embed_dim, ln_dim)
+
+         self.ln = nn.LayerNorm(ln_dim)
+
+         self.head_pre: Optional[nn.Linear] = None
+         if head_dim != ln_dim:
+             self.head_pre = nn.Linear(ln_dim, head_dim)
+
+         self.head = nn.Linear(head_dim, vocab_size, bias=False)
+
+         if self.pre_ln_proj is None and self.head_pre is None and head_dim == cfg.embed_dim:
+             self.head.weight = self.tok_embed.weight
+
+         causal = torch.triu(torch.ones(cfg.block_size, cfg.block_size, dtype=torch.bool), diagonal=1)
+         self.register_buffer("causal_mask", causal, persistent=False)
+
+     def forward(self, tokens: torch.Tensor, padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         x = self.tok_embed(tokens)
+         x = self.pos_encoding(x)
+
+         seq_len = tokens.size(1)
+         attn_mask = self.causal_mask[:seq_len, :seq_len].to(device=tokens.device)
+
+         if padding_mask is not None:
+             padding_mask = padding_mask[:, :seq_len].to(device=tokens.device, dtype=torch.bool)
+
+         x = self.encoder(x, mask=attn_mask, src_key_padding_mask=padding_mask)
+
+         if self.pre_ln_proj is not None:
+             proj_dtype = self.pre_ln_proj.weight.dtype
+             if x.dtype != proj_dtype:
+                 x = x.to(proj_dtype)
+             x = self.pre_ln_proj(x)
+
+         ln_dtype = self.ln.weight.dtype
+         if x.dtype != ln_dtype:
+             x = x.to(ln_dtype)
+         x = self.ln(x)
+
+         if self.head_pre is not None:
+             head_pre_dtype = self.head_pre.weight.dtype
+             if x.dtype != head_pre_dtype:
+                 x = x.to(head_pre_dtype)
+             x = self.head_pre(x)
+
+         head_dtype = self.head.weight.dtype
+         if x.dtype != head_dtype:
+             x = x.to(head_dtype)
+
+         return self.head(x)
+
+
+ class BinaryLLMForCausalLM(PreTrainedModel):
+     config_class = BinaryLLMConfig
+     main_input_name = "input_ids"
+
+     def __init__(self, config: BinaryLLMConfig):
+         super().__init__(config)
+
+         attn_backend = getattr(config, "attn_backend", "auto")
+
+         inner = _InnerCfg(
+             block_size=int(config.max_position_embeddings),
+             embed_dim=int(config.hidden_size),
+             vocab_size=int(config.vocab_size),
+             num_heads=int(config.num_attention_heads),
+             num_layers=int(config.num_hidden_layers),
+             ff_hidden_dim=int(config.intermediate_size),
+             dropout=float(getattr(config, "dropout", 0.0)),
+             layernorm_dim=None,
+             head_dim=None,
+             attn_backend=str(attn_backend),
+         )
+         self.model = TinyTransformerLM(inner)
+
+         self.post_init()
+
+     def get_input_embeddings(self) -> nn.Module:
+         return self.model.tok_embed
+
+     def set_input_embeddings(self, value: nn.Module) -> None:
+         self.model.tok_embed = value
+
+     def get_output_embeddings(self) -> nn.Module:
+         return self.model.head
+
+     def set_output_embeddings(self, new_embeddings: nn.Module) -> None:
+         self.model.head = new_embeddings
+
+     def prepare_inputs_for_generation(
+         self,
+         input_ids: torch.LongTensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         **kwargs,
+     ):
+         return {
+             "input_ids": input_ids,
+             "attention_mask": attention_mask,
+         }
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         **kwargs,
+     ) -> CausalLMOutput:
+         padding_mask = None
+         if attention_mask is not None:
+             padding_mask = ~attention_mask.to(torch.bool)
+
+         logits = self.model(input_ids, padding_mask=padding_mask)
+
+         loss = None
+         if labels is not None:
+             shift_logits = logits[:, :-1, :].contiguous()
+             shift_labels = labels[:, 1:].contiguous()
+             loss = F.cross_entropy(
+                 shift_logits.view(-1, self.config.vocab_size),
+                 shift_labels.view(-1),
+                 ignore_index=-100,
+             )
+
+         return CausalLMOutput(loss=loss, logits=logits)
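A quick shape-and-loss check against the classes above; the repo path is a placeholder, and building from the config (random weights) avoids touching the LFS checkpoint:

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

cfg = AutoConfig.from_pretrained("./hf_binaryllm_repo", trust_remote_code=True)
model = AutoModelForCausalLM.from_config(cfg, trust_remote_code=True)  # randomly initialised

ids = torch.randint(0, cfg.vocab_size, (2, 16))   # batch of 2, sequence length 16
mask = torch.ones_like(ids)
out = model(input_ids=ids, attention_mask=mask, labels=ids)
print(out.logits.shape, out.loss)                 # (2, 16, 65538) and the shifted next-token loss
```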
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<BOS>",
+ "eos_token": "<EOS>",
+ "pad_token": "<EOS>",
+ "unk_token": "<EOS>"
+ }
tokenization_binaryllm.py ADDED
@@ -0,0 +1,243 @@
+ #!/usr/bin/env python3
+ # tokenization_binaryllm.py
+ # ============================================================
+ # BinaryLLMTokenizer (AutoTokenizer compatible) - EXACTLY the same
+ # tokenization/decoding as llmTalk (base=65536 mode) + infer_tagged12/11:
+ #
+ # - Base: 65536
+ # - Radix IDs: 0..65535
+ # - BOS: 65536
+ # - EOS: 65537
+ # - UNK: alias of EOS (65537) (no extra token in the base)
+ # - Encoding: UTF-8 bytes -> base-65536 digits, BIG-ENDIAN (2-byte chunks)
+ #   * if the byte length is odd: the last byte is encoded as a value 0..255 (1 digit)
+ # - Decoding: digits -> BIG-ENDIAN bytes -> UTF-8 (errors="replace")
+ #
+ # Important:
+ # - build_inputs_with_special_tokens: [BOS] + seq + [EOS] (as in standard HF)
+ # - encode(..., add_special_tokens=False) returns ONLY the base-65536 digits
+ # - encode(..., add_special_tokens=True) adds BOS/EOS via build_inputs...
+ #
+ # This file alone is enough for `trust_remote_code=True` on the HF repo side.
+ # ============================================================
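+ #
+ # Worked example (illustrative annotation, not from the original header; values
+ # follow the rules above): "abc" -> UTF-8 bytes 0x61 0x62 0x63 -> digits
+ # [0x6162, 0x63] = [24930, 99]. Decoding maps 24930 (> 255) back to two bytes
+ # and 99 (<= 255) back to one byte, recovering "abc".
+ # With specials: [65536, 24930, 99, 65537].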
+
+ from __future__ import annotations
+
+ import json
+ import os
+ import re
+ from typing import Dict, List, Optional, Tuple, Any
+
+ from transformers import PreTrainedTokenizer
+
+
+ class BinaryLLMTokenizer(PreTrainedTokenizer):
+     model_input_names = ["input_ids", "attention_mask"]
+
+     TOKEN_RE = re.compile(r"^<U([0-9A-Fa-f]{4})>$")
+
+     def __init__(
+         self,
+         bos_token: str = "<BOS>",
+         eos_token: str = "<EOS>",
+         unk_token: str = "<UNK>",
+         pad_token: Optional[str] = None,
+         **kwargs: Any,
+     ):
+         # strict radix
+         self._base_vocab_size = 65536
+
+         # strict specials: base_vocab_size + 0/1
+         self._bos_id = 65536
+         self._eos_id = 65537
+
+         # UNK aliases EOS (no additional token)
+         self._unk_id = self._eos_id
+
+         self._bos_str = bos_token
+         self._eos_str = eos_token
+         self._unk_str = unk_token
+         self._pad_str = pad_token
+
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             **kwargs,
+         )
+
+     # ---------- vocab / ids ----------
+
+     @property
+     def vocab_size(self) -> int:
+         # 65536 + BOS + EOS
+         return 65538
+
+     def get_vocab(self) -> Dict[str, int]:
+         # IMPORTANT: never call self.unk_token_id here (infinite recursion)
+         v = {
+             self._bos_str: self._bos_id,
+             self._eos_str: self._eos_id,
+             self._unk_str: self._unk_id,
+         }
+         if self.pad_token is not None:
+             v[self.pad_token] = self._convert_token_to_id(self.pad_token)
+         return v
+
+     def _id_to_token_base(self, i: int) -> str:
+         return f"<U{i:04X}>"
+
+     # ---------- core encode/decode (same logic as infer_tagged / llmTalk base) ----------
+
+     def _encode_to_base65536_big_endian(self, text: str) -> List[int]:
+         b = bytearray(text.encode("utf-8", errors="strict"))
+         if len(b) == 0:
+             return [0]
+
+         out: List[int] = []
+         i = 0
+         n = len(b)
+
+         while i + 1 < n:
+             # 2 bytes -> 1 base-65536 digit, big-endian
+             out.append((b[i] << 8) | b[i + 1])
+             i += 2
+
+         if i < n:
+             # trailing single byte -> digit 0..255
+             out.append(int(b[i]))
+
+         return out
+
+     def _decode_from_base65536_big_endian(self, ids: List[int]) -> str:
+         bb = bytearray()
+         for x in ids:
+             xi = int(x) & 0xFFFFFFFF
+             if 0 <= xi <= 255:
+                 bb.append(xi)
+             else:
+                 bb.append((xi >> 8) & 0xFF)
+                 bb.append(xi & 0xFF)
+         return bytes(bb).decode("utf-8", errors="replace")
+
+     # ---------- HF tokenizer API overrides ----------
+
+     def _tokenize(self, text: str) -> List[str]:
+         ids = self._encode_to_base65536_big_endian(text)
+         return [self._id_to_token_base(i) for i in ids]
+
+     def _convert_token_to_id(self, token: str) -> int:
+         if token == self._bos_str:
+             return self._bos_id
+         if token == self._eos_str:
+             return self._eos_id
+         if token == self._unk_str:
+             return self._unk_id
+
+         if self.pad_token is not None and token == self.pad_token:
+             # no dedicated PAD token => alias of EOS (consistent with this setup)
+             if self.pad_token == self._eos_str:
+                 return self._eos_id
+             return self._eos_id
+
+         m = self.TOKEN_RE.match(token)
+         if m:
+             return int(m.group(1), 16)
+
+         return self._unk_id
+
+     def _convert_id_to_token(self, index: int) -> str:
+         if index == self._bos_id:
+             return self._bos_str
+         if index == self._eos_id:
+             return self._eos_str
+         if index == self._unk_id:
+             return self._unk_str
+
+         if self.pad_token is not None and index == self.pad_token_id:
+             return self.pad_token
+
+         if 0 <= index < self._base_vocab_size:
+             return self._id_to_token_base(index)
+
+         return self._unk_str
+
+     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+         ids: List[int] = []
+         for t in tokens:
+             if t in (self._bos_str, self._eos_str, self._unk_str):
+                 continue
+             if self.pad_token is not None and t == self.pad_token:
+                 continue
+             m = self.TOKEN_RE.match(t)
+             if m:
+                 ids.append(int(m.group(1), 16))
+         return self._decode_from_base65536_big_endian(ids)
+
+     def build_inputs_with_special_tokens(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None,
+     ) -> List[int]:
+         # HF-style (simple): [BOS] seq [EOS]
+         # Pair: [BOS] seq0 [EOS] seq1 [EOS]
+         if token_ids_1 is None:
+             return [self._bos_id] + token_ids_0 + [self._eos_id]
+         return [self._bos_id] + token_ids_0 + [self._eos_id] + token_ids_1 + [self._eos_id]
+
+     def get_special_tokens_mask(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None,
+         already_has_special_tokens: bool = False,
+     ) -> List[int]:
+         pad_id = self.pad_token_id if self.pad_token is not None else -1
+
+         if already_has_special_tokens:
+             return [
+                 1 if t in (self._bos_id, self._eos_id, self._unk_id, pad_id) else 0
+                 for t in token_ids_0
+             ]
+
+         if token_ids_1 is None:
+             return [1] + [0] * len(token_ids_0) + [1]
+         return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None,
+     ) -> List[int]:
+         if token_ids_1 is None:
+             return [0] * (len(token_ids_0) + 2)
+         return [0] * (len(token_ids_0) + len(token_ids_1) + 3)
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         if not os.path.isdir(save_directory):
+             os.makedirs(save_directory, exist_ok=True)
+
+         name = (filename_prefix + "-" if filename_prefix else "") + "binaryllm_vocab.json"
+         path = os.path.join(save_directory, name)
+
+         data = {
+             "base_vocab_size": 65536,
+             "vocab_size": 65538,
+             "bos_token": self._bos_str,
+             "bos_token_id": self._bos_id,
+             "eos_token": self._eos_str,
+             "eos_token_id": self._eos_id,
+             "unk_token": self._unk_str,
+             "unk_token_id": self._unk_id,
+             "pad_token": self.pad_token,
+             "pad_token_id": self.pad_token_id,
+             "encoding": "utf-8",
+             "radix": 65536,
+             "endianness": "big",
+             "odd_length_rule": "last_byte_as_single_digit_0_255",
+         }
+
+         with open(path, "w", encoding="utf-8") as f:
+             json.dump(data, f, ensure_ascii=False, indent=2)
+
+         return (path,)
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "added_tokens_decoder": {
+ "65536": {
+ "content": "<BOS>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "65537": {
+ "content": "<EOS>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<BOS>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<EOS>",
+ "extra_special_tokens": {},
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<EOS>",
+ "tokenizer_class": "BinaryLLMTokenizer",
+ "unk_token": "<EOS>",
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_binaryllm.BinaryLLMTokenizer",
+ null
+ ]
+ }
+ }
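With the configuration files above, a tokenizer-only round trip is enough to see the id layout; the repo path is a placeholder:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./hf_binaryllm_repo", trust_remote_code=True)

ids = tok.encode("abc")   # [65536, 24930, 99, 65537] -> <BOS>, <U6162>, <U0063>, <EOS>
print(ids)
print(tok.decode(ids, skip_special_tokens=True))   # "abc"
```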