PhysiQuanty commited on
Commit
c4c31fd
·
verified ·
1 Parent(s): ed5e4aa

Patenty-0.1

Browse files
README.md CHANGED
@@ -1,3 +1,8 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
1
+ # BinaryLLM (HF export)
2
+
3
+ Tokenizer-free / base-N model export.
4
+
5
+ ## Load
6
+ ```python
7
+ from transformers import AutoModelForCausalLM
8
+ m = AutoModelForCausalLM.from_pretrained("./hf_binaryllm_repo", trust_remote_code=True)
+ ```
SAVE_tokenizer_config_SAVE.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "65536": {
4
+ "content": "<BOS>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "65537": {
12
+ "content": "<UNK>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ }
19
+ },
20
+ "bos_token": "<BOS>",
21
+ "clean_up_tokenization_spaces": false,
22
+ "eos_token": "<EOS>",
23
+ "extra_special_tokens": {},
24
+ "model_max_length": 1000000000000000019884624838656,
25
+ "pad_token": "<EOS>",
26
+ "tokenizer_class": "tokenization_binaryllm.BinaryLLMTokenizer",
27
+ "unk_token": "<UNK>",
28
+ "auto_map": {
29
+ "AutoTokenizer": "tokenization_binaryllm.BinaryLLMTokenizer"
30
+ }
31
+ }
__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .configuration_binaryllm import BinaryLLMConfig
2
+ from .modeling_binaryllm import BinaryLLMForCausalLM
3
+ from .tokenization_binaryllm import BinaryLLMTokenizer
binaryllm_vocab.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "base_vocab_size": 65536,
3
+ "vocab_size": 65538,
4
+ "bos_token": "<BOS>",
5
+ "bos_token_id": 65536,
6
+ "eos_token": "<EOS>",
7
+ "eos_token_id": 65537,
8
+ "unk_token": "<UNK>",
9
+ "unk_token_id": 65537,
10
+ "pad_token": "<EOS>",
11
+ "pad_token_id": 65537
12
+ }
config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "binaryllm",
3
+ "architectures": ["BinaryLLMForCausalLM"],
4
+ "auto_map": {
5
+ "AutoConfig": "configuration_binaryllm.BinaryLLMConfig",
6
+ "AutoModelForCausalLM": "modeling_binaryllm.BinaryLLMForCausalLM",
7
+ "AutoTokenizer": "tokenization_binaryllm.BinaryLLMTokenizer"
8
+ },
9
+ "vocab_size": 65538,
10
+ "bos_token_id": 65536,
11
+ "eos_token_id": 65537,
12
+ "pad_token_id": 65537,
13
+ "hidden_size": 512,
14
+ "num_hidden_layers": 4,
15
+ "num_attention_heads": 4,
16
+ "intermediate_size": 2048,
17
+ "max_position_embeddings": 2048,
18
+ "dropout": 0.1,
19
+ "activation": "gelu",
20
+ "torch_dtype": "float32"
21
+ }
configuration_binaryllm.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import PretrainedConfig
2
+
3
+
4
class BinaryLLMConfig(PretrainedConfig):
    """Configuration for the BinaryLLM tokenizer-free causal LM.

    Vocabulary layout: ids 0..65535 are raw uint16 "radix" tokens,
    65536 is <BOS> and 65537 is <EOS> (PAD aliases EOS), hence the
    default ``vocab_size`` of 65538.
    """

    model_type = "binaryllm"

    def __init__(
        self,
        vocab_size: int = 65538,
        hidden_size: int = 512,
        num_hidden_layers: int = 4,
        num_attention_heads: int = 4,
        intermediate_size: int = 2048,
        max_position_embeddings: int = 2048,
        dropout: float = 0.1,
        activation: str = "gelu",
        bos_token_id: int = 65536,
        eos_token_id: int = 65537,
        pad_token_id: int = 65537,
        **kwargs,
    ):
        self.vocab_size = int(vocab_size)
        self.hidden_size = int(hidden_size)
        self.num_hidden_layers = int(num_hidden_layers)
        self.num_attention_heads = int(num_attention_heads)
        self.intermediate_size = int(intermediate_size)
        self.max_position_embeddings = int(max_position_embeddings)
        self.dropout = float(dropout)
        self.activation = str(activation)

        # BUG FIX: the special-token ids must be forwarded through
        # super().__init__().  Assigning them as attributes *before*
        # calling super() is not enough: PretrainedConfig.__init__ does
        # `self.bos_token_id = kwargs.pop("bos_token_id", None)` (and
        # likewise for eos/pad), which would overwrite the attributes and
        # silently reset all three ids to None.
        super().__init__(
            bos_token_id=int(bos_token_id),
            eos_token_id=int(eos_token_id),
            pad_token_id=int(pad_token_id),
            **kwargs,
        )
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b2f7855de68552610bfb59cb6d013c08380ed3072f18b129ed2e8124701108d
3
+ size 318891440
modeling_binaryllm.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from dataclasses import dataclass
3
+ from typing import Optional
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ from transformers import PreTrainedModel
10
+ from transformers.modeling_outputs import CausalLMOutput
11
+
12
+ from .configuration_binaryllm import BinaryLLMConfig
13
+
14
+
15
class PositionalEncoding(nn.Module):
    """Additive sinusoidal positional encoding.

    The table is precomputed once in fp32 and cast to the device/dtype
    of the input on every forward pass.
    """

    def __init__(self, d_model: int, max_len: int) -> None:
        super().__init__()
        positions = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
        freqs = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float32)
            * (-torch.log(torch.tensor(10000.0)) / d_model)
        )
        table = torch.zeros(max_len, d_model, dtype=torch.float32)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Shape (1, max_len, d_model) so it broadcasts over the batch dim.
        self.register_buffer("pe", table.unsqueeze(0), persistent=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        seq_len = x.size(1)
        table = self.pe[:, :seq_len, :].to(device=x.device, dtype=x.dtype)
        return x + table
38
+
39
+
40
@dataclass
class _InnerCfg:
    """Hyper-parameter bundle consumed by TinyTransformerLM (internal)."""

    block_size: int  # maximum sequence length / size of the causal mask
    embed_dim: int  # token embedding width (d_model)
    vocab_size: int  # output logits width (base tokens + specials)
    num_heads: int  # attention heads per layer
    num_layers: int  # number of TransformerEncoder layers
    ff_hidden_dim: int  # feed-forward hidden width per layer
    dropout: float
    layernorm_dim: Optional[int] = None  # None -> falls back to embed_dim
    head_dim: Optional[int] = None  # None -> falls back to layernorm dim
51
+
52
+
53
class TinyTransformerLM(nn.Module):
    """Small encoder-only transformer used as a causal language model.

    Causality is enforced with a boolean upper-triangular attention mask
    rather than a decoder stack.  Optional projections allow the final
    LayerNorm / LM head to operate at widths other than ``embed_dim``;
    with ``layernorm_dim``/``head_dim`` left as None both stay disabled.
    """

    def __init__(self, cfg: _InnerCfg) -> None:
        super().__init__()
        self.cfg = cfg

        vocab_size = cfg.vocab_size
        self.tok_embed = nn.Embedding(vocab_size, cfg.embed_dim)
        self.pos_encoding = PositionalEncoding(cfg.embed_dim, cfg.block_size)

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=cfg.embed_dim,
            nhead=cfg.num_heads,
            dim_feedforward=cfg.ff_hidden_dim,
            dropout=cfg.dropout,
            activation="gelu",
            batch_first=True,
        )
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=cfg.num_layers)

        # Resolve optional widths; `or` falls back when the value is None.
        ln_dim = cfg.layernorm_dim or cfg.embed_dim
        head_dim = cfg.head_dim or ln_dim

        self.pre_ln_proj: Optional[nn.Linear] = None
        if ln_dim != cfg.embed_dim:
            self.pre_ln_proj = nn.Linear(cfg.embed_dim, ln_dim)

        self.ln = nn.LayerNorm(ln_dim)

        self.head_pre: Optional[nn.Linear] = None
        if head_dim != ln_dim:
            self.head_pre = nn.Linear(ln_dim, head_dim)

        self.head = nn.Linear(head_dim, vocab_size, bias=False)

        # Tie input/output embeddings only when they align perfectly,
        # i.e. no projection sits between embedding and head and both
        # operate at embed_dim.
        if self.pre_ln_proj is None and self.head_pre is None and head_dim == cfg.embed_dim:
            self.head.weight = self.tok_embed.weight

        # True above the diagonal marks positions a query may NOT attend to.
        causal = torch.triu(torch.ones(cfg.block_size, cfg.block_size, dtype=torch.bool), diagonal=1)
        self.register_buffer("causal_mask", causal, persistent=False)

    def forward(self, tokens: torch.Tensor, padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Return logits of shape (batch, seq, vocab_size).

        ``padding_mask``: optional per-token mask where True marks padding
        positions to be ignored by attention (key-padding convention).
        """
        x = self.tok_embed(tokens)
        x = self.pos_encoding(x)

        seq_len = tokens.size(1)
        # Slice the precomputed mask to the actual sequence length.
        attn_mask = self.causal_mask[:seq_len, :seq_len].to(device=tokens.device)

        if padding_mask is not None:
            padding_mask = padding_mask[:, :seq_len].to(device=tokens.device, dtype=torch.bool)

        x = self.encoder(x, mask=attn_mask, src_key_padding_mask=padding_mask)

        if self.pre_ln_proj is not None:
            x = self.pre_ln_proj(x)

        x = self.ln(x)

        if self.head_pre is not None:
            x = self.head_pre(x)

        return self.head(x)
115
+
116
+
117
class BinaryLLMForCausalLM(PreTrainedModel):
    """Hugging Face wrapper around TinyTransformerLM for causal LM use."""

    config_class = BinaryLLMConfig
    main_input_name = "input_ids"

    def __init__(self, config: BinaryLLMConfig):
        super().__init__(config)

        # Translate the HF-style config into the inner model's field names.
        inner = _InnerCfg(
            block_size=int(config.max_position_embeddings),
            embed_dim=int(config.hidden_size),
            vocab_size=int(config.vocab_size),
            num_heads=int(config.num_attention_heads),
            num_layers=int(config.num_hidden_layers),
            ff_hidden_dim=int(config.intermediate_size),
            dropout=float(getattr(config, "dropout", 0.0)),
            layernorm_dim=None,
            head_dim=None,
        )
        self.model = TinyTransformerLM(inner)

        # Standard HF hook: weight init + any post-processing.
        self.post_init()

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> CausalLMOutput:
        """Run the LM; if ``labels`` is given, also compute the shifted
        next-token cross-entropy loss (ignore_index=-100)."""
        padding_mask = None
        if attention_mask is not None:
            # HF convention: attention_mask 1 = keep.  The inner model
            # expects the inverse (True = ignore).
            padding_mask = ~attention_mask.to(torch.bool)  # True = ignore

        logits = self.model(input_ids, padding_mask=padding_mask)

        loss = None
        if labels is not None:
            # Shift so position t predicts token t+1.
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = labels[:, 1:].contiguous()
            loss = F.cross_entropy(
                shift_logits.view(-1, self.config.vocab_size),
                shift_labels.view(-1),
                ignore_index=-100,
            )

        return CausalLMOutput(loss=loss, logits=logits)
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<BOS>",
3
+ "eos_token": "<EOS>",
4
+ "pad_token": "<EOS>",
5
+ "unk_token": "<EOS>"
6
+ }
tokenization_binaryllm.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ import re
6
+ from typing import Dict, List, Optional, Tuple
7
+
8
+ from transformers import PreTrainedTokenizer
9
+
10
+
11
class BinaryLLMTokenizer(PreTrainedTokenizer):
    """Tokenizer-free "tokenizer": maps UTF-8 bytes to uint16 radix ids.

    Text is encoded to UTF-8, padded with a trailing NUL byte to an even
    length, and each little-endian byte pair becomes one id in 0..65535,
    rendered as the token string ``<Uxxxx>`` (4 hex digits).  Ids
    65536/65537 are the BOS/EOS specials; UNK (and PAD, when not a
    distinct token) alias the EOS id so the base radix range stays pure.
    """

    model_input_names = ["input_ids", "attention_mask"]

    # Matches exactly one base token, e.g. "<U00FF>" (case-insensitive hex).
    TOKEN_RE = re.compile(r"^<U([0-9A-Fa-f]{4})>$")

    def __init__(
        self,
        bos_token: str = "<BOS>",
        eos_token: str = "<EOS>",
        unk_token: str = "<UNK>",
        pad_token: Optional[str] = None,
        **kwargs,
    ):
        # base ids 0..65535 reserved for radix tokens (strict)
        self._base_vocab_size = 65536

        # reserve ids
        self._bos_id = 65536
        self._eos_id = 65537

        # UNK is an alias to EOS to preserve radix purity (no new base id)
        self._unk_id = self._eos_id

        # special token strings
        self._bos_str = bos_token
        self._eos_str = eos_token
        self._unk_str = unk_token
        self._pad_str = pad_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        # 65536 base radix ids + BOS + EOS.
        return 65538

    def get_vocab(self) -> Dict[str, int]:
        # IMPORTANT: never call self.unk_token_id here (it triggers recursion)
        v = {
            self._bos_str: self._bos_id,
            self._eos_str: self._eos_id,
            self._unk_str: self._unk_id,
        }
        if self.pad_token is not None:
            # if pad_token == "<EOS>", it will map to eos id via _convert_token_to_id()
            v[self.pad_token] = self._convert_token_to_id(self.pad_token)
        return v

    def _tokenize(self, text: str) -> List[str]:
        # One "<Uxxxx>" token per uint16 derived from the UTF-8 bytes.
        ids = self._encode_to_uint16(text)
        return [self._id_to_token_base(i) for i in ids]

    def _convert_token_to_id(self, token: str) -> int:
        # Specials first, then the radix pattern; anything else -> UNK id.
        if token == self._bos_str:
            return self._bos_id
        if token == self._eos_str:
            return self._eos_id
        if token == self._unk_str:
            return self._unk_id

        if self.pad_token is not None and token == self.pad_token:
            # common case: pad_token is "<EOS>"
            if self.pad_token == self._eos_str:
                return self._eos_id
            # otherwise: no dedicated PAD id in this vocab, alias to EOS
            return self._eos_id

        m = self.TOKEN_RE.match(token)
        if m:
            return int(m.group(1), 16)

        return self._unk_id

    def _convert_id_to_token(self, index: int) -> str:
        # NOTE(review): since _unk_id == _eos_id, id 65537 always renders
        # as the EOS string; the UNK branch below is effectively dead.
        if index == self._bos_id:
            return self._bos_str
        if index == self._eos_id:
            return self._eos_str
        if index == self._unk_id:
            return self._unk_str
        if self.pad_token is not None and index == self.pad_token_id:
            return self.pad_token

        if 0 <= index < self._base_vocab_size:
            return self._id_to_token_base(index)

        return self._unk_str

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Reassemble text from radix tokens, skipping all specials."""
        ids: List[int] = []
        for t in tokens:
            if t in (self._bos_str, self._eos_str, self._unk_str):
                continue
            if self.pad_token is not None and t == self.pad_token:
                continue
            m = self.TOKEN_RE.match(t)
            if m:
                ids.append(int(m.group(1), 16))
        return self._decode_from_uint16(ids)

    def build_inputs_with_special_tokens(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
    ) -> List[int]:
        # Single sequence: <BOS> A <EOS>; pair: <BOS> A <EOS> B <EOS>.
        if token_ids_1 is None:
            return [self._bos_id] + token_ids_0 + [self._eos_id]
        return [self._bos_id] + token_ids_0 + [self._eos_id] + token_ids_1 + [self._eos_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a 0/1 list flagging special-token positions."""
        pad_id = self.pad_token_id if self.pad_token is not None else -1

        if already_has_special_tokens:
            return [
                1 if t in (self._bos_id, self._eos_id, self._unk_id, pad_id) else 0
                for t in token_ids_0
            ]

        # Mirrors build_inputs_with_special_tokens layouts above.
        if token_ids_1 is None:
            return [1] + [0] * len(token_ids_0) + [1]
        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]

    def create_token_type_ids_from_sequences(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
    ) -> List[int]:
        # No segment distinction in this model: everything is type 0.
        if token_ids_1 is None:
            return [0] * (len(token_ids_0) + 2)
        return [0] * (len(token_ids_0) + len(token_ids_1) + 3)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the (fixed) vocab metadata as JSON; returns the file path."""
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        name = (filename_prefix + "-" if filename_prefix else "") + "binaryllm_vocab.json"
        path = os.path.join(save_directory, name)

        data = {
            "base_vocab_size": 65536,
            "vocab_size": 65538,
            "bos_token": self._bos_str,
            "bos_token_id": self._bos_id,
            "eos_token": self._eos_str,
            "eos_token_id": self._eos_id,
            "unk_token": self._unk_str,
            "unk_token_id": self._unk_id,
            "pad_token": self.pad_token,
            "pad_token_id": self.pad_token_id,
        }
        with open(path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

        return (path,)

    def _id_to_token_base(self, i: int) -> str:
        # Render a base id as "<Uxxxx>" with upper-case hex.
        return f"<U{i:04X}>"

    def _encode_to_uint16(self, text: str) -> List[int]:
        """UTF-8 encode and pack little-endian byte pairs into uint16 ids.

        Odd-length byte strings are padded with one NUL byte; the pad is
        stripped again in _decode_from_uint16.
        """
        b = text.encode("utf-8", errors="strict")
        if len(b) % 2 == 1:
            b += b"\x00"
        out: List[int] = []
        for k in range(0, len(b), 2):
            out.append(b[k] | (b[k + 1] << 8))
        return out

    def _decode_from_uint16(self, ids: List[int]) -> str:
        """Unpack uint16 ids to bytes, drop a trailing pad NUL, decode UTF-8.

        NOTE(review): a text whose UTF-8 form legitimately ends in a NUL
        byte would lose that byte on round-trip — presumed acceptable here.
        """
        bb = bytearray()
        for x in ids:
            x &= 0xFFFF
            bb.append(x & 0xFF)
            bb.append((x >> 8) & 0xFF)
        if len(bb) and bb[-1] == 0:
            bb = bb[:-1]
        return bb.decode("utf-8", errors="replace")
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "65536": {
4
+ "content": "<BOS>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "65537": {
12
+ "content": "<EOS>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ }
19
+ },
20
+ "bos_token": "<BOS>",
21
+ "clean_up_tokenization_spaces": false,
22
+ "eos_token": "<EOS>",
23
+ "extra_special_tokens": {},
24
+ "model_max_length": 1000000000000000019884624838656,
25
+ "pad_token": "<EOS>",
26
+ "tokenizer_class": "BinaryLLMTokenizer",
27
+ "unk_token": "<EOS>",
28
+ "auto_map": {
29
+ "AutoTokenizer": [
30
+ "tokenization_binaryllm.BinaryLLMTokenizer",
31
+ null
32
+ ]
33
+ }
34
+ }