Commit efda231 by PhysiQuanty · 0 parent(s)

Duplicate from PhysiQuanty/Patent-Test-Radix-65536-AutoTokenizer_FineTune

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ base_model:
+ - PhysiQuanty/Patent-Test-Radix-65536-AutoTokenizer
+ ---
+ # BinaryLLM (HF export)
+
+ Tokenizer-free / base-N model export.
+
+ ## Load
+ ```python
+ from transformers import AutoModelForCausalLM
+ m = AutoModelForCausalLM.from_pretrained("./hf_binaryllm_repo", trust_remote_code=True)
+ ```
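
The README snippet loads only the model. A fuller sketch (hedged: the local path is reused from the README, and the tokenizer files are assumed to sit in the same export directory) pairs it with the custom tokenizer and takes one greedy step:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "./hf_binaryllm_repo"  # local export path, as in the README example
tok = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True).eval()

enc = tok("hello", return_tensors="pt")   # base-65536 digits wrapped in BOS/EOS
with torch.no_grad():
    logits = model(**enc).logits          # shape (1, seq_len, 65538)
next_id = int(logits[0, -1].argmax())     # one greedy next-digit step
print(next_id, tok.decode([next_id], skip_special_tokens=True))
```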
SAVE_tokenizer_config_SAVE.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "added_tokens_decoder": {
+     "65536": {
+       "content": "<BOS>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65537": {
+       "content": "<UNK>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<BOS>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<EOS>",
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<EOS>",
+   "tokenizer_class": "tokenization_binaryllm.BinaryLLMTokenizer",
+   "unk_token": "<UNK>",
+   "auto_map": {
+     "AutoTokenizer": "tokenization_binaryllm.BinaryLLMTokenizer"
+   }
+ }
__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .configuration_binaryllm import BinaryLLMConfig
+ from .modeling_binaryllm import BinaryLLMForCausalLM
+ from .tokenization_binaryllm import BinaryLLMTokenizer
binaryllm_vocab.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "base_vocab_size": 65536,
+   "vocab_size": 65538,
+   "bos_token": "<BOS>",
+   "bos_token_id": 65536,
+   "eos_token": "<EOS>",
+   "eos_token_id": 65537,
+   "unk_token": "<EOS>",
+   "unk_token_id": 65537,
+   "pad_token": "<EOS>",
+   "pad_token_id": 65537
+ }
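
The ID layout this file records, as a quick sketch (plain arithmetic, no library assumptions): each digit 0..65535 packs two UTF-8 bytes big-endian, and the two extra ids sit just past the radix.

```python
BASE = 65536
BOS_ID, EOS_ID = BASE, BASE + 1      # 65536, 65537; UNK and PAD alias EOS_ID

def pack(hi: int, lo: int) -> int:
    """Two bytes -> one base-65536 digit, big-endian."""
    return (hi << 8) | lo

d = pack(0x48, 0x69)                 # the UTF-8 bytes of "Hi"
assert 0 <= d < BASE
assert (d >> 8, d & 0xFF) == (0x48, 0x69)
print(d)                             # 18537
```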
config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "model_type": "binaryllm",
+   "architectures": ["BinaryLLMForCausalLM"],
+   "auto_map": {
+     "AutoConfig": "configuration_binaryllm.BinaryLLMConfig",
+     "AutoModelForCausalLM": "modeling_binaryllm.BinaryLLMForCausalLM",
+     "AutoTokenizer": "tokenization_binaryllm.BinaryLLMTokenizer"
+   },
+   "vocab_size": 65538,
+   "bos_token_id": 65536,
+   "eos_token_id": 65537,
+   "pad_token_id": 65537,
+   "hidden_size": 512,
+   "num_hidden_layers": 4,
+   "num_attention_heads": 4,
+   "intermediate_size": 2048,
+   "max_position_embeddings": 2048,
+   "dropout": 0.1,
+   "activation": "gelu",
+   "torch_dtype": "float32"
+ }
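
With the `auto_map` above, the config can be inspected without touching the weights (a sketch assuming this JSON and configuration_binaryllm.py sit together in a local directory, here `./hf_binaryllm_repo` as in the README):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./hf_binaryllm_repo", trust_remote_code=True)
assert cfg.model_type == "binaryllm"

# nn.MultiheadAttention requires hidden_size % num_attention_heads == 0
print(cfg.hidden_size // cfg.num_attention_heads)   # 128 (per-head dim)
```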
configuration_binaryllm.py ADDED
@@ -0,0 +1,38 @@
+ from transformers import PretrainedConfig
+
+
+ class BinaryLLMConfig(PretrainedConfig):
+     model_type = "binaryllm"
+
+     def __init__(
+         self,
+         vocab_size: int = 65538,
+         hidden_size: int = 512,
+         num_hidden_layers: int = 4,
+         num_attention_heads: int = 4,
+         intermediate_size: int = 2048,
+         max_position_embeddings: int = 2048,
+         dropout: float = 0.1,
+         activation: str = "gelu",
+         bos_token_id: int = 65536,
+         eos_token_id: int = 65537,
+         pad_token_id: int = 65537,
+         **kwargs,
+     ):
+         self.vocab_size = int(vocab_size)
+         self.hidden_size = int(hidden_size)
+         self.num_hidden_layers = int(num_hidden_layers)
+         self.num_attention_heads = int(num_attention_heads)
+         self.intermediate_size = int(intermediate_size)
+         self.max_position_embeddings = int(max_position_embeddings)
+         self.dropout = float(dropout)
+         self.activation = str(activation)
+
+         # pass the special-token ids through super(): PretrainedConfig.__init__
+         # pops them from kwargs and would otherwise reset them to None
+         super().__init__(
+             bos_token_id=int(bos_token_id),
+             eos_token_id=int(eos_token_id),
+             pad_token_id=int(pad_token_id),
+             **kwargs,
+         )
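
A quick sketch showing the class round-trips through PretrainedConfig's dict serialization, and that the special-token ids survive a bare instantiation (direct file import; no package layout assumed):

```python
from configuration_binaryllm import BinaryLLMConfig

cfg = BinaryLLMConfig(hidden_size=256, num_hidden_layers=2)
assert cfg.bos_token_id == 65536 and cfg.eos_token_id == 65537

d = cfg.to_dict()
assert d["model_type"] == "binaryllm" and d["hidden_size"] == 256
cfg2 = BinaryLLMConfig.from_dict(d)
print(cfg2.hidden_size, cfg2.num_attention_heads)    # 256 4
```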
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c038b333e996b5f06e4d916ea9fc675353931743cb6f018a1403f80d0f467ae
+ size 318891472
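
As a sanity check, the pointer's 318,891,472 bytes are consistent with this architecture serialized untied in fp32. This is a back-of-the-envelope estimate, not a statement about how the checkpoint was produced; if the tied embedding/head weight had been deduplicated, the file would be roughly 185 MB instead.

```python
# Rough parameter count for the config above (fp32, untied LM head).
V, H, L, F = 65538, 512, 4, 2048           # vocab, hidden, layers, ffn width

emb = V * H                                # token embedding
attn = 4 * (H * H + H)                     # q/k/v in-proj + out-proj, with biases
ffn = (H * F + F) + (F * H + H)            # two feed-forward linears
norms = 2 * 2 * H                          # two LayerNorms per encoder layer
per_layer = attn + ffn + norms

head = V * H                               # LM head (bias=False), stored untied
total = emb + L * per_layer + 2 * H + head # plus the final LayerNorm

print(total)        # 79721472 parameters
print(total * 4)    # 318885888 bytes, ~5.5 KB under the pointer size
                    # (the difference is the safetensors header)
```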
modeling_binaryllm.py ADDED
@@ -0,0 +1,162 @@
+ import math
+ from dataclasses import dataclass
+ from typing import Optional
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from transformers import PreTrainedModel
+ from transformers.modeling_outputs import CausalLMOutput
+
+ from .configuration_binaryllm import BinaryLLMConfig
+
+
+ class PositionalEncoding(nn.Module):
+     """
+     Sinusoidal positional encoding, stored in fp32,
+     then cast to x's dtype on every forward pass.
+     """
+
+     def __init__(self, d_model: int, max_len: int) -> None:
+         super().__init__()
+         pe = torch.zeros(max_len, d_model, dtype=torch.float32)
+         position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
+         div_term = torch.exp(
+             torch.arange(0, d_model, 2, dtype=torch.float32) * (-math.log(10000.0) / d_model)
+         )
+         pe[:, 0::2] = torch.sin(position * div_term)
+         pe[:, 1::2] = torch.cos(position * div_term)
+         pe = pe.unsqueeze(0)  # (1, max_len, d_model)
+         self.register_buffer("pe", pe, persistent=False)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         t = x.size(1)
+         pe = self.pe[:, :t, :]
+         pe = pe.to(device=x.device, dtype=x.dtype)
+         return x + pe
+
+
+ @dataclass
+ class _InnerCfg:
+     block_size: int
+     embed_dim: int
+     vocab_size: int
+     num_heads: int
+     num_layers: int
+     ff_hidden_dim: int
+     dropout: float
+     layernorm_dim: Optional[int] = None
+     head_dim: Optional[int] = None
+
+
+ class TinyTransformerLM(nn.Module):
+     def __init__(self, cfg: _InnerCfg) -> None:
+         super().__init__()
+         self.cfg = cfg
+
+         vocab_size = cfg.vocab_size
+         self.tok_embed = nn.Embedding(vocab_size, cfg.embed_dim)
+         self.pos_encoding = PositionalEncoding(cfg.embed_dim, cfg.block_size)
+
+         encoder_layer = nn.TransformerEncoderLayer(
+             d_model=cfg.embed_dim,
+             nhead=cfg.num_heads,
+             dim_feedforward=cfg.ff_hidden_dim,
+             dropout=cfg.dropout,
+             activation="gelu",
+             batch_first=True,
+         )
+         self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=cfg.num_layers)
+
+         ln_dim = cfg.layernorm_dim or cfg.embed_dim
+         head_dim = cfg.head_dim or ln_dim
+
+         self.pre_ln_proj: Optional[nn.Linear] = None
+         if ln_dim != cfg.embed_dim:
+             self.pre_ln_proj = nn.Linear(cfg.embed_dim, ln_dim)
+
+         self.ln = nn.LayerNorm(ln_dim)
+
+         self.head_pre: Optional[nn.Linear] = None
+         if head_dim != ln_dim:
+             self.head_pre = nn.Linear(ln_dim, head_dim)
+
+         self.head = nn.Linear(head_dim, vocab_size, bias=False)
+
+         # weight tying only when the dimensions line up exactly
+         if self.pre_ln_proj is None and self.head_pre is None and head_dim == cfg.embed_dim:
+             self.head.weight = self.tok_embed.weight
+
+         causal = torch.triu(torch.ones(cfg.block_size, cfg.block_size, dtype=torch.bool), diagonal=1)
+         self.register_buffer("causal_mask", causal, persistent=False)
+
+     def forward(self, tokens: torch.Tensor, padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+         x = self.tok_embed(tokens)
+         x = self.pos_encoding(x)
+
+         seq_len = tokens.size(1)
+         attn_mask = self.causal_mask[:seq_len, :seq_len].to(device=tokens.device)
+
+         if padding_mask is not None:
+             padding_mask = padding_mask[:, :seq_len].to(device=tokens.device, dtype=torch.bool)
+
+         x = self.encoder(x, mask=attn_mask, src_key_padding_mask=padding_mask)
+
+         if self.pre_ln_proj is not None:
+             x = self.pre_ln_proj(x)
+
+         x = self.ln(x)
+
+         if self.head_pre is not None:
+             x = self.head_pre(x)
+
+         return self.head(x)
+
+
+ class BinaryLLMForCausalLM(PreTrainedModel):
+     config_class = BinaryLLMConfig
+     main_input_name = "input_ids"
+
+     def __init__(self, config: BinaryLLMConfig):
+         super().__init__(config)
+
+         inner = _InnerCfg(
+             block_size=int(config.max_position_embeddings),
+             embed_dim=int(config.hidden_size),
+             vocab_size=int(config.vocab_size),
+             num_heads=int(config.num_attention_heads),
+             num_layers=int(config.num_hidden_layers),
+             ff_hidden_dim=int(config.intermediate_size),
+             dropout=float(getattr(config, "dropout", 0.0)),
+             layernorm_dim=None,
+             head_dim=None,
+         )
+         self.model = TinyTransformerLM(inner)
+
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         **kwargs,
+     ) -> CausalLMOutput:
+         padding_mask = None
+         if attention_mask is not None:
+             padding_mask = ~attention_mask.to(torch.bool)  # True = position is ignored
+
+         logits = self.model(input_ids, padding_mask=padding_mask)
+
+         loss = None
+         if labels is not None:
+             shift_logits = logits[:, :-1, :].contiguous()
+             shift_labels = labels[:, 1:].contiguous()
+             loss = F.cross_entropy(
+                 shift_logits.view(-1, self.config.vocab_size),
+                 shift_labels.view(-1),
+                 ignore_index=-100,
+             )
+
+         return CausalLMOutput(loss=loss, logits=logits)
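
A minimal forward-pass smoke test for the classes above (a sketch: the `binaryllm` package name is an assumption, since the modules use relative imports and must live in some importable package):

```python
import torch
from binaryllm.configuration_binaryllm import BinaryLLMConfig
from binaryllm.modeling_binaryllm import BinaryLLMForCausalLM

cfg = BinaryLLMConfig(max_position_embeddings=64)   # small context for the test
model = BinaryLLMForCausalLM(cfg).eval()

ids = torch.randint(0, 65536, (2, 16))              # radix digits only
mask = torch.ones_like(ids)
with torch.no_grad():
    out = model(input_ids=ids, attention_mask=mask, labels=ids)

print(out.logits.shape)   # torch.Size([2, 16, 65538])
print(float(out.loss))    # cross-entropy over the shifted positions
```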
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<BOS>",
+   "eos_token": "<EOS>",
+   "pad_token": "<EOS>",
+   "unk_token": "<EOS>"
+ }
tokenization_binaryllm.py ADDED
@@ -0,0 +1,242 @@
+ #!/usr/bin/env python3
+ # tokenization_binaryllm.py
+ # ============================================================
+ # BinaryLLMTokenizer (AutoTokenizer compatible): EXACTLY the same
+ # tokenization/decoding as llmTalk (base=65536 mode) + infer_tagged12/11:
+ #
+ # - Base: 65536
+ # - Radix IDs: 0..65535
+ # - BOS: 65536
+ # - EOS: 65537
+ # - UNK: alias of EOS (65537) (no new token added to the base)
+ # - Encoding: UTF-8 bytes -> base-65536 digits, BIG-ENDIAN (2-byte chunks)
+ #   * if the byte length is odd: the last byte is encoded as a value 0..255 (1 digit)
+ # - Decoding: digits -> BIG-ENDIAN bytes -> UTF-8 (errors="replace")
+ #
+ # Important:
+ # - build_inputs_with_special_tokens: [BOS] + seq + [EOS] (classic HF style)
+ # - encode(..., add_special_tokens=False) returns ONLY the base-65536 digits
+ # - encode(..., add_special_tokens=True) adds BOS/EOS via build_inputs...
+ #
+ # This file alone is enough for `trust_remote_code=True` on the HF repo side.
+ # ============================================================
+
+ from __future__ import annotations
+
+ import json
+ import os
+ import re
+ from typing import Dict, List, Optional, Tuple, Any
+
+ from transformers import PreTrainedTokenizer
+
+
+ class BinaryLLMTokenizer(PreTrainedTokenizer):
+     model_input_names = ["input_ids", "attention_mask"]
+
+     TOKEN_RE = re.compile(r"^<U([0-9A-Fa-f]{4})>$")
+
+     def __init__(
+         self,
+         bos_token: str = "<BOS>",
+         eos_token: str = "<EOS>",
+         unk_token: str = "<UNK>",
+         pad_token: Optional[str] = None,
+         **kwargs: Any,
+     ):
+         # strict radix
+         self._base_vocab_size = 65536
+
+         # strict specials: base + 0 (BOS) and base + 1 (EOS)
+         self._bos_id = 65536
+         self._eos_id = 65537
+
+         # UNK aliases EOS (no additional token)
+         self._unk_id = self._eos_id
+
+         self._bos_str = bos_token
+         self._eos_str = eos_token
+         self._unk_str = unk_token
+         self._pad_str = pad_token
+
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             **kwargs,
+         )
+
+     # ---------- vocab / ids ----------
+
+     @property
+     def vocab_size(self) -> int:
+         # 65536 + BOS + EOS
+         return 65538
+
+     def get_vocab(self) -> Dict[str, int]:
+         # IMPORTANT: never call self.unk_token_id here (infinite recursion)
+         v = {
+             self._bos_str: self._bos_id,
+             self._eos_str: self._eos_id,
+             self._unk_str: self._unk_id,
+         }
+         if self.pad_token is not None:
+             v[self.pad_token] = self._convert_token_to_id(self.pad_token)
+         return v
+
+     def _id_to_token_base(self, i: int) -> str:
+         return f"<U{i:04X}>"
+
+     # ---------- core encode/decode (same logic as infer_tagged / llmTalk base mode) ----------
+
+     def _encode_to_base65536_big_endian(self, text: str) -> List[int]:
+         b = bytearray(text.encode("utf-8", errors="strict"))
+         if len(b) == 0:
+             return [0]
+
+         out: List[int] = []
+         i = 0
+         n = len(b)
+
+         while i + 1 < n:
+             # 2 bytes -> 1 base-65536 digit, big-endian
+             out.append((b[i] << 8) | b[i + 1])
+             i += 2
+
+         if i < n:
+             # trailing lone byte -> digit 0..255
+             out.append(int(b[i]))
+
+         return out
+
+     def _decode_from_base65536_big_endian(self, ids: List[int]) -> str:
+         bb = bytearray()
+         for x in ids:
+             xi = int(x) & 0xFFFFFFFF
+             if 0 <= xi <= 255:
+                 # odd-length rule: a digit <= 255 always decodes as one byte
+                 bb.append(xi)
+             else:
+                 bb.append((xi >> 8) & 0xFF)
+                 bb.append(xi & 0xFF)
+         return bytes(bb).decode("utf-8", errors="replace")
+
+     # ---------- HF tokenizer API overrides ----------
+
+     def _tokenize(self, text: str) -> List[str]:
+         ids = self._encode_to_base65536_big_endian(text)
+         return [self._id_to_token_base(i) for i in ids]
+
+     def _convert_token_to_id(self, token: str) -> int:
+         if token == self._bos_str:
+             return self._bos_id
+         if token == self._eos_str:
+             return self._eos_id
+         if token == self._unk_str:
+             return self._unk_id
+
+         if self.pad_token is not None and token == self.pad_token:
+             # no dedicated PAD token => alias EOS
+             return self._eos_id
+
+         m = self.TOKEN_RE.match(token)
+         if m:
+             return int(m.group(1), 16)
+
+         return self._unk_id
+
+     def _convert_id_to_token(self, index: int) -> str:
+         if index == self._bos_id:
+             return self._bos_str
+         if index == self._eos_id:
+             return self._eos_str
+         if index == self._unk_id:
+             return self._unk_str
+
+         if self.pad_token is not None and index == self.pad_token_id:
+             return self.pad_token
+
+         if 0 <= index < self._base_vocab_size:
+             return self._id_to_token_base(index)
+
+         return self._unk_str
+
+     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+         ids: List[int] = []
+         for t in tokens:
+             if t in (self._bos_str, self._eos_str, self._unk_str):
+                 continue
+             if self.pad_token is not None and t == self.pad_token:
+                 continue
+             m = self.TOKEN_RE.match(t)
+             if m:
+                 ids.append(int(m.group(1), 16))
+         return self._decode_from_base65536_big_endian(ids)
+
+     def build_inputs_with_special_tokens(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None,
+     ) -> List[int]:
+         # HF-style (single): [BOS] seq [EOS]
+         # Pair: [BOS] seq0 [EOS] seq1 [EOS]
+         if token_ids_1 is None:
+             return [self._bos_id] + token_ids_0 + [self._eos_id]
+         return [self._bos_id] + token_ids_0 + [self._eos_id] + token_ids_1 + [self._eos_id]
+
+     def get_special_tokens_mask(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None,
+         already_has_special_tokens: bool = False,
+     ) -> List[int]:
+         pad_id = self.pad_token_id if self.pad_token is not None else -1
+
+         if already_has_special_tokens:
+             return [
+                 1 if t in (self._bos_id, self._eos_id, self._unk_id, pad_id) else 0
+                 for t in token_ids_0
+             ]
+
+         if token_ids_1 is None:
+             return [1] + [0] * len(token_ids_0) + [1]
+         return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None,
+     ) -> List[int]:
+         if token_ids_1 is None:
+             return [0] * (len(token_ids_0) + 2)
+         return [0] * (len(token_ids_0) + len(token_ids_1) + 3)
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         if not os.path.isdir(save_directory):
+             os.makedirs(save_directory, exist_ok=True)
+
+         name = (filename_prefix + "-" if filename_prefix else "") + "binaryllm_vocab.json"
+         path = os.path.join(save_directory, name)
+
+         data = {
+             "base_vocab_size": 65536,
+             "vocab_size": 65538,
+             "bos_token": self._bos_str,
+             "bos_token_id": self._bos_id,
+             "eos_token": self._eos_str,
+             "eos_token_id": self._eos_id,
+             "unk_token": self._unk_str,
+             "unk_token_id": self._unk_id,
+             "pad_token": self.pad_token,
+             "pad_token_id": self.pad_token_id,
+             "encoding": "utf-8",
+             "radix": 65536,
+             "endianness": "big",
+             "odd_length_rule": "last_byte_as_single_digit_0_255",
+         }
+
+         with open(path, "w", encoding="utf-8") as f:
+             json.dump(data, f, ensure_ascii=False, indent=2)
+
+         return (path,)
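
A round-trip sketch of the scheme (the file has no relative imports, so importing it directly should work when it is on `sys.path`; the byte values below follow from UTF-8):

```python
from tokenization_binaryllm import BinaryLLMTokenizer

tok = BinaryLLMTokenizer(pad_token="<EOS>")

# "Hi!" -> UTF-8 bytes 0x48 0x69 0x21 -> chunk (0x48, 0x69), lone byte 0x21
ids = tok.encode("Hi!", add_special_tokens=False)
print(ids)                # [18537, 33]  (0x4869, 0x21)

print(tok.encode("Hi!"))  # [65536, 18537, 33, 65537]  (BOS ... EOS)
print(tok.decode(ids))    # "Hi!"
```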
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "added_tokens_decoder": {
+     "65536": {
+       "content": "<BOS>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65537": {
+       "content": "<EOS>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<BOS>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<EOS>",
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<EOS>",
+   "tokenizer_class": "BinaryLLMTokenizer",
+   "unk_token": "<EOS>",
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_binaryllm.BinaryLLMTokenizer",
+       null
+     ]
+   }
+ }