vijusudhi committed on
Commit bc0d57e · verified · 1 Parent(s): 26ad0af

Upload tokenizer

gptx_tokenizer.py ADDED
@@ -0,0 +1,422 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ import warnings
6
+ from pathlib import Path
7
+ from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
8
+
9
+ import sentencepiece as spm
10
+ from huggingface_hub import hf_hub_download, list_repo_files
11
+ from transformers.tokenization_utils import PreTrainedTokenizer
12
+ from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
13
+
14
+ # Define special tokens used in the tokenizer
15
+ EOD_TOKEN = "<eod>"
16
+ PAD_TOKEN = "<pad>"
17
+ BOS_TOKEN = "<s>"
18
+ EOS_TOKEN = "</s>"
19
+ UNK_TOKEN = "<unk>"
20
+ REPO_ID = "EuropeanLLM-Beta/HalloEurope-7B"
21
+
22
+ class HFGPTXTokenizer(PreTrainedTokenizer):
23
+ """
24
+ A custom tokenizer class that extends Hugging Face's PreTrainedTokenizer.
25
+ It is specifically designed to work with SentencePiece models and integrates
26
+ with Hugging Face's tokenizer utilities.
27
+ """
28
+
29
+ model_file_glob = "*tokenizer.json"
30
+ vocab_files_names = {"tokenizer_file": "tokenizer.json"}
31
+ decode_kwargs: List[str] = []
32
+
33
+ def _encode(self, text: str, return_tokens: bool = False, is_continuation: bool = False):
34
+ """
35
+ Encode a given text using the tokenizer.
36
+
37
+ Args:
38
+ text (str): The text to encode.
39
+ return_tokens (bool): If True, returns token strings instead of token IDs.
40
+ is_continuation (bool): If True, uses a continuation tokenizer (if available).
41
+
42
+ Returns:
43
+ List[int] or List[str]: Encoded text as a list of token IDs or token strings.
44
+ """
45
+ assert self.tok is not None, "No tokenizer is currently loaded"
46
+
47
+ # Variant with additional sp processor:
48
+ tokenizer = self.continuation_tokenizer if is_continuation else self.tok
49
+
50
+ if return_tokens:
51
+ return tokenizer.encode_as_pieces(text)
52
+ else:
53
+ return tokenizer.encode(text)
54
+
55
+ def create_list_of_special_tokens(self) -> List[str]:
56
+ """
57
+ Create a list of special tokens, including the BOS, EOS, PAD, EOD tokens,
58
+ and 256 additional placeholder tokens.
59
+
60
+ Returns:
61
+ List[str]: List of special tokens.
62
+ """
63
+ return [self.bos_token, self.eos_token, self.pad_token, self.eod_token] + [
64
+ f"<placeholder_tok_{i}>" for i in range(256)
65
+ ]
66
+
67
+ def find_tokenizer_config(self, config_path: Path, repo_id: Optional[str] = None) -> str:
68
+ if repo_id is None:
69
+ raise ValueError("repo_id must be provided if config_path is not a local file")
70
+
71
+ try:
72
+ # List all files in the repo
73
+ repo_files = list_repo_files(repo_id)
74
+
75
+ # Find the tokenizer config file
76
+ tokenizer_files = [f for f in repo_files if f.endswith('tokenizer_config.json')]
77
+ if not tokenizer_files:
78
+ raise FileNotFoundError(f"No tokenizer_config.json file found in repository {repo_id}")
79
+
80
+ # Use the first tokenizer_config.json file found
81
+ tokenizer_config_file = tokenizer_files[0]
82
+ print(f"Found tokenizer config file: {tokenizer_config_file}")
83
+
84
+ # Download the file
85
+ tokenizer_config_file_or_name = hf_hub_download(repo_id=repo_id, filename=tokenizer_config_file)
86
+ print(f"Downloaded tokenizer config file to: {tokenizer_config_file_or_name}")
87
+ return tokenizer_config_file_or_name
88
+ except Exception as e:
89
+ raise OSError(f"Failed to download tokenizer config: {str(e)}")
90
+
91
+ def instantiate_from_file_or_name(self, model_file_or_name: str, repo_id: Optional[str] = None):
92
+ """
93
+ Load the tokenizer model from a file or download it from a repository.
94
+
95
+ Args:
96
+ model_file_or_name (str): Path to the model file or the model name.
97
+ repo_id (str, optional): Repository ID from which to download the model file.
98
+
99
+ Returns:
100
+ spm.SentencePieceProcessor: Loaded SentencePieceProcessor instance.
101
+
102
+ Raises:
103
+ ValueError: If repo_id is not provided when model_file_or_name is not a file.
104
+ OSError: If the model file cannot be loaded or downloaded.
105
+ """
106
+ if not os.path.isfile(model_file_or_name):
107
+ if repo_id is None:
108
+ raise ValueError("repo_id must be provided if model_file_or_name is not a local file")
109
+
110
+ try:
111
+ # List all files in the repo
112
+ repo_files = list_repo_files(repo_id)
113
+
114
+ # Find the tokenizer model file
115
+ tokenizer_files = [f for f in repo_files if f.endswith('.model')]
116
+ if not tokenizer_files:
117
+ raise FileNotFoundError(f"No .model file found in repository {repo_id}")
118
+
119
+ # Use the first .model file found
120
+ model_file = tokenizer_files[0]
121
+ print(f"Found tokenizer model file: {model_file}")
122
+
123
+ # Download the file
124
+ model_file_or_name = hf_hub_download(repo_id=repo_id, filename=model_file)
125
+ print(f"Downloaded tokenizer model to: {model_file_or_name}")
126
+ except Exception as e:
127
+ raise OSError(f"Failed to download tokenizer model: {str(e)}")
128
+
129
+ try:
130
+ return spm.SentencePieceProcessor(model_file=model_file_or_name)
131
+ except Exception as e:
132
+ raise OSError(f"Failed to load tokenizer model: {str(e)}")
133
+
134
+ def __init__(
135
+ self,
136
+ model_path: Optional[str] = None,
137
+ config_path: Optional[str] = None,
138
+ **kwargs: Any,
139
+ ) -> None:
140
+ """
141
+ Initialize the tokenizer.
142
+
143
+ Args:
144
+ model_path (Optional[str]): Path to the tokenizer model file.
145
+ config_path (Optional[str]): Path to the tokenizer configuration file.
146
+ **kwargs: Additional keyword arguments passed to the superclass.
147
+
148
+ This method also ensures backward compatibility by setting
149
+ `clean_up_tokenization_spaces` to False by default.
150
+ """
151
+ # Prevent cleanup of tokenization spaces to maintain backward compatibility
152
+ self.clean_up_tokenization_spaces = kwargs.setdefault("clean_up_tokenization_spaces", False)
153
+ self.vocab = None
154
+ cp_path = kwargs.get("name_or_path", ".")
155
+ if model_path is None:
156
+ model_path = str(Path(cp_path) / self.vocab_files_names["tokenizer_file"])
157
+ self.tok = self.instantiate_from_file_or_name(model_path, repo_id=REPO_ID)
158
+
159
+ super().__init__(**kwargs)
160
+
161
+ # Specify special tokens which we know the value of.
162
+ # EOD from `tok` is used as what is called EOS in HuggingFace.
163
+ # Since there is no corresponding mapping for EOS from `tok` in
164
+ # HuggingFace, it is treated as an additional special token.
165
+ # Same for all other special tokens.
166
+ self.eos_token = self.eod_token = EOD_TOKEN  # keep eod_token set for create_list_of_special_tokens
167
+ self.bos_token = BOS_TOKEN
168
+ self.pad_token = PAD_TOKEN
169
+
170
+ if not self.additional_special_tokens:
171
+ self.additional_special_tokens = [
172
+ token
173
+ for token in self.create_list_of_special_tokens()
174
+ # Filter out the special tokens we added manually.
175
+ if token
176
+ not in [
177
+ self.eos_token,
178
+ self.bos_token,
179
+ self.pad_token,
180
+ ]
181
+ ]
182
+ if config_path is None:
183
+ config_path = str(Path(cp_path) / TOKENIZER_CONFIG_FILE)
184
+
185
+ if os.path.isfile(config_path):
186
+ self.tokenizer_config = self.load_json(Path(config_path))
187
+ else: # Load from repo
188
+ self.tokenizer_config = self.load_json(Path(self.find_tokenizer_config(Path(config_path), repo_id=REPO_ID)))
189
+
190
+ @property
191
+ def vocab_size(self) -> int:
192
+ """
193
+ Get the size of the tokenizer vocabulary.
194
+
195
+ Returns:
196
+ int: The size of the vocabulary.
197
+ """
198
+ return self.tok.GetPieceSize()
199
+
200
+ def get_vocab(self) -> Dict[str, int]:
201
+ """
202
+ Get the vocabulary as a dictionary mapping token strings to their IDs.
203
+
204
+ Returns:
205
+ Dict[str, int]: Vocabulary mapping.
206
+ """
207
+ if self.vocab is None:
208
+ self.vocab = {self.tok.IdToPiece(i): i for i in range(self.vocab_size)}
209
+ return self.vocab
210
+
211
+ def _tokenize(self, text: str, **kwargs) -> List[str]:
212
+ """
213
+ Tokenize the input text.
214
+
215
+ Args:
216
+ text (str): Text to tokenize.
217
+ **kwargs: Additional keyword arguments.
218
+
219
+ Returns:
220
+ List[str]: List of token strings.
221
+ """
222
+ return_tokens = kwargs.pop("return_tokens", True)
223
+ return self._encode(text, return_tokens=return_tokens, **kwargs)
224
+
225
+ def _convert_token_to_id(self, token: str) -> int:
226
+ """
227
+ Convert a token string to its corresponding ID.
228
+
229
+ Args:
230
+ token (str): The token to convert.
231
+
232
+ Returns:
233
+ int: The token's ID.
234
+
235
+ Raises:
236
+ ValueError: If the token is unknown and cannot be encoded to a single ID.
237
+ """
238
+ return self.tok.PieceToId(token)
239
+
240
+
241
+ def decode(
242
+ self,
243
+ token_ids: Union[List[int], List[List[int]]],
244
+ num_threads: Optional[int] = None,
245
+ ) -> str:
246
+ """
247
+ Decode a list of token IDs into a string.
248
+
249
+ Args:
250
+ token_ids (Union[List[int], List[List[int]]]): List of token IDs or lists of token IDs.
251
+ num_threads (Optional[int]): Number of threads to use for decoding.
252
+
253
+ Returns:
254
+ str: Decoded string.
255
+ """
256
+ return self.tok.decode(input=token_ids, num_threads=num_threads)
257
+
258
+ def _convert_id_to_token(self, index: int) -> str:
259
+ """
260
+ Convert a token ID to its corresponding token string.
261
+
262
+ Args:
263
+ index (int): Token ID.
264
+
265
+ Returns:
266
+ str: Corresponding token string.
267
+ """
268
+ return self.tok.IdToPiece(index)
269
+
270
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
271
+ """
272
+ Convert a list of tokens into a single string.
273
+
274
+ Args:
275
+ tokens (List[str]): List of token strings.
276
+
277
+ Returns:
278
+ str: Concatenated string of tokens.
279
+ """
280
+ return self.tok.DecodePieces(tokens)
281
+
282
+ def _tok_decode(self, token_ids: List[int], **kwargs: Any) -> str:
283
+ """
284
+ Internal method to decode token IDs with additional arguments.
285
+
286
+ Args:
287
+ token_ids (List[int]): List of token IDs.
288
+ **kwargs: Additional arguments to pass to the decode method.
289
+
290
+ Returns:
291
+ str: Decoded string.
292
+
293
+ This method also issues a warning if unsupported arguments are provided.
294
+ """
295
+ passed_kwargs = {key: value for (key, value) in kwargs.items() if key in self.decode_kwargs}
296
+ if len(passed_kwargs) != len(kwargs):
297
+ warnings.warn("Silently ignoring some arguments to `decode` due to missing support from the tokenizer.")
298
+ text = self.decode(token_ids, **passed_kwargs)
299
+ return text
300
+
301
+ def save_tokenizer(self, save_dir: str) -> Optional[Tuple[str]]:
302
+ if not os.path.isdir(save_dir):
303
+ print(f"Vocabulary path ({save_dir}) should be a directory")
304
+ return
305
+ out_vocab_file = os.path.join(save_dir, "tokenizer.model")
306
+
307
+ # if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
308
+ # copyfile(self.vocab_file, out_vocab_file)
309
+ # elif not os.path.isfile(self.vocab_file):
310
+ with open(out_vocab_file, "wb") as f:
311
+ content_spiece_model = self.tok.serialized_model_proto()
312
+ f.write(content_spiece_model)
313
+
314
+ return (out_vocab_file,)
315
+
316
+ def _decode(
317
+ self,
318
+ token_ids: List[int],
319
+ skip_special_tokens: bool = False,
320
+ clean_up_tokenization_spaces: Optional[bool] = None,
321
+ spaces_between_special_tokens: bool = True,
322
+ **kwargs: Any,
323
+ ) -> str:
324
+ text = self._tok_decode(
325
+ token_ids,
326
+ skip_special_tokens=skip_special_tokens,
327
+ spaces_between_special_tokens=spaces_between_special_tokens,
328
+ **kwargs,
329
+ )
330
+
331
+ clean_up_tokenization_spaces = (
332
+ clean_up_tokenization_spaces
333
+ if clean_up_tokenization_spaces is not None
334
+ else self.clean_up_tokenization_spaces
335
+ )
336
+ if clean_up_tokenization_spaces:
337
+ warnings.warn(
338
+ "When cleaning up tokenization spaces, this will not behave "
339
+ "like the original `GPTXTokenizer`. Please supply "
340
+ "`clean_up_tokenization_spaces=False` for decoding."
341
+ )
342
+ clean_text = self.clean_up_tokenization(text)
343
+ return clean_text
344
+ else:
345
+ return text
346
+
347
+ def save_vocabulary(
348
+ self,
349
+ save_directory: str,
350
+ filename_prefix: Optional[str] = None,
351
+ ) -> Tuple[str]:
352
+ filename_prefix = filename_prefix + "-" if filename_prefix else ""
353
+ save_directory = Path(save_directory)
354
+
355
+ self._save_tokenizer_config(save_directory, filename_prefix)
356
+ tokenizer_file_path = self._save_tokenizer(save_directory, filename_prefix)
357
+
358
+ return (tokenizer_file_path,)
359
+
360
+ def _save_tokenizer_config(
361
+ self,
362
+ save_directory: Path,
363
+ filename_prefix: str,
364
+ ) -> str:
365
+ self.save_tokenizer_config(save_directory)
366
+ old_tokenizer_config_path = save_directory / TOKENIZER_CONFIG_FILE
367
+ assert old_tokenizer_config_path.is_file(), "tokenizer config path changed"
368
+ new_tokenizer_config_path = save_directory / (filename_prefix + old_tokenizer_config_path.name)
369
+ old_tokenizer_config_path.replace(new_tokenizer_config_path)
370
+ return str(new_tokenizer_config_path)
371
+
372
+ def _find_tokenizer_files(self, save_directory: Path) -> List[Path]:
373
+ files = list(Path(save_directory).glob(self.model_file_glob))
374
+ return files
375
+
376
+ def _get_tokenizer_file(self, files: List[Path]):
377
+ assert files, "no saved tokenizer file found"
378
+ assert len(files) <= 1, "cannot handle multiple saved tokenizer files"
379
+ return files[0]
380
+
381
+ def _save_tokenizer(
382
+ self,
383
+ save_directory: Path,
384
+ filename_prefix: str,
385
+ ) -> str:
386
+ self.save_tokenizer(str(save_directory))
387
+ tokenizer_files = self._find_tokenizer_files(save_directory)
388
+ old_tokenizer_file_path = self._get_tokenizer_file(tokenizer_files)
389
+ assert old_tokenizer_file_path.is_file(), "could not access saved tokenizer file"
390
+ new_tokenizer_file_path = save_directory / (filename_prefix + self.vocab_files_names["tokenizer_file"])
391
+ old_tokenizer_file_path.replace(new_tokenizer_file_path)
392
+ return str(new_tokenizer_file_path)
393
+
394
+ def save_tokenizer_config(self, save_dir: Path) -> None:
395
+ # convert Path to str
396
+ for k in self.tokenizer_config:
397
+ if isinstance(self.tokenizer_config[k], Path):
398
+ self.tokenizer_config[k] = str(self.tokenizer_config[k])
399
+
400
+ info_file = save_dir / "tokenizer_config.json"
401
+ with info_file.open("w") as f:
402
+ json.dump(self.tokenizer_config, f, indent=4)
403
+
404
+ def load_json(self, path: Path) -> dict:
405
+ with path.open("r") as f:
406
+ return json.load(f)
407
+
408
+ class SPTokenizer(HFGPTXTokenizer):
409
+ model_file_glob = "*tokenizer.model"
410
+ vocab_files_names = {"tokenizer_file": "tokenizer.model"}
411
+ decode_kwargs = ["num_threads"]
412
+ # This `encode` override is needed for `is_continuation` to work, but it
413
+ # does not implement the full `PreTrainedTokenizer.encode` API.
414
+ def encode(self, text: str, **kwargs) -> Union[List[int], List[str]]:
415
+ return_tokens = kwargs.pop('return_tokens', False)
416
+ is_continuation = kwargs.pop('is_continuation', False)
417
+ return self._encode(
418
+ text,
419
+ return_tokens=return_tokens,
420
+ is_continuation=is_continuation,
421
+ )
422
+
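The class above is exposed as remote code via the `auto_map` entry in `tokenizer_config.json` below, so it can be loaded through `AutoTokenizer`. A minimal usage sketch, assuming `transformers`, `sentencepiece` and `huggingface_hub` are installed and the repo `EuropeanLLM-Beta/HalloEurope-7B` is reachable; the sample text is illustrative only:

from transformers import AutoTokenizer

# Load the custom SPTokenizer shipped in this repo (gptx_tokenizer.py).
# trust_remote_code=True is required because the tokenizer class lives in the repo itself.
tokenizer = AutoTokenizer.from_pretrained(
    "EuropeanLLM-Beta/HalloEurope-7B",
    trust_remote_code=True,
)

text = "Hallo Europa!"  # illustrative sample text
ids = tokenizer.encode(text)                         # token IDs
pieces = tokenizer.encode(text, return_tokens=True)  # SentencePiece pieces instead of IDs
print(pieces)
print(tokenizer.decode(ids))                         # round-trip back to a string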
special_tokens_map.json ADDED
@@ -0,0 +1,264 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "</s>",
4
+ "<placeholder_tok_0>",
5
+ "<placeholder_tok_1>",
6
+ "<placeholder_tok_2>",
7
+ "<placeholder_tok_3>",
8
+ "<placeholder_tok_4>",
9
+ "<placeholder_tok_5>",
10
+ "<placeholder_tok_6>",
11
+ "<placeholder_tok_7>",
12
+ "<placeholder_tok_8>",
13
+ "<placeholder_tok_9>",
14
+ "<placeholder_tok_10>",
15
+ "<placeholder_tok_11>",
16
+ "<placeholder_tok_12>",
17
+ "<placeholder_tok_13>",
18
+ "<placeholder_tok_14>",
19
+ "<placeholder_tok_15>",
20
+ "<placeholder_tok_16>",
21
+ "<placeholder_tok_17>",
22
+ "<placeholder_tok_18>",
23
+ "<placeholder_tok_19>",
24
+ "<placeholder_tok_20>",
25
+ "<placeholder_tok_21>",
26
+ "<placeholder_tok_22>",
27
+ "<placeholder_tok_23>",
28
+ "<placeholder_tok_24>",
29
+ "<placeholder_tok_25>",
30
+ "<placeholder_tok_26>",
31
+ "<placeholder_tok_27>",
32
+ "<placeholder_tok_28>",
33
+ "<placeholder_tok_29>",
34
+ "<placeholder_tok_30>",
35
+ "<placeholder_tok_31>",
36
+ "<placeholder_tok_32>",
37
+ "<placeholder_tok_33>",
38
+ "<placeholder_tok_34>",
39
+ "<placeholder_tok_35>",
40
+ "<placeholder_tok_36>",
41
+ "<placeholder_tok_37>",
42
+ "<placeholder_tok_38>",
43
+ "<placeholder_tok_39>",
44
+ "<placeholder_tok_40>",
45
+ "<placeholder_tok_41>",
46
+ "<placeholder_tok_42>",
47
+ "<placeholder_tok_43>",
48
+ "<placeholder_tok_44>",
49
+ "<placeholder_tok_45>",
50
+ "<placeholder_tok_46>",
51
+ "<placeholder_tok_47>",
52
+ "<placeholder_tok_48>",
53
+ "<placeholder_tok_49>",
54
+ "<placeholder_tok_50>",
55
+ "<placeholder_tok_51>",
56
+ "<placeholder_tok_52>",
57
+ "<placeholder_tok_53>",
58
+ "<placeholder_tok_54>",
59
+ "<placeholder_tok_55>",
60
+ "<placeholder_tok_56>",
61
+ "<placeholder_tok_57>",
62
+ "<placeholder_tok_58>",
63
+ "<placeholder_tok_59>",
64
+ "<placeholder_tok_60>",
65
+ "<placeholder_tok_61>",
66
+ "<placeholder_tok_62>",
67
+ "<placeholder_tok_63>",
68
+ "<placeholder_tok_64>",
69
+ "<placeholder_tok_65>",
70
+ "<placeholder_tok_66>",
71
+ "<placeholder_tok_67>",
72
+ "<placeholder_tok_68>",
73
+ "<placeholder_tok_69>",
74
+ "<placeholder_tok_70>",
75
+ "<placeholder_tok_71>",
76
+ "<placeholder_tok_72>",
77
+ "<placeholder_tok_73>",
78
+ "<placeholder_tok_74>",
79
+ "<placeholder_tok_75>",
80
+ "<placeholder_tok_76>",
81
+ "<placeholder_tok_77>",
82
+ "<placeholder_tok_78>",
83
+ "<placeholder_tok_79>",
84
+ "<placeholder_tok_80>",
85
+ "<placeholder_tok_81>",
86
+ "<placeholder_tok_82>",
87
+ "<placeholder_tok_83>",
88
+ "<placeholder_tok_84>",
89
+ "<placeholder_tok_85>",
90
+ "<placeholder_tok_86>",
91
+ "<placeholder_tok_87>",
92
+ "<placeholder_tok_88>",
93
+ "<placeholder_tok_89>",
94
+ "<placeholder_tok_90>",
95
+ "<placeholder_tok_91>",
96
+ "<placeholder_tok_92>",
97
+ "<placeholder_tok_93>",
98
+ "<placeholder_tok_94>",
99
+ "<placeholder_tok_95>",
100
+ "<placeholder_tok_96>",
101
+ "<placeholder_tok_97>",
102
+ "<placeholder_tok_98>",
103
+ "<placeholder_tok_99>",
104
+ "<placeholder_tok_100>",
105
+ "<placeholder_tok_101>",
106
+ "<placeholder_tok_102>",
107
+ "<placeholder_tok_103>",
108
+ "<placeholder_tok_104>",
109
+ "<placeholder_tok_105>",
110
+ "<placeholder_tok_106>",
111
+ "<placeholder_tok_107>",
112
+ "<placeholder_tok_108>",
113
+ "<placeholder_tok_109>",
114
+ "<placeholder_tok_110>",
115
+ "<placeholder_tok_111>",
116
+ "<placeholder_tok_112>",
117
+ "<placeholder_tok_113>",
118
+ "<placeholder_tok_114>",
119
+ "<placeholder_tok_115>",
120
+ "<placeholder_tok_116>",
121
+ "<placeholder_tok_117>",
122
+ "<placeholder_tok_118>",
123
+ "<placeholder_tok_119>",
124
+ "<placeholder_tok_120>",
125
+ "<placeholder_tok_121>",
126
+ "<placeholder_tok_122>",
127
+ "<placeholder_tok_123>",
128
+ "<placeholder_tok_124>",
129
+ "<placeholder_tok_125>",
130
+ "<placeholder_tok_126>",
131
+ "<placeholder_tok_127>",
132
+ "<placeholder_tok_128>",
133
+ "<placeholder_tok_129>",
134
+ "<placeholder_tok_130>",
135
+ "<placeholder_tok_131>",
136
+ "<placeholder_tok_132>",
137
+ "<placeholder_tok_133>",
138
+ "<placeholder_tok_134>",
139
+ "<placeholder_tok_135>",
140
+ "<placeholder_tok_136>",
141
+ "<placeholder_tok_137>",
142
+ "<placeholder_tok_138>",
143
+ "<placeholder_tok_139>",
144
+ "<placeholder_tok_140>",
145
+ "<placeholder_tok_141>",
146
+ "<placeholder_tok_142>",
147
+ "<placeholder_tok_143>",
148
+ "<placeholder_tok_144>",
149
+ "<placeholder_tok_145>",
150
+ "<placeholder_tok_146>",
151
+ "<placeholder_tok_147>",
152
+ "<placeholder_tok_148>",
153
+ "<placeholder_tok_149>",
154
+ "<placeholder_tok_150>",
155
+ "<placeholder_tok_151>",
156
+ "<placeholder_tok_152>",
157
+ "<placeholder_tok_153>",
158
+ "<placeholder_tok_154>",
159
+ "<placeholder_tok_155>",
160
+ "<placeholder_tok_156>",
161
+ "<placeholder_tok_157>",
162
+ "<placeholder_tok_158>",
163
+ "<placeholder_tok_159>",
164
+ "<placeholder_tok_160>",
165
+ "<placeholder_tok_161>",
166
+ "<placeholder_tok_162>",
167
+ "<placeholder_tok_163>",
168
+ "<placeholder_tok_164>",
169
+ "<placeholder_tok_165>",
170
+ "<placeholder_tok_166>",
171
+ "<placeholder_tok_167>",
172
+ "<placeholder_tok_168>",
173
+ "<placeholder_tok_169>",
174
+ "<placeholder_tok_170>",
175
+ "<placeholder_tok_171>",
176
+ "<placeholder_tok_172>",
177
+ "<placeholder_tok_173>",
178
+ "<placeholder_tok_174>",
179
+ "<placeholder_tok_175>",
180
+ "<placeholder_tok_176>",
181
+ "<placeholder_tok_177>",
182
+ "<placeholder_tok_178>",
183
+ "<placeholder_tok_179>",
184
+ "<placeholder_tok_180>",
185
+ "<placeholder_tok_181>",
186
+ "<placeholder_tok_182>",
187
+ "<placeholder_tok_183>",
188
+ "<placeholder_tok_184>",
189
+ "<placeholder_tok_185>",
190
+ "<placeholder_tok_186>",
191
+ "<placeholder_tok_187>",
192
+ "<placeholder_tok_188>",
193
+ "<placeholder_tok_189>",
194
+ "<placeholder_tok_190>",
195
+ "<placeholder_tok_191>",
196
+ "<placeholder_tok_192>",
197
+ "<placeholder_tok_193>",
198
+ "<placeholder_tok_194>",
199
+ "<placeholder_tok_195>",
200
+ "<placeholder_tok_196>",
201
+ "<placeholder_tok_197>",
202
+ "<placeholder_tok_198>",
203
+ "<placeholder_tok_199>",
204
+ "<placeholder_tok_200>",
205
+ "<placeholder_tok_201>",
206
+ "<placeholder_tok_202>",
207
+ "<placeholder_tok_203>",
208
+ "<placeholder_tok_204>",
209
+ "<placeholder_tok_205>",
210
+ "<placeholder_tok_206>",
211
+ "<placeholder_tok_207>",
212
+ "<placeholder_tok_208>",
213
+ "<placeholder_tok_209>",
214
+ "<placeholder_tok_210>",
215
+ "<placeholder_tok_211>",
216
+ "<placeholder_tok_212>",
217
+ "<placeholder_tok_213>",
218
+ "<placeholder_tok_214>",
219
+ "<placeholder_tok_215>",
220
+ "<placeholder_tok_216>",
221
+ "<placeholder_tok_217>",
222
+ "<placeholder_tok_218>",
223
+ "<placeholder_tok_219>",
224
+ "<placeholder_tok_220>",
225
+ "<placeholder_tok_221>",
226
+ "<placeholder_tok_222>",
227
+ "<placeholder_tok_223>",
228
+ "<placeholder_tok_224>",
229
+ "<placeholder_tok_225>",
230
+ "<placeholder_tok_226>",
231
+ "<placeholder_tok_227>",
232
+ "<placeholder_tok_228>",
233
+ "<placeholder_tok_229>",
234
+ "<placeholder_tok_230>",
235
+ "<placeholder_tok_231>",
236
+ "<placeholder_tok_232>",
237
+ "<placeholder_tok_233>",
238
+ "<placeholder_tok_234>",
239
+ "<placeholder_tok_235>",
240
+ "<placeholder_tok_236>",
241
+ "<placeholder_tok_237>",
242
+ "<placeholder_tok_238>",
243
+ "<placeholder_tok_239>",
244
+ "<placeholder_tok_240>",
245
+ "<placeholder_tok_241>",
246
+ "<placeholder_tok_242>",
247
+ "<placeholder_tok_243>",
248
+ "<placeholder_tok_244>",
249
+ "<placeholder_tok_245>",
250
+ "<placeholder_tok_246>",
251
+ "<placeholder_tok_247>",
252
+ "<placeholder_tok_248>",
253
+ "<placeholder_tok_249>",
254
+ "<placeholder_tok_250>",
255
+ "<placeholder_tok_251>",
256
+ "<placeholder_tok_252>",
257
+ "<placeholder_tok_253>",
258
+ "<placeholder_tok_254>",
259
+ "<placeholder_tok_255>"
260
+ ],
261
+ "bos_token": "<s>",
262
+ "eos_token": "<eod>",
263
+ "pad_token": "<pad>"
264
+ }
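This map mirrors what `HFGPTXTokenizer.__init__` sets in code: `<eod>` is exposed as the Hugging Face EOS token, `<s>` and `<pad>` keep their usual roles, and `</s>` plus the 256 placeholder tokens are carried as additional special tokens. A quick sanity check, assuming `tokenizer` was loaded as in the sketch above:

# The special-token attributes should line up with special_tokens_map.json.
assert tokenizer.eos_token == "<eod>"
assert tokenizer.bos_token == "<s>"
assert tokenizer.pad_token == "<pad>"
assert "</s>" in tokenizer.additional_special_tokens
assert "<placeholder_tok_255>" in tokenizer.additional_special_tokens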
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08d0c8316539a853f2fe6e14f51f0df583011dfb078fa08c8b6dc5c15a19a7e6
3
+ size 4719922
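The file above is a Git LFS pointer; the real payload is the serialized SentencePiece model that `instantiate_from_file_or_name` loads. It can also be opened directly with the `sentencepiece` library, bypassing the wrapper class. A minimal sketch, assuming the file is fetched with `hf_hub_download`:

import sentencepiece as spm
from huggingface_hub import hf_hub_download

# Download the raw SentencePiece model and load it without the HF wrapper.
model_file = hf_hub_download(
    repo_id="EuropeanLLM-Beta/HalloEurope-7B",
    filename="tokenizer.model",
)
sp = spm.SentencePieceProcessor(model_file=model_file)
print(sp.GetPieceSize())                     # should match the configured vocab_size (250680)
print(sp.encode_as_pieces("Hallo Europa!"))  # pieces, same as tokenizer.encode(..., return_tokens=True)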
tokenizer_config.json ADDED
@@ -0,0 +1,292 @@
1
+ {
2
+ "num_threads": 224,
3
+ "split_by_whitespace": true,
4
+ "model_type": "unigram",
5
+ "vocab_size": 250680,
6
+ "character_coverage": 0.9999,
7
+ "byte_fallback": true,
8
+ "split_by_number": true,
9
+ "split_digits": true,
10
+ "normalization_rule_name": "nfkc",
11
+ "max_sentence_length": 4096,
12
+ "shuffle_input_sentence": true,
13
+ "input_sentence_size": 0,
14
+ "train_extremely_large_corpus": true,
15
+ "allow_whitespace_only_pieces": true,
16
+ "required_chars": "",
17
+ "remove_extra_whitespaces": false,
18
+ "user_defined_symbols": [
19
+ "<s>",
20
+ "</s>",
21
+ "<pad>",
22
+ "<eod>",
23
+ "<placeholder_tok_0>",
24
+ "<placeholder_tok_1>",
25
+ "<placeholder_tok_2>",
26
+ "<placeholder_tok_3>",
27
+ "<placeholder_tok_4>",
28
+ "<placeholder_tok_5>",
29
+ "<placeholder_tok_6>",
30
+ "<placeholder_tok_7>",
31
+ "<placeholder_tok_8>",
32
+ "<placeholder_tok_9>",
33
+ "<placeholder_tok_10>",
34
+ "<placeholder_tok_11>",
35
+ "<placeholder_tok_12>",
36
+ "<placeholder_tok_13>",
37
+ "<placeholder_tok_14>",
38
+ "<placeholder_tok_15>",
39
+ "<placeholder_tok_16>",
40
+ "<placeholder_tok_17>",
41
+ "<placeholder_tok_18>",
42
+ "<placeholder_tok_19>",
43
+ "<placeholder_tok_20>",
44
+ "<placeholder_tok_21>",
45
+ "<placeholder_tok_22>",
46
+ "<placeholder_tok_23>",
47
+ "<placeholder_tok_24>",
48
+ "<placeholder_tok_25>",
49
+ "<placeholder_tok_26>",
50
+ "<placeholder_tok_27>",
51
+ "<placeholder_tok_28>",
52
+ "<placeholder_tok_29>",
53
+ "<placeholder_tok_30>",
54
+ "<placeholder_tok_31>",
55
+ "<placeholder_tok_32>",
56
+ "<placeholder_tok_33>",
57
+ "<placeholder_tok_34>",
58
+ "<placeholder_tok_35>",
59
+ "<placeholder_tok_36>",
60
+ "<placeholder_tok_37>",
61
+ "<placeholder_tok_38>",
62
+ "<placeholder_tok_39>",
63
+ "<placeholder_tok_40>",
64
+ "<placeholder_tok_41>",
65
+ "<placeholder_tok_42>",
66
+ "<placeholder_tok_43>",
67
+ "<placeholder_tok_44>",
68
+ "<placeholder_tok_45>",
69
+ "<placeholder_tok_46>",
70
+ "<placeholder_tok_47>",
71
+ "<placeholder_tok_48>",
72
+ "<placeholder_tok_49>",
73
+ "<placeholder_tok_50>",
74
+ "<placeholder_tok_51>",
75
+ "<placeholder_tok_52>",
76
+ "<placeholder_tok_53>",
77
+ "<placeholder_tok_54>",
78
+ "<placeholder_tok_55>",
79
+ "<placeholder_tok_56>",
80
+ "<placeholder_tok_57>",
81
+ "<placeholder_tok_58>",
82
+ "<placeholder_tok_59>",
83
+ "<placeholder_tok_60>",
84
+ "<placeholder_tok_61>",
85
+ "<placeholder_tok_62>",
86
+ "<placeholder_tok_63>",
87
+ "<placeholder_tok_64>",
88
+ "<placeholder_tok_65>",
89
+ "<placeholder_tok_66>",
90
+ "<placeholder_tok_67>",
91
+ "<placeholder_tok_68>",
92
+ "<placeholder_tok_69>",
93
+ "<placeholder_tok_70>",
94
+ "<placeholder_tok_71>",
95
+ "<placeholder_tok_72>",
96
+ "<placeholder_tok_73>",
97
+ "<placeholder_tok_74>",
98
+ "<placeholder_tok_75>",
99
+ "<placeholder_tok_76>",
100
+ "<placeholder_tok_77>",
101
+ "<placeholder_tok_78>",
102
+ "<placeholder_tok_79>",
103
+ "<placeholder_tok_80>",
104
+ "<placeholder_tok_81>",
105
+ "<placeholder_tok_82>",
106
+ "<placeholder_tok_83>",
107
+ "<placeholder_tok_84>",
108
+ "<placeholder_tok_85>",
109
+ "<placeholder_tok_86>",
110
+ "<placeholder_tok_87>",
111
+ "<placeholder_tok_88>",
112
+ "<placeholder_tok_89>",
113
+ "<placeholder_tok_90>",
114
+ "<placeholder_tok_91>",
115
+ "<placeholder_tok_92>",
116
+ "<placeholder_tok_93>",
117
+ "<placeholder_tok_94>",
118
+ "<placeholder_tok_95>",
119
+ "<placeholder_tok_96>",
120
+ "<placeholder_tok_97>",
121
+ "<placeholder_tok_98>",
122
+ "<placeholder_tok_99>",
123
+ "<placeholder_tok_100>",
124
+ "<placeholder_tok_101>",
125
+ "<placeholder_tok_102>",
126
+ "<placeholder_tok_103>",
127
+ "<placeholder_tok_104>",
128
+ "<placeholder_tok_105>",
129
+ "<placeholder_tok_106>",
130
+ "<placeholder_tok_107>",
131
+ "<placeholder_tok_108>",
132
+ "<placeholder_tok_109>",
133
+ "<placeholder_tok_110>",
134
+ "<placeholder_tok_111>",
135
+ "<placeholder_tok_112>",
136
+ "<placeholder_tok_113>",
137
+ "<placeholder_tok_114>",
138
+ "<placeholder_tok_115>",
139
+ "<placeholder_tok_116>",
140
+ "<placeholder_tok_117>",
141
+ "<placeholder_tok_118>",
142
+ "<placeholder_tok_119>",
143
+ "<placeholder_tok_120>",
144
+ "<placeholder_tok_121>",
145
+ "<placeholder_tok_122>",
146
+ "<placeholder_tok_123>",
147
+ "<placeholder_tok_124>",
148
+ "<placeholder_tok_125>",
149
+ "<placeholder_tok_126>",
150
+ "<placeholder_tok_127>",
151
+ "<placeholder_tok_128>",
152
+ "<placeholder_tok_129>",
153
+ "<placeholder_tok_130>",
154
+ "<placeholder_tok_131>",
155
+ "<placeholder_tok_132>",
156
+ "<placeholder_tok_133>",
157
+ "<placeholder_tok_134>",
158
+ "<placeholder_tok_135>",
159
+ "<placeholder_tok_136>",
160
+ "<placeholder_tok_137>",
161
+ "<placeholder_tok_138>",
162
+ "<placeholder_tok_139>",
163
+ "<placeholder_tok_140>",
164
+ "<placeholder_tok_141>",
165
+ "<placeholder_tok_142>",
166
+ "<placeholder_tok_143>",
167
+ "<placeholder_tok_144>",
168
+ "<placeholder_tok_145>",
169
+ "<placeholder_tok_146>",
170
+ "<placeholder_tok_147>",
171
+ "<placeholder_tok_148>",
172
+ "<placeholder_tok_149>",
173
+ "<placeholder_tok_150>",
174
+ "<placeholder_tok_151>",
175
+ "<placeholder_tok_152>",
176
+ "<placeholder_tok_153>",
177
+ "<placeholder_tok_154>",
178
+ "<placeholder_tok_155>",
179
+ "<placeholder_tok_156>",
180
+ "<placeholder_tok_157>",
181
+ "<placeholder_tok_158>",
182
+ "<placeholder_tok_159>",
183
+ "<placeholder_tok_160>",
184
+ "<placeholder_tok_161>",
185
+ "<placeholder_tok_162>",
186
+ "<placeholder_tok_163>",
187
+ "<placeholder_tok_164>",
188
+ "<placeholder_tok_165>",
189
+ "<placeholder_tok_166>",
190
+ "<placeholder_tok_167>",
191
+ "<placeholder_tok_168>",
192
+ "<placeholder_tok_169>",
193
+ "<placeholder_tok_170>",
194
+ "<placeholder_tok_171>",
195
+ "<placeholder_tok_172>",
196
+ "<placeholder_tok_173>",
197
+ "<placeholder_tok_174>",
198
+ "<placeholder_tok_175>",
199
+ "<placeholder_tok_176>",
200
+ "<placeholder_tok_177>",
201
+ "<placeholder_tok_178>",
202
+ "<placeholder_tok_179>",
203
+ "<placeholder_tok_180>",
204
+ "<placeholder_tok_181>",
205
+ "<placeholder_tok_182>",
206
+ "<placeholder_tok_183>",
207
+ "<placeholder_tok_184>",
208
+ "<placeholder_tok_185>",
209
+ "<placeholder_tok_186>",
210
+ "<placeholder_tok_187>",
211
+ "<placeholder_tok_188>",
212
+ "<placeholder_tok_189>",
213
+ "<placeholder_tok_190>",
214
+ "<placeholder_tok_191>",
215
+ "<placeholder_tok_192>",
216
+ "<placeholder_tok_193>",
217
+ "<placeholder_tok_194>",
218
+ "<placeholder_tok_195>",
219
+ "<placeholder_tok_196>",
220
+ "<placeholder_tok_197>",
221
+ "<placeholder_tok_198>",
222
+ "<placeholder_tok_199>",
223
+ "<placeholder_tok_200>",
224
+ "<placeholder_tok_201>",
225
+ "<placeholder_tok_202>",
226
+ "<placeholder_tok_203>",
227
+ "<placeholder_tok_204>",
228
+ "<placeholder_tok_205>",
229
+ "<placeholder_tok_206>",
230
+ "<placeholder_tok_207>",
231
+ "<placeholder_tok_208>",
232
+ "<placeholder_tok_209>",
233
+ "<placeholder_tok_210>",
234
+ "<placeholder_tok_211>",
235
+ "<placeholder_tok_212>",
236
+ "<placeholder_tok_213>",
237
+ "<placeholder_tok_214>",
238
+ "<placeholder_tok_215>",
239
+ "<placeholder_tok_216>",
240
+ "<placeholder_tok_217>",
241
+ "<placeholder_tok_218>",
242
+ "<placeholder_tok_219>",
243
+ "<placeholder_tok_220>",
244
+ "<placeholder_tok_221>",
245
+ "<placeholder_tok_222>",
246
+ "<placeholder_tok_223>",
247
+ "<placeholder_tok_224>",
248
+ "<placeholder_tok_225>",
249
+ "<placeholder_tok_226>",
250
+ "<placeholder_tok_227>",
251
+ "<placeholder_tok_228>",
252
+ "<placeholder_tok_229>",
253
+ "<placeholder_tok_230>",
254
+ "<placeholder_tok_231>",
255
+ "<placeholder_tok_232>",
256
+ "<placeholder_tok_233>",
257
+ "<placeholder_tok_234>",
258
+ "<placeholder_tok_235>",
259
+ "<placeholder_tok_236>",
260
+ "<placeholder_tok_237>",
261
+ "<placeholder_tok_238>",
262
+ "<placeholder_tok_239>",
263
+ "<placeholder_tok_240>",
264
+ "<placeholder_tok_241>",
265
+ "<placeholder_tok_242>",
266
+ "<placeholder_tok_243>",
267
+ "<placeholder_tok_244>",
268
+ "<placeholder_tok_245>",
269
+ "<placeholder_tok_246>",
270
+ "<placeholder_tok_247>",
271
+ "<placeholder_tok_248>",
272
+ "<placeholder_tok_249>",
273
+ "<placeholder_tok_250>",
274
+ "<placeholder_tok_251>",
275
+ "<placeholder_tok_252>",
276
+ "<placeholder_tok_253>",
277
+ "<placeholder_tok_254>",
278
+ "<placeholder_tok_255>"
279
+ ],
280
+ "datasets_dir": "/home/fhgiais/gptx_ablations/bias_analysis/data/tokenizer/temp/",
281
+ "save_dir": "/home/fhgiais/gptx_ablations/bias_analysis/tokenizer/24",
282
+ "text_key": "text",
283
+ "cache_dir": "/home/fhgiais/gptx_ablations/bias_analysis/tokenizer/24/cache",
284
+ "library": "sentencepiece",
285
+ "auto_map": {
286
+ "AutoTokenizer": [
287
+ "gptx_tokenizer.SPTokenizer",
288
+ null
289
+ ]
290
+ },
291
+ "tokenizer_class": "SPTokenizer"
292
+ }
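The `auto_map` entry is what makes the `AutoTokenizer` loading shown earlier work: with `trust_remote_code=True`, `gptx_tokenizer.SPTokenizer` is imported from this repo and used as the (slow) tokenizer class; the `null` slot means no fast tokenizer is provided. The loaded tokenizer can write these files back out through `save_vocabulary`; a brief sketch, with the target directory name chosen purely for illustration:

import os

# save_vocabulary expects an existing directory; it writes tokenizer.model
# and tokenizer_config.json via the helpers defined in gptx_tokenizer.py.
os.makedirs("exported_tokenizer", exist_ok=True)
paths = tokenizer.save_vocabulary("exported_tokenizer")
print(paths)  # tuple with the path to the re-exported tokenizer.model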