0zz committed on
Commit 3547274 · verified
1 Parent(s): 1d711ef

Upload tokenizer

Files changed (2)
  1. tokenizer.json +325 -0
  2. tokenizer_config.json +12 -0
tokenizer.json ADDED
@@ -0,0 +1,325 @@
+ {
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 256,
+ "content": "<|endoftext|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": null,
+ "pre_tokenizer": {
+ "type": "ByteLevel",
+ "add_prefix_space": false,
+ "trim_offsets": true,
+ "use_regex": true
+ },
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {}
+ },
+ "decoder": {
+ "type": "ByteLevel",
+ "add_prefix_space": true,
+ "trim_offsets": true,
+ "use_regex": true
+ },
+ "model": {
+ "type": "BPE",
+ "dropout": null,
+ "unk_token": null,
+ "continuing_subword_prefix": "",
+ "end_of_word_suffix": "",
+ "fuse_unk": false,
+ "byte_fallback": false,
+ "ignore_merges": false,
+ "vocab": {
+ "\u0000": 0,
+ "\u0001": 1,
+ "\u0002": 2,
+ "\u0003": 3,
+ "\u0004": 4,
+ "\u0005": 5,
+ "\u0006": 6,
+ "\u0007": 7,
+ "\b": 8,
+ "\t": 9,
+ "\n": 10,
+ "\u000b": 11,
+ "\f": 12,
+ "\r": 13,
+ "\u000e": 14,
+ "\u000f": 15,
+ "\u0010": 16,
+ "\u0011": 17,
+ "\u0012": 18,
+ "\u0013": 19,
+ "\u0014": 20,
+ "\u0015": 21,
+ "\u0016": 22,
+ "\u0017": 23,
+ "\u0018": 24,
+ "\u0019": 25,
+ "\u001a": 26,
+ "\u001b": 27,
+ "\u001c": 28,
+ "\u001d": 29,
+ "\u001e": 30,
+ "\u001f": 31,
+ " ": 32,
+ "!": 33,
+ "\"": 34,
+ "#": 35,
+ "$": 36,
+ "%": 37,
+ "&": 38,
+ "'": 39,
+ "(": 40,
+ ")": 41,
+ "*": 42,
+ "+": 43,
+ ",": 44,
+ "-": 45,
+ ".": 46,
+ "/": 47,
+ "0": 48,
+ "1": 49,
+ "2": 50,
+ "3": 51,
+ "4": 52,
+ "5": 53,
+ "6": 54,
+ "7": 55,
+ "8": 56,
+ "9": 57,
+ ":": 58,
+ ";": 59,
+ "<": 60,
+ "=": 61,
+ ">": 62,
+ "?": 63,
+ "@": 64,
+ "A": 65,
+ "B": 66,
+ "C": 67,
+ "D": 68,
+ "E": 69,
+ "F": 70,
+ "G": 71,
+ "H": 72,
+ "I": 73,
+ "J": 74,
+ "K": 75,
+ "L": 76,
+ "M": 77,
+ "N": 78,
+ "O": 79,
+ "P": 80,
+ "Q": 81,
+ "R": 82,
+ "S": 83,
+ "T": 84,
+ "U": 85,
+ "V": 86,
+ "W": 87,
+ "X": 88,
+ "Y": 89,
+ "Z": 90,
+ "[": 91,
+ "\\": 92,
+ "]": 93,
+ "^": 94,
+ "_": 95,
+ "`": 96,
+ "a": 97,
+ "b": 98,
+ "c": 99,
+ "d": 100,
+ "e": 101,
+ "f": 102,
+ "g": 103,
+ "h": 104,
+ "i": 105,
+ "j": 106,
+ "k": 107,
+ "l": 108,
+ "m": 109,
+ "n": 110,
+ "o": 111,
+ "p": 112,
+ "q": 113,
+ "r": 114,
+ "s": 115,
+ "t": 116,
+ "u": 117,
+ "v": 118,
+ "w": 119,
+ "x": 120,
+ "y": 121,
+ "z": 122,
+ "{": 123,
+ "|": 124,
+ "}": 125,
+ "~": 126,
+ "\u007f": 127,
+ "\u0080": 128,
+ "\u0081": 129,
+ "\u0082": 130,
+ "\u0083": 131,
+ "\u0084": 132,
+ "\u0085": 133,
+ "\u0086": 134,
+ "\u0087": 135,
+ "\u0088": 136,
+ "\u0089": 137,
+ "\u008a": 138,
+ "\u008b": 139,
+ "\u008c": 140,
+ "\u008d": 141,
+ "\u008e": 142,
+ "\u008f": 143,
+ "\u0090": 144,
+ "\u0091": 145,
+ "\u0092": 146,
+ "\u0093": 147,
+ "\u0094": 148,
+ "\u0095": 149,
+ "\u0096": 150,
+ "\u0097": 151,
+ "\u0098": 152,
+ "\u0099": 153,
+ "\u009a": 154,
+ "\u009b": 155,
+ "\u009c": 156,
+ "\u009d": 157,
+ "\u009e": 158,
+ "\u009f": 159,
+ "\u00a0": 160,
+ "¡": 161,
+ "¢": 162,
+ "£": 163,
+ "¤": 164,
+ "¥": 165,
+ "¦": 166,
+ "§": 167,
+ "¨": 168,
+ "©": 169,
+ "ª": 170,
+ "«": 171,
+ "¬": 172,
+ "\u00ad": 173,
+ "®": 174,
+ "¯": 175,
+ "°": 176,
+ "±": 177,
+ "²": 178,
+ "³": 179,
+ "´": 180,
+ "µ": 181,
+ "¶": 182,
+ "·": 183,
+ "¸": 184,
+ "¹": 185,
+ "º": 186,
+ "»": 187,
+ "¼": 188,
+ "½": 189,
+ "¾": 190,
+ "¿": 191,
+ "À": 192,
+ "Á": 193,
+ "Â": 194,
+ "Ã": 195,
+ "Ä": 196,
+ "Å": 197,
+ "Æ": 198,
+ "Ç": 199,
+ "È": 200,
+ "É": 201,
+ "Ê": 202,
+ "Ë": 203,
+ "Ì": 204,
+ "Í": 205,
+ "Î": 206,
+ "Ï": 207,
+ "Ð": 208,
+ "Ñ": 209,
+ "Ò": 210,
+ "Ó": 211,
+ "Ô": 212,
+ "Õ": 213,
+ "Ö": 214,
+ "×": 215,
+ "Ø": 216,
+ "Ù": 217,
+ "Ú": 218,
+ "Û": 219,
+ "Ü": 220,
+ "Ý": 221,
+ "Þ": 222,
+ "ß": 223,
+ "à": 224,
+ "á": 225,
+ "â": 226,
+ "ã": 227,
+ "ä": 228,
+ "å": 229,
+ "æ": 230,
+ "ç": 231,
+ "è": 232,
+ "é": 233,
+ "ê": 234,
+ "ë": 235,
+ "ì": 236,
+ "í": 237,
+ "î": 238,
+ "ï": 239,
+ "ð": 240,
+ "ñ": 241,
+ "ò": 242,
+ "ó": 243,
+ "ô": 244,
+ "õ": 245,
+ "ö": 246,
+ "÷": 247,
+ "ø": 248,
+ "ù": 249,
+ "ú": 250,
+ "û": 251,
+ "ü": 252,
+ "ý": 253,
+ "þ": 254,
+ "ÿ": 255,
+ "<|endoftext|>": 256
+ },
+ "merges": []
+ }
+ }
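
For reference, a minimal sketch of how the file above can be consumed: loading it with the Hugging Face `tokenizers` library and round-tripping a short ASCII string. The file path and sample text are assumptions for illustration, not part of this commit.

```python
# Minimal sketch (assumed usage, not part of this commit): load the uploaded
# tokenizer.json with the `tokenizers` library and encode a short string.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")  # assumed path to the file added above

enc = tok.encode("hello")
print(enc.tokens)           # ['h', 'e', 'l', 'l', 'o'] - empty merges, so one token per byte
print(enc.ids)              # [104, 101, 108, 108, 111] per the byte-value vocab above
print(tok.decode(enc.ids))  # "hello"
```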
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "add_prefix_space": false,
+ "backend": "tokenizers",
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "errors": "replace",
+ "is_local": true,
+ "model_max_length": 64,
+ "pad_token": null,
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>"
+ }
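
And a minimal sketch of loading the full tokenizer through `transformers`, which reads the special tokens and `model_max_length` from this config and the vocab from tokenizer.json. The local checkout path is an assumption, and the exact loading behavior depends on the installed transformers version.

```python
# Minimal sketch (assumed usage, not part of this commit): load the repo's
# tokenizer via transformers. AutoTokenizer should resolve "tokenizer_class"
# to the fast GPT-2 tokenizer backed by tokenizer.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/local/checkout")  # assumed path to this repo

print(tok.eos_token, tok.eos_token_id)  # "<|endoftext|>" 256, from added_tokens above
print(tok.model_max_length)             # 64, from model_max_length in this config
```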