my-custom-tokenizer-0526 / tokenizer.json
{
"version": "1.0",
"truncation": null,
"padding": null,
"added_tokens": [
{
"id": 0,
"content": "<pad>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 1,
"content": "<unk>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 2,
"content": "<s>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 3,
"content": "</s>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
}
],
"normalizer": null,
"pre_tokenizer": {
"type": "Whitespace"
},
"post_processor": null,
"decoder": null,
"model": {
"type": "BPE",
"dropout": null,
"unk_token": null,
"continuing_subword_prefix": null,
"end_of_word_suffix": null,
"fuse_unk": false,
"byte_fallback": false,
"ignore_merges": false,
"vocab": {
"<pad>": 0,
"<unk>": 1,
"<s>": 2,
"</s>": 3,
"'": 4,
".": 5,
"H": 6,
"T": 7,
"a": 8,
"c": 9,
"e": 10,
"f": 11,
"g": 12,
"h": 13,
"i": 14,
"l": 15,
"m": 16,
"n": 17,
"o": 18,
"p": 19,
"r": 20,
"s": 21,
"t": 22,
"en": 23,
"ce": 24,
"er": 25,
"in": 26,
"is": 27,
"sen": 28,
"ten": 29,
"senten": 30,
"sentence": 31,
"Her": 32,
"Th": 33,
"am": 34,
"an": 35,
"ain": 36,
"fo": 37,
"her": 38,
"le": 39,
"ot": 40,
"ple": 41,
"rain": 42,
"sam": 43,
"train": 44,
"ing": 45,
"Here": 46,
"This": 47,
"anot": 48,
"for": 49,
"sample": 50,
"training": 51,
"another": 52
},
"merges": [
[
"e",
"n"
],
[
"c",
"e"
],
[
"e",
"r"
],
[
"i",
"n"
],
[
"i",
"s"
],
[
"s",
"en"
],
[
"t",
"en"
],
[
"sen",
"ten"
],
[
"senten",
"ce"
],
[
"H",
"er"
],
[
"T",
"h"
],
[
"a",
"m"
],
[
"a",
"n"
],
[
"a",
"in"
],
[
"f",
"o"
],
[
"h",
"er"
],
[
"l",
"e"
],
[
"o",
"t"
],
[
"p",
"le"
],
[
"r",
"ain"
],
[
"s",
"am"
],
[
"t",
"rain"
],
[
"in",
"g"
],
[
"Her",
"e"
],
[
"Th",
"is"
],
[
"an",
"ot"
],
[
"fo",
"r"
],
[
"sam",
"ple"
],
[
"train",
"ing"
],
[
"anot",
"her"
]
]
}
}
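
A tokenizer with this structure can be loaded and exercised with the Hugging Face tokenizers library. The following is a minimal sketch, assuming the JSON above is saved locally as tokenizer.json; the sample sentence is chosen so that every word resolves to a single entry in the vocabulary above.

from tokenizers import Tokenizer

# Load the serialized tokenizer: Whitespace pre-tokenizer, BPE model,
# no normalizer or post-processor, and four special added tokens
# (<pad>, <unk>, <s>, </s>) at ids 0-3.
tokenizer = Tokenizer.from_file("tokenizer.json")

# Each word reduces to one vocabulary entry through the merge list,
# e.g. "sentence" via en -> ce -> sen -> ten -> senten -> sentence.
encoding = tokenizer.encode("This is another sample sentence")
print(encoding.tokens)  # expected: ['This', 'is', 'another', 'sample', 'sentence']
print(encoding.ids)     # expected: [47, 27, 52, 50, 31]

# The model sets "unk_token": null and "byte_fallback": false, so
# characters outside the 19-symbol alphabet are dropped silently
# rather than being mapped to <unk>:
print(tokenizer.encode("brand new words").tokens)

A file of this shape is what tokenizers emits after a small BPE training run with Whitespace pre-tokenization. A hypothetical reproduction might look like the sketch below; the corpus is a guess inferred from the vocabulary (the real training text is not part of this file), so the resulting vocab and merges would only approximate the ones above.

from tokenizers import Tokenizer, models, pre_tokenizers, trainers

# Hypothetical corpus: short sentences consistent with vocabulary entries
# such as "This", "sample", "sentence", "training", "Here", "'", "another".
corpus = [
    "This is a sample sentence for training.",
    "Here's another sample sentence.",
]

tok = Tokenizer(models.BPE())
tok.pre_tokenizer = pre_tokenizers.Whitespace()
trainer = trainers.BpeTrainer(
    vocab_size=53,  # 4 special tokens + 19 alphabet symbols + 30 merges
    special_tokens=["<pad>", "<unk>", "<s>", "</s>"],
)
tok.train_from_iterator(corpus, trainer)
tok.save("tokenizer.json")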