ProtGPT3-MSA-nano / tokenizer.json
{
"version": "1.0",
"truncation": null,
"padding": null,
"added_tokens": [
{
"id": 0,
"content": "<|pad|>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 1,
"content": "<|bos|>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 2,
"content": "<|eos|>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 3,
"content": "<unk>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
}
],
"normalizer": null,
"pre_tokenizer": {
"type": "WhitespaceSplit"
},
"post_processor": null,
"decoder": null,
"model": {
"type": "WordLevel",
"vocab": {
"<|pad|>": 0,
"<|bos|>": 1,
"<|eos|>": 2,
"<unk>": 3,
"<gap>": 4,
"<no_gap>": 5,
"<query>": 6,
"<s>": 7,
"-": 8,
"1": 9,
"2": 10,
"A": 11,
"B": 12,
"C": 13,
"D": 14,
"E": 15,
"F": 16,
"G": 17,
"H": 18,
"I": 19,
"K": 20,
"L": 21,
"M": 22,
"N": 23,
"O": 24,
"P": 25,
"Q": 26,
"R": 27,
"S": 28,
"T": 29,
"U": 30,
"V": 31,
"W": 32,
"X": 33,
"Y": 34,
"Z": 35
},
"unk_token": "<unk>"
}
}
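A minimal usage sketch, assuming this file is saved locally as tokenizer.json and the `tokenizers` library is installed. Because the pre_tokenizer is WhitespaceSplit and the model is WordLevel, every whitespace-delimited token must match a vocab entry exactly or it maps to <unk> (id 3), so input sequences must be space-separated. The example sequence below is illustrative, not taken from the model's training data.

from tokenizers import Tokenizer

# Load the tokenizer directly from this config file.
tokenizer = Tokenizer.from_file("tokenizer.json")

# Residues and MSA markers (<query>, <gap>, "-", ...) must be space-separated,
# since WhitespaceSplit is the only pre-tokenization step.
encoding = tokenizer.encode("<query> M K T A Y <gap> -")
print(encoding.tokens)  # ['<query>', 'M', 'K', 'T', 'A', 'Y', '<gap>', '-']
print(encoding.ids)     # [6, 22, 20, 29, 11, 34, 4, 8] per the vocab above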