ProtGPT3-1.3B / tokenizer.json
{
"version": "1.0",
"truncation": null,
"padding": null,
"added_tokens": [
{
"id": 0,
"content": "<|pad|>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 1,
"content": "<|bos|>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 2,
"content": "<|eos|>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 3,
"content": "[UNK]",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 31,
"content": "<s>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 32,
"content": "</s>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 33,
"content": "<unk>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
}
],
"normalizer": null,
"pre_tokenizer": {
"type": "Split",
"pattern": {
"String": ""
},
"behavior": "Isolated",
"invert": false
},
"post_processor": {
"type": "TemplateProcessing",
"single": [
{
"Sequence": {
"id": "A",
"type_id": 0
}
}
],
"pair": [
{
"Sequence": {
"id": "A",
"type_id": 0
}
},
{
"Sequence": {
"id": "B",
"type_id": 1
}
}
],
"special_tokens": {}
},
"decoder": null,
"model": {
"type": "WordLevel",
"vocab": {
"<|pad|>": 0,
"<|bos|>": 1,
"<|eos|>": 2,
"[UNK]": 3,
"1": 4,
"2": 5,
"A": 6,
"B": 7,
"C": 8,
"D": 9,
"E": 10,
"F": 11,
"G": 12,
"H": 13,
"I": 14,
"K": 15,
"L": 16,
"M": 17,
"N": 18,
"O": 19,
"P": 20,
"Q": 21,
"R": 22,
"S": 23,
"T": 24,
"U": 25,
"V": 26,
"W": 27,
"X": 28,
"Y": 29,
"Z": 30
},
"unk_token": "[UNK]"
}
}
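
This tokenizer.json is in the serialization format of the Hugging Face tokenizers library: a WordLevel model over single characters. The Split pre-tokenizer, with an empty-string pattern and Isolated behavior, cuts the input into individual characters, so each amino-acid letter maps to exactly one id from the vocab above. Below is a minimal sketch of loading and using the file, assuming it is saved locally as tokenizer.json; the sequence "MKV" is an arbitrary illustration, not anything from the repository.

from tokenizers import Tokenizer

# Load the character-level WordLevel tokenizer defined by the JSON above.
tok = Tokenizer.from_file("tokenizer.json")

# Every character becomes one token, using the ids from the vocab
# (A=6 ... Z=30, with J absent from the amino-acid alphabet).
enc = tok.encode("MKV")
print(enc.tokens)  # ['M', 'K', 'V']
print(enc.ids)     # [17, 15, 26]

# Characters outside the vocab fall back to the unk_token [UNK] (id 3);
# 'J' is the one standard letter not present here.
print(tok.encode("MJV").ids)  # [17, 3, 26]

For use with transformers, the same file can be wrapped in a PreTrainedTokenizerFast, e.g. PreTrainedTokenizerFast(tokenizer_file="tokenizer.json", pad_token="<|pad|>", bos_token="<|bos|>", eos_token="<|eos|>", unk_token="[UNK]"); the special-token assignments here simply mirror the added_tokens entries above rather than anything the repository documents.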