{
"version": "1.0",
"truncation": null,
"padding": null,
"added_tokens": [
{
"id": 0,
"content": "<PAD>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": true,
"special": true
},
{
"id": 1,
"content": "<UNK>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": true,
"special": true
},
{
"id": 2,
"content": "<SOS>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": true,
"special": true
},
{
"id": 3,
"content": "<EOS>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": true,
"special": true
},
{
"id": 4,
"content": "<IMG>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": true,
"special": true
},
{
"id": 5,
"content": "<AUD>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": true,
"special": true
},
{
"id": 6,
"content": "<VID>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": true,
"special": true
}
],
"normalizer": {
"type": "NFD",
"strip_accents": false
},
"pre_tokenizer": {
"type": "Whitespace"
},
"post_processor": {
"type": "TemplateProcessing",
"single": [
"<SOS>",
"$A",
"<EOS>"
],
"pair": [
"<SOS>",
"$A",
"<EOS>",
"$B:1",
"<EOS>:1"
],
"special_tokens": {
"<SOS>": {
"id": 2,
"type_id": 0
},
"<EOS>": {
"id": 3,
"type_id": 0
}
}
},
"decoder": {
"type": "WordPiece",
"prefix": "##",
"cleanup": true
},
"model": {
"type": "WordPiece",
"unk_token": "<UNK>",
"continuing_subword_prefix": "##",
"max_input_chars_per_word": 100
}
}