{
  "added_tokens_decoder": {
    "4": {
      "content": "N",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "-",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "auto_map": {
    "AutoTokenizer": [
      "huggingface.Tokenizer",
      null
    ]
  },
  "clean_up_tokenization_spaces": false,
  "extra_special_tokens": {},
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "-",
  "special_tokens": {
    "pad": "-",
    "unk": "N"
  },
  "split_special_tokens": true,
  "tokenizer_class": "Tokenizer",
  "unk_token": "N",
  "vocab": [
    "A",
    "C",
    "G",
    "T",
    "N",
    "-"
  ]
}
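For reference, the `added_tokens_decoder` keys `4` and `5` line up with the positions of `N` and `-` in `vocab`, so the config fully determines a six-symbol character-level mapping over DNA bases. Below is a minimal sketch of that mapping in plain Python; the `encode`/`decode` helpers are illustrative names, not the API of the custom class that `auto_map` references.

```python
# A minimal sketch of the character-level mapping this config pins down.
# The helper names below are illustrative, not the actual API of the
# custom Tokenizer class referenced by auto_map.

VOCAB = ["A", "C", "G", "T", "N", "-"]
TOKEN_TO_ID = {tok: i for i, tok in enumerate(VOCAB)}
UNK_ID = TOKEN_TO_ID["N"]  # unk_token: "N" (id 4)
PAD_ID = TOKEN_TO_ID["-"]  # pad_token: "-" (id 5)

def encode(seq: str, pad_to: int = 0) -> list[int]:
    """Map each base to its id; unknown characters fall back to N's id."""
    ids = [TOKEN_TO_ID.get(ch, UNK_ID) for ch in seq]
    if pad_to > len(ids):
        ids += [PAD_ID] * (pad_to - len(ids))
    return ids

def decode(ids: list[int]) -> str:
    """Invert the mapping; ids 4 and 5 decode to the special tokens."""
    return "".join(VOCAB[i] for i in ids)

print(encode("ACGTX", pad_to=8))  # [0, 1, 2, 3, 4, 5, 5, 5]
print(decode([0, 1, 2, 3]))       # ACGT
```

In practice, because `auto_map` points `AutoTokenizer` at a `Tokenizer` class in a local `huggingface.py` module, loading this config through `transformers` requires passing `trust_remote_code=True` to `AutoTokenizer.from_pretrained`.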