{ "added_tokens_decoder": { "259": { "content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "260": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "261": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "262": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "263": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "264": { "content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "265": { "content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true } }, "additional_special_tokens": [ "<|endoftext|>", "", "", "", "", "<|im_end|>", "<|im_start|>" ], "clean_up_tokenization_spaces": true, "model_max_length": 1000000000000000019884624838656, "tokenizer_class": "PreTrainedTokenizerFast" }