{
  "clean_up_tokenization_spaces": true,
  "model_max_length": 2048,
  "special_tokens": [
    "<|pad|>",
    "<|endoftext|>",
    "<|beginoftext|>",
    "<|unk|>"
  ],
  "tokenizer_class": "PreTrainedTokenizerFast"
}