{
  "tokenizer_class": "GPT2TokenizerFast",
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "unk_token": "<|endoftext|>",
  "pad_token": "<|endoftext|>",
  "model_max_length": 2048,
  "vocab_file": "vocab.json",
  "merges_file": "merges.txt"
}