#!/usr/bin/env python3
from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer
# Load the Italian portion of the OSCAR corpus (sizes as reported by the dataset card):
# Size of downloaded dataset files: 26637.62 MB
# Size of the generated dataset: 70661.48 MB
# Total amount of disk used: 97299.10 MB
dataset = load_dataset("oscar", "unshuffled_deduplicated_it", split="train")
# Instantiate a byte-level BPE tokenizer (the scheme used by GPT-2/RoBERTa)
tokenizer = ByteLevelBPETokenizer()
def batch_iterator(batch_size=1000):
    # Yield the raw texts in chunks so the full corpus is never
    # materialized in memory at once.
    for i in range(0, len(dataset), batch_size):
        yield dataset[i : i + batch_size]["text"]
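# Each yielded batch is a plain Python list of up to `batch_size` strings,
# which is the input format train_from_iterator() expects.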
# Customized training: RoBERTa-sized vocabulary (50265) with RoBERTa's special tokens
tokenizer.train_from_iterator(
    batch_iterator(),
    vocab_size=50265,
    min_frequency=2,
    special_tokens=[
        "<s>",
        "<pad>",
        "</s>",
        "<unk>",
        "<mask>",
    ],
)
# Save the trained tokenizer to a single JSON file
tokenizer.save("./tokenizer.json")
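
# A minimal sanity-check sketch: load the saved tokenizer back and encode a
# sample sentence. The sentence itself is illustrative only.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("./tokenizer.json")
enc = tok.encode("Buongiorno, come stai?")
print(enc.tokens)  # byte-level BPE pieces
print(enc.ids)     # corresponding vocabulary ids

# If the tokenizer is intended for a transformers model, it can be wrapped
# in a fast tokenizer class (assuming transformers is installed):
# from transformers import RobertaTokenizerFast
# fast_tok = RobertaTokenizerFast(tokenizer_file="./tokenizer.json")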