#!/usr/bin/env python3
from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer

# Load the German portion of the OSCAR corpus
dataset = load_dataset("oscar", "unshuffled_deduplicated_de", split="train")

# Instantiate a byte-level BPE tokenizer
tokenizer = ByteLevelBPETokenizer()

def batch_iterator(batch_size=1000):
    # Stream the corpus in batches so the full text never has to sit in memory at once
    for i in range(0, len(dataset), batch_size):
        yield dataset[i: i + batch_size]["text"]

# Customized training with RoBERTa's vocabulary size and special tokens
tokenizer.train_from_iterator(batch_iterator(), vocab_size=50265, min_frequency=2, special_tokens=[
    "<s>",
    "<pad>",
    "</s>",
    "<unk>",
    "<mask>",
])

# Save files to disk (the target directory must already exist)
tokenizer.save("./roberta-base-german/tokenizer.json")
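
# A minimal sketch, not part of the original script: reload the saved tokenizer.json
# and encode a sample sentence to check that the vocabulary and special tokens were
# written correctly. Tokenizer.from_file is the standard loader in the tokenizers library;
# the sample sentence below is an arbitrary example.
from tokenizers import Tokenizer

loaded = Tokenizer.from_file("./roberta-base-german/tokenizer.json")
encoding = loaded.encode("Das ist ein Test.")
print(encoding.tokens)  # byte-level BPE pieces
print(encoding.ids)     # corresponding vocabulary ids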