import argparse
from collections.abc import Iterator

from datasets import load_dataset
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.normalizers import Sequence, NFC, Strip, Lowercase
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import WordLevelTrainer
from tqdm.auto import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('--vocabulary', type=int, default=75000, help='Vocabulary size')
    parser.add_argument('--batch', type=int, default=1024, help='Batch size')
    args = parser.parse_args()

    # Train on the concatenation of all three WikiText-103 splits.
    dataset = load_dataset('wikitext', 'wikitext-103-raw-v1', split='train+validation+test')

    # Word-level model: each pre-tokenized word is a vocabulary entry and
    # out-of-vocabulary words map to <unk>. Text is NFC-normalized, stripped,
    # and lowercased before being split on whitespace and punctuation.
    tokenizer = Tokenizer(WordLevel(unk_token='<unk>'))
    tokenizer.normalizer = Sequence([NFC(), Strip(), Lowercase()])
    tokenizer.pre_tokenizer = Whitespace()

    def batches(batch_size: int) -> Iterator[list[str]]:
        # Yield batches of raw lines; train_from_iterator accepts lists of strings.
        for batch in tqdm(dataset.iter(batch_size=batch_size), desc='Tokenization'):
            yield batch['text']

    trainer = WordLevelTrainer(vocab_size=args.vocabulary,
                               special_tokens=['<s>', '</s>', '<unk>'])

    # length is the total number of sequences, used only for progress reporting.
    tokenizer.train_from_iterator(batches(args.batch), trainer=trainer, length=len(dataset))

    tokenizer.save('tokenizer.json', pretty=True)


if __name__ == '__main__':
    main()
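
# Usage sketch (illustrative only, not executed as part of this script): load
# the saved tokenizer and encode a sentence with it. Assumes the run above has
# written 'tokenizer.json' to the working directory.
#
#     from tokenizers import Tokenizer
#
#     tok = Tokenizer.from_file('tokenizer.json')
#     encoding = tok.encode('The quick brown fox jumps over the lazy dog.')
#     print(encoding.tokens)  # lowercased word-level tokens
#     print(encoding.ids)     # integer ids; out-of-vocabulary words map to <unk>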