from datasets import load_from_disk
from tokenizers import ByteLevelBPETokenizer
from transformers import AutoConfig, AutoTokenizer


# output directory for the model config and tokenizer files
model_dir = "./"

# start from the roberta-large architecture configuration
config = AutoConfig.from_pretrained("roberta-large")
config.save_pretrained(model_dir)

# load the preprocessed dataset and keep only its training split
dataset = load_from_disk("/researchdisk1/data/training_data_full")
dataset = dataset["train"]

# instantiate a byte-level BPE tokenizer and stream the text column to it in batches
tokenizer = ByteLevelBPETokenizer()

def batch_iterator(batch_size=1000):
    for i in range(0, len(dataset), batch_size):
        yield dataset[i : i + batch_size]["text"]

# train with the same vocabulary size as roberta-large and RoBERTa's special tokens
tokenizer.train_from_iterator(
    batch_iterator(),
    vocab_size=config.vocab_size,
    min_frequency=2,
    special_tokens=[
        "<s>",
        "<pad>",
        "</s>",
        "<unk>",
        "<mask>",
    ],
)

# save the trained tokenizer to tokenizer.json
tokenizer.save(f"{model_dir}/tokenizer.json")

# reload through transformers and re-save so the standard auxiliary files
# (tokenizer_config.json, special_tokens_map.json) are written as well
tokenizer = AutoTokenizer.from_pretrained(model_dir)
tokenizer.save_pretrained(model_dir)
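
# Optional sanity check, a minimal sketch that is not part of the original
# script (the sample sentence is an arbitrary assumption): encode and decode
# one string to confirm the saved tokenizer loads and round-trips cleanly.
encoded = tokenizer("Hello world!")
print(encoded.input_ids)
print(tokenizer.decode(encoded.input_ids))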