from itertools import islice

from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer

# Stream the FineWeb-Edu 10B-token sample so the corpus never has to fit in memory.
dataset = load_dataset("HuggingFaceFW/fineweb-edu", "sample-10BT", split="train", streaming=True)

def get_training_corpus():
    # Yield the raw text of the first 50,000 documents. islice stops cleanly
    # if the stream runs short, instead of raising StopIteration inside the generator.
    for example in islice(dataset, 50000):
        yield example["text"]

tokenizer = ByteLevelBPETokenizer()

# Train a byte-level BPE vocabulary on the streamed corpus.
tokenizer.train_from_iterator(
    get_training_corpus(),
    vocab_size=500,
    min_frequency=2,
    special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"],
)

# Writes custom_llama_tokenizer-vocab.json and custom_llama_tokenizer-merges.txt
# into the current directory.
tokenizer.save_model(".", "custom_llama_tokenizer")
print("Tokenizer training complete!")
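Once training finishes, the tokenizer can be reloaded from the two files save_model wrote and used to encode text. A minimal sketch, assuming the default {prefix}-vocab.json / {prefix}-merges.txt naming that save_model uses; the sample sentence is illustrative:

from tokenizers import ByteLevelBPETokenizer

# Reload the trained tokenizer from the files written by save_model above.
loaded = ByteLevelBPETokenizer(
    "custom_llama_tokenizer-vocab.json",
    "custom_llama_tokenizer-merges.txt",
)

encoding = loaded.encode("Byte-level BPE handles any UTF-8 input.")
print(encoding.tokens)  # subword tokens
print(encoding.ids)     # corresponding vocabulary ids

With vocab_size=500 the vocabulary is barely larger than the 256-byte base alphabet, so most words will split into several short pieces; a larger vocab_size yields longer, more word-like tokens.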