from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer
from transformers import AutoConfig
from pythainlp.tokenize import word_tokenize

# Reuse the roberta-base configuration for the new Thai model
language = "th"
model_config = "roberta-base"
model_dir = model_config + f"-pretrained-{language}"
config = AutoConfig.from_pretrained(model_config)
config.save_pretrained(model_dir)

# Download the Thai split of the deduplicated OSCAR corpus
raw_dataset = load_dataset(
    "oscar", f"unshuffled_deduplicated_{language}", split="train"
)
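
# Optional peek at the corpus (not in the original script): print the
# number of documents and a 100-character sample to confirm the download.
print(len(raw_dataset))
print(raw_dataset[0]["text"][:100])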

# A byte-level BPE tokenizer, as used by RoBERTa
tokenizer = ByteLevelBPETokenizer()

# Pre-tokenize Thai text into space-separated words with PyThaiNLP's
# "newmm" dictionary segmenter, since Thai does not delimit words with spaces
def th_tokenize(text):
    result = " ".join(word_tokenize(text, engine="newmm", keep_whitespace=False))
    return result
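
# Quick illustration; the exact segmentation is up to newmm, but a greeting
# like "สวัสดีครับ" is typically split into two words, "สวัสดี ครับ".
print(th_tokenize("สวัสดีครับ"))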

# Stream the corpus to the trainer in batches, pre-tokenizing each text
def batch_iterator(batch_size=10000):
    for i in range(0, len(raw_dataset), batch_size):
        yield [th_tokenize(text) for text in raw_dataset[i : i + batch_size]["text"]]

# Train the BPE vocabulary; 50265 matches roberta-base's vocabulary size,
# and the special tokens follow the RoBERTa convention
tokenizer.train_from_iterator(
    batch_iterator(),
    vocab_size=50265,
    min_frequency=2,
    special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"],
)
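
# Sanity check (an addition, not part of the original script): the learned
# vocabulary should contain exactly vocab_size entries.
print(tokenizer.get_vocab_size())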

# Serialize the trained tokenizer to disk
tokenizer.save("./tokenizer.json")
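
# Optional round trip (an assumption about the downstream setup, not part of
# the original script): wrap the saved file in a transformers fast tokenizer
# and encode one pre-tokenized sample. Note that the wrapper does not apply
# th_tokenize automatically, so Thai text must be pre-segmented as in training.
from transformers import PreTrainedTokenizerFast

loaded_tokenizer = PreTrainedTokenizerFast(
    tokenizer_file="./tokenizer.json",
    bos_token="<s>",
    eos_token="</s>",
    unk_token="<unk>",
    pad_token="<pad>",
    mask_token="<mask>",
)
print(loaded_tokenizer(th_tokenize("สวัสดีครับ")).input_ids)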