import itertools

from tokenizers import Tokenizer
from tokenizers.decoders import WordPiece as WordPieceDecoder
from tokenizers.models import WordPiece as WordPieceModel
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.trainers import WordPieceTrainer

from datasets import load_dataset
from datasets.utils.logging import set_verbosity_error

# Keep the console output readable: let datasets report errors only.
set_verbosity_error()

from utils import SampleBatch

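# Flatten one batched slice of the dataset into a plain list of sentences.
# Every row of the 'translation' column maps language codes to parallel
# sentences, and both sides of each pair become tokenizer training text.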
def unpack_samples(batch: SampleBatch):
    iterator = (
        sample.values()
        for sample in batch['translation']
    )
    return list(itertools.chain.from_iterable(iterator))
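# For illustration (the language codes here are hypothetical), a batch such as
#   {'translation': [{'de': 'Guten Morgen', 'en': 'Good morning'}]}
# unpacks to ['Guten Morgen', 'Good morning'].
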
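# Assemble an untrained BERT-style WordPiece tokenizer: normalization,
# pre-tokenization, the WordPiece model itself, and a matching decoder.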
def build_tokenizer(
    clean_text: bool = True,
    strip_accents: bool = True,
    lowercase: bool = True
) -> Tokenizer:
    # The model's unknown token must be one of the special tokens the
    # trainer registers below.
    tokenizer = Tokenizer(
        model=WordPieceModel(unk_token='<UNK>')
    )
    tokenizer.normalizer = BertNormalizer(
        clean_text=clean_text,
        handle_chinese_chars=True,
        strip_accents=strip_accents,
        lowercase=lowercase
    )
    # Split on whitespace and isolate punctuation, BERT-style.
    tokenizer.pre_tokenizer = BertPreTokenizer()
    # The decoder merges '##'-prefixed pieces back into whole words.
    tokenizer.decoder = WordPieceDecoder()
    return tokenizer

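# Load the training split of the corpus in its 'balanced' configuration.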
train_dset = load_dataset(
    path='nordmann2023',
    name='balanced',
    split='train'
)

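# Train with streaming batches of 10,000 rows; accents and casing are kept
# (strip_accents=False, lowercase=False). `length` only feeds the progress
# reporting: each row contributes two sentences, hence num_rows * 2.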
tokenizer = build_tokenizer(
    clean_text=True,
    strip_accents=False,
    lowercase=False
)
tokenizer.train_from_iterator(
    iterator=(
        unpack_samples(batch)
        for batch in train_dset.iter(batch_size=10000)
    ),
    trainer=WordPieceTrainer(
        vocab_size=40000,
        special_tokens=[
            '<UNK>', '<CLS>', '<SEP>', '<PAD>', '<MASK>'
        ]
    ),
    length=train_dset.num_rows * 2
)

# Serialize the trained tokenizer (vocabulary plus pipeline) to disk.
tokenizer.save(path='tokenizer.json')
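# A quick sanity check, assuming the training above has finished: reload the
# serialized tokenizer and round-trip a sentence through it. The sample text
# is illustrative, and the exact split depends on the learned vocabulary.
reloaded = Tokenizer.from_file('tokenizer.json')
encoding = reloaded.encode('Machine translation needs a shared vocabulary.')
print(encoding.tokens)                # WordPiece tokens for the sentence
print(reloaded.decode(encoding.ids))  # decoder rejoins '##' pieces into words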