# Chiquitin
# upload tokenizer script and configs
# 41fb647
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
# #
# This file was created by: Alberto Palomo Alonso #
# Universidad de Alcalá - Escuela Politécnica Superior #
# #
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
# Import statements:
from dataclasses import dataclass
@dataclass
class TokenizerConfig:
    """Plain configuration container for tokenizer training hyperparameters.

    Holds only default values; no behavior. Consumed by the tokenizer
    training script this config accompanies (not visible in this file).
    """

    # Target vocabulary size for the trained tokenizer (2**15 tokens).
    vocab_size: int = 32_768
    # Frequency threshold — presumably the minimum number of occurrences
    # required for a token/merge to be kept; confirm against the trainer API.
    min_frequency: int = 2
    # NOTE(review): 1024 ** 6 ≈ 1.15e18 — effectively "unlimited". If a real
    # cap was intended (e.g. 1024 * 6 = 6144), this is a typo; confirm.
    max_length: int = 1024 ** 6
    # Fraction of the dataset to use for training (1.0 = use everything).
    dataset_portion: float = 1.0
    # Upper bound on sentences — presumably per sample/batch fed to the
    # trainer; exact semantics depend on the consuming script. Verify.
    max_sentences: int = 384
    # Batch size used while iterating the corpus during tokenizer training.
    train_batch_size: int = 1024
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
# END OF FILE #
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #