Columns: text (string, lengths 1 to 1.02k) · class_index (int64, values 0 to 10.8k) · source (string, lengths 85 to 188)
use of the past hidden states for their predictions. If this argument is set to a positive int, the `Trainer` will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument `mems`. tpu_name (`str`, *optional*): The name of the TPU the process is running on. tpu_zone (`str`, *optional*): The zone of the TPU the process is running on. If not specified, we will attempt to automatically detect from metadata. gcp_project (`str`, *optional*): Google Cloud Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect from metadata. run_name (`str`, *optional*): A descriptor for the run. Notably used for wandb, mlflow and comet logging. xla (`bool`, *optional*): Whether to activate the XLA compilation or not. """
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
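As a hedged illustration of how the TPU/XLA fields documented above are typically used (a minimal sketch, assuming transformers and TensorFlow are installed; the output directory, TPU name and zone below are placeholder values, not taken from the source):

# Minimal sketch; "my_output_dir", "my-tpu" and the zone are hypothetical placeholders.
from transformers import TFTrainingArguments

args = TFTrainingArguments(
    output_dir="my_output_dir",   # required output directory
    tpu_name="my-tpu",            # name of the TPU to connect to (optional)
    tpu_zone="us-central1-a",     # zone; auto-detected from metadata if omitted
    xla=True,                     # enable XLA compilation
)

# The strategy property lazily builds a tf.distribute strategy
# (TPUStrategy, MirroredStrategy, or OneDeviceStrategy).
print(args.strategy)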
framework = "tf" tpu_name: Optional[str] = field( default=None, metadata={"help": "Name of TPU"}, ) tpu_zone: Optional[str] = field( default=None, metadata={"help": "Zone of TPU"}, ) gcp_project: Optional[str] = field( default=None, metadata={"help": "Name of Cloud TPU-enabled project"}, ) poly_power: float = field( default=1.0, metadata={"help": "Power for the Polynomial decay LR scheduler."}, ) xla: bool = field(default=False, metadata={"help": "Whether to activate the XLA compilation or not"}) @cached_property def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", int]: requires_backends(self, ["tf"]) logger.info("Tensorflow: setting up strategy") gpus = tf.config.list_physical_devices("GPU") # Set to float16 at first if self.fp16: keras.mixed_precision.set_global_policy("mixed_float16")
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
if self.no_cuda: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") else: try: if self.tpu_name: tpu = tf.distribute.cluster_resolver.TPUClusterResolver( self.tpu_name, zone=self.tpu_zone, project=self.gcp_project ) else: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: if self.tpu_name: raise RuntimeError(f"Couldn't connect to TPU {self.tpu_name}!") else: tpu = None if tpu: # Set to bfloat16 in case of TPU if self.fp16: keras.mixed_precision.set_global_policy("mixed_bfloat16") tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.TPUStrategy(tpu)
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
elif len(gpus) == 0: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") elif len(gpus) == 1: strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") elif len(gpus) > 1: # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` strategy = tf.distribute.MirroredStrategy() else: raise ValueError("Cannot find the proper strategy, please check your environment properties.") return strategy @property def strategy(self) -> "tf.distribute.Strategy": """ The strategy used for distributed training. """ requires_backends(self, ["tf"]) return self._setup_strategy
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
@property def n_replicas(self) -> int: """ The number of replicas (CPUs, GPUs or TPU cores) used in this training. """ requires_backends(self, ["tf"]) return self._setup_strategy.num_replicas_in_sync @property def should_log(self): """ Whether or not the current process should produce logs. """ return False # TF Logging is handled by Keras, not the Trainer
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
@property def train_batch_size(self) -> int: """ The actual batch size for training (may differ from `per_gpu_train_batch_size` in distributed training). """ if self.per_gpu_train_batch_size: logger.warning( "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future " "version. Using `--per_device_train_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size return per_device_batch_size * self.n_replicas
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
@property def eval_batch_size(self) -> int: """ The actual batch size for evaluation (may differ from `per_gpu_eval_batch_size` in distributed training). """ if self.per_gpu_eval_batch_size: logger.warning( "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future " "version. Using `--per_device_eval_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size return per_device_batch_size * self.n_replicas
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
@property def n_gpu(self) -> int: """ The number of replicas (CPUs, GPUs or TPU cores) used in this training. """ requires_backends(self, ["tf"]) warnings.warn( "The n_gpu argument is deprecated and will be removed in a future version, use n_replicas instead.", FutureWarning, ) return self._setup_strategy.num_replicas_in_sync
132
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args_tf.py
class SentencePieceExtractor: """ Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece """ def __init__(self, model: str): requires_backends(self, "sentencepiece") from sentencepiece import SentencePieceProcessor self.sp = SentencePieceProcessor() self.sp.Load(model) def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]: """ By default, returns the vocab and merges in their original order; if `vocab_scores` is provided, the merges are ordered by the piece scores instead. """ sp = self.sp vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())} merges = generate_merges(vocab, vocab_scores) return vocab, merges
133
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
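A brief, hedged usage sketch of the extractor above (the model path is hypothetical and `sentencepiece` must be installed):

# "spiece.model" is a placeholder path to a trained SentencePiece model file.
extractor = SentencePieceExtractor("spiece.model")
vocab, merges = extractor.extract()

# vocab maps each piece to its id, e.g. {"▁the": 42, ...};
# merges is a list of (piece_a, piece_b) pairs usable by a BPE model.
print(len(vocab), len(merges))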
class GemmaSentencePieceExtractor(SentencePieceExtractor): def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]: """ By default, returns the vocab and merges in their original order; if `vocab_scores` is provided, the merges are ordered by the piece scores instead. """ sp = self.sp vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())} # the tab token is missing from the vocab; we add it here to support merges # "<0x09>" is the byte fallback for `\t` vocab["\t"] = vocab.get("<0x09>") merges = generate_merges(vocab, vocab_scores) return vocab, merges
134
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class Converter: def __init__(self, original_tokenizer): self.original_tokenizer = original_tokenizer def converted(self) -> Tokenizer: raise NotImplementedError()
135
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class BertConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
136
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer
136
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
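As a hedged example of how a converter like the one above is driven (a sketch only; the model id is illustrative and the import path assumes the internal transformers module shown in the source column):

# Sketch assuming transformers and tokenizers are installed.
from transformers import BertTokenizer
from transformers.convert_slow_tokenizer import BertConverter

slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
fast_backend = BertConverter(slow_tokenizer).converted()

# The result is a tokenizers.Tokenizer with a BertNormalizer, BertPreTokenizer,
# TemplateProcessing post-processor and WordPiece decoder attached.
print(fast_backend.encode("Hello world").tokens)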
class SplinterConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
137
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) question = str(self.original_tokenizer.question_token) dot = "." cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id question_token_id = self.original_tokenizer.question_token_id dot_token_id = self.original_tokenizer.convert_tokens_to_ids(".") if self.original_tokenizer.padding_side == "right": pair = f"{cls}:0 $A:0 {question} {dot} {sep}:0 $B:1 {sep}:1" else: pair = f"{cls}:0 $A:0 {sep}:0 $B:1 {question} {dot} {sep}:1"
137
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=pair, special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), (question, question_token_id), (dot, dot_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer
137
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class FunnelConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
138
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:2 $A:0 {sep}:0", # token_type_id is 2 for Funnel transformer pair=f"{cls}:2 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer
138
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class MPNetConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
139
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 {sep}:0 $B:1 {sep}:1", # MPNet uses two [SEP] tokens special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer
139
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class OpenAIGPTConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) unk_token = self.original_tokenizer.unk_token tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, unk_token=str(unk_token), end_of_word_suffix="</w>", fuse_unk=False, ) ) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix="</w>") return tokenizer
140
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class GPT2Converter(Converter): def converted(self, vocab: Dict[str, int] = None, merges: List[Tuple[str, str]] = None) -> Tokenizer: if not vocab: vocab = self.original_tokenizer.encoder if not merges: merges = list(self.original_tokenizer.bpe_ranks) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) )
141
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
add_prefix_space = getattr(self.original_tokenizer, "add_prefix_space", False) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.ByteLevel() if getattr(self.original_tokenizer, "add_bos_token", False): bos = self.original_tokenizer.bos_token bos_token_id = self.original_tokenizer.bos_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{bos}:0 $A:0", pair=f"{bos}:0 $A:0 $B:1", special_tokens=[ (bos, bos_token_id), ], ) else: # XXX trim_offsets=False actually means this post_processor doesn't # really do anything. tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer
141
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
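To illustrate the byte-level pre-tokenization used by the GPT-2 converter above, a standalone sketch with the tokenizers library (the printed output is approximate, not taken from the source):

from tokenizers import pre_tokenizers

byte_level = pre_tokenizers.ByteLevel(add_prefix_space=False)
print(byte_level.pre_tokenize_str("Hello world!"))
# Roughly [('Hello', (0, 5)), ('Ġworld', (5, 11)), ('!', (11, 12))]:
# the leading space of " world" is mapped to the visible 'Ġ' byte-level marker.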
class HerbertConverter(Converter): def converted(self) -> Tokenizer: tokenizer_info_str = "#version:" token_suffix = "</w>" vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) if tokenizer_info_str in merges[0][0]: merges = merges[1:] tokenizer = Tokenizer( BPE( vocab, merges, dropout=None, unk_token=self.original_tokenizer.unk_token, end_of_word_suffix=token_suffix, ) )
142
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
tokenizer.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix=token_suffix) tokenizer.post_processor = processors.BertProcessing( sep=(self.original_tokenizer.sep_token, self.original_tokenizer.sep_token_id), cls=(self.original_tokenizer.cls_token, self.original_tokenizer.cls_token_id), ) return tokenizer
142
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class Qwen2Converter(Converter): def converted(self, vocab: Dict[str, int] = None, merges: List[Tuple[str, str]] = None) -> Tokenizer: if not vocab: vocab = self.original_tokenizer.encoder if not merges: merges = list(self.original_tokenizer.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, unk_token=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, byte_fallback=False, ) ) tokenizer.normalizer = normalizers.NFC()
143
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.Split( Regex( r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""" ), behavior="isolated", invert=False, ), pre_tokenizers.ByteLevel( add_prefix_space=getattr(self.original_tokenizer, "add_prefix_space", False), use_regex=False, ), ] ) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer
143
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class RobertaConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.RobertaProcessing( sep=(ot.sep_token, ot.sep_token_id), cls=(ot.cls_token, ot.cls_token_id), add_prefix_space=ot.add_prefix_space, trim_offsets=True, # True by default on Roberta (historical) ) return tokenizer
144
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class RoFormerConverter(Converter): def converted(self) -> Tokenizer: from .models.roformer.tokenization_utils import JiebaPreTokenizer vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=False, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.PreTokenizer.custom(JiebaPreTokenizer(vocab))
145
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer
145
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class DebertaConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], ) return tokenizer
146
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class SpmConverter(Converter): handle_byte_fallback = False SpmExtractor = SentencePieceExtractor special_tokens = {} def __init__(self, *args): requires_backends(self, "protobuf") super().__init__(*args) # from .utils import sentencepiece_model_pb2 as model_pb2 model_pb2 = import_protobuf() m = model_pb2.ModelProto() with open(self.original_tokenizer.vocab_file, "rb") as f: m.ParseFromString(f.read()) self.proto = m
147
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
if self.proto.trainer_spec.byte_fallback and not self.handle_byte_fallback: warnings.warn( "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option" " which is not implemented in the fast tokenizers. In practice this means that the fast version of the" " tokenizer can produce unknown tokens whereas the sentencepiece version would have converted these " "unknown tokens into a sequence of byte tokens matching the original piece of text." ) def vocab(self, proto): return [(piece.piece, piece.score) for piece in proto.pieces] def unk_id(self, proto): return proto.trainer_spec.unk_id def tokenizer(self, proto): model_type = proto.trainer_spec.model_type vocab_scores = self.vocab(proto)
147
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
if model_type == 1: tokenizer = Tokenizer( Unigram( vocab_scores, unk_id=self.unk_id(proto), byte_fallback=self.handle_byte_fallback, ) ) elif model_type == 2: _, merges = self.SpmExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores) bpe_vocab = {word: i for i, (word, score) in enumerate(vocab_scores)} tokenizer = Tokenizer( BPE( bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=self.handle_byte_fallback, dropout=None, ) ) else: raise Exception( "You're trying to run a `Unigram` model but your file was trained with a different algorithm" )
147
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
# control tokens are special # user defined symbols are not # both user and control tokens are AddedTokens # Add user defined symbols (type == 4) from sentencepiece (https://github.com/google/sentencepiece/blob/6225e08edb2577757163b3f5dbba4c0b670ef445/src/sentencepiece_model.proto#L299C29-L299C33) spm_added_tokens = [ (id, p.piece, p.type == 3 or p.piece in self.special_tokens) for id, p in enumerate(proto.pieces) if p.type in [3, 4] ] tokenizer.add_tokens( [ AddedToken(token, normalized=False, special=special) for id, token, special in sorted(spm_added_tokens, key=lambda x: x[0]) ] ) return tokenizer
147
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap _normalizers = [ normalizers.Strip(left=False, right=True), # stripping is important normalizers.Replace(Regex(" {2,}"), "▁"), ] if not precompiled_charsmap: return normalizers.Sequence(_normalizers) else: return normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap)] + _normalizers) def pre_tokenizer(self, replacement, add_prefix_space): prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) def post_processor(self): return None def decoder(self, replacement, add_prefix_space): prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) return decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
147
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def converted(self) -> Tokenizer: tokenizer = self.tokenizer(self.proto) # Tokenizer assemble normalizer = self.normalizer(self.proto) if normalizer is not None: tokenizer.normalizer = normalizer replacement = "▁" add_prefix_space = True if hasattr(self.original_tokenizer, "add_prefix_space"): add_prefix_space = self.original_tokenizer.add_prefix_space pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space) if pre_tokenizer is not None: tokenizer.pre_tokenizer = pre_tokenizer tokenizer.decoder = self.decoder(replacement, add_prefix_space) post_processor = self.post_processor() if post_processor: tokenizer.post_processor = post_processor return tokenizer
147
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
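To illustrate the Metaspace pre-tokenizer/decoder pair assembled in the converted() method above, a standalone sketch using the tokenizers library directly (the sample string is arbitrary and the outputs shown in comments are approximate):

from tokenizers import pre_tokenizers, decoders

pre_tok = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="always")
print(pre_tok.pre_tokenize_str("Hello world"))
# Pieces like ('▁Hello', ...) and ('▁world', ...): spaces become the ▁ marker.

dec = decoders.Metaspace(replacement="▁", prepend_scheme="always")
print(dec.decode(["▁Hello", "▁world"]))
# -> "Hello world" (the ▁ markers become spaces again)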
class AlbertConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): list_normalizers = [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), ] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap if precompiled_charsmap: list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) return normalizers.Sequence(list_normalizers)
148
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], )
148
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class BarthezConverter(SpmConverter): def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
149
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class CamembertConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ("<unk>NOTUSED", -100), ] # We down-grade the original SentencePiece by -100 to avoid using it and use our added token instead vocab += [(piece.piece, piece.score) for piece in proto.pieces[1:]] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
150
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class DebertaV2Converter(SpmConverter): def pre_tokenizer(self, replacement, add_prefix_space): list_pretokenizers = [] if self.original_tokenizer.split_by_punct: list_pretokenizers.append(pre_tokenizers.Punctuation(behavior="isolated")) prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) list_pretokenizers.append(pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)) return pre_tokenizers.Sequence(list_pretokenizers) def normalizer(self, proto): list_normalizers = [] if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) list_normalizers.append(normalizers.Strip())
151
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap if precompiled_charsmap: list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], )
151
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class MBartConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [ ("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ]
152
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
vocab += [("<mask>", 0.0)] return vocab
152
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def unk_id(self, proto): return 3 def post_processor(self): return processors.TemplateProcessing( single="$A </s> en_XX", pair="$A $B </s> en_XX", special_tokens=[ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
152
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class MBart50Converter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
153
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
vocab += [("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ("af_ZA", 0.0), ("az_AZ", 0.0), ("bn_IN", 0.0), ("fa_IR", 0.0), ("he_IL", 0.0), ("hr_HR", 0.0), ("id_ID", 0.0), ("ka_GE", 0.0), ("km_KH", 0.0), ("mk_MK", 0.0), ("ml_IN", 0.0), ("mn_MN", 0.0), ("mr_IN", 0.0), ("pl_PL", 0.0), ("ps_AF", 0.0), ("pt_XX", 0.0), ("sv_SE", 0.0), ("sw_KE", 0.0), ("ta_IN", 0.0), ("te_IN", 0.0), ("th_TH", 0.0), ("tl_XX", 0.0), ("uk_UA", 0.0), ("ur_PK", 0.0), ("xh_ZA", 0.0), ("gl_ES", 0.0), ("sl_SI", 0.0)] # fmt: skip vocab += [("<mask>", 0.0)] return vocab
153
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def unk_id(self, proto): return 3 def post_processor(self): return processors.TemplateProcessing( single="en_XX $A </s>", pair="en_XX $A $B </s>", special_tokens=[ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
153
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class NllbConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): return 3 def post_processor(self): return processors.TemplateProcessing( single="eng_Latn $A </s>", pair="eng_Latn $A $B </s>", special_tokens=[ ("eng_Latn", self.original_tokenizer.convert_tokens_to_ids("eng_Latn")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
154
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class SeamlessM4TConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<pad>", 0.0), ("<unk>", 0.0), ("<s>", 0.0), ("</s>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): return self.original_tokenizer.unk_token_id def post_processor(self): return processors.TemplateProcessing( single="__eng__ $A </s>", pair="__eng__ $A $B </s>", special_tokens=[ ("__eng__", self.original_tokenizer.convert_tokens_to_ids("__eng__")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
155
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class XLMRobertaConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
156
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class XLNetConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): list_normalizers = [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), ] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap if precompiled_charsmap: list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) return normalizers.Sequence(list_normalizers)
157
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def post_processor(self): return processors.TemplateProcessing( single="$A:0 <sep>:0 <cls>:2", pair="$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2", special_tokens=[ ("<sep>", self.original_tokenizer.convert_tokens_to_ids("<sep>")), ("<cls>", self.original_tokenizer.convert_tokens_to_ids("<cls>")), ], )
157
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class ReformerConverter(SpmConverter): pass
158
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class RemBertConverter(SpmConverter): # Inspired from AlbertConverter def normalizer(self, proto): list_normalizers = [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), normalizers.Replace(Regex(" {2,}"), " "), ] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap if precompiled_charsmap: list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) return normalizers.Sequence(list_normalizers)
159
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], )
159
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class BertGenerationConverter(SpmConverter): pass
160
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class PegasusConverter(SpmConverter): def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0.0), (self.original_tokenizer.eos_token, 0.0), ] if self.original_tokenizer.mask_token_sent is not None: vocab += [(self.original_tokenizer.mask_token_sent, 0.0)] if ( self.original_tokenizer.mask_token is not None and self.original_tokenizer.mask_token_id < self.original_tokenizer.offset ): vocab += [(self.original_tokenizer.mask_token, 0.0)] vocab += [(f"<unk_{i}>", -100.0) for i in range(2, self.original_tokenizer.offset)] vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] return vocab def unk_id(self, proto): return proto.trainer_spec.unk_id + self.original_tokenizer.offset
161
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def pre_tokenizer(self, replacement, add_prefix_space): prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) return pre_tokenizers.Sequence( [ pre_tokenizers.WhitespaceSplit(), pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme), ] ) def post_processor(self): eos = self.original_tokenizer.eos_token special_tokens = [ (eos, self.original_tokenizer.eos_token_id), ] return processors.TemplateProcessing(single=["$A", eos], pair=["$A", "$B", eos], special_tokens=special_tokens)
161
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class T5Converter(SpmConverter): def vocab(self, proto): num_extra_ids = self.original_tokenizer._extra_ids vocab = [(piece.piece, piece.score) for piece in proto.pieces] vocab += [(f"<extra_id_{i}>", 0.0) for i in range(num_extra_ids - 1, -1, -1)] return vocab def post_processor(self): return processors.TemplateProcessing( single=["$A", "</s>"], pair=["$A", "</s>", "$B", "</s>"], special_tokens=[ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
162
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class UdopConverter(SpmConverter): def post_processor(self): return processors.TemplateProcessing( single=["$A", "</s>"], pair=["$A", "</s>", "$B", "</s>"], special_tokens=[ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
163
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class WhisperConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space) tokenizer.decoder = decoders.ByteLevel()
164
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
prefix_token_ids = self.original_tokenizer.prefix_tokens prefixes = self.original_tokenizer.convert_ids_to_tokens(prefix_token_ids) eos = self.original_tokenizer.eos_token eos_token_id = self.original_tokenizer.eos_token_id prefix_template = " ".join([f"{token}:0" for token in prefixes]) tokenizer.post_processor = processors.TemplateProcessing( single=f"{prefix_template} $A:0 {eos}:0", pair=f"{prefix_template} $A:0 $B:1 {eos}:1", special_tokens=[ (eos, eos_token_id), *zip(prefixes, prefix_token_ids), ], ) return tokenizer
164
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class BigBirdConverter(SpmConverter): def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], )
165
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class CLIPConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) unk_token = self.original_tokenizer.unk_token tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="</w>", fuse_unk=False, unk_token=str(unk_token), ) )
166
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
tokenizer.normalizer = normalizers.Sequence( [normalizers.NFC(), normalizers.Replace(Regex(r"\s+"), " "), normalizers.Lowercase()] ) tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.Split( Regex(r"""'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+"""), behavior="removed", invert=True, ), pre_tokenizers.ByteLevel(add_prefix_space=False), ] ) tokenizer.decoder = decoders.ByteLevel() # Hack to have a ByteLevel and TemplateProcessor tokenizer.post_processor = processors.RobertaProcessing( sep=(self.original_tokenizer.eos_token, self.original_tokenizer.eos_token_id), cls=(self.original_tokenizer.bos_token, self.original_tokenizer.bos_token_id), add_prefix_space=False, trim_offsets=False, ) return tokenizer
166
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class LayoutLMv2Converter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = True if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
167
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer
167
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class BlenderbotConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.TemplateProcessing( single=f"$A:0 {ot.eos_token}:0", special_tokens=[ (ot.eos_token, ot.eos_token_id), ], ) return tokenizer
168
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class XGLMConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [("<madeupword0>", 0.0), ("<madeupword1>", 0.0), ("<madeupword2>", 0.0), ("<madeupword3>", 0.0), ("<madeupword4>", 0.0), ("<madeupword5>", 0.0), ("<madeupword6>", 0.0)] # fmt: skip return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self): return processors.TemplateProcessing( single="</s> $A", pair="</s> $A </s> </s> $B", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
169
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class GemmaConverter(SpmConverter): handle_byte_fallback = True SpmExtractor = GemmaSentencePieceExtractor # start and end of turn tokens must be marked as special special_tokens = {"<start_of_turn>", "<end_of_turn>"} """ split_by_unicode_script: true split_by_number: true split_by_whitespace: true treat_whitespace_as_suffix: false allow_whitespace_only_pieces: true split_digits: true byte_fallback: true """ def normalizer(self, proto): return normalizers.Replace(" ", "▁")
170
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0.0), (self.original_tokenizer.eos_token, 0.0), (self.original_tokenizer.bos_token, 0.0), ] for piece in proto.pieces[3:]: if piece.piece == "<0x09>": vocab += [("\t", piece.score)] else: vocab += [(piece.piece, piece.score)] # vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def pre_tokenizer(self, replacement, add_prefix_space): return pre_tokenizers.Split(" ", "merged_with_previous") def unk_id(self, proto): unk_id = 3 return unk_id def decoder(self, replacement, add_prefix_space): return decoders.Sequence( [ decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), ] )
170
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class LlamaConverter(SpmConverter): handle_byte_fallback = True def vocab(self, proto): vocab = [ (self.original_tokenizer.convert_ids_to_tokens(0), 0.0), (self.original_tokenizer.convert_ids_to_tokens(1), 0.0), (self.original_tokenizer.convert_ids_to_tokens(2), 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): unk_id = 0 return unk_id def decoder(self, replacement, add_prefix_space): sequence = [ decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), ] if add_prefix_space: sequence += [decoders.Strip(content=" ", left=1)] return decoders.Sequence(sequence)
171
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def normalizer(self, proto): if getattr(self.original_tokenizer, "legacy", True): sequence = [] if getattr(self.original_tokenizer, "add_prefix_space", True): sequence += [normalizers.Prepend(prepend="▁")] sequence += [normalizers.Replace(pattern=" ", content="▁")] return normalizers.Sequence(sequence) return None # non-legacy, no normalizer def pre_tokenizer(self, replacement, add_prefix_space): if not getattr(self.original_tokenizer, "legacy", True): # non-legacy, we need a replace prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme, split=False) return None def post_processor(self): # the processor is defined in the LlamaTokenizerFast class. return None
171
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class MarkupLMConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, unk_token=self.original_tokenizer.unk_token, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id
172
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls} $A {sep}", pair=f"{cls} $A {sep} $B {sep}", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) return tokenizer
172
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class MoshiConverter(SpmConverter): handle_byte_fallback = True def __init__(self, vocab_file, model_max_length=None, **kwargs): requires_backends(self, "protobuf") Converter.__init__(self, vocab_file) # from .utils import sentencepiece_model_pb2 as model_pb2 model_pb2 = import_protobuf() m = model_pb2.ModelProto() with open(vocab_file, "rb") as f: m.ParseFromString(f.read()) self.proto = m def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap _normalizers = [ normalizers.Replace(" ", "▁"), ] if not precompiled_charsmap: return normalizers.Sequence(_normalizers) else: return normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap)] + _normalizers)
173
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def decoder(self, replacement, add_prefix_space): sequence = [ decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), ] if add_prefix_space: sequence += [decoders.Strip(content=" ", left=1)] return decoders.Sequence(sequence) def pre_tokenizer(self, replacement, add_prefix_space): prepend_scheme = "first" return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme, split=False)
173
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class HeliumConverter(SpmConverter): handle_byte_fallback = True def __init__(self, vocab_file=None, *args): requires_backends(self, "protobuf") Converter.__init__(self, vocab_file) model_pb2 = import_protobuf() m = model_pb2.ModelProto() with open(vocab_file, "rb") as f: m.ParseFromString(f.read()) self.proto = m
174
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def tokenizer(self, proto): vocab_scores = self.vocab(proto) tokenizer = Tokenizer( Unigram( vocab_scores, unk_id=self.unk_id(proto), byte_fallback=self.handle_byte_fallback, ) ) # control tokens are special # user defined symbols are not # both user and control tokens are AddedTokens # Add user defined symbols (type == 4) from sentencepiece (https://github.com/google/sentencepiece/blob/6225e08edb2577757163b3f5dbba4c0b670ef445/src/sentencepiece_model.proto#L299C29-L299C33) spm_added_tokens = [ (id, p.piece, p.type == 3 or p.piece in self.special_tokens) for id, p in enumerate(proto.pieces) if p.type in [3, 4] ] tokenizer.add_tokens( [ AddedToken(token, normalized=False, special=special, single_word=True) for id, token, special in sorted(spm_added_tokens, key=lambda x: x[0])
174
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
] ) tokenizer.add_tokens([AddedToken("\n", normalized=False, special=False)]) tokenizer.enable_padding(pad_token="<pad>", pad_id=3) return tokenizer
174
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def vocab(self, proto): vocab = [] for piece in proto.pieces: if piece.piece == "<0x0A>": vocab += [("\n", piece.score)] else: vocab += [(piece.piece, piece.score)] return vocab def unk_id(self, proto): unk_id = 0 return unk_id def decoder(self, replacement, add_prefix_space): sequence = [ decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), ] sequence += [decoders.Strip(content=" ", left=1)] return decoders.Sequence(sequence) def normalizer(self, proto): return normalizers.Sequence([normalizers.Prepend(" "), normalizers.Replace(r" ", "▁")]) def pre_tokenizer(self, replacement, add_prefix_space): return pre_tokenizers.Sequence([pre_tokenizers.Split("\n", "contiguous")])
174
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def post_processor(self): return processors.TemplateProcessing( single=[ "<s>", "$A", ], pair=[ "<s>", "$A", "<s>", "$B", ], special_tokens=[ ("<s>", 1), ], )
174
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class TikTokenConverter: """ A general tiktoken converter. """ def __init__( self, vocab_file=None, pattern=r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""", add_prefix_space=False, additional_special_tokens=None, *args, **kwargs, ): super().__init__(*args) self.vocab_file = vocab_file self.pattern = pattern self.add_prefix_space = add_prefix_space self.additional_special_tokens = additional_special_tokens def extract_vocab_merges_from_model(self, tiktoken_url: str): try: from tiktoken.load import load_tiktoken_bpe except Exception: raise ValueError( "`tiktoken` is required to read a `tiktoken` file. Install it with " "`pip install tiktoken`." ) bpe_ranks = load_tiktoken_bpe(tiktoken_url) byte_encoder = bytes_to_unicode()
175
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
def token_bytes_to_string(b): return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")]) merges = [] vocab = {} for token, rank in bpe_ranks.items(): vocab[token_bytes_to_string(token)] = rank if len(token) == 1: continue local = [] for index in range(1, len(token)): piece_l, piece_r = token[:index], token[index:] if piece_l in bpe_ranks and piece_r in bpe_ranks and (piece_l + piece_r) in bpe_ranks: local.append((piece_l, piece_r, rank)) local = sorted(local, key=lambda x: (bpe_ranks[x[0]], bpe_ranks[x[1]]), reverse=False) merges.extend(local) merges = sorted(merges, key=lambda val: val[2], reverse=False) merges = [(token_bytes_to_string(val[0]), token_bytes_to_string(val[1])) for val in merges] return vocab, merges
175
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
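As a small, self-contained illustration of the merge-reconstruction idea used above (toy ranks, not real tiktoken data): every token longer than one byte is split at each position, a split whose two halves are themselves ranked tokens becomes a merge, and the merges are ordered by the rank of the merged token.

# Toy byte-pair ranks standing in for a real tiktoken file.
bpe_ranks = {b"a": 0, b"b": 1, b"ab": 2, b"c": 3, b"abc": 4, b"bc": 5}

merges = []
for token, rank in bpe_ranks.items():
    if len(token) == 1:
        continue
    for i in range(1, len(token)):
        left, right = token[:i], token[i:]
        if left in bpe_ranks and right in bpe_ranks:
            merges.append((left, right, rank))

# Sort merges by the rank of the token they produce.
merges.sort(key=lambda m: m[2])
print([(left, right) for left, right, _ in merges])
# -> [(b'a', b'b'), (b'a', b'bc'), (b'ab', b'c'), (b'b', b'c')]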
def tokenizer(self): vocab_scores, merges = self.extract_vocab_merges_from_model(self.vocab_file) tokenizer = Tokenizer(BPE(vocab_scores, merges, fuse_unk=False)) if hasattr(tokenizer.model, "ignore_merges"): tokenizer.model.ignore_merges = True return tokenizer def converted(self) -> Tokenizer: tokenizer = self.tokenizer() tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.Split(Regex(self.pattern), behavior="isolated", invert=False), pre_tokenizers.ByteLevel(add_prefix_space=self.add_prefix_space, use_regex=False), ] ) tokenizer.decoder = decoders.ByteLevel() tokenizer.add_special_tokens(self.additional_special_tokens) tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer
175
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_slow_tokenizer.py
class BaseImageProcessor(ImageProcessingMixin): def __init__(self, **kwargs): super().__init__(**kwargs) def __call__(self, images, **kwargs) -> BatchFeature: """Preprocess an image or a batch of images.""" return self.preprocess(images, **kwargs) def preprocess(self, images, **kwargs) -> BatchFeature: raise NotImplementedError("Each image processor must implement its own preprocess method") def rescale( self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Rescale an image by a scale factor. image = image * scale.
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
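A minimal sketch (not a real transformers class) of how a concrete processor plugs into the base class above: only `preprocess` has to be implemented, and `__call__` dispatches to it.

# Hedged sketch of the __call__ -> preprocess dispatch; ToyImageProcessor is hypothetical.
import numpy as np
from transformers.image_processing_utils import BaseImageProcessor, BatchFeature

class ToyImageProcessor(BaseImageProcessor):
    def preprocess(self, images, **kwargs) -> BatchFeature:
        # Illustrative only: scale pixel values to [0, 1].
        images = [np.asarray(image, dtype=np.float32) / 255.0 for image in images]
        return BatchFeature(data={"pixel_values": images})

processor = ToyImageProcessor()
outputs = processor([np.zeros((3, 224, 224), dtype=np.uint8)])  # __call__ forwards to preprocess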
Args:
            image (`np.ndarray`):
                Image to rescale.
            scale (`float`):
                The scaling factor to rescale pixel values by.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the
                input image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
Returns:
            `np.ndarray`: The rescaled image.
        """
        return rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Normalize an image. image = (image - image_mean) / image_std.
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
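The rescale operation documented above is just an element-wise multiplication; a minimal NumPy illustration, with an illustrative scale of 1/255:

import numpy as np

image = np.array([[0, 128, 255]], dtype=np.uint8)
rescaled = image.astype(np.float32) * (1 / 255)  # image = image * scale
# values now lie in [0.0, 1.0]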
Args:
            image (`np.ndarray`):
                Image to normalize.
            mean (`float` or `Iterable[float]`):
                Image mean to use for normalization.
            std (`float` or `Iterable[float]`):
                Image standard deviation to use for normalization.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the
                input image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
Returns:
            `np.ndarray`: The normalized image.
        """
        return normalize(
            image, mean=mean, std=std, data_format=data_format, input_data_format=input_data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
        any edge, the image is padded with 0's and then center cropped.
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
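Similarly, the normalization above is (image - mean) / std applied per channel; a small channels-last NumPy sketch, with illustrative mean/std values:

import numpy as np

image = np.random.rand(224, 224, 3).astype(np.float32)    # channels_last
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)  # illustrative per-channel mean
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)   # illustrative per-channel std
normalized = (image - mean) / std                          # broadcasts over the channel axis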
Args:
            image (`np.ndarray`):
                Image to center crop.
            size (`Dict[str, int]`):
                Size of the output image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the
                input image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}") return center_crop( image, size=(size["height"], size["width"]), data_format=data_format, input_data_format=input_data_format, **kwargs, )
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
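A bare-bones sketch of the center-crop geometry for the simple case where the input is at least as large as the requested size; the padding branch mentioned in the docstring above is omitted, and `toy_center_crop` is a hypothetical helper, not the library's `center_crop`.

import numpy as np

def toy_center_crop(image: np.ndarray, height: int, width: int) -> np.ndarray:
    # Assumes a channels_last image that is already >= the crop size.
    top = (image.shape[0] - height) // 2
    left = (image.shape[1] - width) // 2
    return image[top : top + height, left : left + width, ...]

cropped = toy_center_crop(np.zeros((256, 256, 3)), 224, 224)
print(cropped.shape)  # (224, 224, 3)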
def to_dict(self):
        encoder_dict = super().to_dict()
        encoder_dict.pop("_valid_processor_keys", None)
        return encoder_dict
176
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/image_processing_utils.py
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """
    This is a general feature extraction class for speech recognition.

    Args:
        feature_size (`int`):
            The feature dimension of the extracted features.
        sampling_rate (`int`):
            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
        padding_value (`float`):
            The value that is used to fill the padding values / vectors.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
177
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/feature_extraction_sequence_utils.py
def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """
        Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the
        max sequence length in the batch.

        Padding side (left/right) and padding values are defined at the feature extractor level (with
        `self.padding_side`, `self.padding_value`).

        <Tip>
177
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/feature_extraction_sequence_utils.py
If the `processed_features` passed are dictionaries of numpy arrays, PyTorch tensors or TensorFlow tensors, the
        result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
        PyTorch tensors, you will lose the specific device of your tensors however.

        </Tip>

        Args:
            processed_features ([`BatchFeature`], list of [`BatchFeature`], `Dict[str, List[float]]`, `Dict[str, List[List[float]]]` or `List[Dict[str, List[float]]]`):
                Processed inputs. Can represent one input ([`BatchFeature`] or `Dict[str, List[float]]`) or a batch of
                input values / vectors (list of [`BatchFeature`], *Dict[str, List[List[float]]]* or *List[Dict[str,
                List[float]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
                collate function.
177
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/feature_extraction_sequence_utils.py
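A hedged usage sketch of the `pad` method described above, called through a concrete subclass; `Wav2Vec2FeatureExtractor` is used only as a convenient sequence feature extractor, and the raw values are dummy data.

import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
batch = {"input_values": [np.ones(8, dtype=np.float32), np.ones(5, dtype=np.float32)]}
padded = feature_extractor.pad(batch, padding=True, return_attention_mask=True, return_tensors="np")
print(padded["input_values"].shape)    # (2, 8): the shorter example is padded with 0.0 on the right
print(padded["attention_mask"].shape)  # (2, 8)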