| """Fast Tokenization class for Blenderbot.""" |
| import json |
| from typing import List, Optional, Tuple |
|
|
| from tokenizers import pre_tokenizers, processors |
|
|
| from ...tokenization_utils_base import AddedToken, BatchEncoding |
| from ...tokenization_utils_fast import PreTrainedTokenizerFast |
| from ...utils import logging |
| from .tokenization_blenderbot import BlenderbotTokenizer |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
|
|
| VOCAB_FILES_NAMES = { |
| "vocab_file": "vocab.json", |
| "merges_file": "merges.txt", |
| "tokenizer_config_file": "tokenizer_config.json", |
| } |
|
|
| PRETRAINED_VOCAB_FILES_MAP = { |
| "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, |
| "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, |
| "tokenizer_config_file": { |
| "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" |
| }, |
| } |
|
|
| PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128} |
|
|
|
|
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer, using byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import BlenderbotTokenizerFast

    >>> tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    >>> tokenizer("Hello world")["input_ids"]
    [6950, 1085, 2]

    >>> tokenizer(" Hello world")["input_ids"]
    [6950, 1085, 2]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.

    </Tip>
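
    For input that is already split into words, a minimal sketch (token IDs are omitted here because they depend on
    the vocabulary):

    ```python
    >>> tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B", add_prefix_space=True)
    >>> encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
    ```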

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows the leading word to be treated like any
            other word. (The Blenderbot tokenizer detects the beginning of words by the preceding space.)
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether the post processing step should trim offsets to avoid including whitespaces.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it (lstrip=True).
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Make sure the byte-level pre-tokenizer's `add_prefix_space` flag matches the value requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # Also keep the post-processor in sync with `add_prefix_space` and `trim_offsets`.
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The `sep` and `cls` entries are serialized as lists, but the post-processor class expects tuples.
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """
        `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
        having been set.

        Blenderbot tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will
        greedily comprise the space before the *<mask>*.
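
        Example (assuming the default `"<mask>"` mask token of the `facebook/blenderbot-3B` checkpoint):

        ```python
        >>> from transformers import BlenderbotTokenizerFast

        >>> tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
        >>> tokenizer.mask_token
        '<mask>'
        ```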
| """ |
| if self._mask_token is None: |
| if self.verbose: |
| logger.error("Using mask_token, but it is not set yet.") |
| return None |
| return str(self._mask_token) |
|
|
| @mask_token.setter |
| def mask_token(self, value): |
| """ |
| Overriding the default behavior of the mask token to have it eat the space before it. |
| |
| This is needed to preserve backward compatibility with all the previously used models based on Roberta. |
| """ |
| |
| |
| value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value |
| self._mask_token = value |
|
|
| |
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does
        not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
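
        A minimal sketch (the IDs `5` and `6` are placeholders; one zero is returned per token of
        `cls + token_ids_0 + sep`):

        ```python
        >>> tokenizer.create_token_type_ids_from_sequences([5, 6])
        [0, 0, 0, 0]
        ```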
| """ |
| sep = [self.sep_token_id] |
| cls = [self.cls_token_id] |
|
|
| if token_ids_1 is None: |
| return len(cls + token_ids_0 + sep) * [0] |
| return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] |
|
|
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A Blenderbot sequence has the following format:

        - single sequence: ` X </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Will be ignored.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
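
        A minimal sketch (the IDs `5` and `6` are placeholders; the trailing `2` assumes `eos_token_id == 2`, as for
        `facebook/blenderbot-3B`):

        ```python
        >>> tokenizer.build_inputs_with_special_tokens([5, 6])
        [5, 6, 2]
        ```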
| """ |
| return token_ids_0 + [self.eos_token_id] |
|
|
| @property |
| |
| def default_chat_template(self): |
| """ |
| A very simple chat template that just adds whitespace between messages. |
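
        For illustration, a sketch of the rendered output (assuming `eos_token` is `"</s>"` and no explicit
        `chat_template` has been set, so this default template is used):

        ```python
        >>> messages = [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello"}]
        >>> tokenizer.apply_chat_template(messages, tokenize=False)
        ' Hi Hello</s>'
        ```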
| """ |
| return ( |
| "{% for message in messages %}" |
| "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}" |
| "{{ message['content'] }}" |
| "{% if not loop.last %}{{ ' ' }}{% endif %}" |
| "{% endfor %}" |
| "{{ eos_token }}" |
| ) |
|
|