"""Tokenization class for model EvaByte."""

from typing import List, Optional, Tuple

from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging


logger = logging.get_logger(__name__)


chat_template = """
{{- bos_token }}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] %}
{%- set messages = messages[1:] %}
{%- else %}
{%- set system_message = "" %}
{%- endif %}
{{- '<|start_header_id|>system<|end_header_id|>\n\n' + system_message + '<|eot_id|>' }}
{%- for message in messages %}
{%- if (message['role'] != 'user') and (message['role'] != 'assistant') %}
{{- raise_exception('Conversation roles must be user or assistant') }}
{%- endif %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %}
"""


class EvaByteTokenizer(PreTrainedTokenizer):
    def __init__(
        self,
        bos_token="<bos>",
        eos_token="<eos>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        extra_ids=59,
        additional_special_tokens=None,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ) -> None:
        num_base_special_tokens = 5

        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [
                f"<extra_id_{i}>" for i in range(num_base_special_tokens, extra_ids + num_base_special_tokens)
            ]
        elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0:
            # Check that the user-supplied special tokens cover all requested extra ids.
            extra_tokens = len(set(filter(lambda x: "extra_id" in str(x), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to EvaByteTokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )
        if additional_special_tokens is None:
            # extra_ids == 0 and nothing supplied: fall back to an empty list so the
            # loops below do not fail on None.
            additional_special_tokens = []

        # Rename a handful of placeholder extra-id tokens to their semantic names
        # (code markers, text/vision markers, and chat-format markers).
        for i, token in enumerate(additional_special_tokens):
            if token == "<extra_id_5>":
                token = "<repo_name>"
            elif token == "<extra_id_6>":
                token = "<file_sep>"
            elif token == "<extra_id_7>":
                token = "<t2v_token>"
            elif token == "<extra_id_8>":
                token = "<v2t_token>"
            elif token == "<extra_id_9>":
                token = "<|start_header_id|>"
            elif token == "<extra_id_10>":
                token = "<|end_header_id|>"
            elif token == "<extra_id_11>":
                token = "<|eot_id|>"
            additional_special_tokens[i] = token

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Special tokens occupy the low ids; raw bytes are shifted up by `offset`.
        self._added_tokens_decoder = {
            0: pad_token,
            1: bos_token,
            2: eos_token,
            3: unk_token,
            4: sep_token,
            **{
                i: AddedToken(t, lstrip=False, rstrip=False)
                for i, t in enumerate(additional_special_tokens, start=num_base_special_tokens)
            },
        }
        self.offset = len(self._added_tokens_decoder)
        self._utf_vocab_size = 2**8  # one id per possible byte value
        self.add_bos_token = True
        self.add_eos_token = False
        super().__init__(
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            extra_ids=0,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.chat_template = chat_template
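
    # Id layout with the default arguments (5 base special tokens + 59 extra ids):
    #   ids 0-4    -> pad, bos, eos, unk, sep
    #   ids 5-63   -> additional special tokens
    #   ids 64-319 -> the 256 raw byte values (byte b maps to id b + offset)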

    @property
    def vocab_size(self):
        return self._utf_vocab_size

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by wrapping one or two sequences with BOS/EOS, as configured."""
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output
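
    # For example, with the defaults add_bos_token=True and add_eos_token=False
    # (bos id 1):
    #   build_inputs_with_special_tokens([10, 11]) == [1, 10, 11]
    #   build_inputs_with_special_tokens([10, 11], [12]) == [1, 10, 11, 1, 12]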

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )
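
    # For example, with the defaults (BOS prepended, no EOS):
    #   get_special_tokens_mask([10, 11]) == [1, 0, 0]
    #   get_special_tokens_mask([10, 11], [12]) == [1, 0, 0, 1, 0]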

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. The
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        If `token_ids_1` is `None`, only the first portion of the mask (0s) is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)

        return output
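
    # For example, with BOS counted in each segment and no EOS:
    #   create_token_type_ids_from_sequences([10, 11], [12]) == [0, 0, 0, 1, 1]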

    def _tokenize(self, text: str) -> List[str]:
        """Takes a string and returns a list of single-character tokens, one per UTF-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
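
    # For example, ASCII characters map to one token each, while non-ASCII
    # characters expand to one token per UTF-8 byte:
    #   _tokenize("é") == ["\xc3", "\xa9"]   # "é" encodes to bytes 0xC3 0xA9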

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if len(token) != 1:
            # Multi-character strings are not byte tokens; returning None lets the
            # base class fall back to added-token / unk handling.
            token_id = None
        else:
            token_id = ord(token) + self.offset

        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a byte (str) using the vocab."""
        token = chr(index - self.offset)
        return token
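
    # Worked example: with the default layout, self.offset == 64 (5 base special
    # tokens + 59 extra ids), so the byte for "a" (0x61 == 97) maps to id
    # 97 + 64 == 161, and id 161 maps back to chr(161 - 64) == "a".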

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (byte characters and added tokens) to a single string."""
        bstring = b""
        for token in tokens:
            if token in self.added_tokens_encoder:
                # Added special tokens are kept verbatim in their UTF-8 form.
                tok_string = token.encode("utf-8")
            else:
                # Everything else is a single character standing in for one raw byte.
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # Byte-level tokenizer: there is no vocabulary file to write.
        return ()
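

# A minimal usage sketch (illustrative; the expected ids follow from the default
# layout above, where byte b maps to id b + 64 and bos has id 1):
if __name__ == "__main__":
    tokenizer = EvaByteTokenizer()
    ids = tokenizer("hello")["input_ids"]
    # "hello" -> bytes 104, 101, 108, 108, 111 -> [1, 168, 165, 172, 172, 175]
    print(ids)
    # Decoding strips the special tokens and re-assembles the raw bytes.
    print(tokenizer.decode(ids, skip_special_tokens=True))  # -> "hello"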