from functools import lru_cache
from typing import Any, Dict, List, Optional, Tuple

import tiktoken
from transformers import PreTrainedTokenizer


@lru_cache()
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to unicode strings.

    We specifically avoid mapping to the whitespace/control characters the
    bpe code barfs on. The reversible bpe codes work on unicode strings. This
    means you need a large # of unicode characters in your vocab if you want
    to avoid UNKs. When you're at something like a 10B token dataset you end
    up needing around 5K for decent coverage. This is a significant
    percentage of your normal, say, 32K bpe vocab. To avoid that, we want
    lookup tables between utf-8 bytes and unicode strings.
    """
    bs = (list(range(ord('!'), ord('~') + 1)) +
          list(range(ord('¡'), ord('¬') + 1)) +
          list(range(ord('®'), ord('ÿ') + 1)))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
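

# Illustrative check (the values follow directly from the construction above):
# printable ASCII bytes map to themselves, while bytes the bpe code chokes on
# are shifted into code points starting at chr(256).
#
#   b2u = bytes_to_unicode()
#   b2u[ord('!')]  # '!'
#   b2u[ord(' ')]  # 'Ġ', i.e. chr(256 + 32), the familiar byte-level space marker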


def add_special_tokens_to_tiktoken(base="cl100k_base",
                                   eos_token=None,
                                   pad_token=None):
    """Extends a base tiktoken encoding with extra special tokens."""

    def include_dobf_tokens():
        return [f"<dobf_special_{i}>" for i in range(18)]

    def include_vector_tokens():
        tokens = ["<sep>", "<mask>"]
        tokens += [f"<dummy_{i}>" for i in reversed(range(20))]
        return tokens

    dobf_tokens = include_dobf_tokens()
    vector_tokens = include_vector_tokens()

    tokenizer = tiktoken.get_encoding(base)
    # New special tokens take the ids immediately after the base vocabulary.
    idx = tokenizer.n_vocab
    bpe_ranks = tokenizer._mergeable_ranks
    special_tokens = dict()

    if (eos_token and eos_token not in tokenizer._special_tokens and
            eos_token not in special_tokens):
        special_tokens[eos_token] = idx
        idx += 1

    for sp in dobf_tokens:
        special_tokens[sp] = idx
        idx += 1
    for sp in vector_tokens:
        special_tokens[sp] = idx
        idx += 1

    if (pad_token and pad_token not in tokenizer._special_tokens and
            pad_token not in special_tokens):
        special_tokens[pad_token] = idx
        idx += 1

    enc = tiktoken.Encoding(
        name=base.replace("base", "im"),  # e.g. "cl100k_base" -> "cl100k_im"
        pat_str=tokenizer._pat_str,
        mergeable_ranks=bpe_ranks,
        special_tokens={
            **tokenizer._special_tokens,
            **special_tokens,
        },
    )
    return enc
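

# Example usage (a sketch; assumes the "cl100k_base" encoding data is
# available locally or downloadable by tiktoken):
#
#   enc = add_special_tokens_to_tiktoken(base="cl100k_base",
#                                        eos_token="<|endoftext|>",
#                                        pad_token="<pad>")
#   enc.encode("<pad>", allowed_special="all")  # a single id past the base vocab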


class SageLiteTokenizer(PreTrainedTokenizer):
    """A thin wrapper around tiktoken to make it compatible with Hugging Face
    tokenizers.

    See HuggingFace for further documentation on general tokenizer methods.
    """

    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self,
                 model_name: Optional[str] = None,
                 encoding_name: Optional[str] = 'cl100k_base',
                 add_bos_token: bool = False,
                 add_eos_token: bool = False,
                 unk_token: Optional[str] = '<|endoftext|>',
                 eos_token: Optional[str] = '<|endoftext|>',
                 bos_token: Optional[str] = '<|endoftext|>',
                 pad_token: Optional[str] = '<pad>',
                 errors: str = 'replace',
                 **kwargs: Any):
        """Constructor creates a tiktoken tokenizer to use as the underlying
        tokenizer.

        Args:
            model_name (Optional[str], optional): The name of the model to load from tiktoken. Defaults to None.
                Either model_name or encoding_name must be set, but not both.
            encoding_name (Optional[str], optional): The name of the encoding to load from tiktoken.
                Defaults to 'cl100k_base'. Either model_name or encoding_name must be set, but not both.
            add_bos_token (bool, optional): Whether to add bos tokens. Defaults to False.
            add_eos_token (bool, optional): Whether to add eos tokens. Defaults to False.
            unk_token (Optional[str], optional): The unk token. Defaults to '<|endoftext|>'.
            eos_token (Optional[str], optional): The eos token. Defaults to '<|endoftext|>'.
            bos_token (Optional[str], optional): The bos token. Defaults to '<|endoftext|>'.
            pad_token (Optional[str], optional): The pad token. Defaults to '<pad>'.
            errors (str, optional): Paradigm to follow when decoding bytes to UTF-8. See
                [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
                Defaults to `"replace"`.
        """
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                'You need to install tiktoken to use SageLiteTokenizer.')

        # tiktoken's Encoding objects are not picklable by default, which
        # breaks Hugging Face utilities that deepcopy the tokenizer. Register
        # a reducer with copyreg so Encoding instances can be pickled.
        import copyreg
        import functools

        from tiktoken import Encoding

        def pickle_Encoding(enc: Encoding):
            return (functools.partial(Encoding,
                                      enc.name,
                                      pat_str=enc._pat_str,
                                      mergeable_ranks=enc._mergeable_ranks,
                                      special_tokens=enc._special_tokens), ())

        copyreg.pickle(Encoding, pickle_Encoding)

        self.encoding = add_special_tokens_to_tiktoken(base=encoding_name,
                                                       eos_token=eos_token,
                                                       pad_token=pad_token)

        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token

        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        self.errors = errors

        # Build an id -> token-string table. Ids with no entry in the
        # underlying encoding (gaps in the vocab) are skipped.
        self.decoder: Dict[int, str] = {}
        for i in range(self.encoding.n_vocab):
            try:
                token_bytes = self.encoding.decode_single_token_bytes(i)
            except KeyError:
                continue
            self.decoder[i] = ''.join(
                self.byte_encoder[ord(char)]
                for char in token_bytes.decode('latin-1'))

        # And the reverse table, token string -> id.
        self.encoder: Dict[str, int] = {}
        for i in range(self.encoding.n_vocab):
            if i in self.decoder:
                self.encoder[self.decoder[i]] = i

        super().__init__(model_name=model_name,
                         encoding_name=encoding_name,
                         add_bos_token=add_bos_token,
                         add_eos_token=add_eos_token,
                         unk_token=unk_token,
                         eos_token=eos_token,
                         bos_token=bos_token,
                         pad_token=pad_token,
                         errors=errors,
                         **kwargs)

    @property
    def vocab_size(self) -> int:
        """Returns vocab size."""
        return self.encoding.n_vocab

    @property
    def is_fast(self) -> bool:
        return False

    def get_vocab(self) -> Dict[str, int]:
        """Returns vocab as a dict."""
        # The encoder may not cover every id below vocab_size (some ids have
        # no entry in the underlying encoding). Fill those holes with
        # placeholder tokens so the returned vocab maps every id.
        vocab_clone = self.encoder.copy()
        extra_id_index = 0
        candidate_extra_id = f'<extra_id_{extra_id_index}>'
        indices_to_fill_in = {i for i in range(self.vocab_size)} - set(
            vocab_clone.values())

        for index_to_add in indices_to_fill_in:
            # Find the next unused <extra_id_*> name.
            while candidate_extra_id in vocab_clone:
                extra_id_index += 1
                candidate_extra_id = f'<extra_id_{extra_id_index}>'

            vocab_clone[candidate_extra_id] = index_to_add

        return vocab_clone

    def _tokenize(self, text: str) -> List[str]:
        """Returns a tokenized string."""
        if not isinstance(text, str):
            raise ValueError(
                f'Expected a string input to _tokenize but got {type(text)}.')

        tokens = [
            self.decoder[t]
            for t in self.encoding.encode(text, allowed_special='all')
        ]

        return tokens
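
    # e.g. _tokenize('hello world') returns the byte-level rendering of each
    # BPE piece, roughly ['hello', 'Ġworld'] for cl100k_base, where 'Ġ' marks
    # a leading space (the exact pieces depend on the encoding).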

    def _convert_token_to_id(self, token: str) -> Optional[int]:
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> Optional[str]:
        """Converts an index (integer) into a token (str) using the vocab."""
        # Ids with no vocab entry decode to the empty string.
        return self.decoder.get(index, '')

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (string) into a single string."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text
                         ]).decode('utf-8', errors=self.errors)
        return text

    def build_inputs_with_special_tokens(
            self,
            token_ids_0: List[int],
            token_ids_1: Optional[List[int]] = None) -> List[int]:
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output
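
    # For example, with add_bos_token=True and add_eos_token=True, a single
    # sequence [5, 6] becomes [bos_id, 5, 6, eos_id], and a sequence pair
    # appends a second bos/eos-wrapped segment after the first.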

    def get_special_tokens_mask(
            self,
            token_ids_0: List[int],
            token_ids_1: Optional[List[int]] = None,
            already_has_special_tokens: bool = False) -> List[int]:
        """Retrieves sequence ids from a token list that has no special tokens
        added.

        Function copied from
        https://github.com/huggingface/transformers/blob/e3a4bd2bee212a2d0fd9f03b27fe7bfc1debe42d/src/transformers/models/gpt2/tokenization_gpt2.py#L265-L295

        This method is called when adding special tokens using the tokenizer
        `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True)

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (bos_token_id + ([0] * len(token_ids_0)) + eos_token_id +
                bos_token_id + ([0] * len(token_ids_1)) + eos_token_id)

    def create_token_type_ids_from_sequences(
            self,
            token_ids_0: List[int],
            token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
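
    # e.g. token_ids_0=[5, 6] and token_ids_1=[7] gives [0, 0, 0, 1, 1]:
    # each segment's length counts its trailing sep position.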

    def save_vocabulary(
            self,
            save_directory: str,
            filename_prefix: Optional[str] = None
    ) -> Tuple[Optional[str], Optional[str]]:
        # The vocabulary lives inside the tiktoken encoding, so there are no
        # files to write; return empty filenames.
        return (None, None)

    def sanitize_special_tokens(self) -> int:
        """Make sure that all the special tokens attributes of the tokenizer
        (`tokenizer.mask_token`, `tokenizer.cls_token`, etc.) are in the
        vocabulary.

        Add the missing ones to the vocabulary if needed.

        Return:
            `int`: The number of tokens added in the vocabulary during the operation.
        """
        actual_new_tokens = []
        for token in self.all_special_tokens_extended:
            encoded = self.encoding.encode(token, allowed_special='all')
            # A special token that encodes to more than one id is not yet in
            # the underlying vocabulary.
            if len(encoded) > 1:
                actual_new_tokens.append(token)

        return self.add_tokens(actual_new_tokens, special_tokens=True)
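

# Example usage (a sketch; assumes `transformers` is installed and the
# tiktoken encoding data is available; exact ids depend on "cl100k_base"):
#
#   tokenizer = SageLiteTokenizer(encoding_name='cl100k_base')
#   ids = tokenizer('def foo(): pass')['input_ids']
#   tokenizer.decode(ids)   # 'def foo(): pass'
#   tokenizer.pad_token_id  # id assigned to '<pad>' past the base vocab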