# coding=utf-8
# Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for CLIP."""

import json
import os
import unicodedata
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/vocab.json",
    },
    "merges_file": {
        "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openai/clip-vit-base-patch32": 77,
}

PRETRAINED_INIT_CONFIGURATION = {
    "openai/clip-vit-base-patch32": {},
}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to
    whitespace/control characters that the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your
    vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K
    for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want
    lookup tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
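
# Illustrative note: every byte value 0-255 gets a printable stand-in. For instance, a space (byte 32) is not in the
# printable ranges above and is remapped to chr(256 + 32) == "Ġ", so "hi there" is rendered as "hiĠthere" before BPE
# and mapped back through the inverse table when decoding.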


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
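
# Example: get_pairs(("h", "e", "l", "l", "o</w>")) -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}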


def whitespace_clean(text):
    text = re.sub(r"\s+", " ", text)
    text = text.strip()
    return text


# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*)
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
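
    # Example (with default arguments): BasicTokenizer().tokenize("Hello, WORLD!") -> ["hello", ",", "world", "!"]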

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
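
    # Example: _run_strip_accents("café") -> "cafe" (NFD splits off the combining accent, whose category is "Mn")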

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]
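
    # Example: _run_split_on_punc("hello!world") -> ["hello", "!", "world"]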

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class CLIPTokenizer(PreTrainedTokenizer):
    """
    Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        bos_token (`str`, *optional*, defaults to `<|startoftext|>`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The end of sequence token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
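
    # Typical usage (illustrative; assumes the "openai/clip-vit-base-patch32" checkpoint referenced above):
    #
    #     from transformers import CLIPTokenizer
    #
    #     tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    #     encoding = tokenizer("a photo of a cat")
    #     # encoding["input_ids"] starts with the <|startoftext|> id and ends with the <|endoftext|> id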

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",  # hack to enable padding
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        try:
            import ftfy

            self.fix_text = ftfy.fix_text
        except ImportError:
            logger.info("ftfy or spacy is not installed; using custom BasicTokenizer instead of ftfy.")
            self.nlp = BasicTokenizer(strip_accents=False, do_split_on_punc=False)
            self.fix_text = None

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"}

        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE,
        )

        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A CLIP sequence has the following format:

        - single sequence: `<|startoftext|> X <|endoftext|>`

        Pairs of sequences are not the expected use case, but they will be handled without a separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]

        if token_ids_1 is None:
            return bos_token + token_ids_0 + eos_token
        return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of
        zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]

        if token_ids_1 is None:
            return len(bos_token + token_ids_0 + eos_token) * [0]
        return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0]

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
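
    # Example (illustrative, with an empty merge table): bpe("low") -> "l o w</w>". Merges from the merges file
    # progressively glue these symbols back together; the space-separated result is split into BPE tokens by
    # _tokenize below.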

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        if self.fix_text is None:
            text = " ".join(self.nlp.tokenize(text))
        else:
            text = whitespace_clean(self.fix_text(text)).lower()

        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
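
    # Example (illustrative, assuming "a</w>" and "photo</w>" are single entries in the vocab):
    # _tokenize("A photo") lowercases and cleans the text, the regex splits it into ["a", "photo"],
    # and BPE yields ["a</w>", "photo</w>"].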

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        byte_array = bytearray([self.byte_decoder[c] for c in text])
        text = byte_array.decode("utf-8", errors=self.errors).replace("</w>", " ").strip()
        return text
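
    # Example: convert_tokens_to_string(["a</w>", "cat</w>"]) -> "a cat"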

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!".format(merge_file)
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
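
    # Example: given an existing directory "./clip_tok", save_vocabulary("./clip_tok") writes
    # "./clip_tok/vocab.json" and "./clip_tok/merges.txt" and returns both paths.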