| """Tokenization classes for PoNet.""" |
|
|
|
|
| import collections |
| import os |
| import unicodedata |
| from typing import Dict, List, Optional, Tuple, Union |
|
|
| from transformers.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace |
| from transformers.tokenization_utils_base import BatchEncoding, EncodedInput |
| from transformers.utils import PaddingStrategy, logging |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
| VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} |
|
|
| PRETRAINED_VOCAB_FILES_MAP = { |
| "vocab_file": { |
| "chtan/ponet-base-uncased": "https://huggingface.co/chtan/ponet-base-uncased/resolve/main/vocab.txt", |
| } |
| } |
|
|
| PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { |
| "chtan/ponet-base-uncased": 512, |
| } |
|
|
| PRETRAINED_INIT_CONFIGURATION = { |
| "chtan/ponet-base-uncased": {"do_lower_case": True}, |
| } |
|
|
|
|


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


class PoNetTokenizer(PreTrainedTokenizer):
    r"""
    Construct a PoNet tokenizer. Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
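
    Example (a minimal usage sketch; it assumes the `chtan/ponet-base-uncased` checkpoint referenced above is
    available and that this module is importable):

    ```python
    >>> tokenizer = PoNetTokenizer.from_pretrained("chtan/ponet-base-uncased")
    >>> tokens = tokenizer.tokenize("Hello, world!")   # basic tokenization + WordPiece, lowercased
    >>> encoded = tokenizer("Hello, world!")           # adds [CLS] ... [SEP] and returns input ids
    >>> text = tokenizer.convert_tokens_to_string(tokens)
    ```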
| """ |
|
|
| vocab_files_names = VOCAB_FILES_NAMES |
| pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP |
| pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION |
| max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES |
|
|

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a pretrained"
                " model use `tokenizer = PoNetTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to a predefined length or the max length in the batch).

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:

                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute
                capability `>= 7.5` (Volta).
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)
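
        # Round max_length up to the next multiple of pad_to_multiple_of (e.g. 60 -> 64 for a multiple of 8).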
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "segment_ids" in encoded_inputs:
                    encoded_inputs["segment_ids"] = (
                        encoded_inputs["segment_ids"] + [encoded_inputs["segment_ids"][-1] + 1] * difference
                    )
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif self.padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "segment_ids" in encoded_inputs:
                    encoded_inputs["segment_ids"] = [
                        encoded_inputs["segment_ids"][-1] + 1
                    ] * difference + encoded_inputs["segment_ids"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                # If the token is part of the never_split set, keep it intact; otherwise run WordPiece on it.
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A PoNet sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A PoNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)


class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
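
    Example (illustrative; output assumes the default arguments shown above):

    ```python
    >>> BasicTokenizer(do_lower_case=True).tokenize("Héllo, WORLD!")
    ['hello', ',', 'world', '!']
    ```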
| """ |
|
|
| def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): |
| if never_split is None: |
| never_split = [] |
| self.do_lower_case = do_lower_case |
| self.never_split = set(never_split) |
| self.tokenize_chinese_chars = tokenize_chinese_chars |
| self.strip_accents = strip_accents |
|
|

    def tokenize(self, text, never_split=None):
        """
        Basic tokenization of a piece of text: splits on whitespace and punctuation only. For sub-word tokenization,
        see WordpieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*)
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
        """
        # Union the instance-level never_split set with any tokens passed for this call.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)
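
        # Optionally pad CJK characters with whitespace so that each one is tokenized as an individual token
        # (mirrors the multilingual BERT tokenizer); this is a no-op for text without CJK characters.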
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if never_split is not None and text in never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
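        # The ranges below cover the CJK Unified Ideographs block and its extensions plus the CJK Compatibility
        # Ideographs. Hangul, Hiragana and Katakana live in other Unicode blocks and are not treated specially here.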
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
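
            # Greedy longest-match-first: at each position take the longest substring that is in the vocabulary;
            # pieces after the first are looked up with a "##" prefix. If no match is found at some position,
            # the whole token maps to the unknown token.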
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens