"""Tokenization classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import logging
import os
import unicodedata

from transformers.file_utils import cached_path

logger = logging.getLogger(__name__)

PRETRAINED_VOCAB_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    index = 0
    with open(vocab_file, "r", encoding="utf-8") as reader:
        while True:
            token = reader.readline()
            if not token:
                break
            # Skip metadata lines (e.g. "n_iters=..." or "max_length=...")
            # that this vocabulary format may carry before the tokens.
            if token.startswith('n_iters=') or token.startswith('max_length='):
                continue
            # Keep only the token itself; anything after a tab (e.g. a count)
            # is discarded.
            token = token.split('\t')[0]
            token = token.strip()
            vocab[token] = index
            index += 1
    return vocab
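
# Illustrative sketch of a vocabulary file accepted by load_vocab() above
# (the token names and counts are made up, not taken from a real vocab file):
# "n_iters=..." / "max_length=..." metadata lines are skipped, and only the
# text before an optional tab (shown here as <TAB>) is kept.
#
#   n_iters=10000
#   max_length=128
#   [PAD]
#   [UNK]
#   [CLS]
#   [SEP]
#   hello_<TAB>12
#   world_<TAB>7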


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


class BertTokenizer(object):
    """Runs end-to-end tokenization: punctuation splitting + wordpiece."""

    def __init__(self, vocab_file, do_lower_case=True, max_len=None,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
                                              never_split=never_split)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
        self.max_len = max_len if max_len is not None else int(1e12)

    def tokenize(self, text):
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            # A trailing '_' is appended to every basic token before wordpiece
            # tokenization; the vocabulary used with this tokenizer appears to
            # mark word boundaries this way.
            token += '_'
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab."""
        ids = []
        for token in tokens:
            ids.append(self.vocab[token])
        if len(ids) > self.max_len:
            raise ValueError(
                "Token indices sequence length is longer than the specified maximum "
                "sequence length for this BERT model ({} > {}). Running this "
                "sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
            )
        return ids

    def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids into wordpiece tokens using the vocab."""
        tokens = []
        for i in ids:
            tokens.append(self.ids_to_tokens[i])
        return tokens

    @classmethod
    def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a BertTokenizer from a pre-trained model vocabulary.
        Download and cache the vocabulary file if needed.
        """
        if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
        else:
            vocab_file = pretrained_model_name
        if os.path.isdir(vocab_file):
            vocab_file = os.path.join(vocab_file, VOCAB_NAME)
        # Redirect to the cache, downloading the file if necessary.
        try:
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
        except FileNotFoundError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    vocab_file))
            return None
        if resolved_vocab_file == vocab_file:
            logger.info("loading vocabulary file {}".format(vocab_file))
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
        if pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # If we're using a pretrained model, make sure the tokenizer won't
            # index sequences longer than the number of positional embeddings.
            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name]
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        # Instantiate the tokenizer.
        tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
        return tokenizer
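
    # Typical use (a sketch: 'bert-base-uncased' is one of the names in
    # PRETRAINED_VOCAB_ARCHIVE_MAP, the sentence is arbitrary, and the exact
    # pieces produced depend on the vocabulary because of the custom '_'
    # handling in tokenize()):
    #
    #   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    #   tokens = tokenizer.tokenize("a peppered salami pizza")
    #   ids = tokenizer.convert_tokens_to_ids(tokens)
    #   text_tokens = tokenizer.convert_ids_to_tokens(ids)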


class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self,
                 do_lower_case=True,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case
        self.never_split = never_split

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = self._clean_text(text)
        # Note: unlike the original BERT BasicTokenizer, this version does not
        # call _tokenize_chinese_chars() here, so CJK characters are left
        # attached to their surrounding text.
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case and token not in self.never_split:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        if text in self.never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # "CJK character" here means anything in the CJK Unicode blocks: the
        # Unified Ideographs block, its extensions, and the compatibility
        # ideographs. Japanese kana and Korean Hangul are not in these blocks
        # and are handled like any other script.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or
                (cp >= 0x3400 and cp <= 0x4DBF) or
                (cp >= 0x20000 and cp <= 0x2A6DF) or
                (cp >= 0x2A700 and cp <= 0x2B73F) or
                (cp >= 0x2B740 and cp <= 0x2B81F) or
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or
                (cp >= 0x2F800 and cp <= 0x2FA1F)):
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
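
    # Example of this BasicTokenizer's behaviour (a sketch): because the
    # modified _is_punctuation() below treats only a space as punctuation,
    # punctuation marks are not split off, e.g.
    #   BasicTokenizer().tokenize("Héllo, World!")  ->  ["hello,", "world!"]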


class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example, with the original BERT vocabulary conventions:
            input = "unaffable"
            output = ["un", "##aff", "##able"]
        Note that, unlike the original BERT WordpieceTokenizer, this version does
        not prepend "##" to continuation pieces; sub-strings are looked up in the
        vocabulary exactly as they appear.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                # Greedily look for the longest vocabulary entry matching the
                # characters starting at `start`.
                while start < end:
                    substr = "".join(chars[start:end])
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens


def _is_whitespace(char):
    """Checks whether `char` is a whitespace character."""
    # \t, \n, and \r are technically control characters, but they are treated
    # as whitespace here since they are generally considered as such.
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    cat = unicodedata.category(char)
    if cat == "Zs":
        return True
    return False


def _is_control(char):
    """Checks whether `char` is a control character."""
    # \t, \n, and \r are technically control characters, but they are treated
    # as whitespace here.
    if char == "\t" or char == "\n" or char == "\r":
        return False
    cat = unicodedata.category(char)
    if cat.startswith("C"):
        return True
    return False


def _is_punctuation(char):
    """Checks whether `char` is treated as punctuation by this tokenizer."""
    # Note: this deviates from the original BERT tokenizer, which treats ASCII
    # punctuation and all Unicode "P*" categories as punctuation. Here only a
    # space counts, which effectively disables punctuation splitting, since
    # whitespace-tokenized tokens never contain spaces.
    return char == ' '
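

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original module): pass the
    # path to a vocabulary file on the command line to tokenize a sample
    # sentence with the classes defined above.
    import sys

    if len(sys.argv) > 1:
        tok = BertTokenizer(sys.argv[1], do_lower_case=True)
        sample = "hello world"
        pieces = tok.tokenize(sample)
        print("tokens:", pieces)
        print("ids:", tok.convert_tokens_to_ids(pieces))
    else:
        print("usage: python {} <vocab_file>".format(sys.argv[0]))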