| """ Tokenization classes for KoBERT model """ |
|
|
|
|
| import logging |
| import os |
| import unicodedata |
| from shutil import copyfile |
|
|
| from transformers import PreTrainedTokenizer |
|
|
| |
| from konlpy.tag import Mecab |
| from unicode import join_jamos |
| from normalize import MosesPunctNormalizer |
| nor = MosesPunctNormalizer() |
|
|
| |

BASE_CODE, CHOSUNG, JUNGSUNG = 44032, 588, 28

# Modern Hangul jamo tables: initial consonants (chosung), medial vowels
# (jungsung), and final consonants (jongsung). Jongsung index 0 is a space,
# meaning "no final consonant".
CHOSUNG_LIST = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']

JUNGSUNG_LIST = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']

JONGSUNG_LIST = [' ', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
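
# A composed syllable's code point is
#   BASE_CODE + chosung_index * CHOSUNG + jungsung_index * JUNGSUNG + jongsung_index
# e.g. ord('한') == 44032 + 18 * 588 + 0 * 28 + 4 == 54620.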


def splitjamo(string):
    """Decompose each Hangul syllable in `string` into its constituent jamo."""
    result = []
    for keyword in string:
        # Decompose only Hangul characters; pass everything else through.
        if re.match(".*[ㄱ-ㅎㅏ-ㅣ가-힣]+.*", keyword) is not None:
            char_code = ord(keyword) - BASE_CODE
            char1 = char_code // CHOSUNG
            try:
                result.append(CHOSUNG_LIST[char1])
            except IndexError:
                # Bare jamo fall outside the syllable block; give up and
                # return the input unchanged.
                return string
            char2 = (char_code - CHOSUNG * char1) // JUNGSUNG
            result.append(JUNGSUNG_LIST[char2])
            char3 = char_code - CHOSUNG * char1 - JUNGSUNG * char2
            result.append(JONGSUNG_LIST[char3])
        else:
            result.append(keyword)
    return result


def has_coda(word):
    """Return True if the last syllable of `word` has a final consonant."""
    # (code - BASE_CODE) % 28 is the jongsung index; 0 means no coda.
    return (ord(word[-1]) - BASE_CODE) % 28 != 0
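# Examples (assuming the corrected has_coda above):
#   splitjamo("한글") -> ['ㅎ', 'ㅏ', 'ㄴ', 'ㄱ', 'ㅡ', 'ㄹ']
#   has_coda("나무") -> False   # '무' has no final consonant
#   has_coda("구름") -> True    # '름' ends in 'ㅁ'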


def _replace_unicode(line):
    """Map typographic dashes, quotes, and CJK brackets to ASCII equivalents."""
    if line is None:
        return ""
    line = (
        line.replace("―", "-").replace("–", "-").replace("—", "-")
        .replace("＂", '"').replace("＇", "'")
        .replace("‹", "<").replace("›", ">")
        .replace("‘", "'").replace("’", "'")
        .replace("“", '"').replace("”", '"')
        .replace("«", "<").replace("»", ">")
        .replace("˝", '"')
        .replace("（", "(").replace("）", ")")
        .replace("『", '"').replace("』", '"')
        .replace("„", '"').replace("‟", '"')
        .replace("‚", "'").replace("‛", "'")
        .replace("〈", "<").replace("〉", ">")
        .replace("《", "<").replace("》", ">")
        .replace("「", "'").replace("」", "'")
        .replace("【", "[").replace("】", "]")
        .replace("〔", "[").replace("〕", "]")
        .replace("［", "[").replace("］", "]")
        .replace("｛", "{").replace("｝", "}")
    )
    line = nor.replace_unicode_punct(line)
    return line
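# Example (using the mapping above):
#   _replace_unicode("“한글” — 〈제목〉") -> '"한글" - <제목>'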


def _mecab(line):
    """POS-tag `line` with Mecab and return the surface morphemes joined by spaces."""
    mecab = Mecab()
    morphs = [pos[0] for pos in mecab.pos(line)]
    return " ".join(morphs)
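# Illustrative only (actual splits depend on the installed mecab-ko dictionary):
#   _mecab("한국어를 배운다") -> "한국어 를 배운다"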


logger = logging.getLogger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "spm.model",
    "vocab_txt": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "monologg/kobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert/tokenizer_78b3253a26.model",
        "monologg/kobert-lm": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert-lm/tokenizer_78b3253a26.model",
        "monologg/distilkobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/distilkobert/tokenizer_78b3253a26.model",
    },
    "vocab_txt": {
        "monologg/kobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert/vocab.txt",
        "monologg/kobert-lm": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert-lm/vocab.txt",
        "monologg/distilkobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/distilkobert/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "monologg/kobert": 512,
    "monologg/kobert-lm": 512,
    "monologg/distilkobert": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "monologg/kobert": {"do_lower_case": False},
    "monologg/kobert-lm": {"do_lower_case": False},
    "monologg/distilkobert": {"do_lower_case": False},
}

SPIECE_UNDERLINE = "▁"


class KoBertTokenizer(PreTrainedTokenizer):
    """
    SentencePiece-based tokenizer for KoBERT. Peculiarities:

    - requires `SentencePiece <https://github.com/google/sentencepiece>`_
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        vocab_txt,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        unk_token="<unk>",
        sep_token="<s>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        # Build token <-> index lookups from the plain-text vocab file.
        self.token2idx = dict()
        self.idx2token = []
        with open(vocab_txt, "r", encoding="utf-8") as f:
            for idx, token in enumerate(f):
                token = token.strip()
                self.token2idx[token] = idx
                self.idx2token.append(token)

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use KoBertTokenizer: "
                "https://github.com/google/sentencepiece "
                "(pip install sentencepiece)"
            )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.vocab_txt = vocab_txt

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.idx2token)

    def get_vocab(self):
        return dict(self.token2idx, **self.added_tokens_encoder)

    def __getstate__(self):
        # The SentencePiece processor is not picklable: drop it here and
        # reload it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use KoBertTokenizer: "
                "https://github.com/google/sentencepiece "
                "(pip install sentencepiece)"
            )
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
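
    # Example: with keep_accents=False, NFKD normalization strips combining
    # marks, so preprocess_text("café  test") -> "cafe test".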

    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        # Normalize punctuation, then split into morphemes before SentencePiece.
        text = _replace_unicode(text)
        text = _mecab(text)

        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Re-split pieces like "2016," so the trailing comma becomes its own token.
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
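
    # Illustrative only (output depends on the loaded SentencePiece model and
    # the Mecab dictionary): _tokenize("안녕하세요.") might yield
    # ['▁안녕', '하', '세요', '.'].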

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.token2idx.get(token, self.token2idx[self.unk_token])

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.idx2token[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence
        classification tasks by concatenating and adding special tokens.
        A KoBERT sequence has the following format:

            single sequence: [CLS] X [SEP]
            pair of sequences: [CLS] A [SEP] B [SEP]
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
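
    # Example: token_ids_0=[5, 6], token_ids_1=[7]
    #   -> [cls_token_id, 5, 6, sep_token_id, 7, sep_token_id]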

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.

        Args:
            token_ids_0: list of ids (must not contain special tokens)
            token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
                for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formatted with
                special tokens for the model

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(
                    lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
                    token_ids_0,
                )
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
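
    # Example: token_ids_0=[5, 6, 7] (no special tokens yet)
    #   -> [1, 0, 0, 0, 1], marking the positions of the added [CLS] and [SEP]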

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        A KoBERT sequence pair mask has the following format:

            0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence

        If token_ids_1 is None, only returns the first portion of the mask (0s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
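
    # Example: token_ids_0=[5, 6], token_ids_1=[7]
    #   -> [0, 0, 0, 0, 1, 1]  # [CLS] A [SEP] -> 0s, B [SEP] -> 1s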

    def save_vocabulary(self, save_directory):
        """Save the sentencepiece vocabulary (copy original file) and special tokens file
        to a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return

        # Copy the SentencePiece model file unless it is already in place.
        out_vocab_model = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_model):
            copyfile(self.vocab_file, out_vocab_model)

        # Write the plain-text vocab, warning if indices are not consecutive.
        index = 0
        out_vocab_txt = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_txt"])
        with open(out_vocab_txt, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.token2idx.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!".format(out_vocab_txt)
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1

        return out_vocab_model, out_vocab_txt
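
# Minimal usage sketch (paths are illustrative; requires sentencepiece,
# konlpy with mecab-ko, and the local `unicode`/`normalize` helpers):
#
#   tokenizer = KoBertTokenizer(vocab_file="spm.model", vocab_txt="vocab.txt")
#   tokens = tokenizer.tokenize("한국어 문장을 토큰화한다.")
#   ids = tokenizer.build_inputs_with_special_tokens(
#       tokenizer.convert_tokens_to_ids(tokens)
#   )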