import os
from contextlib import contextmanager
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from transformers import AddedToken, PreTrainedTokenizer
from transformers import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Formzu/bart-base-japanese": (
            "https://huggingface.co/Formzu/bart-base-japanese/resolve/main/sentencepiece.bpe.model"
        ),
        "Formzu/bart-large-japanese": (
            "https://huggingface.co/Formzu/bart-large-japanese/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Formzu/bart-base-japanese": 1024,
    "Formzu/bart-large-japanese": 1024,
}

class BartJapaneseTokenizer(PreTrainedTokenizer):
    """
    Construct a BART tokenizer for Japanese text.

    Adapted from [`RobertaTokenizer`], [`XLNetTokenizer`] and [`MBartTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece).

    The tokenization method is `<bos> <tokens> <eos>`.

    Examples:

    ```python
    >>> from tokenization_bart_japanese import BartJapaneseTokenizer

    >>> tokenizer = BartJapaneseTokenizer.from_pretrained("Formzu/bart-base-japanese")
    >>> example_japanese_phrase = "今日は晴れています。"
    >>> expected_label = "天気"
    >>> inputs = tokenizer(example_japanese_phrase, return_tensors="pt")
    >>> labels = tokenizer(expected_label, return_tensors="pt")
    >>> inputs["labels"] = labels["input_ids"]
    ```"""
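
    # Note (environment assumption, not enforced by this class): besides the `zenhan` and
    # `pyknp` packages imported lazily in `__init__`, pyknp drives an external Juman++/JUMAN
    # binary, so that morphological analyzer is expected to be installed and on the PATH.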

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=None,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        try:
            from zenhan import h2z
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install zenhan to use BartJapaneseTokenizer. "
                "See https://pypi.org/project/zenhan/ for installation."
            )
        try:
            from pyknp import Juman
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install pyknp to use BartJapaneseTokenizer. "
                "See https://pypi.org/project/pyknp/ for installation."
            )

        self.h2z = h2z
        self.jumanpp = Juman()

        # The fairseq vocab and the spm vocab must be "aligned": fairseq reserves ids 0-3 for
        # the special tokens below, so every spm piece id is shifted by an offset when mapped
        # to the model vocabulary.

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # Offset between an spm piece id and the corresponding fairseq-style id
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)

        # "<mask>" is placed right after the last mapped spm piece
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
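
        # Illustrative sketch (piece ids are hypothetical; the real ones depend on the trained
        # SentencePiece model): a piece with spm id 10 ends up at id 11 after the offset, and
        # with a 32000-piece model "<mask>" would be assigned id 32001.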

        self.set_special_tokens()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility with pickled tokenizers that predate `sp_model_kwargs`
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # plus 1 for the mask token
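
    # Worked example (hypothetical model size): with a 32000-piece SentencePiece model,
    # vocab_size == 32000 + 1 + 1 == 32002 (the fairseq offset plus one slot for "<mask>").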

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
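
        # Illustrative sketch (hypothetical ids): with one prefix and one suffix token,
        # get_special_tokens_mask([10, 11]) -> [1, 0, 0, 1].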

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A Japanese BART sequence has the following format, where `X` represents the
        sequence:

        - `input_ids` (for encoder) `[bos] X [eos]`
        - `decoder_input_ids` (for decoder) `[bos] X [eos]`

        Pairs of sequences are not the expected use case, but they will be handled without a separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We do not expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
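
        # Illustrative sketch (sequence ids are hypothetical): with "<s>" -> 0 and "</s>" -> 2
        # as set up in __init__, build_inputs_with_special_tokens([10, 11]) -> [0, 10, 11, 2].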

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. Japanese BART
        does not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
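
        # Illustrative sketch: create_token_type_ids_from_sequences([10, 11]) -> [0, 0, 0, 0]
        # (cls + two tokens + sep, every position mapped to token type 0).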

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        # Convert half-width characters to full-width, split the text into morphemes with
        # Juman++, then let SentencePiece segment the whitespace-joined morphemes.
        text = self.h2z(text)
        text = self.jumanpp.analysis(text)
        text = " ".join([mrph.midasi for mrph in text.mrph_list()])
        return self.sp_model.encode(text, out_type=str)
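
        # Illustrative sketch (the exact morphemes and pieces depend on Juman++ and the trained
        # SentencePiece model): "今日は晴れ" might be segmented to "今日 は 晴れ" by Juman++ and
        # then to pieces such as ["▁今日", "▁は", "▁晴れ"] by SentencePiece.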

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return the unknown token id if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
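
        # Illustrative sketch: "<s>" resolves to 0 via the fairseq mapping; a known piece with
        # hypothetical spm id 10 resolves to 11 (offset applied); a piece the SP model maps to
        # id 0 falls back to unk_token_id.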

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
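
        # Illustrative usage sketch (the output directory is hypothetical):
        # tokenizer.save_vocabulary("./exported") -> ("./exported/sentencepiece.bpe.model",)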

    def set_special_tokens(self) -> None:
        """Set prefix=[bos], suffix=[eos]."""
        self.prefix_tokens = [self.bos_token_id]
        self.suffix_tokens = [self.eos_token_id]
        self.add_tokens(self.all_special_tokens_extended, special_tokens=True)