import json
import os
import warnings
from typing import Dict, List, Optional, Tuple, Union

from sentencepiece import SentencePieceProcessor
from tokenizers import AddedToken, decoders, normalizers, processors
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, SpmConverter
from transformers.tokenization_utils_base import (
    BatchEncoding,
    EncodedInput,
    PreTokenizedInput,
    PreTokenizedInputPair,
    TextInput,
    TextInputPair,
    TruncationStrategy,
)
from transformers.utils import PaddingStrategy


ADDITIONAL_SPECIAL_TOKENS = [
    "[MASK]",
    "[gMASK]",
    "[sMASK]",
    "<!sop!>",
    "<!eop!>",
    "<|system|>",
    "<|user|>",
    "<|assistant|>",
    "<|observation|>",
]
PREFIX_TOKENS = ["[gMASK]", "<!sop!>"]

# Sentinel used by the fast tokenizer to split chat text into pre-tokenized
# "words" so that special tokens survive encoding intact; see
# ChatGLMTokenizerFast._batch_encode_plus.
ENCODE_SEP_TOKEN_FOR_FAST = "<!encode-sep!>"
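
# PREFIX_TOKENS are prepended to every encoded sequence; see
# ChatGLMTokenizer.build_inputs_with_special_tokens and the post-processor
# built in ChatGLMTokenizerConverter.converted for where this happens.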


class SPTokenizer:
    def __init__(self, model_path: str):
        # Load the sentencepiece model from disk.
        assert os.path.isfile(model_path), model_path
        self.sp_model = SentencePieceProcessor(model_file=model_path)

        # BOS / EOS / PAD ids. The sentencepiece model defines no dedicated
        # pad token, so the unk id doubles as the pad id.
        self.n_words: int = self.sp_model.vocab_size()
        self.bos_id: int = self.sp_model.bos_id()
        self.eos_id: int = self.sp_model.eos_id()
        self.pad_id: int = self.sp_model.unk_id()
        assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()

        # Special tokens are appended after the sentencepiece vocabulary,
        # so their ids start at vocab_size().
        self.special_tokens = {}
        self.index_special_tokens = {}
        for token in ADDITIONAL_SPECIAL_TOKENS:
            self.special_tokens[token] = self.n_words
            self.index_special_tokens[self.n_words] = token
            self.n_words += 1

    def tokenize(self, s: str):
        return self.sp_model.EncodeAsPieces(s)

    def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:
        assert isinstance(s, str)
        t = self.sp_model.encode(s)
        if bos:
            t = [self.bos_id] + t
        if eos:
            t = t + [self.eos_id]
        return t

    def decode(self, t: List[int]) -> str:
        # The sentencepiece model does not know the extra special-token ids,
        # so decode the ordinary ids in chunks and splice the special tokens
        # back in between them.
        text, buffer = "", []
        for token in t:
            if token in self.index_special_tokens:
                if buffer:
                    text += self.sp_model.decode(buffer)
                    buffer = []
                text += self.index_special_tokens[token]
            else:
                buffer.append(token)
        if buffer:
            text += self.sp_model.decode(buffer)
        return text

    def decode_tokens(self, tokens: List[str]) -> str:
        text = self.sp_model.DecodePieces(tokens)
        return text

    def convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.special_tokens:
            return self.special_tokens[token]
        return self.sp_model.PieceToId(token)

    def convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.index_special_tokens:
            return self.index_special_tokens[index]
        if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0:
            return ""
        return self.sp_model.IdToPiece(index)
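
# A minimal round-trip sketch (assumes a local ChatGLM "tokenizer.model"
# sentencepiece file; the ids shown are purely illustrative):
#
#   sp = SPTokenizer("tokenizer.model")
#   ids = sp.encode("Hello")                    # e.g. [14706]
#   ids.append(sp.special_tokens["<|user|>"])   # special ids start at vocab_size()
#   sp.decode(ids)                              # -> "Hello<|user|>"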


class ChatGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = {"vocab_file": "tokenizer.model"}

    model_input_names = ["input_ids", "attention_mask", "position_ids"]

    def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, **kwargs):
        self.name = "GLMTokenizer"

        self.vocab_file = vocab_file
        self.tokenizer = SPTokenizer(vocab_file)
        self.special_tokens = {
            "<bos>": self.tokenizer.bos_id,
            "<eos>": self.tokenizer.eos_id,
            "<pad>": self.tokenizer.pad_id,
        }
        super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)

    def get_command(self, token):
        if token in self.special_tokens:
            return self.special_tokens[token]
        assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}"
        return self.tokenizer.special_tokens[token]

    @property
    def unk_token(self) -> str:
        return "<unk>"

    @property
    def pad_token(self) -> str:
        # No dedicated pad token exists in the vocabulary; "<unk>" stands in.
        return "<unk>"

    @property
    def pad_token_id(self):
        return self.get_command("<pad>")

    @property
    def eos_token(self) -> str:
        return "</s>"

    @property
    def eos_token_id(self):
        return self.get_command("<eos>")

    @property
    def vocab_size(self):
        return self.tokenizer.n_words

    def get_vocab(self):
        """Returns the vocab as a dict."""
        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text, **kwargs):
        return self.tokenizer.tokenize(text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.tokenizer.convert_token_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return self.tokenizer.decode_tokens(tokens)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix to add to the names of the saved files.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, self.vocab_files_names["vocab_file"]
            )
        else:
            vocab_file = save_directory

        # Copy the raw sentencepiece model file verbatim.
        with open(self.vocab_file, "rb") as fin:
            proto_str = fin.read()

        with open(vocab_file, "wb") as writer:
            writer.write(proto_str)

        return (vocab_file,)
    def get_prefix_tokens(self):
        return list(map(self.get_command, PREFIX_TOKENS))

    def build_single_message(self, role, metadata, message):
        assert role in ["system", "user", "assistant", "observation"], role
        role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n")
        message_tokens = self.tokenizer.encode(message)
        tokens = role_tokens + message_tokens
        return tokens

    def build_chat_input(self, query, history=None, role="user"):
        if history is None:
            history = []
        input_ids = []
        for item in history:
            content = item["content"]
            if item["role"] == "system" and "tools" in item:
                content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False)
            input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content))
        input_ids.extend(self.build_single_message(role, "", query))
        input_ids.extend([self.get_command("<|assistant|>")])
        # The ids are already tokenized, so batch_encode_plus only prepends the
        # prefix tokens and handles tensor conversion.
        return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True)
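
    # Prompt layout sketch (token names, not ids): each message contributes
    #   <|role|> {metadata}\n {message pieces}
    # and the final sequence is
    #   [gMASK] <!sop!> {messages...} <|assistant|>
    # where the [gMASK] <!sop!> prefix is added by
    # build_inputs_with_special_tokens inside batch_encode_plus.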
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences by concatenating and
        adding special tokens. A ChatGLM sequence has the following format:

        - single sequence: `[gMASK] <!sop!> A`
        - pair of sequences: `[gMASK] <!sop!> A B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        prefix_tokens = self.get_prefix_tokens()
        token_ids_0 = prefix_tokens + token_ids_0
        if token_ids_1 is not None:
            token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("<eos>")]
        return token_ids_0
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on the left, up to a predefined length or the max length in the batch).

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad

                This tokenizer only supports `padding_side="left"`.
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute
                capability `>= 7.0` (Volta).
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics).
                Note: this implementation always builds `attention_mask` and `position_ids`.
        """
        assert self.padding_side == "left"

        required_input = encoded_inputs[self.model_input_names[0]]
        seq_length = len(required_input)

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask and position ids if absent.
        if "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * seq_length

        if "position_ids" not in encoded_inputs:
            encoded_inputs["position_ids"] = list(range(seq_length))

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if "attention_mask" in encoded_inputs:
                encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
            if "position_ids" in encoded_inputs:
                encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input

        return encoded_inputs
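
# Left-padding sketch (illustrative values): padding input_ids [5, 6, 7] to
# max_length=5 with pad_token_id=0 produces
#   input_ids      -> [0, 0, 5, 6, 7]
#   attention_mask -> [0, 0, 1, 1, 1]
#   position_ids   -> [0, 0, 0, 1, 2]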


class ChatGLMTokenizerFast(PreTrainedTokenizerFast):
    slow_tokenizer_class = ChatGLMTokenizer
    vocab_files_names = {
        **ChatGLMTokenizer.vocab_files_names,
        **PreTrainedTokenizerFast.vocab_files_names,
    }

    def __init__(self, **kwargs):
        kwargs.setdefault("clean_up_tokenization_spaces", False)
        kwargs.setdefault("bos_token", "<s>")
        kwargs.setdefault("eos_token", "</s>")
        kwargs.setdefault("unk_token", "<unk>")
        kwargs.setdefault("pad_token", "<unk>")
        super().__init__(**kwargs)

    @property
    def encode_sep_token(self):
        return ENCODE_SEP_TOKEN_FOR_FAST
    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
            List[PreTokenizedInputPair],
        ],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        # Split every input on the sep token so that chat special tokens end
        # up isolated as their own "words" and survive encoding intact.
        def split_sep(t: Union[TextInput, PreTokenizedInput]) -> PreTokenizedInput:
            if isinstance(t, str):
                return t.split(self.encode_sep_token)
            return [w for word in t for w in split_sep(word)]

        def split_maybe_tupled(
            t: Union[TextInput, TextInputPair, PreTokenizedInput, PreTokenizedInputPair]
        ) -> Union[PreTokenizedInputPair, PreTokenizedInput]:
            if isinstance(t, tuple):
                return split_sep(t[0]), split_sep(t[1])
            return split_sep(t)

        return super()._batch_encode_plus(
            list(map(split_maybe_tupled, batch_text_or_text_pairs)),
            add_special_tokens,
            padding_strategy,
            truncation_strategy,
            max_length,
            stride,
            True,  # is_split_into_words: inputs were pre-split on the sep token above
            pad_to_multiple_of,
            return_tensors,
            return_token_type_ids,
            return_attention_mask,
            return_overflowing_tokens,
            return_special_tokens_mask,
            return_offsets_mapping,
            return_length,
            verbose,
        )
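
    # How the sep token works (illustrative): a string such as
    #   "<|user|><!encode-sep!>hi"
    # splits into the "words" ["<|user|>", "hi"]; encoding them as
    # pre-tokenized input lets "<|user|>" match the added special token
    # instead of being broken into sentencepiece pieces.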
    @property
    def can_save_slow_tokenizer(self) -> bool:
        # The original sentencepiece model file is not retained, so the
        # matching slow tokenizer cannot be reconstructed from this instance.
        return False

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        legacy_format: Optional[bool] = None,
        filename_prefix: Optional[str] = None,
        push_to_hub: bool = False,
        **kwargs,
    ) -> Tuple[str]:
        warnings.warn(
            f"{type(self)} does not support saving a slow tokenizer. "
            "Saving it to the same directory may break the slow tokenizer. "
            "Please keep a backup of the original tokenizer beforehand."
        )
        return super().save_pretrained(
            save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs
        )
    def build_single_message(self, role, metadata, message):
        assert role in ["system", "user", "assistant", "observation"], role
        return f"<|{role}|>{self.encode_sep_token}{metadata}\n{self.encode_sep_token}{message}"

    def build_chat_text(self, query, history=None, role="user", metadata=""):
        inputs = []

        for item in history or []:
            content = item["content"]

            if item["role"] == "system" and "tools" in item:
                content += "\n" + json.dumps(
                    item["tools"], indent=4, ensure_ascii=False
                )

            inputs.append(
                self.build_single_message(
                    item["role"], item.get("metadata", ""), content
                )
            )

        inputs.append(self.build_single_message(role, metadata, query))
        inputs.append("<|assistant|>")

        return "".join(inputs)

    def build_chat_input(self, *args, **kwargs):
        return self.batch_encode_plus(
            [self.build_chat_text(*args, **kwargs)],
            return_tensors="pt",
        )
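
# Text produced by build_chat_text for a single-turn query (role="user",
# empty metadata):
#
#   "<|user|><!encode-sep!>\n<!encode-sep!>Hello<|assistant|>"
#
# The sep tokens are stripped again during encoding (see _batch_encode_plus).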


ChatGLMTokenizer.register_for_auto_class()
ChatGLMTokenizerFast.register_for_auto_class()
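
# Registering for auto-class resolution lets checkpoints that ship this file
# be loaded with AutoTokenizer.from_pretrained(..., trust_remote_code=True).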


class ChatGLMTokenizerConverter(SpmConverter):
    handle_byte_fallback = True

    def normalizer(self, proto):
        # Mimic sentencepiece's whitespace handling: prefix the text with "▁"
        # and rewrite every space to "▁".
        return normalizers.Sequence(
            [
                normalizers.Prepend(prepend="▁"),
                normalizers.Replace(pattern=" ", content="▁"),
            ]
        )

    def pre_tokenizer(self, replacement, add_prefix_space):
        # Whitespace is already rewritten by the normalizer above, so the
        # default Metaspace pre-tokenizer is not needed.
        return None

    def decoder(self, replacement, add_prefix_space):
        return decoders.Sequence(
            [
                decoders.ByteFallback(),
                super().decoder(replacement, add_prefix_space),
            ]
        )
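
    # Normalization sketch (illustrative): "a b" becomes "▁a b" after Prepend
    # and "▁a▁b" after Replace, matching how the normalizer above marks word
    # boundaries the way sentencepiece does.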
    def tokenizer(self, proto):
        tokenizer = super().tokenizer(proto)

        tokenizer.model.byte_fallback = True

        special_tokens = [
            "<unk>",
            "<s>",
            "</s>",
            *ADDITIONAL_SPECIAL_TOKENS,
        ]

        tokenizer.add_special_tokens(
            [
                AddedToken(token, special=True, normalized=False)
                for token in special_tokens
            ]
        )

        return tokenizer
    def converted(self):
        tokenizer = super().converted()

        # Replicate the slow tokenizer's prefix tokens ([gMASK] <!sop!>) with
        # a TemplateProcessing post-processor.
        prefix_token_ids = list(map(tokenizer.token_to_id, PREFIX_TOKENS))
        assert all(i is not None for i in prefix_token_ids)
        prefix_template = " ".join(PREFIX_TOKENS)

        # Pair each template token with its id, deduplicating along the way.
        template_special_tokens = list(frozenset(zip(PREFIX_TOKENS, prefix_token_ids)))

        if "</s>" not in PREFIX_TOKENS:
            eos_token_id = tokenizer.token_to_id("</s>")
            assert eos_token_id is not None
            template_special_tokens.append(("</s>", eos_token_id))

        post = processors.TemplateProcessing(
            single=f"{prefix_template} $A",
            pair=f"{prefix_template} $A $B:1 </s>:1",
            special_tokens=template_special_tokens,
        )
        if tokenizer.post_processor is None:
            tokenizer.post_processor = post
        else:
            tokenizer.post_processor = processors.Sequence(
                [tokenizer.post_processor, post]
            )

        return tokenizer


SLOW_TO_FAST_CONVERTERS[ChatGLMTokenizer.__name__] = ChatGLMTokenizerConverter
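
# Minimal end-to-end sketch (assumes a local "tokenizer.model" sentencepiece
# file; kept as comments so importing this module stays side-effect free):
#
#   from transformers.convert_slow_tokenizer import convert_slow_tokenizer
#
#   slow = ChatGLMTokenizer("tokenizer.model")
#   fast = ChatGLMTokenizerFast(tokenizer_object=convert_slow_tokenizer(slow))
#
#   slow_ids = slow.build_chat_input("Hello")["input_ids"]
#   fast_ids = fast.build_chat_input("Hello")["input_ids"]
#   # The two are expected to match, though that depends on the model file.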
|