# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

| | """Tokenization classes for QWen."""
|
| |
|
| | import base64
|
| | import logging
|
| | import os
|
| | import unicodedata
|
| | from typing import Collection, Dict, List, Set, Tuple, Union
|
| |
|
| | import tiktoken
|
| | from transformers import PreTrainedTokenizer, AddedToken
|
| |
|
| | logger = logging.getLogger(__name__)
|
| |
|
| |
|
| | VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken"}
|
| |
|
| | PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
|
| | ENDOFTEXT = "<|endoftext|>"
|
| | IMSTART = "<|im_start|>"
|
| | IMEND = "<|im_end|>"
|
# Because the default behavior allows special tokens to appear in regular
# text, their surface forms are chosen to be as distinctive as possible to
# minimize accidental collisions with ordinary input.
EXTRAS = tuple(f"<|extra_{i}|>" for i in range(205))
SPECIAL_TOKENS = (
    ENDOFTEXT,
    IMSTART,
    IMEND,
) + EXTRAS
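
# Note: special-token ids are assigned immediately after the BPE ranks, so
# with a BPE vocabulary of size N, ENDOFTEXT gets id N, IMSTART id N + 1, and
# so on (see the `special_tokens` mapping built in QWenTokenizer.__init__).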


def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
    with open(tiktoken_bpe_file, "rb") as f:
        contents = f.read()
    return {
        base64.b64decode(token): int(rank)
        for token, rank in (line.split() for line in contents.splitlines() if line)
    }
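
# The vocabulary file stores one entry per line in the form
# "<base64-encoded token bytes> <rank>". For illustration only (these example
# entries are assumptions, not actual contents of qwen.tiktoken):
#
#     IQ== 0    ->  b"!": 0
#     Ig== 1    ->  b'"': 1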


class QWenTokenizer(PreTrainedTokenizer):
    """QWen tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        errors="replace",
        image_start_tag='<img>',
        image_end_tag='</img>',
        image_pad_tag='<imgpad>',
        ref_start_tag='<ref>',
        ref_end_tag='</ref>',
        box_start_tag='<box>',
        box_end_tag='</box>',
        quad_start_tag='<quad>',
        quad_end_tag='</quad>',
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_start_tag = image_start_tag
        self.image_end_tag = image_end_tag
        self.image_pad_tag = image_pad_tag
        self.ref_start_tag = ref_start_tag
        self.ref_end_tag = ref_end_tag
        self.box_start_tag = box_start_tag
        self.box_end_tag = box_end_tag
        self.quad_start_tag = quad_start_tag
        self.quad_end_tag = quad_end_tag
        self.IMAGE_ST = (
            ref_start_tag, ref_end_tag,
            box_start_tag, box_end_tag,
            quad_start_tag, quad_end_tag,
            image_start_tag, image_end_tag,
            image_pad_tag,
        )

        self.errors = errors  # decoding error policy used by bytes.decode

        self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)  # type: Dict[bytes, int]
        # Special tokens get the ids immediately after the BPE ranks.
        self.special_tokens = {
            token: index
            for index, token in enumerate(
                SPECIAL_TOKENS + self.IMAGE_ST, start=len(self.mergeable_ranks)
            )
        }

        self.img_start_id = self.special_tokens[self.image_start_tag]
        self.img_end_id = self.special_tokens[self.image_end_tag]
        self.img_pad_id = self.special_tokens[self.image_pad_tag]
        self.ref_start_id = self.special_tokens[self.ref_start_tag]
        self.ref_end_id = self.special_tokens[self.ref_end_tag]
        self.box_start_id = self.special_tokens[self.box_start_tag]
        self.box_end_id = self.special_tokens[self.box_end_tag]
        self.quad_start_id = self.special_tokens[self.quad_start_tag]
        self.quad_end_id = self.special_tokens[self.quad_end_tag]

        enc = tiktoken.Encoding(
            "Qwen",
            pat_str=PAT_STR,
            mergeable_ranks=self.mergeable_ranks,
            special_tokens=self.special_tokens,
        )
        assert (
            len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
        ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"

        # id -> token (bytes for BPE tokens, str for special tokens)
        self.decoder = {v: k for k, v in self.mergeable_ranks.items()}
        self.decoder.update({v: k for k, v in self.special_tokens.items()})

        self.tokenizer = enc  # type: tiktoken.Encoding

        self.eod_id = self.tokenizer.eot_token
        self.im_start_id = self.special_tokens[IMSTART]
        self.im_end_id = self.special_tokens[IMEND]

    def __len__(self) -> int:
        return self.tokenizer.n_vocab

    def get_vocab(self) -> Dict[bytes, int]:
        return self.mergeable_ranks

    def convert_tokens_to_ids(
        self, tokens: Union[bytes, str, List[Union[bytes, str]]]
    ) -> Union[int, List[int]]:
        # A single str/bytes token maps to a single id; a list maps to a list.
        if isinstance(tokens, (str, bytes)):
            if tokens in self.special_tokens:
                return self.special_tokens[tokens]
            else:
                return self.mergeable_ranks.get(tokens)
        ids = []
        for token in tokens:
            if token in self.special_tokens:
                ids.append(self.special_tokens[token])
            else:
                ids.append(self.mergeable_ranks.get(token))
        return ids
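
    # e.g. convert_tokens_to_ids([b"hello", "<|endoftext|>"]) returns
    # [<BPE rank of b"hello">, self.eod_id]; an unknown byte token yields
    # None, since dict.get() is used without a default.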

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        if not special_tokens and new_tokens:
            raise ValueError('Adding regular tokens is not supported')
        for token in new_tokens:
            surface_form = token.content if isinstance(token, AddedToken) else token
            # image-related tags count as known special tokens as well
            if surface_form not in SPECIAL_TOKENS + self.IMAGE_ST:
                raise ValueError('Adding unknown special tokens is not supported')
        return 0

    def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
        """
        Save only the vocabulary of the tokenizer (the BPE ranks; special
        tokens are reconstructed from the module-level constants on load).

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        file_path = os.path.join(save_directory, "qwen.tiktoken")
        with open(file_path, "w", encoding="utf8") as w:
            for k, v in self.mergeable_ranks.items():
                line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
                w.write(line)
        return (file_path,)
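
    # e.g. tokenizer.save_vocabulary("/tmp") writes "/tmp/qwen.tiktoken" in the
    # same "<base64 token> <rank>" format read by _load_tiktoken_bpe and
    # returns ("/tmp/qwen.tiktoken",). (The directory is illustrative.)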

    def tokenize(
        self,
        text: str,
        allowed_special: Union[Set, str] = "all",
        disallowed_special: Union[Collection, str] = (),
        **kwargs,
    ) -> List[Union[bytes, str]]:
        """
        Converts a string into a sequence of tokens.

        Args:
            text (`str`):
                The sequence to be encoded.
            allowed_special (`Literal["all"]` or `set`):
                The surface forms of the tokens to be encoded as special tokens in regular texts.
                Defaults to "all".
            disallowed_special (`Literal["all"]` or `Collection`):
                The surface forms of the tokens that should not appear in regular texts and that
                trigger errors when they do. Defaults to an empty tuple.

            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific encode method.

        Returns:
            `List[bytes|str]`: The list of tokens.
        """
        tokens = []
        text = unicodedata.normalize("NFC", text)

        # BPE tokens come back as raw bytes, special tokens as strings.
        for t in self.tokenizer.encode(
            text, allowed_special=allowed_special, disallowed_special=disallowed_special
        ):
            tokens.append(self.decoder[t])
        return tokens
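
    # With the defaults, a special token's surface form in the input is encoded
    # as that single special token (illustrative; exact byte-level splits
    # depend on the vocabulary):
    #
    #     tokenizer.tokenize("hi<|endoftext|>")  ->  [b"hi", "<|endoftext|>"]
    #
    # whereas tokenize(text, allowed_special=set(), disallowed_special="all")
    # raises a ValueError on the same input.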

    def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
        """
        Converts a sequence of tokens into a single string.
        """
        text = ""
        temp = b""
        for t in tokens:
            if isinstance(t, str):
                if temp:
                    text += temp.decode("utf-8", errors=self.errors)
                    temp = b""
                text += t
            elif isinstance(t, bytes):
                temp += t
            else:
                raise TypeError("token should only be of type bytes or str")
        if temp:
            text += temp.decode("utf-8", errors=self.errors)
        return text
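
    # e.g. [b"Hello", b" world", "<|endoftext|>"] -> "Hello world<|endoftext|>".
    # Consecutive byte tokens are buffered before decoding so that a multi-byte
    # UTF-8 character split across tokens still decodes correctly; invalid runs
    # fall back to the `errors` policy.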

    @property
    def vocab_size(self):
        return self.tokenizer.n_vocab

    def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
        """Converts an id to a token, special tokens included"""
        if index in self.decoder:
            return self.decoder[index]
        raise ValueError(f"unknown id: {index}")

    def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
        """Converts a token to an id using the vocab, special tokens included"""
        if token in self.special_tokens:
            return self.special_tokens[token]
        if token in self.mergeable_ranks:
            return self.mergeable_ranks[token]
        raise ValueError(f"unknown token: {token!r}")

    def _tokenize(self, text: str, **kwargs):
        """
        Converts a string into a sequence of tokens (string), using the tokenizer.
        Splits into words for word-based vocabularies or into sub-words for
        sub-word-based vocabularies (BPE/SentencePiece/WordPiece).

        Does NOT take care of added tokens.
        """
        raise NotImplementedError

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        errors: str = None,
        **kwargs,
    ) -> str:
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        if skip_special_tokens:
            # every special-token id is >= self.eod_id (the first id after the
            # BPE ranks), so this filter drops all special tokens
            token_ids = [i for i in token_ids if i < self.eod_id]
        return self.tokenizer.decode(token_ids, errors=errors or self.errors)
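
# A minimal usage sketch (the vocabulary path is illustrative; pass the real
# qwen.tiktoken file shipped with the model):
#
#     tokenizer = QWenTokenizer("qwen.tiktoken")
#     tokens = tokenizer.tokenize("hello world")
#     ids = tokenizer.convert_tokens_to_ids(tokens)
#     text = tokenizer.decode(ids)  # decode() on PreTrainedTokenizer dispatches to _decode()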