exdysa committed on
Commit fe1b0c6 · verified · 1 Parent(s): 00b7d08

Delete tokenization_qwen2.py

Files changed (1)
  1. tokenization_qwen2.py +0 -342
tokenization_qwen2.py DELETED
@@ -1,342 +0,0 @@
- # coding=utf-8
- # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Tokenization classes for Qwen2."""
-
- import json
- import os
- import unicodedata
- from functools import lru_cache
- from typing import Optional, Tuple
-
- import regex as re
-
- from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
- from transformers.utils import logging
-
-
- logger = logging.get_logger(__name__)
-
- VOCAB_FILES_NAMES = {
-     "vocab_file": "vocab.json",
-     "merges_file": "merges.txt",
- }
-
-
- MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}
-
- PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
-
-
- @lru_cache()
- # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
- def bytes_to_unicode():
-     """
-     Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
-     characters the bpe code barfs on.
-
-     The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
-     if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
-     decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
-     tables between utf-8 bytes and unicode strings.
-     """
-     bs = (
-         list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
-     )
-     cs = bs[:]
-     n = 0
-     for b in range(2**8):
-         if b not in bs:
-             bs.append(b)
-             cs.append(2**8 + n)
-             n += 1
-     cs = [chr(n) for n in cs]
-     return dict(zip(bs, cs))
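A minimal sketch of how this byte↔unicode table is used, added for illustration only (not part of the deleted file): every possible byte gets a printable stand-in character, so byte-level BPE can round-trip arbitrary UTF-8 text without unknown tokens.

```python
byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for k, v in byte_encoder.items()}

# Map raw UTF-8 bytes to printable stand-ins, e.g. 0x20 (space) becomes "Ġ".
mapped = "".join(byte_encoder[b] for b in " café".encode("utf-8"))
# Reverse the mapping and decode back to the original string.
restored = bytearray([byte_decoder[c] for c in mapped]).decode("utf-8")
assert restored == " café"
```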
-
-
- # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
- def get_pairs(word):
-     """
-     Return set of symbol pairs in a word.
-
-     Word is represented as tuple of symbols (symbols being variable-length strings).
-     """
-     pairs = set()
-     prev_char = word[0]
-     for char in word[1:]:
-         pairs.add((prev_char, char))
-         prev_char = char
-     return pairs
-
-
- class Qwen2Tokenizer(PreTrainedTokenizer):
-     """
-     Construct a Qwen2 tokenizer. Based on byte-level Byte-Pair-Encoding.
-
-     As with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens, so a word will
-     be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
-
-     ```python
-     >>> from transformers import Qwen2Tokenizer
-
-     >>> tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer")
-     >>> tokenizer("Hello world")["input_ids"]
-     [9707, 1879]
-
-     >>> tokenizer(" Hello world")["input_ids"]
-     [21927, 1879]
-     ```
-     This is expected.
-
-     You should not use GPT2Tokenizer instead, because of the different pretokenization rules.
-
-     This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
-     this superclass for more information regarding those methods.
-
-     Args:
-         vocab_file (`str`):
-             Path to the vocabulary file.
-         merges_file (`str`):
-             Path to the merges file.
-         errors (`str`, *optional*, defaults to `"replace"`):
-             Paradigm to follow when decoding bytes to UTF-8. See
-             [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
-         unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
-             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
-             token instead.
-         bos_token (`str`, *optional*):
-             The beginning of sequence token. Not applicable for this tokenizer.
-         eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
-             The end of sequence token.
-         pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
-             The token used for padding, for example when batching sequences of different lengths.
-         clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
-             Whether or not the model should clean up the spaces that were added when splitting the input text during the
-             tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
-         split_special_tokens (`bool`, *optional*, defaults to `False`):
-             Whether or not the special tokens should be split during the tokenization process. The default behavior is
-             to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then
-             `tokenizer.tokenize("<|endoftext|>")` = `['<|endoftext|>']`. Otherwise, if `split_special_tokens=True`, then
-             `tokenizer.tokenize("<|endoftext|>")` will give `['<', '|', 'endo', 'ft', 'ext', '|', '>']`. This argument is
-             only supported for `slow` tokenizers for the moment.
-     """
-
-     vocab_files_names = VOCAB_FILES_NAMES
-     model_input_names = ["input_ids", "attention_mask"]
-
-     def __init__(
-         self,
-         vocab_file,
-         merges_file,
-         errors="replace",
-         unk_token="<|endoftext|>",
-         bos_token=None,
-         eos_token="<|endoftext|>",
-         pad_token="<|endoftext|>",
-         clean_up_tokenization_spaces=False,
-         split_special_tokens=False,
-         **kwargs,
-     ):
-         # Qwen vocab does not contain control tokens; added tokens need to be special
-         bos_token = (
-             AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
-             if isinstance(bos_token, str)
-             else bos_token
-         )
-         eos_token = (
-             AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
-             if isinstance(eos_token, str)
-             else eos_token
-         )
-         unk_token = (
-             AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
-             if isinstance(unk_token, str)
-             else unk_token
-         )
-         pad_token = (
-             AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
-             if isinstance(pad_token, str)
-             else pad_token
-         )
-
-         with open(vocab_file, encoding="utf-8") as vocab_handle:
-             self.encoder = json.load(vocab_handle)
-         self.decoder = {v: k for k, v in self.encoder.items()}
-         self.errors = errors  # how to handle errors in decoding
-         self.byte_encoder = bytes_to_unicode()
-         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
-         bpe_merges = []
-         with open(merges_file, encoding="utf-8") as merges_handle:
-             for i, line in enumerate(merges_handle):
-                 line = line.strip()
-                 if (i == 0 and line.startswith("#version:")) or not line:
-                     continue
-                 bpe_merges.append(tuple(line.split()))
-         self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
-         # NOTE: the cache can grow without bound and will get really large for long running processes
-         # (esp. for texts in languages that do not use spaces between words, e.g. Chinese); technically
-         # not a memory leak but appears as one.
-         # GPT2Tokenizer has the same problem, so let's be consistent.
-         self.cache = {}
-
-         self.pat = re.compile(PRETOKENIZE_REGEX)
-
-         if kwargs.get("add_prefix_space", False):
-             logger.warning_once(
-                 f"{self.__class__.__name__} does not support `add_prefix_space`, setting it to True has no effect."
-             )
-
-         super().__init__(
-             errors=errors,
-             bos_token=bos_token,
-             eos_token=eos_token,
-             pad_token=pad_token,
-             unk_token=unk_token,
-             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-             split_special_tokens=split_special_tokens,
-             **kwargs,
-         )
-
-     @property
-     def vocab_size(self) -> int:
-         return len(self.encoder)
-
-     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
-     def get_vocab(self):
-         return dict(self.encoder, **self.added_tokens_encoder)
-
-     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
-     def bpe(self, token):
-         if token in self.cache:
-             return self.cache[token]
-         word = tuple(token)
-         pairs = get_pairs(word)
-
-         if not pairs:
-             return token
-
-         while True:
-             bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
-             if bigram not in self.bpe_ranks:
-                 break
-             first, second = bigram
-             new_word = []
-             i = 0
-             while i < len(word):
-                 try:
-                     j = word.index(first, i)
-                 except ValueError:
-                     new_word.extend(word[i:])
-                     break
-                 else:
-                     new_word.extend(word[i:j])
-                     i = j
-
-                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
-                     new_word.append(first + second)
-                     i += 2
-                 else:
-                     new_word.append(word[i])
-                     i += 1
-             new_word = tuple(new_word)
-             word = new_word
-             if len(word) == 1:
-                 break
-             else:
-                 pairs = get_pairs(word)
-         word = " ".join(word)
-         self.cache[token] = word
-         return word
-
-     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
-     def _tokenize(self, text):
-         """Tokenize a string."""
-         bpe_tokens = []
-         for token in re.findall(self.pat, text):
-             token = "".join(
-                 self.byte_encoder[b] for b in token.encode("utf-8")
-             )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
-             bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
-         return bpe_tokens
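A minimal sketch, added for illustration only (not part of the deleted file), of the pre-tokenization step that `_tokenize` applies before byte-level BPE, using the module's `PRETOKENIZE_REGEX` copied verbatim from above. Note that leading spaces stay attached to the following word, and that the bare `\p{N}` alternative splits runs of digits into single-digit tokens.

```python
import regex as re

PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
pat = re.compile(PRETOKENIZE_REGEX)

# The space is kept on " world", which is why "Hello" and " Hello" encode to
# different ids in the class docstring example.
print(pat.findall("Hello world!"))  # ['Hello', ' world', '!']
```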
-
-     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
-     def _convert_token_to_id(self, token):
-         """Converts a token (str) to an id using the vocab."""
-         return self.encoder.get(token, self.encoder.get(self.unk_token))
-
-     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
-     def _convert_id_to_token(self, index):
-         """Converts an index (integer) to a token (str) using the vocab."""
-         return self.decoder.get(index)
-
-     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
-     def convert_tokens_to_string(self, tokens):
-         """Converts a sequence of tokens (string) to a single string."""
-         text = "".join(tokens)
-         text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
-         return text
-
-     def decode(
-         self,
-         token_ids,
-         skip_special_tokens: bool = False,
-         clean_up_tokenization_spaces: Optional[bool] = False,
-         spaces_between_special_tokens: bool = False,
-         **kwargs,
-     ) -> str:
-         # `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers
-         # and cannot be configured elsewhere, but it should default to False for Qwen2Tokenizer
-         return super().decode(
-             token_ids,
-             skip_special_tokens=skip_special_tokens,
-             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-             spaces_between_special_tokens=spaces_between_special_tokens,
-             **kwargs,
-         )
-
-     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
-     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
-         if not os.path.isdir(save_directory):
-             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
-             return
-         vocab_file = os.path.join(
-             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
-         )
-         merge_file = os.path.join(
-             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
-         )
-
-         with open(vocab_file, "w", encoding="utf-8") as f:
-             f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
-
-         index = 0
-         with open(merge_file, "w", encoding="utf-8") as writer:
-             writer.write("#version: 0.2\n")
-             for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
-                 if index != token_index:
-                     logger.warning(
-                         f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
-                         " Please check that the tokenizer is not corrupted!"
-                     )
-                     index = token_index
-                 writer.write(" ".join(bpe_tokens) + "\n")
-                 index += 1
-
-         return vocab_file, merge_file
-
-     def prepare_for_tokenization(self, text, **kwargs):
-         text = unicodedata.normalize("NFC", text)
-         return (text, kwargs)
-
-
- __all__ = ["Qwen2Tokenizer"]
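For reference, the deleted class's own docstring documents the following usage; this is a minimal round-trip sketch based on that example (checkpoint name and ids are taken from the docstring, not verified here).

```python
from transformers import Qwen2Tokenizer

tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer")
ids = tokenizer("Hello world")["input_ids"]   # [9707, 1879] per the docstring above
print(tokenizer.decode(ids))                  # "Hello world"
```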