shivavardhineedi committed
Commit 25f9e25 · verified · 1 Parent(s): c96f668

Delete tokenization_internlm2.py

Files changed (1):
  1. tokenization_internlm2.py +0 -235
tokenization_internlm2.py DELETED
@@ -1,235 +0,0 @@
- # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
- #
- # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """Tokenization classes for InternLM."""
- import os
- from shutil import copyfile
- from typing import Any, Dict, List, Optional, Tuple
-
- import sentencepiece as spm
- from transformers.tokenization_utils import PreTrainedTokenizer
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
- VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
-
- PRETRAINED_VOCAB_FILES_MAP = {}
-
-
- # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
- class InternLM2Tokenizer(PreTrainedTokenizer):
-     """
-     Construct an InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
-
-     Args:
-         vocab_file (`str`):
-             Path to the vocabulary file.
-     """
-
-     vocab_files_names = VOCAB_FILES_NAMES
-     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
-     model_input_names = ['input_ids', 'attention_mask']
-     _auto_class = 'AutoTokenizer'
-
-     def __init__(
-         self,
-         vocab_file,
-         unk_token='<unk>',
-         bos_token='<s>',
-         eos_token='</s>',
-         pad_token='</s>',
-         sp_model_kwargs: Optional[Dict[str, Any]] = None,
-         add_bos_token=True,
-         add_eos_token=False,
-         decode_with_prefix_space=False,
-         clean_up_tokenization_spaces=False,
-         **kwargs,
-     ):
-         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-         self.vocab_file = vocab_file
-         self.add_bos_token = add_bos_token
-         self.add_eos_token = add_eos_token
-         self.decode_with_prefix_space = decode_with_prefix_space
-         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-         self.sp_model.Load(vocab_file)
-         self._no_prefix_space_tokens = None
-         super().__init__(
-             bos_token=bos_token,
-             eos_token=eos_token,
-             unk_token=unk_token,
-             pad_token=pad_token,
-             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-             **kwargs,
-         )
-
-     @property
-     def no_prefix_space_tokens(self):
-         if self._no_prefix_space_tokens is None:
-             vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
-             self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
-         return self._no_prefix_space_tokens
-
-     @property
-     def vocab_size(self):
-         """Returns vocab size"""
-         return self.sp_model.get_piece_size()
-
-     @property
-     def bos_token_id(self) -> Optional[int]:
-         return self.sp_model.bos_id()
-
-     @property
-     def eos_token_id(self) -> Optional[int]:
-         return self.sp_model.eos_id()
-
-     def get_vocab(self):
-         """Returns vocab as a dict"""
-         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
-         vocab.update(self.added_tokens_encoder)
-         return vocab
-
-     def _tokenize(self, text):
-         """Returns a tokenized string."""
-         return self.sp_model.encode(text, out_type=str)
-
-     def _convert_token_to_id(self, token):
-         """Converts a token (str) to an id using the vocab."""
-         return self.sp_model.piece_to_id(token)
-
-     def _convert_id_to_token(self, index):
-         """Converts an index (integer) to a token (str) using the vocab."""
-         token = self.sp_model.IdToPiece(index)
-         return token
-
-     def _maybe_add_prefix_space(self, tokens, decoded):
-         if tokens and tokens[0] not in self.no_prefix_space_tokens:
-             return ' ' + decoded
-         else:
-             return decoded
-
-     def convert_tokens_to_string(self, tokens):
-         """Converts a sequence of tokens (strings) into a single string."""
-         current_sub_tokens = []
-         out_string = ''
-         prev_is_special = False
-         for token in tokens:
-             # make sure that special tokens are not decoded using sentencepiece model
-             if token in self.all_special_tokens:
-                 if not prev_is_special:
-                     out_string += ' '
-                 out_string += self.sp_model.decode(current_sub_tokens) + token
-                 prev_is_special = True
-                 current_sub_tokens = []
-             else:
-                 current_sub_tokens.append(token)
-                 prev_is_special = False
-         out_string += self.sp_model.decode(current_sub_tokens)
-         out_string = self.clean_up_tokenization(out_string)
-         out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
-         return out_string[1:]
-
-     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
-         """
-         Save the vocabulary and special tokens file to a directory.
-
-         Args:
-             save_directory (`str`):
-                 The directory in which to save the vocabulary.
-
-         Returns:
-             `Tuple[str]`: Paths to the files saved.
-         """
-         if not os.path.isdir(save_directory):
-             logger.error(f'Vocabulary path ({save_directory}) should be a directory')
-             return
-         out_vocab_file = os.path.join(
-             save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
-         )
-
-         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
-             copyfile(self.vocab_file, out_vocab_file)
-         elif not os.path.isfile(self.vocab_file):
-             with open(out_vocab_file, 'wb') as fi:
-                 content_spiece_model = self.sp_model.serialized_model_proto()
-                 fi.write(content_spiece_model)
-
-         return (out_vocab_file,)
-
-     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-         if self.add_bos_token:
-             bos_token_ids = [self.bos_token_id]
-         else:
-             bos_token_ids = []
-
-         output = bos_token_ids + token_ids_0
-
-         if token_ids_1 is not None:
-             output = output + token_ids_1
-
-         if self.add_eos_token:
-             output = output + [self.eos_token_id]
-
-         return output
-
-     def get_special_tokens_mask(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
-     ) -> List[int]:
-         """
-         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
-         special tokens using the tokenizer `prepare_for_model` method.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                 Whether or not the token list is already formatted with special tokens for the model.
-
-         Returns:
-             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-         """
-         if already_has_special_tokens:
-             return super().get_special_tokens_mask(
-                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
-             )
-
-         if token_ids_1 is None:
-             return [1] + ([0] * len(token_ids_0)) + [1]
-         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
-
-     def create_token_type_ids_from_sequences(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
-     ) -> List[int]:
-         """
-         Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM2 does
-         not make use of token type ids, therefore a list of zeros is returned.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-
-         Returns:
-             `List[int]`: List of zeros.
-         """
-         eos = [self.eos_token_id]
-
-         if token_ids_1 is None:
-             return len(token_ids_0 + eos) * [0]
-         return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
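
Since the deleted class set _auto_class = 'AutoTokenizer', a repo carrying this file was meant to be loaded through transformers' remote-code path rather than imported directly. For context, a minimal usage sketch; the repo id below is a hypothetical placeholder, not taken from this commit:

# Minimal sketch, assuming a repo that still ships tokenization_internlm2.py
# together with its tokenizer.model vocab file.
from transformers import AutoTokenizer

# trust_remote_code=True tells transformers to import the custom
# InternLM2Tokenizer class from the repo instead of a built-in tokenizer.
tokenizer = AutoTokenizer.from_pretrained(
    'your-org/your-internlm2-repo',  # hypothetical placeholder repo id
    trust_remote_code=True,
)

# add_bos_token defaults to True, so build_inputs_with_special_tokens
# prepends <s> to the encoded ids.
ids = tokenizer('Hello, world!')['input_ids']
print(ids)
print(tokenizer.decode(ids))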