seshing committed
Commit 708c479 · verified · 1 Parent(s): 345097f

Delete checkpoint-1400
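
For reference, a folder-level deletion like this can also be scripted. A minimal sketch using `huggingface_hub` (the repo id and token handling below are illustrative assumptions, not taken from this commit):

```python
# Hypothetical sketch: remove a training checkpoint folder from a Hub repo
# in a single commit. The repo id below is a placeholder for illustration.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.delete_folder(
    path_in_repo="checkpoint-1400",      # folder to delete
    repo_id="seshing/REPO_NAME",         # placeholder repo id
    repo_type="model",
    commit_message="Delete checkpoint-1400",
)
```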

checkpoint-1400/added_tokens.json DELETED
@@ -1,11 +0,0 @@
- {
-   "</box>": 92552,
-   "</img>": 92545,
-   "</quad>": 92548,
-   "</ref>": 92550,
-   "<IMG_CONTEXT>": 92546,
-   "<box>": 92551,
-   "<img>": 92544,
-   "<quad>": 92547,
-   "<ref>": 92549
- }
 
checkpoint-1400/config.json DELETED
@@ -1,204 +0,0 @@
- {
-   "_commit_hash": null,
-   "_name_or_path": "/public/home/zhaotianhong/xiucheng/models/InternVL/InternVL2_5-2B",
-   "architectures": [
-     "InternVLChatModel"
-   ],
-   "auto_map": {
-     "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
-     "AutoModel": "modeling_internvl_chat.InternVLChatModel",
-     "AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
-   },
-   "downsample_ratio": 0.5,
-   "dynamic_image_size": true,
-   "force_image_size": 448,
-   "hidden_size": 2048,
-   "llm_config": {
-     "_name_or_path": "internlm/internlm2_5-1_8b-chat",
-     "add_cross_attention": false,
-     "architectures": [
-       "InternLM2ForCausalLM"
-     ],
-     "attn_implementation": "flash_attention_2",
-     "auto_map": {
-       "AutoConfig": "configuration_internlm2.InternLM2Config",
-       "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
-       "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM",
-       "AutoModelForSequenceClassification": "modeling_internlm2.InternLM2ForSequenceClassification"
-     },
-     "bad_words_ids": null,
-     "begin_suppress_tokens": null,
-     "bias": false,
-     "bos_token_id": 1,
-     "chunk_size_feed_forward": 0,
-     "cross_attention_hidden_size": null,
-     "decoder_start_token_id": null,
-     "diversity_penalty": 0.0,
-     "do_sample": false,
-     "early_stopping": false,
-     "encoder_no_repeat_ngram_size": 0,
-     "eos_token_id": 2,
-     "exponential_decay_length_penalty": null,
-     "finetuning_task": null,
-     "forced_bos_token_id": null,
-     "forced_eos_token_id": null,
-     "hidden_act": "silu",
-     "hidden_size": 2048,
-     "id2label": {
-       "0": "LABEL_0",
-       "1": "LABEL_1"
-     },
-     "initializer_range": 0.02,
-     "intermediate_size": 8192,
-     "is_decoder": false,
-     "is_encoder_decoder": false,
-     "label2id": {
-       "LABEL_0": 0,
-       "LABEL_1": 1
-     },
-     "length_penalty": 1.0,
-     "max_length": 20,
-     "max_position_embeddings": 32768,
-     "min_length": 0,
-     "model_type": "internlm2",
-     "no_repeat_ngram_size": 0,
-     "num_attention_heads": 16,
-     "num_beam_groups": 1,
-     "num_beams": 1,
-     "num_hidden_layers": 24,
-     "num_key_value_heads": 8,
-     "num_return_sequences": 1,
-     "output_attentions": false,
-     "output_hidden_states": false,
-     "output_scores": false,
-     "pad_token_id": 2,
-     "prefix": null,
-     "pretraining_tp": 1,
-     "problem_type": null,
-     "pruned_heads": {},
-     "remove_invalid_values": false,
-     "repetition_penalty": 1.0,
-     "return_dict": true,
-     "return_dict_in_generate": false,
-     "rms_norm_eps": 1e-05,
-     "rope_scaling": {
-       "factor": 2.0,
-       "type": "dynamic"
-     },
-     "rope_theta": 1000000,
-     "sep_token_id": null,
-     "suppress_tokens": null,
-     "task_specific_params": null,
-     "temperature": 1.0,
-     "tf_legacy_loss": false,
-     "tie_encoder_decoder": false,
-     "tie_word_embeddings": false,
-     "tokenizer_class": null,
-     "top_k": 50,
-     "top_p": 1.0,
-     "torch_dtype": "bfloat16",
-     "torchscript": false,
-     "transformers_version": "4.37.2",
-     "typical_p": 1.0,
-     "use_bfloat16": true,
-     "use_cache": false,
-     "vocab_size": 92553
-   },
-   "max_dynamic_patch": 6,
-   "min_dynamic_patch": 1,
-   "model_type": "internvl_chat",
-   "pad2square": false,
-   "ps_version": "v2",
-   "select_layer": -1,
-   "template": "internvl2_5",
-   "tie_word_embeddings": false,
-   "torch_dtype": "bfloat16",
-   "transformers_version": null,
-   "use_backbone_lora": 0,
-   "use_llm_lora": 0,
-   "use_thumbnail": true,
-   "vision_config": {
-     "_name_or_path": "",
-     "add_cross_attention": false,
-     "architectures": [
-       "InternVisionModel"
-     ],
-     "attention_dropout": 0.0,
-     "bad_words_ids": null,
-     "begin_suppress_tokens": null,
-     "bos_token_id": null,
-     "chunk_size_feed_forward": 0,
-     "cross_attention_hidden_size": null,
-     "decoder_start_token_id": null,
-     "diversity_penalty": 0.0,
-     "do_sample": false,
-     "drop_path_rate": 0.1,
-     "dropout": 0.0,
-     "early_stopping": false,
-     "encoder_no_repeat_ngram_size": 0,
-     "eos_token_id": null,
-     "exponential_decay_length_penalty": null,
-     "finetuning_task": null,
-     "forced_bos_token_id": null,
-     "forced_eos_token_id": null,
-     "hidden_act": "gelu",
-     "hidden_size": 1024,
-     "id2label": {
-       "0": "LABEL_0",
-       "1": "LABEL_1"
-     },
-     "image_size": 448,
-     "initializer_factor": 1.0,
-     "initializer_range": 0.02,
-     "intermediate_size": 4096,
-     "is_decoder": false,
-     "is_encoder_decoder": false,
-     "label2id": {
-       "LABEL_0": 0,
-       "LABEL_1": 1
-     },
-     "layer_norm_eps": 1e-06,
-     "length_penalty": 1.0,
-     "max_length": 20,
-     "min_length": 0,
-     "model_type": "intern_vit_6b",
-     "no_repeat_ngram_size": 0,
-     "norm_type": "layer_norm",
-     "num_attention_heads": 16,
-     "num_beam_groups": 1,
-     "num_beams": 1,
-     "num_channels": 3,
-     "num_hidden_layers": 24,
-     "num_return_sequences": 1,
-     "output_attentions": false,
-     "output_hidden_states": false,
-     "output_scores": false,
-     "pad_token_id": null,
-     "patch_size": 14,
-     "prefix": null,
-     "problem_type": null,
-     "pruned_heads": {},
-     "qk_normalization": false,
-     "qkv_bias": true,
-     "remove_invalid_values": false,
-     "repetition_penalty": 1.0,
-     "return_dict": true,
-     "return_dict_in_generate": false,
-     "sep_token_id": null,
-     "suppress_tokens": null,
-     "task_specific_params": null,
-     "temperature": 1.0,
-     "tf_legacy_loss": false,
-     "tie_encoder_decoder": false,
-     "tie_word_embeddings": true,
-     "tokenizer_class": null,
-     "top_k": 50,
-     "top_p": 1.0,
-     "torch_dtype": "bfloat16",
-     "torchscript": false,
-     "transformers_version": "4.37.2",
-     "typical_p": 1.0,
-     "use_bfloat16": true,
-     "use_flash_attn": true
-   }
- }
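
Since `auto_map` points at the custom `configuration_internvl_chat`/`modeling_internvl_chat` classes shipped inside the checkpoint, loading it requires `trust_remote_code=True`. A minimal loading sketch, assuming the checkpoint directory still exists locally (the path is illustrative):

```python
# Sketch: load the InternVL chat checkpoint with its custom classes.
# trust_remote_code=True is required because config.json's auto_map
# resolves to Python files stored alongside the weights.
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "checkpoint-1400",            # illustrative local path
    torch_dtype=torch.bfloat16,   # matches "torch_dtype": "bfloat16"
    trust_remote_code=True,
).eval()
```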
 
checkpoint-1400/generation_config.json DELETED
@@ -1,8 +0,0 @@
- {
-   "_from_model_config": true,
-   "eos_token_id": [
-     92542,
-     92543
-   ],
-   "transformers_version": "4.37.2"
- }
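
The two `eos_token_id` values are the chat markers `<|im_end|>` (92542) and `<|im_start|>` (92543) from the tokenizer config below, so decoding stops at a turn boundary. A hedged sketch of how this config is consumed (assuming `model` and `input_ids` are already prepared):

```python
# Sketch: either stop id from generation_config.json ends decoding.
# `model` and `input_ids` are assumed to be set up elsewhere.
outputs = model.generate(
    input_ids,
    max_new_tokens=256,
    eos_token_id=[92542, 92543],  # <|im_end|>, <|im_start|>
)
```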
 
checkpoint-1400/global_step1400/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:29c94675d051e534574eca59a4a84c26c365223402a13853f5a12c07be3760e8
- size 5293842032
 
checkpoint-1400/global_step1400/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b55ffa87942e5b918f00ba7c84d7d7bd20b8fd3ad17c236f82ac2dd331d99af7
- size 5293817968
 
checkpoint-1400/global_step1400/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:2d3547ab815a0ab756adbd8a8e9b66925b594ff694e502bf58b39eeed0f6bbde
- size 5293818480
 
checkpoint-1400/global_step1400/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ca6c9b058c76eff26a12bb6e328f5c2b414abd08f48aa1a6a44df4c6ba6de9e5
- size 5293818608
 
checkpoint-1400/global_step1400/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:fdac2165ade06b38f4f48c450d03e08b09032d0a491219f411497fe93522680a
- size 5293816816
 
checkpoint-1400/global_step1400/mp_rank_00_model_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:549762e45e57acde57b09cb54107d3f10e2f13c102de68452ba10247177c8462
- size 4411668832
 
checkpoint-1400/latest DELETED
@@ -1 +0,0 @@
- global_step1400
 
checkpoint-1400/model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:49e1704f0c78d9e40960ff4ad04c4ed1a0b0f3c4f3419e98ca1a5c841b939e46
- size 4411571040
 
checkpoint-1400/rng_state_0.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:bae2b7a8cf5b14fa79bec830965bdad1cd513b9022edd43644c31c23e095e029
- size 15280
 
checkpoint-1400/rng_state_1.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:512cb3b948f5f4c598548cce0bd2d4fc90f61437aa724b27cc9acf1fb960d43e
- size 15280
 
checkpoint-1400/rng_state_2.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:05f893897ee6855ebf07e2d9adf30b3f17caee0f3b895d7ba812e59fb5246db4
- size 15280
 
checkpoint-1400/rng_state_3.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:422e5e92c980fde9db19c7bb24451db095242912545b40b3df1d7bda5a8eb804
- size 15280
 
checkpoint-1400/rng_state_4.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4fb13f7db979e35662c730c8b97b8b5ffde3660fe8c9353c490cc45be9ba8501
- size 15280
 
checkpoint-1400/special_tokens_map.json DELETED
@@ -1,47 +0,0 @@
- {
-   "additional_special_tokens": [
-     "<|im_start|>",
-     "<|im_end|>",
-     "<|action_start|>",
-     "<|action_end|>",
-     "<|interpreter|>",
-     "<|plugin|>",
-     "<img>",
-     "</img>",
-     "<IMG_CONTEXT>",
-     "<quad>",
-     "</quad>",
-     "<ref>",
-     "</ref>",
-     "<box>",
-     "</box>"
-   ],
-   "bos_token": {
-     "content": "<s>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "eos_token": {
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "pad_token": {
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "unk_token": {
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   }
- }
 
checkpoint-1400/tokenization_internlm2.py DELETED
@@ -1,235 +0,0 @@
- # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
- #
- # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """Tokenization classes for InternLM."""
- import os
- from shutil import copyfile
- from typing import Any, Dict, List, Optional, Tuple
-
- import sentencepiece as spm
- from transformers.tokenization_utils import PreTrainedTokenizer
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
- VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
-
- PRETRAINED_VOCAB_FILES_MAP = {}
-
-
- # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
- class InternLM2Tokenizer(PreTrainedTokenizer):
-     """
-     Construct an InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
-
-     Args:
-         vocab_file (`str`):
-             Path to the vocabulary file.
-     """
-
-     vocab_files_names = VOCAB_FILES_NAMES
-     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
-     model_input_names = ['input_ids', 'attention_mask']
-     _auto_class = 'AutoTokenizer'
-
-     def __init__(
-         self,
-         vocab_file,
-         unk_token='<unk>',
-         bos_token='<s>',
-         eos_token='</s>',
-         pad_token='</s>',
-         sp_model_kwargs: Optional[Dict[str, Any]] = None,
-         add_bos_token=True,
-         add_eos_token=False,
-         decode_with_prefix_space=False,
-         clean_up_tokenization_spaces=False,
-         **kwargs,
-     ):
-         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-         self.vocab_file = vocab_file
-         self.add_bos_token = add_bos_token
-         self.add_eos_token = add_eos_token
-         self.decode_with_prefix_space = decode_with_prefix_space
-         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-         self.sp_model.Load(vocab_file)
-         self._no_prefix_space_tokens = None
-         super().__init__(
-             bos_token=bos_token,
-             eos_token=eos_token,
-             unk_token=unk_token,
-             pad_token=pad_token,
-             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-             **kwargs,
-         )
-
-     @property
-     def no_prefix_space_tokens(self):
-         if self._no_prefix_space_tokens is None:
-             vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
-             self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
-         return self._no_prefix_space_tokens
-
-     @property
-     def vocab_size(self):
-         """Returns vocab size"""
-         return self.sp_model.get_piece_size()
-
-     @property
-     def bos_token_id(self) -> Optional[int]:
-         return self.sp_model.bos_id()
-
-     @property
-     def eos_token_id(self) -> Optional[int]:
-         return self.sp_model.eos_id()
-
-     def get_vocab(self):
-         """Returns vocab as a dict"""
-         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
-         vocab.update(self.added_tokens_encoder)
-         return vocab
-
-     def _tokenize(self, text):
-         """Returns a tokenized string."""
-         return self.sp_model.encode(text, out_type=str)
-
-     def _convert_token_to_id(self, token):
-         """Converts a token (str) to an id using the vocab."""
-         return self.sp_model.piece_to_id(token)
-
-     def _convert_id_to_token(self, index):
-         """Converts an index (integer) to a token (str) using the vocab."""
-         token = self.sp_model.IdToPiece(index)
-         return token
-
-     def _maybe_add_prefix_space(self, tokens, decoded):
-         if tokens and tokens[0] not in self.no_prefix_space_tokens:
-             return ' ' + decoded
-         else:
-             return decoded
-
-     def convert_tokens_to_string(self, tokens):
-         """Converts a sequence of tokens (string) to a single string."""
-         current_sub_tokens = []
-         out_string = ''
-         prev_is_special = False
-         for token in tokens:
-             # make sure that special tokens are not decoded using sentencepiece model
-             if token in self.all_special_tokens:
-                 if not prev_is_special:
-                     out_string += ' '
-                 out_string += self.sp_model.decode(current_sub_tokens) + token
-                 prev_is_special = True
-                 current_sub_tokens = []
-             else:
-                 current_sub_tokens.append(token)
-                 prev_is_special = False
-         out_string += self.sp_model.decode(current_sub_tokens)
-         out_string = self.clean_up_tokenization(out_string)
-         out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
-         return out_string[1:]
-
-     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
-         """
-         Save the vocabulary and special tokens file to a directory.
-
-         Args:
-             save_directory (`str`):
-                 The directory in which to save the vocabulary.
-
-         Returns:
-             `Tuple(str)`: Paths to the files saved.
-         """
-         if not os.path.isdir(save_directory):
-             logger.error(f'Vocabulary path ({save_directory}) should be a directory')
-             return
-         out_vocab_file = os.path.join(
-             save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
-         )
-
-         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
-             copyfile(self.vocab_file, out_vocab_file)
-         elif not os.path.isfile(self.vocab_file):
-             with open(out_vocab_file, 'wb') as fi:
-                 content_spiece_model = self.sp_model.serialized_model_proto()
-                 fi.write(content_spiece_model)
-
-         return (out_vocab_file,)
-
-     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-         if self.add_bos_token:
-             bos_token_ids = [self.bos_token_id]
-         else:
-             bos_token_ids = []
-
-         output = bos_token_ids + token_ids_0
-
-         if token_ids_1 is not None:
-             output = output + token_ids_1
-
-         if self.add_eos_token:
-             output = output + [self.eos_token_id]
-
-         return output
-
-     def get_special_tokens_mask(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
-     ) -> List[int]:
-         """
-         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
-         special tokens using the tokenizer `prepare_for_model` method.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                 Whether or not the token list is already formatted with special tokens for the model.
-
-         Returns:
-             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-         """
-         if already_has_special_tokens:
-             return super().get_special_tokens_mask(
-                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
-             )
-
-         if token_ids_1 is None:
-             return [1] + ([0] * len(token_ids_0)) + [1]
-         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
-
-     def create_token_type_ids_from_sequences(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
-     ) -> List[int]:
-         """
-         Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
-         use of token type ids, therefore a list of zeros is returned.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-
-         Returns:
-             `List[int]`: List of zeros.
-         """
-         eos = [self.eos_token_id]
-
-         if token_ids_1 is None:
-             return len(token_ids_0 + eos) * [0]
-         return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
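
Because `tokenizer_config.json` (below) maps `AutoTokenizer` to this class via `auto_map`, the tokenizer loads with `trust_remote_code=True`. A small round-trip sketch under that assumption (the path is illustrative):

```python
# Sketch: load the SentencePiece-backed InternLM2 tokenizer and round-trip
# a string. Relies on the auto_map entry in tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "checkpoint-1400", trust_remote_code=True  # illustrative local path
)
ids = tokenizer("hello world").input_ids       # [bos] + sentencepiece pieces
print(tokenizer.decode(ids, skip_special_tokens=True))  # -> "hello world"
```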
 
checkpoint-1400/tokenizer.model DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
- size 1477754
 
checkpoint-1400/tokenizer_config.json DELETED
@@ -1,179 +0,0 @@
- {
-   "added_tokens_decoder": {
-     "0": {
-       "content": "<unk>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "1": {
-       "content": "<s>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "2": {
-       "content": "</s>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92538": {
-       "content": "<|plugin|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92539": {
-       "content": "<|interpreter|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92540": {
-       "content": "<|action_end|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92541": {
-       "content": "<|action_start|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92542": {
-       "content": "<|im_end|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92543": {
-       "content": "<|im_start|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92544": {
-       "content": "<img>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92545": {
-       "content": "</img>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92546": {
-       "content": "<IMG_CONTEXT>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92547": {
-       "content": "<quad>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92548": {
-       "content": "</quad>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92549": {
-       "content": "<ref>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92550": {
-       "content": "</ref>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92551": {
-       "content": "<box>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "92552": {
-       "content": "</box>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     }
-   },
-   "additional_special_tokens": [
-     "<|im_start|>",
-     "<|im_end|>",
-     "<|action_start|>",
-     "<|action_end|>",
-     "<|interpreter|>",
-     "<|plugin|>",
-     "<img>",
-     "</img>",
-     "<IMG_CONTEXT>",
-     "<quad>",
-     "</quad>",
-     "<ref>",
-     "</ref>",
-     "<box>",
-     "</box>"
-   ],
-   "auto_map": {
-     "AutoTokenizer": [
-       "tokenization_internlm2.InternLM2Tokenizer",
-       null
-     ]
-   },
-   "bos_token": "<s>",
-   "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
-   "clean_up_tokenization_spaces": false,
-   "eos_token": "</s>",
-   "model_max_length": 4096,
-   "pad_token": "</s>",
-   "tokenizer_class": "InternLM2Tokenizer",
-   "unk_token": "<unk>"
- }
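
The `chat_template` above is a Jinja string that wraps each turn in `<|im_start|>`/`<|im_end|>`. A sketch of rendering it through the standard `transformers` API, assuming the tokenizer was loaded as in the earlier sketch:

```python
# Sketch: render the ChatML-style template from tokenizer_config.json.
messages = [{"role": "user", "content": "Describe the image."}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# -> "<s><|im_start|>user\nDescribe the image.<|im_end|>\n<|im_start|>assistant\n"
```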
 
checkpoint-1400/trainer_state.json DELETED
The diff for this file is too large to render.
 
checkpoint-1400/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e613f84f84655ffdfef40c83684e6dd844a0c2bb52a7a2456e5228958e10b13f
- size 6264
 
checkpoint-1400/zero_to_fp32.py DELETED
@@ -1,604 +0,0 @@
- #!/usr/bin/env python
-
- # Copyright (c) Microsoft Corporation.
- # SPDX-License-Identifier: Apache-2.0
-
- # DeepSpeed Team
-
- # This script extracts fp32 consolidated weights from zero 1, 2 and 3 DeepSpeed checkpoints. It gets
- # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
- # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
- # application.
- #
- # example: python zero_to_fp32.py . pytorch_model.bin
-
- import argparse
- import torch
- import glob
- import math
- import os
- import re
- from collections import OrderedDict
- from dataclasses import dataclass
-
- # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
- # DeepSpeed data structures it has to be available in the current python environment.
- from deepspeed.utils import logger
- from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
-                                             FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
-                                             FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
-
-
- @dataclass
- class zero_model_state:
-     buffers: dict()
-     param_shapes: dict()
-     shared_params: list
-     ds_version: int
-     frozen_param_shapes: dict()
-     frozen_param_fragments: dict()
-
-
- debug = 0
-
- # load to cpu
- device = torch.device('cpu')
-
-
- def atoi(text):
-     return int(text) if text.isdigit() else text
-
-
- def natural_keys(text):
-     '''
-     alist.sort(key=natural_keys) sorts in human order
-     http://nedbatchelder.com/blog/200712/human_sorting.html
-     (See Toothy's implementation in the comments)
-     '''
-     return [atoi(c) for c in re.split(r'(\d+)', text)]
-
-
- def get_model_state_file(checkpoint_dir, zero_stage):
-     if not os.path.isdir(checkpoint_dir):
-         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
-
-     # there should be only one file
-     if zero_stage <= 2:
-         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
-     elif zero_stage == 3:
-         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
-
-     if not os.path.exists(file):
-         raise FileNotFoundError(f"can't find model states file at '{file}'")
-
-     return file
-
-
- def get_checkpoint_files(checkpoint_dir, glob_pattern):
-     # XXX: need to test that this simple glob rule works for multi-node setup too
-     ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
-
-     if len(ckpt_files) == 0:
-         raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
-
-     return ckpt_files
-
-
- def get_optim_files(checkpoint_dir):
-     return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
-
-
- def get_model_state_files(checkpoint_dir):
-     return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
-
-
- def parse_model_states(files):
-     zero_model_states = []
-     for file in files:
-         state_dict = torch.load(file, map_location=device)
-
-         if BUFFER_NAMES not in state_dict:
-             raise ValueError(f"{file} is not a model state checkpoint")
-         buffer_names = state_dict[BUFFER_NAMES]
-         if debug:
-             print("Found buffers:", buffer_names)
-
-         # recover just the buffers while restoring them to fp32 if they were saved in fp16
-         buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
-         param_shapes = state_dict[PARAM_SHAPES]
-
-         # collect parameters that are included in param_shapes
-         param_names = []
-         for s in param_shapes:
-             for name in s.keys():
-                 param_names.append(name)
-
-         # update with frozen parameters
-         frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
-         if frozen_param_shapes is not None:
-             if debug:
-                 print(f"Found frozen_param_shapes: {frozen_param_shapes}")
-             param_names += list(frozen_param_shapes.keys())
-
-         # handle shared params
-         shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
-
-         ds_version = state_dict.get(DS_VERSION, None)
-
-         frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
-
-         z_model_state = zero_model_state(buffers=buffers,
-                                          param_shapes=param_shapes,
-                                          shared_params=shared_params,
-                                          ds_version=ds_version,
-                                          frozen_param_shapes=frozen_param_shapes,
-                                          frozen_param_fragments=frozen_param_fragments)
-         zero_model_states.append(z_model_state)
-
-     return zero_model_states
-
-
- def parse_optim_states(files, ds_checkpoint_dir):
-
-     total_files = len(files)
-     state_dicts = []
-     for f in files:
-         state_dict = torch.load(f, map_location=device)
-         # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
-         # and also handle the case where it was already removed by another helper script
-         state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
-         state_dicts.append(state_dict)
-
-     if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
-         raise ValueError(f"{files[0]} is not a zero checkpoint")
-     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
-     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
-
-     # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
-     # parameters can be different from data parallelism for non-expert parameters. So we can just
-     # use the max of the partition_count to get the dp world_size.
-
-     if type(world_size) is list:
-         world_size = max(world_size)
-
-     if world_size != total_files:
-         raise ValueError(
-             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
-             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
-         )
-
-     # the groups are named differently in each stage
-     if zero_stage <= 2:
-         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
-     elif zero_stage == 3:
-         fp32_groups_key = FP32_FLAT_GROUPS
-     else:
-         raise ValueError(f"unknown zero stage {zero_stage}")
-
-     if zero_stage <= 2:
-         fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
-     elif zero_stage == 3:
-         # if there is more than one param group, there will be multiple flattened tensors - one
-         # flattened tensor per group - for simplicity merge them into a single tensor
-         #
-         # XXX: could make the script more memory efficient for when there are multiple groups - it
-         # will require matching the sub-lists of param_shapes for each param group flattened tensor
-
-         fp32_flat_groups = [
-             torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
-         ]
-
-     return zero_stage, world_size, fp32_flat_groups
-
-
- def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
-     """
-     Returns fp32 state_dict reconstructed from ds checkpoint
-
-     Args:
-         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
-
-     """
-     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
-
-     optim_files = get_optim_files(ds_checkpoint_dir)
-     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
-     print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
-
-     model_files = get_model_state_files(ds_checkpoint_dir)
-
-     zero_model_states = parse_model_states(model_files)
-     print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
-
-     if zero_stage <= 2:
-         return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                                           exclude_frozen_parameters)
-     elif zero_stage == 3:
-         return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                                           exclude_frozen_parameters)
-
-
- def _zero2_merge_frozen_params(state_dict, zero_model_states):
-     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
-         return
-
-     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
-     frozen_param_fragments = zero_model_states[0].frozen_param_fragments
-
-     if debug:
-         num_elem = sum(s.numel() for s in frozen_param_shapes.values())
-         print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
-
-     wanted_params = len(frozen_param_shapes)
-     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
-     avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
-     print(f'Frozen params: Have {avail_numel} numels to process.')
-     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
-
-     total_params = 0
-     total_numel = 0
-     for name, shape in frozen_param_shapes.items():
-         total_params += 1
-         unpartitioned_numel = shape.numel()
-         total_numel += unpartitioned_numel
-
-         state_dict[name] = frozen_param_fragments[name]
-
-         if debug:
-             print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
-
-     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
-
-
- def _has_callable(obj, fn):
-     attr = getattr(obj, fn, None)
-     return callable(attr)
-
-
- def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
-     param_shapes = zero_model_states[0].param_shapes
-
-     # Reconstruction protocol:
-     #
-     # XXX: document this
-
-     if debug:
-         for i in range(world_size):
-             for j in range(len(fp32_flat_groups[0])):
-                 print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
-
-     # XXX: memory usage doubles here (zero2)
-     num_param_groups = len(fp32_flat_groups[0])
-     merged_single_partition_of_fp32_groups = []
-     for i in range(num_param_groups):
-         merged_partitions = [sd[i] for sd in fp32_flat_groups]
-         full_single_fp32_vector = torch.cat(merged_partitions, 0)
-         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
-     avail_numel = sum(
-         [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
-
-     if debug:
-         wanted_params = sum([len(shapes) for shapes in param_shapes])
-         wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
-         # not asserting if there is a mismatch due to possible padding
-         print(f"Have {avail_numel} numels to process.")
-         print(f"Need {wanted_numel} numels in {wanted_params} params.")
-
-     # params
-     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-     # out-of-core computing solution
-     total_numel = 0
-     total_params = 0
-     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
-         offset = 0
-         avail_numel = full_single_fp32_vector.numel()
-         for name, shape in shapes.items():
-
-             unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
-             total_numel += unpartitioned_numel
-             total_params += 1
-
-             if debug:
-                 print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
-             state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
-             offset += unpartitioned_numel
-
-         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
-         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
-         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
-         # live optimizer object, so we are checking that the numbers are within the right range
-         align_to = 2 * world_size
-
-         def zero2_align(x):
-             return align_to * math.ceil(x / align_to)
-
-         if debug:
-             print(f"original offset={offset}, avail_numel={avail_numel}")
-
-         offset = zero2_align(offset)
-         avail_numel = zero2_align(avail_numel)
-
-         if debug:
-             print(f"aligned offset={offset}, avail_numel={avail_numel}")
-
-         # Sanity check
-         if offset != avail_numel:
-             raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-     print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
-
-
- def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                                exclude_frozen_parameters):
-     state_dict = OrderedDict()
-
-     # buffers
-     buffers = zero_model_states[0].buffers
-     state_dict.update(buffers)
-     if debug:
-         print(f"added {len(buffers)} buffers")
-
-     if not exclude_frozen_parameters:
-         _zero2_merge_frozen_params(state_dict, zero_model_states)
-
-     _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
-
-     # recover shared parameters
-     for pair in zero_model_states[0].shared_params:
-         if pair[1] in state_dict:
-             state_dict[pair[0]] = state_dict[pair[1]]
-
-     return state_dict
-
-
- def zero3_partitioned_param_info(unpartitioned_numel, world_size):
-     remainder = unpartitioned_numel % world_size
-     padding_numel = (world_size - remainder) if remainder else 0
-     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
-     return partitioned_numel, padding_numel
-
-
- def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
-     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
-         return
-
-     if debug:
-         for i in range(world_size):
-             num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
-             print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
-
-     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
-     wanted_params = len(frozen_param_shapes)
-     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
-     avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
-     print(f'Frozen params: Have {avail_numel} numels to process.')
-     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
-
-     total_params = 0
-     total_numel = 0
-     for name, shape in zero_model_states[0].frozen_param_shapes.items():
-         total_params += 1
-         unpartitioned_numel = shape.numel()
-         total_numel += unpartitioned_numel
-
-         param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
-         state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
-
-         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
-
-         if debug:
-             print(
-                 f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
-             )
-
-     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
-
-
- def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
-     param_shapes = zero_model_states[0].param_shapes
-     avail_numel = fp32_flat_groups[0].numel() * world_size
-     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
-     # param, re-consolidating each param, while dealing with padding if any
-
-     # merge list of dicts, preserving order
-     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
-
-     if debug:
-         for i in range(world_size):
-             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
-
-     wanted_params = len(param_shapes)
-     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
-     # not asserting if there is a mismatch due to possible padding
-     avail_numel = fp32_flat_groups[0].numel() * world_size
-     print(f"Trainable params: Have {avail_numel} numels to process.")
-     print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
-
-     # params
-     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-     # out-of-core computing solution
-     offset = 0
-     total_numel = 0
-     total_params = 0
-     for name, shape in param_shapes.items():
-
-         unpartitioned_numel = shape.numel()
-         total_numel += unpartitioned_numel
-         total_params += 1
-
-         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
-
-         if debug:
-             print(
-                 f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
-             )
-
-         # XXX: memory usage doubles here
-         state_dict[name] = torch.cat(
-             tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
-             0).narrow(0, 0, unpartitioned_numel).view(shape)
-         offset += partitioned_numel
-
-     offset *= world_size
-
-     # Sanity check
-     if offset != avail_numel:
-         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
-
-
- def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                                exclude_frozen_parameters):
-     state_dict = OrderedDict()
-
-     # buffers
-     buffers = zero_model_states[0].buffers
-     state_dict.update(buffers)
-     if debug:
-         print(f"added {len(buffers)} buffers")
-
-     if not exclude_frozen_parameters:
-         _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
-
-     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
-
-     # recover shared parameters
-     for pair in zero_model_states[0].shared_params:
-         if pair[1] in state_dict:
-             state_dict[pair[0]] = state_dict[pair[1]]
-
-     return state_dict
-
-
- def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
-     """
-     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
-     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
-     via a model hub.
-
-     Args:
-         - ``checkpoint_dir``: path to the desired checkpoint folder
-         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
-         - ``exclude_frozen_parameters``: exclude frozen parameters
-
-     Returns:
-         - pytorch ``state_dict``
-
-     Note: this approach may not work if your application doesn't have sufficient free CPU memory and
-     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
-     the checkpoint.
-
-     A typical usage might be ::
-
-         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
-         # do the training and checkpoint saving
-         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
-         model = model.cpu() # move to cpu
-         model.load_state_dict(state_dict)
-         # submit to model hub or save the model to share with others
-
-     In this example the ``model`` will no longer be usable in the deepspeed context of the same
-     application. i.e. you will need to re-initialize the deepspeed engine, since
-     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
-
-     """
-     if tag is None:
-         latest_path = os.path.join(checkpoint_dir, 'latest')
-         if os.path.isfile(latest_path):
-             with open(latest_path, 'r') as fd:
-                 tag = fd.read().strip()
-         else:
-             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
-
-     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
-
-     if not os.path.isdir(ds_checkpoint_dir):
-         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
-
-     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
-
-
- def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
-     """
-     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
-     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
-
-     Args:
-         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
-         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-         - ``exclude_frozen_parameters``: exclude frozen parameters
-     """
-
-     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
-     print(f"Saving fp32 state dict to {output_file}")
-     torch.save(state_dict, output_file)
-
-
- def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
-     """
-     1. Put the provided model to cpu
-     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
-     3. Load it into the provided model
-
-     Args:
-         - ``model``: the model object to update
-         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-
-     Returns:
-         - ``model``: modified model
-
-     Make sure you have plenty of CPU memory available before you call this function. If you don't
-     have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
-     conveniently placed for you in the checkpoint folder.
-
-     A typical usage might be ::
-
-         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
-         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
-         # submit to model hub or save the model to share with others
-
-     Note that once this was run, the ``model`` will no longer be usable in the deepspeed context
-     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
-     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-     """
-     logger.info(f"Extracting fp32 weights")
-     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
-
-     logger.info(f"Overwriting model with fp32 weights")
-     model = model.cpu()
-     model.load_state_dict(state_dict, strict=False)
-
-     return model
-
-
- if __name__ == "__main__":
-
-     parser = argparse.ArgumentParser()
-     parser.add_argument("checkpoint_dir",
-                         type=str,
-                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
-     parser.add_argument(
-         "output_file",
-         type=str,
-         help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
-     parser.add_argument("-t",
-                         "--tag",
-                         type=str,
-                         default=None,
-                         help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
-     parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
-     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
-     args = parser.parse_args()
-
-     debug = args.debug
-
-     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
-                                                args.output_file,
-                                                tag=args.tag,
-                                                exclude_frozen_parameters=args.exclude_frozen_parameters)
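
Before deletion, this bundled script (together with the `latest` file containing `global_step1400`) could consolidate the sharded bf16 ZeRO optimizer states above into a single fp32 file. A sketch of the equivalent call through DeepSpeed's copy of the same module (the output path is illustrative):

```python
# Sketch: consolidate checkpoint-1400's ZeRO shards into one fp32
# state_dict file. Same logic as the bundled script's CLI entry point:
#   python checkpoint-1400/zero_to_fp32.py checkpoint-1400 pytorch_model.bin
from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-1400",    # reads the tag from checkpoint-1400/latest
    "pytorch_model.bin",  # illustrative output path
)
```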