henryu committed
Commit
0159e45
·
1 Parent(s): d6c4b54

Create convert_llama_weights_to_hf.py

Files changed (1)
  1. convert_llama_weights_to_hf.py +273 -0
convert_llama_weights_to_hf.py ADDED
@@ -0,0 +1,273 @@
+ # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import argparse
+ import gc
+ import json
+ import math
+ import os
+ import shutil
+ import warnings
+
+ import torch
+
+ from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
+
+
+ try:
+     from transformers import LlamaTokenizerFast
+ except ImportError as e:
+     warnings.warn(e)
+     warnings.warn(
+         "The converted tokenizer will be the `slow` tokenizer. To use the fast tokenizer, update your `tokenizers` library and re-run the tokenizer conversion."
+     )
+     LlamaTokenizerFast = None
+
+ """
+ Sample usage:
+
+ ```
+ python src/transformers/models/llama/convert_llama_weights_to_hf.py \
+     --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
+ ```
+
+ Thereafter, models can be loaded via:
+
+ ```py
+ from transformers import LlamaForCausalLM, LlamaTokenizer
+
+ model = LlamaForCausalLM.from_pretrained("/output/path")
+ tokenizer = LlamaTokenizer.from_pretrained("/output/path")
+ ```
+
+ Important note: you need enough RAM to host the whole model to run this script. Even though the
+ biggest versions come in several checkpoints, each checkpoint contains a part of every weight of
+ the model, so all of them have to be loaded in RAM at once.
+ """
+
+ INTERMEDIATE_SIZE_MAP = {
+     "7B": 11008,
+     "13B": 13824,
+     "30B": 17920,
+     "65B": 22016,
+ }
+ NUM_SHARDS = {
+     "7B": 1,
+     "13B": 2,
+     "30B": 4,
+     "65B": 8,
+ }
+
+
+ def compute_intermediate_size(n):
+     return int(math.ceil(n * 8 / 3) + 255) // 256 * 256
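+ # (Worked example, added for clarity: the expression rounds the SwiGLU hidden size 8n/3 up
+ # to the next multiple of 256. For dim = 4096 (7B), ceil(8 * 4096 / 3) = 10923, which rounds
+ # up to 11008; for dim = 5120 (13B) it gives 13824, matching INTERMEDIATE_SIZE_MAP above.)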
+
+
+ def read_json(path):
+     with open(path, "r") as f:
+         return json.load(f)
+
+
+ def write_json(text, path):
+     with open(path, "w") as f:
+         json.dump(text, f)
+
+
+ def write_model(model_path, input_base_path, model_size):
+     os.makedirs(model_path, exist_ok=True)
+     tmp_model_path = os.path.join(model_path, "tmp")
+     os.makedirs(tmp_model_path, exist_ok=True)
+
+     params = read_json(os.path.join(input_base_path, "params.json"))
+     num_shards = NUM_SHARDS[model_size]
+     n_layers = params["n_layers"]
+     n_heads = params["n_heads"]
+     n_heads_per_shard = n_heads // num_shards
+     dim = params["dim"]
+     dims_per_head = dim // n_heads
+     base = 10000.0
+     inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
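+     # (Clarifying note: these are the standard rotary-embedding frequencies
+     # 1 / base^(2i / d) for i in [0, d/2), with d = dims_per_head.)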
+
+     # permute for sliced rotary
+     def permute(w):
+         return w.view(n_heads, dim // n_heads // 2, 2, dim).transpose(1, 2).reshape(dim, dim)
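+     # (Explanatory note, added here: Meta's checkpoint applies rotary embeddings to
+     # interleaved even/odd channel pairs, while the HF implementation rotates the first and
+     # second halves of each head. The view/transpose above reorders each head's output rows
+     # from (pair, even/odd) order to (even/odd, pair) order, so the two layouts produce the
+     # same attention scores.)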
+
+     print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
+     # Load weights
+     if model_size == "7B":
+         # Not sharded
+         # (The sharded implementation would also work, but this is simpler.)
+         loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
+     else:
+         # Sharded
+         loaded = [
+             torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
+             for i in range(num_shards)
+         ]
+     param_count = 0
+     index_dict = {"weight_map": {}}
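+     # (Note added for clarity: index_dict mirrors the Hugging Face sharded-checkpoint index
+     # format that is written out below as pytorch_model.bin.index.json.)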
+     for layer_i in range(n_layers):
+         filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
+         if model_size == "7B":
+             # Unsharded
+             state_dict = {
+                 f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
+                     loaded[f"layers.{layer_i}.attention.wq.weight"]
+                 ),
+                 f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
+                     loaded[f"layers.{layer_i}.attention.wk.weight"]
+                 ),
+                 f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
+                 f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
+                 f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
+                 f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
+                 f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
+                 f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
+                 f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
+             }
+         else:
+             # Sharded
+             # Note that in the 13B checkpoint, not cloning the two following weights results in the
+             # checkpoint becoming 37GB instead of 26GB, most likely because torch.save serializes a
+             # tensor's entire underlying storage, which these un-cloned tensors share with other
+             # checkpoint weights.
+             state_dict = {
+                 f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
+                     f"layers.{layer_i}.attention_norm.weight"
+                 ].clone(),
+                 f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
+                     f"layers.{layer_i}.ffn_norm.weight"
+                 ].clone(),
+             }
+             state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
+                 torch.cat(
+                     [
+                         loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
+                         for i in range(num_shards)
+                     ],
+                     dim=0,
+                 ).reshape(dim, dim)
+             )
+             state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
+                 torch.cat(
+                     [
+                         loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(n_heads_per_shard, dims_per_head, dim)
+                         for i in range(num_shards)
+                     ],
+                     dim=0,
+                 ).reshape(dim, dim)
+             )
+             state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
+                 [
+                     loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(n_heads_per_shard, dims_per_head, dim)
+                     for i in range(num_shards)
+                 ],
+                 dim=0,
+             ).reshape(dim, dim)
+
+             state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
+                 [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
+             )
+             state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
+                 [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
+             )
+             state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
+                 [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
+             )
+             state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
+                 [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
+             )
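+             # (Note added for clarity: wq/wk/wv/w1/w3 are column-parallel in Meta's
+             # tensor-parallel layout, so their shards concatenate along dim=0, while wo/w2
+             # are row-parallel and concatenate along dim=1.)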
+
+         state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
+         for k, v in state_dict.items():
+             index_dict["weight_map"][k] = filename
+             param_count += v.numel()
+         torch.save(state_dict, os.path.join(tmp_model_path, filename))
+
+     filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
+     if model_size == "7B":
+         # Unsharded
+         state_dict = {
+             "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
+             "model.norm.weight": loaded["norm.weight"],
+             "lm_head.weight": loaded["output.weight"],
+         }
+     else:
+         state_dict = {
+             "model.norm.weight": loaded[0]["norm.weight"],
+             "model.embed_tokens.weight": torch.cat(
+                 [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
+             ),
+             "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
+         }
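+         # (Clarifying note: the token embedding is sharded along the hidden dimension in
+         # Meta's checkpoints (hence dim=1), whereas the output projection is sharded along
+         # the vocabulary dimension (hence dim=0).)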
+
+     for k, v in state_dict.items():
+         index_dict["weight_map"][k] = filename
+         param_count += v.numel()
+     torch.save(state_dict, os.path.join(tmp_model_path, filename))
+
+     # Write configs
+     index_dict["metadata"] = {"total_size": param_count * 2}
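+     # (Note added for clarity: total_size counts two bytes per parameter, matching the
+     # float16 dtype the model is reloaded with below.)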
+     write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
+
+     config = LlamaConfig(
+         hidden_size=dim,
+         intermediate_size=compute_intermediate_size(dim),
+         num_attention_heads=params["n_heads"],
+         num_hidden_layers=params["n_layers"],
+         rms_norm_eps=params["norm_eps"],
+     )
+     config.save_pretrained(tmp_model_path)
+
+     # Make space so we can load the model properly now.
+     del state_dict
+     del loaded
+     gc.collect()
+
+     print("Loading the checkpoint in a Llama model.")
+     model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
+     # Avoid saving this as part of the config.
+     del model.config._name_or_path
+
+     print("Saving in the Transformers format.")
+     model.save_pretrained(model_path)
+     shutil.rmtree(tmp_model_path)
+
+
+ def write_tokenizer(tokenizer_path, input_tokenizer_path):
+     # Initialize the tokenizer based on the `spm` model
+     tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
+     print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
+     tokenizer = tokenizer_class(input_tokenizer_path)
+     tokenizer.save_pretrained(tokenizer_path)
+
+
+ def main():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--input_dir",
+         help="Location of LLaMA weights, which contains tokenizer.model and model folders",
+     )
+     parser.add_argument(
+         "--model_size",
+         choices=["7B", "13B", "30B", "65B", "tokenizer_only"],
+     )
+     parser.add_argument(
+         "--output_dir",
+         help="Location to write HF model and tokenizer",
+     )
+     args = parser.parse_args()
+     if args.model_size != "tokenizer_only":
+         write_model(
+             model_path=args.output_dir,
+             input_base_path=os.path.join(args.input_dir, args.model_size),
+             model_size=args.model_size,
+         )
+     spm_path = os.path.join(args.input_dir, "tokenizer.model")
+     write_tokenizer(args.output_dir, spm_path)
+
+
+ if __name__ == "__main__":
+     main()