netcat420 committed
Commit d656cbe · verified · 1 Parent(s): a6a1745

Upload 2 files

Files changed (2)
  1. convert-hf-to-gguf.py +0 -0
  2. convert.py +1555 -0
convert-hf-to-gguf.py ADDED
The diff for this file is too large to render. See raw diff
 
convert.py ADDED
@@ -0,0 +1,1555 @@
#!/usr/bin/env python3
from __future__ import annotations

import argparse
import concurrent.futures
import enum
import faulthandler
import functools
import itertools
import json
import math
import mmap
import os
import pickle
import re
import signal
import struct
import sys
import textwrap
import time
import zipfile
from abc import ABC, abstractmethod
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ClassVar, IO, Iterable, Literal, Protocol, TypeVar, runtime_checkable

import numpy as np
from sentencepiece import SentencePieceProcessor

if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf

if TYPE_CHECKING:
    from typing_extensions import Self, TypeAlias

if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
    faulthandler.register(signal.SIGUSR1)

NDArray: TypeAlias = 'np.ndarray[Any, Any]'

ARCH = gguf.MODEL_ARCH.LLAMA

DEFAULT_CONCURRENCY = 8

ADDED_TOKENS_FILE = 'added_tokens.json'
FAST_TOKENIZER_FILE = 'tokenizer.json'

#
# data types
#


@dataclass(frozen=True)
class DataType:
    name: str
    dtype: np.dtype[Any]
    valid_conversions: list[str]

    def elements_to_bytes(self, n_elements: int) -> int:
        return n_elements * self.dtype.itemsize


@dataclass(frozen=True)
class UnquantizedDataType(DataType):
    pass


DT_F16  = UnquantizedDataType('F16',  dtype = np.dtype(np.float16), valid_conversions = ['F32', 'Q8_0'])
DT_F32  = UnquantizedDataType('F32',  dtype = np.dtype(np.float32), valid_conversions = ['F16', 'Q8_0'])
DT_I32  = UnquantizedDataType('I32',  dtype = np.dtype(np.int16),  valid_conversions = [])
DT_BF16 = UnquantizedDataType('BF16', dtype = np.dtype(np.uint16), valid_conversions = ['F32', 'F16', 'Q8_0'])


@dataclass(frozen=True)
class QuantizedDataType(DataType):
    block_size: int
    quantized_dtype: np.dtype[Any]
    ggml_type: gguf.GGMLQuantizationType

    def quantize(self, arr: NDArray) -> NDArray:
        raise NotImplementedError(f'Quantization for {self.name} not implemented')

    def elements_to_bytes(self, n_elements: int) -> int:
        assert n_elements % self.block_size == 0, f'Invalid number of elements {n_elements} for {self.name} with block size {self.block_size}'
        return self.quantized_dtype.itemsize * (n_elements // self.block_size)


@dataclass(frozen=True)
class Q8_0QuantizedDataType(QuantizedDataType):
    # Mini Q8_0 quantization in Python!
    def quantize(self, arr: NDArray) -> NDArray:
        assert arr.size % self.block_size == 0 and arr.size != 0, f'Bad array size {arr.size}'
        assert arr.dtype == np.float32, f'Bad array type {arr.dtype}'
        n_blocks = arr.size // self.block_size
        blocks = arr.reshape((n_blocks, self.block_size))
        # Much faster implementation of block quantization contributed by @Cebtenzzre

        def quantize_blocks_q8_0(blocks: NDArray) -> Iterable[tuple[Any, Any]]:
            d = abs(blocks).max(axis = 1) / np.float32(127)
            with np.errstate(divide = 'ignore'):
                qs = (blocks / d[:, None]).round()
            qs[d == 0] = 0
            yield from zip(d, qs)
        return np.fromiter(quantize_blocks_q8_0(blocks), count = n_blocks, dtype = self.quantized_dtype)


DT_Q8_0 = Q8_0QuantizedDataType('Q8_0',
                                dtype = np.dtype(np.float32), valid_conversions = [],
                                ggml_type = gguf.GGMLQuantizationType.Q8_0, block_size = 32,
                                quantized_dtype = np.dtype([('d', '<f2'), ('qs', 'i1', (32,))]))
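
A quick sketch of what the Q8_0 layout above amounts to (illustrative toy values, not part of the converter): each block of 32 floats collapses to one scale d (stored as fp16 in the real format, kept as float32 here for clarity) plus 32 signed 8-bit quants, so d * qs approximately reconstructs the block.

import numpy as np

block_size = 32
arr = np.linspace(-1.0, 1.0, 2 * block_size, dtype=np.float32)  # two blocks
blocks = arr.reshape(-1, block_size)
d = np.abs(blocks).max(axis=1) / np.float32(127)   # one scale per block
qs = np.round(blocks / d[:, None])                 # quants in the int8 range
recon = (d[:, None] * qs).astype(np.float32)
assert np.abs(recon - blocks).max() <= d.max() / 2 + 1e-7  # error bounded by half a step
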
# Quantized types skipped here because they may also map to np.float32
NUMPY_TYPE_TO_DATA_TYPE: dict[np.dtype[Any], DataType] = {}
for dt in (DT_BF16, DT_F16, DT_F32, DT_I32):
    if dt.dtype in NUMPY_TYPE_TO_DATA_TYPE:
        raise ValueError(f'Invalid duplicate data type {dt}')
    NUMPY_TYPE_TO_DATA_TYPE[dt.dtype] = dt

SAFETENSORS_DATA_TYPES: dict[str, DataType] = {
    'BF16': DT_BF16,
    'F16': DT_F16,
    'F32': DT_F32,
    'I32': DT_I32,
}

# TODO: match this with `llama_ftype`
# TODO: rename to LLAMAFileType
# TODO: move to `gguf.py`


class GGMLFileType(enum.IntEnum):
    AllF32     = 0
    MostlyF16  = 1  # except 1d tensors
    MostlyQ8_0 = 7  # except 1d tensors

    def type_for_tensor(self, name: str, tensor: LazyTensor) -> DataType:
        dt = GGML_FILE_TYPE_TO_DATA_TYPE.get(self)
        if dt is None:
            raise ValueError(self)
        # Convert all 1D tensors to F32. Most of the codebase that takes in 1D tensors only handles F32 tensors, and most of the output tensors are F32.
        # Also, the 1D tensors aren't much of a performance/size issue, so instead of maintaining separate F32 and F16 implementations of both, just convert everything to F32 for now.
        return dt if len(tensor.shape) > 1 else DT_F32


GGML_FILE_TYPE_TO_DATA_TYPE: dict[GGMLFileType, DataType] = {
    GGMLFileType.AllF32    : DT_F32,
    GGMLFileType.MostlyF16 : DT_F16,
    GGMLFileType.MostlyQ8_0: DT_Q8_0,
}
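
To illustrate the 1D rule above: `type_for_tensor` only consults the tensor's shape, so a minimal stand-in object is enough (hypothetical tensor names, assuming the definitions above are in scope):

class FakeTensor:
    def __init__(self, shape): self.shape = shape

ft = GGMLFileType.MostlyF16
print(ft.type_for_tensor("blk.0.attn_q.weight", FakeTensor([4096, 4096])).name)  # F16
print(ft.type_for_tensor("blk.0.attn_norm.weight", FakeTensor([4096])).name)     # F32 (1D)
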
#
# hparams loading
#


@dataclass
class Params:
    n_vocab:    int
    n_embd:     int
    n_layer:    int
    n_ctx:      int
    n_ff:       int
    n_head:     int
    n_head_kv:  int
    n_experts:      int | None = None
    n_experts_used: int | None = None
    f_norm_eps:     float | None = None

    rope_scaling_type: gguf.RopeScalingType | None = None
    f_rope_freq_base: float | None = None
    f_rope_scale: float | None = None
    n_orig_ctx: int | None = None
    rope_finetuned: bool | None = None

    ftype: GGMLFileType | None = None

    # path to the directory containing the model files
    path_model: Path | None = None

    @staticmethod
    def guessed(model: LazyModel) -> Params:
        # try transformer naming first
        n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape

        # try transformer naming first
        if "model.layers.0.self_attn.q_proj.weight" in model:
            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
        elif "model.layers.0.self_attn.W_pack.weight" in model:  # next: try baichuan naming
            n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model)
        else:
            n_layer = next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model)

        if n_layer < 1:
            msg = """\
                failed to guess 'n_layer'. This model is unknown or unsupported.
                Suggestion: provide 'config.json' of the model in the same directory containing model files."""
            raise KeyError(textwrap.dedent(msg))

        n_head = n_embd // 128  # guessed
        n_mult = 256            # guessed

        # TODO: verify this
        n_ff = int(2 * (4 * n_embd) / 3)
        n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult)

        return Params(
            n_vocab    = n_vocab,
            n_embd     = n_embd,
            n_layer    = n_layer,
            n_ctx      = -1,
            n_ff       = n_ff,
            n_head     = n_head,
            n_head_kv  = n_head,
            f_norm_eps = 1e-5,
        )
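
Worked through for n_embd = 4096, the n_ff heuristic above reproduces LLaMA-7B's actual feed-forward size:

n_embd = 4096
n_mult = 256
n_ff = int(2 * (4 * n_embd) / 3)                 # 2 * 16384 / 3 -> 10922
n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult)  # round up to a multiple of 256
print(n_ff)                                      # 11008, as in LLaMA-7B
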
    @staticmethod
    def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params:
        with open(config_path) as f:
            config = json.load(f)

        rope_scaling_type = f_rope_scale = n_orig_ctx = rope_finetuned = None
        rope_scaling = config.get("rope_scaling")

        if rope_scaling is not None and (typ := rope_scaling.get("type")):
            rope_factor = rope_scaling.get("factor")
            f_rope_scale = rope_factor
            if typ == "linear":
                rope_scaling_type = gguf.RopeScalingType.LINEAR
            elif typ == "yarn":
                rope_scaling_type = gguf.RopeScalingType.YARN
                n_orig_ctx = rope_scaling['original_max_position_embeddings']
                rope_finetuned = rope_scaling['finetuned']
            else:
                raise NotImplementedError(f'Unknown rope scaling type: {typ}')

        if "max_sequence_length" in config:
            n_ctx = config["max_sequence_length"]
        elif "max_position_embeddings" in config:
            n_ctx = config["max_position_embeddings"]
        else:
            msg = """\
                failed to guess 'n_ctx'. This model is unknown or unsupported.
                Suggestion: provide 'config.json' of the model in the same directory containing model files."""
            raise KeyError(textwrap.dedent(msg))

        n_experts = None
        n_experts_used = None

        if "num_local_experts" in config:
            n_experts = config["num_local_experts"]
            n_experts_used = config["num_experts_per_tok"]

        return Params(
            n_vocab           = config["vocab_size"],
            n_embd            = config["hidden_size"],
            n_layer           = config["num_hidden_layers"],
            n_ctx             = n_ctx,
            n_ff              = config["intermediate_size"],
            n_head            = (n_head := config["num_attention_heads"]),
            n_head_kv         = config.get("num_key_value_heads", n_head),
            n_experts         = n_experts,
            n_experts_used    = n_experts_used,
            f_norm_eps        = config["rms_norm_eps"],
            f_rope_freq_base  = config.get("rope_theta"),
            rope_scaling_type = rope_scaling_type,
            f_rope_scale      = f_rope_scale,
            n_orig_ctx        = n_orig_ctx,
            rope_finetuned    = rope_finetuned,
        )

    # LLaMA v2 70B params.json
    # {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1}
    @staticmethod
    def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> Params:
        with open(config_path) as f:
            config = json.load(f)

        n_experts = None
        n_experts_used = None
        f_rope_freq_base = None

        # hack to determine LLaMA v1 vs v2 vs CodeLlama
        if config.get("moe"):
            # Mixtral
            n_ctx = 32768
        elif config.get("rope_theta") == 1000000:
            # CodeLlama
            n_ctx = 16384
        elif config["norm_eps"] == 1e-05:
            # LLaMA v2
            n_ctx = 4096
        else:
            # LLaMA v1
            n_ctx = 2048

        if "layers.0.feed_forward.w1.weight" in model:
            n_ff = model["layers.0.feed_forward.w1.weight"].shape[0]

        if config.get("moe"):
            n_ff = model["layers.0.feed_forward.experts.0.w1.weight"].shape[0]
            n_experts = config["moe"]["num_experts"]
            n_experts_used = config["moe"]["num_experts_per_tok"]
            f_rope_freq_base = 1e6

        return Params(
            n_vocab          = model["tok_embeddings.weight"].shape[0],
            n_embd           = config["dim"],
            n_layer          = config["n_layers"],
            n_ctx            = n_ctx,
            n_ff             = n_ff,
            n_head           = (n_head := config["n_heads"]),
            n_head_kv        = config.get("n_kv_heads", n_head),
            n_experts        = n_experts,
            n_experts_used   = n_experts_used,
            f_norm_eps       = config["norm_eps"],
            f_rope_freq_base = config.get("rope_theta", f_rope_freq_base),
        )

    @staticmethod
    def load(model_plus: ModelPlus) -> Params:
        hf_config_path   = model_plus.paths[0].parent / "config.json"
        orig_config_path = model_plus.paths[0].parent / "params.json"

        if hf_config_path.exists():
            params = Params.loadHFTransformerJson(model_plus.model, hf_config_path)
        elif orig_config_path.exists():
            params = Params.loadOriginalParamsJson(model_plus.model, orig_config_path)
        elif model_plus.format != 'none':
            params = Params.guessed(model_plus.model)
        else:
            raise ValueError('Cannot guess params when model format is none')

        params.path_model = model_plus.paths[0].parent

        return params


#
# vocab
#

@runtime_checkable
class BaseVocab(Protocol):
    tokenizer_model: ClassVar[str]
    name: ClassVar[str]


class NoVocab(BaseVocab):
    tokenizer_model = "no_vocab"
    name = "no_vocab"

    def __repr__(self) -> str:
        return "<NoVocab for a model without integrated vocabulary>"


@runtime_checkable
class Vocab(BaseVocab, Protocol):
    vocab_size: int
    added_tokens_dict: dict[str, int]
    added_tokens_list: list[str]
    fname_tokenizer: Path

    def __init__(self, base_path: Path): ...
    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: ...


class BpeVocab(Vocab):
    tokenizer_model = "gpt2"
    name = "bpe"

    def __init__(self, base_path: Path):
        added_tokens: dict[str, int] = {}

        if (fname_tokenizer := base_path / 'vocab.json').exists():
            # "slow" tokenizer
            with open(fname_tokenizer, encoding="utf-8") as f:
                self.vocab = json.load(f)

            try:
                # FIXME: Verify that added tokens here _cannot_ overlap with the main vocab.
                with open(base_path / ADDED_TOKENS_FILE, encoding="utf-8") as f:
                    added_tokens = json.load(f)
            except FileNotFoundError:
                pass
        else:
            # "fast" tokenizer
            fname_tokenizer = base_path / FAST_TOKENIZER_FILE

            # if this fails, FileNotFoundError propagates to caller
            with open(fname_tokenizer, encoding="utf-8") as f:
                tokenizer_json = json.load(f)

            tokenizer_model: dict[str, Any] = tokenizer_json['model']
            if (
                tokenizer_model['type'] != 'BPE' or tokenizer_model.get('byte_fallback', False)
                or tokenizer_json['decoder']['type'] != 'ByteLevel'
            ):
                raise FileNotFoundError('Cannot find GPT-2 BPE tokenizer')

            self.vocab = tokenizer_model["vocab"]

            if (added := tokenizer_json.get('added_tokens')) is not None:
                # Added tokens here can be duplicates of the main vocabulary.
                added_tokens = {item['content']: item['id']
                                for item in added
                                if item['content'] not in self.vocab}

        vocab_size = len(self.vocab)
        expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
        actual_ids   = sorted(added_tokens.values())
        if expected_ids != actual_ids:
            expected_end_id = vocab_size + len(actual_ids) - 1
            raise ValueError(f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range "
                             f"{vocab_size} - {expected_end_id}; got {actual_ids}")

        items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
        self.added_tokens_dict = added_tokens
        self.added_tokens_list = [text for (text, idx) in items]
        self.vocab_size_base   = vocab_size
        self.vocab_size        = self.vocab_size_base + len(self.added_tokens_list)
        self.fname_tokenizer   = fname_tokenizer

    def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        reverse_vocab = {id: encoded_tok for encoded_tok, id in self.vocab.items()}

        for i, _ in enumerate(self.vocab):
            yield reverse_vocab[i], 0.0, gguf.TokenType.NORMAL

    def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score, gguf.TokenType.CONTROL

    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        yield from self.bpe_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
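
The sequential-ID check above means added tokens must directly extend the base vocabulary with no gaps; a sketch with hypothetical token names:

vocab_size = 32000
ok  = {"<tool>": 32000, "</tool>": 32001}  # contiguous extension of the base vocab
bad = {"<tool>": 32005}                    # gap after the base vocab -> rejected
for added in (ok, bad):
    expected = list(range(vocab_size, vocab_size + len(added)))
    print(expected == sorted(added.values()))  # True, then False (would raise ValueError)
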
class SentencePieceVocab(Vocab):
    tokenizer_model = "llama"
    name = "spm"

    def __init__(self, base_path: Path):
        added_tokens: dict[str, int] = {}
        if (fname_tokenizer := base_path / 'tokenizer.model').exists():
            # normal location
            try:
                with open(base_path / ADDED_TOKENS_FILE, encoding="utf-8") as f:
                    added_tokens = json.load(f)
            except FileNotFoundError:
                pass
        elif not (fname_tokenizer := base_path.parent / 'tokenizer.model').exists():
            # not found in alternate location either
            raise FileNotFoundError('Cannot find tokenizer.model')

        self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
        vocab_size = self.sentencepiece_tokenizer.vocab_size()

        new_tokens       = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
        expected_new_ids = list(range(vocab_size, vocab_size + len(new_tokens)))
        actual_new_ids   = sorted(new_tokens.keys())

        if expected_new_ids != actual_new_ids:
            raise ValueError(f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}")

        # Token pieces that were added to the base vocabulary.
        self.added_tokens_dict = added_tokens
        self.added_tokens_list = [new_tokens[id] for id in actual_new_ids]
        self.vocab_size_base   = vocab_size
        self.vocab_size        = self.vocab_size_base + len(self.added_tokens_list)
        self.fname_tokenizer   = fname_tokenizer

    def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        tokenizer = self.sentencepiece_tokenizer
        for i in range(tokenizer.vocab_size()):
            piece = tokenizer.id_to_piece(i)
            text = piece.encode("utf-8")
            score: float = tokenizer.get_score(i)

            toktype = gguf.TokenType.NORMAL
            if tokenizer.is_unknown(i):
                toktype = gguf.TokenType.UNKNOWN
            if tokenizer.is_control(i):
                toktype = gguf.TokenType.CONTROL

            # NOTE: I think added_tokens are user defined.
            # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
            # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED

            if tokenizer.is_unused(i):
                toktype = gguf.TokenType.UNUSED
            if tokenizer.is_byte(i):
                toktype = gguf.TokenType.BYTE

            yield text, score, toktype

    def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            score = -1000.0
            yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED

    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        yield from self.sentencepiece_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"


class LlamaHfVocab(Vocab):
    tokenizer_model = "llama"
    name = "hfft"

    def __init__(self, base_path: Path):
        fname_tokenizer = base_path / FAST_TOKENIZER_FILE
        # if this fails, FileNotFoundError propagates to caller
        with open(fname_tokenizer, encoding='utf-8') as f:
            tokenizer_json = json.load(f)

        # pre-check so we know if we need transformers
        tokenizer_model: dict[str, Any] = tokenizer_json['model']
        is_llama3 = (
            tokenizer_model['type'] == 'BPE' and tokenizer_model.get('ignore_merges', False)
            and not tokenizer_model.get('byte_fallback', True)
        )
        if is_llama3:
            raise TypeError('Llama 3 must be converted with BpeVocab')

        if not is_llama3 and (
            tokenizer_model['type'] != 'BPE' or not tokenizer_model.get('byte_fallback', False)
            or tokenizer_json['decoder']['type'] != 'Sequence'
        ):
            raise FileNotFoundError('Cannot find Llama BPE tokenizer')

        try:
            from transformers import AutoTokenizer
        except ImportError as e:
            raise ImportError(
                "To use LlamaHfVocab, please install the `transformers` package. "
                "You can install it with `pip install transformers`."
            ) from e

        # Allow the tokenizer to default to slow or fast versions.
        # Explicitly set tokenizer to use local paths.
        self.tokenizer = AutoTokenizer.from_pretrained(
            base_path,
            cache_dir=base_path,
            local_files_only=True,
        )
        assert self.tokenizer.is_fast  # assume tokenizer.json is used

        # Initialize lists and dictionaries for added tokens
        self.added_tokens_list = []
        self.added_tokens_dict = dict()
        self.added_tokens_ids  = set()

        # Process added tokens
        for tok, tokidx in sorted(
            self.tokenizer.get_added_vocab().items(), key=lambda x: x[1]
        ):
            # Only consider added tokens that are not in the base vocabulary
            if tokidx >= self.tokenizer.vocab_size:
                self.added_tokens_list.append(tok)
                self.added_tokens_dict[tok] = tokidx
                self.added_tokens_ids.add(tokidx)

        # Store special tokens and their IDs
        self.specials = {
            tok: self.tokenizer.get_vocab()[tok]
            for tok in self.tokenizer.all_special_tokens
        }
        self.special_ids = set(self.tokenizer.all_special_ids)

        # Set vocabulary sizes
        self.vocab_size_base = self.tokenizer.vocab_size
        self.vocab_size      = self.vocab_size_base + len(self.added_tokens_list)

        self.fname_tokenizer = fname_tokenizer

    def hf_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        reverse_vocab = {
            id: encoded_tok for encoded_tok, id in self.tokenizer.get_vocab().items()
        }

        for token_id in range(self.vocab_size_base):
            # Skip processing added tokens here
            if token_id in self.added_tokens_ids:
                continue

            # Convert token text to bytes
            token_text = reverse_vocab[token_id].encode("utf-8")

            # Yield token text, score, and type
            yield token_text, self.get_token_score(token_id), self.get_token_type(
                token_id, token_text, self.special_ids  # Reuse already stored special IDs
            )

    def get_token_type(self, token_id: int, token_text: bytes, special_ids: set[int]) -> gguf.TokenType:
        # Special case for byte tokens
        if re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
            return gguf.TokenType.BYTE

        # Determine token type based on whether it's a special token
        return gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL
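
The byte-token pattern above matches SentencePiece-style byte-fallback pieces, e.g.:

import re

print(bool(re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", b"<0x0A>")))  # True: the newline byte token
print(bool(re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", b"hello")))   # False: ordinary token
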
    def get_token_score(self, token_id: int) -> float:
        # Placeholder for actual logic to determine the token's score
        # This needs to be implemented based on specific requirements
        return -1000.0  # Default score

    def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        for text in self.added_tokens_list:
            if text in self.specials:
                toktype = self.get_token_type(self.specials[text], b'', self.special_ids)
                score = self.get_token_score(self.specials[text])
            else:
                toktype = gguf.TokenType.USER_DEFINED
                score = -1000.0

            yield text.encode("utf-8"), score, toktype

    def has_newline_token(self):
        return "<0x0A>" in self.tokenizer.vocab or "\n" in self.tokenizer.vocab

    def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
        yield from self.hf_tokens()
        yield from self.added_tokens()

    def __repr__(self) -> str:
        return f"<LlamaHfVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"


#
# data loading
# TODO: reuse (probably move to gguf.py?)
#


def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray:
    # print( "permute debug " + str(weights.shape[0]) + " x " + str(weights.shape[1]) + " nhead " + str(n_head) + " nheadkv " + str(n_kv_head) )
    if n_head_kv is not None and n_head != n_head_kv:
        n_head = n_head_kv
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape))
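
A toy check of `permute` above: it only reorders rows within each head (undoing the Hugging Face interleaved RoPE layout), leaving the shape and the set of rows unchanged.

import numpy as np

w = np.arange(8 * 4, dtype=np.float32).reshape(8, 4)  # 8 rows = 2 heads x 4 rows each
p = permute(w, n_head=2, n_head_kv=2)
print(p.shape)                                                           # (8, 4)
print(sorted(map(tuple, p.tolist())) == sorted(map(tuple, w.tolist())))  # True: same rows
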
class Tensor(ABC):
    ndarray: NDArray
    data_type: DataType

    @abstractmethod
    def astype(self, data_type: DataType) -> Self: ...
    @abstractmethod
    def permute(self, n_head: int, n_head_kv: int) -> Self: ...
    @abstractmethod
    def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> Self: ...
    @abstractmethod
    def part(self, n_part: int) -> Self: ...
    @abstractmethod
    def to_ggml(self) -> GGMLCompatibleTensor: ...


def bf16_to_fp32(bf16_arr: np.ndarray[Any, np.dtype[np.uint16]]) -> NDArray:
    assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}"
    fp32_arr = bf16_arr.astype(np.uint32) << 16
    return fp32_arr.view(np.float32)
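
Since bfloat16 is just the top 16 bits of an IEEE-754 float32, the shift-and-reinterpret above is exact for any bf16-representable value; for example, the bf16 bit pattern of 1.0:

import numpy as np

one = np.array([0x3F80], dtype=np.uint16)  # bf16 bits of 1.0 (sign 0, exponent 127, mantissa 0)
print(bf16_to_fp32(one))                   # [1.]
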
class UnquantizedTensor(Tensor):
    def __init__(self, ndarray: NDArray):
        assert isinstance(ndarray, np.ndarray)
        self.ndarray = ndarray
        self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype]

    def astype(self, data_type: DataType) -> UnquantizedTensor:
        dtype = data_type.dtype
        if self.data_type == DT_BF16:
            self.ndarray = bf16_to_fp32(self.ndarray)
        return UnquantizedTensor(self.ndarray.astype(dtype))

    def to_ggml(self) -> Self:
        return self

    def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> UnquantizedTensor:
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head, n_head_kv))

    def part(self, n_part: int) -> UnquantizedTensor:
        r = self.ndarray.shape[0] // 3
        return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])

    def permute(self, n_head: int, n_head_kv: int) -> UnquantizedTensor:
        return UnquantizedTensor(permute(self.ndarray, n_head, n_head_kv))


def load_unquantized(lazy_tensor: LazyTensor, expected_dtype: Any = None, convert: bool = False) -> NDArray:
    tensor = lazy_tensor.load()
    assert isinstance(tensor, UnquantizedTensor)

    # double-check:
    actual_shape = list(tensor.ndarray.shape)
    assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape)
    if expected_dtype is not None and expected_dtype != tensor.ndarray.dtype:
        if convert:
            tensor.ndarray = tensor.ndarray.astype(expected_dtype)
        else:
            raise ValueError(f'expected this tensor to have dtype {expected_dtype}, got {tensor.ndarray.dtype}')

    return tensor.ndarray


GGMLCompatibleTensor = UnquantizedTensor


@dataclass
class LazyTensor:
    _load: Callable[[], Tensor]
    shape: list[int]
    data_type: DataType
    description: str

    def load(self) -> Tensor:
        ret = self._load()
        # Should be okay if it maps to the same numpy type?
        assert ret.data_type == self.data_type or (self.data_type.dtype == ret.data_type.dtype), \
            (self.data_type, ret.data_type, self.description)
        return ret

    def astype(self, data_type: DataType) -> LazyTensor:
        self.validate_conversion_to(data_type)

        def load() -> Tensor:
            return self.load().astype(data_type)
        return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}')

    def validate_conversion_to(self, data_type: DataType) -> None:
        if data_type != self.data_type and data_type.name not in self.data_type.valid_conversions:
            raise ValueError(f'Cannot validate conversion from {self.data_type} to {data_type}.')


LazyModel: TypeAlias = 'dict[str, LazyTensor]'


@dataclass
class ModelPlus:
    model: LazyModel
    paths: list[Path]  # Where this was read from.
    format: Literal['ggml', 'torch', 'safetensors', 'none']
    vocab: BaseVocab | None  # For GGML models (which have vocab built in), the vocab.


def merge_sharded(models: list[LazyModel]) -> LazyModel:
    # Original LLaMA models have each file contain one part of each tensor.
    # Use a dict instead of a set to preserve order.
    names = {name: None for model in models for name in model}

    def convert(name: str) -> LazyTensor:
        lazy_tensors = [model[name] for model in models]
        if len(lazy_tensors) == 1:
            # only one file; don't go through this procedure since there might
            # be quantized tensors
            return lazy_tensors[0]
        if len(lazy_tensors[0].shape) == 1:
            # the tensor is just duplicated in every file
            return lazy_tensors[0]
        if name.startswith('tok_embeddings.') or \
           name.endswith('.attention.wo.weight') or \
           name.endswith('.feed_forward.w2.weight'):
            # split by columns
            axis = 1
        else:
            # split by rows
            axis = 0
        concatenated_shape = list(lazy_tensors[0].shape)
        concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors)

        def load() -> UnquantizedTensor:
            ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors]
            concatenated = np.concatenate(ndarrays, axis=axis)
            return UnquantizedTensor(concatenated)
        description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]'
        return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description)
    return {name: convert(name) for name in names}
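
The axis choice above follows how the original LLaMA checkpoints shard tensors: column-parallel weights rejoin along axis 1, row-parallel ones (the default branch) along axis 0. A shape-only sketch:

import numpy as np

shard_a = np.zeros((4, 3), dtype=np.float32)
shard_b = np.ones((4, 3), dtype=np.float32)
print(np.concatenate([shard_a, shard_b], axis=1).shape)  # (4, 6)  e.g. attention.wo.weight
print(np.concatenate([shard_a, shard_b], axis=0).shape)  # (8, 3)  the default row split
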
def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus:
    formats = set(mp.format for mp in models_plus)
    assert len(formats) == 1, "different formats?"
    format = formats.pop()
    paths = [path for mp in models_plus for path in mp.paths]
    # Use the first non-None vocab, if any.
    try:
        vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None)
    except StopIteration:
        vocab = None

    if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
        # Transformers models put different tensors in different files, but
        # don't split individual tensors between files.
        model: LazyModel = {}
        for mp in models_plus:
            model.update(mp.model)
    else:
        model = merge_sharded([mp.model for mp in models_plus])

    return ModelPlus(model, paths, format, vocab)  # pytype: disable=wrong-arg-types


def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_head_kv: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute(n_head, n_head_kv)
    return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description)


def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int, n_head_kv: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().permute_part(n_part, n_head, n_head_kv)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description)


def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor:
    def load() -> Tensor:
        return lazy_tensor.load().part(n_part)
    s = lazy_tensor.shape.copy()
    s[0] = s[0] // 3
    return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description)


def pack_experts_lazy(lazy_tensors: list[LazyTensor]) -> LazyTensor:
    def load() -> Tensor:
        tensors = [lazy_tensor.load() for lazy_tensor in lazy_tensors]
        return UnquantizedTensor(np.array([tensor.ndarray for tensor in tensors]))
    s = lazy_tensors[0].shape.copy()
    s.insert(0, len(lazy_tensors))
    return LazyTensor(load, s, lazy_tensors[0].data_type, 'pack_experts ' + ' | '.join(lt.description for lt in lazy_tensors))


# Functionality that simulates `torch.load` but where individual tensors are
# only loaded into memory on demand, not all at once.
# PyTorch can't do this natively as of time of writing:
# - https://github.com/pytorch/pytorch/issues/64327
# This allows us to de-shard without multiplying RAM usage, and also
# conveniently drops the PyTorch dependency (though we still need numpy).


@dataclass
class LazyStorageKind:
    data_type: DataType


@dataclass
class LazyStorage:
    load: Callable[[int, int], NDArray]
    kind: LazyStorageKind
    description: str


class LazyUnpickler(pickle.Unpickler):
    def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile):
        super().__init__(fp)
        self.data_base_path = data_base_path
        self.zip_file = zip_file

    def persistent_load(self, pid: Any) -> Any:
        assert pid[0] == 'storage'
        assert isinstance(pid[1], LazyStorageKind)
        data_type = pid[1].data_type
        filename_stem = pid[2]
        filename = f'{self.data_base_path}/{filename_stem}'
        info = self.zip_file.getinfo(filename)

        def load(offset: int, elm_count: int) -> NDArray:
            dtype = data_type.dtype
            with self.zip_file.open(info) as fp:
                fp.seek(offset * dtype.itemsize)
                size = elm_count * dtype.itemsize
                data = fp.read(size)
                assert len(data) == size
                return np.frombuffer(data, dtype)
        description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}'
        return LazyStorage(load=load, kind=pid[1], description=description)

    @staticmethod
    def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any,
                               requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor:
        assert isinstance(storage, LazyStorage)

        def load() -> UnquantizedTensor:
            elm_count = stride[0] * size[0]
            return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size))
        description = f'pickled storage_offset={storage_offset} in {storage.description}'
        return LazyTensor(load, list(size), storage.kind.data_type, description)

    @staticmethod
    def rebuild_from_type_v2(func, new_type, args, state):
        return func(*args)

    CLASSES = {
        # getattr used here as a workaround for mypy not being smart enough to determine
        # the staticmethods have a __func__ attribute.
        ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'),
        ('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'),
        ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16),
        ('torch', 'HalfStorage'): LazyStorageKind(DT_F16),
        ('torch', 'FloatStorage'): LazyStorageKind(DT_F32),
        ('torch', 'IntStorage'): LazyStorageKind(DT_I32),
        ('torch', 'Tensor'): LazyTensor,
    }

    def find_class(self, module: str, name: str) -> Any:
        if not module.startswith('torch'):
            return super().find_class(module, name)
        return self.CLASSES[(module, name)]


def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus:
    zf = zipfile.ZipFile(outer_fp)
    pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')]
    assert len(pickle_paths) == 1, pickle_paths
    pickle_fp = zf.open(pickle_paths[0], 'r')
    unpickler = LazyUnpickler(pickle_fp,
                              data_base_path=pickle_paths[0][:-4],
                              zip_file=zf)
    model = unpickler.load()
    if 'model' in model: model = model['model']
    as_dict = dict(model.items())
    return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None)


def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus:
    header_size, = struct.unpack('<Q', fp.read(8))
    header: dict[str, dict[str, Any]] = json.loads(fp.read(header_size))
    # Use mmap for the actual data to avoid race conditions with the file offset.
    mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
    byte_buf = mapped[8 + header_size:]

    def convert(info: dict[str, Any]) -> LazyTensor:
        data_type = SAFETENSORS_DATA_TYPES[info['dtype']]
        numpy_dtype = data_type.dtype
        shape: list[int] = info['shape']
        begin, end = info['data_offsets']
        assert 0 <= begin <= end <= len(byte_buf)
        assert end - begin == math.prod(shape) * numpy_dtype.itemsize
        buf = byte_buf[begin:end]

        def load() -> UnquantizedTensor:
            return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
        description = f'safetensors begin={begin} end={end} type={data_type} path={path}'
        return LazyTensor(load, shape, data_type, description)
    model = {name: convert(info) for (name, info) in header.items() if name != '__metadata__'}
    return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None)
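
A sketch of the safetensors framing that `lazy_load_safetensors_file` relies on: an 8-byte little-endian header length, a JSON header, then raw tensor bytes (built in memory here; the real loader additionally mmaps the data region):

import io
import json
import struct

import numpy as np

data = np.arange(6, dtype=np.float32)
header = {"t": {"dtype": "F32", "shape": [2, 3], "data_offsets": [0, data.nbytes]}}
hjson = json.dumps(header).encode()
fp = io.BytesIO(struct.pack('<Q', len(hjson)) + hjson + data.tobytes())

header_size, = struct.unpack('<Q', fp.read(8))
parsed = json.loads(fp.read(header_size))
print(parsed["t"]["shape"], parsed["t"]["data_offsets"])  # [2, 3] [0, 24]
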
def must_read(fp: IO[bytes], length: int) -> bytes:
    ret = fp.read(length)
    if len(ret) < length:
        raise EOFError("unexpectedly reached end of file")
    return ret


@functools.lru_cache(maxsize=None)
def lazy_load_file(path: Path) -> ModelPlus:
    fp = open(path, 'rb')
    first8 = fp.read(8)
    fp.seek(0)
    if first8[:2] == b'PK':
        # A zip file, i.e. PyTorch format
        return lazy_load_torch_file(fp, path)
    elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024:
        # Probably safetensors
        return lazy_load_safetensors_file(fp, path)
    else:
        raise ValueError(f"unknown format: {path}")


In = TypeVar('In')
Out = TypeVar('Out')


def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int, max_workers: int | None = None, use_processpool_executor: bool = False) -> Iterable[Out]:
    '''Parallel map, but with backpressure. If the caller doesn't call `next`
    fast enough, this will stop calling `func` at some point rather than
    letting results pile up in memory. Specifically, there is a max of one
    output value buffered per thread.'''
    if concurrency < 2:
        yield from map(func, iterable)
        # Not reached.
    iterable = iter(iterable)
    executor_class: type[ThreadPoolExecutor] | type[ProcessPoolExecutor]
    if use_processpool_executor:
        executor_class = ProcessPoolExecutor
    else:
        executor_class = ThreadPoolExecutor
    with executor_class(max_workers=max_workers) as executor:
        futures: list[concurrent.futures.Future[Out]] = []
        done = False
        for _ in range(concurrency):
            try:
                futures.append(executor.submit(func, next(iterable)))
            except StopIteration:
                done = True
                break

        while futures:
            result = futures.pop(0).result()
            while not done and len(futures) < concurrency:
                try:
                    futures.append(executor.submit(func, next(iterable)))
                except StopIteration:
                    done = True
                    break
            yield result
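
A usage sketch for the backpressure map above: results stream out in input order while at most `concurrency` items are in flight (hypothetical worker function standing in for per-tensor work):

import time

def slow_double(x: int) -> int:
    time.sleep(0.01)  # stand-in for real per-tensor work
    return 2 * x

print(list(bounded_parallel_map(slow_double, range(8), concurrency=4)))
# [0, 2, 4, 6, 8, 10, 12, 14]
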
def check_vocab_size(params: Params, vocab: BaseVocab, pad_vocab: bool = False) -> None:
    # Handle special case where the model's vocab size is not set
    if params.n_vocab == -1:
        raise ValueError(
            "The model's vocab size is set to -1 in params.json. Please update it manually."
            + (f" Maybe {vocab.vocab_size}?" if isinstance(vocab, Vocab) else ""),
        )
    if not isinstance(vocab, Vocab):
        return  # model has no vocab

    # Check for a vocab size mismatch
    if params.n_vocab == vocab.vocab_size:
        print("Ignoring added_tokens.json since model matches vocab size without it.")
        return

    if pad_vocab and params.n_vocab > vocab.vocab_size:
        pad_count = params.n_vocab - vocab.vocab_size
        print(
            f"Padding vocab with {pad_count} token(s) - <dummy00001> through <dummy{pad_count:05}>"
        )
        for i in range(1, pad_count + 1):
            vocab.added_tokens_dict[f"<dummy{i:05}>"] = -1
            vocab.added_tokens_list.append(f"<dummy{i:05}>")
        vocab.vocab_size = params.n_vocab
        return

    msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer} has {vocab.vocab_size})."
    if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20:
        msg += f" Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})."
    if vocab.vocab_size < params.n_vocab:
        msg += " Add the --pad-vocab option and try again."

    raise ValueError(msg)


class OutputFile:
    def __init__(self, fname_out: Path, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE):
        self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH], endianess=endianess)

    def add_meta_arch(self, params: Params) -> None:
        name = "LLaMA"

        # TODO: better logic to determine model name
        if params.n_ctx == 4096:
            name = "LLaMA v2"
        elif params.path_model is not None:
            name = str(params.path_model.parent).split('/')[-1]

        self.gguf.add_name                (name)
        self.gguf.add_vocab_size          (params.n_vocab)
        self.gguf.add_context_length      (params.n_ctx)
        self.gguf.add_embedding_length    (params.n_embd)
        self.gguf.add_block_count         (params.n_layer)
        self.gguf.add_feed_forward_length (params.n_ff)
        self.gguf.add_rope_dimension_count(params.n_embd // params.n_head)
        self.gguf.add_head_count          (params.n_head)
        self.gguf.add_head_count_kv       (params.n_head_kv)

        if params.n_experts:
            self.gguf.add_expert_count(params.n_experts)

        if params.n_experts_used:
            self.gguf.add_expert_used_count(params.n_experts_used)

        if params.f_norm_eps:
            self.gguf.add_layer_norm_rms_eps(params.f_norm_eps)
        else:
            raise ValueError('f_norm_eps is None')

        if params.f_rope_freq_base is not None:
            self.gguf.add_rope_freq_base(params.f_rope_freq_base)

        if params.rope_scaling_type:
            assert params.f_rope_scale is not None
            self.gguf.add_rope_scaling_type(params.rope_scaling_type)
            self.gguf.add_rope_scaling_factor(params.f_rope_scale)

        if params.n_orig_ctx is not None:
            self.gguf.add_rope_scaling_orig_ctx_len(params.n_orig_ctx)

        if params.rope_finetuned is not None:
            self.gguf.add_rope_scaling_finetuned(params.rope_finetuned)

        if params.ftype is not None:
            self.gguf.add_file_type(params.ftype)

    def extract_vocabulary_from_model(self, vocab: Vocab) -> tuple[list[bytes], list[float], list[gguf.TokenType]]:
        tokens = []
        scores = []
        toktypes = []

        # NOTE: `all_tokens` returns the base vocabulary and added tokens
        for text, score, toktype in vocab.all_tokens():
            tokens.append(text)
            scores.append(score)
            toktypes.append(toktype)

        assert len(tokens) == vocab.vocab_size

        return tokens, scores, toktypes

    def add_meta_vocab(self, vocab: Vocab) -> None:
        # Ensure that tokenizer_model is added to the GGUF model
        self.gguf.add_tokenizer_model(vocab.tokenizer_model)

        # Extract model vocabulary for model conversion
        tokens, scores, toktypes = self.extract_vocabulary_from_model(vocab)

        # Add extracted token information for model conversion
        self.gguf.add_token_list(tokens)
        self.gguf.add_token_scores(scores)
        self.gguf.add_token_types(toktypes)

    def add_meta_special_vocab(self, svocab: gguf.SpecialVocab) -> None:
        svocab.add_to_gguf(self.gguf)

    def add_tensor_info(self, name: str, tensor: LazyTensor) -> None:
        n_elements = int(np.prod(tensor.shape))
        raw_dtype = getattr(tensor.data_type, 'ggml_type', None)
        data_type = getattr(tensor.data_type, 'quantized_type', None) or tensor.data_type.dtype
        data_nbytes = tensor.data_type.elements_to_bytes(n_elements)
        self.gguf.add_tensor_info(name, tensor.shape, data_type, data_nbytes, raw_dtype=raw_dtype)

    def write_meta(self) -> None:
        self.gguf.write_header_to_file()
        self.gguf.write_kv_data_to_file()

    def write_tensor_info(self) -> None:
        self.gguf.write_ti_data_to_file()

    def write_tensor_data(self, ftype: GGMLFileType, model: LazyModel, concurrency: int) -> None:
        ndarrays_inner = bounded_parallel_map(OutputFile.do_item, model.items(), concurrency=concurrency)
        if ftype == GGMLFileType.MostlyQ8_0:
            ndarrays = bounded_parallel_map(
                OutputFile.maybe_do_quantize, ndarrays_inner, concurrency=concurrency, max_workers=concurrency,
                use_processpool_executor=True,
            )
        else:
            ndarrays = map(OutputFile.maybe_do_quantize, ndarrays_inner)

        start = time.time()
        for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
            elapsed = time.time() - start
            size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
            padi = len(str(len(model)))
            print(
                f"[{i + 1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type.name:4} | T+{int(elapsed):4}"
            )
            self.gguf.write_tensor_data(ndarray)

    def close(self) -> None:
        self.gguf.close()

    @staticmethod
    def write_vocab_only(
        fname_out: Path, params: Params, vocab: Vocab, svocab: gguf.SpecialVocab,
        endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False,
    ) -> None:
        check_vocab_size(params, vocab, pad_vocab=pad_vocab)

        of = OutputFile(fname_out, endianess=endianess)

        # meta data
        of.add_meta_arch(params)
        of.add_meta_vocab(vocab)
        of.add_meta_special_vocab(svocab)

        of.write_meta()

        of.close()

    @staticmethod
    def do_item(item: tuple[str, LazyTensor]) -> tuple[DataType, NDArray]:
        name, lazy_tensor = item
        tensor = lazy_tensor.load().to_ggml()
        return (lazy_tensor.data_type, tensor.ndarray)

    @staticmethod
    def maybe_do_quantize(item: tuple[DataType, NDArray]) -> NDArray:
        dt, arr = item
        if not isinstance(dt, QuantizedDataType):
            return arr
        return dt.quantize(arr)

    @staticmethod
    def write_all(
        fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: BaseVocab, svocab: gguf.SpecialVocab,
        concurrency: int = DEFAULT_CONCURRENCY, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE,
        pad_vocab: bool = False,
    ) -> None:
        check_vocab_size(params, vocab, pad_vocab=pad_vocab)

        of = OutputFile(fname_out, endianess=endianess)

        # meta data
        of.add_meta_arch(params)
        if isinstance(vocab, Vocab):
            of.add_meta_vocab(vocab)
            of.add_meta_special_vocab(svocab)
        else:  # NoVocab
            of.gguf.add_tokenizer_model(vocab.tokenizer_model)

        # tensor info
        for name, lazy_tensor in model.items():
            of.add_tensor_info(name, lazy_tensor)

        of.write_meta()
        of.write_tensor_info()

        # tensor data
        of.write_tensor_data(ftype, model, concurrency)

        of.close()


def pick_output_type(model: LazyModel, output_type_str: str | None) -> GGMLFileType:
    wq_type = model[gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ATTN_Q].format(bid=0) + ".weight"].data_type

    if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)):
        return GGMLFileType.AllF32
    if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16):
        return GGMLFileType.MostlyF16
    if output_type_str == "q8_0":
        return GGMLFileType.MostlyQ8_0

    name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()}

    raise ValueError(f"Unexpected combination of types: {name_to_type}")


def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
    return {name: tensor.astype(output_type.type_for_tensor(name, tensor))
            for (name, tensor) in model.items()}


def convert_model_names(model: LazyModel, params: Params, skip_unknown: bool) -> LazyModel:
    tmap = gguf.TensorNameMap(ARCH, params.n_layer)
    should_skip = set(gguf.MODEL_TENSOR_SKIP.get(ARCH, []))

    tmp = model

    # merge experts into one tensor
    if params.n_experts and params.n_experts > 0:
        for i_l in range(params.n_layer):
            for w in range(1, 4):
                experts = []
                for e in range(params.n_experts):
                    if f"layers.{i_l}.feed_forward.experts.{e}.w{w}.weight" in model:
                        experts.append(model[f"layers.{i_l}.feed_forward.experts.{e}.w{w}.weight"])
                        del tmp[f"layers.{i_l}.feed_forward.experts.{e}.w{w}.weight"]
                    elif f"model.layers.{i_l}.block_sparse_moe.experts.{e}.w{w}.weight" in model:
                        experts.append(model[f"model.layers.{i_l}.block_sparse_moe.experts.{e}.w{w}.weight"])
                        del tmp[f"model.layers.{i_l}.block_sparse_moe.experts.{e}.w{w}.weight"]
                    else:
                        raise ValueError(f"Expert tensor not found: layers.{i_l}.feed_forward.experts.{e}.w{w}.weight")
                tmp[f"layers.{i_l}.feed_forward.experts.w{w}.weight"] = pack_experts_lazy(experts)

    # HF models permute or pack some of the tensors, so we need to undo that
    for i in itertools.count():
        if f"model.layers.{i}.self_attn.q_proj.weight" in model:
            print(f"Permuting layer {i}")
            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head, params.n_head)
            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_head_kv)
            # tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
        elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
            print(f"Unpacking and permuting layer {i}")
            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head)
            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv)
            tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy        (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
            del tmp[f"model.layers.{i}.self_attn.W_pack.weight"]
        else:
            break

    out: LazyModel = {}
    for name, lazy_tensor in model.items():
        tensor_type, name_new = tmap.get_type_and_name(name, try_suffixes = (".weight", ".bias")) or (None, None)
        if name_new is None:
            if skip_unknown:
                print(f"Unexpected tensor name: {name} - skipping")
                continue
            raise ValueError(f"Unexpected tensor name: {name}. Use --skip-unknown to ignore it (e.g. LLaVA)")

        if tensor_type in should_skip:
            print(f"skipping tensor {name_new}")
            continue

        print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}")
        out[name_new] = lazy_tensor

    return out


def nth_multifile_path(path: Path, n: int) -> Path | None:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the nth path in the model.
    '''
    # Support the following patterns:
    patterns = [
        # - x.00.pth, x.01.pth, etc.
        (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'),
        # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc.
        (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'),
        # x.bin, x.bin.1, etc.
        (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}')
    ]
    for regex, replacement in patterns:
        if re.search(regex, path.name):
            new_path = path.with_name(re.sub(regex, replacement, path.name))
            if new_path.exists():
                return new_path
    return None


def find_multifile_paths(path: Path) -> list[Path]:
    '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
    the whole list of paths in the model.
    '''
    ret: list[Path] = []
    for i in itertools.count():
        nth_path = nth_multifile_path(path, i)
        if nth_path is None:
            break
        ret.append(nth_path)
    if not ret:
        # No matches. This should only happen if the file was named, e.g.,
        # foo.0, and there was no file named foo. Oh well, try to process it
        # as a single file.
        return [path]
    return ret
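
A quick check of the shard-name patterns handled above (pure string substitution; `nth_multifile_path` additionally verifies the candidate file exists on disk):

import re

name = 'pytorch_model-00001-of-00003.bin'
n = 2
print(re.sub(r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1', name))
# pytorch_model-00002-of-00003.bin
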
+ 
+ def load_some_model(path: Path) -> ModelPlus:
+     '''Load a model of any supported format.'''
+     # Be extra-friendly and accept either a file or a directory:
+     if path.is_dir():
+         # Check if it's a set of safetensors files first
+         globs = ["model-00001-of-*.safetensors", "model.safetensors", "consolidated.safetensors"]
+         files = [file for glob in globs for file in path.glob(glob)]
+         if not files:
+             # Try the PyTorch patterns too, with lower priority
+             globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
+             files = [file for glob in globs for file in path.glob(glob)]
+         if not files:
+             raise FileNotFoundError(f"Can't find model in directory {path}")
+         if len(files) > 1:
+             raise ValueError(f"Found multiple models in {path}, not sure which to pick: {files}")
+         path = files[0]
+ 
+     paths = find_multifile_paths(path)
+     models_plus: list[ModelPlus] = []
+     for path in paths:
+         print(f"Loading model file {path}")
+         models_plus.append(lazy_load_file(path))
+ 
+     model_plus = merge_multifile_models(models_plus)
+     return model_plus
+ 
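+ # load_some_model accepts either a model file or a directory (searched with the
+ # globs above); any sibling shards are then discovered and merged into one lazy model.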
+ 
+ class VocabFactory:
+     _VOCAB_CLASSES: list[type[Vocab]] = [SentencePieceVocab, BpeVocab, LlamaHfVocab]
+ 
+     def __init__(self, path: Path):
+         self.path = path
+ 
+     def _create_special_vocab(self, vocab: BaseVocab, model_parent_path: Path) -> gguf.SpecialVocab:
+         load_merges = vocab.name == "bpe"
+         n_vocab = vocab.vocab_size if isinstance(vocab, Vocab) else None
+         return gguf.SpecialVocab(
+             model_parent_path,
+             load_merges=load_merges,
+             special_token_types=None,  # Predetermined or passed as a parameter
+             n_vocab=n_vocab,
+         )
+ 
+     def _create_vocab_by_path(self, vocab_types: list[str]) -> Vocab:
+         vocab_classes: dict[str, type[Vocab]] = {cls.name: cls for cls in self._VOCAB_CLASSES}
+         selected_vocabs: dict[str, type[Vocab]] = {}
+         for vtype in vocab_types:
+             try:
+                 selected_vocabs[vtype] = vocab_classes[vtype]
+             except KeyError:
+                 raise ValueError(f"Unsupported vocabulary type {vtype}") from None
+ 
+         for vtype, cls in selected_vocabs.items():
+             try:
+                 vocab = cls(self.path)
+                 break
+             except FileNotFoundError:
+                 pass  # ignore unavailable tokenizers
+         else:
+             raise FileNotFoundError(f"Could not find a tokenizer matching any of {vocab_types}")
+ 
+         print(f"Loaded vocab file {vocab.fname_tokenizer!r}, type {vocab.name!r}")
+         return vocab
+ 
+     def load_vocab(self, vocab_types: list[str] | None, model_parent_path: Path) -> tuple[BaseVocab, gguf.SpecialVocab]:
+         vocab: BaseVocab
+         if vocab_types is None:
+             vocab = NoVocab()
+         else:
+             vocab = self._create_vocab_by_path(vocab_types)
+         # FIXME: Respect --vocab-dir?
+         special_vocab = self._create_special_vocab(
+             vocab,
+             model_parent_path,
+         )
+         return vocab, special_vocab
+ 
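+ # Typical use, mirroring main() below:
+ #   vocab, special_vocab = VocabFactory(vocab_path).load_vocab(["spm", "hfft"], model_parent_path)
+ # Passing None as the type list yields a NoVocab placeholder (used by --no-vocab).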
+ 
+ def default_outfile(model_paths: list[Path], file_type: GGMLFileType) -> Path:
+     namestr = {
+         GGMLFileType.AllF32: "f32",
+         GGMLFileType.MostlyF16: "f16",
+         GGMLFileType.MostlyQ8_0: "q8_0",
+     }[file_type]
+     ret = model_paths[0].parent / f"ggml-model-{namestr}.gguf"
+     if ret in model_paths:
+         sys.stderr.write(
+             f"Error: Default output path ({ret}) would overwrite the input. "
+             "Please explicitly specify a path using --outfile.\n")
+         sys.exit(1)
+     return ret
+ 
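+ # e.g. an input of models/7B/consolidated.00.pth converted as MostlyF16 defaults
+ # to models/7B/ggml-model-f16.gguf, refusing to clobber any of the input files.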
+ 
+ def do_dump_model(model_plus: ModelPlus) -> None:
+     print(f"model_plus.paths = {model_plus.paths!r}")
+     print(f"model_plus.format = {model_plus.format!r}")
+     print(f"model_plus.vocab = {model_plus.vocab!r}")
+     for name, lazy_tensor in model_plus.model.items():
+         print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}")
+ 
+ 
+ def main(args_in: list[str] | None = None) -> None:
+     output_choices = ["f32", "f16"]
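+     # Endianness probe: newbyteorder("<") relabels the same bytes as little-endian,
+     # so the comparison below holds only on a little-endian host.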
+     if np.uint32(1) == np.uint32(1).newbyteorder("<"):
+         # We currently only support Q8_0 output on little endian systems.
+         output_choices.append("q8_0")
+     parser = argparse.ArgumentParser(description="Convert a LLaMA model to a GGML compatible file")
+     parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model")
+     parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file")
+     parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
+     parser.add_argument("--no-vocab", action="store_true", help="store model without the vocab")
+     parser.add_argument("--outtype", choices=output_choices, help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)")
+     parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
+     parser.add_argument("--vocab-type", help="vocab types to try in order, choose from 'spm', 'bpe', 'hfft' (default: spm,hfft)", default="spm,hfft")
+     parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
+     parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
+     parser.add_argument("--ctx", type=int, help="model training context (default: based on input)")
+     parser.add_argument("--concurrency", type=int, help=f"concurrency used for conversion (default: {DEFAULT_CONCURRENCY})", default=DEFAULT_CONCURRENCY)
+     parser.add_argument("--big-endian", action="store_true", help="model is executed on big endian machine")
+     parser.add_argument("--pad-vocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides")
+     parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing")
+ 
+     args = parser.parse_args(args_in)
+     if args.no_vocab and args.vocab_only:
+         raise ValueError("--vocab-only does not make sense with --no-vocab")
+ 
+     if args.dump_single:
+         model_plus = lazy_load_file(args.model)
+         do_dump_model(model_plus)
+         return
+ 
+     if not args.vocab_only:
+         model_plus = load_some_model(args.model)
+     else:
+         model_plus = ModelPlus(model={}, paths=[args.model / 'dummy'], format='none', vocab=None)
+ 
+     if args.dump:
+         do_dump_model(model_plus)
+         return
+ 
+     endianess = gguf.GGUFEndian.LITTLE
+     if args.big_endian:
+         endianess = gguf.GGUFEndian.BIG
+ 
+     params = Params.load(model_plus)
+     if params.n_ctx == -1:
+         if args.ctx is None:
+             msg = """\
+                 The model doesn't have a context size, and you didn't specify one with --ctx
+                 Please specify one with --ctx:
+                 - LLaMA v1: --ctx 2048
+                 - LLaMA v2: --ctx 4096"""
+             parser.error(textwrap.dedent(msg))
+         params.n_ctx = args.ctx
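+     # (Params.load reports n_ctx == -1 when the checkpoint carries no context
+     # length, which is why --ctx becomes mandatory in that case.)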
+ 
+     if args.outtype:
+         params.ftype = {
+             "f32": GGMLFileType.AllF32,
+             "f16": GGMLFileType.MostlyF16,
+             "q8_0": GGMLFileType.MostlyQ8_0,
+         }[args.outtype]
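+     # An explicit --outtype overrides the file type otherwise picked from the
+     # input tensors (f16 or f32, per the help text above).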
+ 
+     print(f"params = {params}")
+ 
+     model_parent_path = model_plus.paths[0].parent
+     vocab_path = Path(args.vocab_dir or args.model or model_parent_path)
+     vocab_factory = VocabFactory(vocab_path)
+     vocab_types = None if args.no_vocab else args.vocab_type.split(",")
+     vocab, special_vocab = vocab_factory.load_vocab(vocab_types, model_parent_path)
+ 
+     if args.vocab_only:
+         assert isinstance(vocab, Vocab)
+         if not args.outfile:
+             raise ValueError("need --outfile if using --vocab-only")
+         outfile = args.outfile
+         OutputFile.write_vocab_only(outfile, params, vocab, special_vocab,
+                                     endianess=endianess, pad_vocab=args.pad_vocab)
+         print(f"Wrote {outfile}")
+         return
+ 
+     if model_plus.vocab is not None and args.vocab_dir is None and not args.no_vocab:
+         vocab = model_plus.vocab
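+         # A vocab embedded in the model file takes precedence here, unless
+         # --vocab-dir explicitly pointed at a separate tokenizer.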
+ 
+     print(f"Vocab info: {vocab}")
+     print(f"Special vocab info: {special_vocab}")
+ 
+     model = model_plus.model
+     model = convert_model_names(model, params, args.skip_unknown)
+     ftype = pick_output_type(model, args.outtype)
+     model = convert_to_output_type(model, ftype)
+     outfile = args.outfile or default_outfile(model_plus.paths, ftype)
+ 
+     params.ftype = ftype
+     print(f"Writing {outfile}, format {ftype}")
+ 
+     OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab,
+                          concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab)
+     print(f"Wrote {outfile}")
+ 
+ 
+ if __name__ == '__main__':
+     main()
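+ # Example invocations (paths illustrative):
+ #   python convert.py models/7B --outtype f16
+ #   python convert.py models/7B --vocab-only --outfile models/7B/vocab.gguf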