# falcon.py: convert a Hugging Face Falcon checkpoint (pytorch_model*.bin)
# to a single ggml/ggjt file that ggml-based Falcon inference code can load.
import sys
import struct

import torch

from transformers import AutoTokenizer, AutoConfig

# Tensor data in a ggjt file must start on a GGML_MEM_ALIGN-byte boundary.
GGML_MEM_ALIGN = 32


def bytes_to_unicode():
    """
    Returns a mapping between utf-8 bytes and unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we use lookup tables between utf-8 bytes and unicode strings,
    and avoid mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
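
# For example, printable bytes map to themselves, while byte 0x20 (space) is
# remapped to U+0120 "Ġ", the familiar GPT-2 space marker:
#
#   enc = bytes_to_unicode()
#   assert enc[ord("A")] == "A"
#   assert enc[0x20] == "\u0120"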


if len(sys.argv) < 4:
    print("Usage: python3 falcon.py num_parts model_name output [use-f32]")
    print("  num_parts:  number of pytorch parts; use 0 for a single-file model. example: 2")
    print("  model_name: name or path of the model to convert.")
    print("  output:     path of the ggml file to write.")
    print("  use-f32:    if present, store weights as float32 instead of float16.")
    sys.exit(1)

num_parts = int(sys.argv[1])
model_name = sys.argv[2]
output = sys.argv[3]

# Output data type: ftype 1 = float16 (default), 0 = float32.
ftype_str = ["f32", "f16"]
ftype = 1
if len(sys.argv) > 4:
    ftype = 0

tokenizer = AutoTokenizer.from_pretrained(model_name)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
hparams = config.to_dict()

print("* Loading model from:", model_name)

fout = open(output, "wb")

# ggjt magic, written byte-reversed so it reads back as the little-endian
# uint32 0x67676a74 ("ggjt").
fout.write(b"ggjt"[::-1])

n_vocab = hparams["vocab_size"]
n_embd = hparams["hidden_size"]
n_head = hparams["n_head"]
n_head_kv = hparams["n_head_kv"] if "n_head_kv" in hparams else 1
n_layer = hparams["n_layer"]
head_dim = n_embd // n_head

config_values = [
    3,  # file version
    n_vocab,
    n_embd,
    n_head,
    n_head_kv,
    n_layer,
    ftype,
]
fout.write(struct.pack("i" * len(config_values), *config_values))

# Write the vocabulary: for each of the n_vocab tokens, its utf-8 length,
# its raw bytes, and a score.
reverse_vocab = {tok_id: tok for tok, tok_id in tokenizer.vocab.items()}
byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for k, v in byte_encoder.items()}

for i in range(hparams["vocab_size"]):
    text = bytearray([byte_decoder[c] for c in reverse_vocab[i]])
    fout.write(struct.pack("i", len(text)))
    fout.write(text)
    # Falcon's BPE tokenizer has no per-token scores; write a placeholder.
    fout.write(struct.pack("f", 0.0))

if num_parts == 0:
    partnames = ('pytorch_model.bin',)
else:
    partnames = (f'pytorch_model-{n:05}-of-{num_parts:05}.bin' for n in range(1, num_parts + 1))

for partname in partnames:
    filename = f'{model_name}/{partname}'
    print(f'\n* Loading part: {partname}')
    model = torch.load(filename, map_location='cpu')
    for name in model.keys():
        # The fused query_key_value weight stores n_head_kv groups, each made
        # of n_head // n_head_kv query heads followed by one key head and one
        # value head that the whole group shares. Rearrange it so all query
        # heads come first, then the key heads, then the value heads.
        if "query_key_value" in name:
            qkv = model[name].view(
                n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)

            q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head)
            k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
            v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)

            model[name] = torch.cat((q, k, v)).reshape_as(model[name])
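
            # Illustration: falcon-7b has n_embd=4544, n_head=71, n_head_kv=1,
            # head_dim=64, so the fused weight is ((71 + 2) * 64, 4544); after
            # the shuffle the first 71*64 rows are queries, then 64 key rows,
            # then 64 value rows.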

        tensor = model[name]
        # Keep 1-D tensors (biases, norms) in f32 even when converting to f16.
        ftype_cur = 1 if ftype == 1 and tensor.ndim > 1 else 0
        print(f'  {name} {tuple(tensor.shape)} -> {ftype_str[ftype_cur]}')

        # Per-tensor header: n_dims, name length and dtype, then the dims
        # (reversed, since ggml stores the innermost dimension first) and
        # the name itself.
        sname = name.encode('utf-8')
        fout.write(struct.pack("i" * 3, tensor.ndim, len(sname), ftype_cur))
        fout.write(struct.pack("i" * tensor.ndim, *tensor.shape[::-1]))
        fout.write(sname)

        # Pad to the next GGML_MEM_ALIGN boundary so the tensor data can be
        # memory-mapped directly.
        aligned_pos = (fout.tell() + (GGML_MEM_ALIGN - 1)) & -GGML_MEM_ALIGN
        fout.seek(aligned_pos)
        tensor.to(dtype=torch.float16 if ftype_cur == 1 else torch.float32).numpy().tofile(fout)
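
        # e.g. a file position of 100 rounds up to 128; the bytes skipped by
        # seek() are zero-filled, and the loader only needs the start offset
        # to be aligned.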

fout.close()

print("GGML model file saved to " + output)
print("")