"""
Export the trained model to HuggingFace-compatible format.
Creates:
- model.safetensors (weights)
- config.json (architecture config)
- generation_config.json
- tokenizer.json, tokenizer_config.json, special_tokens_map.json
"""
import os
import sys
import json
import torch
from collections import OrderedDict
from safetensors.torch import save_file
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from model.config import ModelConfig
from model.transformer import Transformer
from model.data import get_tokenizer
CHECKPOINT = "/jfs/deepak-kumar/checkpoints_dpo/dpo_final.pt"
OUTPUT_DIR = "/home/jovyan/training/hf_model"
os.makedirs(OUTPUT_DIR, exist_ok=True)
print("=" * 60)
print(" EXPORTING MODEL TO HUGGING FACE FORMAT")
print("=" * 60)
# --- 1. Load model ---
print("\n[1/4] Loading checkpoint...")
tokenizer = get_tokenizer()
special_tokens = ["<|user|>", "<|assistant|>", "<|end|>"]
vocab = tokenizer.get_vocab()
new_tokens = [t for t in special_tokens if t not in vocab]
if new_tokens:
    tokenizer.add_tokens(new_tokens, special_tokens=True)
model_config = ModelConfig()
model_config.vocab_size = len(tokenizer)
model = Transformer(model_config)
ckpt = torch.load(CHECKPOINT, map_location="cpu", weights_only=False)
model.load_state_dict(ckpt["model"])
step = ckpt.get("step", 0)
del ckpt
print(f" Loaded DPO model (step {step}, vocab {model_config.vocab_size})")
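# Guard (added safeguard, not part of the original flow): the checkpoint's
# embedding row count must match the resized tokenizer, or the shapes implied
# by config.json below will not load under transformers.
assert model.state_dict()["tok_embeddings.weight"].shape[0] == len(tokenizer), \
    "checkpoint vocab size does not match tokenizer"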
# --- 2. Convert state dict keys to HF-style naming ---
print("\n[2/4] Converting weights to safetensors...")
state_dict = model.state_dict()
hf_state = OrderedDict()
KEY_MAP = {
    "tok_embeddings.weight": "model.embed_tokens.weight",
    "norm.weight": "model.norm.weight",
    "output.weight": "lm_head.weight",
}
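# NOTE (assumption): the attention projections below are copied 1:1, which is
# only correct if this Transformer applies RoPE the same way HF Llama does
# (rotate-half over contiguous halves). If it rotates interleaved pairs
# instead, wq/wk would first need the head-wise permutation that
# transformers' convert_llama_weights_to_hf.py performs.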
for key, tensor in state_dict.items():
    if key in KEY_MAP:
        hf_state[KEY_MAP[key]] = tensor
        continue
    if key.startswith("layers."):
        parts = key.split(".")
        layer_idx = parts[1]
        rest = ".".join(parts[2:])
        layer_map = {
            "attention_norm.weight": f"model.layers.{layer_idx}.input_layernorm.weight",
            "ffn_norm.weight": f"model.layers.{layer_idx}.post_attention_layernorm.weight",
            "attention.wq.weight": f"model.layers.{layer_idx}.self_attn.q_proj.weight",
            "attention.wk.weight": f"model.layers.{layer_idx}.self_attn.k_proj.weight",
            "attention.wv.weight": f"model.layers.{layer_idx}.self_attn.v_proj.weight",
            "attention.wo.weight": f"model.layers.{layer_idx}.self_attn.o_proj.weight",
            "ffn.w_gate.weight": f"model.layers.{layer_idx}.mlp.gate_proj.weight",
            "ffn.w_up.weight": f"model.layers.{layer_idx}.mlp.up_proj.weight",
            "ffn.w_down.weight": f"model.layers.{layer_idx}.mlp.down_proj.weight",
        }
        if rest in layer_map:
            hf_state[layer_map[rest]] = tensor
        else:
            print(f" WARNING: unmapped key {key}")
            hf_state[key] = tensor
    elif key == "freqs_cis":
        # Precomputed RoPE frequency buffer; HF Llama recomputes this, so skip it.
        continue
    else:
        print(f" WARNING: unmapped key {key}")
        hf_state[key] = tensor
# Downcast float32 tensors to bfloat16 for storage (matches "torch_dtype" below);
# tensors already in other dtypes pass through unchanged.
for k in hf_state:
    if hf_state[k].dtype == torch.float32:
        hf_state[k] = hf_state[k].to(torch.bfloat16)
safetensors_path = os.path.join(OUTPUT_DIR, "model.safetensors")
save_file(hf_state, safetensors_path)
size_gb = os.path.getsize(safetensors_path) / 1e9
print(f" Saved {len(hf_state)} tensors -> {safetensors_path} ({size_gb:.2f} GB)")
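# Optional round-trip check (an extra safeguard, not part of the original
# export flow; cheap, since safetensors memory-maps the file on load).
from safetensors.torch import load_file
assert len(load_file(safetensors_path)) == len(hf_state), "round-trip tensor count mismatch"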
# --- 3. Write config files ---
print("\n[3/4] Writing config files...")
config_json = {
    "architectures": ["LlamaForCausalLM"],
    "model_type": "llama",
    "vocab_size": model_config.vocab_size,
    "hidden_size": model_config.hidden_dim,
    "intermediate_size": model_config.intermediate_dim,
    "num_hidden_layers": model_config.num_layers,
    "num_attention_heads": model_config.num_attention_heads,
    "num_key_value_heads": model_config.num_kv_heads,
    "max_position_embeddings": model_config.max_seq_len,
    "rope_theta": model_config.rope_theta,
    "rms_norm_eps": model_config.rms_norm_eps,
    "hidden_act": "silu",
    "initializer_range": 0.02,
    "tie_word_embeddings": False,
    "torch_dtype": "bfloat16",
    "transformers_version": "4.40.0",
    "use_cache": True,
    "bos_token_id": tokenizer.bos_token_id,
    "eos_token_id": tokenizer.eos_token_id,
    "pad_token_id": tokenizer.pad_token_id,
}
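# Sanity check (assumption: HF Llama derives head_dim as
# hidden_size // num_attention_heads, so the division must be exact).
assert model_config.hidden_dim % model_config.num_attention_heads == 0, \
    "hidden_dim must be divisible by num_attention_heads"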
with open(os.path.join(OUTPUT_DIR, "config.json"), "w") as f:
    json.dump(config_json, f, indent=2)
print(" config.json")
gen_config = {
    "bos_token_id": tokenizer.bos_token_id,
    "eos_token_id": tokenizer.eos_token_id,
    "pad_token_id": tokenizer.pad_token_id,
    "do_sample": True,
    "temperature": 0.7,
    "top_k": 50,
    "top_p": 0.9,
    "repetition_penalty": 1.15,
    "max_new_tokens": 512,
    "transformers_version": "4.40.0",
}
with open(os.path.join(OUTPUT_DIR, "generation_config.json"), "w") as f:
    json.dump(gen_config, f, indent=2)
print(" generation_config.json")
# --- 4. Export tokenizer ---
print("\n[4/4] Exporting tokenizer...")
tokenizer.save_pretrained(OUTPUT_DIR)
print(" Tokenizer files saved")
print("\n" + "=" * 60)
print(" EXPORT COMPLETE -> " + OUTPUT_DIR)
print("=" * 60)
print("\nFiles:")
for f in sorted(os.listdir(OUTPUT_DIR)):
    size = os.path.getsize(os.path.join(OUTPUT_DIR, f))
    if size > 1e6:
        print(f" {f:40s} {size/1e6:.1f} MB")
    else:
        print(f" {f:40s} {size/1e3:.1f} KB")
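# Optional smoke test (assumes the `transformers` package is available in this
# environment); left commented out so the export script adds no dependency:
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   hf_model = AutoModelForCausalLM.from_pretrained(OUTPUT_DIR, torch_dtype=torch.bfloat16)
#   hf_tok = AutoTokenizer.from_pretrained(OUTPUT_DIR)
#   ids = hf_tok("<|user|>Hello<|end|><|assistant|>", return_tensors="pt")
#   print(hf_tok.decode(hf_model.generate(**ids, max_new_tokens=16)[0]))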