Create copy_weights.py
Browse files- copy_weights.py +45 -0
copy_weights.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Copy embedding / lm_head weights from an old checkpoint into a model with
an enlarged vocabulary, seeding the newly added token rows with the mean of
the corresponding existing rows (the standard init for added special tokens).

Inputs (relative to the working directory):
  - ./pytorch_model.bin               old checkpoint state dict
  - ./jirack_code_tokenizer_fixed     tokenizer carrying the extended vocab
Output:
  - ./JiRack_init_new_vocab           initialized state dict + tokenizer files
"""

import os

import torch
from transformers import AutoTokenizer

from JiRackTernary_new import JiRackConfig, JiRackTernary1B

print("🚀 Copying embeddings and lm_head...")

old_model_path = "."
new_tokenizer_path = "./jirack_code_tokenizer_fixed"
save_path = "./JiRack_init_new_vocab"

os.makedirs(save_path, exist_ok=True)

# Load the new (extended) tokenizer; its length defines the new vocab size.
tokenizer = AutoTokenizer.from_pretrained(new_tokenizer_path)
new_vocab_size = len(tokenizer)

# Create a fresh model — assumes JiRackConfig's default vocab size already
# matches the extended tokenizer (TODO confirm against JiRackTernary_new).
config = JiRackConfig()
model = JiRackTernary1B(config)

# Load old model weights. NOTE(review): torch.load unpickles arbitrary
# objects — only run this on a checkpoint you trust (consider
# weights_only=True on torch >= 2.0).
old_state = torch.load(f"{old_model_path}/pytorch_model.bin", map_location="cpu")

# Vocab size of the original checkpoint (rows to copy verbatim).
old_vocab_size = 128256

# Fail fast instead of silently producing a broken checkpoint.
if new_vocab_size < old_vocab_size:
    raise ValueError(
        f"new tokenizer has {new_vocab_size} tokens, fewer than the "
        f"{old_vocab_size} being copied from the old checkpoint"
    )

with torch.no_grad():
    # Copy the old rows verbatim into the enlarged matrices.
    model.token_emb.weight[:old_vocab_size] = old_state['token_emb.weight'][:old_vocab_size]
    model.lm_head.weight[:old_vocab_size] = old_state['lm_head.weight'][:old_vocab_size]

    # Initialize the new tokens (FIM markers) with each matrix's OWN mean.
    # BUGFIX: previously the new lm_head rows reused the *embedding* mean;
    # unless the weights are tied, each matrix should be seeded from its
    # own existing rows.
    model.token_emb.weight[old_vocab_size:] = old_state['token_emb.weight'].mean(dim=0)
    model.lm_head.weight[old_vocab_size:] = old_state['lm_head.weight'].mean(dim=0)

print(f"✅ Copied {old_vocab_size} tokens")
print(f"✅ Initialized {new_vocab_size - old_vocab_size} new tokens")

# Save the initialized weights alongside the tokenizer so the folder is a
# self-contained starting point for training.
torch.save(model.state_dict(), f"{save_path}/pytorch_model.bin")
tokenizer.save_pretrained(save_path)

print(f"\n🎉 Done! New model saved to: {save_path}")
print("Use this folder as the starting weights for training from scratch.")