# =============================================================================
# COPYRIGHT © 2025-2026  Konstantin Vladimirovich Grabko. ALL RIGHTS RESERVED.
# CMS Manhattan JiRack Technology — PATENT PENDING
#
# This code is proprietary. 
# Personal and non-commercial research use is allowed.
# Any commercial use, derivative works for profit, or distribution 
# requires a paid license and 5% royalty.
#
# Unauthorized commercial use is strictly prohibited.
# Contact: grabko@cmsmanhattan.com
# =============================================================================
#
# Model updated for the latest tokenizer version.
#
# Replace the tokenizer in an existing model: use this class and call the resize
# function in your training script (a minimal sketch appears right after the
# imports below).
# New model: use this conversion script to quickly initialize a new model by
# copying the existing embedding and lm_head weights. This enables fast model
# bootstrapping, or provides the foundation for training a new model entirely
# from scratch.
# 
# =============================================================================

# =============================================================================
# COPYRIGHT © 2025 Konstantin Vladimirovich Grabko. ALL RIGHTS RESERVED.
# =============================================================================

import torch
from JiRackTernary_new import JiRackConfig, JiRackTernary1B
from transformers import AutoTokenizer
from safetensors.torch import load_file, save_file
import os
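
# -----------------------------------------------------------------------------
# Minimal sketch of the "call the resize function in your training script" path
# mentioned in the header. It is illustrative only and not used by the
# conversion flow below; it assumes `token_emb` is an nn.Embedding, `lm_head`
# is a bias-free nn.Linear (the same attributes accessed further down), and
# that newly added rows should be mean-initialized.
# -----------------------------------------------------------------------------
def resize_vocab(model, new_vocab_size: int):
    """Grow token_emb and lm_head to new_vocab_size, preserving existing rows."""
    old_size, emb_dim = model.token_emb.weight.shape

    new_emb = torch.nn.Embedding(new_vocab_size, emb_dim)
    new_head = torch.nn.Linear(emb_dim, new_vocab_size, bias=False)

    with torch.no_grad():
        # Keep the rows that already exist.
        new_emb.weight[:old_size] = model.token_emb.weight
        new_head.weight[:old_size] = model.lm_head.weight
        # Mean-initialize the newly added token rows.
        mean_row = model.token_emb.weight.mean(dim=0)
        new_emb.weight[old_size:] = mean_row
        new_head.weight[old_size:] = mean_row

    model.token_emb = new_emb
    model.lm_head = new_head
    return model
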

print("🚀 Copying embeddings and lm_head...")

old_model_path = "." 
new_tokenizer_path = "./jirack_code_tokenizer" 
save_path = "./JiRack_init_model_with_new_vocab"

os.makedirs(save_path, exist_ok=True)

# Load new tokenizer
tokenizer = AutoTokenizer.from_pretrained(new_tokenizer_path)
new_vocab_size = len(tokenizer)

print(f"New vocab size: {new_vocab_size}")

# Create new model
config = JiRackConfig()
model = JiRackTernary1B(config)
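
# Sanity check (assumption: the default JiRackConfig already sizes the model's
# vocabulary to match the new tokenizer). If this fails, adjust the config
# before copying weights below.
assert model.token_emb.weight.shape[0] == new_vocab_size, \
    f"model vocab {model.token_emb.weight.shape[0]} != tokenizer vocab {new_vocab_size}"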

# Load old model (SafeTensors)
old_state = load_file(f"{old_model_path}/model.safetensors")

# Vocabulary size of the original model (number of rows copied from the old checkpoint)
old_vocab_size = 128256

with torch.no_grad():
    # Copy old weights
    model.token_emb.weight[:old_vocab_size] = old_state['token_emb.weight'][:old_vocab_size].clone()
    model.lm_head.weight[:old_vocab_size] = old_state['lm_head.weight'][:old_vocab_size].clone()
    
    # Initialize the 3 new (FIM) tokens with the mean of the old embeddings
    mean_emb = old_state['token_emb.weight'].mean(dim=0)
    model.token_emb.weight[old_vocab_size:] = mean_emb
    model.lm_head.weight[old_vocab_size:] = mean_emb

print(f"✅ Copied {old_vocab_size} tokens")
print(f"✅ Initialized {new_vocab_size - old_vocab_size} new tokens")

# Save in SafeTensors
save_file(model.state_dict(), f"{save_path}/model.safetensors")
tokenizer.save_pretrained(save_path)

print(f"\n🎉 Done! New model saved to: {save_path}")
print("Use this folder as the starting weights for training from scratch.")