Commit 1a800b4: add README
Parent(s): c10a920
README.md  ADDED  @@ -0,0 +1,76 @@
# Tiny model creation script

```python
"""
Create a tiny GigaChat3 model for testing.

GigaChat3 uses DeepseekV3Config (no text_config/vision_config sub-objects).
Key constraint: qk_head_dim == qk_nope_head_dim + qk_rope_head_dim
"""
import json
import os

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_id = "ai-sage/GigaChat3-10B-A1.8B-bf16"
output_dir = "./tiny-gigachat3"

config = AutoConfig.from_pretrained(model_id)
config.num_hidden_layers = 2
config.num_attention_heads = 2
config.num_key_value_heads = 2
config.hidden_size = 32
config.intermediate_size = 64
config.moe_intermediate_size = 32
config.n_routed_experts = 4
config.n_shared_experts = 1
config.num_experts_per_tok = 2
config.kv_lora_rank = 8
config.q_lora_rank = None

# Attention head dims -- MUST satisfy: qk_head_dim == qk_nope_head_dim + qk_rope_head_dim
config.qk_nope_head_dim = 4
config.qk_rope_head_dim = 2
config.qk_head_dim = 6  # 4 + 2
config.v_head_dim = 4
config.head_dim = config.qk_rope_head_dim  # used by RoPE

TINY_VOCAB = 32000
config.vocab_size = TINY_VOCAB

assert config.qk_head_dim == config.qk_nope_head_dim + config.qk_rope_head_dim
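# Extra sanity check (an addition, not in the original script): top-k MoE
# routing needs at least as many routed experts as experts selected per token.
assert config.num_experts_per_tok <= config.n_routed_experts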

os.makedirs(output_dir, exist_ok=True)
model = AutoModelForCausalLM.from_config(config)
model.save_pretrained(output_dir)

tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.save_pretrained(output_dir)

tok_path = os.path.join(output_dir, "tokenizer.json")
with open(tok_path, encoding="utf-8") as f:
    tok_data = json.load(f)

if "model" in tok_data and "vocab" in tok_data["model"]:
    tok_data["model"]["vocab"] = {
        k: v for k, v in tok_data["model"]["vocab"].items() if v < TINY_VOCAB
    }
    tok_data["model"]["merges"] = []

if "added_tokens" in tok_data:
    tok_data["added_tokens"] = [t for t in tok_data["added_tokens"] if t["id"] < TINY_VOCAB]

with open(tok_path, "w", encoding="utf-8") as f:
    json.dump(tok_data, f, ensure_ascii=False)
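
# Optional check (an addition, not in the original script): verify the truncated
# tokenizer.json still loads from disk; a throwaway name keeps the smoke test
# below running on the in-memory tokenizer, unchanged.
_reloaded_tok = AutoTokenizer.from_pretrained(output_dir)
assert _reloaded_tok.vocab_size <= TINY_VOCAB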

# ── Smoke test ───────────────────────────────────────────────────────────────
tokens = tokenizer("Hello world", return_tensors="pt")
tokens.pop("token_type_ids", None)
with torch.no_grad():
    out = model(**tokens)

total_mb = sum(os.path.getsize(os.path.join(output_dir, fn)) for fn in os.listdir(output_dir)) / 1e6
print(f"shape={out.logits.shape} params={sum(p.numel() for p in model.parameters())/1e6:.2f}M size={total_mb:.1f} MB")

```
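
A minimal usage sketch (an addition to this README, not part of the script above): it assumes the creation script has already been run so that `./tiny-gigachat3` exists, and reloads the tiny checkpoint the way a test would.

```python
# Usage sketch -- assumes ./tiny-gigachat3 was produced by the script above.
# The prompt text is arbitrary.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("./tiny-gigachat3")
model = AutoModelForCausalLM.from_pretrained("./tiny-gigachat3")

inputs = tok("Hello world", return_tensors="pt")
inputs.pop("token_type_ids", None)  # same guard as in the creation script
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # (batch, seq_len, 32000) for this tiny config
```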