# NOTE(review): removed non-Python paste residue ("File size: 1,129 Bytes" / commit hash)
# that would have been a SyntaxError at import time.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# NOTE(review): removed a stray line-number gutter (bare integers 1-33) left over
# from a copy-paste; they were no-op expressions, not part of the script.
from transformers import AutoModelForCausalLM, AutoTokenizer

# Inspect the weight-matrix shapes of a causal LM (embedding, LM head, and
# per-layer attention/MLP projections). Replace with your model.
model_name = "meta-llama/Llama-2-7b-hf"

# Load model and tokenizer.
# torch_dtype="auto" keeps the checkpoint's native dtype (fp16/bf16 for
# Llama-2) instead of upcasting to fp32 — halves host memory for a job that
# only reads shapes and never runs a forward pass.
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)

# NOTE: vocab_size is the base vocabulary; tokens added after pretraining are
# excluded (use len(tokenizer) for the full count).
print(f"Vocab size: {tokenizer.vocab_size}")

# Assuming standard LLaMA-like structure
hidden_dim = model.config.hidden_size
print(f"Hidden dimension: {hidden_dim}")

# Input embedding — get_input_embeddings() is the architecture-agnostic
# accessor (works even when the model does not expose model.model.embed_tokens).
print(f"Input embedding: {model.get_input_embeddings().weight.shape}")

# LM head (output projection). get_output_embeddings() returns None on models
# that have no separate head (e.g. fully tied embeddings).
lm_head = model.get_output_embeddings()
if lm_head is not None:
    print(f"LM head: {lm_head.weight.shape}")

# Per-layer projection shapes. This loop assumes a LLaMA-like decoder layer
# (self_attn with q/k/v/o projections + gated MLP); other architectures will
# raise AttributeError here.
for idx, layer in enumerate(model.model.layers):
    print(f"\nLayer {idx}")
    print(f"q_proj: {layer.self_attn.q_proj.weight.shape}")
    print(f"k_proj: {layer.self_attn.k_proj.weight.shape}")
    print(f"v_proj: {layer.self_attn.v_proj.weight.shape}")
    print(f"o_proj: {layer.self_attn.o_proj.weight.shape}")
    print(f"mlp_up: {layer.mlp.up_proj.weight.shape}")
    print(f"mlp_down: {layer.mlp.down_proj.weight.shape}")
    print(f"mlp_gate: {layer.mlp.gate_proj.weight.shape}")