---
license: apache-2.0
---
Here is the code used to create this tiny model:
```python
import os
from transformers import AutoConfig, AutoTokenizer, Lfm2MoeForCausalLM
# === Step 1: Define tiny model config ===
model_id = "LiquidAI/LFM2-24B-A2B"
config = AutoConfig.from_pretrained(model_id)
config.num_hidden_layers = 3
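# One layer type per hidden layer (num_hidden_layers entries).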
config.layer_types = [
"full_attention",
"full_attention",
"conv",
]
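# Shrink attention heads and hidden/MLP dimensions.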
config.num_attention_heads = 4
config.num_key_value_heads = 4
config.hidden_size = 16
config.num_dense_layers = 1
config.moe_intermediate_size = 16
config.intermediate_size = 16
# === Step 2: Create model from config ===
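# Weights are randomly initialized, so the model is only useful for testing.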
model = Lfm2MoeForCausalLM(config)
# === Step 3: Load the original tokenizer ===
tokenizer = AutoTokenizer.from_pretrained(model_id)
# === Step 4: Save model and tokenizer ===
output_dir = "./lfm2_moe"
os.makedirs(output_dir, exist_ok=True)
model.save_pretrained(output_dir, safe_serialization=False)  # writes pytorch_model.bin instead of safetensors
tokenizer.save_pretrained(output_dir)
```
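
To check that the saved files load correctly, you can reload them with the `Auto` classes and run a short generation. This is a minimal sketch, assuming a `transformers` version with LFM2-MoE support; the output will be random text, since the weights are untrained.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("./lfm2_moe")
tokenizer = AutoTokenizer.from_pretrained("./lfm2_moe")

# Generate a few tokens as a smoke test (the output will be gibberish).
inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```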