pnevskaiaan commited on
Commit
a6b8b16
·
verified ·
1 Parent(s): 64921b1

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +13 -12
README.md CHANGED
@@ -7,25 +7,26 @@ Here is the code to create this tiny model:
7
 
8
  ```python
9
  import os
 
10
 
11
- from transformers import AutoTokenizer
12
- from transformers import Lfm2Config, Lfm2ForCausalLM
13
 
14
- # === Step 1: Define tiny model config ===
15
- config = Lfm2Config(
16
- num_hidden_layers=4,
17
- num_attention_heads=4,
18
- num_key_value_heads=4,
19
- intermediate_size=12,
20
- hidden_size=16,
21
- block_multiple_of=8
22
- )
 
 
23
 
24
  # === Step 2: Create model from config ===
25
  model = Lfm2ForCausalLM(config)
26
 
27
  # === Step 3: Load or create tokenizer ===
28
- model_id = "LiquidAI/LFM2-350M"
29
  tokenizer = AutoTokenizer.from_pretrained(model_id)
30
 
31
  # === Step 4: Save model and tokenizer ===
 
7
 
8
  ```python
9
  import os
10
+ import torch
11
 
12
+ torch.set_default_dtype(torch.bfloat16)
 
13
 
14
+ from transformers import AutoTokenizer, AutoConfig, Lfm2ForCausalLM
15
+
16
+ # # === Step 1: Define tiny model config ===
17
+ model_id = "LiquidAI/LFM2-350M"
18
+ config = AutoConfig.from_pretrained(model_id)
19
+
20
+ config.num_hidden_layers=4
21
+ config.num_attention_heads=4
22
+ config.num_key_value_heads=4
23
+ config.hidden_size=16
24
+ config.block_multiple_of=8
25
 
26
  # === Step 2: Create model from config ===
27
  model = Lfm2ForCausalLM(config)
28
 
29
  # === Step 3: Load or create tokenizer ===
 
30
  tokenizer = AutoTokenizer.from_pretrained(model_id)
31
 
32
  # === Step 4: Save model and tokenizer ===