{
  "architectures": ["GemmaForCausalLM"],
  "model_type": "gemma",
  "hidden_size": 1024,
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "vocab_size": 262144
}
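
A minimal sketch of how a config file like this could be loaded and used with the Hugging Face transformers library; the local directory path "./gemma-checkpoint" is a placeholder, and the sketch assumes the file above is saved there as config.json:

from transformers import GemmaConfig, GemmaForCausalLM

# Read config.json from the checkpoint directory (placeholder path).
config = GemmaConfig.from_pretrained("./gemma-checkpoint")

print(config.hidden_size)          # 1024
print(config.num_attention_heads)  # 16
print(config.num_hidden_layers)    # 24
print(config.vocab_size)           # 262144

# Instantiate the architecture named in "architectures" with randomly
# initialized weights; pretrained weights would be loaded separately.
model = GemmaForCausalLM(config)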