{
  "architectures": [
    "GemmaForCausalLM"
  ],
  "model_type": "gemma",
  "hidden_size": 3072,
  "intermediate_size": 8192,
  "num_attention_heads": 24,
  "num_hidden_layers": 32,
  "vocab_size": 256000,
  "max_position_embeddings": 8192,
  "torch_dtype": "float16",
  "transformers_version": "4.39.0",
  "auto_map": {
    "AutoConfig": "configuration_gemma.GemmaConfig",
    "AutoModelForCausalLM": "modeling_gemma.GemmaForCausalLM",
    "AutoTokenizer": "tokenization_gemma.GemmaTokenizer"
  }
}
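For context, a config.json like this is what the Hugging Face transformers library reads when instantiating a model; the auto_map block indicates the checkpoint ships its own Python modules, so loading it requires trust_remote_code=True. Below is a minimal Python sketch of consuming this file; "path/to/checkpoint" is a placeholder assumption for the directory containing the config.json above.

# Minimal sketch: read the config above with Hugging Face transformers.
# "path/to/checkpoint" is a placeholder, not a real repository id.
from transformers import AutoConfig, AutoModelForCausalLM

# The auto_map entries point at custom modules bundled with the checkpoint,
# so trust_remote_code=True is needed for transformers to import them.
config = AutoConfig.from_pretrained("path/to/checkpoint", trust_remote_code=True)

print(config.hidden_size)        # 3072, per the config above
print(config.num_hidden_layers)  # 32

# Build a randomly initialized model with this architecture
# (from_config does not load any pretrained weights).
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)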