{ "architectures": [ "GemmaForCausalLM" ], "attention_bias": false, "attention_dropout": 0.0, "attention_partition_spec": [ [ "dp", "fsdp" ], "sp", "tp", null ], "attn_mechanism": "normal", "axis_dims": [ 1, -1, 1, 1 ], "axis_names": [ "dp", "fsdp", "tp", "sp" ], "backend": null, "bias_partition_spec": [ [ "dp", "fsdp" ], null, null, null ], "bits": null, "block_b": 1, "block_k": 128, "block_k_dkv": 128, "block_k_dq": 128, "block_k_major": 128, "block_k_major_dkv": 128, "block_k_major_dq": 128, "block_q": 128, "block_q_dkv": 128, "block_q_dq": 128, "block_q_major_dkv": 128, "bos_token_id": 2, "easy_method": "train", "eos_token_id": 1, "head_dim": 32, "hidden_act": "gelu", "hidden_size": 64, "initializer_range": 0.02, "intermediate_size": 256, "key_partition_spec": [ [ "dp", "fsdp" ], "sp", "tp", null ], "max_position_embeddings": 8192, "model_type": "gemma", "num_attention_heads": 2, "num_hidden_layers": 2, "num_key_value_heads": 2, "pad_token_id": 0, "query_partition_spec": [ [ "dp", "fsdp" ], "sp", "tp", null ], "rms_norm_eps": 1e-06, "rope_theta": 10000.0, "scan_attention_layers": false, "scan_layers": false, "scan_mlp_chunk_size": 1024, "scan_ring_attention": true, "torch_dtype": "float32", "transformers_version": "4.38.1", "use_cache": true, "use_pjit_attention_force": false, "use_scan_mlp": true, "use_shard_map": false, "use_sharded_kv_caching": true, "value_partition_spec": [ [ "dp", "fsdp" ], "sp", "tp", null ], "vocab_size": 256000 }