{ "architectures": [ "NanoChatForCausalLM" ], "hidden_size": 1280, "intermediate_size": 5120, "max_position_embeddings": 2048, "model_type": "nanochat", "num_attention_heads": 10, "num_hidden_layers": 20, "num_key_value_heads": 10, "rope_theta": 10000.0, "torch_dtype": "bfloat16", "transformers_version": "4.0.0", "vocab_size": 65536 }