{
  "architectures": [
    "GPT"
  ],
  "vocab_size": 32000,
  "hidden_size": 768,
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "max_position_embeddings": 1024,
  "ffn_mult": 4.0,
  "model_type": "gptx-min"
}