{
  "architectures": [
    "GPTminiHF"
  ],
  "context_length": 128,
  "drop_rate": 0.1,
  "dtype": "float32",
  "emb_dim": 256,
  "model_type": "gptmini",
  "n_heads": 4,
  "n_layers": 4,
  "qkv_bias": false,
  "transformers_version": "4.57.1",
  "vocab_size": 50257
}