{
  "architectures": ["GPT2LMHeadModel"],
  "model_type": "gpt2",
  "vocab_size": 50257,
  "n_positions": 64,
  "n_ctx": 64,
  "n_embd": 64,
  "n_layer": 2,
  "n_head": 2
}