{
  "architectures": ["GPT2LMHeadModel"],
  "model_type": "gpt2",
  "vocab_size": 1200,
  "n_positions": 256,
  "n_ctx": 256,
  "n_embd": 80,
  "n_layer": 3,
  "n_head": 4
}