{ "architectures": [ "TinyTransformerModel" ], "dropout": 0.1, "dtype": "float32", "hidden_size": 128, "intermediate_size": 512, "max_position_embeddings": 512, "model_type": "tinytransformer", "num_attention_heads": 4, "num_hidden_layers": 2, "transformers_version": "5.0.0", "use_cache": false, "vocab_size": 21091 }