```python
from transformers import PretrainedConfig


class TinyTransformerConfig(PretrainedConfig):
    # model_type identifies this architecture to the Auto* classes
    # and is written into config.json on save.
    model_type = "tinytransformer"

    def __init__(
        self,
        vocab_size=5000,
        hidden_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=512,
        max_position_embeddings=512,
        dropout=0.1,
        num_labels=2,
        **kwargs,
    ):
        # Forward any extra arguments (e.g. pad_token_id) to the base class.
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.num_labels = num_labels
```
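Because `TinyTransformerConfig` inherits from `PretrainedConfig`, it gets JSON serialization via `save_pretrained` / `from_pretrained` for free. Below is a minimal sketch of the round trip; the `./tiny-transformer` directory is illustrative, and the `AutoConfig.register` call is only needed if you want `AutoConfig.from_pretrained` to resolve the custom `model_type`:

```python
from transformers import AutoConfig

# Register the custom model_type with AutoConfig (optional if you
# always load through TinyTransformerConfig directly).
AutoConfig.register("tinytransformer", TinyTransformerConfig)

config = TinyTransformerConfig(hidden_size=256, num_labels=3)
config.save_pretrained("./tiny-transformer")  # writes config.json

# Reload either through the class or through AutoConfig, which
# dispatches on the model_type stored in config.json.
reloaded = TinyTransformerConfig.from_pretrained("./tiny-transformer")
assert reloaded.hidden_size == 256 and reloaded.num_labels == 3
```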