```python
from transformers import PretrainedConfig


class HCAEConfig(PretrainedConfig):
    # Identifier used by the transformers Auto* machinery to recognize this config.
    model_type = "hcae"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=384,
        conv_layers=4,
        attn_layers=4,
        num_heads=12,
        max_position_embeddings=512,
        dropout=0.1,
        initializer_range=0.02,
        pad_token_id=0,
        **kwargs,
    ):
        # Forward pad_token_id and any extra keyword arguments to the base PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.conv_layers = conv_layers
        self.attn_layers = attn_layers
        self.num_heads = num_heads
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.initializer_range = initializer_range
```
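
A minimal usage sketch, assuming the class above is saved in a module named `configuration_hcae.py` (the filename is illustrative): because `HCAEConfig` inherits from `PretrainedConfig`, it can be instantiated with overrides and round-tripped through the standard `save_pretrained` / `from_pretrained` JSON serialization.

```python
from configuration_hcae import HCAEConfig  # illustrative module name

# Instantiate with a couple of overrides; everything else keeps the defaults above.
config = HCAEConfig(hidden_size=512, num_heads=8)

# PretrainedConfig writes config.json to the directory and reads it back,
# so custom fields like conv_layers and attn_layers survive the round trip.
config.save_pretrained("hcae-base")
reloaded = HCAEConfig.from_pretrained("hcae-base")
assert reloaded.hidden_size == 512 and reloaded.num_heads == 8
```

If the surrounding project also registers the class with `AutoConfig.register("hcae", HCAEConfig)`, the same saved directory loads through the Auto classes as well.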