from transformers import PretrainedConfig


class HareConfig(PretrainedConfig):
    model_type = "hare"

    def __init__(
        self,
        hidden_size=768,
        num_attention_heads=12,
        num_hidden_layers=22,
        intermediate_size=1152,
        hidden_activation="gelu",
        max_position_embeddings=8192,
        vocab_size=50368,
        pad_token_id=50283,
        bos_token_id=50281,
        eos_token_id=50282,
        cls_token_id=50281,
        sep_token_id=50282,
        global_attn_every_n_layers=3,
        local_attention=128,
        replaced_layers=None,
        surgery_variant="conservative",
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_activation = hidden_activation
        self.max_position_embeddings = max_position_embeddings
        self.vocab_size = vocab_size
        self.cls_token_id = cls_token_id
        self.sep_token_id = sep_token_id
        # Attention layout: every n-th layer is global, the rest use a local sliding window.
        self.global_attn_every_n_layers = global_attn_every_n_layers
        self.local_attention = local_attention
        # Surgery bookkeeping: which layers were replaced and which surgery variant was applied.
        self.replaced_layers = replaced_layers
        self.surgery_variant = surgery_variant
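

# --- Usage sketch (illustrative, not from the original source) ---
# Registering the config lets AutoConfig resolve model_type "hare" when loading
# from disk; the layer indices in `replaced_layers` below are hypothetical
# placeholder values, and to_json_string() simply serializes the config fields.
if __name__ == "__main__":
    from transformers import AutoConfig

    AutoConfig.register("hare", HareConfig)
    config = HareConfig(replaced_layers=[2, 5, 8], surgery_variant="conservative")
    print(config.to_json_string())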