from transformers import PretrainedConfig
class ESPFormerConfig(PretrainedConfig):
    """Configuration for the ESPFormer model.

    Stores the hyperparameters needed to instantiate an ESPFormer:
    input feature size, transformer dimensions, sequence length, and
    classification head size. Inherits serialization and hub utilities
    from ``PretrainedConfig``.
    """

    # Identifier used by the transformers Auto* registration machinery.
    model_type = "ESPFormer"

    def __init__(
        self,
        input_dim: int = 20,
        embed_dim: int = 128,
        num_heads: int = 8,
        num_layers: int = 2,
        seq_length: int = 1280,
        num_classes: int = 2,
        dropout: float = 0.1,
        **kwargs,
    ):
        """Initialize the configuration.

        Args:
            input_dim: Size of each input feature vector
                (presumably 20 = amino-acid alphabet — TODO confirm).
            embed_dim: Transformer embedding/hidden dimension.
            num_heads: Number of attention heads; must divide ``embed_dim``.
            num_layers: Number of transformer encoder layers.
            seq_length: Maximum input sequence length.
            num_classes: Number of output classes for the classifier head.
            dropout: Dropout probability applied inside the model.
            **kwargs: Forwarded to ``PretrainedConfig`` (e.g. ``id2label``).

        Raises:
            ValueError: If ``embed_dim`` is not divisible by ``num_heads``.
        """
        super().__init__(**kwargs)
        # Multi-head attention splits embed_dim across heads, so the
        # division must be exact — fail fast at config time.
        if num_heads <= 0 or embed_dim % num_heads != 0:
            raise ValueError(
                f"embed_dim ({embed_dim}) must be divisible by "
                f"num_heads ({num_heads})"
            )
        self.input_dim = input_dim
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.seq_length = seq_length
        self.num_classes = num_classes
        self.dropout = dropout