from transformers import PretrainedConfig
class Im2LatexTransformerConfig(PretrainedConfig):
    """Configuration for an Im2LatexTransformer (image-to-LaTeX) model.

    Holds the Transformer hyperparameters plus the input-image channel
    count. Token-id conventions: pad=0, sos=1, eos=2 (overridable via
    kwargs, e.g. ``pad_token_id=...``).

    Args:
        vocab_size: Size of the LaTeX token vocabulary.
        max_len: Maximum decoded sequence length.
        d_model: Transformer embedding/hidden dimension.
        nhead: Number of attention heads.
        num_layers: Number of Transformer layers.
        dim_feedforward: Feed-forward (MLP) inner dimension.
        dropout: Dropout probability.
        in_channels: Input image channels (1 = grayscale).
        **kwargs: Forwarded to ``PretrainedConfig.__init__`` so that
            standard config attributes survive ``from_pretrained`` /
            serialization round-trips.
    """

    model_type = "Im2LatexTransformer"

    def __init__(
        self,
        vocab_size: int = 544,
        max_len: int = 512,
        d_model: int = 512,
        nhead: int = 8,
        num_layers: int = 6,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
        in_channels: int = 1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_len = max_len
        self.d_model = d_model
        self.nhead = nhead
        self.num_layers = num_layers
        self.dim_feedforward = dim_feedforward
        self.dropout = dropout
        self.in_channels = in_channels
        # Pop the special-token ids so an explicit override in kwargs wins,
        # then forward the remaining kwargs — dropping them (as the original
        # code did) breaks PretrainedConfig attribute round-tripping.
        super().__init__(
            pad_token_id=kwargs.pop("pad_token_id", 0),
            sos_token_id=kwargs.pop("sos_token_id", 1),
            eos_token_id=kwargs.pop("eos_token_id", 2),
            **kwargs,
        )