from transformers import PretrainedConfig


class EmCoderConfig(PretrainedConfig):
    """Configuration class for the EmCoder model.

    Holds the transformer hyper-parameters (vocabulary size, depth, width,
    attention heads, feed-forward size, dropout), the classification head
    size (``num_labels``), and the path of the base encoder checkpoint.
    Label mappings (``id2label`` / ``label2id``) are forwarded to
    :class:`~transformers.PretrainedConfig`.
    """

    model_type = "emcoder"

    def __init__(
        self,
        vocab_size=50265,
        max_seq_len=512,
        d_model=768,
        n_head=12,
        n_layers=6,
        d_ffn=3072,
        dropout=0.15,
        num_labels=28,
        base_encoder_path="",
        id2label=None,
        label2id=None,
        **kwargs,
    ):
        # JSON serialization stores dict keys as strings, so normalize
        # id2label keys back to int before handing them to the base class.
        normalized_id2label = (
            None
            if id2label is None
            else {int(key): name for key, name in id2label.items()}
        )

        super().__init__(
            id2label=normalized_id2label,
            label2id=label2id,
            **kwargs,
        )

        self.vocab_size = vocab_size          # tokenizer vocabulary size
        self.max_seq_len = max_seq_len        # maximum input sequence length
        self.d_model = d_model                # hidden/embedding dimension
        self.n_head = n_head                  # number of attention heads
        self.n_layers = n_layers              # number of transformer layers
        self.d_ffn = d_ffn                    # feed-forward inner dimension
        self.dropout = dropout                # dropout probability
        # NOTE: assigned after super().__init__ on purpose — PretrainedConfig
        # exposes num_labels as a property that interacts with id2label.
        self.num_labels = num_labels
        self.base_encoder_path = base_encoder_path  # checkpoint path of the base encoder