from transformers import PretrainedConfig
class GPTConfig(PretrainedConfig):
    """Configuration for a custom GPT model.

    Extends HuggingFace ``PretrainedConfig`` so the model integrates with the
    ``transformers`` save/load machinery. All arguments are stored verbatim as
    instance attributes; unknown keyword arguments are forwarded to the base
    class.

    Args:
        block_size: Maximum sequence length (context window).
        vocab_size: Token vocabulary size.
        n_layer: Number of transformer blocks.
        n_head: Number of attention heads per block.
        n_embd: Embedding / hidden dimension.
        dropout: Dropout probability (0.0 disables dropout).
        bias: Whether linear/LayerNorm layers carry bias terms.
        hc_num_streams: Hyper-connection stream count.
        hc_num_fracs: Hyper-connection fraction count.
        hc_disable: If True, disable hyper-connections entirely.
        mhc: Enable the mhc variant.
        sinkhorn_iters: Number of Sinkhorn normalization iterations.
        sinkhorn_tau: Sinkhorn temperature.
        mhc_h_res_proj: Residual-projection mode for mhc ("sinkhorn" by default).
        ns_steps: Newton-Schulz iteration count.
        ns_eps: Newton-Schulz numerical-stability epsilon.
        ns_coeffs: Newton-Schulz polynomial coefficients (kept as a tuple so
            the default stays immutable).
        **kwargs: Forwarded to ``PretrainedConfig.__init__``.
    """

    # Identifier used by transformers' AutoConfig registry.
    model_type = "custom_gpt"

    def __init__(
        self,
        block_size=1024,
        vocab_size=50304,
        n_layer=12,
        n_head=12,
        n_embd=768,
        dropout=0.0,
        bias=True,
        hc_num_streams=1,
        hc_num_fracs=1,
        hc_disable=False,
        mhc=False,
        sinkhorn_iters=10,
        sinkhorn_tau=0.05,
        mhc_h_res_proj="sinkhorn",
        ns_steps=5,
        ns_eps=1e-7,
        ns_coeffs=(3.0, -3.2, 1.2),
        **kwargs,
    ):
        # Let the base class consume framework-level options first.
        super().__init__(**kwargs)

        # Core transformer geometry.
        self.block_size = block_size
        self.vocab_size = vocab_size
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.dropout = dropout
        self.bias = bias

        # Hyper-connection options.
        self.hc_num_streams = hc_num_streams
        self.hc_num_fracs = hc_num_fracs
        self.hc_disable = hc_disable

        # mhc / Sinkhorn options.
        self.mhc = mhc
        self.sinkhorn_iters = sinkhorn_iters
        self.sinkhorn_tau = sinkhorn_tau
        self.mhc_h_res_proj = mhc_h_res_proj

        # Newton-Schulz iteration options.
        self.ns_steps = ns_steps
        self.ns_eps = ns_eps
        self.ns_coeffs = ns_coeffs