# coding=utf-8
# Copyright 2025 Maincode. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Maincoder model configuration."""
from typing import Optional

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class MaincoderConfig(PretrainedConfig):
r"""
Configuration class for Maincoder model.
Args:
vocab_size (`int`, *optional*, defaults to 151936):
Vocabulary size of the Maincoder model.
hidden_size (`int`, *optional*, defaults to 1536):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 4096):
Dimension of the MLP intermediate representations.
intermediate_size_mlp (`int`, *optional*, defaults to 4096):
Dimension of the MLP representations (same as intermediate_size for dense models).
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer.
num_key_value_heads (`int`, *optional*, defaults to 4):
Number of key-value heads for Grouped Query Attention (GQA).
head_dim (`int`, *optional*, defaults to 96):
Dimension of each attention head.
hidden_act (`str`, *optional*, defaults to `"silu"`):
The activation function in the MLP.
max_position_embeddings (`int`, *optional*, defaults to 2048):
Maximum sequence length the model can handle.
initializer_range (`float`, *optional*, defaults to 0.02):
Standard deviation for weight initialization.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
Epsilon for RMS normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether to use key-value cache for generation.
pad_token_id (`int`, *optional*, defaults to 151643):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of sequence token id.
eos_token_id (`int`, *optional*, defaults to 151643):
End of sequence token id.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie input and output embeddings.
rope_theta (`float`, *optional*, defaults to 1000000.0):
Base period for RoPE embeddings.
rope_scaling (`Dict`, *optional*):
RoPE scaling configuration for extended context.
attention_dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for attention weights.
use_qk_norm (`bool`, *optional*, defaults to `True`):
Whether to apply RMS normalization to query and key.
Example:
```python
>>> from configuration_maincoder import MaincoderConfig
>>> from modelling_maincoder import MaincoderForCausalLM
>>> config = MaincoderConfig()
>>> model = MaincoderForCausalLM(config)
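
    >>> # Illustrative sketch only: overriding a few architecture fields. The values below
    >>> # are examples, not an official checkpoint. Passing head_dim=None falls back to
    >>> # hidden_size // num_attention_heads (1536 // 16 = 96 with the defaults).
    >>> small_config = MaincoderConfig(num_hidden_layers=4, num_key_value_heads=2, head_dim=None)
    >>> small_config.head_dim
    96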
    ```
    """
    model_type = "maincoder"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size: int = 151936,
        hidden_size: int = 1536,
        intermediate_size: int = 4096,
        intermediate_size_mlp: int = 4096,
        num_hidden_layers: int = 32,
        num_attention_heads: int = 16,
        num_key_value_heads: Optional[int] = 4,
        head_dim: Optional[int] = 96,
        hidden_act: str = "silu",
        max_position_embeddings: int = 2048,
        initializer_range: float = 0.02,
        rms_norm_eps: float = 1e-5,
        use_cache: bool = True,
        pad_token_id: Optional[int] = 151643,
        bos_token_id: Optional[int] = None,
        eos_token_id: int = 151643,
        tie_word_embeddings: bool = True,
        rope_theta: float = 1000000.0,
        rope_scaling: Optional[dict] = None,
        attention_dropout: float = 0.0,
        use_qk_norm: bool = True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.intermediate_size_mlp = intermediate_size_mlp
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_dropout = attention_dropout
        self.use_qk_norm = use_qk_norm
        self.hidden_act = hidden_act

        # GQA configuration
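        # If num_key_value_heads is not given, fall back to full multi-head attention
        # (one key-value head per query head); if head_dim is not given, derive it
        # from hidden_size // num_attention_heads.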
        self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["MaincoderConfig"]