# ==================== configuration_neollm.py ====================
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_rope_utils import rope_config_validation
from transformers.utils import logging

logger = logging.get_logger(__name__)

class NeoLLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a `NeoLLMModel`. It is used to instantiate a
    NeoLLM model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from `PretrainedConfig` and can be used to control the model outputs.
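    Instantiating a configuration with the defaults yields the small architecture
    defined below (a minimal sketch; assumes this file is importable as
    `configuration_neollm`):

    ```python
    >>> from configuration_neollm import NeoLLMConfig

    >>> # Build a configuration with the default NeoLLM arguments
    >>> config = NeoLLMConfig()
    >>> config.model_type
    'neollm'
    ```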
    """
    model_type = "neollm"
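    # No output keys are ignored at inference time (causal-LM configs often list "past_key_values" here).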
    keys_to_ignore_at_inference = []
    
    def __init__(
        self,
        vocab_size=200005,
        hidden_size=512,
        intermediate_size=1536,
        num_hidden_layers=12,
        num_attention_heads=8,
        num_key_value_heads=2,
        hidden_act="xielu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        rope_scaling=None,
        partial_rotary_factor=0.25,
        attention_bias=False,
        attention_dropout=0.1,
        head_dim=64,
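        # Arguments below are specific to this model (FANformer and StackMemory extensions):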
        fan_ratio=0.125,
        fan_ratio_ffn=0.0625,
        dropout_rate=0.1,
        use_stack=True,
        num_stack_heads=4,
        stack_slots=24,
        stack_d_model=16,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.partial_rotary_factor = partial_rotary_factor
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.head_dim = head_dim
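        # Validate the rope_scaling dict (if any) against the RoPE fields set above.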
        rope_config_validation(self)
        
        # FANformer parameters
        self.fan_ratio = fan_ratio
        self.fan_ratio_ffn = fan_ratio_ffn
        
        self.dropout_rate = dropout_rate
        
        # StackMemory parameters
        self.use_stack = use_stack
        self.num_stack_heads = num_stack_heads
        self.stack_slots = stack_slots
        self.stack_d_model = stack_d_model
        
        self.auto_map = {
            "AutoConfig": "configuration_neollm.NeoLLMConfig",
            "AutoModel": "modeling_neollm.NeoLLMModel",
            "AutoModelForCausalLM": "modeling_neollm.NeoLLMForCausalLM"
        }

__all__ = ["NeoLLMConfig"]
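
# ==================== usage sketch (illustrative addition) ====================
# A minimal sketch of wiring NeoLLMConfig into the transformers auto classes.
# AutoConfig.register is the standard hook for custom configs registered in
# code; the auto_map entries above instead cover the trust_remote_code loading
# path. The "neollm-config" directory name is a placeholder, not a checkpoint.
from transformers import AutoConfig

from configuration_neollm import NeoLLMConfig

AutoConfig.register("neollm", NeoLLMConfig)

config = NeoLLMConfig(num_hidden_layers=4)  # any default can be overridden
config.save_pretrained("neollm-config")     # writes config.json (incl. auto_map)
reloaded = AutoConfig.from_pretrained("neollm-config")
assert isinstance(reloaded, NeoLLMConfig)
assert reloaded.num_hidden_layers == 4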