phoebeklett committed
Commit 4aec30a · 1 Parent(s): c09aabb

Delete configuration.py

Files changed (1)
  1. configuration.py +0 -207
configuration.py DELETED
@@ -1,207 +0,0 @@
- # Adapted from https://github.com/mosaicml/llm-foundry
- # Classes changed: MPTConfig
- # SPDX-License-Identifier: Apache-2.0
-
-
- """A HuggingFace-style model configuration."""
-
- from typing import Dict, List, Optional, Union
- from transformers import PretrainedConfig
-
- attn_config_defaults: Dict = {
-     'attn_type': 'multihead_attention',
-     'attn_pdrop': 0.0,
-     'attn_impl': 'torch',
-     'qk_ln': False,
-     'clip_qkv': None,
-     'softmax_scale': None,
-     'prefix_lm': False,
-     'attn_uses_sequence_id': False,
-     'alibi': True,
-     'alibi_bias_max': 8,
-     'topk': 10,
-     'mask_by_sim': True,
-     'sim_threshold': 0.25,
-     'use_active_externalism': True,
-     'memory_type': 'manual',
- }
-
- init_config_defaults: Dict = {
-     'name': 'kaiming_normal_',
-     'fan_mode': 'fan_in',
-     'init_nonlinearity': 'relu',
-     'init_div_is_residual': True,
-     'emb_init_std': None,
-     'emb_init_uniform_lim': None,
-     'init_std': None,
-     'init_gain': 0.0,
- }
-
-
- class ExtendedMPTConfig(PretrainedConfig):
-     model_type = 'extended-mpt'
-
-     def __init__(
-         self,
-         d_model: int = 4096,
-         n_heads: int = 32,
-         n_layers: int = 32,
-         expansion_ratio: int = 4,
-         max_seq_len: int = 2048,
-         vocab_size: int = 50432,
-         resid_pdrop: float = 0.0,
-         emb_pdrop: float = 0.0,
-         learned_pos_emb: bool = True,
-         attn_config: Dict = attn_config_defaults,
-         init_device: str = 'cpu',
-         logit_scale: Optional[Union[float, str]] = None,
-         no_bias: bool = True,
-         verbose: int = 0,
-         embedding_fraction: float = 1.0,
-         norm_type: str = 'low_precision_layernorm',
-         use_cache: bool = False,
-         init_config: Dict = init_config_defaults,
-         use_active_externalism_by_layer: List[bool] = [True for _ in range(32)],
-         memory_device: str = 'cpu',
-         **kwargs,
-     ):
-         """The MPT configuration class.
-
-         Args:
-             d_model (int): The size of the embedding dimension of the model.
-             n_heads (int): The number of attention heads.
-             n_layers (int): The number of layers in the model.
-             expansion_ratio (int): The ratio of the up/down scale in the MLP.
-             max_seq_len (int): The maximum sequence length of the model.
-             vocab_size (int): The size of the vocabulary.
-             resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.
-             emb_pdrop (float): The dropout probability for the embedding layer.
-             learned_pos_emb (bool): Whether to use learned positional embeddings.
-             attn_config (Dict): A dictionary used to configure the model's attention module:
-                 attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention
-                 attn_pdrop (float): The dropout probability for the attention layers.
-                 attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.
-                 qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.
-                 clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to
-                     this value.
-                 softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,
-                     use the default scale of ``1/sqrt(d_keys)``.
-                 prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an
-                     extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix
-                     can attend to one another bi-directionally. Tokens outside the prefix use causal attention.
-                 attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.
-                     When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates
-                     which sub-sequence each token belongs to.
-                     Defaults to ``False`` meaning any provided `sequence_id` will be ignored.
-                 alibi (bool): Whether to use the alibi bias instead of position embeddings.
-                 alibi_bias_max (int): The maximum value of the alibi bias.
-             init_device (str): The device to use for parameter initialization.
-             logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.
-             no_bias (bool): Whether to use bias in all layers.
-             verbose (int): The verbosity level. 0 is silent.
-             embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.
-             norm_type (str): choose type of norm to use
-             multiquery_attention (bool): Whether to use multiquery attention implementation.
-             use_cache (bool): Whether or not the model should return the last key/values attentions
-             init_config (Dict): A dictionary used to configure the model initialization:
-                 init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',
-                     'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or
-                     'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.
-                 init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.
-                 emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.
-                 emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution
-                     used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.
-                 init_std (float): The standard deviation of the normal distribution used to initialize the model,
-                     if using the baseline_ parameter initialization scheme.
-                 init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.
-                 fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.
-                 init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.
-             ---
-             See llmfoundry.models.utils.param_init_fns.py for info on other param init config options
-         """
-         self.d_model = d_model
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.expansion_ratio = expansion_ratio
-         self.max_seq_len = max_seq_len
-         self.vocab_size = vocab_size
-         self.resid_pdrop = resid_pdrop
-         self.emb_pdrop = emb_pdrop
-         self.learned_pos_emb = learned_pos_emb
-         self.attn_config = attn_config
-         self.init_device = init_device
-         self.logit_scale = logit_scale
-         self.no_bias = no_bias
-         self.verbose = verbose
-         self.embedding_fraction = embedding_fraction
-         self.norm_type = norm_type
-         self.use_cache = use_cache
-         self.init_config = init_config
-         self.use_active_externalism_by_layer = use_active_externalism_by_layer
-         self.memory_device = memory_device
-         if 'name' in kwargs:
-             del kwargs['name']
-         if 'loss_fn' in kwargs:
-             del kwargs['loss_fn']
-         super().__init__(**kwargs)
-
-         self._validate_config()
-
-     def _set_config_defaults(self, config, config_defaults):
-         # set config defaults
-         for k, v in config_defaults.items():
-             if k not in config:
-                 config[k] = v
-         return config
-
-     def _validate_config(self):
-         # set config defaults
-         self.attn_config = self._set_config_defaults(
-             self.attn_config,
-             attn_config_defaults,
-         )
-         self.init_config = self._set_config_defaults(
-             self.init_config,
-             init_config_defaults,
-         )
-
-         if self.d_model % self.n_heads != 0:
-             raise ValueError('d_model must be divisible by n_heads')
-         if any(prob < 0 or prob > 1 for prob in
-                [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop]):
-             raise ValueError(
-                 "self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1"
-             )
-         if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:
-             raise ValueError(
-                 f"Unknown attn_impl={self.attn_config['attn_impl']}")
-         if self.attn_config['prefix_lm'] and self.attn_config[
-                 'attn_impl'] not in ['torch', 'triton']:
-             raise NotImplementedError(
-                 'prefix_lm only implemented with torch and triton attention.')
-         if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in [
-                 'torch', 'triton'
-         ]:
-             raise NotImplementedError(
-                 'alibi only implemented with torch and triton attention.')
-         if self.attn_config['attn_uses_sequence_id'] and self.attn_config[
-                 'attn_impl'] not in ['torch', 'triton']:
-             raise NotImplementedError(
-                 'attn_uses_sequence_id only implemented with torch and triton attention.'
-             )
-         if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
-             raise ValueError(
-                 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!'
-             )
-         if isinstance(self.logit_scale,
-                       str) and self.logit_scale != 'inv_sqrt_d_model':
-             raise ValueError(
-                 f"{self.logit_scale=} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'."
-             )
-         if self.init_config.get('name', None) is None:
-             raise ValueError(f"{self.init_config=} 'name' needs to be set.")
-         if not self.learned_pos_emb and not self.attn_config['alibi']:
-             raise ValueError(
-                 'Positional information must be provided to the model using either learned_pos_emb or alibi.'
-             )
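
For reference, below is a minimal, hypothetical usage sketch of the class removed in this commit. It assumes the deleted file has been restored locally as configuration.py; the values shown simply mirror the defaults above, and any attn_config keys left out are filled in from attn_config_defaults by _validate_config().

# Hypothetical sketch: assumes the deleted configuration.py is available locally.
from configuration import ExtendedMPTConfig

config = ExtendedMPTConfig(
    d_model=4096,
    n_heads=32,
    n_layers=32,
    attn_config={
        'attn_impl': 'torch',
        'alibi': True,
        'use_active_externalism': True,
        'topk': 10,
    },
    use_active_externalism_by_layer=[True] * 32,
    memory_device='cpu',
)

# __init__ calls _validate_config(), which fills the remaining attn_config keys
# from attn_config_defaults and rejects inconsistent settings
# (e.g. a d_model that is not divisible by n_heads).
print(config.attn_config['sim_threshold'])  # 0.25, taken from the defaults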