Commit e1b1fa5
Parent(s): 95dacd1
Delete configuration_mpt.py
configuration_mpt.py +0 -118
configuration_mpt.py
DELETED
@@ -1,118 +0,0 @@
"""A HuggingFace-style model configuration."""
from typing import Dict, Optional, Union
from transformers import PretrainedConfig
attn_config_defaults: Dict = {'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}
init_config_defaults: Dict = {'name': 'kaiming_normal_', 'fan_mode': 'fan_in', 'init_nonlinearity': 'relu'}

class MPTConfig(PretrainedConfig):
    model_type = 'mpt'

    def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):
        """The MPT configuration class.

        Args:
            d_model (int): The size of the embedding dimension of the model.
            n_heads (int): The number of attention heads.
            n_layers (int): The number of layers in the model.
            expansion_ratio (int): The ratio of the up/down scale in the MLP.
            max_seq_len (int): The maximum sequence length of the model.
            vocab_size (int): The size of the vocabulary.
            resid_pdrop (float): The dropout probability applied to the attention output before combining with the residual.
            emb_pdrop (float): The dropout probability for the embedding layer.
            learned_pos_emb (bool): Whether to use learned positional embeddings.
            attn_config (Dict): A dictionary used to configure the model's attention module:
                attn_type (str): The type of attention to use. Options: multihead_attention, multiquery_attention.
                attn_pdrop (float): The dropout probability for the attention layers.
                attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.
                qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.
                clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to
                    this value.
                softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,
                    use the default scale of ``1/sqrt(d_keys)``.
                prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an
                    extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix
                    can attend to one another bi-directionally. Tokens outside the prefix use causal attention.
                attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.
                    When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates
                    which sub-sequence each token belongs to.
                    Defaults to ``False``, meaning any provided `sequence_id` will be ignored.
                alibi (bool): Whether to use the alibi bias instead of position embeddings.
                alibi_bias_max (int): The maximum value of the alibi bias.
            init_device (str): The device to use for parameter initialization.
            logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.
            no_bias (bool): Whether to disable bias in all layers.
            verbose (int): The verbosity level. 0 is silent.
            embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.
            norm_type (str): The type of normalization layer to use.
            use_cache (bool): Whether or not the model should return the last key/value attention states.
            init_config (Dict): A dictionary used to configure the model initialization:
                init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',
                    'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or
                    'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.
                init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.
                emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.
                emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution
                    used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.
                init_std (float): The standard deviation of the normal distribution used to initialize the model,
                    if using the baseline_ parameter initialization scheme.
                init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.
                fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.
                init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.
                ---
                See llmfoundry.models.utils.param_init_fns.py for info on other param init config options.
        """
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.expansion_ratio = expansion_ratio
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.resid_pdrop = resid_pdrop
        self.emb_pdrop = emb_pdrop
        self.learned_pos_emb = learned_pos_emb
        self.attn_config = attn_config
        self.init_device = init_device
        self.logit_scale = logit_scale
        self.no_bias = no_bias
        self.verbose = verbose
        self.embedding_fraction = embedding_fraction
        self.norm_type = norm_type
        self.use_cache = use_cache
        self.init_config = init_config
        if 'name' in kwargs:
            del kwargs['name']
        if 'loss_fn' in kwargs:
            del kwargs['loss_fn']
        super().__init__(**kwargs)
        self._validate_config()

    def _set_config_defaults(self, config, config_defaults):
        for k, v in config_defaults.items():
            if k not in config:
                config[k] = v
        return config

    def _validate_config(self):
        self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)
        self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)
        if self.d_model % self.n_heads != 0:
            raise ValueError('d_model must be divisible by n_heads')
        if any(prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop]):
            raise ValueError("self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1")
        if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:
            raise ValueError(f"Unknown attn_impl={self.attn_config['attn_impl']}")
        if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
            raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')
        if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
            raise NotImplementedError('alibi only implemented with torch and triton attention.')
        if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
            raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')
        if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
            raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')
        if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':
            raise ValueError(f"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
        if self.init_config.get('name', None) is None:
            raise ValueError(f"self.init_config={self.init_config!r} 'name' needs to be set.")
        if not self.learned_pos_emb and not self.attn_config['alibi']:
            raise ValueError('Positional information must be provided to the model using either learned_pos_emb or alibi.')
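For context, a minimal usage sketch of the deleted class. This assumes the file above is importable as `configuration_mpt`; the override values are illustrative, not taken from the commit:

from configuration_mpt import MPTConfig

# Defaults mirror __init__ above: d_model=2048, n_heads=16, n_layers=24, etc.
config = MPTConfig()

# Overrides pass through _validate_config(), which also fills in any missing
# attn_config/init_config keys via _set_config_defaults().
config = MPTConfig(d_model=1024, n_heads=8,
                   attn_config={'attn_impl': 'torch', 'alibi': True})

# Invalid combinations fail fast at construction time:
try:
    MPTConfig(d_model=1000, n_heads=16)
except ValueError as err:
    print(err)  # d_model must be divisible by n_heads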