Upload folder using huggingface_hub
- chat_template.jinja +1 -0
- config.json +52 -0
- configuration_megrez_moe.py +201 -0
- model-00001-of-00003.safetensors +3 -0
- model-00002-of-00003.safetensors +3 -0
- model-00003-of-00003.safetensors +3 -0
- model.safetensors.index.json +0 -0
- modeling_megrez_moe2.py +1024 -0
- special_tokens_map.json +16 -0
- tokenizer.json +0 -0
- tokenizer_config.json +236 -0
chat_template.jinja
ADDED
@@ -0,0 +1 @@
{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是megrez-3x7b-a3b-instruct<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' }}{% endif %}
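For orientation: the template prepends a default system turn when the first message is not a `system` message, wraps every turn in `<|role_start|>...<|role_end|>...<|turn_end|>` markers, and optionally appends an assistant header. A minimal sketch (not part of the commit) of how it is typically exercised; the repo path is a placeholder:

# A minimal sketch (not part of the commit); the model path is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo", trust_remote_code=True)
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Expected to render roughly:
# <|role_start|>system<|role_end|>你是megrez-3x7b-a3b-instruct<|turn_end|>
# <|role_start|>user<|role_end|>Hello!<|turn_end|><|role_start|>assistant<|role_end|>
print(prompt)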
config.json
ADDED
@@ -0,0 +1,52 @@
{
  "_moe_implementation": "fused",
  "architectures": [
    "MegrezMoeForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_megrez_moe.MegrezMoeConfig",
    "AutoModel": "modeling_megrez_moe.MegrezMoeModel",
    "AutoModelForCausalLM": "modeling_megrez_moe.MegrezMoeForCausalLM"
  },
  "aux_loss_alpha": 0.001,
  "bos_token_id": null,
  "dtype": "bfloat16",
  "eos_token_id": 120005,
  "ep_size": 1,
  "experts_shared_frequency": 3,
  "first_k_dense_replace": 1,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 10944,
  "max_position_embeddings": 163840,
  "model_type": "megrez_moe",
  "moe_intermediate_size": 1408,
  "moe_layer_freq": 1,
  "n_group": 1,
  "n_routed_experts": 64,
  "n_shared_experts": 4,
  "norm_topk_prob": true,
  "num_attention_heads": 16,
  "num_experts_per_tok": 6,
  "num_hidden_layers": 31,
  "num_key_value_heads": 4,
  "pad_token_id": 120002,
  "pre_gate": true,
  "pretraining_tp": 1,
  "quantize": true,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 5000000,
  "routed_scaling_factor": 1.0,
  "scoring_func": "sigmoid",
  "seq_aux": true,
  "tie_word_embeddings": false,
  "topk_group": 1,
  "topk_method": "noaux_tc",
  "transformers_version": "4.56.0",
  "use_cache": true,
  "vocab_size": 122880
}
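The `auto_map` block routes `AutoConfig` / `AutoModel*` to the custom classes shipped in this commit, so loading requires `trust_remote_code=True`. A minimal sketch (not part of the commit); the repo id is a placeholder:

# A minimal sketch (not part of the commit); the repo path is a placeholder.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "path/to/this/repo",
    torch_dtype=torch.bfloat16,  # matches "dtype": "bfloat16" above
    trust_remote_code=True,      # required because of the auto_map entries
)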
configuration_megrez_moe.py
ADDED
@@ -0,0 +1,201 @@
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

MegrezMoe_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class MegrezMoeConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MegrezMoeModel`]. It is used to instantiate a
    MegrezMoe model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a configuration similar to that of DeepSeek-V2.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 102400):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MegrezMoeModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 1407):
            Dimension of the MoE representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        n_shared_experts (`int`, *optional*, defaults to None):
            Number of shared experts; None means a dense model.
        n_routed_experts (`int`, *optional*, defaults to None):
            Number of routed experts; None means a dense model.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor for routed experts.
        topk_method (`str`, *optional*, defaults to `gready`):
            Top-k method used in the routed gate.
        n_group (`int`, *optional*, defaults to None):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to None):
            Number of selected groups for each token (ensuring the selected experts are only within `topk_group`
            groups).
        num_experts_per_tok (`int`, *optional*, defaults to None):
            Number of selected experts; None means a dense model.
        moe_layer_freq (`int`, *optional*, defaults to 1):
            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
        first_k_dense_replace (`int`, *optional*, defaults to 0):
            Number of dense layers in the shallow layers (embed->dense->dense->...->dense->moe->moe...->lm_head).
                                                                 \--k dense layers--/
        norm_topk_prob (`bool`, *optional*, defaults to False):
            Whether to normalize the weights of the routed experts.
        scoring_func (`str`, *optional*, defaults to 'softmax'):
            Method of computing expert weights.
        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
            Auxiliary loss weight coefficient.
        seq_aux (`bool`, *optional*, defaults to True):
            Whether to compute the auxiliary loss for each individual sample.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import MegrezMoeModel, MegrezMoeConfig

    >>> # Initializing a DeepSeek-V2 style configuration
    >>> configuration = MegrezMoeConfig()

    >>> # Initializing a model from the configuration
    >>> model = MegrezMoeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "megrez_moe"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=102400,
        hidden_size=4096,
        intermediate_size=11008,
        moe_intermediate_size=1407,
        num_hidden_layers=30,
        num_attention_heads=32,
        num_key_value_heads=32,
        n_shared_experts=None,
        n_routed_experts=None,
        ep_size=1,
        routed_scaling_factor=1.0,
        topk_method='gready',
        n_group=None,
        topk_group=None,
        num_experts_per_tok=None,
        moe_layer_freq=1,
        first_k_dense_replace=0,
        norm_topk_prob=False,
        scoring_func='softmax',
        aux_loss_alpha=0.001,
        seq_aux=True,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=100000,
        eos_token_id=100001,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        experts_shared_frequency=1,
        pre_gate=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.moe_intermediate_size = moe_intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.ep_size = ep_size
        self.routed_scaling_factor = routed_scaling_factor
        self.topk_method = topk_method
        self.n_group = n_group
        self.topk_group = topk_group
        self.num_experts_per_tok = num_experts_per_tok
        self.moe_layer_freq = moe_layer_freq
        self.first_k_dense_replace = first_k_dense_replace
        self.norm_topk_prob = norm_topk_prob
        self.scoring_func = scoring_func
        self.aux_loss_alpha = aux_loss_alpha
        self.seq_aux = seq_aux
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        self.experts_shared_frequency = experts_shared_frequency
        self.pre_gate = pre_gate

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
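The checked-in config.json above overrides most of these defaults. As a quick orientation, here is a sketch (not part of the commit) that rebuilds the shipped configuration from keyword arguments taken from config.json, assuming the file above is importable locally:

# A minimal sketch (not part of the commit): rebuilding the checked-in
# configuration from the values in config.json above.
from configuration_megrez_moe import MegrezMoeConfig  # assumes the file is importable

config = MegrezMoeConfig(
    vocab_size=122880,
    hidden_size=2048,
    intermediate_size=10944,
    moe_intermediate_size=1408,
    num_hidden_layers=31,
    num_attention_heads=16,
    num_key_value_heads=4,
    n_shared_experts=4,
    n_routed_experts=64,
    num_experts_per_tok=6,
    topk_method="noaux_tc",
    scoring_func="sigmoid",
    norm_topk_prob=True,
    n_group=1,
    topk_group=1,
    first_k_dense_replace=1,
    experts_shared_frequency=3,
    pre_gate=True,
    rope_theta=5000000,
    max_position_embeddings=163840,
)
print(config.model_type)  # "megrez_moe"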
model-00001-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bfd6d27b9fd4b394927245f2377ba309bb78711eeaf114e16b44a49aaf689846
size 4994539264
model-00002-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4725776f68e6468a1ecb28f2f7fafd6bce3d32f9c4ae4c449d2e002b941e399d
size 4995331048
model-00003-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dbc8e7cb07a1e3478e38bc84c6473109355a9b3fbfe36e185c224ee4d1cafb1d
size 4958908888
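Each shard entry above is a Git LFS pointer: the repository records only a SHA-256 `oid` and the file `size` in bytes, while the actual weights live in LFS storage. A downloaded shard can be checked against its pointer, for example (a sketch, not part of the commit):

# A minimal sketch (not part of the commit): verify a downloaded shard against the
# sha256 oid and byte size recorded in its LFS pointer above.
import hashlib
from pathlib import Path

path = Path("model-00001-of-00003.safetensors")
h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == "bfd6d27b9fd4b394927245f2377ba309bb78711eeaf114e16b44a49aaf689846"
assert path.stat().st_size == 4994539264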
model.safetensors.index.json
ADDED
The diff for this file is too large to render. See raw diff.
modeling_megrez_moe2.py
ADDED
@@ -0,0 +1,1024 @@
# coding=utf-8
# Copyright 2025 Infini-AI and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Megrez model."""
import math
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from transformers.modeling_outputs import (BaseModelOutputWithPast, CausalLMOutputWithPast,
                                           SequenceClassifierOutputWithPast)
from transformers.modeling_utils import PreTrainedModel
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaRotaryEmbedding
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (add_start_docstrings, add_start_docstrings_to_model_forward, logging,
                                replace_return_docstrings)
from transformers.utils.import_utils import is_torch_fx_available

from .configuration_megrez_moe import MegrezMoeConfig

# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
# It means that the function will not be traced through and simply appear as a node in the graph.
if is_torch_fx_available():
    if not is_torch_greater_or_equal_than_1_13:
        import torch.fx

    _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "MegrezMoeConfig"

class MegrezMoeRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        MegrezMoeRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


ALL_LAYERNORM_LAYERS.append(MegrezMoeRMSNorm)

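
# Editor's note: a minimal sketch (not part of the original file) of what the
# RMSNorm above computes: the input is scaled by the reciprocal root-mean-square
# of its last dimension, then by a learned per-channel weight (ones at init).
# The function name is illustrative only.
def _demo_rmsnorm_sketch():
    norm = MegrezMoeRMSNorm(hidden_size=4, eps=1e-6)
    x = torch.randn(2, 3, 4)
    manual = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(norm(x), manual, atol=1e-5)
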
class MegrezMoeMLP(nn.Module):
    def __init__(self, config, hidden_size=None, intermediate_size=None):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size if hidden_size is None else hidden_size
        self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size

        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj

class MoEGate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.top_k = config.num_experts_per_tok
        self.n_routed_experts = config.n_routed_experts
        self.routed_scaling_factor = config.routed_scaling_factor
        self.scoring_func = config.scoring_func
        self.topk_method = config.topk_method
        self.n_group = config.n_group
        self.topk_group = config.topk_group

        # topk selection algorithm
        self.norm_topk_prob = config.norm_topk_prob
        self.gating_dim = config.hidden_size
        self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim)))
        if self.topk_method == "noaux_tc":
            self.e_score_correction_bias = nn.Parameter(torch.empty((self.n_routed_experts)))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        import torch.nn.init as init

        init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, hidden_states):
        bsz, seq_len, h = hidden_states.shape
        ### compute gating score
        hidden_states = hidden_states.view(-1, h)
        logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32), None)
        if self.scoring_func == "sigmoid":
            scores = logits.sigmoid()
        else:
            raise NotImplementedError(f"unsupported scoring function for MoE gating: {self.scoring_func}")

        ### select top-k experts
        if self.topk_method == "noaux_tc":
            assert not self.training
            scores_for_choice = scores.view(bsz * seq_len, -1) + self.e_score_correction_bias.unsqueeze(0)
            group_scores = (
                scores_for_choice.view(bsz * seq_len, self.n_group, -1).topk(2, dim=-1)[0].sum(dim=-1)
            )  # [n, n_group]
            group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]  # [n, top_k_group]
            group_mask = torch.zeros_like(group_scores)  # [n, n_group]
            group_mask.scatter_(1, group_idx, 1)  # [n, n_group]
            score_mask = (
                group_mask.unsqueeze(-1)
                .expand(bsz * seq_len, self.n_group, self.n_routed_experts // self.n_group)
                .reshape(bsz * seq_len, -1)
            )  # [n, e]
            tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), float("-inf"))  # [n, e]
            _, topk_idx = torch.topk(tmp_scores, k=self.top_k, dim=-1, sorted=False)
            topk_weight = scores.gather(1, topk_idx)
        else:
            raise NotImplementedError(f"unsupported TopK function for MoE gating: {self.topk_method}")

        ### norm gate to sum 1
        if self.top_k > 1 and self.norm_topk_prob:
            denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
            topk_weight = topk_weight / denominator
        topk_weight = topk_weight * self.routed_scaling_factor  # must multiply the scaling factor

        return topk_idx, topk_weight

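
# Editor's note: a self-contained sketch (not part of the original file) of the
# "noaux_tc" routing above: sigmoid scores plus a correction bias, group-limited
# top-k selection, then weights renormalized to sum to 1 (norm_topk_prob=True and
# routed_scaling_factor=1.0, as in config.json). Defaults follow config.json
# (64 routed experts, 6 per token); the function name is hypothetical.
def _demo_noaux_tc_routing(n_tokens=5, n_experts=64, n_group=1, topk_group=1, top_k=6):
    scores = torch.rand(n_tokens, n_experts)  # stands in for sigmoid(logits)
    bias = torch.zeros(n_experts)             # stands in for e_score_correction_bias
    scores_for_choice = scores + bias.unsqueeze(0)
    # score each group by the sum of its two best experts, keep the best groups
    group_scores = scores_for_choice.view(n_tokens, n_group, -1).topk(2, dim=-1)[0].sum(-1)
    group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=False)[1]
    group_mask = torch.zeros_like(group_scores).scatter_(1, group_idx, 1)
    score_mask = group_mask.unsqueeze(-1).expand(n_tokens, n_group, n_experts // n_group).reshape(n_tokens, -1)
    # pick top_k experts only inside the surviving groups
    masked = scores_for_choice.masked_fill(~score_mask.bool(), float("-inf"))
    _, topk_idx = torch.topk(masked, k=top_k, dim=-1, sorted=False)
    topk_weight = scores.gather(1, topk_idx)
    topk_weight = topk_weight / (topk_weight.sum(-1, keepdim=True) + 1e-20)
    return topk_idx, topk_weight
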
class MegrezMoeMoE(nn.Module):
    """
    A mixed expert module containing shared experts.
    """

    def __init__(self, config, layer_number, init_experts: bool = True):
        super().__init__()
        self.layer_number = layer_number
        self.config = config
        self.num_experts_per_tok = config.num_experts_per_tok

        if hasattr(config, "ep_size") and config.ep_size > 1:
            assert config.ep_size == dist.get_world_size()
            self.ep_size = config.ep_size
            self.experts_per_rank = config.n_routed_experts // config.ep_size
            self.ep_rank = dist.get_rank()
            if init_experts:
                self.experts = nn.ModuleList(
                    [
                        (
                            MegrezMoeMLP(config, intermediate_size=config.moe_intermediate_size)
                            if i >= self.ep_rank * self.experts_per_rank
                            and i < (self.ep_rank + 1) * self.experts_per_rank
                            else None
                        )
                        for i in range(config.n_routed_experts)
                    ]
                )
            else:
                self.experts = None
        else:
            self.ep_size = 1
            self.experts_per_rank = config.n_routed_experts
            self.ep_rank = 0
            if init_experts:
                self.experts = nn.ModuleList(
                    [
                        MegrezMoeMLP(config, intermediate_size=config.moe_intermediate_size)
                        for i in range(config.n_routed_experts)
                    ]
                )
            else:
                self.experts = None

        self.gate = MoEGate(config)
        if config.n_shared_experts is not None:
            intermediate_size = config.moe_intermediate_size * config.n_shared_experts
            self.shared_experts = MegrezMoeMLP(config=config, intermediate_size=intermediate_size)

    def set_experts(self, experts):
        self.experts = experts

    def forward(self, hidden_states, pre_gate_hidden_states=None):
        identity = hidden_states
        orig_shape = hidden_states.shape
        if pre_gate_hidden_states is not None:
            topk_idx, topk_weight = self.gate(pre_gate_hidden_states)
        else:
            topk_idx, topk_weight = self.gate(hidden_states)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        flat_topk_idx = topk_idx.view(-1)
        if self.training:
            hidden_states = hidden_states.repeat_interleave(self.num_experts_per_tok, dim=0)
            y = torch.empty_like(hidden_states)
            for i, expert in enumerate(self.experts):
                y[flat_topk_idx == i] = expert(hidden_states[flat_topk_idx == i])
            y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
            y = y.to(hidden_states.dtype).view(*orig_shape)
        else:
            y = self.moe_infer(hidden_states, topk_idx, topk_weight).view(*orig_shape)
        if self.config.n_shared_experts is not None:
            shared_out = self.shared_experts(identity)
            y = y + shared_out
        return y

    @torch.no_grad()
    def moe_infer(self, x, topk_ids, topk_weight):
        cnts = topk_ids.new_zeros((topk_ids.shape[0], len(self.experts)))
        cnts.scatter_(1, topk_ids, 1)
        tokens_per_expert = cnts.sum(dim=0)
        idxs = topk_ids.view(-1).argsort()
        sorted_tokens = x[idxs // topk_ids.shape[1]]
        sorted_tokens_shape = sorted_tokens.shape
        if self.ep_size > 1:
            tokens_per_ep_rank = tokens_per_expert.view(self.ep_size, -1).sum(dim=1)
            tokens_per_expert_group = tokens_per_expert.new_empty(tokens_per_expert.shape[0])
            dist.all_to_all_single(tokens_per_expert_group, tokens_per_expert)
            output_splits = tokens_per_expert_group.view(self.ep_size, -1).sum(1).cpu().numpy().tolist()
            gathered_tokens = sorted_tokens.new_empty(
                tokens_per_expert_group.sum(dim=0).cpu().item(), sorted_tokens.shape[1]
            )
            input_split_sizes = tokens_per_ep_rank.cpu().numpy().tolist()
            dist.all_to_all(
                list(gathered_tokens.split(output_splits)),
                list(sorted_tokens.split(input_split_sizes)),
            )
            tokens_per_expert_post_gather = tokens_per_expert_group.view(self.ep_size, self.experts_per_rank).sum(dim=0)
            gatherd_idxs = np.zeros(shape=(gathered_tokens.shape[0],), dtype=np.int32)
            s = 0
            for i, k in enumerate(tokens_per_expert_group.cpu().numpy()):
                gatherd_idxs[s : s + k] = i % self.experts_per_rank
                s += k
            gatherd_idxs = gatherd_idxs.argsort()
            sorted_tokens = gathered_tokens[gatherd_idxs]
            tokens_per_expert = tokens_per_expert_post_gather
        tokens_per_expert = tokens_per_expert.cpu().numpy()

        outputs = []
        start_idx = 0
        for i, num_tokens in enumerate(tokens_per_expert):
            end_idx = start_idx + num_tokens
            if num_tokens == 0:
                continue
            expert = self.experts[i + self.ep_rank * self.experts_per_rank]
            tokens_for_this_expert = sorted_tokens[start_idx:end_idx]
            expert_out = expert(tokens_for_this_expert)
            outputs.append(expert_out)
            start_idx = end_idx

        outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)
        if self.ep_size > 1:
            new_x = torch.empty_like(outs)
            new_x[gatherd_idxs] = outs
            gathered_tokens = new_x.new_empty(*sorted_tokens_shape)
            dist.all_to_all(
                list(gathered_tokens.split(input_split_sizes)),
                list(new_x.split(output_splits)),
            )
            outs = gathered_tokens

        new_x = torch.empty_like(outs)
        new_x[idxs] = outs
        final_out = (
            new_x.view(*topk_ids.shape, -1)
            .type(topk_weight.dtype)
            .mul_(topk_weight.unsqueeze(dim=-1))
            .sum(dim=1)
            .type(new_x.dtype)
        )
        return final_out

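
# Editor's note: a self-contained sketch (not part of the original file) of the
# dispatch pattern used by moe_infer above in the single-rank (ep_size == 1) case:
# sort token copies by expert id, run each expert once on its contiguous slice,
# scatter the results back, and combine with the routing weights. Names are
# illustrative only.
def _demo_moe_dispatch(x, topk_ids, topk_weight, experts):
    idxs = topk_ids.view(-1).argsort()              # token copies grouped by expert
    sorted_tokens = x[idxs // topk_ids.shape[1]]
    tokens_per_expert = torch.bincount(topk_ids.view(-1), minlength=len(experts))
    outs, start = [], 0
    for i, n in enumerate(tokens_per_expert.tolist()):
        if n:
            outs.append(experts[i](sorted_tokens[start : start + n]))
        start += n
    out = torch.cat(outs, dim=0)
    unsorted = torch.empty_like(out)
    unsorted[idxs] = out                            # undo the argsort
    return (unsorted.view(*topk_ids.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)

# usage sketch:
#   _demo_moe_dispatch(torch.randn(4, 8),
#                      torch.tensor([[0, 1], [1, 2], [0, 2], [2, 1]]),
#                      torch.full((4, 2), 0.5),
#                      [nn.Linear(8, 8) for _ in range(3)])
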
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

class MegrezMoeDecoderLayer(nn.Module):
    def __init__(self, config: MegrezMoeConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_number = layer_idx

        self.experts_shared = (
            config.experts_shared_frequency is not None and layer_idx >= self.config.first_k_dense_replace
        )

        self.pre_gate = config.pre_gate

        self.hidden_size = config.hidden_size

        is_moe = (
            config.n_routed_experts is not None
            and layer_idx >= config.first_k_dense_replace
            and layer_idx % config.moe_layer_freq == 0
        )

        init_experts = (layer_idx - config.first_k_dense_replace) % config.experts_shared_frequency == 0
        self.self_attn = LlamaAttention(config=config, layer_idx=layer_idx)
        self.mlp = MegrezMoeMoE(config, layer_idx, init_experts) if is_moe else MegrezMoeMLP(config)
        self.input_layernorm = MegrezMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = MegrezMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """

        if self.pre_gate and self.layer_number >= self.config.first_k_dense_replace:
            hidden_states = torch.split(hidden_states, hidden_states.shape[0] // 2, dim=0)
            pre_gate_hidden_states = hidden_states[0]
            hidden_states = hidden_states[1]
        else:
            pre_gate_hidden_states = None

        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )

        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        post_attention_layernorm_hidden_states = hidden_states
        if isinstance(self.mlp, MegrezMoeMoE):
            hidden_states = self.mlp(hidden_states, pre_gate_hidden_states=pre_gate_hidden_states)
        else:
            hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        pre_gate_hidden_states = post_attention_layernorm_hidden_states

        if self.pre_gate and self.layer_number < self.config.num_hidden_layers - 1:
            hidden_states = torch.cat([pre_gate_hidden_states, hidden_states], dim=0)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs

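
# Editor's note: an illustrative sketch (not part of the original file) of the
# `pre_gate` plumbing above. When pre_gate is enabled, a layer concatenates the
# tensor the *next* layer's gate should route on together with its real output
# along dim 0; the next layer splits the two halves apart again. Values here are
# arbitrary and the function name is hypothetical.
def _demo_pre_gate_packing():
    pre_gate_hidden_states = torch.randn(2, 5, 8)  # routing input for the next layer
    hidden_states = torch.randn(2, 5, 8)           # actual layer output
    packed = torch.cat([pre_gate_hidden_states, hidden_states], dim=0)  # (4, 5, 8)
    halves = torch.split(packed, packed.shape[0] // 2, dim=0)
    assert torch.equal(halves[0], pre_gate_hidden_states)
    assert torch.equal(halves[1], hidden_states)
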
MegrezMoe_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`MegrezMoeConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare MegrezMoe Model outputting raw hidden-states without any specific head on top.",
    MegrezMoe_START_DOCSTRING,
)
class MegrezMoePreTrainedModel(PreTrainedModel):
    config_class = MegrezMoeConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MegrezMoeDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_cache_class = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

MegrezMoe_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
            cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "The bare MegrezMoe Model outputting raw hidden-states without any specific head on top.",
    MegrezMoe_START_DOCSTRING,
)
class MegrezMoeModel(MegrezMoePreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MegrezMoeDecoderLayer`]

    Args:
        config: MegrezMoeConfig
    """

    def __init__(self, config: MegrezMoeConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.rotary_emb = LlamaRotaryEmbedding(config=config)
        self.layers = nn.ModuleList(
            [MegrezMoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        self.norm = MegrezMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(MegrezMoe_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        **flash_attn_kwargs,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
                )
                use_cache = False

        past_key_values_length = 0
        if use_cache:
            use_legacy_cache = not isinstance(past_key_values, Cache)
            if use_legacy_cache:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            # past_key_values_length = past_key_values.get_usable_length(seq_length)
            past_key_values_length = past_key_values.get_seq_length()

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length,
                seq_length + past_key_values_length,
                dtype=torch.long,
                device=device,
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if self._use_flash_attention_2:
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
            )

        # embed positions
        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        for layer_idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            shared_layer_idx = (
                (layer_idx - self.config.first_k_dense_replace)
                // self.config.experts_shared_frequency
                * self.config.experts_shared_frequency
                + self.config.first_k_dense_replace
            )
            if layer_idx >= self.config.first_k_dense_replace and shared_layer_idx != layer_idx:
                decoder_layer.mlp.set_experts(self.layers[shared_layer_idx].mlp.experts)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    position_embeddings,
                    **flash_attn_kwargs,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    position_embeddings=position_embeddings,
                    **flash_attn_kwargs,
                )
            if layer_idx >= self.config.first_k_dense_replace and shared_layer_idx != layer_idx:
                decoder_layer.mlp.set_experts(None)
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

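
# Editor's note: an illustrative sketch (not part of the original file) of the
# expert-sharing schedule used in the forward loop above. With the values shipped
# in config.json (first_k_dense_replace=1, experts_shared_frequency=3), each block
# of three MoE layers borrows the expert weights owned by the first layer of the
# block; the other layers were built with init_experts=False and get the shared
# ModuleList temporarily attached via set_experts. The function name is hypothetical.
def _demo_shared_layer_schedule(num_hidden_layers=31, first_k_dense_replace=1, experts_shared_frequency=3):
    owners = {}
    for layer_idx in range(first_k_dense_replace, num_hidden_layers):
        shared_layer_idx = (
            (layer_idx - first_k_dense_replace) // experts_shared_frequency * experts_shared_frequency
            + first_k_dense_replace
        )
        owners[layer_idx] = shared_layer_idx
    return owners  # e.g. layers 1-3 -> 1, layers 4-6 -> 4, ...
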
| 715 |
+
class MegrezMoeForCausalLM(MegrezMoePreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = MegrezMoeModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(MegrezMoe_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to
                `-100` are ignored (masked); the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MegrezMoeForCausalLM

        >>> model = MegrezMoeForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

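A minimal, self-contained sketch of the label-shifting convention in the loss above: logits at position t are scored against the token at position t+1, and positions labeled -100 are skipped because `CrossEntropyLoss` defaults to `ignore_index=-100`. The tensors here are toy values, not from the model.

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
logits = torch.randn(1, 5, vocab_size)       # (batch, seq, vocab)
labels = torch.tensor([[3, 7, -100, 2, 9]])  # -100 positions are masked out

shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab_size)
shift_labels = labels[..., 1:].contiguous().view(-1)
loss = CrossEntropyLoss()(shift_logits, shift_labels)
```
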
    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        **kwargs,
    ):
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                # max_cache_length = past_key_values.get_max_length()
                max_cache_length = past_key_values.get_max_cache_shape()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            # input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

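A small worked example (not from the upload) of the on-the-fly `position_ids` construction above for a left-padded batch: padded positions are clamped to a dummy value of 1 while real tokens receive consecutive positions.

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],   # left-padded sequence
                               [1, 1, 1, 1, 1]])  # full-length sequence
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])
```
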
@add_start_docstrings(
    """
    The MegrezMoe Model transformer with a sequence classification head on top (linear layer).

    [`MegrezMoeForSequenceClassification`] uses the last token in order to do the classification, as other causal
    models (e.g. GPT-2) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row.
    If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess
    the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last
    value in each row of the batch).
    """,
    MegrezMoe_START_DOCSTRING,
)
class MegrezMoeForSequenceClassification(MegrezMoePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = MegrezMoeModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(MegrezMoe_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed
            (Mean-Square loss); if `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
                    logits.device
                )
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
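A short sketch (not part of the upload) of the last-non-pad pooling trick in the classification head above. It assumes right padding, which matches `"padding_side": "right"` in tokenizer_config.json below; `pad_token_id=120002` is the `<|pad|>` id from this repo's files.

```python
import torch

pad_token_id = 120002  # <|pad|> in this checkpoint
input_ids = torch.tensor([[11, 12, 13, pad_token_id, pad_token_id]])

# argmax finds the first pad position; the token just before it is the last real one
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
print(sequence_lengths)  # tensor([2])
```

If a row contains no pad token at all, `argmax` returns 0 and the index becomes -1, which conveniently selects the last position of the row.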
special_tokens_map.json
ADDED
@@ -0,0 +1,16 @@
{
  "eos_token": {
    "content": "<|turn_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|pad|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
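As a hedged consistency check (the local path is an assumption), these special tokens should resolve to the ids declared in tokenizer_config.json below (`<|turn_end|>` = 120005, `<|pad|>` = 120002):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./megrez-3x7b-a3b-instruct")
assert tok.eos_token == "<|turn_end|>"
assert tok.convert_tokens_to_ids("<|turn_end|>") == 120005
assert tok.convert_tokens_to_ids("<|pad|>") == 120002
```
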
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,236 @@
{
  "add_bos_token": false,
  "added_tokens_decoder": {
    "120000": {
      "content": "<|eos|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120001": {
      "content": "<|unk|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120002": {
      "content": "<|pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120003": {
      "content": "<|role_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120004": {
      "content": "<|role_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120005": {
      "content": "<|turn_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120006": {
      "content": "<|code_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120007": {
      "content": "<|code_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120008": {
      "content": "<|commit_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120009": {
      "content": "<|commit_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120010": {
      "content": "<|diff_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120011": {
      "content": "<|diff_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120012": {
      "content": "<|code_execution_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120013": {
      "content": "<|code_execution_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120014": {
      "content": "<|image_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120015": {
      "content": "<|image_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120016": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120017": {
      "content": "<|video_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120018": {
      "content": "<|video_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120019": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120020": {
      "content": "<|audio_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120021": {
      "content": "<|audio_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120022": {
      "content": "<|audio_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120023": {
      "content": "<|function_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120024": {
      "content": "<|function_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120025": {
      "content": "<|turn_end>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120026": {
      "content": "<think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "120027": {
      "content": "</think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|turn_end|>",
  "extra_special_tokens": {},
  "model_max_length": 32768,
  "pad_token": "<|pad|>",
  "padding_side": "right",
  "tokenizer_class": "PreTrainedTokenizerFast"
}
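Taken together, these files support a standard `transformers` load. A hedged end-to-end sketch (the local path is an assumption; `trust_remote_code=True` is required so the `auto_map` entries in config.json resolve to the custom MegrezMoe classes):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "./megrez-3x7b-a3b-instruct"  # hypothetical local checkout of this repo
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(
    path, torch_dtype=torch.bfloat16, trust_remote_code=True
)

# the bundled chat template formats messages into the model's role/turn markup
messages = [{"role": "user", "content": "Hello! What can you do?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output = model.generate(input_ids, max_new_tokens=64)  # stops at <|turn_end|>
print(tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True))
```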