Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- fla/models/__pycache__/__init__.cpython-312.pyc +0 -0
- fla/models/__pycache__/utils.cpython-312.pyc +0 -0
- fla/models/abc/__init__.py +13 -0
- fla/models/abc/modeling_abc.py +418 -0
- fla/models/bitnet/__init__.py +13 -0
- fla/models/bitnet/configuration_bitnet.py +67 -0
- fla/models/bitnet/modeling_bitnet.py +441 -0
- fla/models/delta_net/__init__.py +12 -0
- fla/models/delta_net/configuration_delta_net.py +91 -0
- fla/models/gated_deltanet/__init__.py +12 -0
- fla/models/gated_deltanet/configuration_gated_deltanet.py +83 -0
- fla/models/gated_deltaproduct/__pycache__/modeling_gated_deltaproduct.cpython-312.pyc +0 -0
- fla/models/gated_deltaproduct/configuration_gated_deltaproduct.py +90 -0
- fla/models/gated_deltaproduct/modeling_gated_deltaproduct.py +520 -0
- fla/models/gla/__init__.py +13 -0
- fla/models/gla/modeling_gla.py +417 -0
- fla/models/gsa/modeling_gsa.py +420 -0
- fla/models/hgrn2/__init__.py +13 -0
- fla/models/lightnet/modeling_lightnet.py +410 -0
- fla/models/linear_attn/__init__.py +12 -0
- fla/models/linear_attn/configuration_linear_attn.py +91 -0
- fla/models/linear_attn/modeling_linear_attn.py +406 -0
- fla/models/mamba/__init__.py +13 -0
- fla/models/mamba/configuration_mamba.py +166 -0
- fla/models/mamba2/configuration_mamba2.py +170 -0
- fla/models/nsa/__pycache__/__init__.cpython-312.pyc +0 -0
- fla/models/nsa/configuration_nsa.py +75 -0
- fla/models/nsa/modeling_nsa.py +398 -0
- fla/models/retnet/__init__.py +13 -0
- fla/models/retnet/configuration_retnet.py +92 -0
- fla/models/rwkv6/modeling_rwkv6.py +480 -0
- fla/models/rwkv7/modeling_rwkv7.py +505 -0
- fla/models/samba/__init__.py +13 -0
- fla/models/samba/modeling_samba.py +413 -0
- fla/models/transformer/__init__.py +13 -0
- fla/models/transformer_dsmtp/__init__.py +13 -0
- fla/models/transformer_dsmtp/configuration_transformer.py +73 -0
- fla/models/transformer_dsmtp/modeling_transformer.py +494 -0
- fla/models/transformer_mtp/__init__.py +13 -0
- fla/models/transformer_top/modeling_transformer.py +440 -0
- fla/ops/based/__pycache__/parallel.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/chunk_delta_h.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/chunk_h.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/fused_recurrent.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/utils.cpython-312.pyc +0 -0
- fla/ops/gated_delta_rule/__pycache__/__init__.cpython-312.pyc +0 -0
- fla/ops/gated_delta_rule/__pycache__/chunk.cpython-312.pyc +0 -0
- fla/ops/gated_delta_rule/__pycache__/fused_recurrent.cpython-312.pyc +0 -0
- fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_A_bwd.cpython-312.pyc +0 -0
- fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_h_bwd.cpython-312.pyc +0 -0
fla/models/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (3.26 kB)
fla/models/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (6.68 kB)
fla/models/abc/__init__.py
ADDED
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+from fla.models.abc.configuration_abc import ABCConfig
+from fla.models.abc.modeling_abc import ABCForCausalLM, ABCModel
+
+AutoConfig.register(ABCConfig.model_type, ABCConfig)
+AutoModel.register(ABCConfig, ABCModel)
+AutoModelForCausalLM.register(ABCConfig, ABCForCausalLM)
+
+
+__all__ = ['ABCConfig', 'ABCForCausalLM', 'ABCModel']
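
Aside (not part of the diff): once this module is imported, the ABC classes become reachable through the standard `transformers` Auto API. A minimal usage sketch, assuming the `fla` package is installed; the config field values are illustrative:

```python
from transformers import AutoConfig, AutoModelForCausalLM

import fla.models.abc  # noqa: F401  (runs the Auto* registrations above)

# Build a small, randomly initialized model from the registered config type.
config = AutoConfig.for_model('abc', hidden_size=256, num_hidden_layers=2)
model = AutoModelForCausalLM.from_config(config)
```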
fla/models/abc/modeling_abc.py
ADDED
@@ -0,0 +1,418 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+import math
+import warnings
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint
+from transformers.generation import GenerationMixin
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import logging
+from transformers.utils.deprecation import deprecate_kwarg
+
+from fla.layers.abc import ABCAttention
+from fla.layers.attn import Attention
+from fla.models.abc.configuration_abc import ABCConfig
+from fla.models.utils import Cache
+from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
+from fla.modules import GatedMLP as ABCMLP
+from fla.modules import RMSNorm
+
+logger = logging.get_logger(__name__)
+
+if TYPE_CHECKING:
+    from transformers.processing_utils import Unpack
+
+
+class ABCBlock(nn.Module):
+    def __init__(self, config: ABCConfig, layer_idx: int):
+        super().__init__()
+
+        self.config = config
+        self.layer_idx = layer_idx
+
+        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+        if config.attn is not None and layer_idx in config.attn['layers']:
+            self.attn = Attention(
+                hidden_size=config.hidden_size,
+                num_heads=config.attn['num_heads'],
+                num_kv_heads=config.attn['num_kv_heads'],
+                qkv_bias=config.attn['qkv_bias'],
+                window_size=config.attn['window_size'],
+                rope_theta=config.attn['rope_theta'],
+                max_position_embeddings=config.max_position_embeddings,
+                layer_idx=layer_idx
+            )
+        else:
+            self.attn = ABCAttention(
+                hidden_size=config.hidden_size,
+                expand_k=config.expand_k,
+                expand_v=config.expand_v,
+                num_heads=config.num_heads,
+                num_slots=config.num_slots,
+                use_short_conv=config.use_short_conv,
+                conv_size=config.conv_size,
+                gate_fn=config.hidden_act,
+                elementwise_affine=config.elementwise_affine,
+                norm_eps=config.norm_eps,
+                use_rope=config.use_rope,
+                clamp_min=config.clamp_min,
+                clamp_max=config.clamp_max,
+                fuse_norm=config.fuse_norm,
+                layer_idx=layer_idx
+            )
+        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+        self.mlp = ABCMLP(
+            hidden_size=config.hidden_size,
+            hidden_ratio=config.hidden_ratio,
+            intermediate_size=config.intermediate_size,
+            hidden_act=config.hidden_act,
+            fuse_swiglu=config.fuse_swiglu
+        )
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+        use_cache: Optional[bool] = False,
+        output_attentions: Optional[bool] = False,
+        **kwargs: Unpack[Dict]
+    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+
+        residual = hidden_states
+
+        hidden_states = self.attn_norm(hidden_states)
+        hidden_states, attentions, past_key_values = self.attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            **kwargs
+        )
+        if self.config.fuse_norm:
+            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
+        else:
+            hidden_states = residual + hidden_states
+            residual = hidden_states
+            hidden_states = self.mlp_norm(hidden_states)
+        hidden_states = self.mlp(hidden_states)
+        hidden_states = residual + hidden_states
+
+        outputs = (hidden_states, attentions, past_key_values)
+
+        return outputs
+
+
+class ABCPreTrainedModel(PreTrainedModel):
+
+    config_class = ABCConfig
+    base_model_prefix = 'model'
+    supports_gradient_checkpointing = True
+    _no_split_modules = ['ABCBlock']
+    _supports_cache_class = True
+
+    def __init__(self, *inputs, **kwargs):
+        super().__init__(*inputs, **kwargs)
+
+    def _init_weights(
+        self,
+        module: nn.Module,
+        prenorm_residual_strategy: Optional[str] = 'rescale',
+        num_residuals_per_layer: int = 2,
+    ):
+        if isinstance(module, (nn.Linear, nn.Conv1d)):
+            # Slightly different from the TF version which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        elif isinstance(module, nn.Embedding):
+            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+        elif hasattr(module, 'reset_parameters'):
+            module.reset_parameters()
+
+        if prenorm_residual_strategy is not None:
+            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+            #
+            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+            p = None
+            if hasattr(module, 'o_proj'):
+                p = module.o_proj.weight
+            elif hasattr(module, 'down_proj'):
+                p = module.down_proj.weight
+            if p is not None:
+                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
+                # We need to reinit p since this code could be called multiple times
+                # Having just p *= scale would repeatedly scale it down
+                if prenorm_residual_strategy == 'rescale':
+                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
+                    with torch.no_grad():
+                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
+                elif prenorm_residual_strategy == 'zero':
+                    nn.init.zeros_(p)
+                else:
+                    raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")
+
+
+class ABCModel(ABCPreTrainedModel):
+
+    def __init__(self, config: ABCConfig):
+        super().__init__(config)
+        self.padding_idx = config.pad_token_id
+        self.vocab_size = config.vocab_size
+
+        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+        self.layers = nn.ModuleList([ABCBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
+        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+
+        self.gradient_checkpointing = False
+
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.embeddings
+
+    def set_input_embeddings(self, value):
+        self.embeddings = value
+
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,  # noqa
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        **kwargs: Unpack[Dict]
+    ) -> Union[Tuple, BaseModelOutputWithPast]:
+        if output_attentions:
+            warnings.warn("`ABCModel` does not `output_attentions` now, setting it to `False`.")
+            output_attentions = False
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # retrieve input_ids and inputs_embeds
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+        if input_ids is None and inputs_embeds is None:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embeddings(input_ids)
+        hidden_states = inputs_embeds
+
+        if use_cache and not isinstance(past_key_values, Cache):
+            past_key_values = Cache.from_legacy_cache(past_key_values)
+
+        if self.gradient_checkpointing and self.training and use_cache:
+            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
+            use_cache = False
+
+        all_hidden_states = () if output_hidden_states else None
+        all_attns = () if output_attentions else None
+        for layer in self.layers:
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+
+            if self.gradient_checkpointing and self.training:
+                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
+                    layer.__call__,
+                    hidden_states,
+                    attention_mask,
+                    past_key_values,
+                    use_cache,
+                    output_attentions,
+                    **kwargs
+                )
+            else:
+                hidden_states, attentions, past_key_values = layer(
+                    hidden_states,
+                    attention_mask,
+                    past_key_values=past_key_values,
+                    use_cache=use_cache,
+                    output_attentions=output_attentions,
+                    **kwargs
+                )
+
+            if output_attentions:
+                all_attns += (attentions,)
+
+        hidden_states = self.norm(hidden_states)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        if not return_dict:
+            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=past_key_values,
+            hidden_states=all_hidden_states,
+            attentions=all_attns
+        )
+
+
+class ABCForCausalLM(ABCPreTrainedModel, GenerationMixin):
+
+    _tied_weights_keys = ["lm_head.weight"]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = ABCModel(config)
+        self.vocab_size = config.vocab_size
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+        self.criterion = None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embeddings
+
+    def set_input_embeddings(self, value):
+        self.model.embeddings = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model = decoder
+
+    def get_decoder(self):
+        return self.model
+
+    def generate(self, *args, **kwargs):
+        try:
+            return super().generate(*args, **kwargs)
+        except AttributeError as exception:
+            if 'past_key_values' in str(exception):
+                raise AttributeError(
+                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
+                    f"which is not supported for {self.__class__.__name__}. "
+                    f"Try another generation strategy instead. "
+                    f"For the available generation strategies, check this doc: "
+                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
+                )
+            else:
+                raise exception
+
+    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+    def prepare_inputs_for_generation(
+        self,
+        input_ids: torch.LongTensor = None,
+        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        use_cache: bool = True,
+        logits_to_keep: Optional[int] = None,
+        **kwargs
+    ):
+        # only last token for `inputs_ids` if the `past_key_values` is not empty.
+        if past_key_values is not None and len(past_key_values) > 0:
+            input_ids = input_ids[:, -1:]
+        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+        if inputs_embeds is not None and len(past_key_values) == 0:
+            model_inputs = {'inputs_embeds': inputs_embeds}
+        else:
+            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+            # recompiles graphs as the stride of the inputs is a guard.
+            # Ref: https://github.com/huggingface/transformers/pull/29114
+            # TODO: use `next_tokens` directly instead.
+            model_inputs = {'input_ids': input_ids.contiguous()}
+
+        if logits_to_keep is not None:
+            model_inputs['logits_to_keep'] = logits_to_keep
+
+        model_inputs.update({
+            'past_key_values': past_key_values,
+            'use_cache': use_cache,
+            'attention_mask': attention_mask,
+        })
+        return model_inputs
+
+    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        logits_to_keep: Optional[int] = 0,
+        **kwargs: Unpack[Dict]
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            inputs_embeds=inputs_embeds,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            **kwargs
+        )
+
+        hidden_states = outputs[0]
+        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
+
+        loss, logits = None, None
+        if not fuse_linear_and_cross_entropy or labels is None:
+            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
+        if labels is not None:
+            if getattr(self, 'criterion', None) is None:
+                if fuse_linear_and_cross_entropy:
+                    criterion = FusedLinearCrossEntropyLoss()
+                elif self.config.fuse_cross_entropy:
+                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
+                else:
+                    criterion = nn.CrossEntropyLoss()
+            else:
+                criterion = self.criterion
+            labels = labels.to(hidden_states.device)
+            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
+            if fuse_linear_and_cross_entropy:
+                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
+            else:
+                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
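
Aside (not part of the diff): the loss path above implements next-token prediction by shifting the labels left by one and padding the final slot with `ignore_index`, instead of slicing the logits. A minimal sketch of the same transform:

```python
import torch

ignore_index = -100  # the default for nn.CrossEntropyLoss
labels = torch.tensor([[10, 11, 12, 13]])

# Position t is now scored against token t+1; the last position is masked out.
shifted = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], ignore_index)), 1)
print(shifted)  # tensor([[ 11,  12,  13, -100]])
```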
fla/models/bitnet/__init__.py
ADDED
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+from fla.models.bitnet.configuration_bitnet import BitNetConfig
+from fla.models.bitnet.modeling_bitnet import BitNetForCausalLM, BitNetModel
+
+AutoConfig.register(BitNetConfig.model_type, BitNetConfig)
+AutoModel.register(BitNetConfig, BitNetModel)
+AutoModelForCausalLM.register(BitNetConfig, BitNetForCausalLM)
+
+
+__all__ = ['BitNetConfig', 'BitNetForCausalLM', 'BitNetModel']
fla/models/bitnet/configuration_bitnet.py
ADDED
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+
+from typing import Optional
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class BitNetConfig(PretrainedConfig):
+
+    model_type = 'bitnet'
+    keys_to_ignore_at_inference = ['past_key_values']
+
+    def __init__(
+        self,
+        hidden_size: int = 2048,
+        num_hidden_layers: int = 24,
+        num_heads: int = 32,
+        num_kv_heads: int = None,
+        window_size: Optional[int] = None,
+        rope_theta: Optional[float] = 10000.,
+        max_position_embeddings: int = 2048,
+        hidden_ratio: Optional[int] = 4,
+        intermediate_size: Optional[int] = None,
+        hidden_act: str = "swish",
+        initializer_range: float = 0.006,
+        elementwise_affine: Optional[bool] = True,
+        norm_eps: float = 1e-6,
+        use_cache: bool = True,
+        pad_token_id: int = None,
+        bos_token_id: int = 1,
+        eos_token_id: int = 2,
+        tie_word_embeddings: bool = False,
+        fuse_norm: bool = True,
+        fuse_swiglu: bool = True,
+        fuse_cross_entropy: bool = True,
+        vocab_size: int = 32000,
+        **kwargs,
+    ):
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_heads = num_heads
+        self.num_kv_heads = num_kv_heads
+        self.window_size = window_size
+        self.rope_theta = rope_theta
+        self.max_position_embeddings = max_position_embeddings
+
+        self.hidden_ratio = hidden_ratio
+        self.intermediate_size = intermediate_size
+        self.hidden_act = hidden_act
+
+        self.initializer_range = initializer_range
+        self.elementwise_affine = elementwise_affine
+        self.norm_eps = norm_eps
+        self.use_cache = use_cache
+
+        self.fuse_norm = fuse_norm
+        self.fuse_swiglu = fuse_swiglu
+        self.fuse_cross_entropy = fuse_cross_entropy
+        self.vocab_size = vocab_size
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
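
Aside (not part of the diff): a minimal sketch of instantiating this config with a reduced geometry; all keyword names are taken from the signature above, the values are illustrative:

```python
from fla.models.bitnet import BitNetConfig

config = BitNetConfig(hidden_size=512, num_hidden_layers=4, num_heads=8)
print(config.model_type)  # 'bitnet'
```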
fla/models/bitnet/modeling_bitnet.py
ADDED
@@ -0,0 +1,441 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+import math
+import warnings
+from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint
+from transformers.generation import GenerationMixin
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import logging
+from transformers.utils.deprecation import deprecate_kwarg
+
+from fla.layers.bitattn import BitAttention
+from fla.models.bitnet.configuration_bitnet import BitNetConfig
+from fla.models.utils import Cache
+from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, RMSNorm
+from fla.modules.activations import swiglu
+from fla.modules.fused_bitlinear import FusedBitLinear
+
+if TYPE_CHECKING:
+    from transformers.processing_utils import Unpack
+
+logger = logging.get_logger(__name__)
+
+
+class BitNetMLP(nn.Module):
+
+    def __init__(
+        self,
+        hidden_size: int,
+        hidden_ratio: Optional[int] = None,
+        intermediate_size: Optional[int] = None,
+        hidden_act: str = 'swish',
+        fuse_swiglu: bool = True
+    ) -> BitNetMLP:
+        super().__init__()
+
+        self.hidden_size = hidden_size
+        # the final number of params is `hidden_ratio * hidden_size^2`
+        # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
+        if hidden_ratio is None:
+            hidden_ratio = 4
+        if intermediate_size is None:
+            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
+            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
+        self.hidden_ratio = hidden_ratio
+        self.intermediate_size = intermediate_size
+        self.hidden_act = hidden_act
+        self.fuse_swiglu = fuse_swiglu
+
+        if hidden_act != 'swish':
+            raise ValueError(f'Unsupported hidden_act: {hidden_act}')
+
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        **kwargs: Unpack[Any]
+    ) -> torch.Tensor:
+        gate, y = self.gate_proj(x), self.up_proj(x)
+        return self.down_proj(swiglu(gate, y))
+
+
+class BitNetBlock(nn.Module):
+
+    def __init__(self, config: BitNetConfig, layer_idx: int):
+        super().__init__()
+
+        self.config = config
+        self.layer_idx = layer_idx
+
+        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+        self.attn = BitAttention(
+            hidden_size=config.hidden_size,
+            num_heads=config.num_heads,
+            num_kv_heads=config.num_kv_heads,
+            window_size=config.window_size,
+            rope_theta=config.rope_theta,
+            max_position_embeddings=config.max_position_embeddings,
+            layer_idx=layer_idx
+        )
+        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+        self.mlp = BitNetMLP(
+            hidden_size=config.hidden_size,
+            hidden_ratio=config.hidden_ratio,
+            intermediate_size=config.intermediate_size,
+            hidden_act=config.hidden_act,
+            fuse_swiglu=config.fuse_swiglu
+        )
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        past_key_values: Optional[Tuple[torch.Tensor]] = None,
+        output_attentions: Optional[bool] = False,
+        use_cache: Optional[bool] = False,
+        **kwargs: Unpack[Any]
+    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+
+        residual = hidden_states
+        hidden_states = self.attn_norm(hidden_states)
+        hidden_states, attentions, past_key_values = self.attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            **kwargs
+        )
+        if self.config.fuse_norm:
+            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
+        else:
+            hidden_states = residual + hidden_states
+            residual = hidden_states
+            hidden_states = self.mlp_norm(hidden_states)
+        hidden_states = self.mlp(hidden_states, **kwargs)
+        hidden_states = residual + hidden_states
+
+        outputs = (hidden_states,)
+
+        if output_attentions:
+            outputs += (attentions,)
+
+        if use_cache:
+            outputs += (past_key_values,)
+
+        return outputs
+
+
+class BitNetPreTrainedModel(PreTrainedModel):
+
+    config_class = BitNetConfig
+    base_model_prefix = 'model'
+    supports_gradient_checkpointing = True
+    _no_split_modules = ['BitNetBlock']
+    _supports_cache_class = True
+
+    def __init__(self, *inputs, **kwargs):
+        super().__init__(*inputs, **kwargs)
+
+    def _init_weights(
+        self,
+        module: nn.Module,
+        rescale_prenorm_residual: bool = False,
+        num_residuals_per_layer: int = 2,
+    ):
+        if isinstance(module, (nn.Linear, nn.Conv1d, FusedBitLinear)):
+            # Slightly different from the TF version which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        elif isinstance(module, nn.Embedding):
+            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+        elif hasattr(module, 'reset_parameters'):
+            module.reset_parameters()
+
+        if rescale_prenorm_residual:
+            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+            #
+            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+            p = None
+            if hasattr(module, 'o_proj'):
+                p = module.o_proj.weight
+            elif hasattr(module, 'down_proj'):
+                p = module.down_proj.weight
+            if p is not None:
+                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
+                # We need to reinit p since this code could be called multiple times
+                # Having just p *= scale would repeatedly scale it down
+                nn.init.kaiming_uniform_(p, a=math.sqrt(5))
+                with torch.no_grad():
+                    p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
+
+
+class BitNetModel(BitNetPreTrainedModel):
+
+    def __init__(
+        self,
+        config: BitNetConfig
+    ) -> BitNetModel:
+        super().__init__(config)
+        self.padding_idx = config.pad_token_id
+        self.vocab_size = config.vocab_size
+
+        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+        self.layers = nn.ModuleList([BitNetBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
+        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+
+        self.gradient_checkpointing = False
+
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.embeddings
+
+    def set_input_embeddings(self, value):
+        self.embeddings = value
+
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        **kwargs: Unpack[Any]
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        if output_attentions:
+            warnings.warn(
+                "`BitNetModel` does not support output attention weights now, so `output_attentions` is set to `False`."
+            )
+            output_attentions = False
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # retrieve input_ids and inputs_embeds
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+        elif input_ids is None and inputs_embeds is None:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        if use_cache and not isinstance(past_key_values, Cache):
+            past_key_values = Cache.from_legacy_cache(past_key_values)
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embeddings(input_ids)
+
+        # embed positions
+        hidden_states = inputs_embeds
+
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
+        all_hidden_states = () if output_hidden_states else None
+        all_attns = () if output_attentions else None
+        next_cache = None
+
+        for layer in self.layers:
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+
+            if self.gradient_checkpointing and self.training:
+                layer_outputs = self._gradient_checkpointing_func(
+                    layer.__call__,
+                    hidden_states,
+                    attention_mask,
+                    past_key_values,
+                    output_attentions,
+                    use_cache,
+                    **kwargs
+                )
+            else:
+                layer_outputs = layer(
+                    hidden_states,
+                    attention_mask=attention_mask,
+                    past_key_values=past_key_values,
+                    output_attentions=output_attentions,
+                    use_cache=use_cache,
+                    **kwargs
+                )
+
+            hidden_states = layer_outputs[0]
+
+            if use_cache:
+                next_cache = layer_outputs[2 if output_attentions else 1]
+
+            if output_attentions:
+                all_attns += (layer_outputs[1],)
+
+        hidden_states = self.norm(hidden_states)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        if not return_dict:
+            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)
+
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=next_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_attns
+        )
+
+
+class BitNetForCausalLM(BitNetPreTrainedModel, GenerationMixin):
+
+    _tied_weights_keys = ["lm_head.weight"]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = BitNetModel(config)
+        self.vocab_size = config.vocab_size
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+        self.criterion = None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embeddings
+
+    def set_input_embeddings(self, value):
+        self.model.embeddings = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model = decoder
+
+    def get_decoder(self):
+        return self.model
+
+    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+    def prepare_inputs_for_generation(
+        self,
+        input_ids: torch.LongTensor = None,
+        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        use_cache: bool = True,
+        logits_to_keep: Optional[int] = None,
+        **kwargs
+    ):
+        # only last token for `inputs_ids` if the `past_key_values` is not empty.
+        if past_key_values is not None and len(past_key_values) > 0:
+            input_ids = input_ids[:, -1:]
+        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+        if inputs_embeds is not None and len(past_key_values) == 0:
+            model_inputs = {'inputs_embeds': inputs_embeds}
+        else:
+            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+            # recompiles graphs as the stride of the inputs is a guard.
+            # Ref: https://github.com/huggingface/transformers/pull/29114
+            # TODO: use `next_tokens` directly instead.
+            model_inputs = {'input_ids': input_ids.contiguous()}
+
+        if logits_to_keep is not None:
+            model_inputs['logits_to_keep'] = logits_to_keep
+
+        model_inputs.update({
+            'past_key_values': past_key_values,
+            'use_cache': use_cache,
+            'attention_mask': attention_mask,
+        })
+        return model_inputs
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        logits_to_keep: Optional[int] = 0,
+        **kwargs: Unpack[Any]
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            **kwargs
+        )
+
+        hidden_states = outputs[0]
+        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
+
+        loss, logits = None, None
+        if not fuse_linear_and_cross_entropy or labels is None:
+            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
+        if labels is not None:
+            if getattr(self, 'criterion', None) is None:
+                if fuse_linear_and_cross_entropy:
+                    criterion = FusedLinearCrossEntropyLoss()
+                elif self.config.fuse_cross_entropy:
+                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
+                else:
+                    criterion = nn.CrossEntropyLoss()
+            else:
+                criterion = self.criterion
+            labels = labels.to(hidden_states.device)
+            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
+            if fuse_linear_and_cross_entropy:
+                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
+            else:
+                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
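
Aside (not part of the diff): when `intermediate_size` is left as `None`, `BitNetMLP` rounds `2/3 * hidden_ratio * hidden_size` up to the next multiple of 256. Worked through for the config defaults (`hidden_size=2048`, `hidden_ratio=4`):

```python
hidden_size, hidden_ratio = 2048, 4

intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)       # 5461
intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)  # 5632
print(intermediate_size)
```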
fla/models/delta_net/__init__.py
ADDED
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+
+from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+from fla.models.delta_net.configuration_delta_net import DeltaNetConfig
+from fla.models.delta_net.modeling_delta_net import DeltaNetForCausalLM, DeltaNetModel
+
+AutoConfig.register(DeltaNetConfig.model_type, DeltaNetConfig)
+AutoModel.register(DeltaNetConfig, DeltaNetModel)
+AutoModelForCausalLM.register(DeltaNetConfig, DeltaNetForCausalLM)
+
+__all__ = ['DeltaNetConfig', 'DeltaNetForCausalLM', 'DeltaNetModel']
fla/models/delta_net/configuration_delta_net.py
ADDED
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+from typing import Dict, Optional
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class DeltaNetConfig(PretrainedConfig):
+
+    model_type = 'delta_net'
+    keys_to_ignore_at_inference = ['past_key_values']
+
+    def __init__(
+        self,
+        attn_mode: str = "chunk",
+        hidden_size: int = 2048,
+        expand_k: int = 1,
+        expand_v: int = 1,
+        use_gate: bool = False,
+        use_short_conv: bool = True,
+        conv_size: int = 4,
+        use_beta: bool = True,
+        use_output_norm: bool = True,
+        num_heads: int = 16,
+        qk_norm: str = 'l2',
+        qk_activation: str = 'silu',
+        max_position_embeddings: int = 2048,
+        hidden_ratio: Optional[int] = 4,
+        intermediate_size: Optional[int] = None,
+        hidden_act: str = "swish",
+        num_hidden_layers: int = 24,
+        norm_eps: float = 1e-6,
+        attn: Optional[Dict] = None,
+        use_cache: bool = True,
+        pad_token_id: int = None,
+        bos_token_id: int = 1,
+        eos_token_id: int = 2,
+        tie_word_embeddings: bool = False,
+        initializer_range: float = 0.006,
+        fuse_norm: bool = True,
+        fuse_swiglu: bool = True,
+        fuse_cross_entropy: bool = True,
+        vocab_size: int = 32000,
+        **kwargs
+    ):
+        self.attn_mode = attn_mode
+        self.hidden_size = hidden_size
+        self.expand_k = expand_k
+        self.expand_v = expand_v
+        self.use_gate = use_gate
+        self.use_short_conv = use_short_conv
+        self.conv_size = conv_size
+        self.use_beta = use_beta
+        self.use_output_norm = use_output_norm
+        self.num_heads = num_heads
+        self.qk_norm = qk_norm
+        self.qk_activation = qk_activation
+        self.max_position_embeddings = max_position_embeddings
+
+        self.hidden_ratio = hidden_ratio
+        self.intermediate_size = intermediate_size
+        self.hidden_act = hidden_act
+        self.num_hidden_layers = num_hidden_layers
+        self.norm_eps = norm_eps
+        self.attn = attn
+        self.use_cache = use_cache
+        self.initializer_range = initializer_range
+        self.fuse_norm = fuse_norm
+        self.fuse_swiglu = fuse_swiglu
+        self.fuse_cross_entropy = fuse_cross_entropy
+        self.vocab_size = vocab_size
+
+        if attn is not None:
+            if not isinstance(attn, Dict):
+                raise ValueError("attn must be a dictionary")
+            if 'layers' not in attn:
+                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
+            if 'num_heads' not in attn:
+                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
+            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
+            attn['qkv_bias'] = attn.get('qkv_bias', False)
+            attn['window_size'] = attn.get('window_size', None)
+            attn['rope_theta'] = attn.get('rope_theta', 10000.)
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
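
Aside (not part of the diff): the `attn` argument enables hybrid models — mirroring the check in `modeling_abc.py` above, the listed layer indices get standard softmax attention while the remaining layers keep the linear-attention token mixer. A minimal sketch of a valid value; the missing optional keys are filled with the defaults shown above:

```python
from fla.models.delta_net import DeltaNetConfig

# Layers 2 and 5 become attention layers; num_kv_heads, qkv_bias,
# window_size and rope_theta fall back to their defaults.
config = DeltaNetConfig(attn={'layers': [2, 5], 'num_heads': 16})
print(config.attn['rope_theta'])  # 10000.0
```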
fla/models/gated_deltanet/__init__.py
ADDED
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+
+from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+from fla.models.gated_deltanet.configuration_gated_deltanet import GatedDeltaNetConfig
+from fla.models.gated_deltanet.modeling_gated_deltanet import GatedDeltaNetForCausalLM, GatedDeltaNetModel
+
+AutoConfig.register(GatedDeltaNetConfig.model_type, GatedDeltaNetConfig)
+AutoModel.register(GatedDeltaNetConfig, GatedDeltaNetModel)
+AutoModelForCausalLM.register(GatedDeltaNetConfig, GatedDeltaNetForCausalLM)
+
+__all__ = ['GatedDeltaNetConfig', 'GatedDeltaNetForCausalLM', 'GatedDeltaNetModel']
fla/models/gated_deltanet/configuration_gated_deltanet.py
ADDED
|
@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class GatedDeltaNetConfig(PretrainedConfig):
    model_type = 'gated_deltanet'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        attn_mode: str = "chunk",
        hidden_size: int = 2048,
        expand_v: int = 2,
        use_gate: bool = True,
        use_short_conv: bool = True,
        conv_size: int = 4,
        head_dim: int = 256,
        num_heads: int = 6,
        max_position_embeddings: int = 2048,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        num_hidden_layers: int = 21,
        norm_eps: float = 1e-6,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: Optional[int] = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs
    ):
        self.attn_mode = attn_mode
        self.hidden_size = hidden_size
        self.expand_v = expand_v
        self.use_gate = use_gate
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.head_dim = head_dim
        self.num_heads = num_heads
        self.max_position_embeddings = max_position_embeddings

        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_hidden_layers = num_hidden_layers
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
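
Illustrative sketch (not part of this file): instantiating the config above, pure and hybrid. Only `'layers'` and `'num_heads'` are required in `attn`; the constructor fills in the remaining keys.

    from fla.models.gated_deltanet.configuration_gated_deltanet import GatedDeltaNetConfig

    # Pure GatedDeltaNet: every layer uses the gated delta-rule token mixer.
    pure = GatedDeltaNetConfig(hidden_size=1024, num_heads=4, num_hidden_layers=12)

    # Hybrid: layers 2 and 5 are replaced by standard softmax attention.
    hybrid = GatedDeltaNetConfig(
        hidden_size=1024,
        num_heads=4,
        num_hidden_layers=12,
        attn={'layers': [2, 5], 'num_heads': 8},
    )
    print(hybrid.attn['num_kv_heads'], hybrid.attn['rope_theta'])  # 8 10000.0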
fla/models/gated_deltaproduct/__pycache__/modeling_gated_deltaproduct.cpython-312.pyc
ADDED
Binary file (20.7 kB).
fla/models/gated_deltaproduct/configuration_gated_deltaproduct.py
ADDED
@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class GatedDeltaProductConfig(PretrainedConfig):
    model_type = "gated_deltaproduct"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        attn_mode: str = "chunk",
        hidden_size: int = 2048,
        expand_v: int = 2,
        use_gate: bool = True,
        use_short_conv: bool = True,
        conv_size: int = 4,
        head_dim: int = 256,
        num_heads: int = 6,
        max_position_embeddings: int = 2048,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        num_hidden_layers: int = 21,
        norm_first: bool = False,
        norm_eps: float = 1e-6,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: Optional[int] = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        use_forget_gate: bool = False,   # True: Gated DeltaProduct; False: DeltaProduct
        allow_neg_eigval: bool = False,  # True: eigenvalues in [-1, 1]; False: eigenvalues in [0, 1]
        num_householder: int = 1,
        **kwargs,
    ):
        self.attn_mode = attn_mode
        self.hidden_size = hidden_size
        self.expand_v = expand_v
        self.use_gate = use_gate
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.head_dim = head_dim
        self.num_heads = num_heads
        self.max_position_embeddings = max_position_embeddings

        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_hidden_layers = num_hidden_layers
        self.norm_first = norm_first
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        # DeltaProduct specific
        self.allow_neg_eigval = allow_neg_eigval
        self.num_householder = num_householder
        self.use_forget_gate = use_forget_gate

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if "layers" not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if "num_heads" not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn["num_kv_heads"] = attn.get("num_kv_heads", attn["num_heads"])
            attn["window_size"] = attn.get("window_size", None)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
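
Illustrative sketch (not part of this file): the two flags above select among four variants of the layer. Assuming the package layout shown in this diff:

    from fla.models.gated_deltaproduct.configuration_gated_deltaproduct import GatedDeltaProductConfig

    # DeltaProduct with eigenvalues in [0, 1]: both flags left at their defaults.
    plain = GatedDeltaProductConfig(num_householder=2)

    # Gated DeltaProduct with eigenvalues in [-1, 1]: both flags enabled.
    gated = GatedDeltaProductConfig(num_householder=2, use_forget_gate=True, allow_neg_eigval=True)
    print(plain.use_forget_gate, gated.allow_neg_eigval)  # False True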
fla/models/gated_deltaproduct/modeling_gated_deltaproduct.py
ADDED
@@ -0,0 +1,520 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.activations import ACT2FN
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.gated_deltaproduct import GatedDeltaProduct
from fla.models.gated_deltaproduct.configuration_gated_deltaproduct import GatedDeltaProductConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, RMSNorm
from fla.modules.activations import swiglu_linear
from fla.modules.layernorm import rms_norm_linear

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class GatedDeltaNetMLP(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        norm_first: bool = True,
        norm_eps: float = 1e-5,
    ) -> None:
        super().__init__()

        self.hidden_size = hidden_size
        # the final number of params is `hidden_ratio * hidden_size^2`
        # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.norm_first = norm_first

        if norm_first:
            self.norm = RMSNorm(hidden_size=hidden_size, eps=norm_eps)

        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(
        self,
        x: torch.Tensor,
        **kwargs: Unpack[Dict],
    ) -> torch.Tensor:
        if self.norm_first:
            x = rms_norm_linear(
                x,
                self.norm.weight,
                self.norm.bias,
                self.gate_proj.weight,
                self.gate_proj.bias,
            )
        else:
            x = self.gate_proj(x)
        gate, y = x.chunk(2, -1)
        return swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)

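Worked example (not part of this file): the `intermediate_size` rounding above, evaluated at the config defaults `hidden_size=2048`, `hidden_ratio=4`.

    hidden_size, hidden_ratio = 2048, 4

    intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)       # int(5461.33...) == 5461
    intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)  # round up to a multiple of 256
    print(intermediate_size)  # 5632
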
class GatedDeltaProductBlock(nn.Module):
    def __init__(self, config: GatedDeltaProductConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        if not config.norm_first:
            self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn["layers"]:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn["num_heads"],
                num_kv_heads=config.attn["num_kv_heads"],
                window_size=config.attn["window_size"],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx,
            )
        else:
            self.attn = GatedDeltaProduct(
                mode=config.attn_mode,
                hidden_size=config.hidden_size,
                expand_v=config.expand_v,
                head_dim=config.head_dim,
                num_heads=config.num_heads,
                use_gate=config.use_gate,
                use_forget_gate=config.use_forget_gate,
                use_short_conv=config.use_short_conv,
                conv_size=config.conv_size,
                norm_first=config.norm_first,
                norm_eps=config.norm_eps,
                allow_neg_eigval=config.allow_neg_eigval,
                num_householder=config.num_householder,
                layer_idx=layer_idx,
                # `use_beta_conv` has no default in `GatedDeltaProductConfig`, so it only exists
                # on the config when passed via kwargs; fall back to False when absent.
                use_beta_conv=getattr(config, "use_beta_conv", False),
            )
        if not config.norm_first:
            self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.mlp = GatedDeltaNetMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            norm_first=config.norm_first,
            norm_eps=config.norm_eps,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[Dict],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        if hasattr(self, "attn_norm"):
            hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs,
        )
        if hasattr(self, "mlp_norm"):
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class GatedDeltaProductPreTrainedModel(PreTrainedModel):
    config_class = GatedDeltaProductConfig
    supports_gradient_checkpointing = True
    _no_split_modules = ["GatedDeltaProductBlock"]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            for name, p in module.named_parameters():
                if name in ["o_proj.weight", "down_proj.weight"]:
                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                    # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                    # We need to reinit p since this code could be called multiple times
                    # Having just p *= scale would repeatedly scale it down
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class GatedDeltaProductModel(GatedDeltaProductPreTrainedModel):
    def __init__(self, config: GatedDeltaProductConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GatedDeltaProductBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict],
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn(
                "`GatedDeltaProductModel` does not support `output_attentions` for now, setting it to `False`.",
                stacklevel=2,
            )
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs,
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs,
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns,
        )


class GatedDeltaProductForCausalLM(GatedDeltaProductPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = GatedDeltaProductModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if "past_key_values" in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs,
    ):
        # only keep the last token of `input_ids` if `past_key_values` is passed and non-empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {"input_ids": input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": use_cache,
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict],
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        kwargs.pop("num_items_in_batch", None)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )
        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if self.config.fuse_cross_entropy:
                if fuse_linear_and_cross_entropy:
                    loss_fct = FusedLinearCrossEntropyLoss()
                else:
                    loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
            else:
                loss_fct = nn.CrossEntropyLoss()
            # Enable model parallelism
            labels = labels.to(hidden_states.device)
            # shift labels left by one so that position t is supervised by token t + 1;
            # the final position is padded with ignore_index and contributes no loss
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = loss_fct(
                    hidden_states.view(-1, self.config.hidden_size),
                    labels.view(-1),
                    self.lm_head.weight,
                    self.lm_head.bias,
                )
            else:
                loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss, *output) if loss is not None else output
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
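
Illustrative sketch (not part of this file): the loss path above shifts labels left by one instead of slicing logits, padding the final position with the criterion's `ignore_index` (-100 for PyTorch's `CrossEntropyLoss`).

    import torch

    labels = torch.tensor([[10, 11, 12, 13]])
    ignore_index = -100  # nn.CrossEntropyLoss default

    shifted = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], ignore_index)), 1)
    print(shifted)  # tensor([[  11,   12,   13, -100]])
    # position t is now supervised by token t + 1, and the last position contributes no loss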
fla/models/gla/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.gla.configuration_gla import GLAConfig
from fla.models.gla.modeling_gla import GLAForCausalLM, GLAModel

AutoConfig.register(GLAConfig.model_type, GLAConfig)
AutoModel.register(GLAConfig, GLAModel)
AutoModelForCausalLM.register(GLAConfig, GLAForCausalLM)


__all__ = ['GLAConfig', 'GLAForCausalLM', 'GLAModel']
fla/models/gla/modeling_gla.py
ADDED
@@ -0,0 +1,417 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.gla import GatedLinearAttention
from fla.models.gla.configuration_gla import GLAConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as GLAMLP
from fla.modules import RMSNorm

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class GLABlock(nn.Module):
    def __init__(self, config: GLAConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = GatedLinearAttention(
                mode=config.attn_mode,
                hidden_size=config.hidden_size,
                expand_k=config.expand_k,
                expand_v=config.expand_v,
                num_heads=config.num_heads,
                num_kv_heads=config.num_kv_heads,
                feature_map=config.feature_map,
                use_short_conv=config.use_short_conv,
                conv_size=config.conv_size,
                use_output_gate=config.use_output_gate,
                gate_fn=config.hidden_act,
                elementwise_affine=config.elementwise_affine,
                norm_eps=config.norm_eps,
                clamp_min=config.clamp_min,
                fuse_norm=config.fuse_norm,
                layer_idx=layer_idx
            )
        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = GLAMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[Dict]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class GLAPreTrainedModel(PreTrainedModel):

    config_class = GLAConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['GLABlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        prenorm_residual_strategy: Optional[str] = 'rescale',
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if prenorm_residual_strategy is not None:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                if prenorm_residual_strategy == 'rescale':
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
                elif prenorm_residual_strategy == 'zero':
                    nn.init.zeros_(p)
                else:
                    raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")


class GLAModel(GLAPreTrainedModel):

    def __init__(self, config: GLAConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([GLABlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`GLAModel` does not support `output_attentions` for now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class GLAForCausalLM(GLAPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = GLAModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only keep the last token of `input_ids` if `past_key_values` is passed and non-empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and (past_key_values is None or len(past_key_values) == 0):
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
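
Illustrative sketch (not part of this file): in the forward above, `hidden_states[:, -logits_to_keep:]` keeps the whole sequence when `logits_to_keep == 0` (since `-0 == 0`) and only the trailing positions otherwise, which is what incremental decoding relies on.

    import torch

    hidden_states = torch.randn(2, 5, 8)  # (batch, seq_len, hidden_size)

    print(hidden_states[:, -0:].shape)  # torch.Size([2, 5, 8])  -0 == 0: full sequence
    print(hidden_states[:, -1:].shape)  # torch.Size([2, 1, 8])  decode step: last token only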
fla/models/gsa/modeling_gsa.py
ADDED
@@ -0,0 +1,420 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.gsa import GatedSlotAttention
from fla.models.gsa.configuration_gsa import GSAConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as GSAMLP
from fla.modules import RMSNorm

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class GSABlock(nn.Module):
    def __init__(self, config: GSAConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = GatedSlotAttention(
                hidden_size=config.hidden_size,
                expand_k=config.expand_k,
                expand_v=config.expand_v,
                num_heads=config.num_heads,
                num_kv_heads=config.num_kv_heads,
                num_slots=config.num_slots,
                use_short_conv=config.use_short_conv,
                conv_size=config.conv_size,
                feature_map=config.feature_map,
                use_output_gate=config.use_output_gate,
                use_norm=config.use_norm,
                gate_fn=config.hidden_act,
                gate_logit_normalizer=config.gate_logit_normalizer,
                elementwise_affine=config.elementwise_affine,
                norm_eps=config.norm_eps,
                fuse_norm=config.fuse_norm,
                layer_idx=layer_idx
            )
        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = GSAMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[Dict]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class GSAPreTrainedModel(PreTrainedModel):

    config_class = GSAConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['GSABlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        prenorm_residual_strategy: Optional[str] = 'rescale',
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if prenorm_residual_strategy is not None:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                if prenorm_residual_strategy == 'rescale':
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
                elif prenorm_residual_strategy == 'zero':
                    nn.init.zeros_(p)
                else:
                    raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")


class GSAModel(GSAPreTrainedModel):

    def __init__(self, config: GSAConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([GSABlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`GSAModel` does not support `output_attentions` for now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class GSAForCausalLM(GSAPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):

        super().__init__(config)
        self.model = GSAModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only keep the last token of `input_ids` if `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            # Enable model parallelism
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
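Note: the `'rescale'` branch of `_init_weights` above implements the GPT-2 depth-scaled initialization: every residual-branch output projection (`o_proj`, `down_proj`) is re-drawn and then divided by sqrt(2N), with two residual additions per block across N layers. A minimal numeric sketch of the effect, in pure PyTorch with illustrative sizes only:

import math

import torch
import torch.nn as nn

num_residuals_per_layer, num_hidden_layers = 2, 24  # illustrative depth
p = nn.Linear(128, 128).weight  # stands in for an o_proj/down_proj weight

nn.init.kaiming_uniform_(p, a=math.sqrt(5))
std_before = p.std().item()
with torch.no_grad():
    p /= math.sqrt(num_residuals_per_layer * num_hidden_layers)
print(std_before / p.std().item())  # ~6.93, i.e. sqrt(2 * 24)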
fla/models/hgrn2/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.hgrn2.configuration_hgrn2 import HGRN2Config
from fla.models.hgrn2.modeling_hgrn2 import HGRN2ForCausalLM, HGRN2Model

AutoConfig.register(HGRN2Config.model_type, HGRN2Config)
AutoModel.register(HGRN2Config, HGRN2Model)
AutoModelForCausalLM.register(HGRN2Config, HGRN2ForCausalLM)


__all__ = ['HGRN2Config', 'HGRN2ForCausalLM', 'HGRN2Model']
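Note: every `__init__.py` under `fla/models` follows this same registration pattern, which is what lets these custom model types resolve through the `transformers` Auto factories. A hedged sketch of what that enables; the small config sizes are assumptions (passed through `AutoConfig.for_model` to the config constructor), and importing the subpackage is what triggers the `register()` calls above:

from transformers import AutoConfig, AutoModelForCausalLM

import fla.models.hgrn2  # noqa: F401  -- runs the register() calls above

config = AutoConfig.for_model('hgrn2', num_hidden_layers=2, hidden_size=128, vocab_size=1000)
model = AutoModelForCausalLM.from_config(config)
print(type(model).__name__)  # HGRN2ForCausalLM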
fla/models/lightnet/modeling_lightnet.py
ADDED
@@ -0,0 +1,410 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.lightnet import LightNetAttention
from fla.models.lightnet.configuration_lightnet import LightNetConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as LightNetMLP
from fla.modules import RMSNorm

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class LightNetBlock(nn.Module):
    def __init__(self, config: LightNetConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = LightNetAttention(
                mode=config.attn_mode,
                hidden_size=config.hidden_size,
                num_heads=config.num_heads,
                expand_ratio=config.expand_ratio,
                use_short_conv=config.use_short_conv,
                conv_size=config.conv_size,
                gate_low_rank_dim=config.gate_low_rank_dim,
                elementwise_affine=config.elementwise_affine,
                norm_eps=config.norm_eps,
                layer_idx=layer_idx
            )
        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = LightNetMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[Dict]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class LightNetPreTrainedModel(PreTrainedModel):

    config_class = LightNetConfig
    supports_gradient_checkpointing = True
    _no_split_modules = ['LightNetBlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        prenorm_residual_strategy: Optional[str] = 'rescale',
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if prenorm_residual_strategy is not None:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                if prenorm_residual_strategy == 'rescale':
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
                elif prenorm_residual_strategy == 'zero':
                    nn.init.zeros_(p)
                else:
                    raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")


class LightNetModel(LightNetPreTrainedModel):

    def __init__(self, config: LightNetConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([LightNetBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`LightNetModel` does not support `output_attentions` for now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class LightNetForCausalLM(LightNetPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = LightNetModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs: Unpack[Dict]
    ):
        # only keep the last token of `input_ids` if `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
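Note: both `GSABlock` and `LightNetBlock` substitute standard softmax attention for any layer index listed in `config.attn['layers']`, yielding a hybrid stack. A sketch of how such a config might be wired up; the exact `LightNetConfig` fields, their defaults, and the `fla.models.lightnet` exports are assumptions based on the pattern of the other models here:

from fla.models.lightnet import LightNetConfig, LightNetModel  # assumed exports

config = LightNetConfig(
    num_hidden_layers=4,
    hidden_size=128,
    # layers 1 and 3 use standard (optionally windowed) attention;
    # the remaining layers use LightNetAttention
    attn={'layers': [1, 3], 'num_heads': 4, 'num_kv_heads': 4,
          'qkv_bias': False, 'window_size': None},
)
model = LightNetModel(config)
print([type(layer.attn).__name__ for layer in model.layers])
# expected: ['LightNetAttention', 'Attention', 'LightNetAttention', 'Attention']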
fla/models/linear_attn/__init__.py
ADDED
@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.linear_attn.configuration_linear_attn import LinearAttentionConfig
from fla.models.linear_attn.modeling_linear_attn import LinearAttentionForCausalLM, LinearAttentionModel

AutoConfig.register(LinearAttentionConfig.model_type, LinearAttentionConfig)
AutoModel.register(LinearAttentionConfig, LinearAttentionModel)
AutoModelForCausalLM.register(LinearAttentionConfig, LinearAttentionForCausalLM)

__all__ = ['LinearAttentionConfig', 'LinearAttentionForCausalLM', 'LinearAttentionModel']
fla/models/linear_attn/configuration_linear_attn.py
ADDED
@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class LinearAttentionConfig(PretrainedConfig):

    model_type = 'linear_attn'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        attn_mode: str = "fused_chunk",
        hidden_size: int = 2048,
        expand_k: int = 1,
        expand_v: int = 1,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 4,
        num_kv_heads: Optional[int] = None,
        feature_map: str = "elementwise_product",
        tie_feature_map_qk: bool = False,
        norm_q: bool = False,
        norm_k: bool = False,
        norm_feature_map: bool = False,
        hidden_act: str = "swish",
        max_position_embeddings: int = 2048,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs
    ):
        self.attn_mode = attn_mode
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.feature_map = feature_map
        self.tie_feature_map_qk = tie_feature_map_qk
        self.norm_q = norm_q
        self.norm_k = norm_k
        self.norm_feature_map = norm_feature_map
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
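Note: the constructor above validates the hybrid-attention dict and back-fills the optional keys in place, so downstream code (e.g. the block constructors) can index them unconditionally. A small sketch of that behavior, assuming the package imports cleanly:

from fla.models.linear_attn import LinearAttentionConfig

config = LinearAttentionConfig(attn={'layers': [0], 'num_heads': 8})
# Missing keys are filled with defaults by __init__:
print(config.attn)
# {'layers': [0], 'num_heads': 8, 'num_kv_heads': 8, 'qkv_bias': False,
#  'window_size': None, 'rope_theta': 10000.0}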
fla/models/linear_attn/modeling_linear_attn.py
ADDED
@@ -0,0 +1,406 @@
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import warnings
|
| 7 |
+
from typing import List, Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
import torch.utils.checkpoint
|
| 12 |
+
from transformers.generation import GenerationMixin
|
| 13 |
+
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
|
| 14 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 15 |
+
from transformers.utils import logging
|
| 16 |
+
from transformers.utils.deprecation import deprecate_kwarg
|
| 17 |
+
|
| 18 |
+
from fla.layers.attn import Attention
|
| 19 |
+
from fla.layers.linear_attn import LinearAttention
|
| 20 |
+
from fla.models.linear_attn.configuration_linear_attn import LinearAttentionConfig
|
| 21 |
+
from fla.models.utils import Cache
|
| 22 |
+
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
|
| 23 |
+
from fla.modules import GatedMLP as LinearAttentionMLP
|
| 24 |
+
from fla.modules import RMSNorm
|
| 25 |
+
|
| 26 |
+
logger = logging.get_logger(__name__)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class LinearAttentionBlock(nn.Module):
|
| 30 |
+
def __init__(self, config: LinearAttentionConfig, layer_idx: int):
|
| 31 |
+
super().__init__()
|
| 32 |
+
|
| 33 |
+
self.config = config
|
| 34 |
+
self.layer_idx = layer_idx
|
| 35 |
+
|
| 36 |
+
self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
|
| 37 |
+
if config.attn is not None and layer_idx in config.attn['layers']:
|
| 38 |
+
self.attn = Attention(
|
| 39 |
+
hidden_size=config.hidden_size,
|
| 40 |
+
num_heads=config.attn['num_heads'],
|
| 41 |
+
num_kv_heads=config.attn['num_kv_heads'],
|
| 42 |
+
qkv_bias=config.attn['qkv_bias'],
|
| 43 |
+
window_size=config.attn['window_size'],
|
| 44 |
+
rope_theta=config.attn['rope_theta'],
|
| 45 |
+
max_position_embeddings=config.max_position_embeddings,
|
| 46 |
+
layer_idx=layer_idx
|
| 47 |
+
)
|
| 48 |
+
else:
|
| 49 |
+
self.attn = LinearAttention(
|
| 50 |
+
mode=config.attn_mode,
|
| 51 |
+
hidden_size=config.hidden_size,
|
| 52 |
+
expand_k=config.expand_k,
|
| 53 |
+
expand_v=config.expand_v,
|
| 54 |
+
num_heads=config.num_heads,
|
| 55 |
+
num_kv_heads=config.num_kv_heads,
|
| 56 |
+
feature_map=config.feature_map,
|
| 57 |
+
tie_feature_map_qk=config.tie_feature_map_qk,
|
| 58 |
+
norm_q=config.norm_q,
|
| 59 |
+
norm_k=config.norm_k,
|
| 60 |
+
do_feature_map_norm=config.norm_feature_map,
|
| 61 |
+
elementwise_affine=config.elementwise_affine,
|
| 62 |
+
norm_eps=config.norm_eps,
|
| 63 |
+
layer_idx=layer_idx
|
| 64 |
+
)
|
| 65 |
+
self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
|
| 66 |
+
self.mlp = LinearAttentionMLP(
|
| 67 |
+
hidden_size=config.hidden_size,
|
| 68 |
+
hidden_ratio=config.hidden_ratio,
|
| 69 |
+
intermediate_size=config.intermediate_size,
|
| 70 |
+
hidden_act=config.hidden_act,
|
| 71 |
+
fuse_swiglu=config.fuse_swiglu
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
def forward(
|
| 75 |
+
self,
|
| 76 |
+
hidden_states: torch.Tensor,
|
| 77 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 78 |
+
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| 79 |
+
use_cache: Optional[bool] = False,
|
| 80 |
+
output_attentions: Optional[bool] = False,
|
| 81 |
+
**kwargs,
|
| 82 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 83 |
+
residual = hidden_states
|
| 84 |
+
# currently not supported
|
| 85 |
+
attentions, past_key_values = None, None
|
| 86 |
+
hidden_states = self.attn_norm(hidden_states)
|
| 87 |
+
hidden_states = self.attn(hidden_states=hidden_states, **kwargs)
|
| 88 |
+
if self.config.fuse_norm:
|
| 89 |
+
hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
|
| 90 |
+
else:
|
| 91 |
+
hidden_states = residual + hidden_states
|
| 92 |
+
residual = hidden_states
|
| 93 |
+
hidden_states = self.mlp_norm(hidden_states)
|
| 94 |
+
hidden_states = self.mlp(hidden_states, **kwargs)
|
| 95 |
+
hidden_states = residual + hidden_states
|
| 96 |
+
|
| 97 |
+
outputs = (hidden_states, attentions, past_key_values)
|
| 98 |
+
|
| 99 |
+
return outputs
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class LinearAttentionPreTrainedModel(PreTrainedModel):
|
| 103 |
+
|
| 104 |
+
config_class = LinearAttentionConfig
|
| 105 |
+
base_model_prefix = 'model'
|
| 106 |
+
supports_gradient_checkpointing = True
|
| 107 |
+
_no_split_modules = ['LinearAttentionBlock']
|
| 108 |
+
_supports_cache_class = True
|
| 109 |
+
|
| 110 |
+
def __init__(self, *inputs, **kwargs):
|
| 111 |
+
super().__init__(*inputs, **kwargs)
|
| 112 |
+
|
| 113 |
+
def _init_weights(
|
| 114 |
+
self,
|
| 115 |
+
module: nn.Module,
|
| 116 |
+
prenorm_residual_strategy: Optional[str] = 'rescale',
|
| 117 |
+
num_residuals_per_layer: int = 2,
|
| 118 |
+
):
|
| 119 |
+
if isinstance(module, (nn.Linear, nn.Conv1d)):
|
| 120 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
| 121 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
| 122 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 123 |
+
if module.bias is not None:
|
| 124 |
+
nn.init.zeros_(module.bias)
|
| 125 |
+
elif isinstance(module, nn.Embedding):
|
| 126 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 127 |
+
elif hasattr(module, 'reset_parameters'):
|
| 128 |
+
module.reset_parameters()
|
| 129 |
+
|
| 130 |
+
if prenorm_residual_strategy is not None:
|
| 131 |
+
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
|
| 132 |
+
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
|
| 133 |
+
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
|
| 134 |
+
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
|
| 135 |
+
#
|
| 136 |
+
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
|
| 137 |
+
p = None
|
| 138 |
+
if hasattr(module, 'o_proj'):
|
| 139 |
+
p = module.o_proj.weight
|
| 140 |
+
elif hasattr(module, 'down_proj'):
|
| 141 |
+
p = module.down_proj.weight
|
| 142 |
+
if p is not None:
|
| 143 |
+
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
|
| 144 |
+
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
|
| 145 |
+
# We need to reinit p since this code could be called multiple times
|
| 146 |
+
# Having just p *= scale would repeatedly scale it down
|
| 147 |
+
if prenorm_residual_strategy == 'rescale':
|
| 148 |
+
nn.init.kaiming_uniform_(p, a=math.sqrt(5))
|
| 149 |
+
with torch.no_grad():
|
| 150 |
+
p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
|
| 151 |
+
elif prenorm_residual_strategy == 'zero':
|
| 152 |
+
nn.init.zeros_(p)
|
| 153 |
+
else:
|
| 154 |
+
raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
class LinearAttentionModel(LinearAttentionPreTrainedModel):

    def __init__(self, config: LinearAttentionConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([LinearAttentionBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn(
                "`LinearAttentionModel` does not support output attention weights now, "
                "so `output_attentions` is set to `False`."
            )
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )
class LinearAttentionForCausalLM(LinearAttentionPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = LinearAttentionModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only keep the last token of `input_ids` if `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
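As a sanity check for the class above, a minimal forward-pass sketch. The `LinearAttentionConfig` keyword arguments below (tiny sizes, fused kernels switched off so only plain PyTorch ops are needed) are assumptions based on the configuration file listed earlier in this diff, not values copied from it, and the attention layer itself uses Triton kernels, so a CUDA-capable environment is assumed:

import torch

from fla.models.linear_attn import LinearAttentionConfig, LinearAttentionForCausalLM

# assumed kwargs; real defaults live in configuration_linear_attn.py
config = LinearAttentionConfig(
    hidden_size=128, num_hidden_layers=2, num_heads=4, vocab_size=1000,
    fuse_norm=False, fuse_cross_entropy=False,
)
model = LinearAttentionForCausalLM(config).cuda().eval()

input_ids = torch.randint(0, config.vocab_size, (1, 16), device='cuda')
out = model(input_ids=input_ids, labels=input_ids)
print(out.loss, out.logits.shape)  # scalar loss, (1, 16, 1000)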
fla/models/mamba/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.mamba.configuration_mamba import MambaConfig
from fla.models.mamba.modeling_mamba import MambaBlock, MambaForCausalLM, MambaModel

AutoConfig.register(MambaConfig.model_type, MambaConfig, True)
AutoModel.register(MambaConfig, MambaModel, True)
AutoModelForCausalLM.register(MambaConfig, MambaForCausalLM, True)


__all__ = ['MambaConfig', 'MambaForCausalLM', 'MambaModel', 'MambaBlock']
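The three `register` calls make this model discoverable through the transformers Auto classes; the trailing `True` is the `exist_ok` flag, which lets the registration override transformers' own built-in `mamba` entry. A short sketch of what this enables, assuming `fla.models.mamba` is importable as laid out in this diff (the tiny sizes are illustrative only):

from transformers import AutoConfig, AutoModelForCausalLM

import fla.models.mamba  # noqa: F401  (importing runs the registrations above)

config = AutoConfig.for_model('mamba', num_hidden_layers=2, hidden_size=256)
model = AutoModelForCausalLM.from_config(config)
print(type(model).__name__)  # MambaForCausalLM, the fla implementation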
fla/models/mamba/configuration_mamba.py
ADDED
@@ -0,0 +1,166 @@
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAMBA configuration"""

import math

from transformers.configuration_utils import PretrainedConfig


class MambaConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`MambaModel`]. It is used to instantiate a MAMBA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the MAMBA
    [state-spaces/mamba-2.8b](https://huggingface.co/state-spaces/mamba-2.8b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*):
            Vocabulary size of the Mamba model. Default: 32000.
        hidden_size (`int`, *optional*):
            Dimensionality of the embeddings and hidden states. Default: 2048.
        state_size (`int`, *optional*):
            Shape of the state space latents. Default: 16.
        num_hidden_layers (`int`, *optional*):
            Number of hidden layers in the model. Default: 48.
        layer_norm_epsilon (`float`, *optional*):
            The epsilon to use in the layer normalization layers. Default: 1e-5.
        pad_token_id (`int`, *optional*):
            Padding token id. Default: 0.
        bos_token_id (`int`, *optional*):
            The id of the beginning of sentence token in the vocabulary. Default: 1.
        eos_token_id (`int`, *optional*):
            The id of the end of sentence token in the vocabulary. Default: 2.
        expand (`int`, *optional*):
            Expanding factor used to determine the intermediate size. Default: 2.
        conv_kernel (`int`, *optional*):
            Size of the convolution kernel. Default: 4.
        use_bias (`bool`, *optional*):
            Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block. Default: `False`.
        use_conv_bias (`bool`, *optional*):
            Whether or not to use bias in the convolution layer of the mixer block. Default: `True`.
        hidden_act (`str`, *optional*):
            The non-linear activation function (function or string) in the decoder. Default: `"silu"`.
        initializer_range (`float`, *optional*):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Default: 0.1.
        residual_in_fp32 (`bool`, *optional*):
            Whether or not residuals should be in `float32`.
            If set to `False` residuals will keep the same `dtype` as the rest of the model. Default: `False`.
        time_step_rank (`Union[int, str]`, *optional*):
            Rank of the discretization projection matrix.
            `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`. Default: `"auto"`.
        time_step_scale (`float`, *optional*):
            Scale used to scale `dt_proj.bias`. Default: 1.0.
        time_step_min (`float`, *optional*):
            Minimum `time_step` used to bound `dt_proj.bias`. Default: 0.001.
        time_step_max (`float`, *optional*):
            Maximum `time_step` used to bound `dt_proj.bias`. Default: 0.1.
        time_step_init_scheme (`str`, *optional*):
            Init scheme used for `dt_proj.weight`. Should be one of `["random", "uniform"]`. Default: `"random"`.
        time_step_floor (`float`, *optional*):
            Minimum clamping value of the `dt_proj.bias` layer initialization. Default: 0.0001.
        rescale_prenorm_residual (`bool`, *optional*):
            Whether or not to rescale `out_proj` weights when initializing. Default: `False`.
        use_cache (`bool`, *optional*):
            Whether or not the cache should be used. Default: `True`.


    Example:

    ```python
    >>> from transformers import MambaConfig, MambaModel

    >>> # Initializing a Mamba configuration
    >>> configuration = MambaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MambaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mamba"

    def __init__(
        self,
        vocab_size: int = 32000,
        hidden_size: int = 2048,
        state_size: int = 16,
        num_hidden_layers: int = 48,
        layer_norm_epsilon: float = 1e-5,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        expand: int = 2,
        conv_kernel: int = 4,
        use_bias: bool = False,
        use_conv_bias: bool = True,
        hidden_act: str = "silu",
        initializer_range: float = 0.1,
        residual_in_fp32: bool = False,
        time_step_rank: str = "auto",
        time_step_scale: float = 1.0,
        time_step_min: float = 0.001,
        time_step_max: float = 0.1,
        time_step_init_scheme: str = "random",
        time_step_floor: float = 1e-4,
        rescale_prenorm_residual: bool = False,
        use_cache: bool = True,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        tie_word_embeddings: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.state_size = state_size
        self.num_hidden_layers = num_hidden_layers
        self.layer_norm_epsilon = layer_norm_epsilon
        self.conv_kernel = conv_kernel
        self.expand = expand
        self.intermediate_size = int(expand * self.hidden_size)
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.use_bias = use_bias
        self.use_conv_bias = use_conv_bias
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank
        self.time_step_scale = time_step_scale
        self.time_step_min = time_step_min
        self.time_step_max = time_step_max
        self.time_step_init_scheme = time_step_init_scheme
        self.time_step_floor = time_step_floor
        self.rescale_prenorm_residual = rescale_prenorm_residual
        self.residual_in_fp32 = residual_in_fp32
        self.use_cache = use_cache
        self.fuse_norm = fuse_norm
        self.fuse_cross_entropy = fuse_cross_entropy

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs
        )
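One detail worth noting in `__init__` above: `time_step_rank='auto'` is resolved eagerly to `math.ceil(hidden_size / 16)`, so the stored attribute is always an integer. A quick illustration:

import math

from fla.models.mamba import MambaConfig

config = MambaConfig()                   # hidden_size defaults to 2048
assert config.time_step_rank == math.ceil(2048 / 16) == 128

config = MambaConfig(time_step_rank=96)  # explicit ints pass through unchanged
assert config.time_step_rank == 96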
fla/models/mamba2/configuration_mamba2.py
ADDED
@@ -0,0 +1,170 @@
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAMBA2 configuration"""

import math

from transformers.configuration_utils import PretrainedConfig


class Mamba2Config(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`Mamba2Model`]. It is used to instantiate a MAMBA2
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the MAMBA2
    [state-spaces/mamba2-2.8b](https://huggingface.co/state-spaces/mamba2-2.8b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        num_heads (`int`, *optional*, defaults to 64):
            Number of heads for the evolution matrices of mamba 2.
        head_dim (`int`, *optional*, defaults to 64):
            Dimension of each head.
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the MAMBA2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Mamba2Model`].
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the embeddings and hidden states.
        state_size (`int`, *optional*, defaults to 128):
            Shape of the state space latents.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the model.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the end of sentence token in the vocabulary.
        expand (`int`, *optional*, defaults to 2):
            Expanding factor used to determine the intermediate size.
        conv_kernel (`int`, *optional*, defaults to 4):
            Size of the convolution kernel.
        n_groups (`int`, *optional*, defaults to 1):
            Number of groups for the evolution matrices of mamba 2.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block.
        use_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the convolution layer of the mixer block.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.1):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        residual_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether or not residuals should be in `float32`.
            If set to `False` residuals will keep the same `dtype` as the rest of the model.
        time_step_rank (`Union[int, str]`, *optional*, defaults to `"auto"`):
            Rank of the discretization projection matrix.
            `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`.
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_floor (`float`, *optional*, defaults to 0.0001):
            Minimum clamping value of the `dt_proj.bias` layer initialization.
        time_step_limit (`tuple`, *optional*, defaults to `(0.0, inf)`):
            Accepted range of time step values.
        rescale_prenorm_residual (`bool`, *optional*, defaults to `True`):
            Whether or not to rescale `out_proj` weights when initializing.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the cache should be used.
        rms_norm (`bool`, *optional*, defaults to `True`):
            Whether to use RMS norm or not.
        chunk_size (`int`, *optional*, defaults to 256):
            Size of the chunks that will comprise the sequence.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie word embeddings or not.
    """

    model_type = "mamba2"

    def __init__(
        self,
        num_heads: int = 64,
        head_dim: int = 64,
        vocab_size: int = 32000,
        hidden_size: int = 2048,
        state_size: int = 128,
        num_hidden_layers: int = 48,
        layer_norm_epsilon: float = 1e-5,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        expand: int = 2,
        conv_kernel: int = 4,
        n_groups: int = 1,
        use_bias: bool = False,
        use_conv_bias: bool = True,
        hidden_act: str = "silu",
        initializer_range: float = 0.1,
        residual_in_fp32: bool = True,
        time_step_rank: str = "auto",
        time_step_min: float = 0.001,
        time_step_max: float = 0.1,
        time_step_floor: float = 1e-4,
        time_step_limit=(0.0, float("inf")),
        rescale_prenorm_residual: bool = True,
        use_cache: bool = True,
        rms_norm: bool = True,
        chunk_size: int = 256,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        tie_word_embeddings: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.state_size = state_size
        self.num_hidden_layers = num_hidden_layers
        self.layer_norm_epsilon = layer_norm_epsilon
        self.conv_kernel = conv_kernel
        self.expand = expand

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.use_bias = use_bias
        self.use_conv_bias = use_conv_bias
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.time_step_rank = (
            math.ceil(self.hidden_size / 16)
            if time_step_rank == "auto"
            else time_step_rank
        )
        self.time_step_min = time_step_min
        self.time_step_max = time_step_max
        self.time_step_floor = time_step_floor
        self.rescale_prenorm_residual = rescale_prenorm_residual
        self.residual_in_fp32 = residual_in_fp32
        self.use_cache = use_cache
        self.n_groups = n_groups
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.rms_norm = rms_norm
        self.chunk_size = chunk_size
        self.time_step_limit = time_step_limit
        self.fuse_norm = fuse_norm
        self.fuse_cross_entropy = fuse_cross_entropy
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
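With the defaults above, the SSM inner width implied by `expand * hidden_size` matches the head layout exactly: 64 heads of dimension 64 give 4096 = 2 * 2048. A small check, assuming the modeling code derives the inner width this way (the config itself does not store an `intermediate_size`) and that `fla.models.mamba2` re-exports `Mamba2Config`:

from fla.models.mamba2 import Mamba2Config

config = Mamba2Config()
intermediate_size = config.expand * config.hidden_size   # assumed derivation
assert intermediate_size == 4096
assert config.num_heads * config.head_dim == intermediate_size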
fla/models/nsa/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (685 Bytes). View file
fla/models/nsa/configuration_nsa.py
ADDED
@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class NSAConfig(PretrainedConfig):

    model_type = 'nsa'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        hidden_size: int = 2048,
        num_hidden_layers: int = 24,
        num_heads: int = 64,
        num_kv_heads: int = 4,
        head_dim: int = 32,
        qkv_bias: bool = False,
        block_size: int = 64,
        block_counts: Optional[int] = 16,
        window_size: Optional[int] = 512,
        rope_theta: Optional[float] = 10000.,
        max_position_embeddings: int = 2048,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        initializer_range: float = 0.006,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: Optional[int] = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.head_dim = head_dim
        self.qkv_bias = qkv_bias
        self.block_size = block_size
        self.block_counts = block_counts
        self.window_size = window_size
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act

        self.initializer_range = initializer_range
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_cache = use_cache

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
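The defaults above encode a fairly aggressive grouped-query layout: 64 query heads share only 4 key/value heads (16 queries per KV group), each selected block covers 64 tokens, and up to 16 blocks plus a 512-token sliding window are visible per query. A short sketch of those relationships, assuming the package `__init__` re-exports `NSAConfig`:

from fla.models.nsa import NSAConfig

config = NSAConfig()
assert config.num_heads // config.num_kv_heads == 16     # GQA group size
sparse_span = config.block_counts * config.block_size    # max tokens in the sparse branch
print(sparse_span, config.window_size)                   # 1024 512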
fla/models/nsa/modeling_nsa.py
ADDED
@@ -0,0 +1,398 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.nsa import NativeSparseAttention
from fla.models.nsa.configuration_nsa import NSAConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as NSAMLP
from fla.modules import RMSNorm

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class NSABlock(nn.Module):
    def __init__(self, config: NSAConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.attn = NativeSparseAttention(
            hidden_size=config.hidden_size,
            num_heads=config.num_heads,
            num_kv_heads=config.num_kv_heads,
            qkv_bias=config.qkv_bias,
            block_size=config.block_size,
            block_counts=config.block_counts,
            window_size=config.window_size,
            rope_theta=config.rope_theta,
            max_position_embeddings=config.max_position_embeddings,
            layer_idx=layer_idx
        )
        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = NSAMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[Dict]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class NSAPreTrainedModel(PreTrainedModel):

    config_class = NSAConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['NSABlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        prenorm_residual_strategy: Optional[str] = 'rescale',
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if prenorm_residual_strategy is not None:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            #   > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            #   > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            #   > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                if prenorm_residual_strategy == 'rescale':
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
                elif prenorm_residual_strategy == 'zero':
                    nn.init.zeros_(p)
                else:
                    raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")


class NSAModel(NSAPreTrainedModel):

    def __init__(self, config: NSAConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([NSABlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`NSAModel` does not support `output_attentions` for now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class NSAForCausalLM(NSAPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = NSAModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only keep the last token of `input_ids` if `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
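The loss path above shifts the labels left by one position and pads the tail with `ignore_index`, rather than slicing the logits; that keeps logits and labels the same length, which the fused criteria require. The same transform in isolation, with hypothetical token ids:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()  # ignore_index defaults to -100
labels = torch.tensor([[5, 7, 9, 2]])

# position t is trained to predict token t+1; the final position has no target
shifted = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
print(shifted)  # tensor([[   7,    9,    2, -100]])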
fla/models/retnet/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.retnet.configuration_retnet import RetNetConfig
from fla.models.retnet.modeling_retnet import RetNetForCausalLM, RetNetModel

AutoConfig.register(RetNetConfig.model_type, RetNetConfig)
AutoModel.register(RetNetConfig, RetNetModel)
AutoModelForCausalLM.register(RetNetConfig, RetNetForCausalLM)


__all__ = ['RetNetConfig', 'RetNetForCausalLM', 'RetNetModel']
fla/models/retnet/configuration_retnet.py
ADDED
@@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class RetNetConfig(PretrainedConfig):

    model_type = 'retnet'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        attn_mode: str = "chunk",
        hidden_size: int = 2048,
        expand_k: int = 1,
        expand_v: int = 2,
        hidden_ratio: Optional[int] = 2,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 8,
        num_kv_heads: Optional[int] = None,
        feature_map: Optional[str] = None,
        hidden_act: str = "swish",
        use_short_conv: bool = False,
        conv_size: int = 4,
        use_output_gate: bool = True,
        max_position_embeddings: int = 2048,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs
    ) -> RetNetConfig:
        self.attn_mode = attn_mode
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.feature_map = feature_map
        self.hidden_act = hidden_act
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.use_output_gate = use_output_gate
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
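The validation block above is the contract for hybrid models: `attn['layers']` and `attn['num_heads']` are mandatory, and the remaining keys are defaulted in place. A small sketch with made-up values:

# Sketch of the hybrid-attention config contract enforced above.
from fla.models.retnet import RetNetConfig

config = RetNetConfig(
    num_hidden_layers=8,
    attn={'layers': [1, 5], 'num_heads': 8},  # softmax attention at layers 1 and 5
)
print(config.attn['num_kv_heads'], config.attn['rope_theta'])  # 8 10000.0

try:
    RetNetConfig(attn={'num_heads': 8})  # missing 'layers'
except ValueError as e:
    print(e)  # Layer indices must be provided to initialize hybrid attention layers
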
fla/models/rwkv6/modeling_rwkv6.py
ADDED
@@ -0,0 +1,480 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.rwkv6 import LerpLinear, RWKV6Attention
from fla.models.rwkv6.configuration_rwkv6 import RWKV6Config
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, LayerNorm
from fla.modules.activations import ACT2FN

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class RWKV6FeedForward(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = 'sqrelu',
        layer_idx: int = None
    ) -> RWKV6FeedForward:
        super().__init__()

        self.hidden_size = hidden_size
        if hidden_ratio is None:
            hidden_ratio = 3.5
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio)
            intermediate_size = 32 * ((intermediate_size + 32 - 1) // 32)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size

        self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))

        self.key = LerpLinear(hidden_size, intermediate_size)
        self.value = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.receptance = LerpLinear(hidden_size, hidden_size)
        self.act_fn = ACT2FN[hidden_act]

        self.layer_idx = layer_idx

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        state: Optional[Cache] = None
    ) -> torch.Tensor:
        if attention_mask is not None:
            x = x.mul_(attention_mask[:, -x.shape[-2]:, None])
        if x.shape[1] == 1 and state is not None and state[self.layer_idx]['ffn_state'] is not None:
            shifted = state[self.layer_idx]['ffn_state'].unsqueeze(1)
        else:
            shifted = self.time_shift(x)
            if state is not None and state[self.layer_idx]['ffn_state'] is not None:
                shifted[:, 0] = state[self.layer_idx]['ffn_state']
        delta = shifted - x
        key = self.act_fn(self.key(x, delta))
        value = self.value(key)
        receptance = self.receptance(x, delta)

        if state is not None:
            # no need to update the offset twice
            state.update(ffn_state=x[:, -1], layer_idx=self.layer_idx, offset=0)
        return receptance.sigmoid() * value, state


class RWKV6Block(nn.Module):
    def __init__(self, config: RWKV6Config, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        if config.norm_first and layer_idx == 0:
            self.pre_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
                config.hidden_size,
                bias=config.norm_bias,
                eps=config.norm_eps
            )
        self.attn_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = RWKV6Attention(
                mode=config.attn_mode,
                hidden_size=config.hidden_size,
                expand_k=config.expand_k,
                expand_v=config.expand_v,
                num_heads=config.num_heads,
                proj_low_rank_dim=config.proj_low_rank_dim,
                gate_low_rank_dim=config.gate_low_rank_dim,
                norm_eps=config.norm_eps,
                fuse_norm=config.fuse_norm,
                layer_idx=layer_idx
            )
        self.ffn_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )
        self.ffn = RWKV6FeedForward(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            layer_idx=layer_idx
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = self.pre_norm(hidden_states) if hasattr(self, 'pre_norm') else hidden_states
        hidden_states = self.attn_norm(residual)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.ffn_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.ffn_norm(hidden_states)
        hidden_states, past_key_values = self.ffn(hidden_states, attention_mask, past_key_values)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class RWKV6PreTrainedModel(PreTrainedModel):

    config_class = RWKV6Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['RWKV6Block']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Parameter):
            nn.init.normal_(module, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                with torch.no_grad():
                    p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class RWKV6Model(RWKV6PreTrainedModel):

    def __init__(self, config: RWKV6Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([RWKV6Block(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`RWKV6Model` does not `output_attentions` now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class RWKV6ForCausalLM(RWKV6PreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = RWKV6Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only last token for `input_ids` if the `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
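The `nn.ZeroPad2d((0, 0, 1, -1))` used as `time_shift` in `RWKV6FeedForward` implements RWKV's token shift: on a (batch, time, channel) tensor it pads one zero step at the start of the time axis and crops the last step, so position t sees the features of position t-1. A minimal sketch:

# Sketch of the token-shift trick: ZeroPad2d takes (left, right, top, bottom)
# padding over the last two dims, so (0, 0, 1, -1) prepends a zero row along
# time and drops the final row (negative padding crops).
import torch
import torch.nn as nn

time_shift = nn.ZeroPad2d((0, 0, 1, -1))
x = torch.arange(1., 7.).view(1, 3, 2)  # three time steps, two channels
print(x[0])               # [[1, 2], [3, 4], [5, 6]]
print(time_shift(x)[0])   # [[0, 0], [1, 2], [3, 4]]
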
fla/models/rwkv7/modeling_rwkv7.py
ADDED
@@ -0,0 +1,505 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.rwkv7 import RWKV7Attention
from fla.models.rwkv7.configuration_rwkv7 import RWKV7Config
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, LayerNorm
from fla.modules.activations import ACT2FN

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class RWKV7FeedForward(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = 'sqrelu',
        layer_idx: int = None
    ) -> RWKV7FeedForward:
        super().__init__()

        self.hidden_size = hidden_size
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio)
            intermediate_size = 32 * ((intermediate_size + 32 - 1) // 32)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size

        self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))

        self.x_k = nn.Parameter(torch.zeros(hidden_size))

        self.key = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.value = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

        self.layer_idx = layer_idx

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        state: Optional[Cache] = None
    ) -> torch.Tensor:
        if attention_mask is not None:
            x = x.mul(attention_mask[:, -x.shape[-2]:, None])
        if x.shape[1] == 1 and state is not None and state[self.layer_idx]['ffn_state'] is not None:
            shifted = state[self.layer_idx]['ffn_state'].unsqueeze(1)
        else:
            shifted = self.time_shift(x)
            if state is not None and state[self.layer_idx]['ffn_state'] is not None:
                shifted[:, 0] = state[self.layer_idx]['ffn_state'][-1]
        if state is not None:
            # no need to update the offset twice
            state.update(ffn_state=x[:, -1], layer_idx=self.layer_idx, offset=0)
        return self.value(self.act_fn(self.key(x.addcmul(shifted - x, self.x_k)))), state


class RWKV7Block(nn.Module):

    def __init__(
        self,
        config: RWKV7Config,
        layer_idx: int
    ) -> RWKV7Block:
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        if config.norm_first and layer_idx == 0:
            self.pre_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
                config.hidden_size,
                bias=config.norm_bias,
                eps=config.norm_eps
            )
        self.attn_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = RWKV7Attention(
                mode=config.attn_mode,
                hidden_size=config.hidden_size,
                head_dim=config.head_dim,
                num_heads=config.num_heads,
                decay_low_rank_dim=config.decay_low_rank_dim,
                gate_low_rank_dim=config.gate_low_rank_dim,
                a_low_rank_dim=config.a_low_rank_dim,
                v_low_rank_dim=config.v_low_rank_dim,
                norm_eps=config.norm_eps,
                fuse_norm=config.fuse_norm,
                layer_idx=layer_idx,
                value_dim=config.value_dim[layer_idx]
            )
        self.ffn_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )
        self.ffn = RWKV7FeedForward(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            layer_idx=layer_idx
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        v_first: torch.Tensor = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = self.pre_norm(hidden_states) if hasattr(self, 'pre_norm') else hidden_states
        hidden_states = self.attn_norm(residual)
        hidden_states, attentions, past_key_values, v_first = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            v_first=v_first,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.ffn_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.ffn_norm(hidden_states)
        hidden_states, past_key_values = self.ffn(hidden_states, attention_mask, past_key_values)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values, v_first)

        return outputs


class RWKV7PreTrainedModel(PreTrainedModel):

    config_class = RWKV7Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['RWKV7Block']
    _supports_cache_class = True
    _skip_keys_device_placement = ["past_key_values"]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        warnings.warn(
            "RWKV-7 employs a carefully designed initialization strategy tailored to its architecture. "
            "The detailed initialization scheme is currently not implemented here but can be found in the "
            "official code repository. We emphasize that using the recommended initialization is essential "
            "for replicating the results in RWKV-7 paper. Deviations from the prescribed initialization "
            "may lead to performance degradation.\n"
            "Alternatively, please generate initial weights from the official RWKV code repository, and "
            "convert the PyTorch checkpoint into FLA supported format."
        )
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Parameter):
            nn.init.normal_(module, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                with torch.no_grad():
                    p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class RWKV7Model(RWKV7PreTrainedModel):

    def __init__(self, config: RWKV7Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([RWKV7Block(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`RWKV7Model` does not `output_attentions` now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None

        v_first = torch.zeros_like(hidden_states)
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values, v_first = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    v_first,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values, v_first = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    v_first=v_first,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class RWKV7ForCausalLM(RWKV7PreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = RWKV7Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only last token for `input_ids` if the `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        labels: Optional[torch.LongTensor] = None,
        shift_labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        has_labels = (labels is not None) or (shift_labels is not None)
        if not (fuse_linear_and_cross_entropy and has_labels):
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if has_labels:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion

            # shift_labels: See https://github.com/huggingface/transformers/pull/36607/files.
            if shift_labels is None:
                shift_labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            shift_labels = shift_labels.to(hidden_states.device)

            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, shift_labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(shift_labels.numel(), -1), shift_labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
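Unlike the RWKV-6 head, `RWKV7ForCausalLM.forward` also accepts pre-shifted targets via `shift_labels` (following the transformers change linked in the comment); when `shift_labels` is supplied, the internal shift of `labels` is skipped, which is useful when targets are shifted before the batch is sharded or packed. A sketch of the equivalence with illustrative tensors; the `model(...)` calls are indicative only:

# Sketch: passing `labels` shifts internally; passing `shift_labels` supplies
# already-shifted targets and should yield the same loss.
import torch

labels = torch.tensor([[5, 6, 7, 8]])
ignore_index = -100
shift_labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], ignore_index)), 1)
print(shift_labels)  # tensor([[   6,    7,    8, -100]])

# out_a = model(input_ids, labels=labels)
# out_b = model(input_ids, shift_labels=shift_labels)
# assert torch.allclose(out_a.loss, out_b.loss)
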
fla/models/samba/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.samba.configuration_samba import SambaConfig
from fla.models.samba.modeling_samba import SambaBlock, SambaForCausalLM, SambaModel

AutoConfig.register(SambaConfig.model_type, SambaConfig, True)
AutoModel.register(SambaConfig, SambaModel, True)
AutoModelForCausalLM.register(SambaConfig, SambaForCausalLM, True)


__all__ = ['SambaConfig', 'SambaForCausalLM', 'SambaModel', 'SambaBlock']
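The trailing `True` passed to the `register` calls above is the `exist_ok` flag of the transformers Auto registries, so repeated imports do not raise on re-registration. The Samba block in the next file reuses the hybrid-layer pattern seen in the RWKV blocks: indices listed in `config.attn['layers']` get softmax `Attention`, every other layer gets a `MambaMixer`. A small sketch of that selection with made-up values:

# Sketch of the per-layer mixer selection driven by config.attn['layers'].
num_hidden_layers = 8
attn = {'layers': [2, 5]}  # illustrative: softmax attention only at layers 2 and 5

mixers = [
    'Attention' if attn is not None and i in attn['layers'] else 'MambaMixer'
    for i in range(num_hidden_layers)
]
print(mixers)
# ['MambaMixer', 'MambaMixer', 'Attention', 'MambaMixer',
#  'MambaMixer', 'Attention', 'MambaMixer', 'MambaMixer']
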
fla/models/samba/modeling_samba.py
ADDED
|
@@ -0,0 +1,413 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.utils.checkpoint
|
| 11 |
+
from torch import nn
|
| 12 |
+
from transformers.generation import GenerationMixin
|
| 13 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 14 |
+
from transformers.utils import ModelOutput, logging
|
| 15 |
+
from transformers.utils.deprecation import deprecate_kwarg
|
| 16 |
+
|
| 17 |
+
from fla.layers.attn import Attention
|
| 18 |
+
from fla.models.mamba.modeling_mamba import MambaCache, MambaMixer
|
| 19 |
+
from fla.models.samba.configuration_samba import SambaConfig
|
| 20 |
+
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
|
| 21 |
+
from fla.modules import GatedMLP as SambaMLP
|
| 22 |
+
from fla.modules import RMSNorm
|
| 23 |
+
|
| 24 |
+
if TYPE_CHECKING:
|
| 25 |
+
from transformers.processing_utils import Unpack
|
| 26 |
+
|
| 27 |
+
logger = logging.get_logger(__name__)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class SambaBlock(nn.Module):
|
| 31 |
+
def __init__(self, config, layer_idx):
|
| 32 |
+
super().__init__()
|
| 33 |
+
|
| 34 |
+
self.config = config
|
| 35 |
+
self.layer_idx = layer_idx
|
| 36 |
+
|
| 37 |
+
self.mixer_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
|
| 38 |
+
if config.attn is not None and layer_idx in config.attn['layers']:
|
| 39 |
+
self.mixer = Attention(
|
| 40 |
+
hidden_size=config.hidden_size,
|
| 41 |
+
num_heads=config.attn['num_heads'],
|
| 42 |
+
num_kv_heads=config.attn['num_kv_heads'],
|
| 43 |
+
qkv_bias=config.attn['qkv_bias'],
|
| 44 |
+
window_size=config.attn['window_size'],
|
| 45 |
+
rope_theta=config.attn['rope_theta'],
|
| 46 |
+
max_position_embeddings=config.max_position_embeddings,
|
| 47 |
+
layer_idx=layer_idx
|
| 48 |
+
)
|
| 49 |
+
else:
|
| 50 |
+
self.mixer = MambaMixer(config, layer_idx=layer_idx)
|
| 51 |
+
self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
|
| 52 |
+
self.mlp = SambaMLP(
|
| 53 |
+
hidden_size=config.hidden_size,
|
| 54 |
+
hidden_ratio=config.hidden_ratio,
|
| 55 |
+
hidden_act=config.hidden_act,
|
| 56 |
+
fuse_swiglu=config.fuse_swiglu
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
def forward(
|
| 60 |
+
self,
|
| 61 |
+
hidden_states: torch.Tensor,
|
| 62 |
+
cache_params: Optional[Tuple[torch.Tensor]] = None,
|
| 63 |
+
**kwargs: Unpack[Dict]
|
| 64 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 65 |
+
|
| 66 |
+
residual = hidden_states
|
| 67 |
+
hidden_states = self.mixer_norm(hidden_states)
|
| 68 |
+
if isinstance(self.mixer, MambaMixer):
|
| 69 |
+
hidden_states = self.mixer(hidden_states, cache_params=cache_params, **kwargs)
|
| 70 |
+
else:
|
| 71 |
+
hidden_states, _, cache_params = self.mixer(hidden_states=hidden_states, past_key_values=cache_params, **kwargs)
|
| 72 |
+
if self.config.fuse_norm:
|
| 73 |
+
hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
|
| 74 |
+
else:
|
| 75 |
+
hidden_states = residual + hidden_states
|
| 76 |
+
residual = hidden_states
|
| 77 |
+
hidden_states = self.mlp_norm(hidden_states)
|
| 78 |
+
hidden_states = self.mlp(hidden_states, **kwargs)
|
| 79 |
+
hidden_states = residual + hidden_states
|
| 80 |
+
return hidden_states
|
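The two residual paths in the forward above are meant to be equivalent. A minimal reference for the unfused branch, assuming the fused RMSNorm call `self.mlp_norm(x, residual, True)` returns the pair (norm(x + residual), x + residual):

# Unfused reference for the fused-norm branch above (the fused RMSNorm
# contract is an assumption here, not taken from this file):
def unfused_mlp_norm(x, residual, norm):
    h = residual + x   # fold the residual in first
    return norm(h), h  # normalized activations, plus the sum as the new residual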
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class SambaPreTrainedModel(PreTrainedModel):
|
| 84 |
+
"""
|
| 85 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 86 |
+
models.
|
| 87 |
+
"""
|
| 88 |
+
|
| 89 |
+
config_class = SambaConfig
|
| 90 |
+
base_model_prefix = "backbone"
|
| 91 |
+
_no_split_modules = ["SambaBlock"]
|
| 92 |
+
supports_gradient_checkpointing = True
|
| 93 |
+
|
| 94 |
+
def _init_weights(self, module):
|
| 95 |
+
"""Initialize the weights."""
|
| 96 |
+
if isinstance(module, nn.Linear):
|
| 97 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 98 |
+
if module.bias is not None:
|
| 99 |
+
if not getattr(module.bias, "_no_reinit", False):
|
| 100 |
+
nn.init.zeros_(module.bias)
|
| 101 |
+
elif isinstance(module, MambaMixer):
|
| 102 |
+
module.A_log._no_weight_decay = True
|
| 103 |
+
module.D._no_weight_decay = True
|
| 104 |
+
|
| 105 |
+
dt_init_std = self.config.time_step_rank**-0.5 * self.config.time_step_scale
|
| 106 |
+
if self.config.time_step_init_scheme == "constant":
|
| 107 |
+
nn.init.constant_(module.dt_proj.weight, dt_init_std)
|
| 108 |
+
elif self.config.time_step_init_scheme == "random":
|
| 109 |
+
nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std)
|
| 110 |
+
|
| 111 |
+
dt = torch.exp(
|
| 112 |
+
torch.rand(self.config.intermediate_size)
|
| 113 |
+
* (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
|
| 114 |
+
+ math.log(self.config.time_step_min)
|
| 115 |
+
).clamp(min=self.config.time_step_floor)
|
| 116 |
+
# Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
|
| 117 |
+
inv_dt = dt + torch.log(-torch.expm1(-dt))
|
| 118 |
+
with torch.no_grad():
|
| 119 |
+
module.dt_proj.bias.copy_(inv_dt.to(module.dt_proj.bias.device))
|
| 120 |
+
module.dt_proj.bias._no_reinit = True
|
| 121 |
+
elif isinstance(module, nn.Embedding):
|
| 122 |
+
nn.init.normal_(module.weight, std=self.config.initializer_range)
|
| 123 |
+
elif hasattr(module, 'reset_parameters'):
|
| 124 |
+
module.reset_parameters()
|
| 125 |
+
|
| 126 |
+
if self.config.rescale_prenorm_residual:
|
| 127 |
+
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
|
| 128 |
+
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
|
| 129 |
+
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
|
| 130 |
+
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
|
| 131 |
+
#
|
| 132 |
+
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
|
| 133 |
+
for name, p in module.named_parameters():
|
| 134 |
+
if name in ["out_proj.weight"]:
|
| 135 |
+
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
|
| 136 |
+
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
|
| 137 |
+
# We need to reinit p since this code could be called multiple times
|
| 138 |
+
# Having just p *= scale would repeatedly scale it down
|
| 139 |
+
nn.init.kaiming_uniform_(p, a=math.sqrt(5))
|
| 140 |
+
with torch.no_grad():
|
| 141 |
+
p /= math.sqrt(self.config.num_layers)
|
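The `inv_dt` computed in the Mamba branch above is the inverse softplus of the sampled `dt`, so the softplus applied at runtime recovers `dt` exactly. A self-contained check of the identity, with example bounds standing in for `time_step_min`/`time_step_max`:

# softplus(dt + log(-expm1(-dt))) == dt, since exp(inv_dt) = exp(dt) - 1.
import math
import torch
import torch.nn.functional as F

t_min, t_max = 1e-3, 1e-1  # example values, not necessarily the config defaults
dt = torch.exp(torch.rand(8) * (math.log(t_max) - math.log(t_min)) + math.log(t_min))
inv_dt = dt + torch.log(-torch.expm1(-dt))
assert torch.allclose(F.softplus(inv_dt), dt, atol=1e-5)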
| 142 |
+
|
| 143 |
+
|
| 144 |
+
@dataclass
|
| 145 |
+
class SambaOutput(ModelOutput):
|
| 146 |
+
"""
|
| 147 |
+
Class for the Samba model outputs.
|
| 148 |
+
|
| 149 |
+
Args:
|
| 150 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 151 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 152 |
+
cache_params (`MambaCache`):
|
| 153 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
| 154 |
+
avoid providing the old `input_ids`.
|
| 155 |
+
|
| 156 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
| 157 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*,
|
| 158 |
+
returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 159 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 160 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 161 |
+
|
| 162 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 163 |
+
"""
|
| 164 |
+
|
| 165 |
+
last_hidden_state: Optional[torch.FloatTensor] = None
|
| 166 |
+
cache_params: Optional[MambaCache] = None
|
| 167 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
@dataclass
|
| 171 |
+
class SambaCausalLMOutput(ModelOutput):
|
| 172 |
+
"""
|
| 173 |
+
Base class for causal language model (or autoregressive) outputs.
|
| 174 |
+
|
| 175 |
+
Args:
|
| 176 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
| 177 |
+
Language modeling loss (for next-token prediction).
|
| 178 |
+
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
| 179 |
+
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
| 180 |
+
cache_params (`MambaCache`):
|
| 181 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
| 182 |
+
avoid providing the old `input_ids`.
|
| 183 |
+
|
| 184 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
| 185 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*,
|
| 186 |
+
returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 187 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 188 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 189 |
+
|
| 190 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 191 |
+
"""
|
| 192 |
+
|
| 193 |
+
loss: Optional[torch.FloatTensor] = None
|
| 194 |
+
logits: Optional[torch.FloatTensor] = None
|
| 195 |
+
cache_params: Optional[MambaCache] = None
|
| 196 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
class SambaModel(SambaPreTrainedModel):
|
| 200 |
+
def __init__(self, config):
|
| 201 |
+
super().__init__(config)
|
| 202 |
+
|
| 203 |
+
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
|
| 204 |
+
self.layers = nn.ModuleList([SambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
|
| 205 |
+
|
| 206 |
+
self.gradient_checkpointing = False
|
| 207 |
+
self.norm_f = RMSNorm(config.hidden_size, eps=config.norm_eps)
|
| 208 |
+
# Initialize weights and apply final processing
|
| 209 |
+
self.post_init()
|
| 210 |
+
|
| 211 |
+
def get_input_embeddings(self):
|
| 212 |
+
return self.embeddings
|
| 213 |
+
|
| 214 |
+
def set_input_embeddings(self, new_embeddings):
|
| 215 |
+
self.embeddings = new_embeddings
|
| 216 |
+
|
| 217 |
+
def forward(
|
| 218 |
+
self,
|
| 219 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 220 |
+
inputs_embeds: Optional[torch.LongTensor] = None,
|
| 221 |
+
cache_params: Optional[MambaCache] = None,
|
| 222 |
+
use_cache: Optional[bool] = None,
|
| 223 |
+
output_hidden_states: Optional[bool] = None,
|
| 224 |
+
return_dict: Optional[bool] = None,
|
| 225 |
+
**kwargs: Unpack[Dict]
|
| 226 |
+
) -> Union[Tuple, SambaOutput]:
|
| 227 |
+
output_hidden_states = (
|
| 228 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 229 |
+
)
|
| 230 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
| 231 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 232 |
+
|
| 233 |
+
if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor
|
| 234 |
+
raise ValueError(
|
| 235 |
+
"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
|
| 236 |
+
)
|
| 237 |
+
|
| 238 |
+
if inputs_embeds is None:
|
| 239 |
+
inputs_embeds = self.embeddings(input_ids)
|
| 240 |
+
|
| 241 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
| 242 |
+
use_cache = False
|
| 243 |
+
|
| 244 |
+
if cache_params is None and use_cache:
|
| 245 |
+
cache_params = MambaCache(
|
| 246 |
+
self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
hidden_states = inputs_embeds
|
| 250 |
+
all_hidden_states = () if output_hidden_states else None
|
| 251 |
+
for mixer_block in self.layers:
|
| 252 |
+
if self.gradient_checkpointing and self.training:
|
| 253 |
+
hidden_states = self._gradient_checkpointing_func(
|
| 254 |
+
mixer_block.__call__,
|
| 255 |
+
hidden_states,
|
| 256 |
+
cache_params,
|
| 257 |
+
**kwargs
|
| 258 |
+
)
|
| 259 |
+
else:
|
| 260 |
+
hidden_states = mixer_block(
|
| 261 |
+
hidden_states,
|
| 262 |
+
cache_params=cache_params,
|
| 263 |
+
**kwargs
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
if output_hidden_states:
|
| 267 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 268 |
+
|
| 269 |
+
if use_cache:
|
| 270 |
+
cache_params.seqlen_offset += inputs_embeds.shape[1]
|
| 271 |
+
|
| 272 |
+
hidden_states = self.norm_f(hidden_states)
|
| 273 |
+
|
| 274 |
+
if output_hidden_states:
|
| 275 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 276 |
+
|
| 277 |
+
if not return_dict:
|
| 278 |
+
return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
|
| 279 |
+
|
| 280 |
+
return SambaOutput(
|
| 281 |
+
last_hidden_state=hidden_states,
|
| 282 |
+
cache_params=cache_params if use_cache else None,
|
| 283 |
+
hidden_states=all_hidden_states,
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class SambaForCausalLM(SambaPreTrainedModel, GenerationMixin):
|
| 288 |
+
|
| 289 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 290 |
+
|
| 291 |
+
def __init__(self, config):
|
| 292 |
+
super().__init__(config)
|
| 293 |
+
self.backbone = SambaModel(config)
|
| 294 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 295 |
+
self.criterion = None
|
| 296 |
+
|
| 297 |
+
# Initialize weights and apply final processing
|
| 298 |
+
self.post_init()
|
| 299 |
+
|
| 300 |
+
def get_output_embeddings(self):
|
| 301 |
+
return self.lm_head
|
| 302 |
+
|
| 303 |
+
def set_output_embeddings(self, new_embeddings):
|
| 304 |
+
self.lm_head = new_embeddings
|
| 305 |
+
|
| 306 |
+
def get_input_embeddings(self):
|
| 307 |
+
return self.backbone.get_input_embeddings()
|
| 308 |
+
|
| 309 |
+
def set_input_embeddings(self, new_embeddings):
|
| 310 |
+
return self.backbone.set_input_embeddings(new_embeddings)
|
| 311 |
+
|
| 312 |
+
def _update_model_kwargs_for_generation(
|
| 313 |
+
self, outputs: ModelOutput, model_kwargs: Dict[str, Any], **kwargs
|
| 314 |
+
) -> Dict[str, Any]:
|
| 315 |
+
model_kwargs["cache_params"] = outputs.get("cache_params", None)
|
| 316 |
+
return model_kwargs
|
| 317 |
+
|
| 318 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 319 |
+
def prepare_inputs_for_generation(
|
| 320 |
+
self,
|
| 321 |
+
input_ids,
|
| 322 |
+
cache_params: Optional[MambaCache] = None,
|
| 324 |
+
inputs_embeds=None,
|
| 325 |
+
attention_mask=None,
|
| 326 |
+
use_cache: Optional[bool] = True,
|
| 327 |
+
logits_to_keep: Optional[int] = None,
|
| 328 |
+
**kwargs: Unpack[Dict]
|
| 329 |
+
):
|
| 330 |
+
# only keep the last token of input_ids if the state is passed along.
|
| 331 |
+
if cache_params is not None:
|
| 332 |
+
input_ids = input_ids[:, -1].unsqueeze(-1)
|
| 333 |
+
|
| 334 |
+
if inputs_embeds is not None and cache_params is None:
|
| 335 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
| 336 |
+
else:
|
| 337 |
+
model_inputs = {"input_ids": input_ids}
|
| 338 |
+
|
| 339 |
+
if logits_to_keep is not None:
|
| 340 |
+
model_inputs['logits_to_keep'] = logits_to_keep
|
| 341 |
+
|
| 342 |
+
model_inputs.update({
|
| 343 |
+
'cache_params': cache_params,
|
| 344 |
+
'use_cache': use_cache,
|
| 345 |
+
'attention_mask': attention_mask,
|
| 346 |
+
|
| 347 |
+
})
|
| 348 |
+
return model_inputs
|
| 349 |
+
|
| 350 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 351 |
+
def forward(
|
| 352 |
+
self,
|
| 353 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 354 |
+
attention_mask: Optional[torch.Tensor] = None, # noqa
|
| 355 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 356 |
+
cache_params: Optional[MambaCache] = None,
|
| 357 |
+
labels: Optional[torch.LongTensor] = None,
|
| 358 |
+
output_hidden_states: Optional[bool] = None,
|
| 359 |
+
return_dict: Optional[bool] = None,
|
| 360 |
+
use_cache: Optional[bool] = None,
|
| 361 |
+
logits_to_keep: Optional[int] = 0,
|
| 362 |
+
**kwargs: Unpack[Dict]
|
| 363 |
+
) -> Union[Tuple, SambaCausalLMOutput]:
|
| 364 |
+
r"""
|
| 365 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 366 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
| 367 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
| 368 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
| 369 |
+
"""
|
| 370 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 371 |
+
|
| 372 |
+
outputs = self.backbone(
|
| 373 |
+
input_ids,
|
| 374 |
+
cache_params=cache_params,
|
| 375 |
+
inputs_embeds=inputs_embeds,
|
| 376 |
+
output_hidden_states=output_hidden_states,
|
| 377 |
+
return_dict=return_dict,
|
| 378 |
+
use_cache=use_cache,
|
| 379 |
+
**kwargs
|
| 380 |
+
)
|
| 381 |
+
hidden_states = outputs[0]
|
| 382 |
+
fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
|
| 383 |
+
|
| 384 |
+
loss, logits = None, None
|
| 385 |
+
if not fuse_linear_and_cross_entropy or labels is None:
|
| 386 |
+
logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
|
| 387 |
+
if labels is not None:
|
| 388 |
+
if getattr(self, 'criterion', None) is None:
|
| 389 |
+
if fuse_linear_and_cross_entropy:
|
| 390 |
+
criterion = FusedLinearCrossEntropyLoss()
|
| 391 |
+
elif self.config.fuse_cross_entropy:
|
| 392 |
+
criterion = FusedCrossEntropyLoss(inplace_backward=True)
|
| 393 |
+
else:
|
| 394 |
+
criterion = nn.CrossEntropyLoss()
|
| 395 |
+
else:
|
| 396 |
+
criterion = self.criterion
|
| 397 |
+
labels = labels.to(hidden_states.device)
|
| 398 |
+
labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
|
| 399 |
+
if fuse_linear_and_cross_entropy:
|
| 400 |
+
loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
|
| 401 |
+
else:
|
| 402 |
+
loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))
|
| 403 |
+
|
| 404 |
+
if not return_dict:
|
| 405 |
+
output = (logits,) + outputs[1:]
|
| 406 |
+
return (loss,) + output if loss is not None else output
|
| 407 |
+
|
| 408 |
+
return SambaCausalLMOutput(
|
| 409 |
+
loss=loss,
|
| 410 |
+
logits=logits,
|
| 411 |
+
cache_params=outputs.cache_params,
|
| 412 |
+
hidden_states=outputs.hidden_states,
|
| 413 |
+
)
|
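A minimal decoding sketch for the classes above, using a randomly initialized model; the default `SambaConfig()` constructor arguments are an assumption here:

# Feed the prompt once, then continue one token at a time via cache_params.
import torch
from fla.models.samba import SambaConfig, SambaForCausalLM

model = SambaForCausalLM(SambaConfig()).eval()
input_ids = torch.randint(0, model.config.vocab_size, (1, 8))
with torch.no_grad():
    out = model(input_ids, use_cache=True)
    next_token = out.logits[:, -1:].argmax(-1)
    # only the new token is passed in; the recurrent state lives in cache_params
    out = model(next_token, cache_params=out.cache_params, use_cache=True)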
fla/models/transformer/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
|
| 4 |
+
|
| 5 |
+
from fla.models.transformer.configuration_transformer import TransformerConfig
|
| 6 |
+
from fla.models.transformer.modeling_transformer import TransformerForCausalLM, TransformerModel
|
| 7 |
+
|
| 8 |
+
AutoConfig.register(TransformerConfig.model_type, TransformerConfig)
|
| 9 |
+
AutoModel.register(TransformerConfig, TransformerModel)
|
| 10 |
+
AutoModelForCausalLM.register(TransformerConfig, TransformerForCausalLM)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = ['TransformerConfig', 'TransformerForCausalLM', 'TransformerModel']
|
fla/models/transformer_dsmtp/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
|
| 4 |
+
|
| 5 |
+
from fla.models.transformer_dsmtp.configuration_transformer import DSMTPTransformerConfig
|
| 6 |
+
from fla.models.transformer_dsmtp.modeling_transformer import DSMTPTransformerForCausalLM, DSMTPTransformerModel
|
| 7 |
+
|
| 8 |
+
AutoConfig.register(DSMTPTransformerConfig.model_type, DSMTPTransformerConfig)
|
| 9 |
+
AutoModel.register(DSMTPTransformerConfig, DSMTPTransformerModel)
|
| 10 |
+
AutoModelForCausalLM.register(DSMTPTransformerConfig, DSMTPTransformerForCausalLM)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = ['DSMTPTransformerConfig', 'DSMTPTransformerForCausalLM', 'DSMTPTransformerModel']
|
fla/models/transformer_dsmtp/configuration_transformer.py
ADDED
|
@@ -0,0 +1,73 @@
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class DSMTPTransformerConfig(PretrainedConfig):
|
| 9 |
+
|
| 10 |
+
model_type = 'dsmtp_transformer'
|
| 11 |
+
keys_to_ignore_at_inference = ['past_key_values']
|
| 12 |
+
|
| 13 |
+
def __init__(
|
| 14 |
+
self,
|
| 15 |
+
hidden_size: int = 2048,
|
| 16 |
+
num_hidden_layers: int = 24,
|
| 17 |
+
num_heads: int = 32,
|
| 18 |
+
num_kv_heads: int = None,
|
| 19 |
+
qkv_bias: bool = False,
|
| 20 |
+
qk_norm: bool = False,
|
| 21 |
+
window_size: Optional[int] = None,
|
| 22 |
+
rope_theta: Optional[float] = 10000.,
|
| 23 |
+
max_position_embeddings: int = 2048,
|
| 24 |
+
hidden_ratio: Optional[int] = 4,
|
| 25 |
+
intermediate_size: Optional[int] = None,
|
| 26 |
+
hidden_act: str = "swish",
|
| 27 |
+
initializer_range: float = 0.006,
|
| 28 |
+
elementwise_affine: Optional[bool] = True,
|
| 29 |
+
norm_eps: float = 1e-6,
|
| 30 |
+
use_cache: bool = True,
|
| 31 |
+
pad_token_id: int = None,
|
| 32 |
+
bos_token_id: int = 1,
|
| 33 |
+
eos_token_id: int = 2,
|
| 34 |
+
tie_word_embeddings: bool = False,
|
| 35 |
+
fuse_norm: bool = True,
|
| 36 |
+
fuse_swiglu: bool = True,
|
| 37 |
+
fuse_cross_entropy: bool = True,
|
| 38 |
+
vocab_size: int = 32000,
|
| 39 |
+
n_future_tokens: int = 1,
|
| 40 |
+
**kwargs,
|
| 41 |
+
):
|
| 42 |
+
self.hidden_size = hidden_size
|
| 43 |
+
self.num_hidden_layers = num_hidden_layers
|
| 44 |
+
self.num_heads = num_heads
|
| 45 |
+
self.num_kv_heads = num_kv_heads
|
| 46 |
+
self.qkv_bias = qkv_bias
|
| 47 |
+
self.qk_norm = qk_norm
|
| 48 |
+
self.window_size = window_size
|
| 49 |
+
self.rope_theta = rope_theta
|
| 50 |
+
self.max_position_embeddings = max_position_embeddings
|
| 51 |
+
|
| 52 |
+
self.hidden_ratio = hidden_ratio
|
| 53 |
+
self.intermediate_size = intermediate_size
|
| 54 |
+
self.hidden_act = hidden_act
|
| 55 |
+
|
| 56 |
+
self.initializer_range = initializer_range
|
| 57 |
+
self.elementwise_affine = elementwise_affine
|
| 58 |
+
self.norm_eps = norm_eps
|
| 59 |
+
self.use_cache = use_cache
|
| 60 |
+
|
| 61 |
+
self.fuse_norm = fuse_norm
|
| 62 |
+
self.fuse_swiglu = fuse_swiglu
|
| 63 |
+
self.fuse_cross_entropy = fuse_cross_entropy
|
| 64 |
+
self.vocab_size = vocab_size
|
| 65 |
+
self.n_future_tokens = n_future_tokens
|
| 66 |
+
|
| 67 |
+
super().__init__(
|
| 68 |
+
pad_token_id=pad_token_id,
|
| 69 |
+
bos_token_id=bos_token_id,
|
| 70 |
+
eos_token_id=eos_token_id,
|
| 71 |
+
tie_word_embeddings=tie_word_embeddings,
|
| 72 |
+
**kwargs,
|
| 73 |
+
)
|
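The only field above that is specific to the DSMTP variant is `n_future_tokens`, the number of prediction heads (1 reduces to plain next-token prediction). A tiny instantiation sketch, with all other fields left at their defaults:

# Hypothetical config instantiation for a 4-head multi-token-prediction model.
from fla.models.transformer_dsmtp import DSMTPTransformerConfig

config = DSMTPTransformerConfig(num_hidden_layers=24, n_future_tokens=4)
assert config.model_type == 'dsmtp_transformer'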
fla/models/transformer_dsmtp/modeling_transformer.py
ADDED
|
@@ -0,0 +1,494 @@
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import warnings
|
| 7 |
+
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
import torch.nn.functional as F
|
| 12 |
+
import torch.utils.checkpoint
|
| 13 |
+
from dataclasses import dataclass
|
| 14 |
+
from transformers.generation import GenerationMixin
|
| 15 |
+
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
|
| 16 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 17 |
+
from transformers.utils import logging
|
| 18 |
+
from transformers.utils.deprecation import deprecate_kwarg
|
| 19 |
+
|
| 20 |
+
import triton
|
| 21 |
+
import triton.language as tl
|
| 22 |
+
|
| 23 |
+
from fla.layers.attn import Attention
|
| 24 |
+
from fla.models.transformer_dsmtp.configuration_transformer import DSMTPTransformerConfig
|
| 25 |
+
from fla.models.utils import Cache
|
| 26 |
+
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
|
| 27 |
+
from fla.modules import GatedMLP as TransformerMLP
|
| 28 |
+
from fla.modules import RMSNorm
|
| 29 |
+
from fla.modules.seq_to_dsmtp import seq_to_dsmtp
|
| 30 |
+
|
| 31 |
+
if TYPE_CHECKING:
|
| 32 |
+
from transformers.processing_utils import Unpack
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
logger = logging.get_logger(__name__)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@dataclass
|
| 39 |
+
class DSMTPLMOutputWithPast(CausalLMOutputWithPast):
|
| 40 |
+
ntp_loss: Optional[torch.FloatTensor] = None
|
| 41 |
+
mtp_loss: Optional[torch.FloatTensor] = None
|
| 42 |
+
|
| 43 |
+
class DSMTPTransformerBlock(nn.Module):
|
| 44 |
+
|
| 45 |
+
def __init__(self, config: DSMTPTransformerConfig, layer_idx: int):
|
| 46 |
+
super().__init__()
|
| 47 |
+
|
| 48 |
+
self.config = config
|
| 49 |
+
self.layer_idx = layer_idx
|
| 50 |
+
|
| 51 |
+
self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
|
| 52 |
+
self.attn = Attention(
|
| 53 |
+
hidden_size=config.hidden_size,
|
| 54 |
+
num_heads=config.num_heads,
|
| 55 |
+
num_kv_heads=config.num_kv_heads,
|
| 56 |
+
qkv_bias=config.qkv_bias,
|
| 57 |
+
qk_norm=config.qk_norm,
|
| 58 |
+
window_size=config.window_size,
|
| 59 |
+
rope_theta=config.rope_theta,
|
| 60 |
+
max_position_embeddings=config.max_position_embeddings,
|
| 61 |
+
layer_idx=layer_idx
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
|
| 65 |
+
self.mlp = TransformerMLP(
|
| 66 |
+
hidden_size=config.hidden_size,
|
| 67 |
+
hidden_ratio=config.hidden_ratio,
|
| 68 |
+
intermediate_size=config.intermediate_size,
|
| 69 |
+
hidden_act=config.hidden_act,
|
| 70 |
+
fuse_swiglu=config.fuse_swiglu
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
def forward(
|
| 74 |
+
self,
|
| 75 |
+
hidden_states: torch.Tensor,
|
| 76 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 77 |
+
past_key_values: Optional[Tuple[torch.Tensor]] = None,
|
| 78 |
+
output_attentions: Optional[bool] = False,
|
| 79 |
+
use_cache: Optional[bool] = False,
|
| 80 |
+
**kwargs: Unpack[Any]
|
| 81 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 82 |
+
|
| 83 |
+
residual = hidden_states
|
| 84 |
+
hidden_states = self.attn_norm(hidden_states)
|
| 85 |
+
hidden_states, attentions, past_key_values = self.attn(
|
| 86 |
+
hidden_states=hidden_states,
|
| 87 |
+
attention_mask=attention_mask,
|
| 88 |
+
past_key_values=past_key_values,
|
| 89 |
+
use_cache=use_cache,
|
| 90 |
+
output_attentions=output_attentions,
|
| 91 |
+
**kwargs
|
| 92 |
+
)
|
| 93 |
+
if self.config.fuse_norm:
|
| 94 |
+
hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
|
| 95 |
+
else:
|
| 96 |
+
hidden_states = residual + hidden_states
|
| 97 |
+
residual = hidden_states
|
| 98 |
+
hidden_states = self.mlp_norm(hidden_states)
|
| 99 |
+
hidden_states = self.mlp(hidden_states, **kwargs)
|
| 100 |
+
hidden_states = residual + hidden_states
|
| 101 |
+
|
| 102 |
+
outputs = (hidden_states,)
|
| 103 |
+
|
| 104 |
+
if output_attentions:
|
| 105 |
+
outputs += (attentions,)
|
| 106 |
+
|
| 107 |
+
if use_cache:
|
| 108 |
+
outputs += (past_key_values,)
|
| 109 |
+
|
| 110 |
+
return outputs
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class DSMTPTransformerPreTrainedModel(PreTrainedModel):
|
| 114 |
+
|
| 115 |
+
config_class = DSMTPTransformerConfig
|
| 116 |
+
base_model_prefix = 'model'
|
| 117 |
+
supports_gradient_checkpointing = True
|
| 118 |
+
_no_split_modules = ['DSMTPTransformerBlock']
|
| 119 |
+
_supports_cache_class = True
|
| 120 |
+
|
| 121 |
+
def __init__(self, *inputs, **kwargs):
|
| 122 |
+
super().__init__(*inputs, **kwargs)
|
| 123 |
+
|
| 124 |
+
def _init_weights(
|
| 125 |
+
self,
|
| 126 |
+
module: nn.Module,
|
| 127 |
+
rescale_prenorm_residual: bool = False,
|
| 128 |
+
num_residuals_per_layer: int = 2,
|
| 129 |
+
):
|
| 130 |
+
if isinstance(module, (nn.Linear, nn.Conv1d)):
|
| 131 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
| 132 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
| 133 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 134 |
+
if module.bias is not None:
|
| 135 |
+
nn.init.zeros_(module.bias)
|
| 136 |
+
elif isinstance(module, nn.Embedding):
|
| 137 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 138 |
+
elif hasattr(module, 'reset_parameters'):
|
| 139 |
+
module.reset_parameters()
|
| 140 |
+
|
| 141 |
+
if rescale_prenorm_residual:
|
| 142 |
+
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
|
| 143 |
+
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
|
| 144 |
+
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
|
| 145 |
+
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
|
| 146 |
+
#
|
| 147 |
+
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
|
| 148 |
+
p = None
|
| 149 |
+
if hasattr(module, 'o_proj'):
|
| 150 |
+
p = module.o_proj.weight
|
| 151 |
+
elif hasattr(module, 'down_proj'):
|
| 152 |
+
p = module.down_proj.weight
|
| 153 |
+
if p is not None:
|
| 154 |
+
# Special Scaled Initialization --> There are 2 Layer Norms per DSMTPTransformer Block
|
| 155 |
+
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
|
| 156 |
+
# We need to reinit p since this code could be called multiple times
|
| 157 |
+
# Having just p *= scale would repeatedly scale it down
|
| 158 |
+
nn.init.kaiming_uniform_(p, a=math.sqrt(5))
|
| 159 |
+
with torch.no_grad():
|
| 160 |
+
p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
class DSMTPTransformerModel(DSMTPTransformerPreTrainedModel):
|
| 164 |
+
|
| 165 |
+
def __init__(
|
| 166 |
+
self,
|
| 167 |
+
config: DSMTPTransformerConfig
|
| 168 |
+
):
|
| 169 |
+
super().__init__(config)
|
| 170 |
+
self.padding_idx = config.pad_token_id
|
| 171 |
+
self.vocab_size = config.vocab_size
|
| 172 |
+
|
| 173 |
+
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 174 |
+
self.layers = nn.ModuleList([DSMTPTransformerBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers - config.n_future_tokens)])
|
| 175 |
+
self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
|
| 176 |
+
|
| 177 |
+
self.extra_heads = nn.ModuleList([DSMTPTransformerBlock(config, i) for i in range(config.n_future_tokens)])
|
| 178 |
+
self.projection_head = nn.ModuleList([nn.Linear(2 * config.hidden_size, config.hidden_size) for _ in range(config.n_future_tokens)])
|
| 179 |
+
self.norms_1 = nn.ModuleList([RMSNorm(config.hidden_size, eps=config.norm_eps) for _ in range(config.n_future_tokens)])
|
| 180 |
+
self.norms_2 = nn.ModuleList([RMSNorm(config.hidden_size, eps=config.norm_eps) for _ in range(config.n_future_tokens)])
|
| 181 |
+
|
| 182 |
+
self.gradient_checkpointing = False
|
| 183 |
+
|
| 184 |
+
self.post_init()
|
| 185 |
+
|
| 186 |
+
def get_input_embeddings(self):
|
| 187 |
+
return self.embeddings
|
| 188 |
+
|
| 189 |
+
def set_input_embeddings(self, value):
|
| 190 |
+
self.embeddings = value
|
| 191 |
+
|
| 192 |
+
def forward(
|
| 193 |
+
self,
|
| 194 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 195 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 196 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 197 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 198 |
+
use_cache: Optional[bool] = None,
|
| 199 |
+
output_attentions: Optional[bool] = None,
|
| 200 |
+
output_hidden_states: Optional[bool] = None,
|
| 201 |
+
return_dict: Optional[bool] = None,
|
| 202 |
+
return_all_heads: bool = False,
|
| 203 |
+
**kwargs: Unpack[Any]
|
| 204 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 205 |
+
if output_attentions:
|
| 206 |
+
warnings.warn(
|
| 207 |
+
"`DSMTPTransformerModel` does not support output attention weights now, so `output_attentions` is set to `False`."
|
| 208 |
+
)
|
| 209 |
+
output_attentions = False
|
| 210 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 211 |
+
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 212 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
| 213 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 214 |
+
|
| 215 |
+
# retrieve input_ids and inputs_embeds
|
| 216 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 217 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
| 218 |
+
elif input_ids is None and inputs_embeds is None:
|
| 219 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
| 220 |
+
|
| 221 |
+
if use_cache and not isinstance(past_key_values, Cache):
|
| 222 |
+
past_key_values = Cache.from_legacy_cache(past_key_values)
|
| 223 |
+
|
| 224 |
+
if inputs_embeds is None:
|
| 225 |
+
inputs_embeds = self.embeddings(input_ids)
|
| 226 |
+
|
| 227 |
+
# The input now is expected to be of shape (B, n_future_tokens, T, C)
|
| 228 |
+
# We take the first token embedding as the main input
|
| 229 |
+
hidden_states = inputs_embeds[:, 0, :, :]
|
| 230 |
+
|
| 231 |
+
if self.gradient_checkpointing and self.training:
|
| 232 |
+
if use_cache:
|
| 233 |
+
logger.warning_once(
|
| 234 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
| 235 |
+
)
|
| 236 |
+
use_cache = False
|
| 237 |
+
|
| 238 |
+
all_hidden_states = () if output_hidden_states else None
|
| 239 |
+
all_attns = () if output_attentions else None
|
| 240 |
+
next_cache = None
|
| 241 |
+
|
| 242 |
+
for layer in self.layers:
|
| 243 |
+
if output_hidden_states:
|
| 244 |
+
all_hidden_states += (hidden_states,)
|
| 245 |
+
|
| 246 |
+
if self.gradient_checkpointing and self.training:
|
| 247 |
+
layer_outputs = self._gradient_checkpointing_func(
|
| 248 |
+
layer.__call__,
|
| 249 |
+
hidden_states,
|
| 250 |
+
attention_mask,
|
| 251 |
+
past_key_values,
|
| 252 |
+
output_attentions,
|
| 253 |
+
use_cache,
|
| 254 |
+
**kwargs
|
| 255 |
+
)
|
| 256 |
+
else:
|
| 257 |
+
layer_outputs = layer(
|
| 258 |
+
hidden_states,
|
| 259 |
+
attention_mask=attention_mask,
|
| 260 |
+
past_key_values=past_key_values,
|
| 261 |
+
output_attentions=output_attentions,
|
| 262 |
+
use_cache=use_cache,
|
| 263 |
+
**kwargs
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
hidden_states = layer_outputs[0]
|
| 267 |
+
|
| 268 |
+
if use_cache:
|
| 269 |
+
next_cache = layer_outputs[2 if output_attentions else 1]
|
| 270 |
+
|
| 271 |
+
if output_attentions:
|
| 272 |
+
all_attns += (layer_outputs[1],)
|
| 273 |
+
|
| 274 |
+
n_heads_to_use = self.config.n_future_tokens if return_all_heads or self.training else 1
|
| 275 |
+
prediction_heads_to_use = self.extra_heads[:n_heads_to_use]
|
| 276 |
+
latents = []
|
| 277 |
+
for i, block in enumerate(prediction_heads_to_use):
|
| 278 |
+
if i < input_ids.shape[1]:
|
| 279 |
+
if i > 0:
|
| 280 |
+
hidden_states = self.norms_1[i](hidden_states)
|
| 281 |
+
new_input = self.norms_2[i](inputs_embeds[:, i, :, :])
|
| 282 |
+
hidden_states = torch.cat((hidden_states, new_input), dim=-1)
|
| 283 |
+
hidden_states = self.projection_head[i](hidden_states)
|
| 284 |
+
|
| 285 |
+
layer_outputs = block(
|
| 286 |
+
hidden_states,
|
| 287 |
+
attention_mask=attention_mask,
|
| 288 |
+
past_key_values=None, # No cache for extra heads
|
| 289 |
+
output_attentions=output_attentions,
|
| 290 |
+
use_cache=False,
|
| 291 |
+
**kwargs
|
| 292 |
+
)
|
| 293 |
+
hidden_states = layer_outputs[0]
|
| 294 |
+
latents.append(hidden_states)
|
| 295 |
+
elif return_all_heads and 'lm_head' in kwargs:
|
| 296 |
+
# at inference time, the golden future tokens don't exist
|
| 297 |
+
# so we need to sample on the fly
|
| 298 |
+
lm_head = kwargs['lm_head']
|
| 299 |
+
if i > 0:
|
| 300 |
+
new_inputs = lm_head(self.norm(hidden_states[:, -1:, :]))
|
| 301 |
+
sampled_tokens = torch.argmax(new_inputs, dim=-1)
|
| 302 |
+
sampled_embeds = self.embeddings(sampled_tokens)
|
| 303 |
+
inputs_embeds = torch.cat((inputs_embeds, sampled_embeds.unsqueeze(1)), dim=-2)
|
| 304 |
+
hidden_states = self.norms_1[i](hidden_states)
|
| 305 |
+
new_input = self.norms_2[i](inputs_embeds[:, 0, -hidden_states.shape[1]:, :])
|
| 306 |
+
hidden_states = torch.cat((hidden_states, new_input), dim=-1)
|
| 307 |
+
hidden_states = self.projection_head[i](hidden_states)
|
| 308 |
+
|
| 309 |
+
layer_outputs = block(
|
| 310 |
+
hidden_states,
|
| 311 |
+
attention_mask=attention_mask,
|
| 312 |
+
past_key_values=None, # No cache for extra heads
|
| 313 |
+
output_attentions=output_attentions,
|
| 314 |
+
use_cache=False,
|
| 315 |
+
**kwargs
|
| 316 |
+
)
|
| 317 |
+
hidden_states = layer_outputs[0]
|
| 318 |
+
latents.append(hidden_states)
|
| 319 |
+
|
| 320 |
+
hidden_states = torch.stack(latents, dim=1)
|
| 321 |
+
hidden_states = self.norm(hidden_states)
|
| 322 |
+
|
| 323 |
+
# add hidden states from the last decoder layer
|
| 324 |
+
if output_hidden_states:
|
| 325 |
+
all_hidden_states += (hidden_states,)
|
| 326 |
+
|
| 327 |
+
if not return_dict:
|
| 328 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)
|
| 329 |
+
|
| 330 |
+
return BaseModelOutputWithPast(
|
| 331 |
+
last_hidden_state=hidden_states,
|
| 332 |
+
past_key_values=next_cache,
|
| 333 |
+
hidden_states=all_hidden_states,
|
| 334 |
+
attentions=all_attns
|
| 335 |
+
)
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
class DSMTPTransformerForCausalLM(DSMTPTransformerPreTrainedModel, GenerationMixin):
|
| 339 |
+
|
| 340 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 341 |
+
|
| 342 |
+
def __init__(self, config):
|
| 343 |
+
super().__init__(config)
|
| 344 |
+
self.model = DSMTPTransformerModel(config)
|
| 345 |
+
self.vocab_size = config.vocab_size
|
| 346 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 347 |
+
self.criterion = None
|
| 348 |
+
self.pad_token_id = config.pad_token_id
|
| 349 |
+
self.config = config
|
| 350 |
+
|
| 351 |
+
# Initialize weights and apply final processing
|
| 352 |
+
self.post_init()
|
| 353 |
+
|
| 354 |
+
def get_input_embeddings(self):
|
| 355 |
+
return self.model.embeddings
|
| 356 |
+
|
| 357 |
+
def set_input_embeddings(self, value):
|
| 358 |
+
self.model.embeddings = value
|
| 359 |
+
|
| 360 |
+
def get_output_embeddings(self):
|
| 361 |
+
return self.lm_head
|
| 362 |
+
|
| 363 |
+
def set_output_embeddings(self, new_embeddings):
|
| 364 |
+
self.lm_head = new_embeddings
|
| 365 |
+
|
| 366 |
+
def set_decoder(self, decoder):
|
| 367 |
+
self.model = decoder
|
| 368 |
+
|
| 369 |
+
def get_decoder(self):
|
| 370 |
+
return self.model
|
| 371 |
+
|
| 372 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 373 |
+
def prepare_inputs_for_generation(
|
| 374 |
+
self,
|
| 375 |
+
input_ids: torch.LongTensor = None,
|
| 376 |
+
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| 377 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 378 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 379 |
+
use_cache: bool = True,
|
| 380 |
+
logits_to_keep: Optional[int] = None,
|
| 381 |
+
**kwargs
|
| 382 |
+
):
|
| 383 |
+
# only keep the last token of `input_ids` if `past_key_values` is not empty.
|
| 384 |
+
if past_key_values is not None and len(past_key_values) > 0:
|
| 385 |
+
input_ids = input_ids[:, -1:]
|
| 386 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 387 |
+
if inputs_embeds is not None and len(past_key_values) == 0:
|
| 388 |
+
model_inputs = {'inputs_embeds': inputs_embeds}
|
| 389 |
+
else:
|
| 390 |
+
# The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
|
| 391 |
+
# recompiles graphs as the stride of the inputs is a guard.
|
| 392 |
+
# Ref: https://github.com/huggingface/transformers/pull/29114
|
| 393 |
+
# TODO: use `next_tokens` directly instead.
|
| 394 |
+
model_inputs = {'input_ids': input_ids.contiguous()}
|
| 395 |
+
|
| 396 |
+
if logits_to_keep is not None:
|
| 397 |
+
model_inputs['logits_to_keep'] = logits_to_keep
|
| 398 |
+
|
| 399 |
+
model_inputs.update({
|
| 400 |
+
'past_key_values': past_key_values,
|
| 401 |
+
'use_cache': use_cache,
|
| 402 |
+
'attention_mask': attention_mask,
|
| 403 |
+
})
|
| 404 |
+
return model_inputs
|
| 405 |
+
|
| 406 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 407 |
+
def forward(
|
| 408 |
+
self,
|
| 409 |
+
input_ids: torch.LongTensor = None,
|
| 410 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 411 |
+
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| 412 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 413 |
+
labels: Optional[torch.LongTensor] = None,
|
| 414 |
+
use_cache: Optional[bool] = None,
|
| 415 |
+
output_attentions: Optional[bool] = None,
|
| 416 |
+
output_hidden_states: Optional[bool] = None,
|
| 417 |
+
return_dict: Optional[bool] = None,
|
| 418 |
+
logits_to_keep: Optional[int] = 0,
|
| 419 |
+
**kwargs: Unpack[Any]
|
| 420 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 421 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 422 |
+
output_hidden_states = (
|
| 423 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 424 |
+
)
|
| 425 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 426 |
+
return_all_heads = self.training or ('output_dsmtp_logits' in kwargs and kwargs['output_dsmtp_logits'])
|
| 427 |
+
input_ids, all_labels = seq_to_dsmtp(input_ids, labels, n_future_tokens=self.config.n_future_tokens if labels is not None or return_all_heads else 1, model_seq_len=input_ids.shape[1])
|
| 428 |
+
|
| 429 |
+
outputs = self.model(
|
| 430 |
+
input_ids=input_ids,
|
| 431 |
+
attention_mask=attention_mask,
|
| 432 |
+
past_key_values=past_key_values,
|
| 433 |
+
inputs_embeds=inputs_embeds,
|
| 434 |
+
use_cache=use_cache,
|
| 435 |
+
output_attentions=output_attentions,
|
| 436 |
+
output_hidden_states=output_hidden_states,
|
| 437 |
+
return_dict=return_dict,
|
| 438 |
+
return_all_heads=return_all_heads,
|
| 439 |
+
lm_head=self.lm_head if return_all_heads and labels is None else None,
|
| 440 |
+
**kwargs
|
| 441 |
+
)
|
| 442 |
+
|
| 443 |
+
hidden_states = outputs[0]
|
| 444 |
+
n_heads_prediction = self.config.n_future_tokens
|
| 445 |
+
|
| 446 |
+
fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
|
| 447 |
+
all_logits = None if fuse_linear_and_cross_entropy else self.lm_head(hidden_states)
|
| 448 |
+
|
| 449 |
+
loss = None
|
| 450 |
+
if labels is not None:
|
| 451 |
+
B, n_heads, T, D = hidden_states.shape
|
| 452 |
+
loss = torch.zeros(1, device=hidden_states.device)
|
| 453 |
+
ntp_loss = torch.zeros(1, device=hidden_states.device)
|
| 454 |
+
mtp_loss = torch.zeros(1, device=hidden_states.device)
|
| 455 |
+
if getattr(self, 'criterion', None) is None:
|
| 456 |
+
if fuse_linear_and_cross_entropy:
|
| 457 |
+
criterion = FusedLinearCrossEntropyLoss()
|
| 458 |
+
elif self.config.fuse_cross_entropy:
|
| 459 |
+
criterion = FusedCrossEntropyLoss(inplace_backward=True)
|
| 460 |
+
else:
|
| 461 |
+
criterion = nn.CrossEntropyLoss()
|
| 462 |
+
else:
|
| 463 |
+
criterion = self.criterion
|
| 464 |
+
|
| 465 |
+
# Logits shape is (B, n_future_tokens, T, vocab_size)
|
| 466 |
+
all_labels = all_labels.to(hidden_states.device)
|
| 467 |
+
for i in range(n_heads_prediction):
|
| 468 |
+
current_labels = all_labels[:, i, :]
|
| 469 |
+
if fuse_linear_and_cross_entropy:
|
| 470 |
+
current_loss = criterion(hidden_states[:, i, :, :].contiguous(), current_labels.contiguous(), self.lm_head.weight, self.lm_head.bias)
|
| 471 |
+
else:
|
| 472 |
+
logits = all_logits[:, i, :, :]
|
| 473 |
+
current_loss = criterion(logits.contiguous().view(current_labels.numel(), -1), current_labels.reshape(-1))
|
| 474 |
+
if i == 0:
|
| 475 |
+
ntp_loss = current_loss
|
| 476 |
+
else:
|
| 477 |
+
mtp_loss += current_loss
|
| 478 |
+
loss += current_loss
|
| 479 |
+
else:
|
| 480 |
+
all_logits = all_logits.squeeze(1) # (B, T, vocab_size)
|
| 481 |
+
|
| 482 |
+
if not return_dict:
|
| 483 |
+
output = (all_logits,) + outputs[1:]
|
| 484 |
+
return (loss,) + output if loss is not None else output
|
| 485 |
+
|
| 486 |
+
return DSMTPLMOutputWithPast(
|
| 487 |
+
loss=loss,
|
| 488 |
+
ntp_loss=ntp_loss if loss is not None else None,
|
| 489 |
+
mtp_loss=mtp_loss if loss is not None else None,
|
| 490 |
+
logits=all_logits,
|
| 491 |
+
past_key_values=outputs.past_key_values,
|
| 492 |
+
hidden_states=outputs.hidden_states,
|
| 493 |
+
attentions=outputs.attentions,
|
| 494 |
+
)
|
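The training loss above is a sum over prediction heads: head 0 contributes `ntp_loss` and heads 1..n-1 accumulate into `mtp_loss`. A shape-level sketch of that accumulation, assuming `all_labels[:, i, :]` holds the targets for head i (which is what `seq_to_dsmtp` is expected to produce):

# Toy reproduction of the per-head loss bookkeeping in `forward` above.
import torch
import torch.nn as nn

B, n_heads, T, V = 2, 3, 16, 100
all_logits = torch.randn(B, n_heads, T, V)
all_labels = torch.randint(0, V, (B, n_heads, T))
criterion = nn.CrossEntropyLoss()

loss = ntp_loss = mtp_loss = torch.zeros(())
for i in range(n_heads):
    head_loss = criterion(all_logits[:, i].reshape(-1, V), all_labels[:, i].reshape(-1))
    if i == 0:
        ntp_loss = head_loss             # next-token term
    else:
        mtp_loss = mtp_loss + head_loss  # multi-token terms
    loss = loss + head_loss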
fla/models/transformer_mtp/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
|
| 4 |
+
|
| 5 |
+
from fla.models.transformer_mtp.configuration_transformer import MTPTransformerConfig
|
| 6 |
+
from fla.models.transformer_mtp.modeling_transformer import MTPTransformerForCausalLM, MTPTransformerModel
|
| 7 |
+
|
| 8 |
+
AutoConfig.register(MTPTransformerConfig.model_type, MTPTransformerConfig)
|
| 9 |
+
AutoModel.register(MTPTransformerConfig, MTPTransformerModel)
|
| 10 |
+
AutoModelForCausalLM.register(MTPTransformerConfig, MTPTransformerForCausalLM)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = ['MTPTransformerConfig', 'MTPTransformerForCausalLM', 'MTPTransformerModel']
|
fla/models/transformer_top/modeling_transformer.py
ADDED
|
@@ -0,0 +1,440 @@
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import warnings
|
| 7 |
+
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
import torch.nn.functional as F
|
| 12 |
+
import torch.utils.checkpoint
|
| 13 |
+
from dataclasses import dataclass
|
| 14 |
+
from transformers.generation import GenerationMixin
|
| 15 |
+
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
|
| 16 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 17 |
+
from transformers.utils import logging
|
| 18 |
+
from transformers.utils.deprecation import deprecate_kwarg
|
| 19 |
+
|
| 20 |
+
import triton
|
| 21 |
+
import triton.language as tl
|
| 22 |
+
|
| 23 |
+
from fla.layers.attn import Attention
|
| 24 |
+
from fla.models.transformer_top.configuration_transformer import TOPTransformerConfig
|
| 25 |
+
from fla.models.utils import Cache
|
| 26 |
+
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, FusedLinearListNetLoss
|
| 27 |
+
from fla.modules import GatedMLP as TransformerMLP
|
| 28 |
+
from fla.modules import RMSNorm
|
| 29 |
+
from fla.modules.seq_to_top import seq_to_top
|
| 30 |
+
|
| 31 |
+
if TYPE_CHECKING:
|
| 32 |
+
from transformers.processing_utils import Unpack
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
logger = logging.get_logger(__name__)
|
| 36 |
+
|
| 37 |
+
@dataclass
|
| 38 |
+
class TOPLMOutputWithPast(CausalLMOutputWithPast):
|
| 39 |
+
ntp_loss: Optional[torch.FloatTensor] = None
|
| 40 |
+
top_loss: Optional[torch.FloatTensor] = None
|
| 41 |
+
|
| 42 |
+
class TOPTransformerBlock(nn.Module):

    def __init__(self, config: TOPTransformerConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.attn = Attention(
            hidden_size=config.hidden_size,
            num_heads=config.num_heads,
            num_kv_heads=config.num_kv_heads,
            qkv_bias=config.qkv_bias,
            qk_norm=config.qk_norm,
            window_size=config.window_size,
            rope_theta=config.rope_theta,
            max_position_embeddings=config.max_position_embeddings,
            layer_idx=layer_idx
        )

        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = TransformerMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs: Unpack[Any]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:

        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attentions,)

        if use_cache:
            outputs += (past_key_values,)

        return outputs

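A note on the `fuse_norm` branch above: `self.mlp_norm(hidden_states, residual, True)` relies on fla's fused `RMSNorm` accepting a residual and a prenorm flag, returning both the normalized activations and the updated residual stream from one kernel. A minimal unfused sketch of what that call is assumed to compute, mirroring the `else:` branch:

def fused_prenorm_equivalent(norm, hidden_states, residual):
    # add the attention output into the residual stream, then normalize for the MLP;
    # the summed stream is carried forward as the new residual
    residual = residual + hidden_states
    return norm(residual), residual
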
class TOPTransformerPreTrainedModel(PreTrainedModel):

    config_class = TOPTransformerConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['TOPTransformerBlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = False,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version, which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 paper scheme:
            #   > A modified initialization which accounts for the accumulation on the residual path with model depth.
            #   > Scale the weights of residual layers at initialization by a factor of 1/√N where N is the # of
            #   > residual layers.
            #   > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special scaled initialization: there are 2 layer norms per TOPTransformer block.
                # Following PyTorch init, except scaled by 1/sqrt(2 * n_layer).
                # We need to reinit p since this code could be called multiple times;
                # having just `p *= scale` would repeatedly scale it down.
                nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                with torch.no_grad():
                    p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)

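The rescaling above divides each block's output projections (`o_proj`, `down_proj`) by sqrt(2N), since every layer contributes two residual branches (attention and MLP). A quick illustrative check of the factor, using a hypothetical depth rather than a value from this config:

import math

num_hidden_layers = 24  # hypothetical depth, for illustration only
scale = 1 / math.sqrt(2 * num_hidden_layers)  # num_residuals_per_layer = 2
print(f"residual projections scaled by {scale:.4f}")  # ~0.1443
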
class TOPTransformerModel(TOPTransformerPreTrainedModel):

    def __init__(
        self,
        config: TOPTransformerConfig
    ) -> TOPTransformerModel:
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([TOPTransformerBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Any]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        if output_attentions:
            warnings.warn(
                "`TOPTransformerModel` does not support outputting attention weights yet, so `output_attentions` is set to `False`."
            )
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        next_cache = None

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    **kwargs
                )
            else:
                layer_outputs = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    **kwargs
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )

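Because the forward pass above forces `use_cache=False` whenever gradient checkpointing is active, a training setup would toggle checkpointing through the standard `transformers` hook. A minimal sketch, assuming `model` is an instance of one of the classes in this file:

model.gradient_checkpointing_enable()  # sets self.gradient_checkpointing via PreTrainedModel
model.train()
# subsequent forward passes recompute activations layer by layer and emit the use_cache warning once
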
class TOPTransformerForCausalLM(TOPTransformerPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = TOPTransformerModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        if config.use_top_loss:
            self.top_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
            self.top_criterion = FusedLinearListNetLoss()
            self.top_window_size = config.top_window_size
        self.criterion = None
        self.pad_token_id = config.pad_token_id

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only keep the last token of `input_ids` if `past_key_values` is not empty
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and (past_key_values is None or len(past_key_values) == 0):
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Any]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
        logits = None if fuse_linear_and_cross_entropy else self.lm_head(hidden_states[:, -logits_to_keep:])

        loss = None
        ntp_loss = None
        top_loss = None
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            # Enable model parallelism
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            ntp_labels = labels[..., :hidden_states.shape[1]].contiguous()
            if fuse_linear_and_cross_entropy:
                ntp_loss = criterion(hidden_states, ntp_labels, self.lm_head.weight, self.lm_head.bias)
            else:
                ntp_loss = criterion(logits.view(ntp_labels.numel(), -1), ntp_labels.reshape(-1))

            if self.config.use_top_loss:
                top_labels = seq_to_top(
                    labels,
                    ctx_len=input_ids.shape[1],
                    vocab_size=self.vocab_size,
                    window_size=self.top_window_size,
                    pad_token_id=self.pad_token_id
                ).contiguous()
                top_loss = self.top_criterion(hidden_states, top_labels, self.top_head.weight, self.top_head.bias)
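                # Blend the next-token-prediction and TOP objectives. The weights (2 - ratio)
                # and ratio always sum to 2, so top_loss_ratio=0.5 reduces to the plain sum
                # ntp_loss + top_loss, while e.g. top_loss_ratio=0.25 yields
                # 1.5 * ntp_loss + 0.5 * top_loss.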
                if self.config.top_loss_ratio == 0.5:
                    loss = ntp_loss + top_loss
                else:
                    ratio = 2 * self.config.top_loss_ratio
                    loss = (2 - ratio) * ntp_loss + ratio * top_loss
            else:
                loss = ntp_loss

        if 'output_top_logits' in kwargs and kwargs['output_top_logits']:
            top_logits = self.top_head(hidden_states[:, -logits_to_keep:])
            logits = (logits, top_logits)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return TOPLMOutputWithPast(
            loss=loss,
            ntp_loss=ntp_loss,
            top_loss=top_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
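For reference, a minimal smoke test of the causal LM head above. This is a sketch only: it assumes `TOPTransformerConfig` accepts the field names referenced throughout this file and that its remaining defaults are sensible; the values below are illustrative, not recommended settings.

import torch

from fla.models.transformer_top.configuration_transformer import TOPTransformerConfig
from fla.models.transformer_top.modeling_transformer import TOPTransformerForCausalLM

config = TOPTransformerConfig(vocab_size=32000, hidden_size=256, num_hidden_layers=2, num_heads=4)
model = TOPTransformerForCausalLM(config)
input_ids = torch.randint(0, config.vocab_size, (2, 16))  # batch of 2, sequence length 16
out = model(input_ids=input_ids, labels=input_ids)
print(out.loss, out.ntp_loss, out.top_loss)  # joint, next-token, and TOP losses
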
fla/ops/based/__pycache__/parallel.cpython-312.pyc
ADDED
|
Binary file (22.6 kB). View file
|
|
|
fla/ops/common/__pycache__/chunk_delta_h.cpython-312.pyc
ADDED
|
Binary file (24 kB). View file
|
|
|
fla/ops/common/__pycache__/chunk_h.cpython-312.pyc
ADDED
|
Binary file (24.9 kB). View file
|
|
|
fla/ops/common/__pycache__/fused_recurrent.cpython-312.pyc
ADDED
|
Binary file (32.4 kB). View file
|
|
|
fla/ops/common/__pycache__/utils.cpython-312.pyc
ADDED
|
Binary file (4.45 kB). View file
|
|
|
fla/ops/gated_delta_rule/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (347 Bytes). View file
|
|
|
fla/ops/gated_delta_rule/__pycache__/chunk.cpython-312.pyc
ADDED
|
Binary file (14.4 kB). View file
|
|
|
fla/ops/gated_delta_rule/__pycache__/fused_recurrent.cpython-312.pyc
ADDED
|
Binary file (15.1 kB). View file
|
|
|
fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_A_bwd.cpython-312.pyc
ADDED
|
Binary file (30.6 kB). View file
|
|
|
fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_h_bwd.cpython-312.pyc
ADDED
|
Binary file (12.2 kB). View file
|
|
|