Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
- fla/models/__pycache__/__init__.cpython-312.pyc +0 -0
- fla/models/__pycache__/utils.cpython-312.pyc +0 -0
- fla/models/abc/configuration_abc.py +91 -0
- fla/models/abc/modeling_abc.py +418 -0
- fla/models/bitnet/__init__.py +13 -0
- fla/models/forgetting_transformer/__init__.py +16 -0
- fla/models/forgetting_transformer/configuration_forgetting_transformer.py +68 -0
- fla/models/gated_deltanet/__init__.py +12 -0
- fla/models/gated_deltanet/configuration_gated_deltanet.py +83 -0
- fla/models/gated_deltaproduct/__init__.py +14 -0
- fla/models/gla/__init__.py +13 -0
- fla/models/gla/configuration_gla.py +95 -0
- fla/models/gla/modeling_gla.py +417 -0
- fla/models/hgrn2/__init__.py +13 -0
- fla/models/linear_attn/__init__.py +12 -0
- fla/models/linear_attn/configuration_linear_attn.py +91 -0
- fla/models/linear_attn/modeling_linear_attn.py +406 -0
- fla/models/mamba/__init__.py +13 -0
- fla/models/mamba/modeling_mamba.py +843 -0
- fla/models/mamba2/configuration_mamba2.py +170 -0
- fla/models/mamba2/modeling_mamba2.py +1093 -0
- fla/models/nsa/configuration_nsa.py +75 -0
- fla/models/rwkv6/configuration_rwkv6.py +82 -0
- fla/models/rwkv6/modeling_rwkv6.py +480 -0
- fla/models/rwkv7/configuration_rwkv7.py +105 -0
- fla/models/rwkv7/modeling_rwkv7.py +505 -0
- fla/models/samba/__init__.py +13 -0
- fla/models/samba/configuration_samba.py +92 -0
- fla/models/samba/modeling_samba.py +413 -0
- fla/models/transformer/modeling_transformer.py +406 -0
- fla/models/transformer_mtp/configuration_transformer.py +76 -0
- fla/ops/attn/__pycache__/parallel.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/__init__.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/chunk_h.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/chunk_o.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/chunk_scaled_dot_kkt.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/fused_recurrent.cpython-312.pyc +0 -0
- fla/ops/common/__pycache__/utils.cpython-312.pyc +0 -0
- fla/ops/delta_rule/__pycache__/__init__.cpython-312.pyc +0 -0
- fla/ops/delta_rule/__pycache__/fused_chunk.cpython-312.pyc +0 -0
- fla/ops/delta_rule/__pycache__/fused_recurrent.cpython-312.pyc +0 -0
- fla/ops/delta_rule/__pycache__/wy_fast.cpython-312.pyc +0 -0
- fla/ops/forgetting_attn/__pycache__/__init__.cpython-312.pyc +0 -0
- fla/ops/forgetting_attn/__pycache__/parallel.cpython-312.pyc +0 -0
- fla/ops/gated_delta_rule/__pycache__/fused_recurrent.cpython-312.pyc +0 -0
- fla/ops/generalized_delta_rule/dplr/__pycache__/__init__.cpython-312.pyc +0 -0
- fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_A_fwd.cpython-312.pyc +0 -0
- fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_h_fwd.cpython-312.pyc +0 -0
- fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_o_bwd.cpython-312.pyc +0 -0
- fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_o_fwd.cpython-312.pyc +0 -0
fla/models/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (3.07 kB)
fla/models/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (6.65 kB)
fla/models/abc/configuration_abc.py
ADDED
@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class ABCConfig(PretrainedConfig):

    model_type = 'abc'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        hidden_size: int = 2048,
        gate_low_rank_dim: int = 16,
        clamp_min: float = -32,
        clamp_max: float = 32,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 4,
        num_slots: Optional[int] = 64,
        use_short_conv: bool = False,
        conv_size: int = 4,
        exapnd_k: float = 0.5,
        exapnd_v: float = 1,
        hidden_act: str = "swish",
        max_position_embeddings: int = 2048,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_rope: bool = True,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs
    ):
        self.hidden_size = hidden_size
        self.gate_low_rank_dim = gate_low_rank_dim
        self.clamp_min = clamp_min
        self.clamp_max = clamp_max
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_slots = num_slots
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.expand_k = exapnd_k
        self.expand_v = exapnd_v
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_rope = use_rope
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
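For reference, a minimal usage sketch of the configuration above (values are illustrative, not the library defaults); the hybrid `attn` dict must carry `layers` and `num_heads`, and the remaining keys are filled in by the constructor exactly as in the validation block shown in the diff:

from fla.models.abc.configuration_abc import ABCConfig

# Small, illustrative config; layers 1 and 3 fall back to standard softmax attention.
config = ABCConfig(
    hidden_size=512,
    num_hidden_layers=4,
    num_heads=4,
    num_slots=32,
    attn={'layers': [1, 3], 'num_heads': 8},
)
assert config.model_type == 'abc'
assert config.attn['num_kv_heads'] == 8   # defaulted to attn['num_heads']
assert config.attn['rope_theta'] == 10000.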
fla/models/abc/modeling_abc.py
ADDED
@@ -0,0 +1,418 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.abc import ABCAttention
from fla.layers.attn import Attention
from fla.models.abc.configuration_abc import ABCConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as ABCMLP
from fla.modules import RMSNorm

logger = logging.get_logger(__name__)

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack


class ABCBlock(nn.Module):
    def __init__(self, config: ABCConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = ABCAttention(
                hidden_size=config.hidden_size,
                expand_k=config.expand_k,
                expand_v=config.expand_v,
                num_heads=config.num_heads,
                num_slots=config.num_slots,
                use_short_conv=config.use_short_conv,
                conv_size=config.conv_size,
                gate_fn=config.hidden_act,
                elementwise_affine=config.elementwise_affine,
                norm_eps=config.norm_eps,
                use_rope=config.use_rope,
                clamp_min=config.clamp_min,
                clamp_max=config.clamp_max,
                fuse_norm=config.fuse_norm,
                layer_idx=layer_idx
            )
        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = ABCMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[Dict]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:

        residual = hidden_states

        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class ABCPreTrainedModel(PreTrainedModel):

    config_class = ABCConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['ABCBlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        prenorm_residual_strategy: Optional[str] = 'rescale',
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if prenorm_residual_strategy is not None:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                if prenorm_residual_strategy == 'rescale':
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
                elif prenorm_residual_strategy == 'zero':
                    nn.init.zeros_(p)
                else:
                    raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")


class ABCModel(ABCPreTrainedModel):

    def __init__(self, config: ABCConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([ABCBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`ABCModel` does not `output_attentions` now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class ABCForCausalLM(ABCPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = ABCModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only last token for `inputs_ids` if the `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
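A minimal forward-pass sketch for the model defined above, assuming a CUDA machine with the fla Triton kernels available (the linear-attention ops generally do not run on CPU); the sizes below are illustrative only:

import torch
from fla.models.abc.configuration_abc import ABCConfig
from fla.models.abc.modeling_abc import ABCForCausalLM

config = ABCConfig(hidden_size=256, num_hidden_layers=2, num_heads=4, num_slots=16, vocab_size=1000)
model = ABCForCausalLM(config).cuda().bfloat16()

input_ids = torch.randint(0, config.vocab_size, (2, 64), device='cuda')
# Labels are shifted inside `forward`, so passing input_ids as labels is fine here.
# In training mode with fuse_cross_entropy=True the fused linear + cross-entropy
# path is taken and `logits` is left as None.
outputs = model(input_ids=input_ids, labels=input_ids)
print(outputs.loss)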
fla/models/bitnet/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.bitnet.configuration_bitnet import BitNetConfig
from fla.models.bitnet.modeling_bitnet import BitNetForCausalLM, BitNetModel

AutoConfig.register(BitNetConfig.model_type, BitNetConfig)
AutoModel.register(BitNetConfig, BitNetModel)
AutoModelForCausalLM.register(BitNetConfig, BitNetForCausalLM)


__all__ = ['BitNetConfig', 'BitNetForCausalLM', 'BitNetModel']
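The `register` calls above are what make these architectures reachable through the standard `transformers` Auto classes; a small sketch of the intended effect (importing the package runs the registration as a side effect, and the config values are illustrative):

from transformers import AutoConfig, AutoModelForCausalLM

from fla.models.bitnet import BitNetConfig  # import side effect: Auto* registration

config = AutoConfig.for_model('bitnet', hidden_size=256, num_hidden_layers=2)
assert isinstance(config, BitNetConfig)
model = AutoModelForCausalLM.from_config(config)  # builds a BitNetForCausalLM
print(type(model).__name__)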
fla/models/forgetting_transformer/__init__.py
ADDED
@@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.forgetting_transformer.configuration_forgetting_transformer import ForgettingTransformerConfig
from fla.models.forgetting_transformer.modeling_forgetting_transformer import (
    ForgettingTransformerForCausalLM,
    ForgettingTransformerModel
)

AutoConfig.register(ForgettingTransformerConfig.model_type, ForgettingTransformerConfig)
AutoModel.register(ForgettingTransformerConfig, ForgettingTransformerModel)
AutoModelForCausalLM.register(ForgettingTransformerConfig, ForgettingTransformerForCausalLM)


__all__ = ['ForgettingTransformerConfig', 'ForgettingTransformerForCausalLM', 'ForgettingTransformerModel']
fla/models/forgetting_transformer/configuration_forgetting_transformer.py
ADDED
@@ -0,0 +1,68 @@
# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class ForgettingTransformerConfig(PretrainedConfig):

    model_type = 'forgetting_transformer'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        hidden_size: int = 2048,
        num_hidden_layers: int = 24,
        num_heads: int = 32,
        num_kv_heads: Optional[int] = None,
        qkv_bias: bool = False,
        qk_norm: bool = False,
        window_size: Optional[int] = None,
        use_output_gate: bool = False,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        initializer_range: float = 0.006,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: Optional[int] = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.qkv_bias = qkv_bias
        self.qk_norm = qk_norm
        self.window_size = window_size
        self.use_output_gate = use_output_gate
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act

        self.initializer_range = initializer_range
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_cache = use_cache

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
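Since the class above is a regular `PretrainedConfig`, it round-trips through `config.json` like any other Hugging Face config; a brief sketch (the path and sizes are illustrative):

from fla.models.forgetting_transformer import ForgettingTransformerConfig

config = ForgettingTransformerConfig(hidden_size=512, num_hidden_layers=4, num_heads=8)
config.save_pretrained('/tmp/fox_config')     # writes /tmp/fox_config/config.json
reloaded = ForgettingTransformerConfig.from_pretrained('/tmp/fox_config')
assert reloaded.hidden_size == 512
assert reloaded.model_type == 'forgetting_transformer'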
fla/models/gated_deltanet/__init__.py
ADDED
@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.gated_deltanet.configuration_gated_deltanet import GatedDeltaNetConfig
from fla.models.gated_deltanet.modeling_gated_deltanet import GatedDeltaNetForCausalLM, GatedDeltaNetModel

AutoConfig.register(GatedDeltaNetConfig.model_type, GatedDeltaNetConfig)
AutoModel.register(GatedDeltaNetConfig, GatedDeltaNetModel)
AutoModelForCausalLM.register(GatedDeltaNetConfig, GatedDeltaNetForCausalLM)

__all__ = ['GatedDeltaNetConfig', 'GatedDeltaNetForCausalLM', 'GatedDeltaNetModel']
fla/models/gated_deltanet/configuration_gated_deltanet.py
ADDED
@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class GatedDeltaNetConfig(PretrainedConfig):
    model_type = 'gated_deltanet'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        attn_mode: str = "chunk",
        hidden_size: int = 2048,
        expand_v: int = 2,
        use_gate: bool = True,
        use_short_conv: bool = True,
        conv_size: int = 4,
        head_dim: int = 256,
        num_heads: int = 6,
        max_position_embeddings: int = 2048,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        num_hidden_layers: int = 21,
        norm_eps: float = 1e-6,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs
    ):
        self.attn_mode = attn_mode
        self.hidden_size = hidden_size
        self.expand_v = expand_v
        self.use_gate = use_gate
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.head_dim = head_dim
        self.num_heads = num_heads
        self.max_position_embeddings = max_position_embeddings

        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_hidden_layers = num_hidden_layers
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
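A hedged generation sketch for a randomly initialized Gated DeltaNet built from the configuration above, again assuming a GPU environment with the fla kernels installed; the model classes are the ones exported by the package `__init__` shown earlier, and the shapes are illustrative:

import torch
from fla.models.gated_deltanet import GatedDeltaNetConfig, GatedDeltaNetForCausalLM

config = GatedDeltaNetConfig(
    hidden_size=256, num_hidden_layers=2, num_heads=4, head_dim=64, vocab_size=1000,
)
model = GatedDeltaNetForCausalLM(config).cuda().bfloat16().eval()

prompt = torch.randint(0, config.vocab_size, (1, 16), device='cuda')
with torch.no_grad():
    # The recurrent state is carried in `past_key_values` (a fla Cache), so
    # decoding with use_cache=True only feeds the last token at each step.
    out = model.generate(prompt, max_new_tokens=8, use_cache=True)
print(out.shape)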
fla/models/gated_deltaproduct/__init__.py
ADDED
@@ -0,0 +1,14 @@
from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.gated_deltaproduct.configuration_gated_deltaproduct import GatedDeltaProductConfig
from fla.models.gated_deltaproduct.modeling_gated_deltaproduct import GatedDeltaProductForCausalLM, GatedDeltaProductModel

AutoConfig.register(GatedDeltaProductConfig.model_type, GatedDeltaProductConfig)
AutoModel.register(GatedDeltaProductConfig, GatedDeltaProductModel)
AutoModelForCausalLM.register(GatedDeltaProductConfig, GatedDeltaProductForCausalLM)

__all__ = [
    "GatedDeltaProductConfig",
    "GatedDeltaProductForCausalLM",
    "GatedDeltaProductModel",
]
fla/models/gla/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.gla.configuration_gla import GLAConfig
from fla.models.gla.modeling_gla import GLAForCausalLM, GLAModel

AutoConfig.register(GLAConfig.model_type, GLAConfig)
AutoModel.register(GLAConfig, GLAModel)
AutoModelForCausalLM.register(GLAConfig, GLAForCausalLM)


__all__ = ['GLAConfig', 'GLAForCausalLM', 'GLAModel']
fla/models/gla/configuration_gla.py
ADDED
@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class GLAConfig(PretrainedConfig):

    model_type = 'gla'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        hidden_size: int = 2048,
        expand_k: int = 0.5,
        expand_v: int = 1,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 4,
        num_kv_heads: Optional[int] = None,
        feature_map: Optional[str] = None,
        attn_mode: str = "chunk",
        use_short_conv: bool = False,
        conv_size: int = 4,
        use_output_gate: bool = True,
        clamp_min: Optional[float] = None,
        hidden_act: str = "swish",
        max_position_embeddings: int = 2048,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_gk: bool = True,
        use_gv: bool = False,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs
    ):
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.feature_map = feature_map
        self.attn_mode = attn_mode
        self.use_short_conv = use_short_conv
        self.conv_size = conv_size
        self.use_output_gate = use_output_gate
        self.clamp_min = clamp_min
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_gk = use_gk
        self.use_gv = use_gv
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
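The `attn` validation block above enforces a minimal schema for hybrid attention layers; a tiny sketch of the resulting behaviour (layer indices and head counts are illustrative):

from fla.models.gla import GLAConfig

# Well-formed: softmax attention in layers 0 and 12, GLA elsewhere.
ok = GLAConfig(attn={'layers': [0, 12], 'num_heads': 16})
assert ok.attn['qkv_bias'] is False and ok.attn['window_size'] is None

# Missing 'num_heads' is rejected before the config is built.
try:
    GLAConfig(attn={'layers': [0]})
except ValueError as err:
    print(err)  # Number of heads must be provided to initialize hybrid attention layers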
fla/models/gla/modeling_gla.py
ADDED
@@ -0,0 +1,417 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.gla import GatedLinearAttention
from fla.models.gla.configuration_gla import GLAConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as GLAMLP
from fla.modules import RMSNorm

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class GLABlock(nn.Module):
    def __init__(self, config: GLAConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = GatedLinearAttention(
                mode=config.attn_mode,
                hidden_size=config.hidden_size,
                expand_k=config.expand_k,
                expand_v=config.expand_v,
                num_heads=config.num_heads,
                num_kv_heads=config.num_kv_heads,
                feature_map=config.feature_map,
                use_short_conv=config.use_short_conv,
                conv_size=config.conv_size,
                use_output_gate=config.use_output_gate,
                gate_fn=config.hidden_act,
                elementwise_affine=config.elementwise_affine,
                norm_eps=config.norm_eps,
                clamp_min=config.clamp_min,
                fuse_norm=config.fuse_norm,
                layer_idx=layer_idx
            )
        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = GLAMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs: Unpack[Dict]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class GLAPreTrainedModel(PreTrainedModel):

    config_class = GLAConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['GLABlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        prenorm_residual_strategy: Optional[str] = 'rescale',
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if prenorm_residual_strategy is not None:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                if prenorm_residual_strategy == 'rescale':
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
                elif prenorm_residual_strategy == 'zero':
                    nn.init.zeros_(p)
                else:
                    raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")


class GLAModel(GLAPreTrainedModel):

    def __init__(self, config: GLAConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([GLABlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`GLAModel` does not `output_attentions` now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class GLAForCausalLM(GLAPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = GLAModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
|
| 309 |
+
f"which is not supported for {self.__class__.__name__}. "
|
| 310 |
+
f"Try another generation strategy instead. "
|
| 311 |
+
f"For the available generation strategies, check this doc: "
|
| 312 |
+
f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
|
| 313 |
+
)
|
| 314 |
+
else:
|
| 315 |
+
raise exception
|
| 316 |
+
|
| 317 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 318 |
+
def prepare_inputs_for_generation(
|
| 319 |
+
self,
|
| 320 |
+
input_ids: torch.LongTensor = None,
|
| 321 |
+
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| 322 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 323 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 324 |
+
use_cache: bool = True,
|
| 325 |
+
logits_to_keep: Optional[int] = None,
|
| 326 |
+
**kwargs
|
| 327 |
+
):
|
| 328 |
+
# only last token for `inputs_ids` if the `past_key_values` is not empty.
|
| 329 |
+
if past_key_values is not None and len(past_key_values) > 0:
|
| 330 |
+
input_ids = input_ids[:, -1:]
|
| 331 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 332 |
+
if inputs_embeds is not None and len(past_key_values) == 0:
|
| 333 |
+
model_inputs = {'inputs_embeds': inputs_embeds}
|
| 334 |
+
else:
|
| 335 |
+
# The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
|
| 336 |
+
# recompiles graphs as the stride of the inputs is a guard.
|
| 337 |
+
# Ref: https://github.com/huggingface/transformers/pull/29114
|
| 338 |
+
# TODO: use `next_tokens` directly instead.
|
| 339 |
+
model_inputs = {'input_ids': input_ids.contiguous()}
|
| 340 |
+
|
| 341 |
+
if logits_to_keep is not None:
|
| 342 |
+
model_inputs['logits_to_keep'] = logits_to_keep
|
| 343 |
+
|
| 344 |
+
model_inputs.update({
|
| 345 |
+
'past_key_values': past_key_values,
|
| 346 |
+
'use_cache': use_cache,
|
| 347 |
+
'attention_mask': attention_mask,
|
| 348 |
+
})
|
| 349 |
+
return model_inputs
|
| 350 |
+
|
| 351 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 352 |
+
def forward(
|
| 353 |
+
self,
|
| 354 |
+
input_ids: torch.LongTensor = None,
|
| 355 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 356 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 357 |
+
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| 358 |
+
labels: Optional[torch.LongTensor] = None,
|
| 359 |
+
use_cache: Optional[bool] = None,
|
| 360 |
+
output_attentions: Optional[bool] = None,
|
| 361 |
+
output_hidden_states: Optional[bool] = None,
|
| 362 |
+
return_dict: Optional[bool] = None,
|
| 363 |
+
logits_to_keep: Optional[int] = 0,
|
| 364 |
+
**kwargs: Unpack[Dict]
|
| 365 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 366 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 367 |
+
output_hidden_states = (
|
| 368 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 369 |
+
)
|
| 370 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 371 |
+
|
| 372 |
+
outputs = self.model(
|
| 373 |
+
input_ids=input_ids,
|
| 374 |
+
attention_mask=attention_mask,
|
| 375 |
+
inputs_embeds=inputs_embeds,
|
| 376 |
+
past_key_values=past_key_values,
|
| 377 |
+
use_cache=use_cache,
|
| 378 |
+
output_attentions=output_attentions,
|
| 379 |
+
output_hidden_states=output_hidden_states,
|
| 380 |
+
return_dict=return_dict,
|
| 381 |
+
**kwargs
|
| 382 |
+
)
|
| 383 |
+
|
| 384 |
+
hidden_states = outputs[0]
|
| 385 |
+
fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
|
| 386 |
+
|
| 387 |
+
loss, logits = None, None
|
| 388 |
+
if not fuse_linear_and_cross_entropy or labels is None:
|
| 389 |
+
logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
|
| 390 |
+
if labels is not None:
|
| 391 |
+
if getattr(self, 'criterion', None) is None:
|
| 392 |
+
if fuse_linear_and_cross_entropy:
|
| 393 |
+
criterion = FusedLinearCrossEntropyLoss()
|
| 394 |
+
elif self.config.fuse_cross_entropy:
|
| 395 |
+
criterion = FusedCrossEntropyLoss(inplace_backward=True)
|
| 396 |
+
else:
|
| 397 |
+
criterion = nn.CrossEntropyLoss()
|
| 398 |
+
else:
|
| 399 |
+
criterion = self.criterion
|
| 400 |
+
labels = labels.to(hidden_states.device)
|
| 401 |
+
labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
|
| 402 |
+
if fuse_linear_and_cross_entropy:
|
| 403 |
+
loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
|
| 404 |
+
else:
|
| 405 |
+
loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))
|
| 406 |
+
|
| 407 |
+
if not return_dict:
|
| 408 |
+
output = (logits,) + outputs[1:]
|
| 409 |
+
return (loss,) + output if loss is not None else output
|
| 410 |
+
|
| 411 |
+
return CausalLMOutputWithPast(
|
| 412 |
+
loss=loss,
|
| 413 |
+
logits=logits,
|
| 414 |
+
past_key_values=outputs.past_key_values,
|
| 415 |
+
hidden_states=outputs.hidden_states,
|
| 416 |
+
attentions=outputs.attentions,
|
| 417 |
+
)
|
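For orientation, a minimal training-loss sketch for the classes above (illustrative only: it assumes a CUDA device with the fla Triton kernels installed, and every size is made up):

```python
import torch

from fla.models.gla import GLAConfig, GLAForCausalLM

config = GLAConfig(hidden_size=128, num_hidden_layers=2, num_heads=2, vocab_size=1000)
model = GLAForCausalLM(config).cuda()

input_ids = torch.randint(0, 1000, (1, 16), device='cuda')
# `forward` shifts `labels` left by one internally (see above), so reusing
# `input_ids` as `labels` is the standard causal-LM setup.
outputs = model(input_ids=input_ids, labels=input_ids)
print(outputs.loss)
```
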
fla/models/hgrn2/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.hgrn2.configuration_hgrn2 import HGRN2Config
from fla.models.hgrn2.modeling_hgrn2 import HGRN2ForCausalLM, HGRN2Model

AutoConfig.register(HGRN2Config.model_type, HGRN2Config)
AutoModel.register(HGRN2Config, HGRN2Model)
AutoModelForCausalLM.register(HGRN2Config, HGRN2ForCausalLM)


__all__ = ['HGRN2Config', 'HGRN2ForCausalLM', 'HGRN2Model']

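The three `register()` calls are what let the Hugging Face `Auto*` factories resolve the custom `hgrn2` model type. A brief sketch of the effect (illustrative):

```python
from transformers import AutoConfig, AutoModelForCausalLM

import fla.models.hgrn2  # noqa: F401  (importing the module runs the register() calls above)

config = AutoConfig.for_model('hgrn2')            # resolves to HGRN2Config
model = AutoModelForCausalLM.from_config(config)  # resolves to HGRN2ForCausalLM
```
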
fla/models/linear_attn/__init__.py
ADDED
@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.linear_attn.configuration_linear_attn import LinearAttentionConfig
from fla.models.linear_attn.modeling_linear_attn import LinearAttentionForCausalLM, LinearAttentionModel

AutoConfig.register(LinearAttentionConfig.model_type, LinearAttentionConfig)
AutoModel.register(LinearAttentionConfig, LinearAttentionModel)
AutoModelForCausalLM.register(LinearAttentionConfig, LinearAttentionForCausalLM)

__all__ = ['LinearAttentionConfig', 'LinearAttentionForCausalLM', 'LinearAttentionModel']

fla/models/linear_attn/configuration_linear_attn.py
ADDED
@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class LinearAttentionConfig(PretrainedConfig):

    model_type = 'linear_attn'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        attn_mode: str = "fused_chunk",
        hidden_size: int = 2048,
        expand_k: int = 1,
        expand_v: int = 1,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 4,
        num_kv_heads: Optional[int] = None,
        feature_map: str = "elementwise_product",
        tie_feature_map_qk: bool = False,
        norm_q: bool = False,
        norm_k: bool = False,
        norm_feature_map: bool = False,
        hidden_act: str = "swish",
        max_position_embeddings: int = 2048,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs
    ):
        self.attn_mode = attn_mode
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.feature_map = feature_map
        self.tie_feature_map_qk = tie_feature_map_qk
        self.norm_q = norm_q
        self.norm_k = norm_k
        self.norm_feature_map = norm_feature_map
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

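The `attn` dict above turns selected layers into standard softmax-attention layers, yielding a hybrid model. Given the validation logic, a minimal valid value looks like this (layer indices and head counts are made up):

```python
from fla.models.linear_attn import LinearAttentionConfig

# Layers 0 and 11 become softmax attention; `num_kv_heads`, `qkv_bias`,
# `window_size` and `rope_theta` fall back to the defaults filled in above.
config = LinearAttentionConfig(
    num_hidden_layers=12,
    attn={'layers': [0, 11], 'num_heads': 8},
)
print(config.attn['num_kv_heads'])  # 8, copied from num_heads
```
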
fla/models/linear_attn/modeling_linear_attn.py
ADDED
@@ -0,0 +1,406 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.linear_attn import LinearAttention
from fla.models.linear_attn.configuration_linear_attn import LinearAttentionConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as LinearAttentionMLP
from fla.modules import RMSNorm

logger = logging.get_logger(__name__)


class LinearAttentionBlock(nn.Module):
    def __init__(self, config: LinearAttentionConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = LinearAttention(
                mode=config.attn_mode,
                hidden_size=config.hidden_size,
                expand_k=config.expand_k,
                expand_v=config.expand_v,
                num_heads=config.num_heads,
                num_kv_heads=config.num_kv_heads,
                feature_map=config.feature_map,
                tie_feature_map_qk=config.tie_feature_map_qk,
                norm_q=config.norm_q,
                norm_k=config.norm_k,
                do_feature_map_norm=config.norm_feature_map,
                elementwise_affine=config.elementwise_affine,
                norm_eps=config.norm_eps,
                layer_idx=layer_idx
            )
        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = LinearAttentionMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        # currently not supported
        attentions, past_key_values = None, None
        hidden_states = self.attn_norm(hidden_states)
        hidden_states = self.attn(hidden_states=hidden_states, **kwargs)
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


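# Note (added for clarity, not part of the original file): when `fuse_norm` is set,
# `self.mlp_norm(hidden_states, residual, True)` folds the residual add and the RMSNorm
# into one fused kernel and returns both the normalized activations and the updated
# residual stream. The `else` branch spells out the same two steps separately:
#
#     residual = residual + attn_out    # update the residual stream
#     mlp_in   = rms_norm(residual)     # normalize the input to the MLP
#
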
class LinearAttentionPreTrainedModel(PreTrainedModel):

    config_class = LinearAttentionConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['LinearAttentionBlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        prenorm_residual_strategy: Optional[str] = 'rescale',
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if prenorm_residual_strategy is not None:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                if prenorm_residual_strategy == 'rescale':
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
                elif prenorm_residual_strategy == 'zero':
                    nn.init.zeros_(p)
                else:
                    raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")


class LinearAttentionModel(LinearAttentionPreTrainedModel):

    def __init__(self, config: LinearAttentionConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([LinearAttentionBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn(
                "`LinearAttentionModel` does not support output attention weights now, "
                "so `output_attentions` is set to `False`."
            )
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class LinearAttentionForCausalLM(LinearAttentionPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = LinearAttentionModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only last token for `inputs_ids` if the `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

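A minimal decoding sketch for the classes above (illustrative only: it assumes a CUDA device with the fla kernels installed, and every size is made up):

```python
import torch

from fla.models.linear_attn import LinearAttentionConfig, LinearAttentionForCausalLM

config = LinearAttentionConfig(hidden_size=256, num_hidden_layers=2, num_heads=4, vocab_size=1000)
model = LinearAttentionForCausalLM(config).cuda().eval()

prompt = torch.randint(0, 1000, (1, 8), device='cuda')
# Once the prompt is consumed, `prepare_inputs_for_generation` feeds only the last
# token per step and carries the recurrent state through `past_key_values`.
out = model.generate(prompt, max_new_tokens=16, use_cache=True)
print(out.shape)  # torch.Size([1, 24])
```
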
fla/models/mamba/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.mamba.configuration_mamba import MambaConfig
from fla.models.mamba.modeling_mamba import MambaBlock, MambaForCausalLM, MambaModel

AutoConfig.register(MambaConfig.model_type, MambaConfig, True)
AutoModel.register(MambaConfig, MambaModel, True)
AutoModelForCausalLM.register(MambaConfig, MambaForCausalLM, True)


__all__ = ['MambaConfig', 'MambaForCausalLM', 'MambaModel', 'MambaBlock']

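Unlike the earlier `register()` calls, these pass a third positional argument, which in `transformers` is `exist_ok`: `mamba` is already a built-in model type, so `exist_ok=True` lets these mappings replace the stock ones instead of raising. The keyword form, spelled out (illustrative):

```python
from transformers import AutoConfig

from fla.models.mamba.configuration_mamba import MambaConfig

# Without exist_ok=True, re-registering the built-in 'mamba' type raises a ValueError.
AutoConfig.register(MambaConfig.model_type, MambaConfig, exist_ok=True)
```
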
fla/models/mamba/modeling_mamba.py
ADDED
@@ -0,0 +1,843 @@
# coding=utf-8
# Copyright 2024 state-spaces/mamba org and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MAMBA model."""

import math
import warnings
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from transformers.generation import GenerationMixin
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.models.mamba.configuration_mamba import MambaConfig
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, RMSNorm

logger = logging.get_logger(__name__)


with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    try:
        from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn
        from mamba_ssm.ops.triton.selective_state_update import selective_state_update
    except ImportError:
        selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None

try:
    from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
except ImportError:
    causal_conv1d_update, causal_conv1d_fn = None, None
is_fast_path_available = all((
    selective_state_update,
    selective_scan_fn,
    causal_conv1d_fn,
    causal_conv1d_update,
    mamba_inner_fn
))


class MambaCache:
    """
    Cache for the Mamba model, which has no attention mechanism and no key/value states.

    Arguments:
        config (`PretrainedConfig`):
            The configuration file defining the shape-related attributes required to initialize the static cache.
        batch_size (`int`):
            The batch size with which the model will be used. Note that a new instance must be instantiated if a
            smaller batch size is used.
        dtype (`torch.dtype`, *optional*, defaults to `torch.float16`):
            The default `dtype` to use when initializing the layer.
        device (`torch.device` or `str`, *optional*):
            The device on which the cache should be initialized. Should be the same as the layer.

    Attributes:
        dtype (`torch.dtype`):
            The default `dtype` used when initializing the cache.
        intermediate_size (`int`):
            Model's intermediate_size taken from config.
        ssm_state_size (`int`):
            Model's state_size taken from config.
        conv_kernel_size (`int`):
            Model's convolution kernel size taken from config.
        conv_states (`torch.Tensor`):
            A tensor of shape `[layer_idx, batch_size, intermediate_size, conv_kernel_size]` that holds convolutional states.
        ssm_states (`torch.Tensor`):
            A tensor of shape `[layer_idx, batch_size, intermediate_size, ssm_state_size]` that holds ssm states.

    Example:

        ```python
        >>> from transformers import AutoTokenizer, MambaForCausalLM, MambaCache

        >>> model = MambaForCausalLM.from_pretrained("state-spaces/mamba-130m-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("state-spaces/mamba-130m-hf")

        >>> inputs = tokenizer(text="My name is Mamba", return_tensors="pt")

        >>> # Prepare a cache class and pass it to model's forward
        >>> past_key_values = MambaCache(config=model.config, batch_size=1, device=model.device, dtype=model.dtype)
        >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
        >>> outputs.past_key_values
        MambaCache()
        ```
    """

    # TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well.
    def __init__(
        self,
        config: PretrainedConfig,
        batch_size: int = None,
        dtype: torch.dtype = torch.float16,
        device: Optional[Union[torch.device, str]] = None,
        max_batch_size: Optional[int] = None,
    ):
        if max_batch_size is not None:
            logger.warning_once(
                f"The 'max_batch_size' argument of {self.__class__.__name__} is deprecated and will be removed in "
                "v4.46. Use the more precisely named 'batch_size' argument instead."
            )
        self.dtype = dtype
        self.batch_size = batch_size or max_batch_size
        self.intermediate_size = config.intermediate_size
        self.ssm_state_size = config.state_size
        self.conv_kernel_size = config.conv_kernel

        self.conv_states: torch.Tensor = torch.zeros(
            config.num_hidden_layers,
            self.batch_size,
            self.intermediate_size,
            self.conv_kernel_size,
            device=device,
            dtype=dtype,
        )
        self.ssm_states: torch.Tensor = torch.zeros(
            config.num_hidden_layers,
            self.batch_size,
            self.intermediate_size,
            self.ssm_state_size,
            device=device,
            dtype=dtype,
        )

        torch._dynamo.mark_static_address(self.conv_states)
        torch._dynamo.mark_static_address(self.ssm_states)

    def update_conv_state(
        self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor
    ) -> torch.Tensor:
        conv_state = self.conv_states[layer_idx]
        cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)

        conv_state = conv_state.roll(shifts=-1, dims=-1)
        conv_state[:, :, cache_position] = new_conv_state.to(conv_state.device)
        self.conv_states[layer_idx].zero_()
        self.conv_states[layer_idx] += conv_state
        return self.conv_states[layer_idx]

    def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor):
        self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device)
        return self.ssm_states[layer_idx]

    def reset(self):
        self.conv_states.zero_()
        self.ssm_states.zero_()


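# Note (added for clarity, not part of the original file): during single-token decoding,
# `update_conv_state` treats each layer's conv state as a fixed-size ring buffer holding
# the last `conv_kernel` inputs. For a state of shape [batch, intermediate_size,
# conv_kernel], the update is roughly:
#
#     state = state.roll(shifts=-1, dims=-1)    # shift the window left by one step
#     state[:, :, -1] = new_input               # write the newest timestep
#
# The `zero_()` / `+=` sequence above writes the result back in place, so the tensor
# keeps the static address promised to the compiler by `mark_static_address`.
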
class MambaMixer(nn.Module):
    """
    Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)
    """

    def __init__(self, config: MambaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.state_size
        self.conv_kernel_size = config.conv_kernel
        self.intermediate_size = config.intermediate_size
        self.time_step_rank = int(config.time_step_rank)
        self.layer_idx = layer_idx
        self.use_conv_bias = config.use_conv_bias
        self.conv1d = nn.Conv1d(
            in_channels=self.intermediate_size,
            out_channels=self.intermediate_size,
            bias=config.use_conv_bias,
            kernel_size=config.conv_kernel,
            groups=self.intermediate_size,
            padding=config.conv_kernel - 1,
        )

        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]

        # projection of the input hidden states
        self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.use_bias)
        # selective projection used to make dt, B and C input-dependent
        self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
        # time step projection (discretization)
        self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)

        # S4D real initialization. These are not discretized!
        # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
        A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
        A = A.expand(self.intermediate_size, -1).contiguous()

        self.A_log = nn.Parameter(torch.log(A))
        self.D = nn.Parameter(torch.ones(self.intermediate_size))
        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
        self.use_bias = config.use_bias

        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of "
                "`(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
                " is None. Falling back to the naive implementation. "
                "To install follow https://github.com/state-spaces/mamba/#installation and"
                " https://github.com/Dao-AILab/causal-conv1d"
            )

    def cuda_kernels_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Optional[MambaCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        # 1. Gated MLP's linear projection
        projected_states = self.in_proj(hidden_states).transpose(1, 2)

        if self.training and cache_params is None:  # Doesn't support outputting the states -> used for training
            contextualized_states = mamba_inner_fn(
                projected_states,
                self.conv1d.weight,
                self.conv1d.bias if self.use_conv_bias else None,
                self.x_proj.weight,
                self.dt_proj.weight,
                self.out_proj.weight,
                self.out_proj.bias.float() if self.use_bias else None,
                -torch.exp(self.A_log.float()),
                None,  # input-dependent B
                None,  # input-dependent C
                self.D.float(),
                delta_bias=self.dt_proj.bias.float(),
                delta_softplus=True,
            )

        else:
            hidden_states, gate = projected_states.chunk(2, dim=1)

            if attention_mask is not None:
                hidden_states = hidden_states * attention_mask.unsqueeze(1)

            # 2. Convolution sequence transformation
            conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
            if cache_params is not None and cache_position[0] > 0:
                hidden_states = causal_conv1d_update(
                    hidden_states.squeeze(-1),
                    cache_params.conv_states[self.layer_idx],
                    conv_weights,
                    self.conv1d.bias,
                    self.activation,
                )
                hidden_states = hidden_states.unsqueeze(-1)
            else:
                if cache_params is not None:
                    conv_states = nn.functional.pad(
                        hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)
                    )
                    cache_params.update_conv_state(self.layer_idx, conv_states, cache_position)
                hidden_states = causal_conv1d_fn(
                    hidden_states, conv_weights, self.conv1d.bias, activation=self.activation
                )

            if attention_mask is not None:
                hidden_states = hidden_states * attention_mask.unsqueeze(1)

            # 3. State Space Model sequence transformation
            # 3.a. input varying initialization of time_step, B and C
            ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
            time_step, B, C = torch.split(
                ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
            )
            discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)

            A = -torch.exp(self.A_log.float())
            # 3.c perform the recurrence y ← SSM(A, B, C)(x)
            time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, "bias") else None
            if cache_params is not None and cache_position[0] > 0:
                scan_outputs = selective_state_update(
                    cache_params.ssm_states[self.layer_idx],
                    hidden_states[..., 0],
                    discrete_time_step[..., 0],
                    A,
                    B[:, 0],
                    C[:, 0],
                    self.D,
                    gate[..., 0],
                    time_proj_bias,
                    dt_softplus=True,
                ).unsqueeze(-1)
            else:
                scan_outputs, ssm_state = selective_scan_fn(
                    hidden_states,
                    discrete_time_step,
                    A,
                    B.transpose(1, 2),
                    C.transpose(1, 2),
                    self.D.float(),
                    gate,
                    time_proj_bias,
                    delta_softplus=True,
                    return_last_state=True,
                )
                if ssm_state is not None and cache_params is not None:
                    cache_params.update_ssm_state(self.layer_idx, ssm_state)

            # 4. Final linear projection
            contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
        return contextualized_states

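    # Note (added for clarity, not part of the original file): the kernel paths above
    # and the naive path below implement the same discretized selective-state-space
    # recurrence, with delta_t, B_t, C_t computed from the input at each timestep t:
    #
    #     A_bar_t = exp(delta_t * A)                  # zero-order-hold discretization
    #     B_bar_t = delta_t * B_t
    #     h_t     = A_bar_t * h_{t-1} + B_bar_t * x_t  # state update
    #     y_t     = C_t · h_t + D * x_t                # output with skip connection
    #
    # `selective_state_update` performs one step of this recurrence against the cached
    # state; `selective_scan_fn` runs it over a whole sequence. `slow_forward` below
    # spells out the same computation in plain PyTorch.
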
    def slow_forward(
        self,
        input_states,
        cache_params: Optional[MambaCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None
    ):
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype
        # 1. Gated MLP's linear projection
        # [batch, 2 * intermediate_size, seq_len]
        projected_states = self.in_proj(input_states).transpose(1, 2)
        hidden_states, gate = projected_states.chunk(2, dim=1)

        if attention_mask is not None:
            hidden_states = hidden_states * attention_mask.unsqueeze(1)

        # 2. Convolution sequence transformation
        if cache_params is not None:
            ssm_state = cache_params.ssm_states[self.layer_idx].clone()
            ssm_state = ssm_state.to(hidden_states.device)
            # use `cache_position.shape[0]` to check whether we are in prefill
            # stage, it's equivalent to check `cache_position[0] == 0`, which
            # breaks dynamo fullgraph constraints
            if cache_position.shape[0] == self.conv_kernel_size:
                conv_state = nn.functional.pad(
                    hidden_states,
                    (self.conv_kernel_size - hidden_states.shape[-1], 0)
                )

                cache_params.update_conv_state(self.layer_idx, conv_state, cache_position)
                # [batch, intermediate_size, seq_len]
                hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
            else:
                conv_state = cache_params.update_conv_state(self.layer_idx, hidden_states, cache_position)
                hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
                if self.use_conv_bias:
                    hidden_states += self.conv1d.bias
                # [batch, intermediate_size, 1] : decoding
                hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1)
        else:
            ssm_state = torch.zeros(
                (batch_size, self.intermediate_size, self.ssm_state_size),
                device=hidden_states.device, dtype=dtype
            )
            # [batch, intermediate_size, seq_len]
            hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])

        if attention_mask is not None:
            hidden_states = hidden_states * attention_mask.unsqueeze(1)

        # 3. State Space Model sequence transformation
        # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
        ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
        time_step, B, C = torch.split(
            ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
        )
        # [batch, seq_len, intermediate_size]
        discrete_time_step = self.dt_proj(time_step)
        # [batch, intermediate_size, seq_len]
        discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2)

        # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
        # [intermediate_size, ssm_state_size]
        A = -torch.exp(self.A_log.float())
        # [batch, intermediate_size, seq_len, ssm_state_size]
        discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None])
        # [batch, intermediate_size, seq_len, ssm_state_size]
        discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float()
        deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
| 395 |
+
# 3.c perform the recurrence y ← SSM(A, B, C)(x)
|
| 396 |
+
scan_outputs = []
|
| 397 |
+
for i in range(seq_len):
|
| 398 |
+
# [batch, intermediade_size, ssm_state]
|
| 399 |
+
ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]
|
| 400 |
+
# [batch, intermediade_size, 1]
|
| 401 |
+
scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1))
|
| 402 |
+
scan_outputs.append(scan_output[:, :, 0])
|
| 403 |
+
# [batch, seq_len, intermediade_size]
|
| 404 |
+
scan_output = torch.stack(scan_outputs, dim=-1)
|
| 405 |
+
scan_output = scan_output + (hidden_states * self.D[None, :, None])
|
| 406 |
+
scan_output = (scan_output * self.act(gate))
|
| 407 |
+
|
| 408 |
+
if cache_params is not None:
|
| 409 |
+
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
|
| 410 |
+
|
| 411 |
+
# 4. Final linear projection
|
| 412 |
+
# [batch, seq_len, hidden_size]
|
| 413 |
+
contextualized_states = self.out_proj(scan_output.transpose(1, 2))
|
| 414 |
+
return contextualized_states
|
| 415 |
+
# fmt: on
|
| 416 |
+
|
| 417 |
+
def forward(
|
| 418 |
+
self,
|
| 419 |
+
hidden_states,
|
| 420 |
+
cache_params: Optional[MambaCache] = None,
|
| 421 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 422 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 423 |
+
):
|
| 424 |
+
if is_fast_path_available and "cuda" in self.x_proj.weight.device.type:
|
| 425 |
+
return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
|
| 426 |
+
return self.slow_forward(hidden_states, cache_params, cache_position, attention_mask)
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
class MambaBlock(nn.Module):
|
| 430 |
+
def __init__(self, config, layer_idx):
|
| 431 |
+
super().__init__()
|
| 432 |
+
self.config = config
|
| 433 |
+
self.layer_idx = layer_idx
|
| 434 |
+
self.residual_in_fp32 = config.residual_in_fp32
|
| 435 |
+
self.norm = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
|
| 436 |
+
self.mixer = MambaMixer(config, layer_idx=layer_idx)
|
| 437 |
+
|
| 438 |
+
def forward(
|
| 439 |
+
self,
|
| 440 |
+
hidden_states,
|
| 441 |
+
cache_params: Optional[MambaCache] = None,
|
| 442 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 443 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 444 |
+
):
|
| 445 |
+
residual = hidden_states
|
| 446 |
+
hidden_states = self.norm(hidden_states)
|
| 447 |
+
if self.residual_in_fp32:
|
| 448 |
+
residual = residual.to(torch.float32)
|
| 449 |
+
|
| 450 |
+
hidden_states = self.mixer(
|
| 451 |
+
hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask
|
| 452 |
+
)
|
| 453 |
+
hidden_states = residual + hidden_states
|
| 454 |
+
if self.residual_in_fp32:
|
| 455 |
+
hidden_states = hidden_states.to(dtype=self.norm.weight.dtype)
|
| 456 |
+
return hidden_states
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
class MambaPreTrainedModel(PreTrainedModel):
|
| 460 |
+
"""
|
| 461 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 462 |
+
models.
|
| 463 |
+
"""
|
| 464 |
+
|
| 465 |
+
config_class = MambaConfig
|
| 466 |
+
base_model_prefix = "backbone"
|
| 467 |
+
_no_split_modules = ["MambaBlock", "MambaMixer"]
|
| 468 |
+
supports_gradient_checkpointing = True
|
| 469 |
+
_is_stateful = True
|
| 470 |
+
|
| 471 |
+
def _init_weights(self, module):
|
| 472 |
+
"""Initialize the weights."""
|
| 473 |
+
if isinstance(module, nn.Linear):
|
| 474 |
+
nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
|
| 475 |
+
if module.bias is not None:
|
| 476 |
+
if not getattr(module.bias, "_no_reinit", False):
|
| 477 |
+
nn.init.zeros_(module.bias)
|
| 478 |
+
elif isinstance(module, MambaMixer):
|
| 479 |
+
module.A_log._no_weight_decay = True
|
| 480 |
+
module.D._no_weight_decay = True
|
| 481 |
+
|
| 482 |
+
dt_init_std = self.config.time_step_rank**-0.5 * self.config.time_step_scale
|
| 483 |
+
if self.config.time_step_init_scheme == "constant":
|
| 484 |
+
nn.init.constant_(module.dt_proj.weight, dt_init_std)
|
| 485 |
+
elif self.config.time_step_init_scheme == "random":
|
| 486 |
+
nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std)
|
| 487 |
+
|
| 488 |
+
dt = torch.exp(
|
| 489 |
+
torch.rand(self.config.intermediate_size)
|
| 490 |
+
* (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
|
| 491 |
+
+ math.log(self.config.time_step_min)
|
| 492 |
+
).clamp(min=self.config.time_step_floor)
|
| 493 |
+
# # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
|
| 494 |
+
inv_dt = dt + torch.log(-torch.expm1(-dt))
|
| 495 |
+
with torch.no_grad():
|
| 496 |
+
module.dt_proj.bias.data = nn.Parameter(inv_dt.to(module.dt_proj.bias.device))
|
| 497 |
+
module.dt_proj.bias._no_reinit = True
|
| 498 |
+
elif isinstance(module, nn.Embedding):
|
| 499 |
+
nn.init.normal_(module.weight, std=self.config.initializer_range)
|
| 500 |
+
elif hasattr(module, 'reset_parameters'):
|
| 501 |
+
module.reset_parameters()
|
| 502 |
+
|
| 503 |
+
if self.config.rescale_prenorm_residual:
|
| 504 |
+
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
|
| 505 |
+
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
|
| 506 |
+
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
|
| 507 |
+
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
|
| 508 |
+
#
|
| 509 |
+
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
|
| 510 |
+
for name, p in module.named_parameters():
|
| 511 |
+
if name in ["out_proj.weight"]:
|
| 512 |
+
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
|
| 513 |
+
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
|
| 514 |
+
# We need to reinit p since this code could be called multiple times
|
| 515 |
+
# Having just p *= scale would repeatedly scale it down
|
| 516 |
+
nn.init.kaiming_uniform_(p, a=math.sqrt(5))
|
| 517 |
+
with torch.no_grad():
|
| 518 |
+
p /= math.sqrt(self.config.num_hidden_layers)
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
@dataclass
|
| 522 |
+
class MambaOutput(ModelOutput):
|
| 523 |
+
"""
|
| 524 |
+
Class for the MAMBA model outputs.
|
| 525 |
+
|
| 526 |
+
Args:
|
| 527 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 528 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 529 |
+
cache_params (`MambaCache`):
|
| 530 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
| 531 |
+
avoid providing the old `input_ids`.
|
| 532 |
+
|
| 533 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
| 534 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*,
|
| 535 |
+
returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 536 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 537 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 538 |
+
|
| 539 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 540 |
+
"""
|
| 541 |
+
|
| 542 |
+
last_hidden_state: Optional[torch.FloatTensor] = None
|
| 543 |
+
cache_params: Optional[MambaCache] = None
|
| 544 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
@dataclass
|
| 548 |
+
class MambaCausalLMOutput(ModelOutput):
|
| 549 |
+
"""
|
| 550 |
+
Base class for causal language model (or autoregressive) outputs.
|
| 551 |
+
|
| 552 |
+
Args:
|
| 553 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
| 554 |
+
Language modeling loss (for next-token prediction).
|
| 555 |
+
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
| 556 |
+
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
| 557 |
+
cache_params (`MambaCache`):
|
| 558 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
| 559 |
+
avoid providing the old `input_ids`.
|
| 560 |
+
|
| 561 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
| 562 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*,
|
| 563 |
+
returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 564 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 565 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 566 |
+
|
| 567 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 568 |
+
"""
|
| 569 |
+
|
| 570 |
+
loss: Optional[torch.FloatTensor] = None
|
| 571 |
+
logits: Optional[torch.FloatTensor] = None
|
| 572 |
+
cache_params: Optional[MambaCache] = None
|
| 573 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
class MambaModel(MambaPreTrainedModel):
|
| 577 |
+
def __init__(self, config):
|
| 578 |
+
super().__init__(config)
|
| 579 |
+
|
| 580 |
+
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
|
| 581 |
+
self.layers = nn.ModuleList([MambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
|
| 582 |
+
|
| 583 |
+
self.gradient_checkpointing = False
|
| 584 |
+
self.norm_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
|
| 585 |
+
# Initialize weights and apply final processing
|
| 586 |
+
self._register_load_state_dict_pre_hook(self.load_hook)
|
| 587 |
+
self.post_init()
|
| 588 |
+
|
| 589 |
+
def load_hook(self, state_dict, prefix, *args):
|
| 590 |
+
for k in state_dict:
|
| 591 |
+
if "embedding." in k:
|
| 592 |
+
state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k)
|
| 593 |
+
break
|
| 594 |
+
|
| 595 |
+
def get_input_embeddings(self):
|
| 596 |
+
return self.embeddings
|
| 597 |
+
|
| 598 |
+
def set_input_embeddings(self, new_embeddings):
|
| 599 |
+
self.embeddings = new_embeddings
|
| 600 |
+
|
| 601 |
+
def forward(
|
| 602 |
+
self,
|
| 603 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 604 |
+
inputs_embeds: Optional[torch.LongTensor] = None,
|
| 605 |
+
cache_params: Optional[MambaCache] = None,
|
| 606 |
+
use_cache: Optional[bool] = None,
|
| 607 |
+
output_hidden_states: Optional[bool] = None,
|
| 608 |
+
return_dict: Optional[bool] = None,
|
| 609 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 610 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 611 |
+
) -> Union[Tuple, MambaOutput]:
|
| 612 |
+
output_hidden_states = (
|
| 613 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 614 |
+
)
|
| 615 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
| 616 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 617 |
+
|
| 618 |
+
if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor
|
| 619 |
+
raise ValueError(
|
| 620 |
+
"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
|
| 621 |
+
)
|
| 622 |
+
|
| 623 |
+
if inputs_embeds is None:
|
| 624 |
+
inputs_embeds = self.embeddings(input_ids)
|
| 625 |
+
|
| 626 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
| 627 |
+
use_cache = False
|
| 628 |
+
|
| 629 |
+
if use_cache:
|
| 630 |
+
if cache_params is None:
|
| 631 |
+
cache_params = MambaCache(
|
| 632 |
+
self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
|
| 633 |
+
)
|
| 634 |
+
cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device)
|
| 635 |
+
elif cache_position is None:
|
| 636 |
+
# cases when we do manual forward instead of using `model.generate` which will initiate
|
| 637 |
+
# `cache_position` and makes sure it is not None, throw error here instead of doing some
|
| 638 |
+
# hack to conjecture the current cache position
|
| 639 |
+
raise ValueError(
|
| 640 |
+
"You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, "
|
| 641 |
+
"you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will "
|
| 642 |
+
"be initialized for you automatically"
|
| 643 |
+
)
|
| 644 |
+
else:
|
| 645 |
+
cache_params = None
|
| 646 |
+
|
| 647 |
+
hidden_states = inputs_embeds
|
| 648 |
+
all_hidden_states = () if output_hidden_states else None
|
| 649 |
+
for mixer_block in self.layers:
|
| 650 |
+
if self.gradient_checkpointing and self.training:
|
| 651 |
+
hidden_states = self._gradient_checkpointing_func(
|
| 652 |
+
mixer_block.__call__, hidden_states, cache_params, cache_position, attention_mask
|
| 653 |
+
)
|
| 654 |
+
else:
|
| 655 |
+
hidden_states = mixer_block(
|
| 656 |
+
hidden_states,
|
| 657 |
+
cache_params=cache_params,
|
| 658 |
+
cache_position=cache_position,
|
| 659 |
+
attention_mask=attention_mask,
|
| 660 |
+
)
|
| 661 |
+
|
| 662 |
+
if output_hidden_states:
|
| 663 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 664 |
+
|
| 665 |
+
hidden_states = self.norm_f(hidden_states)
|
| 666 |
+
|
| 667 |
+
if output_hidden_states:
|
| 668 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 669 |
+
|
| 670 |
+
if not return_dict:
|
| 671 |
+
return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
|
| 672 |
+
|
| 673 |
+
return MambaOutput(
|
| 674 |
+
last_hidden_state=hidden_states,
|
| 675 |
+
cache_params=cache_params if use_cache else None,
|
| 676 |
+
hidden_states=all_hidden_states,
|
| 677 |
+
)
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
class MambaForCausalLM(MambaPreTrainedModel, GenerationMixin):
|
| 681 |
+
|
| 682 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 683 |
+
|
| 684 |
+
def __init__(self, config):
|
| 685 |
+
super().__init__(config)
|
| 686 |
+
self.backbone = MambaModel(config)
|
| 687 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 688 |
+
self.criterion = None
|
| 689 |
+
|
| 690 |
+
# Initialize weights and apply final processing
|
| 691 |
+
self.post_init()
|
| 692 |
+
|
| 693 |
+
def get_output_embeddings(self):
|
| 694 |
+
return self.lm_head
|
| 695 |
+
|
| 696 |
+
def set_output_embeddings(self, new_embeddings):
|
| 697 |
+
self.lm_head = new_embeddings
|
| 698 |
+
|
| 699 |
+
def get_input_embeddings(self):
|
| 700 |
+
return self.backbone.get_input_embeddings()
|
| 701 |
+
|
| 702 |
+
def set_input_embeddings(self, new_embeddings):
|
| 703 |
+
return self.backbone.set_input_embeddings(new_embeddings)
|
| 704 |
+
|
| 705 |
+
def _update_model_kwargs_for_generation(
|
| 706 |
+
self, outputs: ModelOutput,
|
| 707 |
+
model_kwargs: Dict[str, Any],
|
| 708 |
+
num_new_tokens: int = 1,
|
| 709 |
+
**kwargs
|
| 710 |
+
) -> Dict[str, Any]:
|
| 711 |
+
model_kwargs["cache_params"] = outputs.get("cache_params", None)
|
| 712 |
+
if (
|
| 713 |
+
model_kwargs.get("use_cache", True)
|
| 714 |
+
and "cache_position" in model_kwargs
|
| 715 |
+
and model_kwargs["cache_position"] is not None
|
| 716 |
+
):
|
| 717 |
+
model_kwargs["cache_position"] = model_kwargs["cache_position"][-1:] + num_new_tokens
|
| 718 |
+
|
| 719 |
+
if "attention_mask" in model_kwargs:
|
| 720 |
+
attention_mask = model_kwargs["attention_mask"]
|
| 721 |
+
model_kwargs["attention_mask"] = torch.cat(
|
| 722 |
+
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
|
| 723 |
+
)
|
| 724 |
+
|
| 725 |
+
return model_kwargs
|
| 726 |
+
|
| 727 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 728 |
+
def prepare_inputs_for_generation(
|
| 729 |
+
self,
|
| 730 |
+
input_ids,
|
| 731 |
+
inputs_embeds=None,
|
| 732 |
+
use_cache=None,
|
| 733 |
+
cache_params: Optional[MambaCache] = None,
|
| 734 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 735 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 736 |
+
logits_to_keep: Optional[int] = None,
|
| 737 |
+
**kwargs,
|
| 738 |
+
):
|
| 739 |
+
if use_cache:
|
| 740 |
+
# `cache_position` should have been initialized in `generate`
|
| 741 |
+
if cache_position is None:
|
| 742 |
+
raise ValueError(
|
| 743 |
+
"`cache_position` should not be None as it should have been initialized in "
|
| 744 |
+
"`model.generate`, you are responsible for passing in a valid `cache_position` if "
|
| 745 |
+
"you are calling `prepare_inputs_for_generation` directly with `use_cache=True`"
|
| 746 |
+
)
|
| 747 |
+
if cache_position[0] > 0:
|
| 748 |
+
input_ids = input_ids[:, -1].unsqueeze(-1)
|
| 749 |
+
|
| 750 |
+
if attention_mask is not None:
|
| 751 |
+
attention_mask = None
|
| 752 |
+
|
| 753 |
+
else:
|
| 754 |
+
# we initialize the `cache_position` to full size of `conv_states` at prefill stage
|
| 755 |
+
# considering padding will be applied when input length is shorter, and truncation
|
| 756 |
+
# will be applied when it is longer, so it will be equivalent to always have it match
|
| 757 |
+
# the length of `cache_params.conv_states`, which is `config.conv_kernel`
|
| 758 |
+
cache_position = torch.arange(0, self.config.conv_kernel, device=input_ids.device)
|
| 759 |
+
|
| 760 |
+
if inputs_embeds is not None and cache_params is None:
|
| 761 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
| 762 |
+
else:
|
| 763 |
+
model_inputs = {"input_ids": input_ids.contiguous()}
|
| 764 |
+
|
| 765 |
+
if logits_to_keep is not None:
|
| 766 |
+
model_inputs['logits_to_keep'] = logits_to_keep
|
| 767 |
+
|
| 768 |
+
model_inputs.update({
|
| 769 |
+
'cache_params': cache_params,
|
| 770 |
+
'use_cache': use_cache,
|
| 771 |
+
'cache_position': cache_position,
|
| 772 |
+
'attention_mask': attention_mask,
|
| 773 |
+
'logits_to_keep': logits_to_keep,
|
| 774 |
+
})
|
| 775 |
+
return model_inputs
|
| 776 |
+
|
| 777 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 778 |
+
def forward(
|
| 779 |
+
self,
|
| 780 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 781 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 782 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 783 |
+
cache_params: Optional[MambaCache] = None,
|
| 784 |
+
labels: Optional[torch.LongTensor] = None,
|
| 785 |
+
output_hidden_states: Optional[bool] = None,
|
| 786 |
+
return_dict: Optional[bool] = None,
|
| 787 |
+
use_cache: Optional[bool] = None,
|
| 788 |
+
cache_position: Optional[torch.Tensor] = None,
|
| 789 |
+
logits_to_keep: Optional[int] = 0,
|
| 790 |
+
**kwargs, # for now we need this for generation
|
| 791 |
+
) -> Union[Tuple, MambaCausalLMOutput]:
|
| 792 |
+
r"""
|
| 793 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 794 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
| 795 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
| 796 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
| 797 |
+
"""
|
| 798 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 799 |
+
|
| 800 |
+
mamba_outputs = self.backbone(
|
| 801 |
+
input_ids,
|
| 802 |
+
cache_params=cache_params,
|
| 803 |
+
inputs_embeds=inputs_embeds,
|
| 804 |
+
output_hidden_states=output_hidden_states,
|
| 805 |
+
return_dict=return_dict,
|
| 806 |
+
use_cache=use_cache,
|
| 807 |
+
cache_position=cache_position,
|
| 808 |
+
attention_mask=attention_mask,
|
| 809 |
+
)
|
| 810 |
+
hidden_states = mamba_outputs[0]
|
| 811 |
+
fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
|
| 812 |
+
|
| 813 |
+
loss, logits = None, None
|
| 814 |
+
if not fuse_linear_and_cross_entropy or labels is None:
|
| 815 |
+
logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
|
| 816 |
+
if labels is not None:
|
| 817 |
+
if getattr(self, 'criterion', None) is None:
|
| 818 |
+
if fuse_linear_and_cross_entropy:
|
| 819 |
+
criterion = FusedLinearCrossEntropyLoss()
|
| 820 |
+
elif self.config.fuse_cross_entropy:
|
| 821 |
+
criterion = FusedCrossEntropyLoss(inplace_backward=True)
|
| 822 |
+
else:
|
| 823 |
+
criterion = nn.CrossEntropyLoss()
|
| 824 |
+
else:
|
| 825 |
+
criterion = self.criterion
|
| 826 |
+
# Enable model parallelism
|
| 827 |
+
labels = labels.to(hidden_states.device)
|
| 828 |
+
labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
|
| 829 |
+
if fuse_linear_and_cross_entropy:
|
| 830 |
+
loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
|
| 831 |
+
else:
|
| 832 |
+
loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))
|
| 833 |
+
|
| 834 |
+
if not return_dict:
|
| 835 |
+
output = (logits,) + mamba_outputs[1:]
|
| 836 |
+
return (loss,) + output if loss is not None else output
|
| 837 |
+
|
| 838 |
+
return MambaCausalLMOutput(
|
| 839 |
+
loss=loss,
|
| 840 |
+
logits=logits,
|
| 841 |
+
cache_params=mamba_outputs.cache_params,
|
| 842 |
+
hidden_states=mamba_outputs.hidden_states,
|
| 843 |
+
)
|
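For orientation, here is a minimal usage sketch of the classes added in this file (editorial example, not part of the diff). It assumes `MambaConfig` can be imported alongside `MambaForCausalLM` from the module path implied by the file name above; the toy sizes and the import path are assumptions, and the snippet has not been run against this exact revision.

    # Hypothetical usage sketch for fla/models/mamba/modeling_mamba.py (not part of the diff).
    import torch
    from fla.models.mamba.modeling_mamba import MambaConfig, MambaForCausalLM  # assumed import path

    config = MambaConfig(num_hidden_layers=2, hidden_size=256, vocab_size=1024)  # arbitrary toy sizes
    model = MambaForCausalLM(config).eval()

    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        # prefill: a MambaCache is created internally when use_cache=True and no cache is passed
        out = model(input_ids, use_cache=True)
        # decode one step by reusing the returned cache and supplying an explicit cache_position
        next_out = model(
            input_ids[:, -1:],
            cache_params=out.cache_params,
            use_cache=True,
            cache_position=torch.tensor([8]),
        )
    print(next_out.logits.shape)  # expected: (1, 1, config.vocab_size)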
fla/models/mamba2/configuration_mamba2.py
ADDED
@@ -0,0 +1,170 @@
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAMBA2 configuration"""

import math

from transformers.configuration_utils import PretrainedConfig


class Mamba2Config(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`Mamba2Model`]. It is used to instantiate a MAMBA2
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the MAMBA2
    [state-spaces/mamba2-2.8b](https://huggingface.co/state-spaces/mamba2-2.8b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        num_heads (`int`, *optional*, defaults to 64):
            Number of heads for the evolution matrices of mamba 2.
        head_dim (`int`, *optional*, defaults to 64):
            Dimension of each head.
        vocab_size (`int`, *optional*, defaults to 32768):
            Vocabulary size of the MAMBA2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Mamba2Model`].
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the embeddings and hidden states.
        state_size (`int`, *optional*, defaults to 128): shape of the state space latents.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the model.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the end of sentence token in the vocabulary.
        expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
        conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel.
        n_groups (`int`, *optional*, defaults to 1):
            Number of groups for the evolution matrices of mamba 2.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block
        use_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the convolution layer of the mixer block.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.1):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        residual_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether or not residuals should be in `float32`.
            If set to `False` residuals will keep the same `dtype` as the rest of the model
        time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
            Rank of the discretization projection matrix.
            `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_floor (`float`, *optional*, defaults to 0.0001):
            Minimum clamping value of the `dt_proj.bias` layer initialization.
        time_step_limit (`tuple`, *optional*, defaults to `(0.0, inf)`):
            Accepted range of time step values.
        rescale_prenorm_residual (`bool`, *optional*, defaults to `True`):
            Whether or not to rescale `out_proj` weights when initializing.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the cache should be used.
        rms_norm (`bool`, *optional*, defaults to `True`):
            Whether to use RMS norm or not.
        chunk_size (`int`, *optional*, defaults to 256):
            Size of the chunks that will comprise the sequence.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie word embeddings or not.
    """

    model_type = "mamba2"

    def __init__(
        self,
        num_heads: int = 64,
        head_dim: int = 64,
        vocab_size: int = 32000,
        hidden_size: int = 2048,
        state_size: int = 128,
        num_hidden_layers: int = 48,
        layer_norm_epsilon: float = 1e-5,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        expand: int = 2,
        conv_kernel: int = 4,
        n_groups: int = 1,
        use_bias: bool = False,
        use_conv_bias: bool = True,
        hidden_act: str = "silu",
        initializer_range: float = 0.1,
        residual_in_fp32: bool = True,
        time_step_rank: str = "auto",
        time_step_min: float = 0.001,
        time_step_max: float = 0.1,
        time_step_floor: float = 1e-4,
        time_step_limit=(0.0, float("inf")),
        rescale_prenorm_residual: bool = True,
        use_cache: bool = True,
        rms_norm: bool = True,
        chunk_size: int = 256,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        tie_word_embeddings: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.state_size = state_size
        self.num_hidden_layers = num_hidden_layers
        self.layer_norm_epsilon = layer_norm_epsilon
        self.conv_kernel = conv_kernel
        self.expand = expand

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.use_bias = use_bias
        self.use_conv_bias = use_conv_bias
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.time_step_rank = (
            math.ceil(self.hidden_size / 16)
            if time_step_rank == "auto"
            else time_step_rank
        )
        self.time_step_min = time_step_min
        self.time_step_max = time_step_max
        self.time_step_floor = time_step_floor
        self.rescale_prenorm_residual = rescale_prenorm_residual
        self.residual_in_fp32 = residual_in_fp32
        self.use_cache = use_cache
        self.n_groups = n_groups
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.rms_norm = rms_norm
        self.state_size = state_size
        self.chunk_size = chunk_size
        self.time_step_limit = time_step_limit
        self.fuse_norm = fuse_norm
        self.fuse_cross_entropy = fuse_cross_entropy
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
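A short editorial example of the configuration defined above (not part of the diff): it shows how `time_step_rank="auto"` resolves and how the mixer's intermediate size follows from `expand * hidden_size`. The import path is the one used by `modeling_mamba2.py` below; the sizes are arbitrary toy values.

    # Hypothetical usage sketch for Mamba2Config (not part of the diff).
    from fla.models.mamba2.configuration_mamba2 import Mamba2Config

    config = Mamba2Config(hidden_size=512, num_heads=8, head_dim=64, num_hidden_layers=4)
    print(config.time_step_rank)                     # "auto" -> math.ceil(512 / 16) == 32
    print(int(config.expand * config.hidden_size))   # intermediate size used by the mixer: 1024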
fla/models/mamba2/modeling_mamba2.py
ADDED
@@ -0,0 +1,1093 @@
# Copyright 2024 state-spaces/mamba2 org and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MAMBA2 model."""

import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.generation import GenerationMixin
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.models.mamba2.configuration_mamba2 import Mamba2Config
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, RMSNorm
from fla.modules.layernorm_gated import RMSNormGated

logger = logging.get_logger(__name__)

with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    try:
        from mamba_ssm.ops.triton.selective_state_update import selective_state_update
        from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
    except ImportError:
        (
            selective_state_update,
            mamba_chunk_scan_combined,
            mamba_split_conv1d_scan_combined,
        ) = (None, None, None)
    try:
        from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
    except ImportError:
        causal_conv1d_update, causal_conv1d_fn = None, None
    is_fast_path_available = all((
        selective_state_update,
        causal_conv1d_fn,
        causal_conv1d_update
    ))


def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
    """
    Padding x tensor with `pad_size` on the seq_len dim (dim=1)

    Assumes that we only have tensors of either size 4 or 3
    """
    pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)

    return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)


def reshape_into_chunks(input_tensor, pad_size, chunk_size):
    """
    Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and
    simultaneously splitting it into chunk sequences.

    Assumes that we only have tensors of either size 4 or 3
    """
    # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...]
    input_tensor = pad_tensor_by_size(input_tensor, pad_size)

    if len(input_tensor.shape) == 3:
        # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads]
        return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
    else:
        # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] ->
        # [bsz, -1, chunk_size, num_heads, head_dim or state_size]
        return input_tensor.reshape(
            input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
        )


def segment_sum(input_tensor):
    """
    More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
    """
    chunk_size = input_tensor.size(-1)
    # 1. expand input tensor to have an additional dimension and repeat along that dimension
    # [..., chunk_size] -> [..., chunk_size, chunk_size]
    input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
    # 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag
    mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1)
    input_tensor = input_tensor.masked_fill(~mask, 0)
    # 3. compute actual cumsum
    tensor_segsum = torch.cumsum(input_tensor, dim=-2)

    # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
    mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
    tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)
    return tensor_segsum


def apply_mask_to_padding_states(hidden_states, attention_mask):
    """
    Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
    """
    if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
        dtype = hidden_states.dtype
        hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)

    return hidden_states


class Mamba2Cache:
    """
    Arguments:
        config: Mamba2Config
        batch_size: int
        dtype: torch.dtype
        device: torch.device

    Attributes:
        dtype: (`torch.dtype`):
            The default `dtype` used to initializing the cache.
        conv_kernel_size: (`int`):
            Model's convolution kernel size taken from config.
        n_groups: (`int`):
            Model's number of groups taken from the config - similar to tensor parallel in Transformer.
        state_size: (`int`):
            Model's SSM state size taken from config.
        num_heads: (`int`):
            The number of heads used in the linear attention / SSM.
        head_dim: (`int`):
            The respective dimension of the heads used in the linear attention / SSM.
        intermediate_size: (`int`):
            Model's intermediate_size based on (expand * hidden_dim) from config.
        conv_states: (`torch.Tensor`):
            A tensor of shape `[num_layers, batch_size, conv_kernel_size, intermediate_size + 2 * n_groups * state_size]`
            that holds convolutional states.
        ssm_states: (`torch.Tensor`):
            A tensor of shape `[num_layers, batch_size, num_heads, head_dim, state_size]` that holds ssm states.
    """

    def __init__(
        self,
        config: Mamba2Config,
        batch_size: int,
        dtype: torch.dtype = torch.float16,
        device: Optional[str] = None,
    ):
        self.dtype = dtype
        self.conv_kernel_size = config.conv_kernel
        self.n_groups = config.n_groups
        self.state_size = config.state_size
        self.num_heads = config.num_heads
        self.head_dim = config.head_dim
        self.intermediate_size = int(config.expand * config.hidden_size)

        self.conv_states = torch.zeros(
            config.num_hidden_layers,
            batch_size,
            self.intermediate_size + 2 * self.n_groups * self.state_size,
            self.conv_kernel_size,
            device=device,
            dtype=dtype,
        )
        self.ssm_states = torch.zeros(
            config.num_hidden_layers,
            batch_size,
            self.num_heads,
            self.head_dim,
            self.state_size,
            device=device,
            dtype=dtype,
        )

    def update_conv_state(
        self,
        layer_idx: int,
        new_conv_state: torch.Tensor,
        cache_init: bool = False
    ) -> torch.Tensor:
        if cache_init:
            self.conv_states[layer_idx] = new_conv_state.to(self.conv_states.device)
        else:
            self.conv_states[layer_idx] = self.conv_states[layer_idx].roll(shifts=-1, dims=-1)
            self.conv_states[layer_idx][:, :, -1] = new_conv_state[:, 0, :].to(self.conv_states.device)
        return self.conv_states[layer_idx]

    def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor):
        self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device)
        return self.ssm_states[layer_idx]

    def reset(self):
        self.conv_states.zero_()
        self.ssm_states.zero_()


class Mamba2Mixer(nn.Module):
    """
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)
    """

    def __init__(self, config: Mamba2Config, layer_idx: int):
        super().__init__()
        self.num_heads = config.num_heads
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.state_size
        self.conv_kernel_size = config.conv_kernel
        self.intermediate_size = int(config.expand * self.hidden_size)
        self.time_step_rank = int(config.time_step_rank)
        self.layer_idx = layer_idx
        self.use_conv_bias = config.use_conv_bias
        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]

        self.layer_norm_epsilon = config.layer_norm_epsilon
        self.rms_norm = config.rms_norm

        self.n_groups = config.n_groups
        self.head_dim = config.head_dim
        self.chunk_size = config.chunk_size

        self.time_step_limit = config.time_step_limit
        self.time_step_min = config.time_step_min
        self.time_step_max = config.time_step_max

        self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
        self.conv1d = nn.Conv1d(
            in_channels=self.conv_dim,
            out_channels=self.conv_dim,
            bias=config.use_conv_bias,
            kernel_size=config.conv_kernel,
            groups=self.conv_dim,
            padding=config.conv_kernel - 1,
        )

        # projection of the input hidden states
        projection_size = self.intermediate_size + self.conv_dim + self.num_heads
        self.in_proj = nn.Linear(
            self.hidden_size,
            projection_size,
            bias=config.use_bias,
        )
        # selective projection used to make dt, B and C input dependant

        # time step projection (discretization)
        # instantiate once and copy inv_dt in init_weights of PretrainedModel
        self.dt_bias = nn.Parameter(torch.ones(self.num_heads))

        # S4D real initialization. These are not discretized!
        # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
        A = torch.arange(1, self.num_heads + 1)
        self.A_log = nn.Parameter(torch.log(A))
        self.A_log._no_weight_decay = True
        self.norm = RMSNormGated(
            self.intermediate_size, eps=self.layer_norm_epsilon, norm_before_gate=False
        )
        self.D = nn.Parameter(torch.ones(self.num_heads))
        self.D._no_weight_decay = True

        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
        self.use_bias = config.use_bias

        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of "
                "`(selective_state_update, causal_conv1d_fn, causal_conv1d_update)` is None. "
                "Falling back to the naive implementation. "
                "To install follow https://github.com/state-spaces/mamba/#installation and"
                "https://github.com/Dao-AILab/causal-conv1d"
            )

    def cuda_kernels_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Optional[Mamba2Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        # 1. Gated MLP's linear projection
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
        projected_states = self.in_proj(hidden_states)

        # Set up dimensions for reshapes later
        batch_size, seq_len, _ = hidden_states.shape
        groups_time_state_size = self.n_groups * self.ssm_state_size
        d_mlp = (
            projected_states.shape[-1]
            - 2 * self.intermediate_size
            - 2 * self.n_groups * self.ssm_state_size
            - self.num_heads
        ) // 2

        # Single step calculations via cache
        if cache_params is not None and cache_position is not None and cache_position[0] > 0:
            _, _, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
                [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
            )

            # 2. Convolution sequence transformation
            hidden_states_B_C = causal_conv1d_update(
                hidden_states_B_C,
                cache_params.conv_states[self.layer_idx],
                self.conv1d.weight.squeeze(1),
                self.conv1d.bias,
                self.activation,
            )

            hidden_states, B, C = torch.split(
                hidden_states_B_C,
                [
                    self.intermediate_size,
                    groups_time_state_size,
                    groups_time_state_size,
                ],
                dim=-1,
            )

            # 3. SSM transformation
            A = -torch.exp(self.A_log.float())  # (nheads,)
            A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            dt = dt[:, :, None].expand(-1, -1, self.head_dim)
            dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
            D = self.D[:, None, ...].expand(-1, self.head_dim)
            B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
            C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
            hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)

            hidden_states = selective_state_update(
                cache_params.ssm_states[self.layer_idx],
                hidden_states_reshaped,
                dt,
                A,
                B,
                C,
                D,
                z=None,
                dt_bias=dt_bias,
                dt_softplus=True,
            )
            hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
            hidden_states = self.norm(hidden_states, gate)

            # 4. Final linear projection
            out = self.out_proj(hidden_states)[:, None, ...]

        # Fused calculations or step by step if no initialized cache is found
        else:
            A = -torch.exp(self.A_log.float())  # (num_heads) or (intermediate_size, state_size)
            dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}

            # 2-4. Fused kernel for conv1d, SSM, and the final projection
            if self.training and cache_params is None:
                out = mamba_split_conv1d_scan_combined(
                    projected_states,
                    self.conv1d.weight.squeeze(1),
                    self.conv1d.bias,
                    self.dt_bias,
                    A,
                    D=self.D,
                    chunk_size=self.chunk_size,
                    seq_idx=None,  # was seq_idx
                    activation=self.activation,
                    rmsnorm_weight=self.norm.weight,
                    rmsnorm_eps=self.norm.eps,
                    outproj_weight=self.out_proj.weight,
                    outproj_bias=self.out_proj.bias,
                    headdim=self.head_dim,
                    ngroups=self.n_groups,
                    norm_before_gate=False,
                    return_final_states=False,
                    **dt_limit_kwargs,
                )

            else:
                _, _, gate, hidden_states_B_C, dt = projected_states.split(
                    [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
                )

                # 2. Convolution sequence transformation
                # Init cache
                if cache_params is not None:
                    hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                    conv_states = nn.functional.pad(
                        hidden_states_B_C_transposed,
                        (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0),
                    )
                    cache_params.update_conv_state(
                        layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True
                    )

                if self.activation not in ["silu", "swish"]:
                    hidden_states_B_C = self.act(
                        self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)
                    )
                else:
                    hidden_states_B_C = causal_conv1d_fn(
                        x=hidden_states_B_C.transpose(1, 2),
                        weight=self.conv1d.weight.squeeze(1),
                        bias=self.conv1d.bias,
                        activation=self.activation,
                    ).transpose(1, 2)

                hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
                hidden_states, B, C = torch.split(
                    hidden_states_B_C,
|
| 418 |
+
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
|
| 419 |
+
dim=-1,
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
# 3. SSM transformation
|
| 423 |
+
scan_output, ssm_state = mamba_chunk_scan_combined(
|
| 424 |
+
hidden_states.view(batch_size, seq_len, -1, self.head_dim),
|
| 425 |
+
dt,
|
| 426 |
+
A,
|
| 427 |
+
B.view(batch_size, seq_len, self.n_groups, -1),
|
| 428 |
+
C.view(batch_size, seq_len, self.n_groups, -1),
|
| 429 |
+
chunk_size=self.chunk_size,
|
| 430 |
+
D=self.D,
|
| 431 |
+
z=None,
|
| 432 |
+
seq_idx=None,
|
| 433 |
+
return_final_states=True,
|
| 434 |
+
dt_bias=self.dt_bias,
|
| 435 |
+
dt_softplus=True,
|
| 436 |
+
**dt_limit_kwargs,
|
| 437 |
+
)
|
| 438 |
+
|
| 439 |
+
# Init cache
|
| 440 |
+
if ssm_state is not None and cache_params is not None:
|
| 441 |
+
cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)
|
| 442 |
+
|
| 443 |
+
scan_output = scan_output.view(batch_size, seq_len, -1)
|
| 444 |
+
# Multiply "gate" branch and apply extra normalization layer
|
| 445 |
+
scan_output = self.norm(scan_output, gate)
|
| 446 |
+
|
| 447 |
+
# 4. Final linear projection
|
| 448 |
+
out = self.out_proj(scan_output)
|
| 449 |
+
return out
|
| 450 |
+
|
| 451 |
+
# fmt: off
|
| 452 |
+
def torch_forward(
|
| 453 |
+
self,
|
| 454 |
+
input_states,
|
| 455 |
+
cache_params: Optional[Mamba2Cache] = None,
|
| 456 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 457 |
+
attention_mask: Optional[torch.Tensor] = None
|
| 458 |
+
):
|
| 459 |
+
batch_size, seq_len, _ = input_states.shape
|
| 460 |
+
dtype = input_states.dtype
|
| 461 |
+
|
| 462 |
+
# 1. Gated MLP's linear projection
|
| 463 |
+
input_states = apply_mask_to_padding_states(input_states, attention_mask)
|
| 464 |
+
projected_states = self.in_proj(input_states)
|
| 465 |
+
d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size -
|
| 466 |
+
2 * self.n_groups * self.ssm_state_size - self.num_heads) // 2
|
| 467 |
+
_, _, gate, hidden_states_B_C, dt = projected_states.split(
|
| 468 |
+
[d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
|
| 469 |
+
)
|
| 470 |
+
|
| 471 |
+
# 2. Convolution sequence transformation
|
| 472 |
+
if cache_params is not None and cache_position is not None and cache_position[0] > 0:
|
| 473 |
+
cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=hidden_states_B_C, cache_init=False)
|
| 474 |
+
|
| 475 |
+
# We need to guarantee that anything regarding the cache is on the same device
|
| 476 |
+
conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)
|
| 477 |
+
|
| 478 |
+
hidden_states_B_C = torch.sum(
|
| 479 |
+
conv_states * self.conv1d.weight.squeeze(1), dim=-1
|
| 480 |
+
)
|
| 481 |
+
if self.use_conv_bias:
|
| 482 |
+
hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
|
| 483 |
+
hidden_states_B_C = self.act(hidden_states_B_C)
|
| 484 |
+
else:
|
| 485 |
+
# Init cache
|
| 486 |
+
if cache_params is not None:
|
| 487 |
+
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
|
| 488 |
+
conv_states = nn.functional.pad(
|
| 489 |
+
hidden_states_B_C_transposed, (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
|
| 490 |
+
)
|
| 491 |
+
cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True)
|
| 492 |
+
|
| 493 |
+
hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
|
| 494 |
+
|
| 495 |
+
hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
|
| 496 |
+
hidden_states, B, C = torch.split(
|
| 497 |
+
hidden_states_B_C,
|
| 498 |
+
[self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
|
| 499 |
+
dim=-1
|
| 500 |
+
)
|
| 501 |
+
|
| 502 |
+
# 3. SSM transformation
|
| 503 |
+
A = -torch.exp(self.A_log.float()) # [num_heads]
|
| 504 |
+
if cache_params is not None and cache_position is not None and cache_position[0] > 0:
|
| 505 |
+
# We need to guarantee that anything regarding the cache is on the same device
|
| 506 |
+
cache_device = cache_params.ssm_states.device
|
| 507 |
+
|
| 508 |
+
# Note: there is no need to pad parameter matrices here, as there is just one new token
|
| 509 |
+
# for batched generation
|
| 510 |
+
dt = dt[:, 0, :][:, None, ...]
|
| 511 |
+
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
|
| 512 |
+
# [num_heads] -> [num_heads, head_dim]
|
| 513 |
+
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
|
| 514 |
+
|
| 515 |
+
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
|
| 516 |
+
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
|
| 517 |
+
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
|
| 518 |
+
# [bsz, num_heads, head_dim, state_size]
|
| 519 |
+
dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)
|
| 520 |
+
|
| 521 |
+
# Discretize B
|
| 522 |
+
# [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
|
| 523 |
+
# -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
|
| 524 |
+
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
|
| 525 |
+
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
|
| 526 |
+
B = B.reshape(batch_size, -1, B.shape[-1])
|
| 527 |
+
# [bsz, num_heads, head_dim, state_size]
|
| 528 |
+
dB = dt[..., None] * B[..., None, :]
|
| 529 |
+
|
| 530 |
+
# Discretize x into dB
|
| 531 |
+
# [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
|
| 532 |
+
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
|
| 533 |
+
dBx = (dB * hidden_states[..., None]).to(device=cache_device)
|
| 534 |
+
|
| 535 |
+
# State calculation
|
| 536 |
+
cache_params.update_ssm_state(
|
| 537 |
+
layer_idx=self.layer_idx,
|
| 538 |
+
new_ssm_state=cache_params.ssm_states[self.layer_idx] * dA + dBx
|
| 539 |
+
)
|
| 540 |
+
|
| 541 |
+
# Subsequent output
|
| 542 |
+
# [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
|
| 543 |
+
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
|
| 544 |
+
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
|
| 545 |
+
C = C.reshape(batch_size, -1, C.shape[-1])
|
| 546 |
+
# [bsz, num_heads, head_dim]
|
| 547 |
+
|
| 548 |
+
ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) # Shape: [b, h, d, n]
|
| 549 |
+
# Reshape ssm_states to merge the first two dimensions
|
| 550 |
+
# Shape: [b*h, d, n]
|
| 551 |
+
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)
|
| 552 |
+
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1]
|
| 553 |
+
y = torch.bmm(ssm_states_reshaped, C_reshaped)
|
| 554 |
+
y = y.view(batch_size, self.num_heads, self.head_dim)
|
| 555 |
+
|
| 556 |
+
# D skip connection
|
| 557 |
+
# [num_heads] -> [num_heads, head_dim]
|
| 558 |
+
D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
|
| 559 |
+
y = (y + hidden_states * D).to(y.dtype)
|
| 560 |
+
|
| 561 |
+
# [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
|
| 562 |
+
y = y.reshape(batch_size, -1)[:, None, ...]
|
| 563 |
+
else:
|
| 564 |
+
# begin ssd naive implementation without einsums
|
| 565 |
+
dt = nn.functional.softplus(dt + self.dt_bias)
|
| 566 |
+
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
|
| 567 |
+
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
|
| 568 |
+
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
|
| 569 |
+
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
|
| 570 |
+
B = B.repeat(1, 1, self.num_heads // self.n_groups, 1)
|
| 571 |
+
C = C.repeat(1, 1, self.num_heads // self.n_groups, 1)
|
| 572 |
+
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
|
| 573 |
+
|
| 574 |
+
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
|
| 575 |
+
|
| 576 |
+
# Discretize x and A
|
| 577 |
+
hidden_states = hidden_states * dt[..., None]
|
| 578 |
+
A = A.to(hidden_states.dtype) * dt
|
| 579 |
+
|
| 580 |
+
# Rearrange into blocks/chunks
|
| 581 |
+
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
|
| 582 |
+
|
| 583 |
+
# [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
|
| 584 |
+
A = A.permute(0, 3, 1, 2)
|
| 585 |
+
A_cumsum = torch.cumsum(A, dim=-1)
|
| 586 |
+
|
| 587 |
+
# 1. Compute the output for each intra-chunk (diagonal blocks)
|
| 588 |
+
# This is the analog of a causal mask
|
| 589 |
+
L = torch.exp(segment_sum(A))
|
| 590 |
+
|
| 591 |
+
# Contraction of C and B to get G (attention-weights like)
|
| 592 |
+
# shape: (b, c, l, s, h, n)
|
| 593 |
+
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]
|
| 594 |
+
G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h)
|
| 595 |
+
|
| 596 |
+
# Compute M, equivalent to applying attention mask to weights
|
| 597 |
+
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
|
| 598 |
+
M = M_intermediate.sum(dim=-1)
|
| 599 |
+
|
| 600 |
+
# Compute Y_diag (apply to values)
|
| 601 |
+
Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)
|
| 602 |
+
|
| 603 |
+
# 2. Compute the state for each intra-chunk
|
| 604 |
+
# (right term of low-rank factorization of off-diagonal blocks; B terms)
|
| 605 |
+
decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
|
| 606 |
+
B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
|
| 607 |
+
states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)
|
| 608 |
+
|
| 609 |
+
# 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries
|
| 610 |
+
# (middle term of factorization of off-diag blocks; A terms)
|
| 611 |
+
if cache_params is not None and cache_position is not None and cache_position[0] > 0:
|
| 612 |
+
previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
|
| 613 |
+
else:
|
| 614 |
+
previous_states = torch.zeros_like(states[:, :1])
|
| 615 |
+
states = torch.cat([previous_states, states], dim=1)
|
| 616 |
+
decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
|
| 617 |
+
decay_chunk = decay_chunk.transpose(1, 3)
|
| 618 |
+
new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
|
| 619 |
+
states, ssm_state = new_states[:, :-1], new_states[:, -1]
|
| 620 |
+
|
| 621 |
+
# 4. Compute state -> output conversion per chunk
|
| 622 |
+
# (left term of low-rank factorization of off-diagonal blocks; C terms)
|
| 623 |
+
state_decay_out = torch.exp(A_cumsum)
|
| 624 |
+
C_times_states = (C[..., None, :] * states[:, :, None, ...])
|
| 625 |
+
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
|
| 626 |
+
Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
|
| 627 |
+
|
| 628 |
+
# Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
|
| 629 |
+
y = Y_diag + Y_off
|
| 630 |
+
# [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
|
| 631 |
+
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
|
| 632 |
+
|
| 633 |
+
y = y + D_residual
|
| 634 |
+
# Cutting off padded chunks
|
| 635 |
+
if pad_size > 0:
|
| 636 |
+
y = y[:, :seq_len, :, :]
|
| 637 |
+
y = y.reshape(batch_size, seq_len, -1)
|
| 638 |
+
|
| 639 |
+
# Init cache
|
| 640 |
+
if ssm_state is not None and cache_params is not None:
|
| 641 |
+
cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)
|
| 642 |
+
|
| 643 |
+
scan_output = self.norm(y, gate)
|
| 644 |
+
|
| 645 |
+
# end ssd naive
|
| 646 |
+
|
| 647 |
+
# 4. Final linear projection
|
| 648 |
+
contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size]
|
| 649 |
+
return contextualized_states
    # fmt: on

    def forward(
        self,
        hidden_states,
        cache_params: Optional[Mamba2Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
            return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
        dtype = hidden_states.dtype
        if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
            # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
            hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)

        return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)


class Mamba2Block(nn.Module):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.residual_in_fp32 = config.residual_in_fp32
        self.norm = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.mixer = Mamba2Mixer(config, layer_idx=layer_idx)

    def forward(
        self,
        hidden_states,
        cache_params: Optional[Mamba2Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        residual = hidden_states
        hidden_states = self.norm(hidden_states)
        if self.residual_in_fp32:
            residual = residual.to(torch.float32)

        hidden_states = self.mixer(
            hidden_states,
            cache_params=cache_params,
            cache_position=cache_position,
            attention_mask=attention_mask,
        )
        hidden_states = residual + hidden_states
        if self.residual_in_fp32:
            hidden_states = hidden_states.to(dtype=self.norm.weight.dtype)
        return hidden_states

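# Note (added commentary, not part of the upstream file): both `cuda_kernels_forward`
# and `torch_forward` of `Mamba2Mixer` above split the `in_proj` output along its last
# dimension as
#     [d_mlp, d_mlp, intermediate_size, conv_dim, num_heads]
# where, as implied by the later split of `hidden_states_B_C`,
#     conv_dim = intermediate_size + 2 * n_groups * ssm_state_size.
# The total projection width is therefore
#     2 * d_mlp + 2 * intermediate_size + 2 * n_groups * ssm_state_size + num_heads,
# and solving for the gated-MLP width gives the expression used in both methods:
#     d_mlp = (proj_dim - 2 * intermediate_size - 2 * n_groups * ssm_state_size - num_heads) // 2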
class Mamba2PreTrainedModel(PreTrainedModel, GenerationMixin):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Mamba2Config
    base_model_prefix = "backbone"
    _no_split_modules = ["Mamba2Block"]
    supports_gradient_checkpointing = True
    _is_stateful = True

    def _init_weights(
        self,
        module: nn.Module,
        num_residuals_per_layer: int = 1,
    ):
        """Initialize the weights."""
        if isinstance(module, Mamba2Mixer):

            # --- A_log ---
            A = torch.arange(1, module.num_heads + 1)
            with torch.no_grad():
                if not isinstance(module.A_log, torch.distributed.tensor.DTensor):
                    module.A_log.copy_(torch.log(A))
                else:
                    logger.warning_once("`A_log` is a DTensor, skipping initialization")
            module.A_log._no_weight_decay = True

            # --- D ---
            nn.init.ones_(module.D)
            module.D._no_weight_decay = True

            # --- dt_bias ---
            dt = torch.exp(
                torch.rand(self.config.num_heads)
                * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
                + math.log(self.config.time_step_min)
            ).clamp(min=self.config.time_step_floor)

            # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
            inv_dt = dt + torch.log(-torch.expm1(-dt))
            with torch.no_grad():
                if not isinstance(module.dt_bias, torch.distributed.tensor.DTensor):
                    module.dt_bias.copy_(inv_dt)
                else:
                    logger.warning_once("`dt_bias` is a DTensor, skipping initialization")
            module.dt_bias._no_reinit = True

        elif isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
                # guard against deprecated behavior
                if hasattr(module.bias, "_no_reinit"):
                    raise ValueError("This is not supposed to happen")
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if self.config.rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                # p = module.o_proj.weight
                # guard against deprecated behavior
                raise ValueError("This is not supposed to happen")
            elif hasattr(module, 'out_proj'):
                p = module.out_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                with torch.no_grad():
                    p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


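# Added sketch (hedged, not from the upstream file): `_init_weights` above stores the
# *inverse softplus* of a log-uniformly sampled time step in `dt_bias`, so that
# `softplus(dt_bias)` recovers a dt inside [time_step_min, time_step_max] at runtime.
# The helper below is illustrative only; the bounds are made-up values, and the function
# is never called by the model code.
def _check_inverse_softplus_sketch():
    import math

    import torch
    import torch.nn.functional as F

    time_step_min, time_step_max = 1e-3, 1e-1  # illustrative, not taken from any config
    dt = torch.exp(
        torch.rand(8) * (math.log(time_step_max) - math.log(time_step_min)) + math.log(time_step_min)
    )
    inv_dt = dt + torch.log(-torch.expm1(-dt))  # inverse of softplus
    assert torch.allclose(F.softplus(inv_dt), dt, atol=1e-5)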
@dataclass
|
| 792 |
+
# Copied from transformers.models.mamba.modeling_mamba.MambaOutput with MAMBA->MAMBA2,Mamba->Mamba2
|
| 793 |
+
class Mamba2Output(ModelOutput):
|
| 794 |
+
"""
|
| 795 |
+
Class for the MAMBA2 model outputs.
|
| 796 |
+
|
| 797 |
+
Args:
|
| 798 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 799 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 800 |
+
cache_params (`Mamba2Cache`):
|
| 801 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
| 802 |
+
avoid providing the old `input_ids`.
|
| 803 |
+
|
| 804 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
| 805 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*,
|
| 806 |
+
returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 807 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 808 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 809 |
+
|
| 810 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 811 |
+
"""
|
| 812 |
+
|
| 813 |
+
last_hidden_state: Optional[torch.FloatTensor] = None
|
| 814 |
+
cache_params: Optional[Mamba2Cache] = None
|
| 815 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 816 |
+
|
| 817 |
+
|
| 818 |
+
@dataclass
|
| 819 |
+
# Copied from transformers.models.mamba.modeling_mamba.MambaCausalLMOutput with Mamba->Mamba2
|
| 820 |
+
class Mamba2CausalLMOutput(ModelOutput):
|
| 821 |
+
"""
|
| 822 |
+
Base class for causal language model (or autoregressive) outputs.
|
| 823 |
+
|
| 824 |
+
Args:
|
| 825 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
| 826 |
+
Language modeling loss (for next-token prediction).
|
| 827 |
+
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
| 828 |
+
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
| 829 |
+
cache_params (`Mamba2Cache`):
|
| 830 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
| 831 |
+
avoid providing the old `input_ids`.
|
| 832 |
+
|
| 833 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
| 834 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*,
|
| 835 |
+
returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 836 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 837 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 838 |
+
|
| 839 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 840 |
+
"""
|
| 841 |
+
|
| 842 |
+
loss: Optional[torch.FloatTensor] = None
|
| 843 |
+
logits: Optional[torch.FloatTensor] = None
|
| 844 |
+
cache_params: Optional[Mamba2Cache] = None
|
| 845 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 846 |
+
|
| 847 |
+
|
| 848 |
+
class Mamba2Model(Mamba2PreTrainedModel):
|
| 849 |
+
def __init__(self, config):
|
| 850 |
+
super().__init__(config)
|
| 851 |
+
|
| 852 |
+
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
|
| 853 |
+
self.layers = nn.ModuleList([Mamba2Block(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
|
| 854 |
+
|
| 855 |
+
self.gradient_checkpointing = False
|
| 856 |
+
self.norm_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
|
| 857 |
+
# Initialize weights and apply final processing
|
| 858 |
+
self._register_load_state_dict_pre_hook(self.load_hook)
|
| 859 |
+
self.post_init()
|
| 860 |
+
|
| 861 |
+
def load_hook(self, state_dict, prefix, *args):
|
| 862 |
+
for k in state_dict:
|
| 863 |
+
if "embedding." in k:
|
| 864 |
+
state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k)
|
| 865 |
+
break
|
| 866 |
+
|
| 867 |
+
def get_input_embeddings(self):
|
| 868 |
+
return self.embeddings
|
| 869 |
+
|
| 870 |
+
def set_input_embeddings(self, new_embeddings):
|
| 871 |
+
self.embeddings = new_embeddings
|
| 872 |
+
|
| 873 |
+
def forward(
|
| 874 |
+
self,
|
| 875 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 876 |
+
inputs_embeds: Optional[torch.LongTensor] = None,
|
| 877 |
+
cache_params: Optional[Mamba2Cache] = None,
|
| 878 |
+
use_cache: Optional[bool] = None,
|
| 879 |
+
output_hidden_states: Optional[bool] = None,
|
| 880 |
+
return_dict: Optional[bool] = None,
|
| 881 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 882 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 883 |
+
**kwargs,
|
| 884 |
+
) -> Union[Tuple, Mamba2Output]:
|
| 885 |
+
output_hidden_states = (
|
| 886 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 887 |
+
)
|
| 888 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
| 889 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 890 |
+
|
| 891 |
+
if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor
|
| 892 |
+
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
|
| 893 |
+
|
| 894 |
+
if inputs_embeds is None:
|
| 895 |
+
inputs_embeds = self.embeddings(input_ids)
|
| 896 |
+
|
| 897 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
| 898 |
+
use_cache = False
|
| 899 |
+
|
| 900 |
+
if use_cache:
|
| 901 |
+
if cache_params is None:
|
| 902 |
+
cache_params = Mamba2Cache(
|
| 903 |
+
self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
|
| 904 |
+
)
|
| 905 |
+
cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device)
|
| 906 |
+
elif cache_position is None:
|
| 907 |
+
# cases when we do manual forward instead of using `model.generate` which will initiate
|
| 908 |
+
# `cache_position` and makes sure it is not None, throw error here instead of doing some
|
| 909 |
+
# hack to conjecture the current cache position
|
| 910 |
+
raise ValueError(
|
| 911 |
+
"You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, "
|
| 912 |
+
"you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will "
|
| 913 |
+
"be initialized for you automatically"
|
| 914 |
+
)
|
| 915 |
+
else:
|
| 916 |
+
cache_params = None
|
| 917 |
+
|
| 918 |
+
hidden_states = inputs_embeds
|
| 919 |
+
all_hidden_states = () if output_hidden_states else None
|
| 920 |
+
for mixer_block in self.layers:
|
| 921 |
+
if self.gradient_checkpointing and self.training:
|
| 922 |
+
hidden_states = self._gradient_checkpointing_func(
|
| 923 |
+
mixer_block.__call__,
|
| 924 |
+
hidden_states,
|
| 925 |
+
cache_params,
|
| 926 |
+
cache_position,
|
| 927 |
+
attention_mask,
|
| 928 |
+
)
|
| 929 |
+
else:
|
| 930 |
+
hidden_states = mixer_block(
|
| 931 |
+
hidden_states,
|
| 932 |
+
cache_params=cache_params,
|
| 933 |
+
cache_position=cache_position,
|
| 934 |
+
attention_mask=attention_mask,
|
| 935 |
+
)
|
| 936 |
+
|
| 937 |
+
if output_hidden_states:
|
| 938 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 939 |
+
|
| 940 |
+
hidden_states = self.norm_f(hidden_states)
|
| 941 |
+
|
| 942 |
+
if output_hidden_states:
|
| 943 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 944 |
+
|
| 945 |
+
if not return_dict:
|
| 946 |
+
return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
|
| 947 |
+
|
| 948 |
+
return Mamba2Output(
|
| 949 |
+
last_hidden_state=hidden_states,
|
| 950 |
+
cache_params=cache_params if use_cache else None,
|
| 951 |
+
hidden_states=all_hidden_states,
|
| 952 |
+
)
|
| 953 |
+
|
| 954 |
+
|
| 955 |
+
class Mamba2ForCausalLM(Mamba2PreTrainedModel):
|
| 956 |
+
_tied_weights_keys = []
|
| 957 |
+
|
| 958 |
+
def __init__(self, config):
|
| 959 |
+
super().__init__(config)
|
| 960 |
+
self.backbone = Mamba2Model(config)
|
| 961 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 962 |
+
self.criterion = None
|
| 963 |
+
|
| 964 |
+
# Initialize weights and apply final processing
|
| 965 |
+
self.post_init()
|
| 966 |
+
|
| 967 |
+
def get_output_embeddings(self):
|
| 968 |
+
return self.lm_head
|
| 969 |
+
|
| 970 |
+
def set_output_embeddings(self, new_embeddings):
|
| 971 |
+
self.lm_head = new_embeddings
|
| 972 |
+
|
| 973 |
+
def get_input_embeddings(self):
|
| 974 |
+
return self.backbone.get_input_embeddings()
|
| 975 |
+
|
| 976 |
+
def set_input_embeddings(self, new_embeddings):
|
| 977 |
+
return self.backbone.set_input_embeddings(new_embeddings)
|
| 978 |
+
|
| 979 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 980 |
+
def prepare_inputs_for_generation(
|
| 981 |
+
self,
|
| 982 |
+
input_ids,
|
| 983 |
+
inputs_embeds=None,
|
| 984 |
+
use_cache=None,
|
| 985 |
+
cache_params: Optional[Mamba2Cache] = None,
|
| 986 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 987 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 988 |
+
logits_to_keep: Optional[int] = None,
|
| 989 |
+
**kwargs,
|
| 990 |
+
):
|
| 991 |
+
if use_cache:
|
| 992 |
+
# `cache_position` should have been initialized in `generate`
|
| 993 |
+
if cache_position is None:
|
| 994 |
+
raise ValueError(
|
| 995 |
+
"`cache_position` should not be None as it should have been initialized in "
|
| 996 |
+
"`model.generate`, you are responsible for passing in a valid `cache_position` if "
|
| 997 |
+
"you are calling `prepare_inputs_for_generation` directly with `use_cache=True`"
|
| 998 |
+
)
|
| 999 |
+
if cache_position[0] > 0:
|
| 1000 |
+
input_ids = input_ids[:, -1][..., None]
|
| 1001 |
+
|
| 1002 |
+
if attention_mask is not None:
|
| 1003 |
+
attention_mask = None
|
| 1004 |
+
else:
|
| 1005 |
+
# we initialize the `cache_position` to full size of `conv_states` at prefill stage
|
| 1006 |
+
# considering padding will be applied when input length is shorter, and truncation
|
| 1007 |
+
# will be applied when it is longer, so it will be equivalent to always have it match
|
| 1008 |
+
# the length of `cache_params.conv_states`, which is `config.conv_kernel`
|
| 1009 |
+
cache_position = torch.arange(0, self.config.conv_kernel, device=input_ids.device)
|
| 1010 |
+
|
| 1011 |
+
if inputs_embeds is not None and cache_params is None:
|
| 1012 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
| 1013 |
+
else:
|
| 1014 |
+
model_inputs = {"input_ids": input_ids}
|
| 1015 |
+
|
| 1016 |
+
if logits_to_keep is not None:
|
| 1017 |
+
model_inputs['logits_to_keep'] = logits_to_keep
|
| 1018 |
+
|
| 1019 |
+
model_inputs.update({
|
| 1020 |
+
'attention_mask': attention_mask,
|
| 1021 |
+
'cache_params': cache_params,
|
| 1022 |
+
'use_cache': use_cache,
|
| 1023 |
+
'cache_position': cache_position,
|
| 1024 |
+
'logits_to_keep': logits_to_keep
|
| 1025 |
+
})
|
| 1026 |
+
return model_inputs
|
| 1027 |
+
|
| 1028 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 1029 |
+
def forward(
|
| 1030 |
+
self,
|
| 1031 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1032 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1033 |
+
cache_params: Optional[Mamba2Cache] = None,
|
| 1034 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1035 |
+
output_hidden_states: Optional[bool] = None,
|
| 1036 |
+
return_dict: Optional[bool] = None,
|
| 1037 |
+
use_cache: Optional[bool] = None,
|
| 1038 |
+
cache_position: Optional[torch.Tensor] = None,
|
| 1039 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1040 |
+
logits_to_keep: Optional[int] = 0,
|
| 1041 |
+
**kwargs, # for now we need this for generation
|
| 1042 |
+
) -> Union[Tuple, Mamba2CausalLMOutput]:
|
| 1043 |
+
r"""
|
| 1044 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1045 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
| 1046 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
| 1047 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
| 1048 |
+
"""
|
| 1049 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1050 |
+
|
| 1051 |
+
outputs = self.backbone(
|
| 1052 |
+
input_ids,
|
| 1053 |
+
cache_params=cache_params,
|
| 1054 |
+
inputs_embeds=inputs_embeds,
|
| 1055 |
+
output_hidden_states=output_hidden_states,
|
| 1056 |
+
return_dict=return_dict,
|
| 1057 |
+
use_cache=use_cache,
|
| 1058 |
+
cache_position=cache_position,
|
| 1059 |
+
attention_mask=attention_mask,
|
| 1060 |
+
)
|
| 1061 |
+
hidden_states = outputs[0]
|
| 1062 |
+
fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
|
| 1063 |
+
|
| 1064 |
+
loss, logits = None, None
|
| 1065 |
+
if not fuse_linear_and_cross_entropy or labels is None:
|
| 1066 |
+
logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
|
| 1067 |
+
if labels is not None:
|
| 1068 |
+
if getattr(self, 'criterion', None) is None:
|
| 1069 |
+
if fuse_linear_and_cross_entropy:
|
| 1070 |
+
criterion = FusedLinearCrossEntropyLoss()
|
| 1071 |
+
elif self.config.fuse_cross_entropy:
|
| 1072 |
+
criterion = FusedCrossEntropyLoss(inplace_backward=True)
|
| 1073 |
+
else:
|
| 1074 |
+
criterion = nn.CrossEntropyLoss()
|
| 1075 |
+
else:
|
| 1076 |
+
criterion = self.criterion
|
| 1077 |
+
labels = labels.to(hidden_states.device)
|
| 1078 |
+
labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
|
| 1079 |
+
if fuse_linear_and_cross_entropy:
|
| 1080 |
+
loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
|
| 1081 |
+
else:
|
| 1082 |
+
loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))
|
| 1083 |
+
|
| 1084 |
+
if not return_dict:
|
| 1085 |
+
output = (logits,) + outputs[1:]
|
| 1086 |
+
return (loss,) + output if loss is not None else output
|
| 1087 |
+
|
| 1088 |
+
return Mamba2CausalLMOutput(
|
| 1089 |
+
loss=loss,
|
| 1090 |
+
logits=logits,
|
| 1091 |
+
cache_params=outputs.cache_params,
|
| 1092 |
+
hidden_states=outputs.hidden_states,
|
| 1093 |
+
)
|
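The classes above compose into a standard causal LM interface. Below is a minimal sketch of how they might be wired together for a CPU forward pass; the config values are illustrative assumptions (they presume `Mamba2Config` accepts these fields and that `num_heads * head_dim` must match the expanded intermediate size), and `fuse_cross_entropy=False` is passed only so the plain `nn.CrossEntropyLoss` branch of the loss code is taken instead of the fused Triton kernels.

import torch

# Hypothetical small config; field names follow the attributes referenced in the code above.
config = Mamba2Config(
    vocab_size=1000, hidden_size=256, num_heads=16, head_dim=32,
    num_hidden_layers=2, fuse_cross_entropy=False,
)
model = Mamba2ForCausalLM(config)

input_ids = torch.randint(0, config.vocab_size, (2, 16))
outputs = model(input_ids=input_ids, labels=input_ids, use_cache=False)
print(outputs.loss, outputs.logits.shape)  # scalar loss, logits of shape [2, 16, vocab_size]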
fla/models/nsa/configuration_nsa.py
ADDED
@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class NSAConfig(PretrainedConfig):

    model_type = 'nsa'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        hidden_size: int = 2048,
        num_hidden_layers: int = 24,
        num_heads: int = 64,
        num_kv_heads: int = 4,
        head_dim: int = 32,
        qkv_bias: bool = False,
        block_size: int = 64,
        block_counts: Optional[int] = 16,
        window_size: Optional[int] = 512,
        rope_theta: Optional[float] = 10000.,
        max_position_embeddings: int = 2048,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        initializer_range: float = 0.006,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.head_dim = head_dim
        self.qkv_bias = qkv_bias
        self.block_size = block_size
        self.block_counts = block_counts
        self.window_size = window_size
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act

        self.initializer_range = initializer_range
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_cache = use_cache

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
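For orientation, the block-sparsity fields above can be read as a per-query token budget: each query attends to `block_counts` selected blocks of `block_size` tokens plus a sliding window of `window_size` tokens. That reading is an interpretation of the fields, not an API of the class; the snippet below only instantiates the config and prints the implied numbers.

# Illustrative only: interpret the NSA block-sparsity fields as a token budget per query.
config = NSAConfig(block_size=64, block_counts=16, window_size=512)
selected = config.block_counts * config.block_size  # 16 * 64 = 1024 tokens from selected blocks
print(selected, config.window_size)                 # 1024 512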
fla/models/rwkv6/configuration_rwkv6.py
ADDED
@@ -0,0 +1,82 @@
# -*- coding: utf-8 -*-

from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class RWKV6Config(PretrainedConfig):

    model_type = 'rwkv6'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        attn_mode: str = "chunk",
        hidden_size: int = 2048,
        expand_k: int = 0.5,
        expand_v: int = 1,
        hidden_ratio: Optional[int] = 3.5,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 4,
        proj_low_rank_dim: int = 32,
        gate_low_rank_dim: int = 64,
        hidden_act: str = "sqrelu",
        max_position_embeddings: int = 2048,
        norm_first: bool = True,
        norm_bias: bool = True,
        norm_eps: float = 1e-5,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        **kwargs
    ):
        self.attn_mode = attn_mode
        self.hidden_size = hidden_size
        self.expand_k = expand_k
        self.expand_v = expand_v
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.norm_first = norm_first
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.proj_low_rank_dim = proj_low_rank_dim
        self.gate_low_rank_dim = gate_low_rank_dim
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.norm_bias = norm_bias
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.fuse_norm = fuse_norm
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
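The validation block in `__init__` above only accepts an `attn` dict that names the hybrid layers and their head count, then back-fills the remaining keys with defaults. A minimal sketch of a hybrid configuration (the layer indices and head count are illustrative values):

# Layers 0 and 12 use standard softmax attention; all other layers use RWKV6 token mixing.
config = RWKV6Config(num_hidden_layers=24, attn={'layers': [0, 12], 'num_heads': 16})
print(config.attn)
# -> {'layers': [0, 12], 'num_heads': 16, 'num_kv_heads': 16, 'qkv_bias': False,
#     'window_size': None, 'rope_theta': 10000.0}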
fla/models/rwkv6/modeling_rwkv6.py
ADDED
@@ -0,0 +1,480 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.rwkv6 import LerpLinear, RWKV6Attention
from fla.models.rwkv6.configuration_rwkv6 import RWKV6Config
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, LayerNorm
from fla.modules.activations import ACT2FN

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


|
| 32 |
+
|
| 33 |
+
def __init__(
|
| 34 |
+
self,
|
| 35 |
+
hidden_size: int,
|
| 36 |
+
hidden_ratio: Optional[int] = None,
|
| 37 |
+
intermediate_size: Optional[int] = None,
|
| 38 |
+
hidden_act: str = 'sqrelu',
|
| 39 |
+
layer_idx: int = None
|
| 40 |
+
) -> RWKV6FeedForward:
|
| 41 |
+
super().__init__()
|
| 42 |
+
|
| 43 |
+
self.hidden_size = hidden_size
|
| 44 |
+
if hidden_ratio is None:
|
| 45 |
+
hidden_ratio = 3.5
|
| 46 |
+
if intermediate_size is None:
|
| 47 |
+
intermediate_size = int(hidden_size * hidden_ratio)
|
| 48 |
+
intermediate_size = 32 * ((intermediate_size + 32 - 1) // 32)
|
| 49 |
+
self.hidden_ratio = hidden_ratio
|
| 50 |
+
self.intermediate_size = intermediate_size
|
| 51 |
+
|
| 52 |
+
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
|
| 53 |
+
|
| 54 |
+
self.key = LerpLinear(hidden_size, intermediate_size)
|
| 55 |
+
self.value = nn.Linear(intermediate_size, hidden_size, bias=False)
|
| 56 |
+
self.receptance = LerpLinear(hidden_size, hidden_size)
|
| 57 |
+
self.act_fn = ACT2FN[hidden_act]
|
| 58 |
+
|
| 59 |
+
self.layer_idx = layer_idx
|
| 60 |
+
|
| 61 |
+
def forward(
|
| 62 |
+
self,
|
| 63 |
+
x: torch.Tensor,
|
| 64 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 65 |
+
state: Optional[Cache] = None
|
| 66 |
+
) -> torch.Tensor:
|
| 67 |
+
if attention_mask is not None:
|
| 68 |
+
x = x.mul_(attention_mask[:, -x.shape[-2]:, None])
|
| 69 |
+
if x.shape[1] == 1 and state is not None and state[self.layer_idx]['ffn_state'] is not None:
|
| 70 |
+
shifted = state[self.layer_idx]['ffn_state'].unsqueeze(1)
|
| 71 |
+
else:
|
| 72 |
+
shifted = self.time_shift(x)
|
| 73 |
+
if state is not None and state[self.layer_idx]['ffn_state'] is not None:
|
| 74 |
+
shifted[:, 0] = state[self.layer_idx]['ffn_state']
|
| 75 |
+
delta = shifted - x
|
| 76 |
+
key = self.act_fn(self.key(x, delta))
|
| 77 |
+
value = self.value(key)
|
| 78 |
+
receptance = self.receptance(x, delta)
|
| 79 |
+
|
| 80 |
+
if state is not None:
|
| 81 |
+
# no need to update the offset twice
|
| 82 |
+
state.update(ffn_state=x[:, -1], layer_idx=self.layer_idx, offset=0)
|
| 83 |
+
return receptance.sigmoid() * value, state
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class RWKV6Block(nn.Module):
|
| 87 |
+
def __init__(self, config: RWKV6Config, layer_idx: int):
|
| 88 |
+
super().__init__()
|
| 89 |
+
|
| 90 |
+
self.config = config
|
| 91 |
+
self.layer_idx = layer_idx
|
| 92 |
+
|
| 93 |
+
if config.norm_first and layer_idx == 0:
|
| 94 |
+
self.pre_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
|
| 95 |
+
config.hidden_size,
|
| 96 |
+
bias=config.norm_bias,
|
| 97 |
+
eps=config.norm_eps
|
| 98 |
+
)
|
| 99 |
+
self.attn_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
|
| 100 |
+
config.hidden_size,
|
| 101 |
+
bias=config.norm_bias,
|
| 102 |
+
eps=config.norm_eps
|
| 103 |
+
)
|
| 104 |
+
if config.attn is not None and layer_idx in config.attn['layers']:
|
| 105 |
+
self.attn = Attention(
|
| 106 |
+
hidden_size=config.hidden_size,
|
| 107 |
+
num_heads=config.attn['num_heads'],
|
| 108 |
+
num_kv_heads=config.attn['num_kv_heads'],
|
| 109 |
+
qkv_bias=config.attn['qkv_bias'],
|
| 110 |
+
window_size=config.attn['window_size'],
|
| 111 |
+
rope_theta=config.attn['rope_theta'],
|
| 112 |
+
max_position_embeddings=config.max_position_embeddings,
|
| 113 |
+
layer_idx=layer_idx
|
| 114 |
+
)
|
| 115 |
+
else:
|
| 116 |
+
self.attn = RWKV6Attention(
|
| 117 |
+
mode=config.attn_mode,
|
| 118 |
+
hidden_size=config.hidden_size,
|
| 119 |
+
expand_k=config.expand_k,
|
| 120 |
+
expand_v=config.expand_v,
|
| 121 |
+
num_heads=config.num_heads,
|
| 122 |
+
proj_low_rank_dim=config.proj_low_rank_dim,
|
| 123 |
+
gate_low_rank_dim=config.gate_low_rank_dim,
|
| 124 |
+
norm_eps=config.norm_eps,
|
| 125 |
+
fuse_norm=config.fuse_norm,
|
| 126 |
+
layer_idx=layer_idx
|
| 127 |
+
)
|
| 128 |
+
self.ffn_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
|
| 129 |
+
config.hidden_size,
|
| 130 |
+
bias=config.norm_bias,
|
| 131 |
+
eps=config.norm_eps
|
| 132 |
+
)
|
| 133 |
+
self.ffn = RWKV6FeedForward(
|
| 134 |
+
hidden_size=config.hidden_size,
|
| 135 |
+
hidden_ratio=config.hidden_ratio,
|
| 136 |
+
intermediate_size=config.intermediate_size,
|
| 137 |
+
hidden_act=config.hidden_act,
|
| 138 |
+
layer_idx=layer_idx
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
def forward(
|
| 142 |
+
self,
|
| 143 |
+
hidden_states: torch.Tensor,
|
| 144 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 145 |
+
past_key_values: Optional[Cache] = None,
|
| 146 |
+
use_cache: Optional[bool] = False,
|
| 147 |
+
output_attentions: Optional[bool] = False,
|
| 148 |
+
**kwargs,
|
| 149 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 150 |
+
residual = self.pre_norm(hidden_states) if hasattr(self, 'pre_norm') else hidden_states
|
| 151 |
+
        hidden_states = self.attn_norm(residual)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.ffn_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.ffn_norm(hidden_states)
        hidden_states, past_key_values = self.ffn(hidden_states, attention_mask, past_key_values)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class RWKV6PreTrainedModel(PreTrainedModel):

    config_class = RWKV6Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['RWKV6Block']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Parameter):
            nn.init.normal_(module, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                with torch.no_grad():
                    p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class RWKV6Model(RWKV6PreTrainedModel):

    def __init__(self, config: RWKV6Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([RWKV6Block(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`RWKV6Model` does not support `output_attentions` for now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class RWKV6ForCausalLM(RWKV6PreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = RWKV6Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    "You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    "Try another generation strategy instead. "
                    "For the available generation strategies, check this doc: "
                    "https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only keep the last token of `input_ids` if `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step;
        # `past_key_values` may still be None at that point, so guard against it.
        if inputs_embeds is not None and (past_key_values is None or len(past_key_values) == 0):
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        if not fuse_linear_and_cross_entropy or labels is None:
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
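A note on the loss path above: the causal-LM head shifts the labels itself rather than expecting pre-shifted labels from the caller, padding the final position with the criterion's `ignore_index` so that each position's logits are scored against the next token. A minimal stand-alone check of that `torch.cat`-based shift (plain PyTorch, no `fla` dependency; the toy values are illustrative only):

import torch

# Logits at position t are scored against the token at position t + 1;
# the final position is masked out with -100 (nn.CrossEntropyLoss's default).
labels = torch.tensor([[11, 12, 13, 14]])
ignore_index = -100
shifted = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], ignore_index)), 1)
print(shifted)  # tensor([[  12,   13,   14, -100]])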
fla/models/rwkv7/configuration_rwkv7.py
ADDED
@@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-

from typing import Dict, List, Optional, Union

from transformers.configuration_utils import PretrainedConfig


class RWKV7Config(PretrainedConfig):

    model_type = 'rwkv7'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        attn_mode: str = "chunk",
        hidden_size: int = 2048,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        head_dim: Optional[int] = 64,
        num_heads: Optional[int] = None,
        decay_low_rank_dim: int = 64,
        gate_low_rank_dim: int = 128,
        a_low_rank_dim: int = 64,
        v_low_rank_dim: int = 16,
        hidden_act: str = "sqrelu",
        max_position_embeddings: int = 2048,
        norm_first: bool = True,
        norm_bias: bool = True,
        norm_eps: float = 1e-5,
        attn: Optional[Dict] = None,
        use_cache: bool = True,
        pad_token_id: Optional[int] = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        initializer_range: float = 0.006,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        value_dim: Optional[Union[int, List[int]]] = None,
        **kwargs
    ):
        self.attn_mode = attn_mode
        self.hidden_size = hidden_size
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.norm_first = norm_first
        self.num_hidden_layers = num_hidden_layers

        if head_dim is None and num_heads is not None:
            head_dim = int(hidden_size // num_heads)
        elif head_dim is not None and num_heads is None:
            num_heads = int(hidden_size // head_dim)

        if value_dim is None:
            value_dim = [hidden_size] * num_hidden_layers
        elif isinstance(value_dim, int):
            assert value_dim >= hidden_size, "value_dim must be no smaller than hidden_size"
            assert value_dim % hidden_size == 0, "value_dim must be divisible by hidden_size"
            value_dim = [value_dim] * num_hidden_layers
        else:
            assert len(value_dim) == num_hidden_layers, "value_dim must have the same length as num_hidden_layers"
            for v in value_dim:
                assert v >= hidden_size, "value_dim must be no smaller than hidden_size"
                assert v % hidden_size == 0, "value_dim must be divisible by hidden_size"

        self.head_dim = head_dim
        self.num_heads = num_heads
        self.value_dim = value_dim

        self.decay_low_rank_dim = decay_low_rank_dim
        self.gate_low_rank_dim = gate_low_rank_dim
        self.a_low_rank_dim = a_low_rank_dim
        self.v_low_rank_dim = v_low_rank_dim
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.norm_bias = norm_bias
        self.norm_eps = norm_eps
        self.attn = attn
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.fuse_norm = fuse_norm
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        if attn is not None:
            if not isinstance(attn, Dict):
                raise ValueError("attn must be a dictionary")
            if 'layers' not in attn:
                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
            if 'num_heads' not in attn:
                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
            attn['qkv_bias'] = attn.get('qkv_bias', False)
            attn['window_size'] = attn.get('window_size', None)
            attn['rope_theta'] = attn.get('rope_theta', 10000.)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
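A quick sketch of how the derived fields above resolve, assuming the `fla` package in this repo is importable (the concrete sizes are hypothetical demo values):

from fla.models.rwkv7.configuration_rwkv7 import RWKV7Config

# head_dim is given, so num_heads is derived as hidden_size // head_dim,
# and value_dim defaults to one hidden_size entry per layer.
config = RWKV7Config(hidden_size=512, head_dim=64, num_hidden_layers=4)
print(config.num_heads)  # 8
print(config.value_dim)  # [512, 512, 512, 512]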
fla/models/rwkv7/modeling_rwkv7.py
ADDED
@@ -0,0 +1,505 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.layers.rwkv7 import RWKV7Attention
from fla.models.rwkv7.configuration_rwkv7 import RWKV7Config
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss, LayerNorm
from fla.modules.activations import ACT2FN

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class RWKV7FeedForward(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = 'sqrelu',
        layer_idx: Optional[int] = None
    ) -> None:
        super().__init__()

        self.hidden_size = hidden_size
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio)
            # round up to the nearest multiple of 32
            intermediate_size = 32 * ((intermediate_size + 32 - 1) // 32)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size

        self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))

        self.x_k = nn.Parameter(torch.zeros(hidden_size))

        self.key = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.value = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

        self.layer_idx = layer_idx

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        state: Optional[Cache] = None
    ) -> Tuple[torch.Tensor, Optional[Cache]]:
        if attention_mask is not None:
            x = x.mul(attention_mask[:, -x.shape[-2]:, None])
        if x.shape[1] == 1 and state is not None and state[self.layer_idx]['ffn_state'] is not None:
            shifted = state[self.layer_idx]['ffn_state'].unsqueeze(1)
        else:
            shifted = self.time_shift(x)
            if state is not None and state[self.layer_idx]['ffn_state'] is not None:
                shifted[:, 0] = state[self.layer_idx]['ffn_state'][-1]
        if state is not None:
            # no need to update the offset twice
            state.update(ffn_state=x[:, -1], layer_idx=self.layer_idx, offset=0)
        return self.value(self.act_fn(self.key(x.addcmul(shifted - x, self.x_k)))), state


class RWKV7Block(nn.Module):

    def __init__(
        self,
        config: RWKV7Config,
        layer_idx: int
    ) -> None:
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        if config.norm_first and layer_idx == 0:
            self.pre_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
                config.hidden_size,
                bias=config.norm_bias,
                eps=config.norm_eps
            )
        self.attn_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.attn = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.attn = RWKV7Attention(
                mode=config.attn_mode,
                hidden_size=config.hidden_size,
                head_dim=config.head_dim,
                num_heads=config.num_heads,
                decay_low_rank_dim=config.decay_low_rank_dim,
                gate_low_rank_dim=config.gate_low_rank_dim,
                a_low_rank_dim=config.a_low_rank_dim,
                v_low_rank_dim=config.v_low_rank_dim,
                norm_eps=config.norm_eps,
                fuse_norm=config.fuse_norm,
                layer_idx=layer_idx,
                value_dim=config.value_dim[layer_idx]
            )
        self.ffn_norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )
        self.ffn = RWKV7FeedForward(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            layer_idx=layer_idx
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        v_first: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = self.pre_norm(hidden_states) if hasattr(self, 'pre_norm') else hidden_states
        hidden_states = self.attn_norm(residual)
        hidden_states, attentions, past_key_values, v_first = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            v_first=v_first,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.ffn_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.ffn_norm(hidden_states)
        hidden_states, past_key_values = self.ffn(hidden_states, attention_mask, past_key_values)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values, v_first)

        return outputs


class RWKV7PreTrainedModel(PreTrainedModel):

    config_class = RWKV7Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['RWKV7Block']
    _supports_cache_class = True
    _skip_keys_device_placement = ["past_key_values"]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        warnings.warn(
            "RWKV-7 employs a carefully designed initialization strategy tailored to its architecture. "
            "The detailed initialization scheme is currently not implemented here but can be found in the "
            "official code repository. We emphasize that using the recommended initialization is essential "
            "for replicating the results in the RWKV-7 paper. Deviations from the prescribed initialization "
            "may lead to performance degradation.\n"
            "Alternatively, please generate initial weights from the official RWKV code repository, and "
            "convert the PyTorch checkpoint into FLA supported format."
        )
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Parameter):
            nn.init.normal_(module, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                with torch.no_grad():
                    p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class RWKV7Model(RWKV7PreTrainedModel):

    def __init__(self, config: RWKV7Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([RWKV7Block(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (LayerNorm if config.fuse_norm else nn.LayerNorm)(
            config.hidden_size,
            bias=config.norm_bias,
            eps=config.norm_eps
        )

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`RWKV7Model` does not support `output_attentions` for now, setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None

        v_first = torch.zeros_like(hidden_states)
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values, v_first = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions,
                    v_first,
                    **kwargs
                )
            else:
                hidden_states, attentions, past_key_values, v_first = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    v_first=v_first,
                    **kwargs
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class RWKV7ForCausalLM(RWKV7PreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = RWKV7Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    "You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    "Try another generation strategy instead. "
                    "For the available generation strategies, check this doc: "
                    "https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only keep the last token of `input_ids` if `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step;
        # `past_key_values` may still be None at that point, so guard against it.
        if inputs_embeds is not None and (past_key_values is None or len(past_key_values) == 0):
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        labels: Optional[torch.LongTensor] = None,
        shift_labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Dict]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training

        loss, logits = None, None
        has_labels = (labels is not None) or (shift_labels is not None)
        if not (fuse_linear_and_cross_entropy and has_labels):
            logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
        if has_labels:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion

            # shift_labels: See https://github.com/huggingface/transformers/pull/36607/files.
            if shift_labels is None:
                shift_labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            shift_labels = shift_labels.to(hidden_states.device)

            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, shift_labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(shift_labels.numel(), -1), shift_labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
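An end-to-end sketch of the class above, with illustrative sizes only. It assumes `fla` and its Triton dependency are installed; the fused RWKV-7 kernels generally expect a CUDA device, so treat this as a usage outline rather than a guaranteed CPU run:

import torch

from fla.models.rwkv7.configuration_rwkv7 import RWKV7Config
from fla.models.rwkv7.modeling_rwkv7 import RWKV7ForCausalLM

# Tiny randomly initialized model; GenerationMixin carries the recurrent
# state through `past_key_values`, so each decoding step is constant-size.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
config = RWKV7Config(hidden_size=128, num_hidden_layers=2, vocab_size=1000)
model = RWKV7ForCausalLM(config).to(device).eval()
input_ids = torch.randint(0, 1000, (1, 8), device=device)
with torch.no_grad():
    out = model.generate(input_ids, max_new_tokens=4, do_sample=False)
print(out.shape)  # expected: torch.Size([1, 12])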
fla/models/samba/__init__.py
ADDED
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-

from transformers import AutoConfig, AutoModel, AutoModelForCausalLM

from fla.models.samba.configuration_samba import SambaConfig
from fla.models.samba.modeling_samba import SambaBlock, SambaForCausalLM, SambaModel

AutoConfig.register(SambaConfig.model_type, SambaConfig, True)
AutoModel.register(SambaConfig, SambaModel, True)
AutoModelForCausalLM.register(SambaConfig, SambaForCausalLM, True)


__all__ = ['SambaConfig', 'SambaForCausalLM', 'SambaModel', 'SambaBlock']
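Importing this module has the side effect of registering Samba with the transformers Auto* factories, so the classes resolve from the model type alone. A small sketch of that flow, assuming `fla` is installed (instantiation may warn about missing fast Mamba kernels but should still build):

from transformers import AutoConfig, AutoModelForCausalLM

import fla.models.samba  # noqa: F401  side effect: runs the registrations above

# Once registered, model_type "samba" resolves through the Auto* mappings.
config = AutoConfig.for_model('samba', num_hidden_layers=2)
model = AutoModelForCausalLM.from_config(config)
print(type(model).__name__)  # SambaForCausalLM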
fla/models/samba/configuration_samba.py
ADDED
@@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-

import math
from typing import Dict, Optional

from transformers.configuration_utils import PretrainedConfig


class SambaConfig(PretrainedConfig):

    model_type = "samba"

    def __init__(
        self,
        hidden_size: int = 2304,
        state_size: int = 16,
        num_hidden_layers: int = 18,
        norm_eps: float = 1e-5,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        expand: int = 2,
        conv_kernel: int = 4,
        use_bias: bool = False,
        use_conv_bias: bool = True,
        hidden_act: str = "swish",
        initializer_range: float = 0.02,
        residual_in_fp32: bool = False,
        time_step_rank: str = "auto",
        time_step_scale: float = 1.0,
        time_step_min: float = 0.001,
        time_step_max: float = 0.1,
        time_step_init_scheme: str = "random",
        time_step_floor: float = 1e-4,
        max_position_embeddings: int = 2048,
        attn: Optional[Dict] = {
            'layers': (1, 3, 5, 7, 9, 11, 13, 15, 17),
            'num_heads': 18,
            'num_kv_heads': 18,
            'qkv_bias': False,
            'window_size': 2048,
            'rope_theta': 10000.
        },
        hidden_ratio: Optional[int] = 4,
        rescale_prenorm_residual: bool = False,
        use_cache: bool = True,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        tie_word_embeddings: bool = False,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.state_size = state_size
        self.num_hidden_layers = num_hidden_layers
        self.norm_eps = norm_eps
        self.conv_kernel = conv_kernel
        self.expand = expand
        self.intermediate_size = int(expand * self.hidden_size)
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.use_bias = use_bias
        self.use_conv_bias = use_conv_bias
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank
        self.time_step_scale = time_step_scale
        self.time_step_min = time_step_min
        self.time_step_max = time_step_max
        self.time_step_init_scheme = time_step_init_scheme
        self.time_step_floor = time_step_floor
        self.max_position_embeddings = max_position_embeddings
        self.attn = attn
        self.hidden_ratio = hidden_ratio
        self.rescale_prenorm_residual = rescale_prenorm_residual
        self.residual_in_fp32 = residual_in_fp32
        self.use_cache = use_cache

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs
        )
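A quick sketch of how the two derived fields above resolve with the defaults; it needs only this config module and transformers:

from fla.models.samba.configuration_samba import SambaConfig

config = SambaConfig()  # defaults: hidden_size=2304, expand=2
print(config.time_step_rank)     # 144, i.e. ceil(2304 / 16)
print(config.intermediate_size)  # 4608, i.e. expand * hidden_size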
fla/models/samba/modeling_samba.py
ADDED
@@ -0,0 +1,413 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from transformers.generation import GenerationMixin
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.models.mamba.modeling_mamba import MambaCache, MambaMixer
from fla.models.samba.configuration_samba import SambaConfig
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as SambaMLP
from fla.modules import RMSNorm

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack

logger = logging.get_logger(__name__)


class SambaBlock(nn.Module):
    def __init__(self, config, layer_idx):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.mixer_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        if config.attn is not None and layer_idx in config.attn['layers']:
            self.mixer = Attention(
                hidden_size=config.hidden_size,
                num_heads=config.attn['num_heads'],
                num_kv_heads=config.attn['num_kv_heads'],
                qkv_bias=config.attn['qkv_bias'],
                window_size=config.attn['window_size'],
                rope_theta=config.attn['rope_theta'],
                max_position_embeddings=config.max_position_embeddings,
                layer_idx=layer_idx
            )
        else:
            self.mixer = MambaMixer(config, layer_idx=layer_idx)
        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = SambaMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Optional[Tuple[torch.Tensor]] = None,
        **kwargs: Unpack[Dict]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:

        residual = hidden_states
        hidden_states = self.mixer_norm(hidden_states)
        if isinstance(self.mixer, MambaMixer):
            hidden_states = self.mixer(hidden_states, cache_params=cache_params, **kwargs)
        else:
            hidden_states, _, cache_params = self.mixer(hidden_states=hidden_states, past_key_values=cache_params, **kwargs)
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states
        return hidden_states


class SambaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SambaConfig
    base_model_prefix = "backbone"
    _no_split_modules = ["SambaBlock"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                if not getattr(module.bias, "_no_reinit", False):
                    nn.init.zeros_(module.bias)
        elif isinstance(module, MambaMixer):
            module.A_log._no_weight_decay = True
            module.D._no_weight_decay = True

            dt_init_std = self.config.time_step_rank**-0.5 * self.config.time_step_scale
            if self.config.time_step_init_scheme == "constant":
                nn.init.constant_(module.dt_proj.weight, dt_init_std)
            elif self.config.time_step_init_scheme == "random":
                nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std)

            dt = torch.exp(
                torch.rand(self.config.intermediate_size)
                * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
                + math.log(self.config.time_step_min)
            ).clamp(min=self.config.time_step_floor)
            # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
            inv_dt = dt + torch.log(-torch.expm1(-dt))
            with torch.no_grad():
                module.dt_proj.bias.data = nn.Parameter(inv_dt.to(module.dt_proj.bias.device))
            module.dt_proj.bias._no_reinit = True
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if self.config.rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            for name, p in module.named_parameters():
                if name in ["out_proj.weight"]:
                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                    # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                    # We need to reinit p since this code could be called multiple times
                    # Having just p *= scale would repeatedly scale it down
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        # `SambaConfig` defines `num_hidden_layers`
                        p /= math.sqrt(self.config.num_hidden_layers)


@dataclass
class SambaOutput(ModelOutput):
    """
    Class for the Samba model outputs.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        cache_params (`MambaCache`):
            The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
            avoid providing the old `input_ids`.

            Includes both the State space model state matrices after the selective scan, and the Convolutional states.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*,
            returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    cache_params: Optional[MambaCache] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class SambaCausalLMOutput(ModelOutput):
    """
    Base class for causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        cache_params (`MambaCache`):
            The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
            avoid providing the old `input_ids`.

            Includes both the State space model state matrices after the selective scan, and the Convolutional states.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*,
            returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    cache_params: Optional[MambaCache] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


class SambaModel(SambaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([SambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])

        self.gradient_checkpointing = False
        self.norm_f = RMSNorm(config.hidden_size, eps=config.norm_eps)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings
|
| 213 |
+
|
| 214 |
+
def set_input_embeddings(self, new_embeddings):
|
| 215 |
+
self.embeddings = new_embeddings
|
| 216 |
+
|
| 217 |
+
def forward(
|
| 218 |
+
self,
|
| 219 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 220 |
+
inputs_embeds: Optional[torch.LongTensor] = None,
|
| 221 |
+
cache_params: Optional[MambaCache] = None,
|
| 222 |
+
use_cache: Optional[bool] = None,
|
| 223 |
+
output_hidden_states: Optional[bool] = None,
|
| 224 |
+
return_dict: Optional[bool] = None,
|
| 225 |
+
**kwargs: Unpack[Dict]
|
| 226 |
+
) -> Union[Tuple, SambaOutput]:
|
| 227 |
+
output_hidden_states = (
|
| 228 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 229 |
+
)
|
| 230 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
| 231 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 232 |
+
|
| 233 |
+
if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor
|
| 234 |
+
raise ValueError(
|
| 235 |
+
"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
|
| 236 |
+
)
|
| 237 |
+
|
| 238 |
+
if inputs_embeds is None:
|
| 239 |
+
inputs_embeds = self.embeddings(input_ids)
|
| 240 |
+
|
| 241 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
| 242 |
+
use_cache = False
|
| 243 |
+
|
| 244 |
+
if cache_params is None and use_cache:
|
| 245 |
+
cache_params = MambaCache(
|
| 246 |
+
self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
hidden_states = inputs_embeds
|
| 250 |
+
all_hidden_states = () if output_hidden_states else None
|
| 251 |
+
for mixer_block in self.layers:
|
| 252 |
+
if self.gradient_checkpointing and self.training:
|
| 253 |
+
hidden_states = self._gradient_checkpointing_func(
|
| 254 |
+
mixer_block.__call__,
|
| 255 |
+
hidden_states,
|
| 256 |
+
cache_params,
|
| 257 |
+
**kwargs
|
| 258 |
+
)
|
| 259 |
+
else:
|
| 260 |
+
hidden_states = mixer_block(
|
| 261 |
+
hidden_states,
|
| 262 |
+
cache_params=cache_params,
|
| 263 |
+
**kwargs
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
if output_hidden_states:
|
| 267 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 268 |
+
|
| 269 |
+
if use_cache:
|
| 270 |
+
cache_params.seqlen_offset += inputs_embeds.shape[1]
|
| 271 |
+
|
| 272 |
+
hidden_states = self.norm_f(hidden_states)
|
| 273 |
+
|
| 274 |
+
if output_hidden_states:
|
| 275 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 276 |
+
|
| 277 |
+
if not return_dict:
|
| 278 |
+
return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
|
| 279 |
+
|
| 280 |
+
return SambaOutput(
|
| 281 |
+
last_hidden_state=hidden_states,
|
| 282 |
+
cache_params=cache_params if use_cache else None,
|
| 283 |
+
hidden_states=all_hidden_states,
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class SambaForCausalLM(SambaPreTrainedModel, GenerationMixin):
|
| 288 |
+
|
| 289 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 290 |
+
|
| 291 |
+
def __init__(self, config):
|
| 292 |
+
super().__init__(config)
|
| 293 |
+
self.backbone = SambaModel(config)
|
| 294 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 295 |
+
self.criterion = None
|
| 296 |
+
|
| 297 |
+
# Initialize weights and apply final processing
|
| 298 |
+
self.post_init()
|
| 299 |
+
|
| 300 |
+
def get_output_embeddings(self):
|
| 301 |
+
return self.lm_head
|
| 302 |
+
|
| 303 |
+
def set_output_embeddings(self, new_embeddings):
|
| 304 |
+
self.lm_head = new_embeddings
|
| 305 |
+
|
| 306 |
+
def get_input_embeddings(self):
|
| 307 |
+
return self.backbone.get_input_embeddings()
|
| 308 |
+
|
| 309 |
+
def set_input_embeddings(self, new_embeddings):
|
| 310 |
+
return self.backbone.set_input_embeddings(new_embeddings)
|
| 311 |
+
|
| 312 |
+
def _update_model_kwargs_for_generation(
|
| 313 |
+
self, outputs: ModelOutput, model_kwargs: Dict[str, Any], **kwargs
|
| 314 |
+
) -> Dict[str, Any]:
|
| 315 |
+
model_kwargs["cache_params"] = outputs.get("cache_params", None)
|
| 316 |
+
return model_kwargs
|
| 317 |
+
|
| 318 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 319 |
+
def prepare_inputs_for_generation(
|
| 320 |
+
self,
|
| 321 |
+
input_ids,
|
| 322 |
+
cache_params:
|
| 323 |
+
Optional[MambaCache] = None,
|
| 324 |
+
inputs_embeds=None,
|
| 325 |
+
attention_mask=None,
|
| 326 |
+
use_cache: Optional[bool] = True,
|
| 327 |
+
logits_to_keep: Optional[int] = None,
|
| 328 |
+
**kwargs: Unpack[Dict]
|
| 329 |
+
):
|
| 330 |
+
# only last token for inputs_ids if the state is passed along.
|
| 331 |
+
if cache_params is not None:
|
| 332 |
+
input_ids = input_ids[:, -1].unsqueeze(-1)
|
| 333 |
+
|
| 334 |
+
if inputs_embeds is not None and cache_params is None:
|
| 335 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
| 336 |
+
else:
|
| 337 |
+
model_inputs = {"input_ids": input_ids}
|
| 338 |
+
|
| 339 |
+
if logits_to_keep is not None:
|
| 340 |
+
model_inputs['logits_to_keep'] = logits_to_keep
|
| 341 |
+
|
| 342 |
+
model_inputs.update({
|
| 343 |
+
'cache_params': cache_params,
|
| 344 |
+
'use_cache': use_cache,
|
| 345 |
+
'attention_mask': attention_mask,
|
| 346 |
+
'logits_to_keep': logits_to_keep,
|
| 347 |
+
})
|
| 348 |
+
return model_inputs
|
| 349 |
+
|
| 350 |
+
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
| 351 |
+
def forward(
|
| 352 |
+
self,
|
| 353 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 354 |
+
attention_mask: Optional[torch.Tensor] = None, # noqa
|
| 355 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 356 |
+
cache_params: Optional[MambaCache] = None,
|
| 357 |
+
labels: Optional[torch.LongTensor] = None,
|
| 358 |
+
output_hidden_states: Optional[bool] = None,
|
| 359 |
+
return_dict: Optional[bool] = None,
|
| 360 |
+
use_cache: Optional[bool] = None,
|
| 361 |
+
logits_to_keep: Optional[int] = 0,
|
| 362 |
+
**kwargs: Unpack[Dict]
|
| 363 |
+
) -> Union[Tuple, SambaCausalLMOutput]:
|
| 364 |
+
r"""
|
| 365 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 366 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
| 367 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
| 368 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
| 369 |
+
"""
|
| 370 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 371 |
+
|
| 372 |
+
outputs = self.backbone(
|
| 373 |
+
input_ids,
|
| 374 |
+
cache_params=cache_params,
|
| 375 |
+
inputs_embeds=inputs_embeds,
|
| 376 |
+
output_hidden_states=output_hidden_states,
|
| 377 |
+
return_dict=return_dict,
|
| 378 |
+
use_cache=use_cache,
|
| 379 |
+
**kwargs
|
| 380 |
+
)
|
| 381 |
+
hidden_states = outputs[0]
|
| 382 |
+
fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
|
| 383 |
+
|
| 384 |
+
loss, logits = None, None
|
| 385 |
+
if not fuse_linear_and_cross_entropy or labels is None:
|
| 386 |
+
logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
|
| 387 |
+
if labels is not None:
|
| 388 |
+
if getattr(self, 'criterion', None) is None:
|
| 389 |
+
if fuse_linear_and_cross_entropy:
|
| 390 |
+
criterion = FusedLinearCrossEntropyLoss()
|
| 391 |
+
elif self.config.fuse_cross_entropy:
|
| 392 |
+
criterion = FusedCrossEntropyLoss(inplace_backward=True)
|
| 393 |
+
else:
|
| 394 |
+
criterion = nn.CrossEntropyLoss()
|
| 395 |
+
else:
|
| 396 |
+
criterion = self.criterion
|
| 397 |
+
labels = labels.to(hidden_states.device)
|
| 398 |
+
labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
|
| 399 |
+
if fuse_linear_and_cross_entropy:
|
| 400 |
+
loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
|
| 401 |
+
else:
|
| 402 |
+
loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))
|
| 403 |
+
|
| 404 |
+
if not return_dict:
|
| 405 |
+
output = (logits,) + outputs[1:]
|
| 406 |
+
return (loss,) + output if loss is not None else output
|
| 407 |
+
|
| 408 |
+
return SambaCausalLMOutput(
|
| 409 |
+
loss=loss,
|
| 410 |
+
logits=logits,
|
| 411 |
+
cache_params=outputs.cache_params,
|
| 412 |
+
hidden_states=outputs.hidden_states,
|
| 413 |
+
)
|
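A quick usage sketch for the file above (not part of the diff): it shows how SambaForCausalLM is typically driven with its recurrent `cache_params` during decoding, mirroring what `prepare_inputs_for_generation` does inside `generate()`. The toy sizes, the `fla.models` import path, and the ability to run without the library's GPU/Triton kernel dependencies are assumptions, not guaranteed by this diff.

import torch
from fla.models import SambaConfig, SambaForCausalLM  # assumed re-exports from fla.models

# Toy hyper-parameters (assumption) just to exercise the API.
config = SambaConfig(vocab_size=128, hidden_size=64, num_hidden_layers=2)
model = SambaForCausalLM(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    # Prefill: run the whole prompt once and keep the recurrent state.
    out = model(input_ids, use_cache=True)
    next_token = out.logits[:, -1].argmax(dim=-1, keepdim=True)
    # Decode: pass only the newest token together with cache_params,
    # so the old input_ids never need to be re-fed.
    out = model(next_token, cache_params=out.cache_params, use_cache=True)
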
fla/models/transformer/modeling_transformer.py
ADDED
|
@@ -0,0 +1,406 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.utils.deprecation import deprecate_kwarg

from fla.layers.attn import Attention
from fla.models.transformer.configuration_transformer import TransformerConfig
from fla.models.utils import Cache
from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
from fla.modules import GatedMLP as TransformerMLP
from fla.modules import RMSNorm

if TYPE_CHECKING:
    from transformers.processing_utils import Unpack


logger = logging.get_logger(__name__)


class TransformerBlock(nn.Module):

    def __init__(self, config: TransformerConfig, layer_idx: int):
        super().__init__()

        self.config = config
        self.layer_idx = layer_idx

        self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.attn = Attention(
            hidden_size=config.hidden_size,
            num_heads=config.num_heads,
            num_kv_heads=config.num_kv_heads,
            qkv_bias=config.qkv_bias,
            qk_norm=config.qk_norm,
            window_size=config.window_size,
            rope_theta=config.rope_theta,
            max_position_embeddings=config.max_position_embeddings,
            layer_idx=layer_idx
        )

        self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
        self.mlp = TransformerMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            fuse_swiglu=config.fuse_swiglu
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs: Unpack[Any]
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:

        residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            **kwargs
        )
        if self.config.fuse_norm:
            hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        else:
            hidden_states = residual + hidden_states
            residual = hidden_states
            hidden_states = self.mlp_norm(hidden_states)
        hidden_states = self.mlp(hidden_states, **kwargs)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attentions,)

        if use_cache:
            outputs += (past_key_values,)

        return outputs


class TransformerPreTrainedModel(PreTrainedModel):

    config_class = TransformerConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['TransformerBlock']
    _supports_cache_class = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = False,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif hasattr(module, 'reset_parameters'):
            module.reset_parameters()

        if rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            # > -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            p = None
            if hasattr(module, 'o_proj'):
                p = module.o_proj.weight
            elif hasattr(module, 'down_proj'):
                p = module.down_proj.weight
            if p is not None:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                # We need to reinit p since this code could be called multiple times
                # Having just p *= scale would repeatedly scale it down
                nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                with torch.no_grad():
                    p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class TransformerModel(TransformerPreTrainedModel):

    def __init__(
        self,
        config: TransformerConfig
    ) -> TransformerModel:
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([TransformerBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs: Unpack[Any]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        if output_attentions:
            warnings.warn(
                "`TransformerModel` does not support output attention weights now, so `output_attentions` is set to `False`."
            )
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if use_cache and not isinstance(past_key_values, Cache):
            past_key_values = Cache.from_legacy_cache(past_key_values)

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        next_cache = None

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    **kwargs
                )
            else:
                layer_outputs = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    past_key_values=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    **kwargs
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class TransformerForCausalLM(TransformerPreTrainedModel, GenerationMixin):

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = TransformerModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.criterion = None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: bool = True,
        logits_to_keep: Optional[int] = None,
        **kwargs
    ):
        # only last token for `input_ids` if the `past_key_values` is not empty.
        if past_key_values is not None and len(past_key_values) > 0:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and len(past_key_values) == 0:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        if logits_to_keep is not None:
            model_inputs['logits_to_keep'] = logits_to_keep

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': use_cache,
            'attention_mask': attention_mask,
        })
        return model_inputs

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Optional[int] = 0,
        **kwargs: Unpack[Any]
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs
        )

        hidden_states = outputs[0]
        fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
        logits = None if fuse_linear_and_cross_entropy else self.lm_head(hidden_states[:, -logits_to_keep:])

        loss = None
        if labels is not None:
            if getattr(self, 'criterion', None) is None:
                if fuse_linear_and_cross_entropy:
                    criterion = FusedLinearCrossEntropyLoss()
                elif self.config.fuse_cross_entropy:
                    criterion = FusedCrossEntropyLoss(inplace_backward=True)
                else:
                    criterion = nn.CrossEntropyLoss()
            else:
                criterion = self.criterion
            # Enable model parallelism
            labels = labels.to(hidden_states.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
            labels = labels[..., :hidden_states.shape[1]].contiguous()
            if fuse_linear_and_cross_entropy:
                loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
            else:
                loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

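A minimal training-style sketch for the TransformerForCausalLM defined above (not part of the diff): because labels are shifted inside the model, `labels=input_ids` is the intended calling convention. The toy config values, the `fla.models` import path, and the assumption that the fused Triton/flash-attention paths can be disabled are illustrative only.

import torch
from fla.models import TransformerConfig, TransformerForCausalLM  # assumed re-exports from fla.models

config = TransformerConfig(
    vocab_size=128, hidden_size=64, num_hidden_layers=2, num_heads=4,
    # Fall back to plain PyTorch ops instead of the fused kernels (assumption).
    fuse_cross_entropy=False, fuse_norm=False, fuse_swiglu=False,
)
model = TransformerForCausalLM(config)

input_ids = torch.randint(0, config.vocab_size, (2, 16))
# The model shifts the labels internally, so the raw input_ids double as targets.
out = model(input_ids=input_ids, labels=input_ids)
out.loss.backward()
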
fla/models/transformer_mtp/configuration_transformer.py
ADDED
|
@@ -0,0 +1,76 @@
# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class MTPTransformerConfig(PretrainedConfig):

    model_type = 'mtp_transformer'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        hidden_size: int = 2048,
        num_hidden_layers: int = 24,
        num_heads: int = 32,
        num_kv_heads: int = None,
        qkv_bias: bool = False,
        qk_norm: bool = False,
        window_size: Optional[int] = None,
        rope_theta: Optional[float] = 10000.,
        max_position_embeddings: int = 2048,
        hidden_ratio: Optional[int] = 4,
        intermediate_size: Optional[int] = None,
        hidden_act: str = "swish",
        initializer_range: float = 0.006,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        fuse_norm: bool = True,
        fuse_swiglu: bool = True,
        fuse_cross_entropy: bool = True,
        vocab_size: int = 32000,
        n_future_tokens: int = 1,
        use_custom_backward: Optional[bool] = False,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.qkv_bias = qkv_bias
        self.qk_norm = qk_norm
        self.window_size = window_size
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act

        self.initializer_range = initializer_range
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_cache = use_cache

        self.fuse_norm = fuse_norm
        self.fuse_swiglu = fuse_swiglu
        self.fuse_cross_entropy = fuse_cross_entropy
        self.vocab_size = vocab_size

        self.n_future_tokens = n_future_tokens
        self.use_custom_backward = use_custom_backward

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

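A small instantiation sketch for the config above (not part of the diff): the only MTP-specific knobs over the plain transformer config are `n_future_tokens` and `use_custom_backward`. The module path below mirrors the file location shown in this diff and is an assumption about how the package is laid out.

from fla.models.transformer_mtp.configuration_transformer import MTPTransformerConfig

# Same backbone hyper-parameters as the plain TransformerConfig,
# plus the number of future tokens predicted per position.
config = MTPTransformerConfig(
    hidden_size=1024,
    num_hidden_layers=12,
    num_heads=16,
    n_future_tokens=4,        # predict 4 future tokens per position instead of 1
)
print(config.model_type)      # 'mtp_transformer'
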
fla/ops/attn/__pycache__/parallel.cpython-312.pyc
ADDED
Binary file (33.1 kB)

fla/ops/common/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (139 Bytes)

fla/ops/common/__pycache__/chunk_h.cpython-312.pyc
ADDED
Binary file (24.9 kB)

fla/ops/common/__pycache__/chunk_o.cpython-312.pyc
ADDED
Binary file (37 kB)

fla/ops/common/__pycache__/chunk_scaled_dot_kkt.cpython-312.pyc
ADDED
Binary file (6.74 kB)

fla/ops/common/__pycache__/fused_recurrent.cpython-312.pyc
ADDED
Binary file (32.4 kB)

fla/ops/common/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (4.42 kB)

fla/ops/delta_rule/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (361 Bytes)

fla/ops/delta_rule/__pycache__/fused_chunk.cpython-312.pyc
ADDED
Binary file (392 Bytes)

fla/ops/delta_rule/__pycache__/fused_recurrent.cpython-312.pyc
ADDED
Binary file (34 kB)

fla/ops/delta_rule/__pycache__/wy_fast.cpython-312.pyc
ADDED
Binary file (20.5 kB)

fla/ops/forgetting_attn/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (242 Bytes)

fla/ops/forgetting_attn/__pycache__/parallel.cpython-312.pyc
ADDED
Binary file (39 kB)

fla/ops/gated_delta_rule/__pycache__/fused_recurrent.cpython-312.pyc
ADDED
Binary file (15.1 kB)

fla/ops/generalized_delta_rule/dplr/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (328 Bytes)

fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_A_fwd.cpython-312.pyc
ADDED
Binary file (25.4 kB)

fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_h_fwd.cpython-312.pyc
ADDED
Binary file (12.5 kB)

fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_o_bwd.cpython-312.pyc
ADDED
Binary file (28 kB)

fla/ops/generalized_delta_rule/dplr/__pycache__/chunk_o_fwd.cpython-312.pyc
ADDED
Binary file (8.91 kB)