"""PyTorch Bamba model.""" |
|
|
|
|
|
from typing import Optional, TypedDict, Union |
|
|
|
|
|
import torch |
|
|
from torch import nn |
|
|
|
|
|
from transformers.activations import ACT2FN |
|
|
from transformers.models.jamba.modeling_jamba import HybridMambaAttentionDynamicCache, JambaAttentionDecoderLayer |
|
|
from transformers.models.llama.modeling_llama import ( |
|
|
LlamaAttention, |
|
|
LlamaForCausalLM, |
|
|
LlamaMLP, |
|
|
LlamaRMSNorm, |
|
|
LlamaRotaryEmbedding, |
|
|
rotate_half, |
|
|
) |
|
|
from transformers.models.mamba2.modeling_mamba2 import ( |
|
|
MambaRMSNormGated, |
|
|
pad_tensor_by_size, |
|
|
reshape_into_chunks, |
|
|
segment_sum, |
|
|
) |
|
|
|
|
|
from ...modeling_attn_mask_utils import AttentionMaskConverter |
|
|
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast |
|
|
from ...modeling_utils import PreTrainedModel |
|
|
from ...processing_utils import Unpack |
|
|
from ...utils import ( |
|
|
auto_docstring, |
|
|
can_return_tuple, |
|
|
logging, |
|
|
) |
|
|
from ...utils.deprecation import deprecate_kwarg |
|
|
from ...utils.import_utils import is_causal_conv1d_available, is_mamba_2_ssm_available |
|
|
from .configuration_bamba import BambaConfig |
|
|
|
|
|
|
|
|
if is_mamba_2_ssm_available(): |
|
|
from mamba_ssm.ops.triton.selective_state_update import selective_state_update |
|
|
from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined |
|
|
else: |
|
|
    selective_state_update, mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined = None, None, None
|
|
|
|
|
if is_causal_conv1d_available(): |
|
|
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update |
|
|
else: |
|
|
causal_conv1d_update, causal_conv1d_fn = None, None |
|
|
|
|
|
is_fast_path_available = all((selective_state_update, causal_conv1d_fn, causal_conv1d_update)) |
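# Note: the fused Triton/CUDA kernels are only used when all three ops above imported successfully;
# otherwise `BambaMixer.forward` falls back to the pure-PyTorch `torch_forward` implementation below.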
|
|
|
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
|
|
|
class BambaFlashAttentionKwargs(TypedDict, total=False): |
|
|
""" |
|
|
Keyword arguments for advanced Flash Attention, causal-conv1d, and mamba_ssm kernel usage. |
|
|
Use cases include padding-free training and fewer `torch.compile` graph breaks. |
|
|
|
|
|
Attributes: |
|
|
        cu_seq_lens_q (`torch.LongTensor`):
            Cumulative sequence lengths for the query states.
        cu_seq_lens_k (`torch.LongTensor`):
            Cumulative sequence lengths for the key states.
|
|
max_length_q (`int`): |
|
|
Maximum sequence length for query state. |
|
|
max_length_k (`int`): |
|
|
Maximum sequence length for key state. |
|
|
        seq_idx (`torch.IntTensor`):
|
|
Index of each packed sequence. |
|
|
""" |
|
|
|
|
|
cu_seq_lens_q: torch.LongTensor |
|
|
cu_seq_lens_k: torch.LongTensor |
|
|
max_length_q: int |
|
|
max_length_k: int |
|
|
seq_idx: torch.IntTensor |
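    # Illustrative sketch (values are hypothetical, not part of the API): when two sequences of lengths 5 and 7
    # are packed into one row for padding-free training, these kwargs would look roughly like
    #   cu_seq_lens_q=torch.tensor([0, 5, 12]), cu_seq_lens_k=torch.tensor([0, 5, 12]),
    #   max_length_q=7, max_length_k=7,
    #   seq_idx=torch.tensor([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int32)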
|
|
|
|
|
|
|
|
|
|
|
class HybridMambaAttentionDynamicCache(HybridMambaAttentionDynamicCache): |
|
|
""" |
|
|
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache |
|
|
(which has a constant shape regardless of seq_len). |
|
|
|
|
|
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states` |
|
|
    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors, and the expected shape of each
    tensor depends on the layer type.
|
|
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`, |
|
|
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors). |
|
|
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors), |
|
|
    while `conv_states` represents the convolution state and has a shape of `(batch_size, conv_dim, d_conv)`
    (with `conv_dim = mamba_expand * hidden_size + 2 * mamba_n_groups * d_state`),
    and `ssm_states` represents the ssm state and has a shape of `(batch_size, n_heads, d_head, d_state)`.
|
|
""" |
|
|
|
|
|
def __init__(self, config: BambaConfig, batch_size, dtype=torch.float16, device=None): |
|
|
self.layers_block_type = config.layers_block_type |
|
|
self.has_previous_state = False |
|
|
conv_kernel_size = config.mamba_d_conv |
|
|
ssm_state_size = config.mamba_d_state |
|
|
|
|
|
self.conv_states = [] |
|
|
self.ssm_states = [] |
|
|
self.transformer_layers = [] |
|
|
for i in range(config.num_hidden_layers): |
|
|
if self.layers_block_type[i] == "mamba": |
|
|
self.conv_states += [ |
|
|
torch.zeros( |
|
|
batch_size, |
|
|
(config.mamba_expand * config.hidden_size + 2 * config.mamba_n_groups * ssm_state_size), |
|
|
conv_kernel_size, |
|
|
device=device, |
|
|
dtype=dtype, |
|
|
) |
|
|
] |
|
|
self.ssm_states += [ |
|
|
torch.zeros( |
|
|
batch_size, |
|
|
config.mamba_n_heads, |
|
|
config.mamba_d_head, |
|
|
ssm_state_size, |
|
|
device=device, |
|
|
dtype=dtype, |
|
|
) |
|
|
] |
|
|
else: |
|
|
self.conv_states += [torch.tensor([[]] * batch_size, device=device)] |
|
|
self.ssm_states += [torch.tensor([[]] * batch_size, device=device)] |
|
|
self.transformer_layers.append(i) |
|
|
|
|
|
self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] |
|
|
self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] |
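    # Usage sketch (illustrative only): during generation this cache is typically built once per batch and
    # threaded through `past_key_values`, e.g.
    #   cache = HybridMambaAttentionDynamicCache(config, input_ids.shape[0], dtype=model.dtype, device=model.device)
    #   outputs = model(input_ids, past_key_values=cache, use_cache=True)
    # `prepare_inputs_for_generation` below creates such a cache automatically when none is passed.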
|
|
|
|
|
|
|
|
class BambaRotaryEmbedding(LlamaRotaryEmbedding): |
|
|
pass |
|
|
|
|
|
|
|
|
|
|
|
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): |
|
|
"""Applies Rotary Position Embedding to the query and key tensors. |
|
|
|
|
|
    Adapted from the GLM implementation, but without the interleaving of cos and sin. Only the first `rotary_dim`
    features of `q` and `k` are rotated; the remaining features are passed through unchanged (partial rotary).
|
|
|
|
|
Args: |
|
|
q (`torch.Tensor`): The query tensor. |
|
|
k (`torch.Tensor`): The key tensor. |
|
|
cos (`torch.Tensor`): The cosine part of the rotary embedding. |
|
|
sin (`torch.Tensor`): The sine part of the rotary embedding. |
|
|
position_ids (`torch.Tensor`, *optional*): |
|
|
Deprecated and unused. |
|
|
unsqueeze_dim (`int`, *optional*, defaults to 1): |
|
|
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and |
|
|
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note |
|
|
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and |
|
|
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes |
|
|
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have |
|
|
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. |
|
|
Returns: |
|
|
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. |
|
|
""" |
|
|
cos = cos.unsqueeze(unsqueeze_dim) |
|
|
sin = sin.unsqueeze(unsqueeze_dim) |
|
|
|
|
|
|
|
|
rotary_dim = cos.shape[-1] |
|
|
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] |
|
|
k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] |
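    # Only the first `rotary_dim` features are rotated (partial rotary); the `*_pass` parts are left untouched.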
|
|
|
|
|
|
|
|
q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin) |
|
|
k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin) |
|
|
|
|
|
|
|
|
q_embed = torch.cat([q_embed, q_pass], dim=-1) |
|
|
k_embed = torch.cat([k_embed, k_pass], dim=-1) |
|
|
return q_embed, k_embed |
|
|
|
|
|
|
|
|
class BambaAttention(LlamaAttention): |
|
|
pass |
|
|
|
|
|
|
|
|
class BambaRMSNormGated(MambaRMSNormGated): |
|
|
pass |
|
|
|
|
|
|
|
|
def apply_mask_to_padding_states(hidden_states, attention_mask): |
|
|
""" |
|
|
    Zeroes out the hidden states at padding positions, see https://github.com/state-spaces/mamba/issues/66
|
|
""" |
|
|
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: |
|
|
dtype = hidden_states.dtype |
|
|
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) |
|
|
|
|
|
return hidden_states |
|
|
|
|
|
|
|
|
|
|
|
class BambaMixer(nn.Module): |
|
|
""" |
|
|
Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`. |
|
|
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective) |
|
|
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4, |
|
|
and is why Mamba is called **selective** state spaces) |
|
|
|
|
|
    There are a few differences between this and Mamba2Mixer:
    - The variable use_precomputed_states is slightly different due to the hybrid cache structure
    - A few non-obvious batching bugs in the slow path that still exist in main are fixed here
    - Some extra variables that our layer doesn't need have been removed
    - We ported most of the refactors in https://github.com/huggingface/transformers/pull/35154, which is (as of Dec 18, 2024) unmerged
|
|
""" |
|
|
|
|
|
def __init__(self, config: BambaConfig, layer_idx: int): |
|
|
super().__init__() |
|
|
self.num_heads = config.mamba_n_heads |
|
|
self.hidden_size = config.hidden_size |
|
|
self.ssm_state_size = config.mamba_d_state |
|
|
self.conv_kernel_size = config.mamba_d_conv |
|
|
self.intermediate_size = int(config.mamba_expand * self.hidden_size) |
|
|
self.layer_idx = layer_idx |
|
|
self.use_conv_bias = config.mamba_conv_bias |
|
|
self.activation = config.hidden_act |
|
|
self.act = ACT2FN[config.hidden_act] |
|
|
self.use_bias = config.mamba_proj_bias |
|
|
|
|
|
self.layer_norm_epsilon = config.rms_norm_eps |
|
|
|
|
|
self.n_groups = config.mamba_n_groups |
|
|
self.head_dim = config.mamba_d_head |
|
|
self.chunk_size = config.mamba_chunk_size |
|
|
|
|
|
|
|
|
self.time_step_limit = (0.0, float("inf")) |
|
|
self.time_step_min = 0.001 |
|
|
self.time_step_max = 0.1 |
|
|
|
|
|
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size |
|
|
self.conv1d = nn.Conv1d( |
|
|
in_channels=self.conv_dim, |
|
|
out_channels=self.conv_dim, |
|
|
bias=config.mamba_conv_bias, |
|
|
kernel_size=self.conv_kernel_size, |
|
|
groups=self.conv_dim, |
|
|
padding=self.conv_kernel_size - 1, |
|
|
) |
|
|
|
|
|
|
|
|
projection_size = self.intermediate_size + self.conv_dim + self.num_heads |
|
|
self.in_proj = nn.Linear( |
|
|
self.hidden_size, |
|
|
projection_size, |
|
|
bias=self.use_bias, |
|
|
) |
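        # A single matmul produces the gate, the convolution input (hidden states + B + C), and dt,
        # which are later split as [intermediate_size, conv_dim, num_heads] along the last dimension.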
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.dt_bias = nn.Parameter(torch.ones(self.num_heads)) |
|
|
|
|
|
|
|
|
|
|
|
A = torch.arange(1, self.num_heads + 1) |
|
|
self.A_log = nn.Parameter(torch.log(A)) |
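        # S4D-style initialization: A is a negative scalar per head, stored as log(A) and recovered in the
        # forward pass as A = -exp(A_log).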
|
|
self.norm = BambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon) |
|
|
self.D = nn.Parameter(torch.ones(self.num_heads)) |
|
|
|
|
|
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias) |
|
|
|
|
|
if not is_fast_path_available: |
|
|
logger.warning_once( |
|
|
"The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`" |
|
|
" is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and" |
|
|
" https://github.com/Dao-AILab/causal-conv1d" |
|
|
) |
|
|
else: |
|
|
logger.warning_once("The fast path for Bamba will be used when running the model on a GPU") |
|
|
|
|
|
def cuda_kernels_forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
cache_params: Optional[HybridMambaAttentionDynamicCache] = None, |
|
|
cache_position: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
seq_idx: Optional[torch.IntTensor] = None, |
|
|
): |
|
|
|
|
|
hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask) |
|
|
projected_states = self.in_proj(hidden_states) |
|
|
|
|
|
|
|
|
batch_size, seq_len, _ = hidden_states.shape |
|
|
groups_time_state_size = self.n_groups * self.ssm_state_size |
|
|
|
|
|
use_precomputed_states = ( |
|
|
cache_params is not None |
|
|
and cache_params.has_previous_state |
|
|
and seq_len == 1 |
|
|
and cache_params.conv_states[self.layer_idx].shape[0] |
|
|
== cache_params.ssm_states[self.layer_idx].shape[0] |
|
|
== batch_size |
|
|
and cache_position is not None |
|
|
and cache_position[0] > 0 |
|
|
) |
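        # Single-token decoding with a warm cache takes the fused update kernels below; prefill and training
        # go through the full (chunked) scan in the `else` branch.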
|
|
|
|
|
|
|
|
if use_precomputed_states: |
|
|
gate, hidden_states_B_C, dt = projected_states.squeeze(1).split( |
|
|
[self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 |
|
|
) |
|
|
|
|
|
|
|
|
hidden_states_B_C = causal_conv1d_update( |
|
|
hidden_states_B_C, |
|
|
cache_params.conv_states[self.layer_idx], |
|
|
self.conv1d.weight.squeeze(1), |
|
|
self.conv1d.bias, |
|
|
self.activation, |
|
|
) |
|
|
|
|
|
hidden_states, B, C = torch.split( |
|
|
hidden_states_B_C, |
|
|
[self.intermediate_size, groups_time_state_size, groups_time_state_size], |
|
|
dim=-1, |
|
|
) |
|
|
|
|
|
|
|
|
A = -torch.exp(self.A_log.float()) |
|
|
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32) |
|
|
dt = dt[:, :, None].expand(-1, -1, self.head_dim) |
|
|
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim) |
|
|
D = self.D[:, None, ...].expand(-1, self.head_dim) |
|
|
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups) |
|
|
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups) |
|
|
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim) |
|
|
hidden_states = selective_state_update( |
|
|
cache_params.ssm_states[self.layer_idx], |
|
|
hidden_states_reshaped, |
|
|
dt, |
|
|
A, |
|
|
B, |
|
|
C, |
|
|
D, |
|
|
z=None, |
|
|
dt_bias=dt_bias, |
|
|
dt_softplus=True, |
|
|
) |
|
|
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim) |
|
|
hidden_states = self.norm(hidden_states, gate) |
|
|
|
|
|
|
|
|
out = self.out_proj(hidden_states)[:, None, ...] |
|
|
|
|
|
else: |
|
|
A = -torch.exp(self.A_log.float()) |
|
|
dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit} |
|
|
|
|
|
|
|
|
if self.training and cache_params is None: |
|
|
out = mamba_split_conv1d_scan_combined( |
|
|
projected_states, |
|
|
self.conv1d.weight.squeeze(1), |
|
|
self.conv1d.bias, |
|
|
self.dt_bias, |
|
|
A, |
|
|
D=self.D, |
|
|
chunk_size=self.chunk_size, |
|
|
seq_idx=seq_idx, |
|
|
activation=self.activation, |
|
|
rmsnorm_weight=self.norm.weight, |
|
|
rmsnorm_eps=self.norm.variance_epsilon, |
|
|
outproj_weight=self.out_proj.weight, |
|
|
outproj_bias=self.out_proj.bias, |
|
|
headdim=self.head_dim, |
|
|
ngroups=self.n_groups, |
|
|
norm_before_gate=False, |
|
|
return_final_states=False, |
|
|
**dt_limit_kwargs, |
|
|
) |
|
|
|
|
|
else: |
|
|
gate, hidden_states_B_C, dt = projected_states.split( |
|
|
[self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
if cache_params is not None: |
|
|
|
|
|
|
|
|
|
|
|
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2) |
|
|
conv_states = nn.functional.pad( |
|
|
hidden_states_B_C_transposed, |
|
|
(self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0), |
|
|
) |
|
|
cache_params.conv_states[self.layer_idx].copy_(conv_states) |
|
|
|
|
|
if self.activation not in ["silu", "swish"]: |
|
|
hidden_states_B_C = self.act( |
|
|
self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2) |
|
|
) |
|
|
else: |
|
|
hidden_states_B_C = causal_conv1d_fn( |
|
|
x=hidden_states_B_C.transpose(1, 2), |
|
|
weight=self.conv1d.weight.squeeze(1), |
|
|
bias=self.conv1d.bias, |
|
|
activation=self.activation, |
|
|
seq_idx=seq_idx, |
|
|
).transpose(1, 2) |
|
|
|
|
|
hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask) |
|
|
hidden_states, B, C = torch.split( |
|
|
hidden_states_B_C, |
|
|
[self.intermediate_size, groups_time_state_size, groups_time_state_size], |
|
|
dim=-1, |
|
|
) |
|
|
|
|
|
|
|
|
scan_output, ssm_state = mamba_chunk_scan_combined( |
|
|
hidden_states.view(batch_size, seq_len, -1, self.head_dim), |
|
|
dt, |
|
|
A, |
|
|
B.view(batch_size, seq_len, self.n_groups, -1), |
|
|
C.view(batch_size, seq_len, self.n_groups, -1), |
|
|
chunk_size=self.chunk_size, |
|
|
D=self.D, |
|
|
z=None, |
|
|
seq_idx=seq_idx, |
|
|
return_final_states=True, |
|
|
dt_bias=self.dt_bias, |
|
|
dt_softplus=True, |
|
|
**dt_limit_kwargs, |
|
|
) |
|
|
|
|
|
|
|
|
if ssm_state is not None and cache_params is not None: |
|
|
cache_params.ssm_states[self.layer_idx].copy_(ssm_state) |
|
|
|
|
|
scan_output = scan_output.view(batch_size, seq_len, -1) |
|
|
|
|
|
scan_output = self.norm(scan_output, gate) |
|
|
|
|
|
|
|
|
out = self.out_proj(scan_output) |
|
|
return out |
|
|
|
|
|
|
|
|
def torch_forward( |
|
|
self, |
|
|
input_states, |
|
|
cache_params: Optional[HybridMambaAttentionDynamicCache] = None, |
|
|
cache_position: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
): |
|
|
batch_size, seq_len, _ = input_states.shape |
|
|
dtype = input_states.dtype |
|
|
|
|
|
|
|
|
input_states = apply_mask_to_padding_states(input_states, attention_mask) |
|
|
projected_states = self.in_proj(input_states) |
|
|
gate, hidden_states_B_C, dt = projected_states.split( |
|
|
[self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 |
|
|
) |
|
|
|
|
|
use_precomputed_states = ( |
|
|
cache_params is not None |
|
|
and cache_params.has_previous_state |
|
|
and seq_len == 1 |
|
|
and cache_params.conv_states[self.layer_idx].shape[0] |
|
|
== cache_params.ssm_states[self.layer_idx].shape[0] |
|
|
== batch_size |
|
|
and cache_position is not None |
|
|
and cache_position[0] > 0 |
|
|
) |
|
|
|
|
|
|
|
|
if use_precomputed_states: |
|
|
cache_params.conv_states[self.layer_idx] = cache_params.conv_states[self.layer_idx].roll(shifts=-1, dims=-1) |
|
|
cache_params.conv_states[self.layer_idx][:, :, -1] = hidden_states_B_C[:, 0, :].to(cache_params.conv_states[self.layer_idx].device) |
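            # The two lines above shift the rolling convolution buffer left by one step and write the newest
            # token's features into its last slot.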
|
|
|
|
|
|
|
|
conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device) |
|
|
|
|
|
hidden_states_B_C = torch.sum( |
|
|
conv_states * self.conv1d.weight.squeeze(1), dim=-1 |
|
|
) |
|
|
if self.use_conv_bias: |
|
|
hidden_states_B_C = hidden_states_B_C + self.conv1d.bias |
|
|
hidden_states_B_C = self.act(hidden_states_B_C) |
|
|
else: |
|
|
|
|
|
if cache_params is not None: |
|
|
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2) |
|
|
conv_states = nn.functional.pad( |
|
|
hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0) |
|
|
) |
|
|
cache_params.conv_states[self.layer_idx].copy_(conv_states) |
|
|
|
|
|
hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)) |
|
|
|
|
|
hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask) |
|
|
hidden_states, B, C = torch.split( |
|
|
hidden_states_B_C, |
|
|
[self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], |
|
|
dim=-1 |
|
|
) |
|
|
|
|
|
|
|
|
A = -torch.exp(self.A_log.float()) |
|
|
if use_precomputed_states: |
|
|
|
|
|
cache_device = cache_params.ssm_states[self.layer_idx].device |
|
|
|
|
|
|
|
|
|
|
|
dt = dt[:, 0, :][:, None, ...] |
|
|
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim) |
|
|
|
|
|
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim) |
|
|
|
|
|
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype)) |
|
|
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) |
|
|
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32) |
|
|
|
|
|
dA = (torch.exp(dt[..., None] * A)).to(device=cache_device) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :] |
|
|
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous() |
|
|
B = B.reshape(batch_size, -1, B.shape[-1]) |
|
|
|
|
|
dB = dt[..., None] * B[..., None, :] |
|
|
|
|
|
|
|
|
|
|
|
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim) |
|
|
dBx = (dB * hidden_states[..., None]).to(device=cache_device) |
|
|
|
|
|
|
|
|
cache_params.ssm_states[self.layer_idx].copy_( |
|
|
cache_params.ssm_states[self.layer_idx] * dA + dBx |
|
|
) |
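            # Single-step recurrent state update: ssm_state <- ssm_state * dA + dB * x (per head and state dim).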
|
|
|
|
|
|
|
|
|
|
|
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :] |
|
|
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous() |
|
|
C = C.reshape(batch_size, -1, C.shape[-1]) |
|
|
|
|
|
|
|
|
ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) |
|
|
|
|
|
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) |
|
|
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) |
|
|
y = torch.bmm(ssm_states_reshaped, C_reshaped) |
|
|
y = y.view(batch_size, self.num_heads, self.head_dim) |
|
|
|
|
|
|
|
|
|
|
|
D = self.D[..., None].expand(self.D.shape[0], self.head_dim) |
|
|
y = (y + hidden_states * D).to(y.dtype) |
|
|
|
|
|
|
|
|
y = y.reshape(batch_size, -1)[:, None, ...] |
|
|
else: |
|
|
|
|
|
dt = nn.functional.softplus(dt + self.dt_bias) |
|
|
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) |
|
|
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float() |
|
|
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() |
|
|
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() |
|
|
B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads) |
|
|
C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads) |
|
|
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size |
|
|
|
|
|
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size) |
|
|
|
|
|
|
|
|
hidden_states = hidden_states * dt[..., None] |
|
|
A = A.to(hidden_states.dtype) * dt |
|
|
|
|
|
|
|
|
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)] |
|
|
|
|
|
|
|
|
A = A.permute(0, 3, 1, 2) |
|
|
A_cumsum = torch.cumsum(A, dim=-1) |
|
|
|
|
|
|
|
|
|
|
|
L = torch.exp(segment_sum(A)) |
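            # 1. Intra-chunk (diagonal block) outputs: L = exp(segsum(A)) acts as the causal decay mask
            # within each chunk.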
|
|
|
|
|
|
|
|
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :] |
|
|
G = G_intermediate.sum(dim=-1) |
|
|
|
|
|
|
|
|
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None] |
|
|
M = M_intermediate.sum(dim=-1) |
|
|
|
|
|
|
|
|
Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3) |
|
|
|
|
|
|
|
|
|
|
|
decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum) |
|
|
B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None] |
|
|
states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2) |
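            # 2. Per-chunk states (the B terms / right factor of the low-rank off-diagonal blocks).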
|
|
|
|
|
|
|
|
|
|
|
if use_precomputed_states: |
|
|
previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device) |
|
|
else: |
|
|
previous_states = torch.zeros_like(states[:, :1]) |
|
|
states = torch.cat([previous_states, states], dim=1) |
|
|
decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0)))) |
|
|
decay_chunk = decay_chunk.transpose(1, 3) |
|
|
new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1) |
|
|
states, ssm_state = new_states[:, :-1], new_states[:, -1] |
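            # 3. Inter-chunk recurrence: SSM states are propagated across chunk boundaries (the A terms);
            # `ssm_state` is the final state used to refresh the cache.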
|
|
|
|
|
|
|
|
|
|
|
state_decay_out = torch.exp(A_cumsum) |
|
|
C_times_states = (C[..., None, :] * states[:, :, None, ...]) |
|
|
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1) |
|
|
Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None]) |
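            # 4. State-to-output conversion per chunk (the C terms), combined with the intra-chunk outputs below.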
|
|
|
|
|
|
|
|
y = Y_diag + Y_off |
|
|
|
|
|
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim) |
|
|
|
|
|
y = y + D_residual |
|
|
|
|
|
if pad_size > 0: |
|
|
y = y[:, :seq_len, :, :] |
|
|
y = y.reshape(batch_size, seq_len, -1) |
|
|
|
|
|
|
|
|
if ssm_state is not None and cache_params is not None: |
|
|
cache_params.ssm_states[self.layer_idx].copy_(ssm_state) |
|
|
|
|
|
scan_output = self.norm(y, gate) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
contextualized_states = self.out_proj(scan_output.to(dtype)) |
|
|
return contextualized_states |
|
|
|
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states, |
|
|
cache_params: Optional[HybridMambaAttentionDynamicCache] = None, |
|
|
cache_position: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
seq_idx: Optional[torch.IntTensor] = None, |
|
|
**kwargs, |
|
|
): |
|
|
if is_fast_path_available and "cuda" in self.in_proj.weight.device.type: |
|
|
return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask, seq_idx) |
|
|
if seq_idx is not None: |
|
|
raise NotImplementedError( |
|
|
"`seq_idx` support requires fast path support. Please install `mamba_ssm` and `causal_conv1d`" |
|
|
) |
|
|
dtype = hidden_states.dtype |
|
|
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: |
|
|
|
|
|
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) |
|
|
|
|
|
return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask) |
|
|
|
|
|
|
|
|
class BambaMLP(LlamaMLP): |
|
|
pass |
|
|
|
|
|
|
|
|
class BambaRMSNorm(LlamaRMSNorm): |
|
|
pass |
|
|
|
|
|
|
|
|
class BambaDecoderLayer(JambaAttentionDecoderLayer): |
|
|
def __init__(self, config: BambaConfig, layer_idx: int, layer_type: str = "mamba"): |
|
|
super().__init__(config, layer_idx) |
|
|
|
|
|
del self.self_attn |
|
|
|
|
|
num_experts = 1 |
|
|
ffn_layer_class = BambaMLP if num_experts == 1 else None |
|
|
self.feed_forward = ffn_layer_class(config) |
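        # Bamba uses a single dense MLP per layer (no MoE), so `num_experts` is fixed to 1 here.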
|
|
|
|
|
self.layer_type = layer_type |
|
|
if layer_type == "mamba": |
|
|
self.mamba = BambaMixer(config=config, layer_idx=layer_idx) |
|
|
elif layer_type == "attention": |
|
|
self.self_attn = BambaAttention(config, layer_idx) |
|
|
else: |
|
|
            raise ValueError(f"Invalid layer_type: {layer_type}. Expected 'mamba' or 'attention'.")
|
|
|
|
|
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") |
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
past_key_values: Optional[HybridMambaAttentionDynamicCache] = None, |
|
|
output_attentions: Optional[bool] = False, |
|
|
use_cache: Optional[bool] = False, |
|
|
cache_position: Optional[torch.LongTensor] = None, |
|
|
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, |
|
|
**kwargs: Unpack[BambaFlashAttentionKwargs], |
|
|
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: |
|
|
""" |
|
|
Args: |
|
|
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` |
|
|
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size |
|
|
`(batch, sequence_length)` where padding elements are indicated by 0. |
|
|
past_key_values (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states |
|
|
output_attentions (`bool`, *optional*): |
|
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
|
|
returned tensors for more detail. |
|
|
use_cache (`bool`, *optional*): |
|
|
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding |
|
|
(see `past_key_values`). |
|
|
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): |
|
|
Indices depicting the position of the input sequence tokens in the sequence. |
|
|
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): |
|
|
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, |
|
|
with `head_dim` being the embedding dimension of each attention head. |
|
|
kwargs (`dict`, *optional*): |
|
|
Arbitrary kwargs. Can be used to provide `BambaFlashAttentionKwargs` for |
|
|
padding-free training and/or improve torch.compile performance. |
|
|
""" |
|
|
|
|
|
residual = hidden_states |
|
|
|
|
|
hidden_states = self.input_layernorm(hidden_states) |
|
|
|
|
|
|
|
|
if self.layer_type == "mamba": |
|
|
hidden_states = self.mamba( |
|
|
hidden_states=hidden_states, |
|
|
cache_params=past_key_values, |
|
|
cache_position=cache_position, |
|
|
attention_mask=attention_mask, |
|
|
**kwargs, |
|
|
) |
|
|
self_attn_weights = None |
|
|
elif self.layer_type == "attention": |
|
|
hidden_states, self_attn_weights = self.self_attn( |
|
|
hidden_states=hidden_states, |
|
|
attention_mask=attention_mask, |
|
|
position_ids=position_ids, |
|
|
past_key_values=past_key_values, |
|
|
output_attentions=output_attentions, |
|
|
use_cache=use_cache, |
|
|
cache_position=cache_position, |
|
|
position_embeddings=position_embeddings, |
|
|
**kwargs, |
|
|
) |
|
|
|
|
|
|
|
|
hidden_states = residual + hidden_states |
|
|
|
|
|
|
|
|
residual = hidden_states |
|
|
hidden_states = self.pre_ff_layernorm(hidden_states) |
|
|
hidden_states = self.feed_forward(hidden_states) |
|
|
hidden_states = residual + hidden_states |
|
|
|
|
|
outputs = (hidden_states,) |
|
|
|
|
|
if output_attentions: |
|
|
outputs += (self_attn_weights,) |
|
|
|
|
|
return outputs |
|
|
|
|
|
|
|
|
@auto_docstring |
|
|
class BambaPreTrainedModel(PreTrainedModel): |
|
|
config: BambaConfig |
|
|
base_model_prefix = "model" |
|
|
supports_gradient_checkpointing = True |
|
|
_no_split_modules = ["BambaDecoderLayer"] |
|
|
_skip_keys_device_placement = "past_key_values" |
|
|
_supports_flash_attn = True |
|
|
_supports_sdpa = True |
|
|
|
|
|
_is_stateful = True |
|
|
|
|
|
def _init_weights(self, module): |
|
|
super()._init_weights(module) |
|
|
if isinstance(module, BambaMixer): |
|
|
module.dt_bias.data.fill_(1.0) |
|
|
module.A_log.data = torch.log(torch.arange(1, module.num_heads + 1)) |
|
|
module.D.data.fill_(1.0) |
|
|
|
|
|
|
|
|
@auto_docstring |
|
|
class BambaModel(BambaPreTrainedModel): |
|
|
def __init__(self, config: BambaConfig): |
|
|
super().__init__(config) |
|
|
self.padding_idx = config.pad_token_id |
|
|
self.vocab_size = config.vocab_size |
|
|
|
|
|
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) |
|
|
decoder_layers = [] |
|
|
for i in range(config.num_hidden_layers): |
|
|
decoder_layers.append(BambaDecoderLayer(config, layer_idx=i, layer_type=config.layers_block_type[i])) |
|
|
self.layers = nn.ModuleList(decoder_layers) |
|
|
|
|
|
self._attn_implementation = config._attn_implementation |
|
|
self.final_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) |
|
|
self.rotary_emb = BambaRotaryEmbedding(config=config) |
|
|
|
|
|
self.gradient_checkpointing = False |
|
|
|
|
|
self.post_init() |
|
|
|
|
|
@can_return_tuple |
|
|
@auto_docstring |
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
past_key_values: Optional[HybridMambaAttentionDynamicCache] = None, |
|
|
inputs_embeds: Optional[torch.FloatTensor] = None, |
|
|
use_cache: Optional[bool] = None, |
|
|
output_attentions: Optional[bool] = None, |
|
|
output_hidden_states: Optional[bool] = None, |
|
|
cache_position: Optional[torch.LongTensor] = None, |
|
|
**kwargs: Unpack[BambaFlashAttentionKwargs], |
|
|
) -> BaseModelOutputWithPast: |
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
|
|
output_hidden_states = ( |
|
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
|
|
) |
|
|
use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
|
|
|
|
if (input_ids is None) ^ (inputs_embeds is not None): |
|
|
raise ValueError("You must specify exactly one of input_ids or inputs_embeds") |
|
|
|
|
|
if self.gradient_checkpointing and self.training and use_cache: |
|
|
logger.warning_once( |
|
|
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." |
|
|
) |
|
|
use_cache = False |
|
|
|
|
|
if inputs_embeds is None: |
|
|
inputs_embeds = self.embed_tokens(input_ids) |
|
|
hidden_states = inputs_embeds |
|
|
|
|
|
if use_cache and past_key_values is None: |
|
|
logger.warning_once( |
|
|
"Bamba requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. None was " |
|
|
"provided, so no cache will be returned." |
|
|
) |
|
|
|
|
|
if cache_position is None: |
|
|
cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device) |
|
|
if position_ids is None: |
|
|
position_ids = cache_position.unsqueeze(0) |
|
|
|
|
|
causal_mask = self._update_causal_mask( |
|
|
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions |
|
|
) |
|
|
mamba_mask = self._update_mamba_mask(attention_mask, cache_position) |
|
|
|
|
|
|
|
|
position_embeddings = self.rotary_emb(hidden_states, position_ids) |
|
|
|
|
|
all_hidden_states = () if output_hidden_states else None |
|
|
all_self_attns = () if output_attentions else None |
|
|
|
|
|
for decoder_layer in self.layers: |
|
|
|
|
|
layer_mask = mamba_mask if decoder_layer.layer_type == "mamba" else causal_mask |
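            # Mamba layers receive the 2D padding mask (used to zero out padded positions), while attention
            # layers receive the 4D causal mask.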
|
|
|
|
|
if output_hidden_states: |
|
|
all_hidden_states += (hidden_states,) |
|
|
|
|
|
layer_outputs = decoder_layer( |
|
|
hidden_states, |
|
|
attention_mask=layer_mask, |
|
|
position_ids=position_ids, |
|
|
past_key_values=past_key_values, |
|
|
output_attentions=output_attentions, |
|
|
use_cache=use_cache, |
|
|
cache_position=cache_position, |
|
|
position_embeddings=position_embeddings, |
|
|
**kwargs, |
|
|
) |
|
|
|
|
|
hidden_states = layer_outputs[0] |
|
|
|
|
|
if output_attentions: |
|
|
if layer_outputs[1] is not None: |
|
|
|
|
|
all_self_attns += (layer_outputs[1],) |
|
|
|
|
|
hidden_states = self.final_layernorm(hidden_states) |
|
|
|
|
|
|
|
|
if output_hidden_states: |
|
|
all_hidden_states += (hidden_states,) |
|
|
|
|
|
if past_key_values and not past_key_values.has_previous_state: |
|
|
past_key_values.has_previous_state = True |
|
|
|
|
|
next_cache = None if not use_cache else past_key_values |
|
|
|
|
|
return BaseModelOutputWithPast( |
|
|
last_hidden_state=hidden_states, |
|
|
past_key_values=next_cache, |
|
|
hidden_states=all_hidden_states, |
|
|
attentions=all_self_attns, |
|
|
) |
|
|
|
|
|
def _update_causal_mask( |
|
|
self, |
|
|
attention_mask: torch.Tensor, |
|
|
input_tensor: torch.Tensor, |
|
|
cache_position: torch.Tensor, |
|
|
past_key_values: HybridMambaAttentionDynamicCache, |
|
|
output_attentions: bool, |
|
|
): |
|
|
if self.config._attn_implementation == "flash_attention_2": |
|
|
if attention_mask is not None and 0.0 in attention_mask: |
|
|
return attention_mask |
|
|
return None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 |
|
|
|
|
|
|
|
|
if self.config._attn_implementation == "sdpa" and not output_attentions: |
|
|
if AttentionMaskConverter._ignore_causal_mask_sdpa( |
|
|
attention_mask, |
|
|
inputs_embeds=input_tensor, |
|
|
past_key_values_length=past_seen_tokens, |
|
|
is_training=self.training, |
|
|
): |
|
|
return None |
|
|
|
|
|
dtype = input_tensor.dtype |
|
|
sequence_length = input_tensor.shape[1] |
|
|
target_length = ( |
|
|
attention_mask.shape[-1] |
|
|
if isinstance(attention_mask, torch.Tensor) |
|
|
else past_seen_tokens + sequence_length + 1 |
|
|
) |
|
|
|
|
|
|
|
|
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( |
|
|
attention_mask, |
|
|
sequence_length=sequence_length, |
|
|
target_length=target_length, |
|
|
dtype=dtype, |
|
|
cache_position=cache_position, |
|
|
batch_size=input_tensor.shape[0], |
|
|
) |
|
|
|
|
|
if ( |
|
|
self.config._attn_implementation == "sdpa" |
|
|
and attention_mask is not None |
|
|
and attention_mask.device.type in ["cuda", "xpu", "npu"] |
|
|
and not output_attentions |
|
|
): |
|
|
|
|
|
|
|
|
|
|
|
min_dtype = torch.finfo(dtype).min |
|
|
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) |
|
|
|
|
|
return causal_mask |
|
|
|
|
|
@staticmethod |
|
|
def _prepare_4d_causal_attention_mask_with_cache_position( |
|
|
attention_mask: torch.Tensor, |
|
|
sequence_length: int, |
|
|
target_length: int, |
|
|
dtype: torch.dtype, |
|
|
cache_position: torch.Tensor, |
|
|
batch_size: int, |
|
|
**kwargs, |
|
|
): |
|
|
""" |
|
|
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape |
|
|
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. |
|
|
|
|
|
Args: |
|
|
attention_mask (`torch.Tensor`): |
|
|
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape |
|
|
`(batch_size, 1, query_length, key_value_length)`. |
|
|
sequence_length (`int`): |
|
|
The sequence length being processed. |
|
|
target_length (`int`): |
|
|
The target length: when generating with static cache, the mask should be as long as the static cache, |
|
|
to account for the 0 padding, the part of the cache that is not filled yet. |
|
|
dtype (`torch.dtype`): |
|
|
The dtype to use for the 4D attention mask. |
|
|
cache_position (`torch.Tensor`): |
|
|
Indices depicting the position of the input sequence tokens in the sequence. |
|
|
batch_size (`torch.Tensor`): |
|
|
Batch size. |
|
|
""" |
|
|
if attention_mask is not None and attention_mask.dim() == 4: |
|
|
|
|
|
causal_mask = attention_mask |
|
|
else: |
|
|
min_dtype = torch.finfo(dtype).min |
|
|
causal_mask = torch.full( |
|
|
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device |
|
|
) |
|
|
if sequence_length != 1: |
|
|
causal_mask = torch.triu(causal_mask, diagonal=1) |
|
|
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) |
|
|
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) |
|
|
if attention_mask is not None: |
|
|
causal_mask = causal_mask.clone() |
|
|
mask_length = attention_mask.shape[-1] |
|
|
padding_attention_mask = (attention_mask[:, None, None, :] == attention_mask[:, None, :, None])[ |
|
|
:, :, -sequence_length:, : |
|
|
].to(dtype) |
|
|
padding_mask = causal_mask[:, :, :, :mask_length] + padding_attention_mask |
|
|
padding_mask = padding_mask == 0 |
|
|
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( |
|
|
padding_mask, min_dtype |
|
|
) |
|
|
|
|
|
return causal_mask |
|
|
|
|
|
def _update_mamba_mask(self, attention_mask, cache_position): |
|
|
""" |
|
|
No need for zeroing states when |
|
|
1. Cached forward |
|
|
2. Attending to all inputs |
|
|
""" |
|
|
mamba_mask = attention_mask |
|
|
if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)): |
|
|
mamba_mask = None |
|
|
return mamba_mask |
|
|
|
|
|
|
|
|
class BambaForCausalLM(LlamaForCausalLM): |
|
|
def __init__(self, config): |
|
|
super().__init__(config) |
|
|
self.z_loss_coefficient = config.z_loss_coefficient |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
past_key_values: Optional[HybridMambaAttentionDynamicCache] = None, |
|
|
inputs_embeds: Optional[torch.FloatTensor] = None, |
|
|
labels: Optional[torch.LongTensor] = None, |
|
|
use_cache: Optional[bool] = None, |
|
|
output_attentions: Optional[bool] = None, |
|
|
output_hidden_states: Optional[bool] = None, |
|
|
cache_position: Optional[torch.LongTensor] = None, |
|
|
logits_to_keep: Union[int, torch.Tensor] = 0, |
|
|
**kwargs, |
|
|
) -> CausalLMOutputWithPast: |
|
|
r""" |
|
|
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
|
|
Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., |
|
|
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored |
|
|
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. |
|
|
|
|
|
Example: |
|
|
|
|
|
```python |
|
|
>>> from transformers import AutoTokenizer, BambaForCausalLM |
|
|
|
|
|
>>> model = BambaForCausalLM.from_pretrained("...") |
|
|
>>> tokenizer = AutoTokenizer.from_pretrained("...") |
|
|
|
|
|
>>> prompt = "Hey, are you conscious? Can you talk to me?" |
|
|
>>> inputs = tokenizer(prompt, return_tensors="pt") |
|
|
|
|
|
>>> # Generate |
|
|
>>> generate_ids = model.generate(inputs.input_ids, max_length=30) |
|
|
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] |
|
|
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." |
|
|
```""" |
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
|
|
output_hidden_states = ( |
|
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
|
|
) |
|
|
|
|
|
|
|
|
outputs: BaseModelOutputWithPast = self.model( |
|
|
input_ids=input_ids, |
|
|
attention_mask=attention_mask, |
|
|
position_ids=position_ids, |
|
|
past_key_values=past_key_values, |
|
|
inputs_embeds=inputs_embeds, |
|
|
use_cache=use_cache, |
|
|
output_attentions=output_attentions, |
|
|
output_hidden_states=output_hidden_states, |
|
|
cache_position=cache_position, |
|
|
**kwargs, |
|
|
) |
|
|
|
|
|
hidden_states = outputs.last_hidden_state |
|
|
|
|
|
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep |
|
|
logits = self.lm_head(hidden_states[:, slice_indices, :]) |
|
|
|
|
|
loss = None |
|
|
if labels is not None: |
|
|
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) |
|
|
if self.z_loss_coefficient > 0: |
|
|
|
|
|
z_loss = logits.logsumexp(dim=-1).to(dtype=loss.dtype).pow(2).mean() |
|
|
loss = loss + self.z_loss_coefficient * z_loss |
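                # z-loss regularization: penalizes the squared log-partition (logsumexp) of the logits to keep
                # their scale from drifting during training.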
|
|
|
|
|
return CausalLMOutputWithPast( |
|
|
loss=loss, |
|
|
logits=logits, |
|
|
past_key_values=outputs.past_key_values, |
|
|
hidden_states=outputs.hidden_states, |
|
|
attentions=outputs.attentions, |
|
|
) |
|
|
|
|
|
def prepare_inputs_for_generation( |
|
|
self, |
|
|
input_ids, |
|
|
past_key_values=None, |
|
|
attention_mask=None, |
|
|
inputs_embeds=None, |
|
|
cache_position=None, |
|
|
position_ids=None, |
|
|
use_cache=True, |
|
|
**kwargs, |
|
|
): |
|
|
|
|
|
|
|
|
empty_past_kv = past_key_values is None |
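        # With an existing cache, only the not-yet-processed tokens are kept below; otherwise a fresh
        # HybridMambaAttentionDynamicCache is created for this batch.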
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if not empty_past_kv: |
|
|
if ( |
|
|
inputs_embeds is not None |
|
|
or cache_position[-1] >= input_ids.shape[1] |
|
|
): |
|
|
input_ids = input_ids[:, -cache_position.shape[0] :] |
|
|
elif input_ids.shape[1] != cache_position.shape[0]: |
|
|
input_ids = input_ids[:, cache_position] |
|
|
else: |
|
|
past_key_values = HybridMambaAttentionDynamicCache( |
|
|
self.config, input_ids.shape[0], self.dtype, device=self.device |
|
|
) |
|
|
|
|
|
if attention_mask is not None and position_ids is None: |
|
|
|
|
|
position_ids = attention_mask.long().cumsum(-1) - 1 |
|
|
position_ids.masked_fill_(attention_mask == 0, 1) |
|
|
if not empty_past_kv: |
|
|
position_ids = position_ids[:, -input_ids.shape[1] :] |
|
|
|
|
|
|
|
|
if inputs_embeds is not None and empty_past_kv: |
|
|
model_inputs = {"inputs_embeds": inputs_embeds} |
|
|
else: |
|
|
model_inputs = {"input_ids": input_ids.contiguous()} |
|
|
|
|
|
model_inputs.update( |
|
|
{ |
|
|
"position_ids": position_ids, |
|
|
"past_key_values": past_key_values, |
|
|
"use_cache": use_cache, |
|
|
"attention_mask": attention_mask, |
|
|
"logits_to_keep": self.config.num_logits_to_keep, |
|
|
"cache_position": cache_position, |
|
|
} |
|
|
) |
|
|
|
|
|
|
|
|
for key, value in kwargs.items(): |
|
|
if key not in model_inputs: |
|
|
model_inputs[key] = value |
|
|
|
|
|
return model_inputs |
|
|
|
|
|
|
|
|
__all__ = ["BambaModel", "BambaForCausalLM", "BambaPreTrainedModel"] |
|
|
|