# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch SuperlinearExp model.""" import math from contextlib import nullcontext from dataclasses import dataclass from typing import Any, Dict, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss import torch.nn.functional as F from transformers.activations import ACT2FN from transformers.cache_utils import DynamicCache # we need __iter__ and __len__ of pkv from transformers.generation import GenerationMixin from transformers.modeling_utils import PreTrainedModel from transformers.utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from transformers.utils.import_utils import ( is_causal_conv1d_available, is_mamba_2_ssm_available, ) try: from transformers.modeling_flash_attention_utils import _flash_attention_forward from transformers.utils import is_flash_attn_greater_or_equal_2_10 except ImportError: _flash_attention_forward = None def is_flash_attn_greater_or_equal_2_10(): return False from .configuration_superlinear_exp import SuperlinearExpConfig try: from .moe import fused_experts_moe, shared_fused_moe_is_available except Exception: # pragma: no cover fused_experts_moe = None def shared_fused_moe_is_available() -> bool: # type: ignore[no-redef] return False try: 
from superlinear.kernels.superlinear.attention import ( build_sw_blockmask, fused_prefill_with_swflex, fused_prefill_with_swflex_gqa, full_span_attention_fused_with_search_values, full_span_attention_fused_with_search_values_gqa, ) from superlinear.kernels.superlinear.span import ( decode_span_attention_staged, decode_span_attention_staged_gqa as decode_span_attention_staged_gqa_kernel_v2, ) from superlinear.kernels.common.power import window_len_from_sw_index except Exception as exc: # pragma: no cover raise ImportError( "SuperlinearExp span-attention requires the `superlinear` package.\n" "Install from the repo root with `pip install -e .` (or `pip install superlinear` if published)." ) from exc try: from superlinear.kernels.superlinear.attention import ( fused_prefill_with_swtriton, fused_prefill_with_swtriton_gqa, fused_prefill_with_swtriton_bucketed_gqa, ) except Exception: # pragma: no cover fused_prefill_with_swtriton = None fused_prefill_with_swtriton_gqa = None fused_prefill_with_swtriton_bucketed_gqa = None try: from torch.nn.attention.bias import causal_lower_right except Exception: # pragma: no cover causal_lower_right = None logger = logging.get_logger(__name__) # Copied from transformers.models.mamba.modeling_mamba2.modeling_mamba2.py with MAMBA2->SUPERLINEAREXP,Mamba2->SuperlinearExp # For Mamba2 components Mamba2->SuperlinearExpMamba2 if is_mamba_2_ssm_available(): from mamba_ssm.ops.triton.selective_state_update import selective_state_update from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined else: mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined, selective_state_update = None, None, None try: #from mamba_ssm.ops.triton.layernorm_gated import RMSNorm as RMSNormGated from mamba_ssm.ops.triton.layernorm_gated import rmsnorm_fn except ImportError: raise ImportError("mamba-ssm is required by the Mamba model but cannot be imported") if is_causal_conv1d_available(): from causal_conv1d 
import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_update, causal_conv1d_fn = None, None is_fast_path_available = all( ( selective_state_update, mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined, causal_conv1d_fn, causal_conv1d_update, ) ) _CHECKPOINT_FOR_DOC = "concavity.ai/SuperlinearExpV0.1-30B" _CONFIG_FOR_DOC = "SuperlinearExpConfig" # Helper methods for segment sum computation def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int): """ Padding x tensor with `pad_size` on the seq_len dim (dim=1) Assumes that we only have tensors of either size 4 or 3 """ pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0) return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0) def reshape_into_chunks(input_tensor, pad_size, chunk_size): """ Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and simultaneously splitting it into chunk sequences. Assumes that we only have tensors of either size 4 or 3 """ # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...] input_tensor = pad_tensor_by_size(input_tensor, pad_size) if len(input_tensor.shape) == 3: # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads] return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2]) else: # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size] return input_tensor.reshape( input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3] ) def segment_sum(input_tensor): """ More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions. """ chunk_size = input_tensor.size(-1) # 1. 
expand input tensor to have an additional dimension and repeat along that dimension # [..., chunk_size] -> [..., chunk_size, chunk_size] input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size) # 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1) input_tensor = input_tensor.masked_fill(~mask, 0) # 3. compute actual cumsum tensor_segsum = torch.cumsum(input_tensor, dim=-2) # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time) mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0) tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf) return tensor_segsum def apply_mask_to_padding_states(hidden_states, attention_mask): """ Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66 """ if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: dtype = hidden_states.dtype hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) return hidden_states # Copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/jamba/modeling_jamba.py class HybridMambaAttentionDynamicCache(DynamicCache): """ A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache (which has a constant shape regardless of seq_len). This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states` and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. 
The expected shape for each tensor For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`, while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors). For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors), while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`, and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`. """ def __init__(self, config, batch_size, dtype=torch.float16, device=None): super().__init__() self.dtype = dtype self.hybrid_override_pattern = config.hybrid_override_pattern self.has_previous_state = False # only used by mamba intermediate_size = config.mamba_num_heads * config.mamba_head_dim ssm_state_size = config.ssm_state_size conv_kernel_size = config.conv_kernel self.conv_kernel_size = conv_kernel_size # Allocate fixed-shape state buffers to support CUDA graph capture on Mamba decode. conv_dim = intermediate_size + 2 * config.n_groups * ssm_state_size # conv_states shape: (num_layers, batch_size, conv_dim, conv_kernel_size) self.conv_states = torch.zeros( config.num_hidden_layers, batch_size, conv_dim, conv_kernel_size, device=device, dtype=dtype, ) # ssm_states shape: (num_layers, batch_size, num_heads, head_dim, ssm_state_size) # # IMPORTANT: Keep SSM states in fp32 to avoid cumulative quantization drift during # long decode. This makes cached decode numerically consistent with full recompute. 
self.ssm_states = torch.zeros( config.num_hidden_layers, batch_size, config.mamba_num_heads, config.mamba_head_dim, ssm_state_size, device=device, dtype=torch.float32, ) self.transformer_layers = [] # Only actual attention layers ('*') for i in range(config.num_hidden_layers): if self.hybrid_override_pattern[i] == "*": self.transformer_layers.append(i) self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: # Update the cache if self.key_cache[layer_idx].shape[-1] == 0: self.key_cache[layer_idx] = key_states self.value_cache[layer_idx] = value_states else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2) return self.key_cache[layer_idx], self.value_cache[layer_idx] def reorder_cache(self, beam_idx: torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" for layer_idx in range(len(self.key_cache)): device = self.key_cache[layer_idx].device self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.value_cache[layer_idx].device self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) beam_idx_conv = beam_idx.to(self.conv_states.device) self.conv_states.copy_(self.conv_states.index_select(1, beam_idx_conv)) beam_idx_ssm = beam_idx.to(self.ssm_states.device) self.ssm_states.copy_(self.ssm_states.index_select(1, beam_idx_ssm)) def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. 
A layer index can be optionally passed.""" # take any layer that contains cache and not empty tensor layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx if len(self.key_cache) <= layer_idx: return 0 cache_tensor = self.key_cache[layer_idx] # Empty cache is initialized as 2D tensor with shape (batch, 0) # Actual cache is 4D: (batch_size, num_heads, seq_len, head_dim) if cache_tensor.dim() < 4: return 0 return cache_tensor.shape[-2] def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]: raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.") @classmethod def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache": raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.") # Copied from modeling_mamba2.py def update_conv_state( self, layer_idx: int, new_conv_state: torch.Tensor, cache_init: bool = False ) -> torch.Tensor: conv_layer = self.conv_states.select(0, layer_idx) if cache_init: conv_layer.copy_(new_conv_state.to(device=self.conv_states.device, dtype=conv_layer.dtype).contiguous()) else: conv_kernel_size = conv_layer.shape[-1] # Shift left by 1. Avoid torch.roll (not supported in CUDA graph capture). 
conv_layer[:, :, : conv_kernel_size - 1].copy_(conv_layer[:, :, 1:conv_kernel_size].clone()) if new_conv_state.dim() == 3 and new_conv_state.shape[1] == 1: x = new_conv_state[:, 0, :] elif new_conv_state.dim() == 2: x = new_conv_state else: raise ValueError( f"new_conv_state must be (B, CONV_DIM) or (B, 1, CONV_DIM), got shape {tuple(new_conv_state.shape)}" ) conv_layer[:, :, -1].copy_(x.to(device=self.conv_states.device, dtype=conv_layer.dtype)) return conv_layer def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor): ssm_layer = self.ssm_states.select(0, layer_idx) ssm_layer.copy_(new_ssm_state.to(device=self.ssm_states.device, dtype=ssm_layer.dtype).contiguous()) return ssm_layer def reset(self): self.conv_states.zero_() self.ssm_states.zero_() class HybridMambaAttentionStaticCache(HybridMambaAttentionDynamicCache): """ Static cache variant to avoid O(L) concat overhead during long-context decode. `cache_mode='auto'` returns slices during prefill (`seq_len > 1`) and full buffers during decode (`seq_len == 1`). This keeps the returned K/V shapes stable during decoding, which is required for CUDA graph capture. Notes: - During prefill: increments `_layer_seen_tokens` normally. - During decode: does NOT increment `_layer_seen_tokens` (allows repeated warmup/capture/replay at a fixed position). """ def __init__( self, config: SuperlinearExpConfig, batch_size: int, max_seq_len: int, dtype: torch.dtype = torch.float16, device: Optional[Union[str, torch.device]] = None, cache_mode: str = "auto", ): super().__init__(config, batch_size, dtype=dtype, device=device) self.max_seq_len = int(max_seq_len) self._layer_seen_tokens = [0] * config.num_hidden_layers # Decode-time return-mode controls: # - For CUDA graph replay we must return full fixed-size buffers (stable shapes). 
        # - For eager decode (no CUDA graph), returning only the valid prefix can be
        #   numerically closer to DynamicCache (and avoids some SDPA/stripe heuristics
        #   keying off the allocation length).
        #
        # These are set externally by the model's forward() based on the execution mode.
        self._decode_return_slices: bool = False
        self._decode_slice_len: Optional[int] = None
        if cache_mode not in ("prefill", "decode", "auto"):
            raise ValueError(f"cache_mode must be 'prefill', 'decode', or 'auto', got {cache_mode!r}")
        self.cache_mode = cache_mode
        # Pre-allocate fixed-size K/V buffers for attention ('*') layers only; mamba layers keep
        # the (batch, 0) placeholders created by the parent constructor.
        for layer_idx in range(config.num_hidden_layers):
            if self.hybrid_override_pattern[layer_idx] == "*":
                head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
                num_kv_heads = config.num_key_value_heads
                cache_shape = (batch_size, num_kv_heads, self.max_seq_len, head_dim)
                self.key_cache[layer_idx] = torch.zeros(cache_shape, dtype=dtype, device=device)
                self.value_cache[layer_idx] = torch.zeros(cache_shape, dtype=dtype, device=device)

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Write new K/V states into the pre-allocated buffers and return the cache for `layer_idx`."""
        seq_len = key_states.shape[2]
        is_decode = seq_len == 1
        # For CUDA graph compatibility: use cache_position tensor if provided, otherwise fallback to counter
        cache_position = cache_kwargs.get("cache_position") if cache_kwargs else None
        if cache_position is not None and is_decode:
            # During decode, use index_copy_ for CUDA graph compatibility.
            # key_states/value_states are (B, H, 1, D), we write to position cache_position[0]
            batch_size = key_states.shape[0]
            num_heads = key_states.shape[1]
            head_dim = key_states.shape[3]
            max_seq = self.key_cache[layer_idx].shape[2]
            # Flatten (B, H, S, D) -> (B*H, S, D) for index_copy_ along dim=1
            key_cache_flat = self.key_cache[layer_idx].view(batch_size * num_heads, max_seq, head_dim)
            value_cache_flat = self.value_cache[layer_idx].view(batch_size * num_heads, max_seq, head_dim)
            key_states_flat = key_states.view(batch_size * num_heads, 1, head_dim)
            value_states_flat = value_states.view(batch_size * num_heads, 1, head_dim)
            # index_copy_(dim, index, source) - index must be 1D
            key_cache_flat.index_copy_(1, cache_position, key_states_flat)
            value_cache_flat.index_copy_(1, cache_position, value_states_flat)
        else:
            # Prefill path: use counter-based indexing
            start_pos = self._layer_seen_tokens[layer_idx]
            if start_pos + seq_len > self.max_seq_len:
                raise ValueError(f"StaticCache overflow: {start_pos + seq_len} > {self.max_seq_len}")
            self.key_cache[layer_idx][:, :, start_pos : start_pos + seq_len, :] = key_states
            self.value_cache[layer_idx][:, :, start_pos : start_pos + seq_len, :] = value_states

        # Only advance during prefill; keep position fixed for decode/capture/replay.
        if not is_decode:
            self._layer_seen_tokens[layer_idx] += seq_len

        # Return slices for prefill, full buffer for decode (auto mode).
        return_slice = (seq_len > 1) if self.cache_mode == "auto" else (self.cache_mode == "prefill")

        if is_decode and self._decode_return_slices:
            # Eager decode mode: return only the currently-valid prefix so downstream
            # attention logic sees an effective kv_len instead of the allocation length.
            end_pos = self._decode_slice_len
            if end_pos is None:
                # No explicit slice length configured: fall back to the full buffers.
                return self.key_cache[layer_idx], self.value_cache[layer_idx]
            end_pos = min(int(end_pos), self.max_seq_len)
            return (
                self.key_cache[layer_idx][:, :, :end_pos, :],
                self.value_cache[layer_idx][:, :, :end_pos, :],
            )

        if return_slice:
            end_pos = self._layer_seen_tokens[layer_idx]
            return (
                self.key_cache[layer_idx][:, :, :end_pos, :],
                self.value_cache[layer_idx][:, :, :end_pos, :],
            )
        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        """Number of tokens written during prefill (decode writes do not advance the counter)."""
        if layer_idx is None or layer_idx not in self.transformer_layers:
            if not self.transformer_layers:
                return 0
            layer_idx = self.transformer_layers[0]
        return self._layer_seen_tokens[layer_idx]


class MambaRMSNormGated(torch.nn.Module):
    # Thin wrapper around mamba-ssm's fused gated RMSNorm (`rmsnorm_fn`), with a learned weight
    # and grouped normalization of size `group_size`.
    def __init__(self, hidden_size, group_size, eps=1e-5):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
        self.group_size = group_size

    # jan28b version
    def forward(self, hidden_states, gate=None):
        # NOTE(review): norm_before_gate=False — presumably the gate `z` is applied before the
        # normalization inside the fused kernel; confirm against mamba-ssm's layernorm_gated docs.
        return rmsnorm_fn(x=hidden_states,
                          weight=self.weight,
                          bias=None,  # No bias
                          z=gate,
                          eps=self.variance_epsilon,
                          group_size=self.group_size,
                          norm_before_gate=False
                          )


class SuperlinearExpMamba2Mixer(nn.Module):
    """
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A"
    for why A isn't selective) ∆, B, C are input-dependent (this is a key difference between
    Mamba and the linear time invariant S4, and is why Mamba is called **selective** state spaces)
    """

    def __init__(self, config: SuperlinearExpConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.num_heads = config.mamba_num_heads
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.ssm_state_size
        self.conv_kernel_size = config.conv_kernel
        self.intermediate_size = config.mamba_num_heads * config.mamba_head_dim
        self.layer_idx = layer_idx
        self.use_conv_bias = config.use_conv_bias
        self.activation = config.mamba_hidden_act
        self.act = ACT2FN[config.mamba_hidden_act]
        self.layer_norm_epsilon = config.layer_norm_epsilon

        self.n_groups = config.n_groups
        self.head_dim = config.mamba_head_dim
        self.chunk_size = config.chunk_size

        self.time_step_limit = config.time_step_limit
        self.time_step_min = config.time_step_min
        self.time_step_max = config.time_step_max

        # Depthwise causal conv over the concatenated (x, B, C) channels.
        self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
        self.conv1d = nn.Conv1d(
            in_channels=self.conv_dim,
            out_channels=self.conv_dim,
            bias=config.use_conv_bias,
            kernel_size=config.conv_kernel,
            groups=self.conv_dim,
            padding=config.conv_kernel - 1,
        )

        # projection of the input hidden states
        projection_size = self.intermediate_size + self.conv_dim + self.num_heads
        self.in_proj = nn.Linear(
            self.hidden_size,
            projection_size,
            bias=config.use_bias,
        )
        # selective projection used to make dt, B and C input dependant

        # time step projection (discretization)
        # instantiate once and copy inv_dt in init_weights of PretrainedModel
        self.dt_bias = nn.Parameter(torch.ones(self.num_heads))

        # S4D real initialization. These are not discretized!
        # The core is to load them, compute the discrete states, then write the updated state.
        # Keeps the memory bounded
        A = torch.arange(1, self.num_heads + 1)
        self.A_log = nn.Parameter(torch.log(A))
        self.A_log._no_weight_decay = True
        self.norm = MambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon, group_size=self.intermediate_size // self.n_groups)
        self.D = nn.Parameter(torch.ones(self.num_heads))
        self.D._no_weight_decay = True

        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
        self.use_bias = config.use_bias

        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because on of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
                " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
                " https://github.com/Dao-AILab/causal-conv1d"
            )

    def cuda_kernels_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        """Device-pinning wrapper: run the mixer on the device holding this layer's weights."""
        layer_device = self.A_log.device
        # Pin the CUDA device context so kernels launched below target this layer's GPU
        # (multi-GPU model-parallel layouts place different layers on different devices).
        with torch.cuda.device(layer_device):
            if hidden_states.device != layer_device:
                hidden_states = hidden_states.to(layer_device)
            return self._cuda_kernels_forward_impl(hidden_states, cache_params, cache_position, attention_mask)

    def _cuda_kernels_forward_impl(
        self,
        hidden_states: torch.Tensor,
        cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        """Mamba2 mixer forward: fused-kernel prefill/training path plus a pure-PyTorch,
        CUDA-graph-safe single-token decode path."""
        # 1. Gated MLP's linear projection
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
        projected_states = self.in_proj(hidden_states)

        # Set up dimensions for reshapes later
        batch_size, seq_len, _ = hidden_states.shape
        groups_time_state_size = self.n_groups * self.ssm_state_size
        d_mlp = (
            projected_states.shape[-1]
            - 2 * self.intermediate_size
            - 2 * self.n_groups * self.ssm_state_size
            - self.num_heads
        ) // 2

        # Single step calculations via cache (decode mode: seq_len == 1 and cache exists).
        is_decode = seq_len == 1 and cache_params is not None and cache_position is not None
        if is_decode:
            _, _, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
                [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
            )

            # 2. Convolution sequence transformation (PyTorch-based for CUDA graph compatibility)
            # NOTE: Avoid causal_conv1d_update (custom kernel) for graph safety.
            conv_state = cache_params.update_conv_state(
                layer_idx=self.layer_idx, new_conv_state=hidden_states_B_C, cache_init=False
            )
            conv_w = self.conv1d.weight.squeeze(1)
            # Move conv_state to the same device as conv_w for multi-GPU compatibility
            conv_state = conv_state.to(conv_w.device)
            # Depthwise conv expressed as an elementwise multiply + sum over the kernel window.
            hidden_states_B_C = (conv_state * conv_w.unsqueeze(0)).sum(dim=-1)
            if self.use_conv_bias:
                hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
            hidden_states_B_C = self.act(hidden_states_B_C)

            hidden_states, B, C = torch.split(
                hidden_states_B_C,
                [self.intermediate_size, groups_time_state_size, groups_time_state_size],
                dim=-1,
            )

            # 3. SSM transformation (PyTorch-based for CUDA graph compatibility)
            # NOTE: Avoid selective_state_update (Triton kernel) for graph safety.
            # Get device from layer weights for multi-GPU compatibility
            device = self.A_log.device
            dt_head = (dt.to(device=device, dtype=torch.float32) + self.dt_bias.to(torch.float32))  # (B, NHEADS)
            dt_head = torch.nn.functional.softplus(dt_head)
            dt_head = torch.clamp(dt_head, self.time_step_limit[0], self.time_step_limit[1])
            A_head = -torch.exp(self.A_log.to(torch.float32))  # (NHEADS,)
            dA = torch.exp(dt_head * A_head.unsqueeze(0))  # (B, NHEADS)

            # Broadcast grouped B/C to one copy per head.
            heads_per_group = self.num_heads // self.n_groups
            B = B.to(device=device).view(batch_size, self.n_groups, self.ssm_state_size).repeat_interleave(heads_per_group, dim=1)
            C = C.to(device=device).view(batch_size, self.n_groups, self.ssm_state_size).repeat_interleave(heads_per_group, dim=1)
            B = B.to(torch.float32)
            C = C.to(torch.float32)

            hidden_states_reshaped = hidden_states.to(device=device).view(batch_size, self.num_heads, self.head_dim)
            x = hidden_states_reshaped.to(torch.float32)
            dB = dt_head[:, :, None] * B  # (B, NHEADS, DSTATE)
            dBx = x[:, :, :, None] * dB[:, :, None, :]  # (B, NHEADS, HEAD_DIM, DSTATE)

            # Update SSM state in-place: state = state * dA + dBx
            # Move ssm_state to current device for computation, then copy back
            ssm_state = cache_params.ssm_states.select(0, self.layer_idx)
            ssm_state_fp32 = ssm_state.to(device=device, dtype=torch.float32)
            ssm_state_fp32 = ssm_state_fp32 * dA[:, :, None, None] + dBx
            ssm_state.copy_(ssm_state_fp32.to(device=ssm_state.device, dtype=ssm_state.dtype))

            D = self.D.to(torch.float32)  # (NHEADS,)
            y = (ssm_state_fp32 * C[:, :, None, :]).sum(dim=-1) + x * D[None, :, None]  # (B, NHEADS, HEAD_DIM)
            hidden_states = y.to(device=device, dtype=hidden_states.dtype).reshape(batch_size, self.num_heads * self.head_dim)

            # Ensure gate is on the same device as hidden_states for norm
            hidden_states = self.norm(hidden_states, gate.to(device=device))

            # 4. Final linear projection
            out = self.out_proj(hidden_states)[:, None, ...]
        # Fused calculations or step by step if no initialized cache is found
        else:
            A = -torch.exp(self.A_log.float())  # (num_heads) or (intermediate_size, state_size)
            dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}

            # 2-4. Fused kernel for conv1d, SSM, and the final projection
            if self.training and cache_params is None:
                out = mamba_split_conv1d_scan_combined(
                    projected_states,
                    self.conv1d.weight.squeeze(1),
                    self.conv1d.bias,
                    self.dt_bias,
                    A,
                    D=self.D,
                    chunk_size=self.chunk_size,
                    seq_idx=None,  # was seq_idx
                    activation=self.activation,
                    rmsnorm_weight=self.norm.weight,
                    rmsnorm_eps=self.norm.variance_epsilon,
                    outproj_weight=self.out_proj.weight,
                    outproj_bias=self.out_proj.bias,
                    headdim=self.head_dim,
                    ngroups=self.n_groups,
                    norm_before_gate=False,
                    return_final_states=False,
                    **dt_limit_kwargs,
                )
            else:
                _, _, gate, hidden_states_B_C, dt = projected_states.split(
                    [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
                )

                # 2. Convolution sequence transformation
                #
                # For long-context prefill we may process the prompt in chunks (seq_len > 1) while reusing
                # an initialized cache across calls. In that case the causal conv needs the last (K-1) raw
                # conv inputs from the previous chunk to match full-prefill results.
                conv_in = hidden_states_B_C.transpose(1, 2)  # (B, conv_dim, seq_len)
                has_cache = cache_params is not None
                # "Continuation" = chunked prefill past the first chunk (cache_position starts > 0).
                is_continuation = (
                    has_cache
                    and cache_position is not None
                    and cache_position.numel() > 0
                    and int(cache_position[0].item()) > 0
                )
                kernel_size = cache_params.conv_kernel_size if has_cache else self.conv1d.kernel_size[0]
                prefix_len = max(int(kernel_size) - 1, 0)
                if is_continuation and prefix_len > 0:
                    # Prepend the previous chunk's last (K-1) raw conv inputs, run the conv over the
                    # widened input, then keep only the outputs for the current chunk.
                    prev_conv_state = cache_params.conv_states.select(0, self.layer_idx).to(device=conv_in.device, dtype=conv_in.dtype)
                    conv_in_full = torch.cat([prev_conv_state[:, :, 1:], conv_in], dim=-1)
                    if self.activation not in ["silu", "swish"]:
                        conv_out_full = self.conv1d(conv_in_full)[..., : conv_in_full.shape[-1]]
                        conv_out_full = self.act(conv_out_full)
                    else:
                        conv_out_full = causal_conv1d_fn(
                            x=conv_in_full,
                            weight=self.conv1d.weight.squeeze(1),
                            bias=self.conv1d.bias,
                            activation=self.activation,
                        )
                    hidden_states_B_C = conv_out_full[..., -seq_len:].transpose(1, 2)
                else:
                    if self.activation not in ["silu", "swish"]:
                        hidden_states_B_C = self.act(self.conv1d(conv_in)[..., :seq_len]).transpose(1, 2)
                    else:
                        hidden_states_B_C = causal_conv1d_fn(
                            x=conv_in,
                            weight=self.conv1d.weight.squeeze(1),
                            bias=self.conv1d.bias,
                            activation=self.activation,
                        ).transpose(1, 2)

                # Init cache (conv state stores the last `kernel_size` raw conv inputs).
                if has_cache:
                    prev_conv_state = cache_params.conv_states.select(0, self.layer_idx).to(device=conv_in.device, dtype=conv_in.dtype)
                    if conv_in.shape[-1] >= kernel_size:
                        new_conv_state = conv_in[:, :, -kernel_size:]
                    else:
                        # Short chunk: keep the tail of the previous state to fill the window.
                        needed = int(kernel_size) - int(conv_in.shape[-1])
                        if needed > 0:
                            new_conv_state = torch.cat([prev_conv_state[:, :, -needed:], conv_in], dim=-1)
                        else:
                            new_conv_state = conv_in
                    cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=new_conv_state, cache_init=True)

                hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
                hidden_states, B, C = torch.split(
                    hidden_states_B_C,
                    [self.intermediate_size, groups_time_state_size, groups_time_state_size],
                    dim=-1,
                )

                # 3. SSM transformation
                initial_states = None
                if has_cache:
                    # Always pass an explicit initial state when using a cache so full-prefill and
                    # chunked-prefill take the same kernel path. Reset to zeros when starting a
                    # new sequence (cache_position[0] == 0) to avoid leaking stale cache state.
                    ssm_cache = cache_params.ssm_states.select(0, self.layer_idx)
                    if not is_continuation:
                        ssm_cache.zero_()
                    initial_states = ssm_cache.to(device=hidden_states.device)
                scan_output, ssm_state = mamba_chunk_scan_combined(
                    hidden_states.view(batch_size, seq_len, -1, self.head_dim),
                    dt,
                    A,
                    B.view(batch_size, seq_len, self.n_groups, -1),
                    C.view(batch_size, seq_len, self.n_groups, -1),
                    chunk_size=self.chunk_size,
                    D=self.D,
                    z=None,
                    seq_idx=None,
                    return_final_states=True,
                    dt_bias=self.dt_bias,
                    initial_states=initial_states,
                    dt_softplus=True,
                    **dt_limit_kwargs,
                )

                # Init cache
                if ssm_state is not None and cache_params is not None:
                    cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)

                scan_output = scan_output.view(batch_size, seq_len, -1)
                # Multiply "gate" branch and apply extra normalization layer
                scan_output = self.norm(scan_output, gate)

                # 4. Final linear projection
                out = self.out_proj(scan_output)
        return out

    # fmt: off
    def torch_forward(self, input_states, cache_params: Optional[HybridMambaAttentionDynamicCache]=None, cache_position:Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        """Pure-PyTorch fallback forward (no mamba-ssm/causal-conv1d kernels)."""
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype

        # 1. Gated MLP's linear projection
        input_states = apply_mask_to_padding_states(input_states, attention_mask)
        projected_states = self.in_proj(input_states)
        d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size - self.num_heads) // 2
        _, _, gate, hidden_states_B_C, dt = projected_states.split(
            [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
        )

        # 2. Convolution sequence transformation
        is_decode = seq_len == 1 and cache_params is not None and cache_position is not None
        if is_decode:
            cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=hidden_states_B_C, cache_init=False)

            # We need to guarantee that anything regarding the cache is on the same device
            conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)

            hidden_states_B_C = torch.sum(conv_states * self.conv1d.weight.squeeze(1), dim=-1)
            if self.use_conv_bias:
                hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
            hidden_states_B_C = self.act(hidden_states_B_C)
        else:
            conv_in = hidden_states_B_C.transpose(1, 2)  # (B, conv_dim, seq_len)
            has_cache = cache_params is not None
            is_continuation = (
                has_cache
                and cache_position is not None
                and cache_position.numel() > 0
                and int(cache_position[0].item()) > 0
            )
            kernel_size = cache_params.conv_kernel_size if has_cache else self.conv1d.kernel_size[0]
            prefix_len = max(int(kernel_size) - 1, 0)
            if is_continuation and prefix_len > 0:
                # Chunked-prefill continuation: widen the conv input with the previous chunk's tail.
                prev_conv_state = cache_params.conv_states.select(0, self.layer_idx).to(device=conv_in.device, dtype=conv_in.dtype)
                conv_in_full = torch.cat([prev_conv_state[:, :, 1:], conv_in], dim=-1)
conv_out_full = self.conv1d(conv_in_full)[..., : conv_in_full.shape[-1]] conv_out_full = self.act(conv_out_full) hidden_states_B_C = conv_out_full[..., -seq_len:].transpose(1, 2) else: hidden_states_B_C = self.act(self.conv1d(conv_in)[..., :seq_len]).transpose(1, 2) # Init cache (conv state stores the last `kernel_size` raw conv inputs). if has_cache: prev_conv_state = cache_params.conv_states.select(0, self.layer_idx).to(device=conv_in.device, dtype=conv_in.dtype) if conv_in.shape[-1] >= kernel_size: new_conv_state = conv_in[:, :, -kernel_size:] else: needed = int(kernel_size) - int(conv_in.shape[-1]) new_conv_state = torch.cat([prev_conv_state[:, :, -needed:], conv_in], dim=-1) cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=new_conv_state, cache_init=True) hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1 ) # 3. SSM transformation A = -torch.exp(self.A_log.float()) # [num_heads] if is_decode: # We need to guarantee that anything regarding the cache is on the same device cache_device = cache_params.ssm_states.device # Note: there is no need to pad parameter matrices here, as there is just one new token # for batched generation dt = dt[:, 0, :][:, None, ...] 
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim) # [num_heads] -> [num_heads, head_dim] dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim) dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype)) dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32) # [bsz, num_heads, head_dim, state_size] dA = (torch.exp(dt[..., None] * A)).to(device=cache_device) # Discretize B # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] -> # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size] B = B.reshape(batch_size, self.n_groups, -1)[..., None, :] B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous() B = B.reshape(batch_size, -1, B.shape[-1]) # [bsz, num_heads, head_dim, state_size] dB = dt[..., None] * B[..., None, :] # Discretize x into dB # [bsz, intermediate_size] -> [bsz, num_heads, head_dim] hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim) dBx = (dB * hidden_states[..., None]).to(device=cache_device) # State calculation cache_params.update_ssm_state( layer_idx=self.layer_idx, new_ssm_state=cache_params.ssm_states[self.layer_idx] * dA + dBx ) # Subsequent output # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size] C = C.reshape(batch_size, self.n_groups, -1)[..., None, :] C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous() C = C.reshape(batch_size, -1, C.shape[-1]) # [bsz, num_heads, head_dim] ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) # Shape: [b, h, d, n] # Reshape ssm_states to merge the first two dimensions ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n] C_reshaped = C.view(batch_size * self.num_heads, 
self.ssm_state_size, 1) # Shape: [b*h, n, 1] y = torch.bmm(ssm_states_reshaped, C_reshaped) y = y.view(batch_size, self.num_heads, self.head_dim) # D skip connection # [num_heads] -> [num_heads, head_dim] D = self.D[..., None].expand(self.D.shape[0], self.head_dim) y = (y + hidden_states * D).to(y.dtype) # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size] y = y.reshape(batch_size, -1)[:, None, ...] else: # begin ssd naive implementation without einsums dt = nn.functional.softplus(dt + self.dt_bias) dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float() B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads) C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads) pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size) # Discretize x and A hidden_states = hidden_states * dt[..., None] A = A.to(hidden_states.dtype) * dt # Rearrange into blocks/chunks hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)] # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size] A = A.permute(0, 3, 1, 2) A_cumsum = torch.cumsum(A, dim=-1) # 1. 
Compute the output for each intra-chunk (diagonal blocks) # This is the analog of a causal mask L = torch.exp(segment_sum(A)) # Contraction of C and B to get G (attention-weights like) G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :] # shape: (b, c, l, s, h, n) G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h) # Compute M, equivalent to applying attention mask to weights M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None] M = M_intermediate.sum(dim=-1) # Compute Y_diag (apply to values) Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3) # 2. Compute the state for each intra-chunk # (right term of low-rank factorization of off-diagonal blocks; B terms) decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum)) B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None] states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2) # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries # (middle term of factorization of off-diag blocks; A terms) if cache_params is not None and cache_position is not None and cache_position[0] > 0: previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device) else: previous_states = torch.zeros_like(states[:, :1]) states = torch.cat([previous_states, states], dim=1) decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0)))) decay_chunk = decay_chunk.transpose(1, 3) new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1) states, ssm_state = new_states[:, :-1], new_states[:, -1] # 4. 
Compute state -> output conversion per chunk # (left term of low-rank factorization of off-diagonal blocks; C terms) state_decay_out = torch.exp(A_cumsum) C_times_states = (C[..., None, :] * states[:, :, None, ...]) state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1) Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None]) # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks) y = Y_diag + Y_off # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim] y = y.reshape(batch_size, -1, self.num_heads, self.head_dim) y = y + D_residual # Cutting off padded chunks if pad_size > 0: y = y[:, :seq_len, :, :] y = y.reshape(batch_size, seq_len, -1) # Init cache if ssm_state is not None and cache_params is not None: cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state) scan_output = self.norm(y, gate) # end ssd naive # 4. Final linear projection contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size] return contextualized_states # fmt: on def forward( self, hidden_states, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): if is_fast_path_available and "cuda" in self.in_proj.weight.device.type: return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask) dtype = hidden_states.dtype if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66 hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask) class SuperlinearExpRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ SuperlinearExpRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm """ 
super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # Weights are in float32 return (self.weight.to(torch.float32) * hidden_states).to(input_dtype) class SuperlinearExpBlock(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.config = config self.layer_idx = layer_idx self.residual_in_fp32 = config.residual_in_fp32 self.norm = SuperlinearExpRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) # M: Mamba2, *: Attention, -: MLP self.block_type = config.layers_block_type[layer_idx] if self.block_type == "mamba": self.mixer = SuperlinearExpMamba2Mixer(config, layer_idx=layer_idx) elif self.block_type == "attention": self.mixer = SUPERLINEAREXP_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) elif self.block_type == "mlp": self.mixer = SuperlinearExpMLP(config, layer_idx=layer_idx) elif self.block_type == "moe": self.mixer = SuperlinearExpMOE(config, layer_idx=layer_idx) else: raise ValueError(f"Invalid layer pattern {config.hybrid_override_pattern[layer_idx]}") def forward( self, hidden_states, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sw_block_mask=None, ): # NOTE: Removed stream context switch for CUDA graph compatibility. # The original code used torch.cuda.stream(torch.cuda.default_stream(...)) to avoid NaN # issues with multiple GPUs, but this breaks CUDA graph capture since operations must # happen on the capture stream. 
        residual = hidden_states
        hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
        if self.residual_in_fp32:
            residual = residual.to(torch.float32)

        if self.block_type == "mamba":
            hidden_states = self.mixer(
                hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask,
            )
        elif self.block_type == "attention":
            # Only the flex-span variants accept a precomputed sliding-window block mask.
            if isinstance(self.mixer, FlexSpanAttention) or isinstance(self.mixer, FlexSpanAttentionGQA):
                hidden_states = self.mixer(
                    hidden_states,
                    cache_position=cache_position,
                    past_key_value=cache_params,
                    attention_mask=attention_mask,
                    sw_block_mask=sw_block_mask,
                )
            else:
                hidden_states = self.mixer(
                    hidden_states,
                    cache_position=cache_position,
                    past_key_value=cache_params,
                    attention_mask=attention_mask,
                )
            # Attention mixers return (attn_output, attn_weights, past_key_value); keep the output.
            hidden_states = hidden_states[0]
        elif self.block_type in ["mlp", "moe"]:
            hidden_states = self.mixer(hidden_states)
        else:
            raise ValueError(f"Invalid block_type: {self.block_type}")

        hidden_states = residual + hidden_states
        return hidden_states


# Copied from transformers.models.nemotron.modeling_nemotron Nemotron->SuperlinearExp
class SuperlinearExpMLP(nn.Module):
    """Two-layer feed-forward block: up_proj -> activation -> down_proj (no gating)."""

    def __init__(self, config, intermediate_size=None, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.intermediate_size = intermediate_size or config.intermediate_size
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.mlp_hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.up_proj(x)))


class SuperlinearExpFusedLinear(nn.Module):
    """Per-expert linear layers stored as one stacked [n_experts, out_features, in_features] parameter."""

    def __init__(self, n_experts: int, in_features: int, out_features: int, bias: bool):
        super().__init__()
        self.n_experts = n_experts
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.empty((n_experts, out_features, in_features)))
        if bias:
            self.bias = nn.Parameter(torch.empty((n_experts, out_features)))
        else:
            self.register_parameter("bias", None)

    def forward(self, x: torch.Tensor, expert_idx: int) -> torch.Tensor:
        """Apply the linear layer of a single expert selected by `expert_idx`."""
        bias = None if self.bias is None else self.bias[expert_idx]
        return F.linear(x, self.weight[expert_idx], bias=bias)


class SuperlinearExpExpert(nn.Module):
    """All routed experts' MLPs, stored as fused stacked weights; one expert applied per call."""

    def __init__(self, config, intermediate_size: Optional[int] = None):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = intermediate_size or config.moe_intermediate_size
        self.n_experts = config.n_routed_experts
        self.up_proj = SuperlinearExpFusedLinear(
            n_experts=self.n_experts,
            in_features=self.hidden_size,
            out_features=self.intermediate_size,
            bias=config.mlp_bias,
        )
        self.down_proj = SuperlinearExpFusedLinear(
            n_experts=self.n_experts,
            in_features=self.intermediate_size,
            out_features=self.hidden_size,
            bias=config.mlp_bias,
        )
        self.act_fn = ACT2FN[config.mlp_hidden_act]

    def forward(self, x: torch.Tensor, expert_idx: int) -> torch.Tensor:
        return self.down_proj(self.act_fn(self.up_proj(x, expert_idx)), expert_idx)


class SuperlinearExpMOE(nn.Module):
    """Mixture-of-experts mixer: top-k router + routed experts + always-on shared expert MLP."""

    def __init__(self, config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.experts = SuperlinearExpExpert(config, intermediate_size=config.moe_intermediate_size)
        self.gate = SuperlinearExpTopkRouter(config)
        self.shared_experts = SuperlinearExpMLP(
            config=config, intermediate_size=config.moe_shared_expert_intermediate_size, layer_idx=layer_idx
        )

    def moe(self, hidden_states: torch.Tensor, topk_indices: torch.Tensor, topk_weights: torch.Tensor):
        """Eager MoE: dispatch flattened tokens to their routed experts and combine weighted outputs."""
        # `hidden_states` is expected to be flattened: [tokens, hidden_size]
        # `topk_indices` / `topk_weights` are [tokens, top_k]
        num_tokens, hidden_size = hidden_states.shape
        top_k = topk_indices.shape[-1]
        if num_tokens == 0:
            return hidden_states

        # Flatten token->expert assignments
        token_indices = (
            torch.arange(num_tokens, device=hidden_states.device, dtype=torch.long)
            .unsqueeze(1)
            .expand(num_tokens, top_k)
            .reshape(-1)
        )
        expert_indices = topk_indices.reshape(-1)
        expert_weights = topk_weights.reshape(-1)

        # Group by expert to avoid iterating over all experts (and avoid computing empty experts).
        sort_order = torch.argsort(expert_indices)
        expert_indices = expert_indices[sort_order]
        token_indices = token_indices[sort_order]
        expert_weights = expert_weights[sort_order]

        unique_experts, counts = torch.unique_consecutive(expert_indices, return_counts=True)

        # Accumulate in the router-weight dtype (float32 per the router) for numerical stability,
        # then cast back to the activation dtype on return.
        final_hidden_states = torch.zeros(
            (num_tokens, hidden_size), device=hidden_states.device, dtype=expert_weights.dtype
        )
        offset = 0
        for expert_idx, count in zip(unique_experts.tolist(), counts.tolist()):
            tokens = token_indices[offset : offset + count]
            weights = expert_weights[offset : offset + count]
            offset += count
            if count == 0:
                continue
            expert_input = hidden_states.index_select(0, tokens)
            expert_output = self.experts(expert_input, expert_idx)
            final_hidden_states.index_add_(0, tokens, expert_output * weights.unsqueeze(-1))

        return final_hidden_states.to(hidden_states.dtype)

    def _fused_moe_forward(self, hidden_states, topk_indices, topk_weights):
        """Execute fused MoE (eager or CUDA graph)."""
        act = f"{self.config.mlp_hidden_act}_no_mul"
        return fused_experts_moe(
            hidden_states,
            self.experts.up_proj.weight,
            self.experts.down_proj.weight,
            topk_weights,
            topk_indices,
            activation=act,
        )

    def forward(self, hidden_states):
        # Pin computation to this layer's device (layers may be sharded across GPUs).
        layer_device = self.gate.weight.device
        with torch.cuda.device(layer_device):
            if hidden_states.device != layer_device:
                hidden_states = hidden_states.to(layer_device)
            return self._forward_impl(hidden_states)

    def _forward_impl(self, hidden_states):
        residuals = hidden_states
        orig_shape = hidden_states.shape
        topk_indices, topk_weights = self.gate(hidden_states)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        use_fused = (
            getattr(self.config, "enable_shared_fused_moe", False)
            and fused_experts_moe is not None
            and shared_fused_moe_is_available()
            and hidden_states.is_cuda
        )
        if use_fused:
            try:
                hidden_states = self._fused_moe_forward(hidden_states, topk_indices, topk_weights).view(*orig_shape)
            except Exception as exc:  # pragma: no cover
                # Best-effort: fused kernel failures degrade to the eager path rather than crash.
                logger.warning_once(f"Shared fused MoE failed: {exc}. Falling back to eager MoE.")
                hidden_states = self.moe(hidden_states, topk_indices, topk_weights).view(*orig_shape)
        else:
            hidden_states = self.moe(hidden_states, topk_indices, topk_weights).view(*orig_shape)
        # Shared expert runs on every token, in parallel with the routed experts.
        hidden_states = hidden_states + self.shared_experts(residuals)
        return hidden_states


class SuperlinearExpTopkRouter(nn.Module):
    """DeepseekV3-style sigmoid router with optional grouped expert selection."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.top_k = config.num_experts_per_tok
        self.n_routed_experts = config.n_routed_experts
        self.routed_scaling_factor = config.routed_scaling_factor
        self.n_group = config.n_group
        self.topk_group = config.topk_group
        self.norm_topk_prob = config.norm_topk_prob

        # Router weights and bias correction kept in float32 for stable routing decisions.
        self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size), dtype=torch.float32))
        self.register_buffer("e_score_correction_bias", torch.zeros(self.n_routed_experts, dtype=torch.float32))

    def _route_eager(self, hidden_states):
        """Eager routing implementation."""
        router_logits = F.linear(hidden_states, self.weight)
        scores = router_logits.sigmoid()

        # Get
topk indices scores_for_choice = scores + self.e_score_correction_bias.unsqueeze(0) if self.n_group > 1 and self.topk_group < self.n_group: group_scores = ( scores_for_choice.view(-1, self.n_group, self.n_routed_experts // self.n_group) .topk(2, dim=-1)[0] .sum(dim=-1) ) group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1] group_mask = torch.zeros_like(group_scores) group_mask.scatter_(1, group_idx, 1) score_mask = ( group_mask.unsqueeze(-1) .expand(-1, self.n_group, self.n_routed_experts // self.n_group) .reshape(-1, self.n_routed_experts) ) scores_for_choice = scores_for_choice.masked_fill(~score_mask.bool(), 0.0) topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1] topk_weights = scores.gather(1, topk_indices) if self.norm_topk_prob: denominator = topk_weights.sum(dim=-1, keepdim=True) + 1e-20 topk_weights = topk_weights / denominator topk_weights = topk_weights * self.routed_scaling_factor return topk_indices, topk_weights def forward(self, hidden_states): hidden_states = hidden_states.view(-1, self.config.hidden_size).to(torch.float32) return self._route_eager(hidden_states) @torch.no_grad() def get_topk_indices(self, scores): """Legacy method for compatibility.""" scores_for_choice = scores.view(-1, self.n_routed_experts) + self.e_score_correction_bias.unsqueeze(0) group_scores = ( scores_for_choice.view(-1, self.n_group, self.n_routed_experts // self.n_group) .topk(2, dim=-1)[0] .sum(dim=-1) ) group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1] group_mask = torch.zeros_like(group_scores) group_mask.scatter_(1, group_idx, 1) score_mask = ( group_mask.unsqueeze(-1) .expand(-1, self.n_group, self.n_routed_experts // self.n_group) .reshape(-1, self.n_routed_experts) ) scores_for_choice = scores_for_choice.masked_fill(~score_mask.bool(), 0.0) topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1] return topk_indices # Pre-allocated buffers for 
# repeat_kv during CUDA graph capture
# Retained for backward compatibility with external references; no longer used (see repeat_kv).
_repeat_kv_buffer = {}


# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from
    (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)

    FIX: the previous implementation returned a module-level buffer cached only by
    (shape, dtype, device). Every attention forward calls `repeat_kv(key_states, n)`
    immediately followed by `repeat_kv(value_states, n)` with identical shapes, so both
    calls received the SAME buffer: the value copy silently overwrote the key result and
    keys/values ended up aliasing one tensor whenever n_rep > 1. We now materialize a
    fresh output per call via expand + reshape (the reshape of the non-contiguous expand
    copies). Allocations made during CUDA graph capture are served from the graph's
    private memory pool, so this remains capture-safe.
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # (B, H_kv, S, D) -> (B, H_kv, n_rep, S, D) view, then a copying reshape to
    # (B, H_kv * n_rep, S, D). Head ordering matches the old buffer fill:
    # output head p takes kv head p // n_rep (contiguous repetition).
    expanded = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return expanded.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


class SuperlinearExpAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: SuperlinearExpConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )
        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        if hasattr(config, "head_dim") and config.head_dim is not None:
            self.head_dim = config.head_dim
        else:
            # FIX: previously divided by `self.num_attention_heads`, an attribute that is never
            # assigned (the head count is stored in `self.num_heads`), so this fallback branch
            # raised AttributeError whenever the config carried no explicit `head_dim`.
            self.head_dim = config.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.is_causal = True

        # s_proj produces the "search" states used by the span-attention subclasses.
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.s_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.head_dim * self.num_heads, self.hidden_size, bias=config.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        # position_embeddings: Tuple[torch.Tensor, torch.Tensor], #TODO
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        search_states = self.s_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # (B, S, H*D) -> (B, H, S, D)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        search_states = search_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if past_key_value is not None:
            cache_kwargs = {"cache_position": cache_position} if cache_position is not None else None
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        kv_len = key_states.shape[-2]
        dropout_p = self.attention_dropout if self.training else 0.0

        # Backwards-compatibility: accept precomputed 4D additive masks.
        if attention_mask is not None and attention_mask.dim() == 4:
            causal_mask = attention_mask[:, :, :, :kv_len]
            if query_states.device.type == "cuda":
                query_states = query_states.contiguous()
                key_states = key_states.contiguous()
                value_states = value_states.contiguous()
            attn_output = torch.nn.functional.scaled_dot_product_attention(
                query_states,
                key_states,
                value_states,
                attn_mask=causal_mask,
                dropout_p=dropout_p,
                is_causal=False,
            )
        else:
            # 2D masks are interpreted as key padding masks (1 = attend, 0 = pad).
            key_padding_mask = None
            if attention_mask is not None:
                if attention_mask.dim() != 2:
                    raise ValueError(
                        f"Unsupported attention_mask rank {attention_mask.dim()} for SuperlinearExpAttention"
                    )
                key_padding_mask = attention_mask[:, :kv_len].to(torch.bool)

            if key_padding_mask is None:
                # No padding -> avoid passing a dense (L,S) mask so SDPA can stay on flash/mem-efficient paths.
                if q_len > 1:
                    if kv_len == q_len:
                        attn_mask = None
                        is_causal = True
                    else:
                        # Chunked prefill: bottom-right-aligned causal mask over the full KV length.
                        is_causal = False
                        if causal_lower_right is None:
                            diagonal_offset = kv_len - q_len
                            attn_mask = torch.tril(
                                torch.ones(q_len, kv_len, device=query_states.device, dtype=torch.bool),
                                diagonal=diagonal_offset,
                            )
                        else:
                            attn_mask = causal_lower_right(q_len, kv_len)
                else:
                    attn_mask = None
                    is_causal = False
                attn_output = torch.nn.functional.scaled_dot_product_attention(
                    query_states,
                    key_states,
                    value_states,
                    attn_mask=attn_mask,
                    dropout_p=dropout_p,
                    is_causal=is_causal,
                )
            elif q_len == 1:
                # Decoding: causal masking is unnecessary (K/V contain only the prefix), so we can apply the
                # key padding mask via broadcasting without materializing (B, 1, L, S).
                attn_mask = key_padding_mask[:, None, None, :]
                if query_states.device.type == "cuda":
                    query_states = query_states.contiguous()
                    key_states = key_states.contiguous()
                    value_states = value_states.contiguous()
                attn_output = torch.nn.functional.scaled_dot_product_attention(
                    query_states,
                    key_states,
                    value_states,
                    attn_mask=attn_mask,
                    dropout_p=dropout_p,
                    is_causal=False,
                )
            else:
                # Prefill with padding: drop padded tokens and run SDPA mask-free on the trimmed sequences.
                attn_output = query_states.new_zeros((bsz, self.num_heads, q_len, self.head_dim))
                for batch_idx in range(bsz):
                    mask_b = key_padding_mask[batch_idx]
                    valid_len = int(mask_b.sum().item())
                    if valid_len == 0:
                        continue
                    # NOTE(review): last-position validity is used to infer left padding —
                    # assumes no right-and-left mixed padding; confirm against callers.
                    left_padded = bool(mask_b[-1].item())
                    if left_padded:
                        kv_start = kv_len - valid_len
                    else:
                        kv_start = 0
                    kv_end = kv_start + valid_len
                    if kv_len == q_len:
                        q_start, q_end = kv_start, kv_end
                        q_b = query_states[batch_idx : batch_idx + 1, :, q_start:q_end, :]
                    else:
                        q_b = query_states[batch_idx : batch_idx + 1, :, :, :]
                    k_b = key_states[batch_idx : batch_idx + 1, :, kv_start:kv_end, :]
                    v_b = value_states[batch_idx : batch_idx + 1, :, kv_start:kv_end, :]
                    if q_b.shape[-2] <= 1:
                        attn_mask = None
                        is_causal = False
                    elif k_b.shape[-2] == q_b.shape[-2]:
                        attn_mask = None
                        is_causal = True
                    else:
                        is_causal = False
                        if causal_lower_right is None:
                            diagonal_offset = k_b.shape[-2] - q_b.shape[-2]
                            attn_mask = torch.tril(
                                torch.ones(q_b.shape[-2], k_b.shape[-2], device=query_states.device, dtype=torch.bool),
                                diagonal=diagonal_offset,
                            )
                        else:
                            attn_mask = causal_lower_right(q_b.shape[-2], k_b.shape[-2])
                    out_b = torch.nn.functional.scaled_dot_product_attention(
                        q_b,
                        k_b,
                        v_b,
                        attn_mask=attn_mask,
                        dropout_p=dropout_p,
                        is_causal=is_causal,
                    )
                    # Scatter the per-sample result back into the padded output tensor.
                    if kv_len == q_len:
                        attn_output[batch_idx : batch_idx + 1, :, q_start:q_end, :] = out_b
                    else:
                        attn_output[batch_idx : batch_idx + 1, :, :, :] = out_b

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)
        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


# Adapted from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Jamba
#class JambaFlashAttention2(JambaAttention):
class SuperlinearExpFlashAttention2(SuperlinearExpAttention):
    """
    Jamba flash attention module. This module inherits from `JambaAttention` as the weights of the module stays
    untouched.
    The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ):
        bsz, q_len, _ = hidden_states.size()

        # NOTE: unlike the parent class, this path does not compute `search_states` (s_proj).
        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if past_key_value is not None:
            cache_kwargs = {"cache_position": cache_position} if cache_position is not None else None
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        dropout_rate = 0.0 if not self.training else self.attention_dropout

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in float16 just to be sure everything works as expected.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reashape to the expected shape for Flash Attention
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        # For CUDA graph compatibility during decode (q_len == 1), don't pass attention_mask
        # as it triggers dynamic operations (torch.nonzero) in _upad_input that are incompatible
        # with CUDA graph capture. For decode, is_causal=False is sufficient since we only
        # attend to past positions.
        # Decode (q_len == 1): drop the mask so flash-attn skips _upad_input's dynamic
        # ops, and disable is_causal since a single query only sees past positions.
        fa_attention_mask = None if q_len == 1 else attention_mask
        fa_is_causal = self.is_causal if q_len > 1 else False

        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            fa_attention_mask,
            q_len,
            dropout=dropout_rate,
            sliding_window=getattr(self.config, "sliding_window", None),
            is_causal=fa_is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        #attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


# Adapted from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Jamba
#class JambaSdpaAttention(JambaAttention):
class SuperlinearExpSdpaAttention(SuperlinearExpAttention):
    """
    Jamba attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `JambaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    # Adapted from SuperlinearExpAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """SDPA-backed attention forward.

        Returns a tuple of (attn_output, attn_weights, past_key_value); attn_weights is
        always None on the SDPA path (SDPA cannot expose them), so `output_attentions=True`
        falls back to the eager parent implementation.
        """
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "SuperlinearExpModel is using SuperlinearExpSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            # NOTE(review): `cache_position` is not forwarded to the eager fallback even
            # though this forward received it — confirm the parent can operate without it.
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # (bsz, q_len, hidden) -> (bsz, heads, q_len, head_dim)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if past_key_value is not None:
            cache_kwargs = {"cache_position": cache_position} if cache_position is not None else None
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Expand K/V heads so they match the query head count (GQA -> MHA layout).
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        kv_len = key_states.shape[-2]
        dropout_p = self.attention_dropout if self.training else 0.0

        # Handle both 2D and 4D attention masks (same logic as parent SuperlinearExpAttention)
        if attention_mask is not None and attention_mask.dim() == 4:
            # 4D mask case: slice the (additive) mask down to the cached KV length.
            causal_mask = attention_mask[:, :, :, :kv_len]
            if query_states.device.type == "cuda":
                # Make inputs contiguous before SDPA with an explicit attn_mask on CUDA
                # (non-contiguous inputs + custom mask can hit backend issues).
                query_states = query_states.contiguous()
                key_states = key_states.contiguous()
                value_states = value_states.contiguous()
            attn_output = torch.nn.functional.scaled_dot_product_attention(
                query_states,
                key_states,
                value_states,
                attn_mask=causal_mask,
                dropout_p=dropout_p,
                is_causal=False,
            )
        else:
            # 2D mask or no mask case
            # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d
            # For chunked prefill (q_len != kv_len), create explicit causal mask using cache_position
            # so queries attend to all previous KV cache entries, not just within the current chunk
            if self.is_causal and cache_position is not None and q_len != kv_len and q_len > 1:
                # Create causal mask: query at cache_position[i] attends to KV positions 0..cache_position[i]
                # Shape: (1, 1, q_len, kv_len)
                causal_mask = cache_position.view(1, 1, -1, 1) >= torch.arange(kv_len, device=cache_position.device).view(1, 1, 1, -1)
                causal_mask = torch.where(causal_mask, 0.0, float('-inf')).to(query_states.dtype)
                attn_output = torch.nn.functional.scaled_dot_product_attention(
                    query_states,
                    key_states,
                    value_states,
                    attn_mask=causal_mask,
                    dropout_p=dropout_p,
                    is_causal=False,
                )
            else:
                # Plain causal (or fully unmasked decode) path: let SDPA build the mask.
                is_causal = True if self.is_causal and attention_mask is None and q_len > 1 else False
                attn_output = torch.nn.functional.scaled_dot_product_attention(
                    query_states,
                    key_states,
                    value_states,
                    attn_mask=None,
                    dropout_p=dropout_p,
                    is_causal=is_causal,
                )

        # (bsz, heads, q_len, head_dim) -> (bsz, q_len, heads*head_dim)
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)
        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


class EagerSpanAttention(SuperlinearExpAttention):
    """
    Span attention variant that reuses span search logits for gating.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Span-attention forward (K/V expanded to match query heads).

        Returns (attn_output, None, past_key_value) — attention weights are never produced.
        """
        if output_attentions:
            logger.warning_once("EagerSpanAttention does not return attention weights; output_attentions is ignored.")

        bsz, q_len, _ = hidden_states.size()

        # Q/K/V projections plus a dedicated "search" projection used for span selection.
        query_states = self.q_proj(hidden_states)
        search_states = self.s_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        search_states = search_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if past_key_value is not None:
            cache_kwargs = {"cache_position": cache_position} if cache_position is not None else None
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # This (non-GQA) variant expands K/V heads up front, so all kernels below
        # see an MHA layout.
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        # Reconstruct absolute cache positions when the caller did not provide them.
        if cache_position is None:
            if past_key_value is not None and q_len > 1 and hasattr(past_key_value, "get_seq_length"):
                # Prefill on top of an existing cache: this chunk occupies the last q_len slots.
                end_pos = int(past_key_value.get_seq_length(self.layer_idx))
                start_pos = end_pos - q_len
                cache_position = torch.arange(
                    start_pos,
                    end_pos,
                    device=hidden_states.device,
                    dtype=torch.int64,
                )
            else:
                cache_position = torch.arange(q_len, device=hidden_states.device, dtype=torch.int64)
        else:
            cache_position = cache_position.to(device=hidden_states.device, dtype=torch.int64).view(-1)

        # Reduce the HF attention mask to a per-key boolean mask (True where the
        # additive 4D mask is zero, i.e. the position is attendable).
        span_attn_mask = None
        if attention_mask is not None:
            if attention_mask.dim() == 4:
                span_attn_mask = attention_mask[:, 0, -1, : key_states.shape[-2]] == 0
            elif attention_mask.dim() == 2:
                span_attn_mask = attention_mask[:, : key_states.shape[-2]].to(torch.bool)
            else:
                raise ValueError(f"Unsupported attention_mask rank {attention_mask.dim()} for EagerSpanAttention")

        # Single-token inference decode may use a dedicated staged kernel when configured.
        decode_kernel = getattr(self.config, "decode_kernel", None)
        if (not self.training) and q_len == 1 and decode_kernel is not None:
            if decode_kernel == "staged":
                attn_output = decode_span_attention_staged(
                    search_states,
                    query_states,
                    key_states,
                    value_states,
                    cache_position=cache_position,
                    attention_mask=span_attn_mask,
                    sw_index=self.config.span_attention_sw_index,
                    topk=self.config.span_attention_num_spans,
                    backward_factor=self.config.span_attention_backward_factor,
                    forward_factor=self.config.span_attention_forward_factor,
                    span_power=getattr(self.config, "span_attention_span_power", 0.5),
                    search_power=getattr(self.config, "span_attention_search_power", None),
                    inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
                )
            elif decode_kernel == "staged-gqa":
                attn_output = decode_span_attention_staged_gqa_kernel_v2(
                    search_states,
                    query_states,
                    key_states,
                    value_states,
                    cache_position=cache_position,
                    attention_mask=span_attn_mask,
                    sw_index=self.config.span_attention_sw_index,
                    topk=self.config.span_attention_num_spans,
                    # K/V were already repeated above, so run the kernel in MHA mode.
                    enable_gqa=False,
                    backward_factor=self.config.span_attention_backward_factor,
                    forward_factor=self.config.span_attention_forward_factor,
                    span_power=getattr(self.config, "span_attention_span_power", 0.5),
                    search_power=getattr(self.config, "span_attention_search_power", None),
                    inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
                    force_mode=getattr(self.config, "_span_attention_decode_mode", None),
                )
            else:
                raise ValueError(
                    f"Unsupported decode_kernel={decode_kernel!r} for span attention; "
                    "expected one of None, 'staged', 'staged-gqa'."
                )
        else:
            # Prefill / training path: fused span attention over the whole chunk.
            attn_output = full_span_attention_fused_with_search_values(
                search_states,
                query_states,
                key_states,
                value_states,
                cache_position=cache_position,
                attention_mask=span_attn_mask,
                sw_index=self.config.span_attention_sw_index,
                num_spans=self.config.span_attention_num_spans,
                backward_factor=self.config.span_attention_backward_factor,
                forward_factor=self.config.span_attention_forward_factor,
                span_power=getattr(self.config, "span_attention_span_power", 0.5),
                search_power=getattr(self.config, "span_attention_search_power", None),
                inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)
        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


class EagerSpanAttentionGQA(SuperlinearExpAttention):
    """
    Span attention variant that reuses span search logits for gating,
    using grouped-query attention (GQA) without repeating K/V heads.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Span-attention forward keeping K/V in grouped-query layout (no repeat_kv).

        Returns (attn_output, None, past_key_value) — attention weights are never produced.
        """
        if output_attentions:
            logger.warning_once("EagerSpanAttentionGQA does not return attention weights; output_attentions is ignored.")

        bsz, q_len, _ = hidden_states.size()

        # Q/K/V projections plus a dedicated "search" projection used for span selection.
        query_states = self.q_proj(hidden_states)
        search_states = self.s_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        search_states = search_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        # NOTE: no repeat_kv here — the GQA-aware kernels consume K/V with
        # num_key_value_heads directly.
        if past_key_value is not None:
            cache_kwargs = {"cache_position": cache_position} if cache_position is not None else None
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Reconstruct absolute cache positions when the caller did not provide them.
        if cache_position is None:
            if past_key_value is not None and q_len > 1 and hasattr(past_key_value, "get_seq_length"):
                # Prefill on top of an existing cache: this chunk occupies the last q_len slots.
                end_pos = int(past_key_value.get_seq_length(self.layer_idx))
                start_pos = end_pos - q_len
                cache_position = torch.arange(
                    start_pos,
                    end_pos,
                    device=hidden_states.device,
                    dtype=torch.int64,
                )
            else:
                cache_position = torch.arange(q_len, device=hidden_states.device, dtype=torch.int64)
        else:
            cache_position = cache_position.to(device=hidden_states.device, dtype=torch.int64).view(-1)

        # Reduce the HF attention mask to a per-key boolean mask (True = attendable).
        span_attn_mask = None
        if attention_mask is not None:
            if attention_mask.dim() == 4:
                span_attn_mask = attention_mask[:, 0, -1, : key_states.shape[-2]] == 0
            elif attention_mask.dim() == 2:
                span_attn_mask = attention_mask[:, : key_states.shape[-2]].to(torch.bool)
            else:
                raise ValueError(f"Unsupported attention_mask rank {attention_mask.dim()} for EagerSpanAttentionGQA")

        # Single-token inference decode may use a dedicated staged kernel when configured.
        decode_kernel = getattr(self.config, "decode_kernel", None)
        if (not self.training) and q_len == 1 and decode_kernel is not None:
            # True when K/V genuinely have fewer heads than Q (grouped-query layout).
            enable_gqa = key_states.shape[1] != query_states.shape[1]
            if decode_kernel == "staged":
                if enable_gqa:
                    raise ValueError(
                        "decode_kernel='staged' requires MHA layout (K/V heads == Q heads). "
                        "Use decode_kernel='staged-gqa' for GQA."
                    )
                attn_output = decode_span_attention_staged(
                    search_states,
                    query_states,
                    key_states,
                    value_states,
                    cache_position=cache_position,
                    attention_mask=span_attn_mask,
                    sw_index=self.config.span_attention_sw_index,
                    topk=self.config.span_attention_num_spans,
                    backward_factor=self.config.span_attention_backward_factor,
                    forward_factor=self.config.span_attention_forward_factor,
                    span_power=getattr(self.config, "span_attention_span_power", 0.5),
                    search_power=getattr(self.config, "span_attention_search_power", None),
                    inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
                )
            elif decode_kernel == "staged-gqa":
                attn_output = decode_span_attention_staged_gqa_kernel_v2(
                    search_states,
                    query_states,
                    key_states,
                    value_states,
                    cache_position=cache_position,
                    attention_mask=span_attn_mask,
                    sw_index=self.config.span_attention_sw_index,
                    topk=self.config.span_attention_num_spans,
                    enable_gqa=enable_gqa,
                    backward_factor=self.config.span_attention_backward_factor,
                    forward_factor=self.config.span_attention_forward_factor,
                    span_power=getattr(self.config, "span_attention_span_power", 0.5),
                    search_power=getattr(self.config, "span_attention_search_power", None),
                    inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
                    force_mode=getattr(self.config, "_span_attention_decode_mode", None),
                )
            else:
                raise ValueError(
                    f"Unsupported decode_kernel={decode_kernel!r} for span attention; "
                    "expected one of None, 'staged', 'staged-gqa'."
                )
        else:
            # Prefill / training path: GQA-aware fused span attention over the whole chunk.
            attn_output = full_span_attention_fused_with_search_values_gqa(
                search_states,
                query_states,
                key_states,
                value_states,
                cache_position=cache_position,
                attention_mask=span_attn_mask,
                sw_index=self.config.span_attention_sw_index,
                num_spans=self.config.span_attention_num_spans,
                backward_factor=self.config.span_attention_backward_factor,
                forward_factor=self.config.span_attention_forward_factor,
                span_power=getattr(self.config, "span_attention_span_power", 0.5),
                search_power=getattr(self.config, "span_attention_search_power", None),
                inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)
        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


class FlexSpanAttention(SuperlinearExpAttention):
    """
    Flex-attention span variant that fuses span search, sliding-window flex attention, and span aggregation.

    Expects a prebuilt sliding-window block mask to avoid re-creating it in every layer.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        sw_block_mask=None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """FlexAttention-based span forward (K/V expanded to match query heads).

        `sw_block_mask` is an optional prebuilt sliding-window block mask; when None
        it is built here per call via `build_sw_blockmask`.
        Returns (attn_output, None, past_key_value).
        """
        if output_attentions:
            logger.warning_once("FlexSpanAttention does not return attention weights; output_attentions is ignored.")

        bsz, q_len, _ = hidden_states.size()

        # Q/K/V projections plus a dedicated "search" projection used for span selection.
        query_states = self.q_proj(hidden_states)
        search_states = self.s_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        search_states = search_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if past_key_value is not None:
            cache_kwargs = {"cache_position": cache_position} if cache_position is not None else None
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Non-GQA variant: expand K/V heads up front to MHA layout.
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        # Reconstruct absolute cache positions when the caller did not provide them.
        if cache_position is None:
            if past_key_value is not None and q_len > 1 and hasattr(past_key_value, "get_seq_length"):
                # Prefill on top of an existing cache: this chunk occupies the last q_len slots.
                end_pos = int(past_key_value.get_seq_length(self.layer_idx))
                start_pos = end_pos - q_len
                cache_position = torch.arange(
                    start_pos,
                    end_pos,
                    device=hidden_states.device,
                    dtype=torch.int64,
                )
            else:
                cache_position = torch.arange(q_len, device=hidden_states.device, dtype=torch.int64)
        else:
            cache_position = cache_position.to(device=hidden_states.device, dtype=torch.int64).view(-1)

        # Reduce the HF attention mask to a per-key boolean mask (True = attendable).
        span_attn_mask = None
        if attention_mask is not None:
            if attention_mask.dim() == 4:
                span_attn_mask = attention_mask[:, 0, -1, : key_states.shape[-2]] == 0
            elif attention_mask.dim() == 2:
                span_attn_mask = attention_mask[:, : key_states.shape[-2]].to(torch.bool)
            else:
                raise ValueError(f"Unsupported attention_mask rank {attention_mask.dim()} for FlexSpanAttention")

        # Build the sliding-window block mask if the caller didn't supply a shared one.
        if sw_block_mask is None:
            sw_block_mask = build_sw_blockmask(
                bsz,
                q_len,
                key_states.shape[-2],
                sw_index=self.config.span_attention_sw_index,
                attention_mask=span_attn_mask,
                cache_position=cache_position,
                device=hidden_states.device,
                search_power=getattr(self.config, "span_attention_search_power", None),
                inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
            )

        # Single-token inference decode may use a dedicated staged kernel when configured.
        decode_kernel = getattr(self.config, "decode_kernel", None)
        if (not self.training) and q_len == 1 and decode_kernel is not None:
            if decode_kernel == "staged":
                attn_output = decode_span_attention_staged(
                    search_states,
                    query_states,
                    key_states,
                    value_states,
                    cache_position=cache_position,
                    attention_mask=span_attn_mask,
                    sw_index=self.config.span_attention_sw_index,
                    topk=self.config.span_attention_num_spans,
                    backward_factor=self.config.span_attention_backward_factor,
                    forward_factor=self.config.span_attention_forward_factor,
                    span_power=getattr(self.config, "span_attention_span_power", 0.5),
                    search_power=getattr(self.config, "span_attention_search_power", None),
                    inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
                )
            elif decode_kernel == "staged-gqa":
                attn_output = decode_span_attention_staged_gqa_kernel_v2(
                    search_states,
                    query_states,
                    key_states,
                    value_states,
                    cache_position=cache_position,
                    attention_mask=span_attn_mask,
                    sw_index=self.config.span_attention_sw_index,
                    topk=self.config.span_attention_num_spans,
                    # NOTE(review): K/V were already expanded via repeat_kv above, so heads
                    # match here; EagerSpanAttention passes enable_gqa=False for the same
                    # layout — confirm the kernel treats group size 1 identically.
                    enable_gqa=True,
                    backward_factor=self.config.span_attention_backward_factor,
                    forward_factor=self.config.span_attention_forward_factor,
                    span_power=getattr(self.config, "span_attention_span_power", 0.5),
                    search_power=getattr(self.config, "span_attention_search_power", None),
                    inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
                    force_mode=getattr(self.config, "_span_attention_decode_mode", None),
                )
            else:
                raise ValueError(
                    f"Unsupported decode_kernel={decode_kernel!r} for span attention; "
                    "expected one of None, 'staged', 'staged-gqa'."
                )
        else:
            # Prefill / training path: fused span-search + sliding-window FlexAttention.
            attn_output = fused_prefill_with_swflex(
                search_states,
                query_states,
                key_states,
                value_states,
                cache_pos=cache_position,
                attention_mask=span_attn_mask,
                sw_block_mask=sw_block_mask,
                sw_index=self.config.span_attention_sw_index,
                topk=self.config.span_attention_num_spans,
                backward_factor=self.config.span_attention_backward_factor,
                forward_factor=self.config.span_attention_forward_factor,
                span_power=getattr(self.config, "span_attention_span_power", 0.5),
                search_power=getattr(self.config, "span_attention_search_power", None),
                inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)
        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


class FlexSpanAttentionGQA(SuperlinearExpAttention):
    """
    Flex-attention span variant that keeps K/V in grouped-query layout (no head repetition).

    This uses `fused_prefill_with_swflex_gqa` which combines:
    - span search (GQA-aware),
    - sliding-window FlexAttention with `enable_gqa=True`,
    - fused span attention that reuses the SW output/LSE.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        sw_block_mask=None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """FlexAttention-based span forward in grouped-query layout (no repeat_kv).

        `sw_block_mask` is an optional prebuilt sliding-window block mask; when None
        it is built here per call via `build_sw_blockmask`.
        Returns (attn_output, None, past_key_value).
        """
        if output_attentions:
            logger.warning_once(
                "FlexSpanAttentionGQA does not return attention weights; output_attentions is ignored."
            )

        bsz, q_len, _ = hidden_states.size()

        # Q/K/V projections plus a dedicated "search" projection used for span selection.
        query_states = self.q_proj(hidden_states)
        search_states = self.s_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        search_states = search_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        # NOTE: no repeat_kv — the GQA-aware kernels consume K/V with
        # num_key_value_heads directly.
        if past_key_value is not None:
            cache_kwargs = {"cache_position": cache_position} if cache_position is not None else None
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Reconstruct absolute cache positions when the caller did not provide them.
        if cache_position is None:
            if past_key_value is not None and q_len > 1 and hasattr(past_key_value, "get_seq_length"):
                # Prefill on top of an existing cache: this chunk occupies the last q_len slots.
                end_pos = int(past_key_value.get_seq_length(self.layer_idx))
                start_pos = end_pos - q_len
                cache_position = torch.arange(
                    start_pos,
                    end_pos,
                    device=hidden_states.device,
                    dtype=torch.int64,
                )
            else:
                cache_position = torch.arange(q_len, device=hidden_states.device, dtype=torch.int64)
        else:
            cache_position = cache_position.to(device=hidden_states.device, dtype=torch.int64).view(-1)

        # Reduce the HF attention mask to a per-key boolean mask (True = attendable).
        span_attn_mask = None
        if attention_mask is not None:
            if attention_mask.dim() == 4:
                span_attn_mask = attention_mask[:, 0, -1, : key_states.shape[-2]] == 0
            elif attention_mask.dim() == 2:
                span_attn_mask = attention_mask[:, : key_states.shape[-2]].to(torch.bool)
            else:
                raise ValueError(f"Unsupported attention_mask rank {attention_mask.dim()} for FlexSpanAttentionGQA")

        # Build the sliding-window block mask if the caller didn't supply a shared one.
        if sw_block_mask is None:
            sw_block_mask = build_sw_blockmask(
                bsz,
                q_len,
                key_states.shape[-2],
                sw_index=self.config.span_attention_sw_index,
                attention_mask=span_attn_mask,
                cache_position=cache_position,
                device=hidden_states.device,
                search_power=getattr(self.config, "span_attention_search_power", None),
                inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
            )

        # Single-token inference decode may use a dedicated staged kernel when configured.
        decode_kernel = getattr(self.config, "decode_kernel", None)
        if (not self.training) and q_len == 1 and decode_kernel is not None:
            # True when K/V genuinely have fewer heads than Q (grouped-query layout).
            enable_gqa = key_states.shape[1] != query_states.shape[1]
            if decode_kernel == "staged":
                if enable_gqa:
                    raise ValueError(
                        "decode_kernel='staged' requires MHA layout (K/V heads == Q heads). "
                        "Use decode_kernel='staged-gqa' for GQA."
                    )
                attn_output = decode_span_attention_staged(
                    search_states,
                    query_states,
                    key_states,
                    value_states,
                    cache_position=cache_position,
                    attention_mask=span_attn_mask,
                    sw_index=self.config.span_attention_sw_index,
                    topk=self.config.span_attention_num_spans,
                    backward_factor=self.config.span_attention_backward_factor,
                    forward_factor=self.config.span_attention_forward_factor,
                    span_power=getattr(self.config, "span_attention_span_power", 0.5),
                    search_power=getattr(self.config, "span_attention_search_power", None),
                    inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
                )
            elif decode_kernel == "staged-gqa":
                attn_output = decode_span_attention_staged_gqa_kernel_v2(
                    search_states,
                    query_states,
                    key_states,
                    value_states,
                    cache_position=cache_position,
                    attention_mask=span_attn_mask,
                    sw_index=self.config.span_attention_sw_index,
                    topk=self.config.span_attention_num_spans,
                    enable_gqa=enable_gqa,
                    backward_factor=self.config.span_attention_backward_factor,
                    forward_factor=self.config.span_attention_forward_factor,
                    span_power=getattr(self.config, "span_attention_span_power", 0.5),
                    search_power=getattr(self.config, "span_attention_search_power", None),
                    inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
                    force_mode=getattr(self.config, "_span_attention_decode_mode", None),
                )
            else:
                raise ValueError(
                    f"Unsupported decode_kernel={decode_kernel!r} for span attention; "
                    "expected one of None, 'staged', 'staged-gqa'."
                )
        else:
            # Prefill / training path: GQA-aware fused span-search + sliding-window FlexAttention.
            attn_output = fused_prefill_with_swflex_gqa(
                search_states,
                query_states,
                key_states,
                value_states,
                cache_pos=cache_position,
                attention_mask=span_attn_mask,
                sw_block_mask=sw_block_mask,
                sw_index=self.config.span_attention_sw_index,
                topk=self.config.span_attention_num_spans,
                backward_factor=self.config.span_attention_backward_factor,
                forward_factor=self.config.span_attention_forward_factor,
                span_power=getattr(self.config, "span_attention_span_power", 0.5),
                search_power=getattr(self.config, "span_attention_search_power", None),
                inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2),
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)
        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


class BlockSpanAttention(SuperlinearExpAttention):
    """
    Triton sliding-window span variant that replaces FlexAttention SW with a block-of-queries Triton kernel.
""" def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: logger.warning_once("BlockSpanAttention does not return attention weights; output_attentions is ignored.") if fused_prefill_with_swtriton is None: raise ImportError( "BlockSpanAttention requires the local `span_attention_with_block_query_kernels` package.\n" "Ensure the repo root is on PYTHONPATH (or install it) to use attn_implementation='block-span'." ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) search_states = self.s_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) search_states = search_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if past_key_value is not None: cache_kwargs = {"cache_position": cache_position} if cache_position is not None else None key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if cache_position is None: if past_key_value is not None and q_len > 1 and hasattr(past_key_value, "get_seq_length"): end_pos = int(past_key_value.get_seq_length(self.layer_idx)) start_pos = end_pos - q_len cache_position = torch.arange( start_pos, end_pos, 
device=hidden_states.device, dtype=torch.int64, ) else: cache_position = torch.arange(q_len, device=hidden_states.device, dtype=torch.int64) else: cache_position = cache_position.to(device=hidden_states.device, dtype=torch.int64).view(-1) span_attn_mask = None if attention_mask is not None: if attention_mask.dim() == 4: span_attn_mask = attention_mask[:, 0, -1, : key_states.shape[-2]] == 0 elif attention_mask.dim() == 2: span_attn_mask = attention_mask[:, : key_states.shape[-2]].to(torch.bool) else: raise ValueError(f"Unsupported attention_mask rank {attention_mask.dim()} for BlockSpanAttention") decode_kernel = getattr(self.config, "decode_kernel", None) if (not self.training) and q_len == 1 and decode_kernel is not None: if decode_kernel == "staged": attn_output = decode_span_attention_staged( search_states, query_states, key_states, value_states, cache_position=cache_position, attention_mask=span_attn_mask, sw_index=self.config.span_attention_sw_index, topk=self.config.span_attention_num_spans, backward_factor=self.config.span_attention_backward_factor, forward_factor=self.config.span_attention_forward_factor, span_power=getattr(self.config, "span_attention_span_power", 0.5), search_power=getattr(self.config, "span_attention_search_power", None), inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2), ) elif decode_kernel == "staged-gqa": attn_output = decode_span_attention_staged_gqa_kernel_v2( search_states, query_states, key_states, value_states, cache_position=cache_position, attention_mask=span_attn_mask, sw_index=self.config.span_attention_sw_index, topk=self.config.span_attention_num_spans, enable_gqa=True, backward_factor=self.config.span_attention_backward_factor, forward_factor=self.config.span_attention_forward_factor, span_power=getattr(self.config, "span_attention_span_power", 0.5), search_power=getattr(self.config, "span_attention_search_power", None), inv_search_power_int=getattr(self.config, 
"span_attention_inv_search_power_int", 2), force_mode=getattr(self.config, "_span_attention_decode_mode", None), ) else: raise ValueError( f"Unsupported decode_kernel={decode_kernel!r} for span attention; " "expected one of None, 'staged', 'staged-gqa'." ) else: attn_output = fused_prefill_with_swtriton( search_states, query_states, key_states, value_states, cache_pos=cache_position, attention_mask=span_attn_mask, sw_index=self.config.span_attention_sw_index, topk=self.config.span_attention_num_spans, backward_factor=self.config.span_attention_backward_factor, forward_factor=self.config.span_attention_forward_factor, span_power=getattr(self.config, "span_attention_span_power", 0.5), search_power=getattr(self.config, "span_attention_search_power", None), inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2), ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value class BlockSpanAttentionGQA(SuperlinearExpAttention): """ Triton sliding-window span variant that keeps K/V in grouped-query layout (no head repetition). 
""" def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: logger.warning_once("BlockSpanAttentionGQA does not return attention weights; output_attentions is ignored.") if fused_prefill_with_swtriton_bucketed_gqa is None: raise ImportError( "BlockSpanAttentionGQA requires the local `span_attention_with_block_query_kernels` package.\n" "Ensure the repo root is on PYTHONPATH (or install it) to use attn_implementation='block-span-gqa'." ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) search_states = self.s_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) search_states = search_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if past_key_value is not None: cache_kwargs = {"cache_position": cache_position} if cache_position is not None else None key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) if cache_position is None: if past_key_value is not None and q_len > 1 and hasattr(past_key_value, "get_seq_length"): end_pos = int(past_key_value.get_seq_length(self.layer_idx)) start_pos = end_pos - q_len cache_position = torch.arange( start_pos, end_pos, device=hidden_states.device, dtype=torch.int64, ) else: cache_position = torch.arange(q_len, 
device=hidden_states.device, dtype=torch.int64) else: cache_position = cache_position.to(device=hidden_states.device, dtype=torch.int64).view(-1) span_attn_mask = None if attention_mask is not None: if attention_mask.dim() == 4: span_attn_mask = attention_mask[:, 0, -1, : key_states.shape[-2]] == 0 elif attention_mask.dim() == 2: span_attn_mask = attention_mask[:, : key_states.shape[-2]].to(torch.bool) else: raise ValueError(f"Unsupported attention_mask rank {attention_mask.dim()} for BlockSpanAttentionGQA") decode_kernel = getattr(self.config, "decode_kernel", None) if (not self.training) and q_len == 1 and decode_kernel is not None: enable_gqa = key_states.shape[1] != query_states.shape[1] if decode_kernel == "staged": if enable_gqa: raise ValueError( "decode_kernel='staged' requires MHA layout (K/V heads == Q heads). " "Use decode_kernel='staged-gqa' for GQA." ) attn_output = decode_span_attention_staged( search_states, query_states, key_states, value_states, cache_position=cache_position, attention_mask=span_attn_mask, sw_index=self.config.span_attention_sw_index, topk=self.config.span_attention_num_spans, backward_factor=self.config.span_attention_backward_factor, forward_factor=self.config.span_attention_forward_factor, span_power=getattr(self.config, "span_attention_span_power", 0.5), search_power=getattr(self.config, "span_attention_search_power", None), inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2), ) elif decode_kernel == "staged-gqa": attn_output = decode_span_attention_staged_gqa_kernel_v2( search_states, query_states, key_states, value_states, cache_position=cache_position, attention_mask=span_attn_mask, sw_index=self.config.span_attention_sw_index, topk=self.config.span_attention_num_spans, enable_gqa=enable_gqa, backward_factor=self.config.span_attention_backward_factor, forward_factor=self.config.span_attention_forward_factor, span_power=getattr(self.config, "span_attention_span_power", 0.5), 
search_power=getattr(self.config, "span_attention_search_power", None), inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2), force_mode=getattr(self.config, "_span_attention_decode_mode", None), ) else: raise ValueError( f"Unsupported decode_kernel={decode_kernel!r} for span attention; " "expected one of None, 'staged', 'staged-gqa'." ) else: # Use the bucketed kernel for optimized prefill (2.8-3.3x faster than FlexAttention) attn_output = fused_prefill_with_swtriton_bucketed_gqa( search_states, query_states, key_states, value_states, cache_pos=cache_position, attention_mask=span_attn_mask, sw_index=self.config.span_attention_sw_index, topk=self.config.span_attention_num_spans, backward_factor=self.config.span_attention_backward_factor, forward_factor=self.config.span_attention_forward_factor, span_power=getattr(self.config, "span_attention_span_power", 0.5), search_power=getattr(self.config, "span_attention_search_power", None), inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2), ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value SUPERLINEAREXP_ATTENTION_CLASSES = { "eager": SuperlinearExpAttention, "sdpa": SuperlinearExpSdpaAttention, "flash_attention_2": SuperlinearExpFlashAttention2, "eager-span": EagerSpanAttention, "eager-span-gqa": EagerSpanAttentionGQA, "flex-span": FlexSpanAttention, "flex-span-gqa": FlexSpanAttentionGQA, "block-span": BlockSpanAttention, "block-span-gqa": BlockSpanAttentionGQA, } # Custom attention implementations supported by SuperlinearExp CUSTOM_ATTENTION_IMPLEMENTATIONS = frozenset(SUPERLINEAREXP_ATTENTION_CLASSES.keys()) # Copied from transformers.models.mamba.modeling_mamba2.Mamba2PreTrainedModel class SuperlinearExpPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a 
    simple interface for downloading and loading pretrained models.
    """

    config_class = SuperlinearExpConfig
    base_model_prefix = "backbone"
    _no_split_modules = ["SuperlinearExpBlock"]
    supports_gradient_checkpointing = True
    _is_stateful = True
    _supports_flash_attn_2 = False

    def get_correct_attn_implementation(self, requested_attention, is_init_check=False):
        """
        Override parent class to allow custom span attention implementations.

        SuperlinearExp supports additional attention implementations beyond the standard ones:
        - "eager-span": EagerSpanAttention (uses repeat_kv for GQA expansion)
        - "eager-span-gqa": EagerSpanAttentionGQA (GQA-native, no repeat_kv needed)
        - "flex-span": FlexSpanAttention (FlexAttention-based with GQA expansion)
        - "flex-span-gqa": FlexSpanAttentionGQA (FlexAttention-based, GQA-native)
        """
        # Custom implementations bypass the parent's validation entirely.
        if requested_attention in CUSTOM_ATTENTION_IMPLEMENTATIONS:
            return requested_attention
        return super().get_correct_attn_implementation(requested_attention, is_init_check)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, SuperlinearExpMamba2Mixer):
            module.A_log._no_weight_decay = True
            module.D._no_weight_decay = True

            # Sample dt log-uniformly in [time_step_min, time_step_max], then clamp
            # to the configured floor before inverting the softplus below.
            dt = torch.exp(
                torch.rand(self.config.mamba_num_heads)
                * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
                + math.log(self.config.time_step_min)
            ).clamp(min=self.config.time_step_floor)

            # # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
            inv_dt = dt + torch.log(-torch.expm1(-dt))
            with torch.no_grad():
                module.dt_bias.copy_(inv_dt)
            # Mark the bias so the nn.Linear branch below does not zero it again.
            module.dt_bias._no_reinit = True

        if isinstance(module, nn.Linear):
            if module.bias is not None:
                if not getattr(module.bias, "_no_reinit", False):
                    nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, std=self.config.initializer_range)

        # TODO: Check
        if self.config.rescale_prenorm_residual:
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            #   > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            #   > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            #   >   -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            for name, p in module.named_parameters():
                if name in ["out_proj.weight"]:
                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                    # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
                    # We need to reinit p since this code could be called multiple times
                    # Having just p *= scale would repeatedly scale it down
                    nn.init.kaiming_uniform_(p, a=math.sqrt(5))
                    with torch.no_grad():
                        p /= math.sqrt(self.config.num_hidden_layers)


@dataclass
# Copied from transformers.models.mamba.modeling_mamba2.Mamba2Output with MAMBA2->SuperlinearExp,Mamba2->SuperlinearExp
class SuperlinearExpOutput(ModelOutput):
    """
    Class for the SuperlinearExp model outputs.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        past_key_values (`HybridMambaAttentionDynamicCache`):
            The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
            avoid providing the old `input_ids`.

            Includes both the State space model state matrices after the selective scan, and the Convolutional states
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
""" last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[HybridMambaAttentionDynamicCache] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.mamba2.modeling_mamba2.MambaCausalLMOutput with Mamba2->SuperlinearExp class SuperlinearExpCausalLMOutput(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`HybridMambaAttentionDynamicCache`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. Includes both the State space model state matrices after the selective scan, and the Convolutional states hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[HybridMambaAttentionDynamicCache] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None SUPERLINEAREXP_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`SuperlinearExpConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SUPERLINEAREXP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): Indices of input sequence tokens in the vocabulary. If `cache_params.seqlen_offset>0`, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. position_ids (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. cache_params (`HybridMambaAttentionDynamicCache`, *optional*): If passed along, the model uses the previous state in all the blocks (which will give the output for the `input_ids` provided as if the model add `state_input_ids + input_ids` as context). 
use_cache (`bool`, *optional*): If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*): The position of the current input in the cache. This is used to ensure that the cache is correctly updated. If `cache_params` is passed, `cache_position` should also be passed. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) """ @add_start_docstrings( "The bare SuperlinearExp Model transformer outputting raw hidden-states without any specific head on top.", SUPERLINEAREXP_START_DOCSTRING, ) class SuperlinearExpModel(SuperlinearExpPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([SuperlinearExpBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self.norm_f = SuperlinearExpRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) # Initialize weights and apply final processing self._register_load_state_dict_pre_hook(self.load_hook) self.post_init() def load_hook(self, state_dict, prefix, *args): for k in state_dict: if "embedding." 
in k: state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k) break # Backward-compat: if a checkpoint predates `s_proj`, initialize it from `q_proj`. for layer_idx, layer in enumerate(self.layers): if layer.block_type != "attention": continue q_weight_key = f"{prefix}layers.{layer_idx}.mixer.q_proj.weight" s_weight_key = f"{prefix}layers.{layer_idx}.mixer.s_proj.weight" if q_weight_key in state_dict and s_weight_key not in state_dict: state_dict[s_weight_key] = state_dict[q_weight_key] q_bias_key = f"{prefix}layers.{layer_idx}.mixer.q_proj.bias" s_bias_key = f"{prefix}layers.{layer_idx}.mixer.s_proj.bias" if q_bias_key in state_dict and s_bias_key not in state_dict: state_dict[s_bias_key] = state_dict[q_bias_key] def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, new_embeddings): self.embeddings = new_embeddings @add_start_docstrings_to_model_forward(SUPERLINEAREXP_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SuperlinearExpOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, past_key_values: Optional[HybridMambaAttentionDynamicCache] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> Union[Tuple, SuperlinearExpOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # use_cache = use_cache if use_cache is not None else self.config.use_cache use_cache = 
use_cache if use_cache is not None else (self.config.use_cache if not self.training else False) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False if cache_params is None and past_key_values is not None: cache_params = past_key_values # From zamba_modeling.py if use_cache and cache_params is None: logger.warning_once( "SuperlinearExp requires an initialized `SuperlinearExpHybridDynamicCache` to return a cache. None was " "provided, so no cache will be returned." ) hidden_states = inputs_embeds if cache_position is None: if cache_params is not None and hasattr(cache_params, "get_seq_length") and hidden_states.shape[1] > 1: start_pos = int(cache_params.get_seq_length()) cache_position = torch.arange( start_pos, start_pos + hidden_states.shape[1], device=hidden_states.device, dtype=torch.int64, ) else: cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device, dtype=torch.int64) else: cache_position = cache_position.to(device=hidden_states.device, dtype=torch.int64).view(-1) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position) mamba_mask = self._update_mamba_mask(attention_mask, cache_position) sw_block_mask = None if self.config._attn_implementation in ("flex-span", "flex-span-gqa"): # Compute kv_len correctly for chunked processing: # - If cache exists, kv_len = current_cache_len + new_seq_len # - Otherwise, kv_len = cache_position[-1] + 1 if cache_params is not None and hasattr(cache_params, 
"get_seq_length"): current_cache_len = cache_params.get_seq_length() new_seq_len = hidden_states.shape[1] kv_len = current_cache_len + new_seq_len elif causal_mask is not None: kv_len = causal_mask.shape[-1] else: kv_len = int(cache_position[-1].item()) + 1 span_attn_mask = None if causal_mask is not None: if causal_mask.dim() == 4: span_attn_mask = causal_mask[:, 0, -1, :kv_len] == 0 elif causal_mask.dim() == 2: span_attn_mask = causal_mask[:, :kv_len].to(torch.bool) else: raise ValueError(f"Unsupported attention_mask rank {causal_mask.dim()} for FlexSpanAttention") sw_block_mask = build_sw_blockmask( hidden_states.shape[0], hidden_states.shape[1], kv_len, sw_index=self.config.span_attention_sw_index, attention_mask=span_attn_mask, cache_position=cache_position, device=hidden_states.device, search_power=getattr(self.config, "span_attention_search_power", None), inv_search_power_int=getattr(self.config, "span_attention_inv_search_power_int", 2), ) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None # Until HERE for layer_idx, mixer_block in enumerate(self.layers): # Depending on the layer type we opt for 2D base attention mask (Mamba) or 4D causal mask (Attention) if mixer_block.block_type == "mamba": layer_mask = mamba_mask elif mixer_block.block_type == "attention": layer_mask = causal_mask elif mixer_block.block_type in ["mlp", "moe"]: layer_mask = None else: raise ValueError(f"Invalid block_type: {self.block_type}") if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: if mixer_block.block_type == "attention" and isinstance( mixer_block.mixer, (FlexSpanAttention, FlexSpanAttentionGQA) ): def custom_forward(*inputs): return mixer_block(*inputs, attention_mask=layer_mask, sw_block_mask=sw_block_mask) hidden_states = self._gradient_checkpointing_func(custom_forward, hidden_states, cache_params, cache_position) else: hidden_states = 
self._gradient_checkpointing_func( mixer_block.__call__, hidden_states, cache_params, cache_position, layer_mask ) else: hidden_states = mixer_block( hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=layer_mask, sw_block_mask=sw_block_mask, ) # TODO: Store attentions # if output_attentions: # if layer_outputs[1] is not None: # # append attentions only of attention layers. Mamba layers return `None` as the attention weights # all_self_attns += (layer_outputs[1],) # TODO (Check): should it happen before the forward pass? # if output_hidden_states: # all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.norm_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None) return SuperlinearExpOutput( last_hidden_state=hidden_states, past_key_values=cache_params if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Copied from transformers.models.jamba.modeling_jamba.JambaModel._update_causal_mask def _update_causal_mask(self, attention_mask, input_tensor, cache_position): # We only need a key padding mask (2D). Causal masking is handled inside the attention kernels # (SDPA via `is_causal`/`CausalBias`, span attention via `cache_position`). if attention_mask is None: return None if attention_mask.dim() == 4: return attention_mask if attention_mask.dim() != 2: raise ValueError(f"Unsupported attention_mask rank {attention_mask.dim()} in SuperlinearExpModel") # If there is no padding, avoid passing a mask entirely. This prevents accidentally forcing SDPA # into dense mask paths (and keeps span attention from seeing an unnecessary (B,1,L,S) tensor). # # For decode (seq_len == 1): Return the mask so batched generation with padding works correctly. # The CUDA graph caller is responsible for using a static buffer. 
# For prefill: Skip the `.all()` check to avoid CPU-GPU sync, return the mask as-is. seq_len = input_tensor.shape[1] if seq_len == 1: # Decode mode: return the 2D mask for proper padding handling return attention_mask # For prefill, check if all ones (no padding) - but avoid this check for CUDA graph # by only doing it when we have more than 1 token if bool(attention_mask.to(torch.bool).all()): return None return attention_mask def _update_mamba_mask(self, attention_mask, cache_position): """ No need for zeroing states when 1. Cached forward 2. Attending to all inputs """ mamba_mask = attention_mask # Use cache_position shape to detect decode mode (1 element) and avoid CPU sync from cache_position[0] > 0. is_decode = cache_position is not None and cache_position.numel() == 1 if is_decode: return None if attention_mask is None: return None all_ones = bool((attention_mask == 1).all()) return None if all_ones else attention_mask @add_start_docstrings( """ The SUPERLINEAREXP Model transformer with a language modeling head on top (linear layer with weights not tied to the input embeddings). """, SUPERLINEAREXP_START_DOCSTRING, ) class SuperlinearExpForCausalLM(SuperlinearExpPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.backbone = SuperlinearExpModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Full-model CUDA graph state (for decode with seq_len=1) self._cuda_graph: Optional[torch.cuda.CUDAGraph] = None # Two-graph strategy: capture separate decode graphs for SDPA vs span attention to # avoid computing both paths every step while keeping CUDA-graph-safe fixed shapes. 
self._cuda_graph_sdpa: Optional[torch.cuda.CUDAGraph] = None self._cuda_graph_span: Optional[torch.cuda.CUDAGraph] = None self._static_input_ids: Optional[torch.Tensor] = None self._static_cache_position: Optional[torch.Tensor] = None self._static_output: Optional["SuperlinearExpCausalLMOutput"] = None self._static_output_sdpa: Optional["SuperlinearExpCausalLMOutput"] = None self._static_output_span: Optional["SuperlinearExpCausalLMOutput"] = None self._graph_cache_params: Optional[HybridMambaAttentionDynamicCache] = None self._graph_batch_size: Optional[int] = None self._graph_max_seq_len: Optional[int] = None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.backbone.get_input_embeddings() def set_input_embeddings(self, new_embeddings): return self.backbone.set_input_embeddings(new_embeddings) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_decoder(self): return self.model def set_decoder(self, decoder): self.model = decoder def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, max_cache_len=None, **kwargs, ): # Copy from https://github.com/huggingface/transformers/blob/main/src/transformers/models/jamba/modeling_jamba.py # Overwitten -- uses `cache_params` as opposed to `past_key_values` empty_past_kv = past_key_values is None # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens # Exception 1: when passing input_embeds, input_ids may be missing entries # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here # Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case. 
        # (we can't check exception 3 while compiling)
        if not empty_past_kv:
            if (
                inputs_embeds is not None  # Exception 1
                or cache_position[-1] >= input_ids.shape[1]  # Exception 3
            ):
                input_ids = input_ids[:, -cache_position.shape[0] :]
            elif input_ids.shape[1] != cache_position.shape[0]:  # Default case (the "else", a no op, is Exception 2)
                input_ids = input_ids[:, cache_position]
        else:
            # When CUDA graph is enabled, use StaticCache for fixed memory addresses
            # This is required because CUDA graphs require stable tensor shapes/addresses
            use_cuda_graph = getattr(self.config, 'enable_cuda_graph', False)
            if use_cuda_graph:
                # Use max_position_embeddings as the cache size for CUDA graph compatibility
                # This ensures the cache can handle any sequence up to the model's max length
                if max_cache_len is None:
                    max_cache_len = self.config.max_position_embeddings
                past_key_values = HybridMambaAttentionStaticCache(
                    config=self.config,
                    batch_size=input_ids.shape[0],
                    max_seq_len=max_cache_len,
                    dtype=self.dtype,
                    device=self.device,
                )
            else:
                past_key_values = HybridMambaAttentionDynamicCache(
                    self.config, input_ids.shape[0], self.dtype, device=self.device
                )

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if not empty_past_kv:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and empty_past_kv:
            # TODO(pjin): workaround fix for properly extending inputs_embeds;
            # longer term, may be better handled elsewhere in .generate().
            if input_ids is not None and inputs_embeds.shape[1] < input_ids.shape[1]:
                new_token_embeds = self.get_input_embeddings()(input_ids[:, inputs_embeds.shape[1] :])
                inputs_embeds = torch.cat([inputs_embeds, new_token_embeds], dim=1)
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids.contiguous()}  # `contiguous()` needed for compilation use cases

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": use_cache,
                "attention_mask": attention_mask,
                "logits_to_keep": self.config.num_logits_to_keep,
                "cache_position": cache_position,
            }
        )
        return model_inputs

    @add_start_docstrings_to_model_forward(SUPERLINEAREXP_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SuperlinearExpCausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
        past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,  # for now we need this for generation
    ) -> Union[Tuple, SuperlinearExpCausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Accept either keyword for the cache; `cache_params` takes precedence.
        if cache_params is None and past_key_values is not None:
            cache_params = past_key_values

        # Determine if this is a decode step (seq_len == 1) eligible for CUDA graph
        seq_len = (
            input_ids.shape[1] if input_ids is not None else (inputs_embeds.shape[1] if inputs_embeds is not None else 0)
        )
        batch_size = (
            input_ids.shape[0] if input_ids is not None else (inputs_embeds.shape[0] if inputs_embeds is not None else 0)
        )
        is_static_cache = cache_params is not None and isinstance(cache_params, HybridMambaAttentionStaticCache)
        if is_static_cache:
            # Default to full-buffer returns for decode; may be overridden below for eager decode.
            cache_params._decode_return_slices = False
            cache_params._decode_slice_len = None
        # The graph path only supports single-token inference decode with a static cache
        # and explicit cache_position, and cannot return attentions/hidden states.
        use_cuda_graph = (
            getattr(self.config, "enable_cuda_graph", False)
            and seq_len == 1
            and not self.training
            and labels is None
            and input_ids is not None
            and is_static_cache
            and cache_position is not None
            and not output_attentions
            and not output_hidden_states
        )
        if use_cuda_graph:
            return self._forward_with_cuda_graph(
                input_ids=input_ids,
                cache_params=cache_params,
                cache_position=cache_position,
                attention_mask=attention_mask,
                batch_size=batch_size,
            )

        # Standard forward path (prefill or when CUDA graph is disabled)
        model_outputs = self.backbone(
            input_ids,
            cache_params=cache_params,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            use_cache=use_cache,
            cache_position=cache_position,
            attention_mask=attention_mask,
        )
        hidden_states = model_outputs[0]

        logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (logits,) + model_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SuperlinearExpCausalLMOutput(
            loss=loss,
            logits=logits,
            past_key_values=model_outputs.past_key_values,
            hidden_states=model_outputs.hidden_states,
            attentions=model_outputs.attentions,
        )

    def _forward_with_cuda_graph(
        self,
        input_ids: torch.LongTensor,
        cache_params: HybridMambaAttentionDynamicCache,
        cache_position: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        batch_size: int,
    ) -> SuperlinearExpCausalLMOutput:
        """Execute forward pass using full-model CUDA graph for decode (seq_len=1)."""
        max_seq_len = None
        if hasattr(cache_params, "max_seq_len"):
            max_seq_len = int(cache_params.max_seq_len)
        elif hasattr(self.config, "max_position_embeddings"):
            max_seq_len = int(self.config.max_position_embeddings)
        else:
            # Fallback: use current cache length + large buffer
            current_cache_len = cache_params.get_seq_length()
            max_seq_len = int(current_cache_len + 8192)

        # Check if we need to (re-)capture the graph(s)
        need_capture = (
            self._cuda_graph is None
            or self._cuda_graph_sdpa is None
            or self._cuda_graph_span is None
            or self._graph_batch_size != batch_size
            or self._graph_cache_params is not cache_params
            or getattr(self, "_graph_attention_mask_provided", None) != (attention_mask is not None)
            or self._graph_max_seq_len != max_seq_len
        )

        if need_capture:
            # Store reference to cache for graph validity check
            self._graph_cache_params = cache_params
            self._graph_batch_size = batch_size
            self._graph_attention_mask_provided = attention_mask is not None
            self._graph_max_seq_len = max_seq_len

            # Allocate static tensors for graph capture
            self._static_input_ids = input_ids.clone()
            self._static_cache_position = cache_position.clone()
            self._static_attention_mask_positions = None
            if attention_mask is not None:
                if attention_mask.shape[1] > max_seq_len:
                    raise ValueError(
                        f"attention_mask length {attention_mask.shape[1]} exceeds StaticCache max_seq_len {max_seq_len}"
                    )
                # IMPORTANT: initialize with zeros so positions beyond the current sequence
                # are treated as masked-out (future tokens) when using StaticCache (full K/V buffers).
                self._static_attention_mask = torch.zeros(
                    batch_size,
                    max_seq_len,
                    dtype=attention_mask.dtype,
                    device=input_ids.device,
                )
                # Copy actual mask content (left-padded)
                mask_len = attention_mask.shape[1]
                self._static_attention_mask[:, :mask_len].copy_(attention_mask)
            else:
                # If no attention_mask is provided, we can avoid dense masking entirely in the
                # span-kernel decode path (it uses cache_position to enforce the valid prefix).
                # We still build a static prefix mask buffer so the SDPA path can be captured
                # safely if ever needed, but by default we will capture/replay only the span graph.
                self._static_attention_mask = torch.zeros(
                    batch_size,
                    max_seq_len,
                    dtype=torch.bool,
                    device=input_ids.device,
                )
                self._static_attention_mask_positions = torch.arange(
                    max_seq_len,
                    dtype=self._static_cache_position.dtype,
                    device=input_ids.device,
                )
                prefix_mask = self._static_attention_mask_positions <= self._static_cache_position[0]
                self._static_attention_mask.copy_(prefix_mask.unsqueeze(0).expand(batch_size, -1))

            # Save all cache state before warmup/capture (warmup modifies cache in-place)
            saved_conv_states = cache_params.conv_states.clone()
            saved_ssm_states = cache_params.ssm_states.clone()
            saved_key_cache = [k.clone() if k is not None else None for k in cache_params.key_cache]
            saved_value_cache = [v.clone() if v is not None else None for v in cache_params.value_cache]

            def _restore_cache_state():
                # Roll the cache back to its pre-warmup contents (in place, same buffers).
                cache_params.conv_states.copy_(saved_conv_states)
                cache_params.ssm_states.copy_(saved_ssm_states)
                for i, (k, v) in enumerate(zip(saved_key_cache, saved_value_cache)):
                    if k is not None:
                        cache_params.key_cache[i].copy_(k)
                        cache_params.value_cache[i].copy_(v)

            def _warmup_for_mode(mode: str):
                # Run a few eager iterations so lazy kernels/allocations happen outside capture.
                _restore_cache_state()
                self.config._span_attention_decode_mode = mode
                for _ in range(3):
                    _ = self.backbone(
                        self._static_input_ids,
                        cache_params=cache_params,
                        inputs_embeds=None,
                        output_attentions=False,
                        output_hidden_states=False,
                        return_dict=True,
                        use_cache=True,
                        cache_position=self._static_cache_position,
                        attention_mask=(None if (attention_mask is None and mode == "span") else self._static_attention_mask),
                    )
                torch.cuda.synchronize()

            def _capture_for_mode(mode: str):
                # Capture one decode step into a CUDA graph; returns (graph, static output holder).
                _restore_cache_state()
                self.config._span_attention_decode_mode = mode
                graph = torch.cuda.CUDAGraph()
                with torch.cuda.graph(graph):
                    if self._static_attention_mask_positions is not None and not (attention_mask is None and mode == "span"):
                        # Recompute the prefix mask inside the graph so replay tracks
                        # the (static) cache_position buffer.
                        prefix_mask = self._static_attention_mask_positions <= self._static_cache_position[0]
                        self._static_attention_mask.copy_(prefix_mask.unsqueeze(0).expand(batch_size, -1))
                    backbone_out = self.backbone(
                        self._static_input_ids,
                        cache_params=cache_params,
                        inputs_embeds=None,
                        output_attentions=False,
                        output_hidden_states=False,
                        return_dict=True,
                        use_cache=True,
                        cache_position=self._static_cache_position,
                        attention_mask=(None if (attention_mask is None and mode == "span") else self._static_attention_mask),
                    )
                    hidden_states = backbone_out[0]
                    logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
                    static_output = SuperlinearExpCausalLMOutput(
                        loss=None,
                        logits=logits,
                        past_key_values=cache_params,
                        hidden_states=None,
                        attentions=None,
                    )
                torch.cuda.synchronize()
                return graph, static_output

            prev_mode = getattr(self.config, "_span_attention_decode_mode", None)
            try:
                # If caller did not provide an attention_mask (no padding), prefer capturing only the
                # span-kernel graph. This avoids constructing/propagating an all-ones padding mask and
                # prevents per-step syncs to select between SDPA vs span.
                _warmup_for_mode("span")
                self._cuda_graph_span, self._static_output_span = _capture_for_mode("span")
                if attention_mask is None:
                    # For compatibility, point the SDPA graph alias at the span graph.
                    self._cuda_graph_sdpa, self._static_output_sdpa = self._cuda_graph_span, self._static_output_span
                else:
                    _warmup_for_mode("sdpa")
                    self._cuda_graph_sdpa, self._static_output_sdpa = _capture_for_mode("sdpa")
            finally:
                # Restore whatever setting the caller had (typically None).
                self.config._span_attention_decode_mode = prev_mode

            # Backwards-compatible aliases used by some notebooks.
            self._cuda_graph = self._cuda_graph_span
            self._static_output = self._static_output_span

        # Copy input data to static buffers
        self._static_input_ids.copy_(input_ids)
        self._static_cache_position.copy_(cache_position)

        # Update attention mask in static buffer before replay (mask grows during generation)
        if attention_mask is not None and self._static_attention_mask is not None:
            mask_len = attention_mask.shape[1]
            if mask_len > self._static_attention_mask.shape[1]:
                raise ValueError(
                    f"attention_mask length {mask_len} exceeds static buffer length {self._static_attention_mask.shape[1]}"
                )
            self._static_attention_mask[:, :mask_len].copy_(attention_mask)

        sw_index = int(getattr(self.config, "span_attention_sw_index", 0))
        span_search_power = getattr(self.config, "span_attention_search_power", None)
        span_inv_search_power_int = getattr(self.config, "span_attention_inv_search_power_int", 2)
        window_len = window_len_from_sw_index(
            sw_index,
            search_power=span_search_power,
            inv_search_power_int=span_inv_search_power_int,
        )
        if attention_mask is None:
            # No padding: always replay the span graph. This avoids a per-token GPU->CPU sync
            # to compute prefix_len and keeps masking out of the hot path.
            use_sdpa_graph = False
        else:
            # Short prefixes fit entirely inside the sliding window, where the SDPA graph suffices.
            prefix_len = attention_mask.shape[1]
            use_sdpa_graph = prefix_len <= window_len
        if use_sdpa_graph:
            graph = self._cuda_graph_sdpa
            static_output = self._static_output_sdpa
        else:
            graph = self._cuda_graph_span
            static_output = self._static_output_span
        # Backwards-compatible aliases used by some notebooks.
        # Keep the legacy single-graph aliases pointing at whichever graph was
        # selected above (span vs. SDPA) so older notebooks that read
        # `_cuda_graph` / `_static_output` keep working.
        self._cuda_graph = graph
        self._static_output = static_output

        # Replay the captured graph: this re-runs the recorded kernels against the
        # static input/cache buffers populated above and writes fresh logits into
        # the static output object returned below.
        graph.replay()

        return static_output

    # =========================================================================
    # Cache Factory Methods & Class Attributes
    # =========================================================================

    # Expose cache classes for advanced users who need direct access
    static_cache_class = HybridMambaAttentionStaticCache
    dynamic_cache_class = HybridMambaAttentionDynamicCache

    def create_static_cache(
        self,
        batch_size: int,
        max_seq_len: int,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[str, torch.device]] = None,
    ) -> HybridMambaAttentionStaticCache:
        """
        Create a static cache for efficient long-context generation with CUDA graphs.

        Args:
            batch_size: Number of sequences in the batch.
            max_seq_len: Maximum sequence length the cache should support.
            dtype: Data type for cache tensors. Defaults to model's dtype.
            device: Device for cache tensors. Defaults to model's device.

        Returns:
            HybridMambaAttentionStaticCache: A pre-allocated static cache instance.

        Example:
            >>> model = AutoModelForCausalLM.from_pretrained("org/superlinear-exp", trust_remote_code=True)
            >>> cache = model.create_static_cache(batch_size=1, max_seq_len=4096)
            >>> outputs = model(input_ids, past_key_values=cache, use_cache=True)
        """
        # Fall back to the model's own dtype/device when the caller does not
        # pin them explicitly.
        return HybridMambaAttentionStaticCache(
            config=self.config,
            batch_size=batch_size,
            max_seq_len=max_seq_len,
            dtype=dtype if dtype is not None else self.dtype,
            device=device if device is not None else self.device,
        )

    def create_dynamic_cache(
        self,
        batch_size: int,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[str, torch.device]] = None,
    ) -> HybridMambaAttentionDynamicCache:
        """
        Create a dynamic cache that grows as needed during generation.

        Args:
            batch_size: Number of sequences in the batch.
            dtype: Data type for cache tensors. Defaults to model's dtype.
            device: Device for cache tensors. Defaults to model's device.

        Returns:
            HybridMambaAttentionDynamicCache: A dynamically-growing cache instance.

        Example:
            >>> model = AutoModelForCausalLM.from_pretrained("org/superlinear-exp", trust_remote_code=True)
            >>> cache = model.create_dynamic_cache(batch_size=1)
            >>> outputs = model(input_ids, past_key_values=cache, use_cache=True)
        """
        # Mirrors create_static_cache, minus the pre-allocated maximum length;
        # no max_seq_len is needed because the cache grows on demand.
        return HybridMambaAttentionDynamicCache(
            config=self.config,
            batch_size=batch_size,
            dtype=dtype if dtype is not None else self.dtype,
            device=device if device is not None else self.device,
        )