diff --git "a/modeling_fast_slm.py" "b/modeling_fast_slm.py" deleted file mode 100644--- "a/modeling_fast_slm.py" +++ /dev/null @@ -1,2401 +0,0 @@ -# coding=utf-8 -# Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved. -# -# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX -# and OPT implementations in this library. It has been modified from its -# original forms to accommodate minor architectural differences compared -# to GPT-NeoX and OPT used by the Meta AI team that trained the model. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch FastSLM model.""" -import inspect -import math -import copy -import warnings -from dataclasses import dataclass, field -from typing import Any, Dict, List, Optional, Tuple, Union -import time -from collections import OrderedDict -from functools import partial -import numpy as np -import os - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from torch import nn -from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss - -torch._inductor.config.max_autotune_gemm_backends = ["aten"] - -from transformers.activations import ACT2FN -from transformers.cache_utils import Cache, DynamicCache -from transformers.modeling_outputs import ( - MoeCausalLMOutputWithPast, - MoeModelOutputWithPast, - SequenceClassifierOutputWithPast, -) -from transformers.modeling_utils import PreTrainedModel - -try: - from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS - from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update -except ImportError: - pass - -from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_13 -from transformers.utils import ( - add_start_docstrings, - add_start_docstrings_to_model_forward, - is_flash_attn_greater_or_equal_2_10, - logging, - replace_return_docstrings, -) -from transformers.utils.import_utils import is_torch_fx_available -from .configuration_fast_slm import FastSLMConfig -from torch.utils.checkpoint import checkpoint - -import torch.distributed as dist -import math -import random - -from flash_attn import flash_attn_func, flash_attn_varlen_func -from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa - -_flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) - -from einops import rearrange, repeat, reduce, pack, unpack -from einops.layers.torch import Rearrange - -from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn -from mamba_ssm.ops.triton.selective_state_update import selective_state_update - -from causal_conv1d import causal_conv1d_fn, causal_conv1d_update - -from .fused_mha_with_cache import fused_mha_interface - -from .mamba2 import Mamba2 -from mamba_ssm.utils.generation import InferenceParams -from .delta_net import Cache as fla_cache -from .delta_net import DeltaNet -import torch._dynamo -torch._dynamo.config.suppress_errors = True - -from torch.cuda import CUDAGraph - -logger = 
logging.get_logger(__name__) - -_CONFIG_FOR_DOC = "FastSLMConfig" - - - -# Copied from transformers.models.llama.modeling_llama._get_unpad_data -def _get_unpad_data(attention_mask): - seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) - indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() - max_seqlen_in_batch = seqlens_in_batch.max().item() - cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) - return ( - indices, - cu_seqlens, - max_seqlen_in_batch, - ) - - -### Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->FastSLM -class FastSLMRMSNorm(nn.Module): - def __init__(self, hidden_size, learnable_weight=True, eps=1e-6): - """ - FastSLMRMSNorm is equivalent to T5LayerNorm - """ - super().__init__() - if learnable_weight: - self.weight = nn.Parameter(torch.ones(hidden_size)) - else: - self.weight = None - self.variance_epsilon = eps - - def forward(self, hidden_states): - input_dtype = hidden_states.dtype - hidden_states = hidden_states.to(torch.float32) - variance = hidden_states.pow(2).mean(-1, keepdim=True) - hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) - - if self.weight is not None: - return self.weight * hidden_states.to(input_dtype) - else: - return hidden_states.to(input_dtype) - -class LlamaRotaryEmbedding(nn.Module): - def __init__(self, config, dim, base=10000, device=None, scaling_factor=1.0): - super().__init__() - self.scaling_factor = scaling_factor - self.dim = dim - self.base = base - self.config = config - - self.rope_type = config.rope_type - - self.factor = 2 - - max_position_embeddings = self.config.max_position_embeddings - - if config.rope_type is None or config.rope_type == "default": - inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) - self.max_seq_len_cached = max_position_embeddings - - elif config.rope_type == 'ntk': - assert self.config.orig_max_position_embeddings is not None - orig_max_position_embeddings = self.config.orig_max_position_embeddings - - base = base * ((self.factor * max_position_embeddings / orig_max_position_embeddings) - (self.factor - 1)) ** (self.dim / (self.dim - 2)) - inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) - - self.max_seq_len_cached = orig_max_position_embeddings - - elif config.rope_type == 'dynamic_ntk': - inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) - self.original_inv_freq = inv_freq - self.max_seq_len_cached = self.config.orig_max_position_embeddings - - else: - raise ValueError(f"Not support rope_type: {config.rope_type}") - - self.register_buffer("inv_freq", inv_freq, persistent=False) - - - def _dynamic_frequency_update(self, position_ids, device): - """ - dynamic RoPE layers should recompute `inv_freq` in the following situations: - 1 - growing beyond the cached sequence length (allow scaling) - 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) - """ - - seq_len = torch.max(position_ids) + 1 - if seq_len > self.max_seq_len_cached: # growth - base = self.base * ((self.factor * seq_len / self.config.orig_max_position_embeddings) - (self.factor - 1)) ** (self.dim / (self.dim - 2)) - inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) - - self.register_buffer("inv_freq", inv_freq, persistent=False) - 
self.max_seq_len_cached = seq_len - - if seq_len < self.config.orig_max_position_embeddings and self.max_seq_len_cached > self.config.orig_max_position_embeddings: # reset - self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) - self.max_seq_len_cached = self.config.orig_max_position_embeddings - - - @torch.no_grad() - def forward(self, x, position_ids): - if self.rope_type == 'dynamic_ntk': - self._dynamic_frequency_update(position_ids, device=x.device) - - # x: [bs, num_attention_heads, seq_len, head_size] - inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) - position_ids_expanded = position_ids[:, None, :].float() - # Force float32 since bfloat16 loses precision on long contexts - # See https://github.com/huggingface/transformers/pull/29285 - device_type = x.device.type - device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" - with torch.autocast(device_type=device_type, enabled=False): - freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) - emb = torch.cat((freqs, freqs), dim=-1) - cos = emb.cos() - sin = emb.sin() - return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) - - -def rotate_half(x): - """Rotates half the hidden dims of the input.""" - x1 = x[..., : x.shape[-1] // 2] - x2 = x[..., x.shape[-1] // 2 :] - return torch.cat((-x2, x1), dim=-1) - - -def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): - """Applies Rotary Position Embedding to the query and key tensors. - - Args: - q (`torch.Tensor`): The query tensor. - k (`torch.Tensor`): The key tensor. - cos (`torch.Tensor`): The cosine part of the rotary embedding. - sin (`torch.Tensor`): The sine part of the rotary embedding. - position_ids (`torch.Tensor`, *optional*): - Deprecated and unused. - unsqueeze_dim (`int`, *optional*, defaults to 1): - The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and - sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note - that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and - k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes - cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have - the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. - Returns: - `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. - """ - cos = cos.unsqueeze(unsqueeze_dim) - sin = sin.unsqueeze(unsqueeze_dim) - if q is not None: - q_embed = (q * cos) + (rotate_half(q) * sin) - - else: - q_embed = None - - if k is not None: - k_embed = (k * cos) + (rotate_half(k) * sin) - else: - k_embed = None - return q_embed, k_embed - - -# Copied from transformers.models.llama.modeling_llama.repeat_kv -def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: - """ - This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, - num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) - """ - batch, num_key_value_heads, slen, head_dim = hidden_states.shape - if n_rep == 1: - return hidden_states - hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) - return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) - - - -class HybridMambaAttentionDynamicCache(DynamicCache): - """ - A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache - (which has a constant shape regardless of seq_len). - - This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states` - and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor - For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`, - while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors). - For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors), - while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`, - and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`. - """ - - def __init__(self, config, batch_size, dtype=torch.float16, device=None, layer_type=None): - self.dtype = dtype - # self.layers_block_type = config.layers_block_type - intermediate_size = config.mamba_expand * config.hidden_size - ssm_state_size = config.mamba_d_state - conv_kernel_size = config.mamba_d_conv - self.conv_states = [] - self.ssm_states = [] - - self.layer_type = layer_type - - for i in range(config.num_hidden_layers): - has_mamba_state = self.layer_type[i] == 'h' or self.layer_type[i] == 'm' - - if has_mamba_state: - if hasattr(config, 'conv_dim'): - conv_dim = config.conv_dim[str(i)] - else: - conv_dim = intermediate_size - self.conv_states += [ - torch.zeros(batch_size, conv_dim, conv_kernel_size, device=device, dtype=dtype) - ] - self.ssm_states += [ - torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=dtype) - ] - else: - self.conv_states += [torch.tensor([[]] * batch_size, device=device)] - self.ssm_states += [torch.tensor([[]] * batch_size, device=device)] - - self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] - self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] - - self.mamba_past_length = [0 for _ in range(config.num_hidden_layers)] - - def update( - self, - key_states: torch.Tensor, - value_states: torch.Tensor, - layer_idx: int, - cache_kwargs: Optional[Dict[str, Any]] = None, - ) -> Tuple[torch.Tensor, torch.Tensor]: - # Update the cache - if self.key_cache[layer_idx].shape[-1] == 0: - self.key_cache[layer_idx] = key_states - self.value_cache[layer_idx] = value_states - else: - self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2) - self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2) - - return self.key_cache[layer_idx], self.value_cache[layer_idx] - - def reorder_cache(self, beam_idx: torch.LongTensor): - """Reorders the cache for beam search, given the selected beam indices.""" - for layer_idx in range(len(self.key_cache)): - device = 
self.key_cache[layer_idx].device - self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) - device = self.value_cache[layer_idx].device - self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) - - device = self.conv_states[layer_idx].device - self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device)) - device = self.ssm_states[layer_idx].device - self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device)) - - def get_seq_length(self, layer_idx=None) -> int: - """Returns the sequence length of the cached states. A layer index can be optionally passed.""" - # take any layer that contains cache and not empty tensor - - if layer_idx is None: - max_mamba_len = max(self.mamba_past_length) - if max_mamba_len > 0: - return max_mamba_len - - else: - max_key_len = max(cache.shape[-2] for cache in self.key_cache) - return max_key_len - - if self.layer_type[layer_idx] == 'm': - return self.mamba_past_length[layer_idx] - - if self.key_cache[layer_idx].shape[-1] == 0: - return 0 - - return self.key_cache[layer_idx].shape[-2] - - # def get_max_length(self) -> Optional[int]: - # """Returns the maximum sequence length of the cached states. Cache does not have a maximum length.""" - # return None - - def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]: - raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.") - - @classmethod - def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache": - raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.") - - -@dataclass -class MambaCacheParams: - seqlen_offset: int = 0 - conv_states: Dict[int, torch.Tensor] = field(default_factory=dict) - ssm_states: Dict[int, torch.Tensor] = field(default_factory=dict) - - - -# Adapted from transformers.models.mistral.modeling_mistral.MistralAttention with Mistral->FastSLM -class FastSLMAttention(nn.Module): - """ - Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer - and "Generating Long Sequences with Sparse Transformers". - """ - - def __init__(self, config: FastSLMConfig, layer_idx: Optional[int] = None, input_hidden_size=None, output_hidden_size=None): - super().__init__() - self.config = config - self.layer_idx = layer_idx - if layer_idx is None: - logger.warning_once( - f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " - "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " - "when creating this class." 
- ) - - # self.hidden_size = config.hidden_size - self.hidden_size = config.attn_hidden_size if config.attn_hidden_size > 0 else config.hidden_size - self.num_heads = config.num_attention_heads - self.head_dim = self.hidden_size // self.num_heads - self.max_position_embeddings = config.max_position_embeddings - self.rope_theta = config.rope_theta - - self.kq_head_dim = config.kq_head_dim if config.kq_head_dim > 0 else self.head_dim - self.v_head_dim = config.v_head_dim if config.v_head_dim > 0 else self.head_dim - - self.num_key_value_heads = config.num_key_value_heads - self.num_key_value_groups = self.num_heads // self.num_key_value_heads - self.is_causal = True - self.attention_dropout = config.attention_dropout - - if (self.head_dim * self.num_heads) != self.hidden_size and self.kq_head_dim == self.head_dim: - raise ValueError( - f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" - f" and `num_heads`: {self.num_heads})." - ) - - self.q_proj = nn.Linear(self.hidden_size if input_hidden_size is None else input_hidden_size, self.num_heads * self.kq_head_dim, bias=False) - self.k_proj = nn.Linear(self.hidden_size if input_hidden_size is None else input_hidden_size, self.num_key_value_heads * self.kq_head_dim, bias=False) - self.v_proj = nn.Linear(self.hidden_size if input_hidden_size is None else input_hidden_size, self.num_key_value_heads * self.v_head_dim, bias=False) - - if output_hidden_size is None: - output_hidden_size = self.hidden_size - - self.o_proj = nn.Linear(self.num_heads * self.v_head_dim, output_hidden_size, bias=False) - - if self.config.kq_norm == "rms": - self.k_norm = FastSLMRMSNorm(self.kq_head_dim) - self.q_norm = FastSLMRMSNorm(self.kq_head_dim) - elif self.config.kq_norm == "none": - self.k_norm = None - self.q_norm = None - else: - raise NotImplementedError(f"Unknown kq_norm: {self.config.kq_norm}") - - if self.config.rope: - # print("===> Using Rotary Position Embedding") - self._init_rope() - - def _init_rope(self): - # assert 1==0, f"max_position_embeddings: {self.max_position_embeddings}" - self.rotary_emb = LlamaRotaryEmbedding( - config=self.config, - dim=self.kq_head_dim, - base=self.rope_theta, - device=torch.device("cuda"), - ) - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Cache] = None, - output_attentions: bool = False, - use_cache: bool = False, - # kv_proj_last_layer = None, - use_swa=False, - query_states = None, - key_states=None, - value_states=None, - **kwargs, - ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - raise NotImplementedError("FastSLMAttention is an abstract class. Use one of the subclasses.") - - - -# Adapted from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->FastSLM -class FastSLMFlashAttention2(FastSLMAttention): - """ - FastSLM flash attention module. This module inherits from `FastSLMAttention` as the weights of the module stays - untouched. The only required change would be on the forward pass where it needs to correctly call the public API of - flash attention and deal with padding tokens in case the input contains any of them. 
- """ - - # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. - # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. - # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). - self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() - - def forward( - self, - hidden_states: torch.Tensor = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Cache] = None, - output_attentions: bool = False, - use_cache: bool = False, - # kv_proj_last_layer = None, - use_swa=False, - query_states = None, - key_states=None, - value_states=None, - **kwargs, - ): - - if "padding_mask" in kwargs: - warnings.warn( - "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" - ) - - # overwrite attention_mask with padding_mask - attention_mask = kwargs.pop("padding_mask") - - bsz, q_len, _ = hidden_states.size() - - query_states = self.q_proj(hidden_states) - - query_states = query_states.view(bsz, q_len, self.num_heads, self.kq_head_dim).transpose(1, 2).contiguous() - - if self.q_norm is not None: - query_states = self.q_norm(query_states) - - # we do kq_norm first before rope according to - # https://github.com/huggingface/transformers/blob/6c1d0b069de22d7ed8aa83f733c25045eea0585d/src/transformers/models/cohere/modeling_cohere.py#L568 - if self.config.rope: - cos, sin = self.rotary_emb(hidden_states, position_ids) - query_states, _ = apply_rotary_pos_emb(query_states, None, cos, sin) - - - key_states = self.k_proj(hidden_states) - value_states = self.v_proj(hidden_states) - - key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.kq_head_dim).transpose(1, 2) - value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.v_head_dim).transpose(1, 2) - - if self.k_norm is not None: - key_states = self.k_norm(key_states) - - if self.config.rope: - _, key_states = apply_rotary_pos_emb(None, key_states, cos, sin) - - - kv_seq_len = key_states.shape[-2] - # if past_key_value is not None: - # if self.layer_idx is None: - # raise ValueError( - # f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " - # "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " - # "with a layer index." - # ) - # kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) - - use_sliding_windows = ( - _flash_supports_window_size - and getattr(self.config, "sliding_window", None) is not None - # and kv_seq_len > (self.config.sliding_window + self.config.num_memory_tokens if self.config.num_memory_tokens > 0 else self.config.sliding_window) - and kv_seq_len > self.config.sliding_window - and use_swa - ) - - if not _flash_supports_window_size: - logger.warning_once( - "The current flash attention version does not support sliding window attention, for a more memory efficient implementation" - " make sure to upgrade flash-attn library." 
- ) - - swa_processed_flag = False - if past_key_value is not None and use_cache: - kv_layer_idx = self.layer_idx - - cache_has_contents = past_key_value.get_seq_length(kv_layer_idx) > 0 - - if ( - getattr(self.config, "sliding_window", None) is not None - # and kv_seq_len > (self.config.sliding_window + self.config.num_memory_tokens if self.config.num_memory_tokens > 0 else self.config.sliding_window) - and kv_seq_len > self.config.sliding_window - and cache_has_contents - and use_swa - ): - slicing_tokens = 1 - self.config.sliding_window - - past_key = past_key_value[kv_layer_idx][0] - past_value = past_key_value[kv_layer_idx][1] - - past_key = past_key[:, :, slicing_tokens:, :].contiguous() - past_value = past_value[:, :, slicing_tokens:, :].contiguous() - - past_key_value.key_cache[kv_layer_idx] = past_key - past_key_value.value_cache[kv_layer_idx] = past_value - - if attention_mask is not None: - attention_mask = attention_mask[:, slicing_tokens:] - attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) - - swa_processed_flag = True - - key_states, value_states = past_key_value.update(key_states, value_states, kv_layer_idx) - - # repeat k/v heads if n_kv_heads < n_heads - key_states_no_repeat = key_states - value_states_no_repeat = value_states - - key_states = repeat_kv(key_states, self.num_key_value_groups) - value_states = repeat_kv(value_states, self.num_key_value_groups) - dropout_rate = 0.0 if not self.training else self.attention_dropout - - # In PEFT, usually we cast the layer norms in float32 for training stability reasons - # therefore the input hidden states gets silently casted in float32. Hence, we need - # cast them back in float16 just to be sure everything works as expected. - input_dtype = query_states.dtype - if input_dtype == torch.float32: - if torch.is_autocast_enabled(): - target_dtype = torch.get_autocast_gpu_dtype() - # Handle the case where the model is quantized - elif hasattr(self.config, "_pre_quantization_dtype"): - target_dtype = self.config._pre_quantization_dtype - else: - target_dtype = self.q_proj.weight.dtype - - logger.warning_once( - f"The input hidden states seems to be silently casted in float32, this might be related to" - f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" - f" {target_dtype}." 
- ) - - query_states = query_states.to(target_dtype) - key_states = key_states.to(target_dtype) - value_states = value_states.to(target_dtype) - - # Reshape to the expected shape for Flash Attention - query_states = query_states.transpose(1, 2) # (batch, slen, num_heads, head_dim) - key_states = key_states.transpose(1, 2) # (batch, slen, num_heads, head_dim) - value_states = value_states.transpose(1, 2) # (batch, slen, num_heads, head_dim) - - attn_output = self._flash_attention_forward( - query_states, - key_states, - value_states, - attention_mask, - q_len, - dropout=dropout_rate, - use_sliding_windows=use_sliding_windows and not swa_processed_flag, - ) - - v_dim = value_states.shape[-2] * value_states.shape[-1] - attn_output = attn_output.reshape(-1, q_len, v_dim).contiguous() - - attn_output = self.o_proj(attn_output) - - if not output_attentions: - attn_weights = None - - return attn_output, attn_weights, past_key_value, (key_states_no_repeat, value_states_no_repeat) - - def _flash_attention_forward( - self, - query_states, - key_states, - value_states, - attention_mask, - query_length, - dropout=0.0, - softmax_scale=None, - use_sliding_windows=False, - ): - """ - Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token, - first unpads the input, then computes the attention scores and pads the final attention scores. - - Args: - query_states (`torch.Tensor`): - Input query states to be passed to Flash Attention API - key_states (`torch.Tensor`): - Input key states to be passed to Flash Attention API - value_states (`torch.Tensor`): - Input value states to be passed to Flash Attention API - attention_mask (`torch.Tensor`): - The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the - position of padding tokens and 1 for the position of non-padding tokens. - dropout (`float`, *optional*): - Attention dropout - softmax_scale (`float`, *optional*): - The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim) - use_sliding_windows (`bool`, *optional*): - Whether to activate sliding window attention. - """ - if not self._flash_attn_uses_top_left_mask: - causal = self.is_causal - else: - # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
- causal = self.is_causal and query_length != 1 - - if attention_mask is not None: - batch_size = query_states.shape[0] - query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( - query_states, key_states, value_states, attention_mask, query_length - ) - - cu_seqlens_q, cu_seqlens_k = cu_seq_lens - max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens - - if not use_sliding_windows: - attn_output_unpad = flash_attn_varlen_func( - query_states, - key_states, - value_states, - cu_seqlens_q=cu_seqlens_q, - cu_seqlens_k=cu_seqlens_k, - max_seqlen_q=max_seqlen_in_batch_q, - max_seqlen_k=max_seqlen_in_batch_k, - dropout_p=dropout, - softmax_scale=softmax_scale, - causal=causal, - ) - else: - attn_output_unpad = flash_attn_varlen_func( - query_states, - key_states, - value_states, - cu_seqlens_q=cu_seqlens_q, - cu_seqlens_k=cu_seqlens_k, - max_seqlen_q=max_seqlen_in_batch_q, - max_seqlen_k=max_seqlen_in_batch_k, - dropout_p=dropout, - softmax_scale=softmax_scale, - causal=causal, - window_size=(self.config.sliding_window, self.config.sliding_window), - ) - - attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) - else: - if not use_sliding_windows: - attn_output = flash_attn_func( - query_states, - key_states, - value_states, - dropout, - softmax_scale=softmax_scale, - causal=causal, - ) - else: - attn_output = flash_attn_func( - query_states, - key_states, - value_states, - dropout, - softmax_scale=softmax_scale, - causal=causal, - window_size=(self.config.sliding_window, self.config.sliding_window), - ) - - return attn_output - - def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): - batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape - - # On the first iteration we need to properly re-create the padding mask - # by slicing it on the proper place - if kv_seq_len != attention_mask.shape[-1]: - attention_mask_num_tokens = attention_mask.shape[-1] - attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] - - indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) - - if not self.training and not type(key_layer) == torch.Tensor: ## this is for handling Mamba2 with output type - key_layer = torch.tensor(key_layer.clone()) - value_layer = torch.tensor(value_layer.clone()) - query_layer = torch.tensor(query_layer.clone()) - - key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) - value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) - - if query_length == kv_seq_len: - query_layer = index_first_axis( - query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k - ) - cu_seqlens_q = cu_seqlens_k - max_seqlen_in_batch_q = max_seqlen_in_batch_k - indices_q = indices_k - elif query_length == 1: - max_seqlen_in_batch_q = 1 - cu_seqlens_q = torch.arange( - batch_size + 1, dtype=torch.int32, device=query_layer.device - ) # There is a memcpy here, that is very bad. - indices_q = cu_seqlens_q[:-1] - query_layer = query_layer.squeeze(1) - else: - # The -q_len: slice assumes left padding. 
- attention_mask = attention_mask[:, -query_length:] - query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) - - return ( - query_layer, - key_layer, - value_layer, - indices_q, - (cu_seqlens_q, cu_seqlens_k), - (max_seqlen_in_batch_q, max_seqlen_in_batch_k), - ) - - - -class FastSLMSDPAAttention(nn.Module): - - def __init__(self, config, layer_idx: int, reuse_kv=False): - super().__init__() - self.config = config - self.layer_idx = layer_idx - self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) - self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads - self.scaling = self.head_dim**-0.5 - self.attention_dropout = config.attention_dropout - self.is_causal = True - - self.q_proj = nn.Linear( - config.hidden_size, config.num_attention_heads * self.head_dim, bias=False - ) - self.k_proj = nn.Linear( - config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False - ) - self.v_proj = nn.Linear( - config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False - ) - self.o_proj = nn.Linear( - config.num_attention_heads * self.head_dim, config.hidden_size, bias=False - ) - - self.sliding_window = self.config.sliding_window if self.layer_idx not in self.config.global_attn_idx else None - - self.rotary_emb = FastSLMRotaryEmbedding(config=config) - - def forward( - self, - hidden_states: torch.Tensor, - # position_embeddings: tuple[torch.Tensor, torch.Tensor], - attention_mask: Optional[torch.Tensor], - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Cache] = None, - **kwargs, - ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: - input_shape = hidden_states.shape[:-1] - hidden_shape = (*input_shape, -1, self.head_dim) - - query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) - key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) - value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) - - cos, sin = self.rotary_emb(hidden_states, position_ids) - query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) - - if past_key_value is not None: - past_seen_tokens = past_key_value.get_seq_length() - cache_position = torch.arange( - past_seen_tokens, past_seen_tokens + hidden_states.shape[1], device=hidden_states.device - ) - cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} - key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) - - attention_interface = ALL_ATTENTION_FUNCTIONS['flash_attention_2'] - - attn_output, attn_weights = attention_interface( - self, - query_states, - key_states, - value_states, - attention_mask, - dropout=0.0 if not self.training else self.attention_dropout, - scaling=self.scaling, - sliding_window=self.sliding_window, # diff with Llama - **kwargs, - ) - - attn_output = attn_output.reshape(*input_shape, -1).contiguous() - attn_output = self.o_proj(attn_output) - - return attn_output, attn_weights, past_key_value, (key_states, value_states) - - -class FastSLMRotaryEmbedding(nn.Module): - def __init__(self, config, device=None): - super().__init__() - # BC: "rope_type" was originally "type" - if hasattr(config, "rope_scaling") and config.rope_scaling is not None: - self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) - else: - self.rope_type = "default" - self.max_seq_len_cached = config.max_position_embeddings - 
self.original_max_seq_len = config.max_position_embeddings - - self.config = config - self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] - - inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) - self.register_buffer("inv_freq", inv_freq, persistent=False) - self.original_inv_freq = self.inv_freq - - @torch.no_grad() - @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) - def forward(self, x, position_ids): - inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) - position_ids_expanded = position_ids[:, None, :].float() - - device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" - with torch.autocast(device_type=device_type, enabled=False): # Force float32 - freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) - emb = torch.cat((freqs, freqs), dim=-1) - cos = emb.cos() * self.attention_scaling - sin = emb.sin() * self.attention_scaling - - return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) - - -class FastSLMFused_MHA(FastSLMAttention): - """ - FastSLM flash attention module. This module inherits from `FastSLMAttention` as the weights of the module stays - untouched. The only required change would be on the forward pass where it needs to correctly call the public API of - flash attention and deal with padding tokens in case the input contains any of them. - """ - - # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.fused_mha_interface = fused_mha_interface - - # self.init_kv_cache(max_batch_size=1, max_seq_len=8000) - - - def init_kv_cache(self, max_batch_size, max_seq_len, page_size=-1): - if hasattr(self, 'k_cache'): - del self.k_cache - del self.v_cache - - if hasattr(self, 'page_table') and self.page_table is not None: - del self.page_table - - import gc - gc.collect() - - torch.cuda.empty_cache() - - if page_size is not None and page_size > 0: - batch_max_pages = (max_seq_len + page_size - 1) // page_size - cache_max_pages = (max_batch_size * max_seq_len + page_size - 1) // page_size - self.k_cache = torch.zeros(cache_max_pages, page_size, self.num_key_value_heads, self.kq_head_dim).to(self.q_proj.weight) - self.v_cache = torch.zeros(cache_max_pages, page_size, self.num_key_value_heads, self.v_head_dim).to(self.q_proj.weight) - - self.page_table = torch.zeros(max_batch_size, batch_max_pages, device=self.q_proj.weight.device, dtype=torch.int32) - else: - self.k_cache = torch.zeros(max_batch_size, max_seq_len, self.num_key_value_heads, self.kq_head_dim).to(self.q_proj.weight) - self.v_cache = torch.zeros(max_batch_size, max_seq_len, self.num_key_value_heads, self.v_head_dim).to(self.q_proj.weight) - - self.page_table = None - - self.max_seq_len = max_seq_len - - - def reset_kv_cache(self): - self.k_cache = self.k_cache.zero_() - self.v_cache = self.v_cache.zero_() - - if self.page_table is not None: - self.page_table = self.page_table.zero_() - - - def forward( - self, - hidden_states: torch.Tensor = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Cache] = None, - output_attentions: bool = False, - use_cache: bool = False, - use_swa=False, - query_states = None, - key_states=None, - value_states=None, - **kwargs, - ): - - # print(f"Flash Attn - layer_idx: {self.layer_idx}, attn_mask is none: 
{attention_mask is None}") - # print(f"layer_idx: {self.layer_idx}, use_swq: {use_swa}") - if not hasattr(self, 'k_cache'): - self.init_kv_cache(max_batch_size=1, max_seq_len=8000) - - if "padding_mask" in kwargs: - warnings.warn( - "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" - ) - - attention_mask = kwargs.pop("padding_mask") - - bsz, q_len, _ = hidden_states.size() - - query_states = self.q_proj(hidden_states) - - query_states = query_states.view(bsz, q_len, self.num_heads, self.kq_head_dim).transpose(1, 2).contiguous() - - if self.q_norm is not None: - query_states = self.q_norm(query_states) - - # we do kq_norm first before rope according to - # https://github.com/huggingface/transformers/blob/6c1d0b069de22d7ed8aa83f733c25045eea0585d/src/transformers/models/cohere/modeling_cohere.py#L568 - if self.config.rope: - cos, sin = self.rotary_emb(hidden_states, position_ids) - query_states, _ = apply_rotary_pos_emb(query_states, None, cos, sin) - - key_states = self.k_proj(hidden_states) - value_states = self.v_proj(hidden_states) - - key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.kq_head_dim).transpose(1, 2) - value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.v_head_dim).transpose(1, 2) - - if self.k_norm is not None: - key_states = self.k_norm(key_states) - - if self.config.rope: - # cos, sin = self.rotary_emb(hidden_states, position_ids) - _, key_states = apply_rotary_pos_emb(None, key_states, cos, sin) - - key_states_no_repeat = key_states - value_states_no_repeat = value_states - - # Reashape to the expected shape for Flash Attention - query_states = query_states.transpose(1, 2) # (batch, slen, num_heads, head_dim) - key_states = key_states.transpose(1, 2) # (batch, slen, num_kv_heads, head_dim) - value_states = value_states.transpose(1, 2) # (batch, slen, num_kv_heads, head_dim) - - if self.k_cache.device != query_states.device: - self.k_cache = self.k_cache.to(query_states) - self.v_cache = self.v_cache.to(query_states) - - attn_output = self.fused_mha_interface( - query_states, - key_states, - value_states, - k_cache=self.k_cache, - v_cache=self.v_cache, - page_table=self.page_table, - max_seq_len=self.max_seq_len, - position_ids=position_ids, - ) - - v_dim = query_states.shape[-2] * value_states.shape[-1] - attn_output = attn_output.reshape(bsz, q_len, v_dim).contiguous() - - attn_output = self.o_proj(attn_output) - - if not output_attentions: - attn_weights = None - - return attn_output, attn_weights, past_key_value, (key_states_no_repeat, value_states_no_repeat) - - -JAMBA_ATTENTION_CLASSES = { - "flash_attention_2": FastSLMFlashAttention2, - "fused_mha": FastSLMFused_MHA, - "sdpa": FastSLMSDPAAttention, -} - -class FastSLMMLP(nn.Module): - def __init__(self, config: FastSLMConfig, layer_idx: int): - super().__init__() - self.config = config - self.act_fn_name = config.mlp_hidden_act - self.act_fn = ACT2FN[self.act_fn_name] - - if config.ffn_expand_ratio is not None: - self.ffn_dim = int(config.ffn_expand_ratio * config.hidden_size) // 128 * 128 - else: - self.ffn_dim = config.intermediate_size - - self.hidden_dim = config.hidden_size - - self.layer_idx = layer_idx - - if self.act_fn_name == "silu": - self.gate_proj = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False) - self.down_proj = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False) - self.up_proj = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False) - - - def forward(self, x): - if self.act_fn_name == 
"silu": - output = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) - elif self.act_fn_name == "relu2": - output = self.down_proj(self.act_fn(self.up_proj(x))) - else: - raise NotImplementedError(f"No such hidden_act: {self.act_fn_name}") - - return output - - -# Adapted from transformers.models.mixtral.modeling_mixtral.MixtralSparseMoeBlock with Mistral->FastSLM -class FastSLMSparseMoeBlock(nn.Module): - """ - This implementation is - strictly equivalent to standard MoE with full capacity (no - dropped tokens). It's faster since it formulates MoE operations - in terms of block-sparse operations to accomodate imbalanced - assignments of tokens to experts, whereas standard MoE either - (1) drop tokens at the cost of reduced performance or (2) set - capacity factor to number of experts and thus waste computation - and memory on padding. - """ - - def __init__(self, config: FastSLMConfig, num_experts: int, num_experts_per_tok: int, layer_idx: int): - super().__init__() - self.hidden_dim = config.hidden_size - self.ffn_dim = config.intermediate_size - - self.layer_idx = layer_idx - - # these values are decided on runtime depending on the layer index - self.num_experts = num_experts - self.top_k = num_experts_per_tok - - if num_experts > 1: - # expert routing - self.router = nn.Linear(self.hidden_dim, self.num_experts, bias=False) - else: - self.router = None - - self.experts = nn.ModuleList([FastSLMMLP(config, layer_idx=layer_idx) for _ in range(self.num_experts)]) - - def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ """ - if len(hidden_states.shape) == 3: - batch_size, sequence_length, hidden_dim = hidden_states.shape - bs_times_seq_len = batch_size * sequence_length - elif len(hidden_states.shape) == 2: - assert self.num_experts == 1 - bs_times_seq_len, hidden_dim = hidden_states.shape - else: - batch_size, sequence_length, _, hidden_dim = hidden_states.shape - bs_times_seq_len = batch_size * sequence_length - - if self.num_experts == 1: - # in this case we have a single MLP block and don't need to do any routing - final_hidden_states = self.experts[0](hidden_states) - - router_logits = torch.ones( - (bs_times_seq_len, 1), - device=hidden_states.device, - dtype=hidden_states.dtype, - requires_grad=hidden_states.requires_grad, - ) - return final_hidden_states, router_logits - - # in this case we have multiple experts and need to do routing - hidden_states = hidden_states.view(-1, hidden_dim) - # router_logits: (batch * sequence_length, n_experts) - router_logits = self.router(hidden_states) - routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) - routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) - # we cast back to the input dtype - routing_weights = routing_weights.to(hidden_states.dtype) - - final_hidden_states = torch.zeros( - (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device - ) - - # One hot encode the selected experts to create an expert mask - # this will be used to easily index which expert is going to be sollicitated - expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) - - # Loop over all available experts in the model and perform the computation on each expert - for expert_idx in range(self.num_experts): - expert_layer = self.experts[expert_idx] - idx, top_x = torch.where(expert_mask[expert_idx]) - - if top_x.shape[0] == 0: - continue - - # in torch it is faster to index 
using lists than torch tensors - top_x_list = top_x.tolist() - idx_list = idx.tolist() - - # Index the correct hidden states and compute the expert hidden state for - # the current expert. We need to make sure to multiply the output hidden - # states by `routing_weights` on the corresponding tokens (top-1 and top-2) - current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim) - current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None] - - # However `index_add_` only support torch tensors for indexing so we'll use - # the `top_x` tensor here. - final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype)) - - final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) - return final_hidden_states, router_logits - - - - - -class FastSLMAttentionDecoderLayer(nn.Module): - def __init__(self, config: FastSLMConfig, num_experts: int, layer_idx: int,): - super().__init__() - - self.config = config - - self.layer_idx = layer_idx - - self.self_attn = JAMBA_ATTENTION_CLASSES[config.attn_implementation](config, layer_idx) - - if self.config.intermediate_size > 0: - num_experts_per_tok = config.num_experts_per_tok if num_experts > 1 else 1 - self.moe = FastSLMSparseMoeBlock(config, num_experts=num_experts, num_experts_per_tok=num_experts_per_tok, layer_idx=layer_idx) - else: - self.moe = None - - self.input_layernorm = FastSLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.pre_moe_layernorm = FastSLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: Optional[bool] = False, - output_router_logits: Optional[bool] = False, - use_cache: Optional[bool] = False, - use_swa=False, - **kwargs, - ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: - if "padding_mask" in kwargs: - warnings.warn( - "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" - ) - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, sequence_length)` where padding elements are indicated by 0. - past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_router_logits (`bool`, *optional*): - Whether or not to return the logits of all the routers. They are useful for computing the router loss, and - should not be returned during inference. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). 
- """ - - if position_ids is not None and position_ids.shape[1] != hidden_states.shape[1]: - position_ids = torch.arange(hidden_states.shape[1], device=hidden_states.device).unsqueeze(0) - - residual = hidden_states - - if self.input_layernorm is not None: - hidden_states = self.input_layernorm(hidden_states) - - hidden_states, self_attn_weights, present_key_value, current_kv = self.self_attn( - hidden_states=hidden_states, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - use_swa=use_swa, - ) - - hidden_states = residual + hidden_states - - if self.moe is not None: - residual = hidden_states - if self.pre_moe_layernorm is not None: - hidden_states = self.pre_moe_layernorm(hidden_states) - hidden_states, router_logits = self.moe(hidden_states) - - hidden_states = residual + hidden_states - else: - router_logits = None - - outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (present_key_value,) - - if output_router_logits: - outputs += (router_logits,) - - outputs += (current_kv,) - - return outputs - - - -class FFNDecoderLayer(nn.Module): - def __init__(self, config: FastSLMConfig, num_experts: int, layer_idx: int): - super().__init__() - - self.config = config - - self.layer_idx = layer_idx - - num_experts_per_tok = config.num_experts_per_tok if num_experts > 1 else 1 - self.moe = FastSLMSparseMoeBlock(config, num_experts=num_experts, num_experts_per_tok=num_experts_per_tok, layer_idx=layer_idx) - - self.pre_moe_layernorm = FastSLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: Optional[bool] = False, - output_router_logits: Optional[bool] = False, - use_cache: Optional[bool] = False, - use_swa=False, - **kwargs, - ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: - if "padding_mask" in kwargs: - warnings.warn( - "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" - ) - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, sequence_length)` where padding elements are indicated by 0. - past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_router_logits (`bool`, *optional*): - Whether or not to return the logits of all the routers. They are useful for computing the router loss, and - should not be returned during inference. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). 
- """ - - residual = hidden_states - if self.pre_moe_layernorm is not None: - hidden_states = self.pre_moe_layernorm(hidden_states) - hidden_states, router_logits = self.moe(hidden_states) - - hidden_states = residual + hidden_states - - outputs = (hidden_states,) - - if output_attentions: - outputs += (None,) - - if use_cache: - outputs += (None,) - - if output_router_logits: - outputs += (router_logits,) - - return outputs - - - -class FastSLMMambaDecoderLayer(nn.Module): - def __init__(self, config: FastSLMConfig, num_experts: int, layer_idx: int): - super().__init__() - - self.config = config - self.layer_idx = layer_idx - - self.mamba = Mamba2(config=config, layer_idx=layer_idx) - - self.intermediate_size = config.intermediate_size - if self.intermediate_size > 0: - num_experts_per_tok = config.num_experts_per_tok if num_experts > 1 else 1 - self.moe = FastSLMSparseMoeBlock(config, num_experts=num_experts, num_experts_per_tok=num_experts_per_tok, layer_idx=layer_idx) - - self.input_layernorm = FastSLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - - if self.intermediate_size > 0: - self.pre_moe_layernorm = FastSLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - else: - self.pre_moe_layernorm = None - - self.meta_added_flag = False - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, - output_attentions: Optional[bool] = False, - output_router_logits: Optional[bool] = False, - use_cache: Optional[bool] = False, - use_swa=False, - mamba_inference_params=None, - **kwargs, - ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: - if "padding_mask" in kwargs: - warnings.warn( - "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" - ) - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, sequence_length)` where padding elements are indicated by 0. - past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_router_logits (`bool`, *optional*): - Whether or not to return the logits of all the routers. They are useful for computing the router loss, and - should not be returned during inference. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). 
- """ - - if position_ids is not None and position_ids.shape[1] != hidden_states.shape[1]: - position_ids = torch.arange(hidden_states.shape[1], device=hidden_states.device).unsqueeze(0) - - residual = hidden_states - - if self.input_layernorm is not None: - hidden_states = self.input_layernorm(hidden_states) - - hidden_states, present_key_value = self.mamba( - hidden_states=hidden_states, - past_key_value=past_key_value, - attention_mask=attention_mask, - inference_params=mamba_inference_params, - ) - - attn_key_value = None - - hidden_states = residual + hidden_states - - if self.intermediate_size > 0: - residual = hidden_states - - if self.pre_moe_layernorm is not None: - hidden_states = self.pre_moe_layernorm(hidden_states) - - hidden_states, router_logits = self.moe(hidden_states) - - hidden_states = residual + hidden_states - - outputs = (hidden_states,) - - if use_cache: - outputs += (present_key_value,) - - if output_router_logits: - outputs += (router_logits,) - - outputs += (attn_key_value,) - - return outputs - - def _get_past_seqlen(self, past_key_value, seqlen): - if past_key_value is None: - return seqlen - past_seqlen = past_key_value.get_seq_length(self.layer_idx) - - if past_seqlen == 0: - return seqlen - - return past_seqlen - - - -class FastSLMHybridDecoderLayer(nn.Module): - def __init__(self, config: FastSLMConfig, num_experts: int, layer_idx: int): - super().__init__() - - self.config = config - - self.layer_idx = layer_idx - - if config.hybrid_decoder_layer == 'mamba': - self.mamba = Mamba2(config=config, layer_idx=layer_idx) - if config.hybrid_decoder_layer == 'deltanet': - ## this is to properly handle cache index - if config.layer_types is not None: - deltanet_idx = sum(1 for i in range(layer_idx) if config.layer_types[i] == 'deltanet') - else: - deltanet_idx = layer_idx - - self.gla = DeltaNet(hidden_size=config.hidden_size, num_heads=config.num_attention_heads, layer_idx=deltanet_idx, config=self.config) - else: - raise ValueError(f"Not supported: {config.hybrid_decoder_layer}") - - self.config = config - - if self.config.intermediate_size > 0: - num_experts_per_tok = config.num_experts_per_tok if num_experts > 1 else 1 - self.moe = FastSLMSparseMoeBlock(config, num_experts=num_experts, num_experts_per_tok=num_experts_per_tok, layer_idx=layer_idx) - self.pre_moe_layernorm = FastSLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - else: - self.moe = None - self.pre_moe_layernorm = None - - self.input_layernorm = FastSLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, - output_attentions: Optional[bool] = False, - output_router_logits: Optional[bool] = False, - use_cache: Optional[bool] = False, - fla_past_key_values = None, - mamba_inference_params = None, - use_swa=False, - **kwargs, - ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: - if "padding_mask" in kwargs: - warnings.warn( - "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" - ) - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, sequence_length)` where padding elements are indicated by 0. 
- past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_router_logits (`bool`, *optional*): - Whether or not to return the logits of all the routers. They are useful for computing the router loss, and - should not be returned during inference. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding - (see `past_key_values`). - """ - - residual = hidden_states - - hidden_states = self.input_layernorm(hidden_states) - - if self.config.hybrid_decoder_layer == 'mamba': - hybrid_op_hidden_states, mamba_present_key_value = self.mamba( - hidden_states=hidden_states, - past_key_value=past_key_value, - attention_mask=attention_mask, - inference_params=mamba_inference_params, - ) - - else: - hybrid_op_hidden_states, _, fla_past_key_values = self.gla( - hidden_states=hidden_states, - attention_mask=attention_mask, - past_key_values=fla_past_key_values, - use_cache=use_cache, - ) - - self_attn_weights = self_attn_present_key_value = current_kv = None - - hidden_states = residual + hybrid_op_hidden_states - - if self.moe is not None: - residual = hidden_states - hidden_states = self.pre_moe_layernorm(hidden_states) - - hidden_states, router_logits = self.moe(hidden_states) - - hidden_states = residual + hidden_states - - outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (self_attn_present_key_value,) - - if output_router_logits: - outputs += (router_logits,) - - outputs += (current_kv,) - - - return outputs - - - -# Adapted from transformers.models.mistral.modeling_mistral.MistralPreTrainedModel with Mistral->FastSLM -class FastSLMPreTrainedModel(PreTrainedModel): - config_class = FastSLMConfig - base_model_prefix = "model" - supports_gradient_checkpointing = True - _no_split_modules = ["FastSLMAttentionDecoderLayer", "FastSLMMambaDecoderLayer"] - _skip_keys_device_placement = "past_key_values" - _supports_flash_attn_2 = True - _supports_sdpa = True - _supports_cache_class = True - - def _init_weights(self, module): - std = self.config.initializer_range - if isinstance(module, (nn.Linear, nn.Conv1d)): - module.weight.data.normal_(mean=0.0, std=std) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=std) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - - @staticmethod - def _convert_to_standard_cache( - past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], batch_size: int - ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: - """ - Standardizes the format of the cache so as to match most implementations, i.e. 
have the seqlen as the third dim - also for mamba layers - """ - attn_layer_index = [k.shape == v.shape for k, v in past_key_value].index(True) - seqlen = past_key_value[attn_layer_index][0].shape[2] - standard_past_key_value = () - for k, v in past_key_value: - if k.shape != v.shape: - # mamba layer - # expand doesn't use more memory, so it's fine to do it here - standard_past_key_value += ((k.expand(-1, -1, seqlen, -1), v.expand(-1, -1, seqlen, -1)),) - else: - standard_past_key_value += ((k, v),) - return standard_past_key_value - - @staticmethod - def _convert_to_jamba_cache( - past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], - ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: - """ - Converts the cache to the format expected by FastSLM, i.e. dummy seqlen dimesion with size 1 for mamba layers - """ - jamba_past_key_value = () - for k, v in past_key_value: - if k.shape != v.shape: - # mamba layer - jamba_past_key_value += ((k[:, :, :1, :], v[:, :, :1, :]),) - else: - jamba_past_key_value += ((k, v),) - return jamba_past_key_value - - -# Adapted from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->JAMBA, Mistral->FastSLM -class FastSLMModel(FastSLMPreTrainedModel): - """ - Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`FastSLMDecoderLayer`] - - Args: - config: FastSLMConfig - """ - - def __init__(self, config: FastSLMConfig): - super().__init__(config) - - config.attn_implementation = config.attn_implementation_new - config._attn_implementation = config.attn_implementation_new - - self.config = config - - self.padding_idx = config.pad_token_id - self.vocab_size = config.vocab_size - - self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) - - decoder_layers = [] - - layer_type = [] - for i in range(config.num_hidden_layers): - num_experts = 1 - - if config.layer_types[i] in ['deltanet']: - layer_type.append('m') - config_new = copy.deepcopy(config) - config_new.hybrid_decoder_layer = 'deltanet' - decoder_layer = FastSLMHybridDecoderLayer(config_new, num_experts=num_experts, layer_idx=i) - elif config.layer_types[i] in ['m', 'm2']: - layer_type.append('m') - decoder_layer = FastSLMMambaDecoderLayer(config, num_experts=num_experts, layer_idx=i) - elif config.layer_types[i] == 'a': - layer_type.append('a') - decoder_layer = FastSLMAttentionDecoderLayer(config, num_experts=num_experts, layer_idx=i) - elif config.layer_types[i] == 'f': - layer_type.append('a') - decoder_layer = FFNDecoderLayer(config, num_experts=num_experts, layer_idx=i) - else: - raise ValueError(f"Unsupported layer type {config.layer_types[i]}") - - decoder_layers.append(decoder_layer) - - config.layer_type = layer_type - - if config.sliding_window is not None: - self.sliding_window = config.sliding_window - self.global_attn_idx = config.global_attn_idx - else: - self.sliding_window = None - self.global_attn_idx = None - - if not any(isinstance(layer, FastSLMAttentionDecoderLayer) for layer in decoder_layers): - # raise ValueError("At least one layer in the decoder must be an attention layer") - self._attn_layer_index = [] - else: - self._attn_layer_index = [isinstance(layer, FastSLMAttentionDecoderLayer) for layer in decoder_layers].index( - True - ) - - if not any(isinstance(layer, FastSLMMambaDecoderLayer) for layer in decoder_layers): - # raise ValueError("At least one layer in the decoder must be a Mamba layer") - self._mamba_layer_index = [] - else: - self._mamba_layer_index = [isinstance(layer, 
FastSLMMambaDecoderLayer) for layer in decoder_layers].index(True) - - # if ( - # decoder_layers[self._mamba_layer_index].mamba.ssm_state_size - # == decoder_layers[self._mamba_layer_index].mamba.conv_kernel_size - # ): - # raise ValueError("Mamba state size and convolution size must be different") - - self.layers = nn.ModuleList(decoder_layers) - - self._attn_implementation = config.attn_implementation - - self.final_layernorm = FastSLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - - if self.config.num_memory_tokens > 0: - self.memory_tokens = nn.Parameter(torch.randn(self.config.num_memory_tokens, self.config.hidden_size)) - - self.gradient_checkpointing = False - # Initialize weights and apply final processing - self.post_init() - - self.has_previous_state = False - - - def get_input_embeddings(self): - return self.embed_tokens - - def set_input_embeddings(self, value): - self.embed_tokens = value - - - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[Union[List[torch.FloatTensor], HybridMambaAttentionDynamicCache]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - output_router_logits: Optional[bool] = None, - return_dict: Optional[bool] = None, - fla_past_key_values = None, - mamba_inference_params = None, - ) -> Union[Tuple, MoeModelOutputWithPast]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_router_logits = ( - output_router_logits if output_router_logits is not None else self.config.output_router_logits - ) - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # retrieve input_ids and inputs_embeds - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - batch_size, seq_length = input_ids.shape - elif inputs_embeds is not None: - batch_size, seq_length, _ = inputs_embeds.shape - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - if self.gradient_checkpointing and self.training: - if use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - - if position_ids is None: - device = input_ids.device if input_ids is not None else inputs_embeds.device - position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device - ) - position_ids = position_ids.unsqueeze(0).view(-1, seq_length) - else: - if self.config.num_memory_tokens > 0 and past_key_values is not None and not self.has_previous_state: - position_ids = position_ids.view(-1, seq_length + self.config.num_memory_tokens).long() - else: - position_ids = position_ids.view(-1, seq_length).long() - - if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) - - ori_b, ori_n = inputs_embeds.shape[0], inputs_embeds.shape[1] - - if self.config.num_memory_tokens > 0 and (past_key_values is None or not self.has_previous_state): - mem = repeat(self.memory_tokens, 'n d -> b n d', b = inputs_embeds.shape[0]) # prepend the memory to every segment of m by repeating the memory tokens - inputs_embeds, mem_packed_shape = pack((mem, inputs_embeds), 'b * d') - - if position_ids is not None and position_ids.shape[1] != inputs_embeds.shape[1]: - position_ids = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device).unsqueeze(0) - - if attention_mask is not None and attention_mask.shape[1] < inputs_embeds.shape[1]: - assert attention_mask.shape[1] + self.config.num_memory_tokens == inputs_embeds.shape[1] - attention_mask = torch.cat([torch.ones(inputs_embeds.shape[0], self.config.num_memory_tokens, device=attention_mask.device), attention_mask], dim=1) - - - if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache: - is_padding_right = attention_mask[:, -1].sum().item() != batch_size - if is_padding_right: - raise ValueError( - "You are attempting to perform batched generation with padding_side='right'" - " this may lead to unexpected behaviour for Flash Attention version of FastSLM. Make sure to " - " call `tokenizer.padding_side = 'left'` before tokenizing the input. 
" - ) - - attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None - - hidden_states = inputs_embeds - - # decoder layers - all_hidden_states = () if output_hidden_states else None - all_self_attns = () if output_attentions else None - all_router_logits = () if output_router_logits else None - next_decoder_cache = None - - for i, decoder_layer in enumerate(self.layers): - if output_hidden_states: - all_hidden_states += (hidden_states,) - - if self.gradient_checkpointing and self.training: - layer_outputs = self._gradient_checkpointing_func( - decoder_layer.__call__, - hidden_states, - attention_mask, - position_ids, - past_key_values, - output_attentions, - output_router_logits, - use_cache, - ) - else: - layer_outputs = decoder_layer( - hidden_states, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_values, - output_attentions=output_attentions, - output_router_logits=output_router_logits, - use_cache=use_cache, - use_swa=self.sliding_window is not None and i not in self.global_attn_idx, - fla_past_key_values=fla_past_key_values, - mamba_inference_params=mamba_inference_params, - ) - - hidden_states = layer_outputs[0] - - if use_cache: - next_decoder_cache = layer_outputs[2 if output_attentions else 1] - - if output_attentions: - all_self_attns += (layer_outputs[1],) - - if output_router_logits: - all_router_logits += (layer_outputs[3],) - - if self.final_layernorm is not None: - hidden_states = self.final_layernorm(hidden_states) - - if output_hidden_states: - all_hidden_states += (hidden_states,) - - if self.config.num_memory_tokens > 0 and (past_key_values is None or not self.has_previous_state): - mem, hidden_states = unpack(hidden_states, mem_packed_shape, 'b * d') - hidden_states = hidden_states[:, :ori_n, :] - - if past_key_values is not None and not self.has_previous_state: - self.has_previous_state = True - - next_cache = None - if use_cache: - next_cache = next_decoder_cache - - if not return_dict: - return tuple( - v - for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits] - if v is not None - ) - return MoeModelOutputWithPast( - last_hidden_state=hidden_states, - past_key_values=past_key_values if (fla_past_key_values is None and mamba_inference_params is None) else (past_key_values, fla_past_key_values, mamba_inference_params), - hidden_states=all_hidden_states, - attentions=all_self_attns, - router_logits=all_router_logits, - ) - - -# Adapted from transformers.models.mixtral.modeling_mixtral.MixtralForCausalLM with MIXTRAL->JAMBA, Mixtral->FastSLM -class FastSLMForCausalLM(FastSLMPreTrainedModel): - _tied_weights_keys = ["lm_head.weight"] - - def __init__(self, config: FastSLMConfig): - super().__init__(config) - self.config = config - self.model = FastSLMModel(config) - self.vocab_size = config.vocab_size - self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.router_aux_loss_coef = config.router_aux_loss_coef - self.num_experts = config.num_experts - self.num_experts_per_tok = config.num_experts_per_tok - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.model.embed_tokens - - def set_input_embeddings(self, value): - self.model.embed_tokens = value - - def get_output_embeddings(self): - return self.lm_head - - def set_output_embeddings(self, new_embeddings): - self.lm_head = new_embeddings - - def set_decoder(self, decoder): - self.model = decoder - - def 
get_decoder(self): - return self.model - - @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) - # Ignore copy - def forward( - self, - input_ids: torch.LongTensor = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - output_router_logits: Optional[bool] = None, - return_dict: Optional[bool] = None, - calc_logits_for_entire_prompt: Optional[bool] = True, - fla_past_key_values = None, - mamba_inference_params = None, - ) -> Union[Tuple, MoeCausalLMOutputWithPast]: - r""" - Args: - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., - config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored - (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. - - calc_logits_for_entire_prompt (`bool`, *optional*): - Whether or not to calculate the logits for the entire prompt, or just the last token. Only last token - logits are needed for generation, and calculating them only for that token can save memory, - which becomes pretty significant for long sequences. - - Returns: - ```""" - - # print(input_ids.max()) - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_router_logits = ( - output_router_logits if output_router_logits is not None else self.config.output_router_logits - ) - - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) - outputs = self.model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - output_router_logits=output_router_logits, - fla_past_key_values=fla_past_key_values, - mamba_inference_params=mamba_inference_params, - return_dict=return_dict, - ) - - hidden_states = outputs[0] - if calc_logits_for_entire_prompt: - logits = self.lm_head(hidden_states) - else: - logits = self.lm_head(hidden_states[..., -1:, :]) - - logits = logits / self.lm_head.weight.norm(p=2, dim=1) - - logits = logits.float() - - loss = None - if labels is not None: - # Shift so that tokens < n predict n - shift_logits = logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - - loss_fct = CrossEntropyLoss() - shift_logits = shift_logits.view(-1, self.config.vocab_size) - shift_labels = shift_labels.view(-1) - # Enable model parallelism - shift_labels = shift_labels.to(shift_logits.device) - loss = loss_fct(shift_logits, shift_labels) - - if not return_dict: - output = (logits,) + outputs[1:] - return (loss,) + output if loss is not None else output - - # print("hidden_states.shape:", hidden_states.shape, "input_ids.shape:", input_ids.shape, "logits.shape:", logits.shape) - - return 
MoeCausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - router_logits=outputs.router_logits, - ) - - def get_init_cache(self, max_seqlen, batch_size=1): - past_key_values = HybridMambaAttentionDynamicCache( - self.config, batch_size, self.dtype, device=self.device, layer_type=self.config.layer_type - ) - - mamba_inference_params = InferenceParams(max_seqlen=max_seqlen, max_batch_size=batch_size) - - fla_past_key_values = fla_cache.from_legacy_cache(None) - - return past_key_values, fla_past_key_values, mamba_inference_params - - - def init_cuda_graph_generation( - self, - max_new_tokens=128, - batch_size=1, - device=None, - ): - """ - Initialize CUDA graph for generation with proper cache handling and warmup. - This function should be called once before generation to set up the graph. - - Args: - max_new_tokens: Maximum number of new tokens to generate - batch_size: Batch size for generation - device: Device to use (defaults to model device) - - Returns: - generation_state: Dictionary containing all necessary state for generation - """ - if device is None: - device = next(self.parameters()).device - - self.eval() - - # Initialize caches - max_seqlen = max_new_tokens + 2048 + self.config.num_memory_tokens # Add buffer for input - past_key_values, fla_past_key_values, mamba_inference_params = self.get_init_cache( - max_seqlen=max_seqlen, batch_size=batch_size - ) - - # Initialize KV caches for all modules - for module in self.modules(): - if hasattr(module, 'init_kv_cache'): - module.init_kv_cache(max_batch_size=batch_size, max_seq_len=max_seqlen) - - with torch.no_grad(): - # Warmup runs - dummy_input = torch.ones((batch_size, 10), dtype=torch.long, device=device) - for _ in range(10): - self(dummy_input) - - # Prepare static tensors for CUDA graph - static_current_input = torch.zeros((batch_size, 1), dtype=torch.long, device=device) - static_position_ids = torch.zeros((batch_size, 1), dtype=torch.long, device=device) - static_logits = torch.zeros((batch_size, self.config.vocab_size), device=device) - - # Set up for graph capture - self.model.has_previous_state = True - if mamba_inference_params is not None: - mamba_inference_params.seqlen_offset = 1 - - # Warmup runs for graph capture - for _ in range(10): - model_kwargs_warmup = { - 'input_ids': static_current_input, - 'fla_past_key_values': fla_past_key_values, - 'mamba_inference_params': mamba_inference_params, - 'past_key_values': past_key_values, - 'use_cache': True, - 'position_ids': static_position_ids, - } - warmup_outputs = self(**model_kwargs_warmup) - - # Capture CUDA graph - generation_graph = CUDAGraph() - with torch.cuda.graph(generation_graph): - model_kwargs_graph = { - 'input_ids': static_current_input, - 'fla_past_key_values': fla_past_key_values, - 'mamba_inference_params': mamba_inference_params, - 'past_key_values': past_key_values, - 'use_cache': True, - 'position_ids': static_position_ids, - } - graph_outputs = self(**model_kwargs_graph) - static_logits.copy_(graph_outputs.logits[:, -1, :]) - - if fla_past_key_values is not None: - fla_past_key_values.reset() - - if mamba_inference_params is not None: - mamba_inference_params.reset(mamba_inference_params.max_seqlen, mamba_inference_params.max_batch_size) - for key in mamba_inference_params.key_value_memory_dict: - conv_state, ssm_state = mamba_inference_params.key_value_memory_dict[key] - conv_state.zero_() - ssm_state.zero_() - - for module in 
self.modules(): - if hasattr(module, 'reset_kv_cache'): - module.reset_kv_cache() - - self.model.has_previous_state = False - - # Return generation state - generation_state = { - 'generation_graph': generation_graph, - 'static_current_input': static_current_input, - 'static_position_ids': static_position_ids, - 'static_logits': static_logits, - 'past_key_values': past_key_values, - 'fla_past_key_values': fla_past_key_values, - 'mamba_inference_params': mamba_inference_params, - 'max_seqlen': max_seqlen, - 'batch_size': batch_size, - 'device': device, - } - - return generation_state - - def generate_with_cuda_graph( - self, - input_ids, - generation_state, - max_new_tokens=128, - temperature=1.0, - top_k=0, - top_p=0.9, - eos_token_id=None, - verbose=False, - profiling=False, - multi_round=False, - ): - """ - Generate text using pre-initialized CUDA graph state. - - Args: - input_ids: Input token IDs tensor of shape (batch_size, seq_len) - generation_state: State dictionary returned by init_cuda_graph_generation - max_new_tokens: Maximum number of new tokens to generate - temperature: Sampling temperature (0 for greedy) - top_k: Top-k filtering (0 to disable) - top_p: Top-p filtering (1.0 to disable) - eos_token_id: End-of-sequence token ID - pad_token_id: Padding token ID - verbose: Whether to print generated tokens - profiling: Whether to return timing information - - Returns: - generated_ids: Tensor of shape (batch_size, input_len + generated_len) - or decode_latency if profiling=True - """ - self.eval() - batch_size = input_ids.shape[0] - device = input_ids.device - - # Extract state - generation_graph = generation_state['generation_graph'] - static_current_input = generation_state['static_current_input'] - static_position_ids = generation_state['static_position_ids'] - static_logits = generation_state['static_logits'] - past_key_values = generation_state['past_key_values'] - fla_past_key_values = generation_state['fla_past_key_values'] - mamba_inference_params = generation_state['mamba_inference_params'] - - with torch.no_grad(): - if not multi_round or mamba_inference_params.seqlen_offset == 0: - if fla_past_key_values is not None: - fla_past_key_values.reset() - - if mamba_inference_params is not None: - mamba_inference_params.reset(mamba_inference_params.max_seqlen, mamba_inference_params.max_batch_size) - for key in mamba_inference_params.key_value_memory_dict: - conv_state, ssm_state = mamba_inference_params.key_value_memory_dict[key] - conv_state.zero_() - ssm_state.zero_() - - for module in self.modules(): - if hasattr(module, 'reset_kv_cache'): - module.reset_kv_cache() - - self.model.has_previous_state = False - - # Prefill phase - process input sequence - position_ids = torch.arange( - self.config.num_memory_tokens + input_ids.shape[1], dtype=torch.long, device=device - ).unsqueeze(0).expand(batch_size, -1) - - else: - # Prefill phase - process input sequence - position_ids = torch.arange( - mamba_inference_params.seqlen_offset, mamba_inference_params.seqlen_offset + input_ids.shape[1], dtype=torch.long, device=device - ).unsqueeze(0).expand(batch_size, -1) - - current_input = input_ids - - model_kwargs = { - 'input_ids': current_input, - 'past_key_values': past_key_values, - 'fla_past_key_values': fla_past_key_values, - 'mamba_inference_params': mamba_inference_params, - 'use_cache': True, - 'position_ids': position_ids, - } - - if profiling: - torch.cuda.synchronize() - t1 = time.time() - - # Forward pass for prefill - outputs = self(**model_kwargs) - - if 
mamba_inference_params is not None: - if mamba_inference_params.seqlen_offset == 0: - mamba_inference_params.seqlen_offset = current_input.shape[1] + self.config.num_memory_tokens - else: - mamba_inference_params.seqlen_offset += current_input.shape[1] - - static_position_ids.fill_(position_ids[0, -1]) - - logits = outputs.logits[:, -1, :] # (batch_size, vocab_size) - generated_tokens = [] - - # Generation loop using CUDA graph replay - for step in range(max_new_tokens): - # Sample next token using current logits - if temperature == 0: - next_token = torch.argmax(logits, dim=-1, keepdim=True) - else: - next_token = sample_token(logits, temperature=temperature, top_k=top_k, top_p=top_p) - - generated_tokens.append(next_token) - - # Check for EOS - if not profiling and eos_token_id is not None and (next_token == eos_token_id).all(): - if verbose: - print("\nEOS reached") - break - - # Update static tensors for graph replay - static_current_input.copy_(next_token) - static_position_ids.add_(1) - - # Replay the captured graph - generation_graph.replay() - - if mamba_inference_params is not None: - mamba_inference_params.seqlen_offset += 1 - - logits = static_logits.clone() - - generated_ids = torch.cat([input_ids] + generated_tokens, dim=1) - - if profiling: - torch.cuda.synchronize() - t2 = time.time() - decode_latency = t2 - t1 - return generated_ids, decode_latency - - return generated_ids - - - def prepare_inputs_for_generation( - self, - input_ids, - past_key_values=None, - attention_mask=None, - inputs_embeds=None, - output_router_logits=False, - **kwargs, - ): - if self.config.num_memory_tokens > 0: - attention_mask = torch.cat([torch.ones(input_ids.shape[0], self.config.num_memory_tokens, device=attention_mask.device), attention_mask], dim=1) - - past_key_values = None # Disable cache for now - - position_ids = kwargs.get("position_ids", None) - if attention_mask is not None and position_ids is None: - # create position_ids on the fly for batch generation - position_ids = attention_mask.long().cumsum(-1) - 1 - position_ids.masked_fill_(attention_mask == 0, 1) - position_ids = position_ids[:, -input_ids.shape[1]:] - - # if `inputs_embeds` are passed, we only want to use them in the 1st generation step - if inputs_embeds is not None: - if input_ids.shape[1] == 0: - model_inputs = {"inputs_embeds": inputs_embeds} - else: - inputs_embeds_new = self.model.embed_tokens(input_ids) - model_inputs = {"inputs_embeds": torch.cat([inputs_embeds, inputs_embeds_new], dim=1)} - else: - model_inputs = {"input_ids": input_ids} - - model_inputs.update( - { - "position_ids": position_ids, - "past_key_values": past_key_values, - "use_cache": kwargs.get("use_cache"), - "attention_mask": attention_mask, - } - ) - return model_inputs - - -def sample_token(logits, temperature=1.0, top_k=0, top_p=0.9): - """ - Sample a token from logits with temperature, top-k, and top-p filtering. - This matches the implementation in fast_slm_gen.py for consistency. 
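-
-    Filtering order: logits are first scaled by `temperature`; top-k then masks every logit
-    strictly below the k-th largest; top-p (nucleus) keeps the smallest set of highest-probability
-    tokens whose cumulative probability exceeds `top_p`; the next token is drawn from the
-    renormalized softmax. `temperature == 0` falls back to greedy argmax.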
- - Args: - logits: Tensor of shape (batch_size, vocab_size) - temperature: Sampling temperature - top_k: Top-k filtering (0 to disable) - top_p: Top-p filtering (1.0 to disable) - - Returns: - next_token: Tensor of shape (batch_size, 1) - """ - if temperature == 0: - return torch.argmax(logits, dim=-1, keepdim=True) - - logits = logits / temperature - - # Top-k filtering - match fast_slm_gen.py implementation - if top_k > 0: - indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] - logits.masked_fill_(indices_to_remove, float('-inf')) - - # Top-p filtering - match fast_slm_gen.py implementation - if top_p < 1.0: - sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1) - cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) - - # Remove tokens with cumulative probability above the threshold - sorted_indices_to_remove = cumulative_probs > top_p - # Shift the indices to the right to keep also the first token above the threshold - sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() - sorted_indices_to_remove[..., 0] = 0 - - indices_to_remove = sorted_indices_to_remove.scatter(-1, sorted_indices, sorted_indices_to_remove) - logits.masked_fill_(indices_to_remove, float('-inf')) - - probs = F.softmax(logits, dim=-1) - return torch.multinomial(probs, num_samples=1) -
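-
-
-# A minimal usage sketch (illustrative only, not part of the original file): it assumes a local
-# FastSLM checkpoint directory and a compatible Hugging Face tokenizer, and shows how the
-# CUDA-graph helpers above are meant to be chained: build the static graph state once with
-# `init_cuda_graph_generation`, then reuse it across calls to `generate_with_cuda_graph`.
-#
-#   from transformers import AutoTokenizer
-#
-#   tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")
-#   model = FastSLMForCausalLM.from_pretrained("path/to/checkpoint", torch_dtype=torch.bfloat16).cuda()
-#   state = model.init_cuda_graph_generation(max_new_tokens=128, batch_size=1)
-#   input_ids = tokenizer("Hello", return_tensors="pt").input_ids.cuda()
-#   output_ids = model.generate_with_cuda_graph(input_ids, state, max_new_tokens=128, temperature=0.7)
-#   print(tokenizer.decode(output_ids[0], skip_special_tokens=True))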