from __future__ import annotations import logging import math import sys from abc import abstractmethod from collections import defaultdict from functools import partial from typing import ( Callable, Dict, Iterable, List, NamedTuple, Optional, Sequence, Set, Tuple, cast, ) from dataclasses import dataclass, fields from typing import List, Optional, Tuple, Union import torch import torch.backends.cuda import torch.nn as nn import torch.nn.functional as F from torch import einsum from transformers import PreTrainedModel from transformers.modeling_outputs import CausalLMOutputWithPast from transformers.models.auto import AutoModel from transformers.cache_utils import Cache from .configuration_llada import ( LLaDAConfig, StrEnum, InitFnType, ActivationType, BlockType, LayerNormType, ModelConfig, ActivationCheckpointingStrategy, ) if sys.version_info.minor > 8: from collections.abc import MutableMapping elif sys.version_info.minor == 8: from typing import MutableMapping else: raise SystemExit("This script supports Python 3.8 or higher") __all__ = [ "LayerNormBase", "LayerNorm", "RMSLayerNorm", "GemmaRMSLayerNorm", "RotaryEmbedding", "Activation", "GELU", "ReLU", "SwiGLU", "LLaDABlock", "LLaDASequentialBlock", "LLaDAModel", "LLaDAOutput", "LLaDAGenerateOutput", ] log = logging.getLogger(__name__) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) if not logger.hasHandlers(): handler = logging.StreamHandler() formatter = logging.Formatter('[%(asctime)s][%(levelname)s][%(name)s] %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) class ModuleType(StrEnum): in_module = "in" out_module = "out" emb = "emb" final_out = "final_out" def init_weights( config: ModelConfig, module: Union[nn.Linear, nn.Embedding], d: Optional[int] = None, layer_id: Optional[int] = None, std_factor: float = 1.0, type_of_module: Optional[ModuleType] = None, ) -> None: """ Initialize weights of a linear or embedding module. :param config: The model config. :param module: The linear or embedding submodule to initialize. :param d: The effective input dimensionality of the weights. This could be smaller than the actual dimensions for fused layers. :param layer_id: When set, the standard deviation for the "mitchell" method will be adjusted by ``1 / sqrt(2 * (layer_id + 1))``. """ d = d if d is not None else config.d_model if config.init_fn == InitFnType.normal: std = config.init_std * std_factor if config.init_cutoff_factor is not None: cutoff_value = config.init_cutoff_factor * std nn.init.trunc_normal_(module.weight, mean=0.0, std=std, a=-cutoff_value, b=cutoff_value) else: nn.init.normal_(module.weight, mean=0.0, std=std) elif config.init_fn == InitFnType.mitchell: std = std_factor / math.sqrt(d) if layer_id is not None: std = std / math.sqrt(2 * (layer_id + 1)) nn.init.trunc_normal_(module.weight, mean=0.0, std=std, a=-3 * std, b=3 * std) elif config.init_fn == InitFnType.kaiming_normal: nn.init.kaiming_normal_(module.weight, nonlinearity="relu") elif config.init_fn == InitFnType.fan_in: std = std_factor / math.sqrt(d) nn.init.normal_(module.weight, mean=0.0, std=std) elif config.init_fn == InitFnType.full_megatron: if type_of_module is None: raise RuntimeError(f"When using the {InitFnType.full_megatron} init, every module must have a type.") cutoff_factor = config.init_cutoff_factor if cutoff_factor is None: cutoff_factor = 3 if type_of_module == ModuleType.in_module: # for att_proj (same as QKV), ff_proj std = config.init_std elif type_of_module == ModuleType.out_module: # for attn_out, ff_out std = config.init_std / math.sqrt(2.0 * config.n_layers) elif type_of_module == ModuleType.emb: # positional embeddings (wpe) # token embeddings (wte) std = config.init_std elif type_of_module == ModuleType.final_out: # final output (ff_out) std = config.d_model**-0.5 else: raise RuntimeError(f"Unknown module type '{type_of_module}'") nn.init.trunc_normal_( module.weight, mean=0.0, std=std, a=-cutoff_factor * std, b=cutoff_factor * std, ) else: raise NotImplementedError(config.init_fn) if isinstance(module, nn.Linear): if module.bias is not None: nn.init.zeros_(module.bias) if config.init_fn == InitFnType.normal and getattr(module, "_is_residual", False): with torch.no_grad(): module.weight.div_(math.sqrt(2 * config.n_layers)) def ensure_finite_(x: torch.Tensor, check_neg_inf: bool = True, check_pos_inf: bool = False): """ Modify ``x`` in place to replace ``float("-inf")`` with the minimum value of the dtype when ``check_neg_inf`` is ``True`` and to replace ``float("inf")`` with the maximum value of the dtype when ``check_pos_inf`` is ``True``. """ if check_neg_inf: x.masked_fill_(x == float("-inf"), torch.finfo(x.dtype).min) if check_pos_inf: x.masked_fill_(x == float("inf"), torch.finfo(x.dtype).max) def activation_checkpoint_function(cfg: ModelConfig): preserve_rng_state = ( (cfg.attention_dropout == 0.0) and (cfg.embedding_dropout == 0.0) and (cfg.residual_dropout == 0.0) ) from torch.utils.checkpoint import checkpoint return partial( checkpoint, preserve_rng_state=preserve_rng_state, use_reentrant=False, ) class BufferCache(dict, MutableMapping[str, torch.Tensor]): """ Cache for attention biases and other things that would normally be stored as buffers. We avoid using buffers because we've run into various issues doing so with FSDP. In general it appears the way FSDP handles buffers is not well-defined. It doesn't shard them but apparently it does synchronize them across processes, which we want to avoid since (A) it isn't necessary, and (B) we sometimes have `-inf` in these biases which might get turned into NaNs when they're synchronized due to casting or some other issue. """ def _non_meta_init_device(config: ModelConfig) -> torch.device: if config.init_device is not None and config.init_device != "meta": return torch.device(config.init_device) else: return torch.device("cuda" if torch.cuda.is_available() else "cpu") class Dropout(nn.Dropout): def forward(self, input: torch.Tensor) -> torch.Tensor: if self.p == 0.0: return input else: return F.dropout(input, self.p, self.training, self.inplace) class LayerNormBase(nn.Module): def __init__( self, config: ModelConfig, *, size: Optional[int] = None, elementwise_affine: Optional[bool] = True, eps: float = 1e-05, ): super().__init__() self.config = config self.eps = eps self.normalized_shape = (size or config.d_model,) if elementwise_affine or (elementwise_affine is None and self.config.layer_norm_with_affine): self.weight = nn.Parameter(torch.ones(self.normalized_shape, device=config.init_device)) use_bias = self.config.bias_for_layer_norm if use_bias is None: use_bias = self.config.include_bias if use_bias: self.bias = nn.Parameter(torch.zeros(self.normalized_shape, device=config.init_device)) else: self.register_parameter("bias", None) else: self.register_parameter("bias", None) self.register_parameter("weight", None) @abstractmethod def forward(self, x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @classmethod def build(cls, config: ModelConfig, size: Optional[int] = None, **kwargs) -> LayerNormBase: if config.layer_norm_type == LayerNormType.default: return LayerNorm(config, size=size, low_precision=False, **kwargs) elif config.layer_norm_type == LayerNormType.low_precision: return LayerNorm(config, size=size, low_precision=True, **kwargs) elif config.layer_norm_type == LayerNormType.rms: return RMSLayerNorm(config, size=size, **kwargs) elif config.layer_norm_type == LayerNormType.gemma_rms: return GemmaRMSLayerNorm(config, size=size, **kwargs) else: raise NotImplementedError(f"Unknown LayerNorm type: '{config.layer_norm_type}'") def _cast_if_autocast_enabled(self, tensor: torch.Tensor, dtype: Optional[torch.dtype] = None) -> torch.Tensor: # NOTE: `is_autocast_enabled()` only checks for CUDA autocast, so we use the separate function # `is_autocast_cpu_enabled()` for CPU autocast. # See https://github.com/pytorch/pytorch/issues/110966. if tensor.device.type == "cuda" and torch.is_autocast_enabled(): return tensor.to(dtype=dtype if dtype is not None else torch.get_autocast_gpu_dtype()) elif tensor.device.type == "cpu" and torch.is_autocast_cpu_enabled(): return tensor.to(dtype=dtype if dtype is not None else torch.get_autocast_cpu_dtype()) else: return tensor def reset_parameters(self): if self.weight is not None: torch.nn.init.ones_(self.weight) # type: ignore if self.bias is not None: torch.nn.init.zeros_(self.bias) # type: ignore class LayerNorm(LayerNormBase): """ The default :class:`LayerNorm` implementation which can optionally run in low precision. """ def __init__( self, config: ModelConfig, size: Optional[int] = None, low_precision: bool = False, elementwise_affine: Optional[bool] = None, eps: float = 1e-05, ): super().__init__(config, size=size, elementwise_affine=elementwise_affine, eps=eps) self.low_precision = low_precision def forward(self, x: torch.Tensor) -> torch.Tensor: if self.low_precision: module_device = x.device downcast_x = self._cast_if_autocast_enabled(x) downcast_weight = ( self._cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight ) downcast_bias = self._cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias with torch.autocast(enabled=False, device_type=module_device.type): return F.layer_norm( downcast_x, self.normalized_shape, weight=downcast_weight, bias=downcast_bias, eps=self.eps ) else: return F.layer_norm(x, self.normalized_shape, weight=self.weight, bias=self.bias, eps=self.eps) class RMSLayerNorm(LayerNormBase): """ RMS layer norm, a simplified :class:`LayerNorm` implementation """ def __init__( self, config: ModelConfig, size: Optional[int] = None, elementwise_affine: Optional[bool] = None, eps: float = 1e-5, ): super().__init__(config, size=size, elementwise_affine=elementwise_affine, eps=config.rms_norm_eps) def forward(self, x: torch.Tensor) -> torch.Tensor: with torch.autocast(enabled=False, device_type=x.device.type): og_dtype = x.dtype x = x.to(torch.float32) variance = x.pow(2).mean(-1, keepdim=True) x = x * torch.rsqrt(variance + self.eps) x = x.to(og_dtype) if self.weight is not None: if self.bias is not None: return self.weight * x + self.bias else: return self.weight * x else: return x class GemmaRMSLayerNorm(LayerNormBase): """ Gemma RMS layer norm, a simplified :class:`LayerNorm` implementation """ def __init__( self, config: ModelConfig, size: Optional[int] = None, elementwise_affine: Optional[bool] = None, eps: float = 1e-5, ): super().__init__(config, size=size, elementwise_affine=elementwise_affine, eps=config.rms_norm_eps) def forward(self, x: torch.Tensor) -> torch.Tensor: with torch.autocast(enabled=False, device_type=x.device.type): og_dtype = x.dtype x = x.to(torch.float32) variance = x.pow(2).mean(-1, keepdim=True) x = x * torch.rsqrt(variance + self.eps) x = x.to(og_dtype) if self.weight is not None: if self.bias is not None: return x * (1 + self.weight) + self.bias else: return x * (1 + self.weight) else: return x class RotaryEmbedding(nn.Module): """ [Rotary positional embeddings (RoPE)](https://arxiv.org/abs/2104.09864). """ def __init__(self, config: ModelConfig, cache: BufferCache): super().__init__() self.config = config self.__cache = cache # Warm up cache. self.rope_theta = config.rope_theta self.get_rotary_embedding(config.max_sequence_length, _non_meta_init_device(config)) def get_rotary_embedding(self, seq_len: int, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]: if ( (pos_sin := self.__cache.get("rope_pos_sin")) is not None and (pos_cos := self.__cache.get("rope_pos_cos")) is not None and pos_sin.shape[-2] >= seq_len and pos_cos.shape[-2] >= seq_len ): if pos_sin.device != device: pos_sin = pos_sin.to(device) self.__cache["rope_pos_sin"] = pos_sin if pos_cos.device != device: pos_cos = pos_cos.to(device) self.__cache["rope_pos_cos"] = pos_cos return pos_sin[:, :, :seq_len, :], pos_cos[:, :, :seq_len, :] with torch.autocast(device.type, enabled=False): dim = self.config.d_model // self.config.n_heads inv_freq = 1.0 / (self.rope_theta ** (torch.arange(0, dim, 2, device=device, dtype=torch.float) / dim)) seq = torch.arange(seq_len, device=device, dtype=torch.float) freqs = einsum("i , j -> i j", seq, inv_freq) positions = torch.cat((freqs, freqs), dim=-1) pos_sin, pos_cos = positions.sin()[None, None, :, :], positions.cos()[None, None, :, :] self.__cache["rope_pos_sin"] = pos_sin self.__cache["rope_pos_cos"] = pos_cos return pos_sin, pos_cos def rotate_half(self, x: torch.Tensor) -> torch.Tensor: B, nh, T, hs = x.size() x = x.view(B, nh, T, 2, hs // 2) x1, x2 = x.unbind(dim=-2) return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(self, pos_sin: torch.Tensor, pos_cos: torch.Tensor, t: torch.Tensor) -> torch.Tensor: return ((t * pos_cos) + (self.rotate_half(t) * pos_sin)).to(t.dtype) def forward(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: if self.config.rope_full_precision: q_, k_ = q.float(), k.float() else: q_, k_ = q, k with torch.autocast(q.device.type, enabled=False): query_len, key_len = q_.shape[-2], k_.shape[-2] # could be different if layer_past not None pos_sin, pos_cos = self.get_rotary_embedding(key_len, q_.device) pos_sin = pos_sin.type_as(q_) pos_cos = pos_cos.type_as(q_) q_ = self.apply_rotary_pos_emb( pos_sin[:, :, key_len - query_len : key_len, :], pos_cos[:, :, key_len - query_len : key_len, :], q_, ) k_ = self.apply_rotary_pos_emb(pos_sin, pos_cos, k_) return q_.type_as(q), k_.type_as(k) class Activation(nn.Module): def __init__(self, config: ModelConfig): super().__init__() self.config = config @abstractmethod def forward(self, x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @property @abstractmethod def output_multiplier(self) -> float: raise NotImplementedError @classmethod def build(cls, config: ModelConfig) -> Activation: if config.activation_type == ActivationType.gelu: return cast(Activation, GELU(approximate="none")) elif config.activation_type == ActivationType.relu: return cast(Activation, ReLU(inplace=False)) elif config.activation_type == ActivationType.silu: return cast(Activation, SiLU(inplace=False)) elif config.activation_type == ActivationType.swiglu: return SwiGLU(config) else: raise NotImplementedError(f"Unknown activation: '{config.activation_type}'") class GELU(nn.GELU): @property def output_multiplier(self) -> float: return 1.0 class ReLU(nn.ReLU): @property def output_multiplier(self) -> float: return 1.0 class SiLU(nn.SiLU): @property def output_multiplier(self) -> float: return 1.0 class SwiGLU(Activation): def forward(self, x: torch.Tensor) -> torch.Tensor: x, gate = x.chunk(2, dim=-1) return F.silu(gate) * x @property def output_multiplier(self) -> float: return 0.5 def causal_attention_bias(seq_len: int, device: torch.device) -> torch.FloatTensor: att_bias = torch.triu( torch.ones(seq_len, seq_len, device=device, dtype=torch.float), diagonal=1, ) att_bias.masked_fill_(att_bias == 1, torch.finfo(att_bias.dtype).min) return att_bias.view(1, 1, seq_len, seq_len) # type: ignore def get_causal_attention_bias(cache: BufferCache, seq_len: int, device: torch.device) -> torch.Tensor: if (causal_bias := cache.get("causal_attention_bias")) is not None and causal_bias.shape[-1] >= seq_len: if causal_bias.device != device: causal_bias = causal_bias.to(device) cache["causal_attention_bias"] = causal_bias return causal_bias with torch.autocast(device.type, enabled=False): causal_bias = causal_attention_bias(seq_len, device) cache["causal_attention_bias"] = causal_bias return causal_bias def alibi_attention_bias(seq_len: int, config: ModelConfig, device: torch.device) -> torch.FloatTensor: alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.float, device=device).view(1, 1, 1, seq_len) # shape: (1, 1, seq_len, seq_len) alibi_bias = alibi_bias - torch.arange(1 - seq_len, 1, dtype=torch.float, device=device).view(1, 1, seq_len, 1) alibi_bias.abs_().mul_(-1) # shape: (n_heads,) m = torch.arange(1, config.n_heads + 1, dtype=torch.float, device=device) m.mul_(config.alibi_bias_max / config.n_heads) # shape: (1, n_heads, seq_len, seq_len) return alibi_bias * (1.0 / (2 ** m.view(1, config.n_heads, 1, 1))) # type: ignore class LLaDABlock(nn.Module): """ A base class for transformer block implementations. """ def __init__(self, layer_id: int, config: ModelConfig, cache: BufferCache): super().__init__() self.layer_id = layer_id self.config = config self.hidden_size = ( config.mlp_hidden_size if config.mlp_hidden_size is not None else config.mlp_ratio * config.d_model ) self.__cache = cache assert config.d_model % config.n_heads == 0 self._activation_checkpoint_fn = None # Dropout. self.dropout = Dropout(config.residual_dropout) # Layer norms. self.k_norm: Optional[LayerNormBase] = None self.q_norm: Optional[LayerNormBase] = None if config.attention_layer_norm: self.k_norm = LayerNormBase.build( config, size=(config.d_model // config.n_heads) * config.effective_n_kv_heads, elementwise_affine=config.attention_layer_norm_with_affine, ) self.q_norm = LayerNormBase.build(config, elementwise_affine=config.attention_layer_norm_with_affine) # Activation function. self.act = Activation.build(config) assert (self.act.output_multiplier * self.hidden_size) % 1 == 0 # Attention output projection. self.attn_out = nn.Linear( config.d_model, config.d_model, bias=config.include_bias, device=config.init_device ) # Feed-forward output projection. self.ff_out = nn.Linear( int(self.act.output_multiplier * self.hidden_size), config.d_model, bias=config.include_bias, device=config.init_device, ) self.ff_out._is_residual = True # type: ignore # Rotary embeddings. if self.config.rope: self.rotary_emb = RotaryEmbedding(config, self.__cache) self.flash_attn_func = None if config.flash_attention: try: from flash_attn import flash_attn_func # type: ignore self.flash_attn_func = flash_attn_func except ModuleNotFoundError: pass def reset_parameters(self): if self.k_norm is not None: self.k_norm.reset_parameters() if self.q_norm is not None: self.q_norm.reset_parameters() init_weights( self.config, self.attn_out, d=self.config.d_model, layer_id=self.layer_id, type_of_module=ModuleType.out_module, ) init_weights( self.config, self.ff_out, d=self.ff_out.in_features, layer_id=self.layer_id, type_of_module=ModuleType.out_module, ) def set_activation_checkpointing(self, strategy: Optional[ActivationCheckpointingStrategy]): if strategy == ActivationCheckpointingStrategy.fine_grained: self._activation_checkpoint_fn = activation_checkpoint_function(self.config) else: self._activation_checkpoint_fn = None @classmethod def _cast_attn_bias(cls, bias: torch.Tensor, input_dtype: torch.dtype) -> torch.Tensor: target_dtype = input_dtype # NOTE: `is_autocast_enabled()` only checks for CUDA autocast, so we use the separate function # `is_autocast_cpu_enabled()` for CPU autocast. # See https://github.com/pytorch/pytorch/issues/110966. if bias.device.type == "cuda" and torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif bias.device.type == "cpu" and torch.is_autocast_cpu_enabled(): target_dtype = torch.get_autocast_cpu_dtype() if bias.dtype != target_dtype: bias = bias.to(target_dtype) ensure_finite_(bias, check_neg_inf=True, check_pos_inf=False) return bias def _scaled_dot_product_attention( self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, ) -> torch.Tensor: """ Computes scaled dot product attention on query, key and value tensors, using an optional attention mask if passed, and applying dropout if a probability greater than 0.0 is specified. """ if self.flash_attn_func is not None and attn_mask is None: r = self.flash_attn_func( q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), dropout_p=dropout_p, causal=False ) return r.transpose(1, 2) else: # torch's sdpa doesn't support GQA, so we're doing this assert k.size(1) == v.size(1) num_kv_heads = k.size(1) num_q_heads = q.size(1) if num_q_heads != num_kv_heads: assert num_q_heads % num_kv_heads == 0 k = k.repeat_interleave(num_q_heads // num_kv_heads, dim=1, output_size=num_q_heads) v = v.repeat_interleave(num_q_heads // num_kv_heads, dim=1, output_size=num_q_heads) # Modify: MDM set causal to False, and with no attn_mask. return F.scaled_dot_product_attention( q, k, v, attn_mask=None, dropout_p=dropout_p, is_causal=False, ) def attention( self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attention_bias: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]: B, T, C = q.size() # batch size, sequence length, d_model dtype = k.dtype # Optionally apply layer norm to keys and queries. if self.q_norm is not None and self.k_norm is not None: q = self.q_norm(q).to(dtype=dtype) k = self.k_norm(k).to(dtype=dtype) # Move head forward to be next to the batch dim. # shape: (B, nh, T, hs) q = q.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2) # shape: (B, n_kv_h, T, hs) k = k.view(B, T, self.config.effective_n_kv_heads, C // self.config.n_heads).transpose(1, 2) # shape: (B, n_kv_h, T, hs) v = v.view(B, T, self.config.effective_n_kv_heads, C // self.config.n_heads).transpose(1, 2) if layer_past is not None: past_key, past_value = layer_past k = torch.cat((past_key, k), dim=-2) v = torch.cat((past_value, v), dim=-2) present = (k, v) if use_cache else None query_len, key_len = q.shape[-2], k.shape[-2] # could be different if layer_past not None if self.config.rope: # Apply rotary embeddings. q, k = self.rotary_emb(q, k) if attention_bias is not None: # Resize and cast attention bias. # The current dtype of the attention bias might not match the dtype that the SDP attn function will # run in if AMP is enabled, and this can be a problem if some tokens are masked out due to padding # as down-casting the attention bias to the autocast precision will result in -infs, which will # cause the SDP attn function to produce NaNs. attention_bias = self._cast_attn_bias( attention_bias[:, :, key_len - query_len : key_len, :key_len], dtype ) # Get the attention scores. # shape: (B, nh, T, hs) att = self._scaled_dot_product_attention( q, k, v, attn_mask=None, dropout_p=0.0 if not self.training else self.config.attention_dropout, is_causal=False, ) # Re-assemble all head outputs side-by-side. att = att.transpose(1, 2).contiguous().view(B, T, C) # Apply output projection. return self.attn_out(att), present @abstractmethod def forward( self, x: torch.Tensor, attention_bias: Optional[torch.FloatTensor] = None, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]: raise NotImplementedError @classmethod def build(cls, layer_id: int, config: ModelConfig, cache: BufferCache) -> LLaDABlock: if config.block_type == BlockType.sequential: return LLaDASequentialBlock(layer_id, config, cache) elif config.block_type == BlockType.llama: return LLaDALlamaBlock(layer_id, config, cache) else: raise NotImplementedError(f"Unknown block type: '{config.block_type}'") class LLaDASequentialBlock(LLaDABlock): """ This is a typical transformer block where the output is computed as ``MLP(LN(x + Attention(LN(x))))`` (plus another skip connection). """ def __init__(self, layer_id: int, config: ModelConfig, cache: BufferCache): super().__init__(layer_id, config, cache) # Layer norms. self.attn_norm = LayerNorm.build(config) self.ff_norm = LayerNorm.build(config) # Attention input projection. Projects x -> (q, k, v) head_dim = config.d_model // config.n_heads self.fused_dims = ( config.d_model, config.effective_n_kv_heads * head_dim, config.effective_n_kv_heads * head_dim, ) self.att_proj = nn.Linear( config.d_model, sum(self.fused_dims), bias=config.include_bias | config.include_qkv_bias, device=config.init_device ) # Feed-forward input projection. self.ff_proj = nn.Linear( config.d_model, self.hidden_size, bias=config.include_bias, device=config.init_device ) def reset_parameters(self): super().reset_parameters() self.attn_norm.reset_parameters() self.ff_norm.reset_parameters() # NOTE: the standard deviation for these weights does not depend on the layer. init_weights( self.config, self.att_proj, d=self.config.d_model, layer_id=None, type_of_module=ModuleType.in_module ) init_weights( self.config, self.ff_proj, d=self.config.d_model, layer_id=None, type_of_module=ModuleType.in_module ) def forward( self, x: torch.Tensor, attention_bias: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]: # Get query, key, value projections. # shape: # - for regular attn q, k, v: (batch_size, seq_len, d_model) # - for multi-query attn q: (batch_size, seq_len, d_model) # k, v: (batch_size, seq_len, d_model // n_heads) # - for group query attn q: (batch_size, seq_len, d_model) # k, v: (batch_size, seq_len, d_model // n_kv_heads) if self._activation_checkpoint_fn is not None: q, k, v = self.att_proj(self._activation_checkpoint_fn(self.attn_norm, x)).split( self.fused_dims, dim=-1 ) else: q, k, v = self.att_proj(self.attn_norm(x)).split(self.fused_dims, dim=-1) # Get attention scores. if self._activation_checkpoint_fn is not None: att, cache = self._activation_checkpoint_fn( # type: ignore self.attention, q, k, v, attention_bias, layer_past=layer_past, use_cache=use_cache ) else: att, cache = self.attention(q, k, v, attention_bias, layer_past=layer_past, use_cache=use_cache) # Add attention scores. # shape: (B, T, C) x = x + self.dropout(att) # Add feed-forward projection. # shape: (batch_size, seq_len, d_model) og_x = x if self._activation_checkpoint_fn is not None: x = self._activation_checkpoint_fn(self.ff_norm, x) # type: ignore else: x = self.ff_norm(x) x = self.ff_proj(x) if self._activation_checkpoint_fn is not None: x = self._activation_checkpoint_fn(self.act, x) # type: ignore else: x = self.act(x) x = self.ff_out(x) x = self.dropout(x) x = og_x + x return x, cache class LLaDALlamaBlock(LLaDABlock): """ This is a transformer block where the output is computed as ``MLP(LN(x + Attention(LN(x))))`` (plus another skip connection). This block is similar to `LLaDASequentialBlock` but some operations have slightly different implementations to imitate the behavior of Llama. """ def __init__(self, layer_id: int, config: ModelConfig, cache: BufferCache): super().__init__(layer_id, config, cache) # Layer norms. self.attn_norm = LayerNorm.build(config) self.ff_norm = LayerNorm.build(config) self.__cache = cache # Attention input projection. Projects x -> (q, k, v) head_dim = config.d_model // config.n_heads q_proj_out_dim = config.d_model k_proj_out_dim = config.effective_n_kv_heads * head_dim v_proj_out_dim = config.effective_n_kv_heads * head_dim self.q_proj = nn.Linear( config.d_model, q_proj_out_dim, bias=config.include_bias | config.include_qkv_bias, device=config.init_device ) self.k_proj = nn.Linear( config.d_model, k_proj_out_dim, bias=config.include_bias | config.include_qkv_bias, device=config.init_device ) self.v_proj = nn.Linear( config.d_model, v_proj_out_dim, bias=config.include_bias | config.include_qkv_bias, device=config.init_device ) # Feed-forward input projection. self.ff_proj = nn.Linear( config.d_model, self.hidden_size, bias=config.include_bias, device=config.init_device ) # new add self.up_proj = nn.Linear( config.d_model, self.hidden_size, bias=config.include_bias, device=config.init_device ) #[MODIFIED] : 这里没有正确传递type_of_module def reset_parameters(self): super().reset_parameters() # This correctly initializes attn_out and ff_out as 'out_module' self.attn_norm.reset_parameters() self.ff_norm.reset_parameters() # Correctly initialize all input projections with type_of_module init_weights(self.config, self.q_proj, d=self.config.d_model, layer_id=None, type_of_module=ModuleType.in_module) init_weights(self.config, self.k_proj, d=self.config.d_model, layer_id=None, type_of_module=ModuleType.in_module) init_weights(self.config, self.v_proj, d=self.config.d_model, layer_id=None, type_of_module=ModuleType.in_module) init_weights(self.config, self.ff_proj, d=self.config.d_model, layer_id=None, type_of_module=ModuleType.in_module) init_weights(self.config, self.up_proj, d=self.config.d_model, layer_id=None, type_of_module=ModuleType.in_module) def forward( self, x: torch.Tensor, attention_bias: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]: # Get query, key, value projections. # shape: # - for regular attn q, k, v: (batch_size, seq_len, d_model) # - for multi-query attn q: (batch_size, seq_len, d_model) # k, v: (batch_size, seq_len, d_model // n_heads) # - for group query attn q: (batch_size, seq_len, d_model) # k, v: (batch_size, seq_len, d_model // n_kv_heads) x_normed = self.attn_norm(x) q = self.q_proj(x_normed) k = self.k_proj(x_normed) v = self.v_proj(x_normed) # Get attention scores. if self._activation_checkpoint_fn is not None: att, cache = self._activation_checkpoint_fn( # type: ignore self.attention, q, k, v, attention_bias, layer_past=layer_past, use_cache=use_cache ) else: att, cache = self.attention(q, k, v, attention_bias, layer_past=layer_past, use_cache=use_cache) # Add attention scores. # shape: (B, T, C) x = x + self.dropout(att) # Add feed-forward projection. # shape: (batch_size, seq_len, d_model) og_x = x if self._activation_checkpoint_fn is not None: x = self._activation_checkpoint_fn(self.ff_norm, x) # type: ignore else: x = self.ff_norm(x) x, x_up = self.ff_proj(x), self.up_proj(x) # new add if self._activation_checkpoint_fn is not None: x = self._activation_checkpoint_fn(self.act, x) # type: ignore else: x = self.act(x) x = x * x_up # new add x = self.ff_out(x) x = self.dropout(x) x = og_x + x return x, cache class LLaDAOutput(NamedTuple): logits: torch.FloatTensor """ A tensor of shape `(batch_size, seq_len, vocab_size)` representing the log probabilities for the next token *before* normalization via (log) softmax. """ attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] """ Attention keys and values from each block. """ hidden_states: Optional[Tuple[torch.Tensor]] """ Hidden states from each block. """ class LLaDAGenerateOutput(NamedTuple): token_ids: torch.LongTensor """ The generated token IDs, a tensor of shape `(batch_size, beam_size, max_steps)`. These do *not* include the original input IDs. """ scores: torch.FloatTensor """ The scores of the generated sequences, a tensor of shape `(batch_size, beam_size)`. """ class LLaDABlockGroup(nn.ModuleList): def __init__(self, config: ModelConfig, layer_offset: int, modules: Optional[Iterable[nn.Module]] = None): super().__init__(modules) self.config = config self.layer_offset = layer_offset self.activation_checkpointing_strategy: Optional[ActivationCheckpointingStrategy] = None self._activation_checkpoint_fn = activation_checkpoint_function(self.config) def forward( self, x: torch.Tensor, attention_bias: Optional[torch.FloatTensor] = None, layers_past: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[List[Tuple[torch.Tensor, torch.Tensor]]]]: attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = [] if use_cache else None for block_idx, block in enumerate(self): layer_past = None if layers_past is None else layers_past[block_idx] block_idx += self.layer_offset if ( (self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.whole_layer) or ( self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_two and block_idx % 2 == 0 ) or ( self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_three and block_idx % 3 == 0 ) or ( self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_four and block_idx % 4 == 0 ) ): # shape: (batch_size, seq_len, d_model) x, cache = self._activation_checkpoint_fn( # type: ignore block, x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache ) else: # shape: (batch_size, seq_len, d_model) x, cache = block(x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache) if attn_key_values is not None: assert cache is not None attn_key_values.append(cache) return x, attn_key_values def reset_parameters(self): for block in self: block.reset_parameters() def set_activation_checkpointing(self, strategy: Optional[ActivationCheckpointingStrategy]): self.activation_checkpointing_strategy = strategy for block in self: block.set_activation_checkpointing(strategy) class LLaDAModel(nn.Module): def __init__(self, config: ModelConfig, init_params: bool = True): super().__init__() self.config = config self.__cache = BufferCache() # Validate config. if self.config.alibi and self.config.flash_attention: raise Exception("ALiBi is currently not supported with FlashAttention") if self.config.alibi and self.config.rope: raise Exception("ALiBi and RoPE are mutually exclusive") if self.config.embedding_size is not None and self.config.embedding_size != self.config.vocab_size: if self.config.embedding_size < self.config.vocab_size: raise Exception("embedding size should be at least as big as vocab size") elif self.config.embedding_size % 128 != 0: import warnings warnings.warn( "Embedding size is not a multiple of 128! This could hurt throughput performance.", UserWarning ) self.activation_checkpointing_strategy: Optional[ActivationCheckpointingStrategy] = None self._activation_checkpoint_fn: Callable = activation_checkpoint_function(self.config) if not ( 0 < self.config.block_group_size <= self.config.n_layers and self.config.n_layers % self.config.block_group_size == 0 ): raise Exception("n layers must be divisible by block group size") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp(False) # this is super slow so make sure torch won't use it self.transformer = nn.ModuleDict( dict( wte=nn.Embedding( config.embedding_size or config.vocab_size, config.d_model, device=config.init_device,padding_idx=config.pad_token_id ), emb_drop=Dropout(config.embedding_dropout), ln_f=LayerNorm.build(config), ) ) blocks = [LLaDABlock.build(i, config, self.__cache) for i in range(config.n_layers)] if self.config.block_group_size > 1: block_groups = [ LLaDABlockGroup(config, i, blocks[i : i + config.block_group_size]) for i in range(0, config.n_layers, config.block_group_size) ] self.transformer.update({"block_groups": nn.ModuleList(block_groups)}) else: self.transformer.update({"blocks": nn.ModuleList(blocks)}) if not (self.config.alibi or self.config.rope): self.transformer.update( {"wpe": nn.Embedding(config.max_sequence_length, config.d_model, device=config.init_device)} ) if not config.weight_tying: self.transformer.update( { "ff_out": nn.Linear( config.d_model, config.embedding_size or config.vocab_size, bias=config.include_bias, device=config.init_device, ) } ) # When `init_device="meta"` FSDP will call `reset_parameters()` to initialize weights. if init_params and self.config.init_device != "meta": self.reset_parameters() self.__num_fwd_flops: Optional[int] = None # Warm up cache. if self.config.alibi: get_causal_attention_bias(self.__cache, config.max_sequence_length, _non_meta_init_device(config)) self.get_alibi_attention_bias(config.max_sequence_length, _non_meta_init_device(config)) def set_activation_checkpointing(self, strategy: Optional[ActivationCheckpointingStrategy]): self.activation_checkpointing_strategy = strategy if self.config.block_group_size != 1: for block_group in self.transformer.block_groups: block_group.set_activation_checkpointing(strategy) else: for block in self.transformer.blocks: block.set_activation_checkpointing(strategy) @property def device(self) -> torch.device: device: torch.device = self.transformer.wte.weight.device # type: ignore if device.type == "meta": return _non_meta_init_device(self.config) else: return device def reset_parameters(self): log.info("Initializing model parameters...") # Top-level embeddings / linear layers. init_weights( self.config, self.transformer.wte, # type: ignore std_factor=(0.5 * math.sqrt(self.config.d_model)) if self.config.scale_logits else 1.0, type_of_module=ModuleType.emb, ) if hasattr(self.transformer, "wpe"): init_weights(self.config, self.transformer.wpe, type_of_module=ModuleType.emb) # type: ignore # Top-level layer norm. self.transformer.ln_f.reset_parameters() # type: ignore # Output weights. if hasattr(self.transformer, "ff_out"): init_weights(self.config, self.transformer.ff_out, type_of_module=ModuleType.final_out) # type: ignore # Let the blocks handle themselves. if self.config.block_group_size == 1: for block in self.transformer.blocks: block.reset_parameters() else: for block_group in self.transformer.block_groups: block_group.reset_parameters() def get_alibi_attention_bias(self, seq_len: int, device: torch.device) -> torch.Tensor: if (alibi_bias := self.__cache.get("alibi_attention_bias")) is not None and alibi_bias.shape[ -1 ] >= seq_len: if alibi_bias.device != device: alibi_bias = alibi_bias.to(device) self.__cache["alibi_attention_bias"] = alibi_bias return alibi_bias with torch.autocast(device.type, enabled=False): alibi_bias = alibi_attention_bias(seq_len, self.config, device) self.__cache["alibi_attention_bias"] = alibi_bias return alibi_bias def forward( self, input_ids: torch.LongTensor, input_embeddings: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, attention_bias: Optional[torch.Tensor] = None, past_key_values: Optional[Sequence[Tuple[torch.Tensor, torch.Tensor]]] = None, use_cache: bool = False, last_logits_only: bool = False, output_hidden_states: Optional[bool] = None, ) -> LLaDAOutput: """ :param input_ids: A tensor of shape `(batch_size, seq_len)`. :param input_embeddings: A tensor of shape `(batch_size, seq_len, d_model)` with input embeddings. When provided, it is treated as the output of the input embedding layer. :param attention_mask: A tensor of shape `(batch_size, seq_len)` that indicates which input IDs are masked. A `1` value in the mask means that the corresponding input ID should *not* be ignored. A `0` means that the corresponding input ID is masked. This has the same meaning as the `attention_mask` in HuggingFace's `transformers` library. :param attention_bias: A tensor of shape `(batch_size, 1, seq_len, seq_len)`, `(1, 1, seq_len, seq_len)`, or `(seq_len, seq_len)`. This is used to introduce causal or other biases. If the tensor is a bool or byte tensor, a `True` or `1` at `attention_bias[:, :, i, j]` indicates that the i-th element in the sequence is allowed to attend to the j-th element in the sequence. If the tensor is a float tensor, it will just be added to the attention scores before the softmax. The default is causal, which corresponds to a lower-diagonal byte matrix of ones. :param past_key_values: Pre-computed keys and values for each attention block. Can be used to speed up sequential decoding. The `input_ids` which have their past given to this model should not be passed as `input_ids` as they have already been computed. :param use_cache: If `True`, return key and value tensors for each block. :param last_logits_only: If `True`, only compute the logits for the last token of each sequence. This can speed up decoding when you only care about the next token. """ # Add Basic MDM Model config check assert not self.config.alibi, "Alibi length extrapolation is not supported for MDM." assert self.config.rope, "Rope must be used in Llama-Encoder for MDM." assert (past_key_values is None and not use_cache), "The kvcache is not suppotred for MDM." output_hidden_states = output_hidden_states if output_hidden_states is not None else False if past_key_values: assert len(past_key_values) == self.config.n_layers batch_size, seq_len = input_ids.size() if input_embeddings is None else input_embeddings.size()[:2] if past_key_values is None: past_length = 0 else: past_length = past_key_values[0][0].size(-2) # Get embeddings of input. # shape: (batch_size, seq_len, d_model) x = self.transformer.wte(input_ids) if input_embeddings is None else input_embeddings # type: ignore if self.config.input_emb_norm: x = x * (self.config.d_model**0.5) if not (self.config.alibi or self.config.rope): # Get positional embeddings. # shape: (1, seq_len) pos = torch.arange(past_length, past_length + seq_len, dtype=torch.long, device=x.device).unsqueeze(0) # shape: (1, seq_len, d_model) pos_emb = self.transformer.wpe(pos) # type: ignore x = pos_emb + x # Add input + positional embeddings and apply dropout. # shape: (batch_size, seq_len, d_model) x = self.transformer.emb_drop(x) # type: ignore # Transform the attention mask into what the blocks expect. if attention_mask is not None and 0.0 in attention_mask: # shape: (batch_size, 1, 1, seq_len) attention_mask = attention_mask.to(dtype=torch.float).view(batch_size, -1)[:, None, None, :] attention_mask = (1.0 - attention_mask) * torch.finfo(attention_mask.dtype).min else: attention_mask = None # Merge attention mask with attention bias. if ( attention_bias is not None or attention_mask is not None or self.config.alibi # NOTE (epwalsh): we need to initialize the attn bias in order for attn to work properly # with key+value cache. Otherwise `F.scaled_dot_product_attention()` doesn't seem to compute # scores correctly. or past_key_values is not None ): if attention_bias is None and self.config.alibi: attention_bias = get_causal_attention_bias( self.__cache, past_length + seq_len, x.device ) + self.get_alibi_attention_bias(past_length + seq_len, x.device) elif attention_bias is None: attention_bias = get_causal_attention_bias(self.__cache, past_length + seq_len, x.device) elif attention_bias.dtype in (torch.int8, torch.bool): attention_bias = attention_bias.to(dtype=torch.float) attention_bias.masked_fill_(attention_bias == 0.0, torch.finfo(attention_bias.dtype).min) # Transform to the right shape and data type. mask_len = seq_len if attention_mask is not None: mask_len = attention_mask.shape[-1] elif past_key_values is not None: mask_len = past_key_values[0][0].shape[-2] + seq_len attention_bias = attention_bias[:, :, :mask_len, :mask_len].to(dtype=torch.float) # Add in the masking bias. if attention_mask is not None: attention_bias = attention_bias + attention_mask # Might get -infs after adding attention mask, since dtype.min + dtype.min = -inf. # `F.scaled_dot_product_attention()` doesn't handle -inf like you'd expect, instead # it can produce NaNs. ensure_finite_(attention_bias, check_neg_inf=True, check_pos_inf=False) attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = [] if use_cache else None # decoder layers all_hidden_states = [] # Apply blocks one-by-one. if self.config.block_group_size == 1: for block_idx, block in enumerate(self.transformer.blocks): if output_hidden_states: # add hidden states all_hidden_states.append(x) layer_past = None if past_key_values is None else past_key_values[block_idx] if ( (self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.whole_layer) or ( self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_two and block_idx % 2 == 0 ) or ( self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_three and block_idx % 3 == 0 ) or ( self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_four and block_idx % 4 == 0 ) ): # shape: (batch_size, seq_len, d_model) x, cache = self._activation_checkpoint_fn( block, x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache ) else: # shape: (batch_size, seq_len, d_model) x, cache = block(x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache) if attn_key_values is not None: assert cache is not None attn_key_values.append(cache) else: for group_idx, block_group in enumerate(self.transformer.block_groups): if output_hidden_states: # add hidden states all_hidden_states.append(x) layers_past = ( None if past_key_values is None else past_key_values[ group_idx * self.config.block_group_size : (group_idx + 1) * self.config.block_group_size ] ) x, cache = block_group( x, attention_bias=attention_bias, layers_past=layers_past, use_cache=use_cache ) if attn_key_values is not None: assert cache is not None attn_key_values.extend(cache) if last_logits_only: # shape: (batch_size, 1, d_model) x = x[:, -1, :].unsqueeze(1) # Apply final layer norm. # shape: (batch_size, seq_len or 1, d_model) x = self.transformer.ln_f(x) # type: ignore if output_hidden_states: # add final hidden state post-final-layernorm, following HuggingFace's convention all_hidden_states.append(x) # Get logits. # shape: (batch_size, seq_len or 1, vocab_size) if self.config.weight_tying: logits = F.linear(x, self.transformer.wte.weight, None) # type: ignore else: logits = self.transformer.ff_out(x) # type: ignore if self.config.scale_logits: logits.mul_(1 / math.sqrt(self.config.d_model)) return LLaDAOutput(logits=logits, attn_key_values=attn_key_values, hidden_states=tuple(all_hidden_states) if output_hidden_states else None) # type: ignore[arg-type] def create_model_config_from_pretrained_config(config: LLaDAConfig): """ Utility function """ kwargs = {} for field in fields(ModelConfig): kwargs[field.name] = getattr(config, field.name) model_config = ModelConfig(**kwargs) return model_config from transformers.modeling_outputs import ModelOutput from transformers.loss.loss_utils import fixed_cross_entropy def ForMaskedLMLoss( logits: torch.Tensor, labels: torch.Tensor, vocab_size: int, num_items_in_batch: Optional[torch.Tensor] = None, ignore_index: int = -100, # 新增参数,用于接收 per-token 的权重 per_token_weights: Optional[torch.Tensor] = None, loss_normalization: str = "masked_tokens", **kwargs, ): """ 计算Masked Language Model的损失。 支持基于重要性采样的 per-token 加权。 """ logits = logits.float() # 如果没有提供权重,则使用原始的、更高效的计算方式 if per_token_weights is None: # Upcast to float if we need to compute the loss to avoid potential precision issues # Flatten the tokens logits = logits.view(-1, vocab_size) labels = labels.view(-1) labels = labels.to(logits.device) # 使用原始的 fixed_cross_entropy loss = fixed_cross_entropy(logits, labels, num_items_in_batch, ignore_index, **kwargs) return loss # --- 如果提供了权重,则执行以下加权逻辑 --- # 1. 计算 per-token 的 loss,不进行 reduction # logits: (batch, seq_len, vocab_size) 或 (total_tokens, vocab_size) # labels: (batch, seq_len) 或 (total_tokens,) per_token_loss = F.cross_entropy( logits.view(-1, vocab_size), labels.view(-1), ignore_index=ignore_index, reduction='none' # 这是关键! ) # 2. 确保权重张量与 per_token_loss 的形状匹配 # per_token_weights 应该已经被塑造成 (batch * seq_len,) 或 (total_tokens,) 的形状 weights = per_token_weights.reshape(-1).to(per_token_loss.device) # 检查形状是否匹配 if per_token_loss.shape != weights.shape: raise ValueError( f"Shape mismatch between per_token_loss ({per_token_loss.shape}) and weights ({weights.shape})." "Please ensure per_token_weights are correctly expanded." ) # 3. 应用权重 weighted_loss = per_token_loss * weights # 4. 手动进行 reduction,模拟原始的 fixed_cross_entropy 行为 # 即,对所有有效的(非 ignore_index)token 的加权损失求和,然后除以有效 token 的数量 # 这种归一化方式可以确保不同大小的batch和不同数量的mask token下的loss尺度相对稳定。 # 获取有效token的数量 if num_items_in_batch is None: # 如果没提供,就自己算 num_valid_tokens = (labels.view(-1) != ignore_index).sum() else: num_valid_tokens = num_items_in_batch # 避免除以零 if torch.is_tensor(num_valid_tokens): num_valid_tokens = num_valid_tokens.to(weighted_loss.device) # 求和并归一化 total_weighted_loss = weighted_loss.sum() if loss_normalization == "total_tokens": # Normalize by total number of tokens (official implementation behavior) num_total_tokens = labels.numel() final_loss = total_weighted_loss / num_total_tokens else: # Normalize by number of valid (masked) tokens (old behavior) if num_valid_tokens > 0: final_loss = total_weighted_loss / num_valid_tokens else: final_loss = torch.tensor(0.0, device=logits.device) return final_loss @dataclass class CausalLMOutputWithPastAndMLMProb(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None current_mlm_prob: Optional[torch.FloatTensor] = None class LLaDAModelLM(PreTrainedModel): """ Extremely barebones HF model wrapper. """ config_class = LLaDAConfig base_model_prefix = "model" _no_split_modules = ["LLaDABlock", "LLaDASequentialBlock", "LLaDALlamaBlock"] def __init__(self, config: LLaDAConfig, model: Optional[LLaDAModel] = None, init_params: bool = False): super().__init__(config) if not model: model_config = create_model_config_from_pretrained_config(config) # Initialize model (always on CPU to start with so we don't run out of GPU memory). model_config.init_device = "cpu" self.model = LLaDAModel(model_config, init_params=init_params) else: self.model = model def forward( self, input_ids: torch.LongTensor = None, inputs_embeds: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, attention_bias: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[Cache] = None, # This is a hack mitigation of an issue in transformers `4.39.x` current_mlm_prob: Optional[torch.Tensor] = None, **kwargs ) -> Union[Tuple, CausalLMOutputWithPastAndMLMProb]: if use_cache is None: use_cache = self.config.use_cache if output_attentions: raise ValueError("output_attentions is not yet supported in LLaDA") return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.forward( input_ids=input_ids, input_embeddings=inputs_embeds, attention_mask=attention_mask, attention_bias=attention_bias, past_key_values=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states, ) logits = outputs.logits hidden_states = outputs.hidden_states loss = None # [rangehow]: edited, add 重要性采样 if labels is not None: per_token_mlm_weights = None # 如果传入了 current_mlm_prob,则计算 per-token 权重 if current_mlm_prob is not None: # 加一个很小的 epsilon 防止除以 0 t = current_mlm_prob.to(logits.device) weights_per_sentence = 1.0 / (t + 1e-8) seq_len = logits.shape[1] per_token_mlm_weights = weights_per_sentence.unsqueeze(1).expand(-1, seq_len) loss = ForMaskedLMLoss( logits, labels, vocab_size=self.config.vocab_size, per_token_weights=per_token_mlm_weights, loss_normalization=getattr(self.config, "loss_normalization", "masked_tokens"), **kwargs ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output # [rangehow]: edited return CausalLMOutputWithPastAndMLMProb( loss=loss, logits=logits, past_key_values=outputs.attn_key_values, hidden_states=hidden_states, current_mlm_prob=current_mlm_prob.mean() if current_mlm_prob is not None else None, ) def can_generate(self) -> bool: return True def prepare_inputs_for_generation( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple]] = None, **kwargs ): if past_key_values: # This is because we want the model to only process the last generated token. input_ids = input_ids[:, -1:] model_inputs = {"input_ids": input_ids, "past_key_values": past_key_values} model_inputs.update(kwargs) model_inputs["use_cache"] = kwargs.pop("use_cache", self.config.use_cache) return model_inputs # TODO: these are required to make the implementation complete. # def resize_position_embeddings(self, new_num_position_embeddings: int): # pass # # def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]: # pass # # def _reorder_cache(self, past_key_values, beam_idx): # pass def get_input_embeddings(self) -> torch.nn.Module: return self.model.transformer.wte def set_input_embeddings(self, value: torch.nn.Module): self.model.transformer.wte = value def get_output_embeddings(self): if self.config.weight_tying: return self.model.transformer.wte else: return self.model.transformer.ff_out def set_output_embeddings(self, value: torch.nn.Module): if self.config.weight_tying: self.model.transformer.wte = value else: self.model.transformer.ff_out = value def tie_weights(self): if self.config.weight_tying: self.model.transformer.ff_out = self.model.transformer.wte # BUG 块推理还有问题 @torch.inference_mode() def generate( self, input_ids: torch.LongTensor, mask_token_id: Optional[int], attention_mask: Optional[torch.Tensor] = None, max_new_tokens: int = 50, num_diffusion_steps: int = 10, temperature_mlm: float = 1.0, do_sample: bool = True, top_k: Optional[int] = None, top_p: Optional[float] = None, debug: bool = False, tokenizer = None, # --- 新增和修改的参数 --- block_size: int = 5, # 新增:定义块大小 decode_top_k_positions: int = 2, # 修改:现在作用于块内 **kwargs ) -> torch.LongTensor: """ 自定义的扩散生成方法 Args: input_ids: 输入的token ids,形状为 (batch_size, seq_len) attention_mask: 注意力掩码,形状为 (batch_size, seq_len) max_new_tokens: 要生成的新token数量 (L) num_diffusion_steps: 扩散迭代次数 (T) temperature_mlm: MLM采样的温度参数 do_sample: 是否使用采样,False则使用贪心解码 top_k: top-k采样参数 top_p: top-p采样参数 mask_token_id: mask token的id,如果为None则尝试自动获取 debug: 是否启用调试模式,输出每步迭代的详细信息 tokenizer: 用于将token id转换为文本的tokenizer(可选) Returns: 生成的完整序列,形状为 (batch_size, original_seq_len + max_new_tokens) """ batch_size, original_seq_len = input_ids.shape device = input_ids.device # 1. 在输入后填充L个mask token mask_tokens = torch.full((batch_size, max_new_tokens), mask_token_id, dtype=input_ids.dtype, device=device) extended_input_ids = torch.cat([input_ids, mask_tokens], dim=1) # 扩展attention mask if attention_mask is None: attention_mask = torch.ones_like(input_ids, dtype=torch.bool) extended_attention_mask = torch.cat([ attention_mask, torch.ones((batch_size, max_new_tokens), dtype=torch.bool, device=device) ], dim=1) current_sequence = extended_input_ids.clone() if debug: logger.info("=" * 80) logger.info("🚀 开始分块扩散生成过程") logger.info(f"📊 参数: max_new_tokens={max_new_tokens}, block_size={block_size}, num_diffusion_steps_per_block={num_diffusion_steps}") initial_text = self._tokens_to_text(current_sequence[0], tokenizer, mask_token_id) logger.info(f"📝 初始序列 (Batch 0): {initial_text}") logger.info("=" * 80) # 2. 【核心改造】外层循环,按块(block)进行 num_blocks = math.ceil(max_new_tokens / block_size) for block_idx in range(num_blocks): # 计算当前块在完整序列中的绝对索引起始和结束位置 block_start_abs = original_seq_len + block_idx * block_size block_end_abs = min(block_start_abs + block_size, original_seq_len + max_new_tokens) # 如果块大小为0(例如处理最后一个不满的块时计算错误),则跳过 if block_start_abs >= block_end_abs: continue if debug: logger.info(f"\n🧱 === 开始处理 Block {block_idx + 1}/{num_blocks} (位置: {block_start_abs} to {block_end_abs-1}) ===") # 3. 内层循环,对当前块进行迭代优化 for step in range(num_diffusion_steps): if debug: logger.info(f" 🔄 --- 内部迭代 {step + 1}/{num_diffusion_steps} ---") # a. 【全局上下文】前向传播始终在完整序列上进行 outputs = self.forward( input_ids=current_sequence, attention_mask=extended_attention_mask, return_dict=True ) mlm_logits = outputs.logits # (batch_size, total_seq_len, vocab_size) # b. 【聚焦当前块】只获取当前块的 logits block_logits = mlm_logits[:, block_start_abs:block_end_abs, :] # c. 【聚焦当前块】获取当前块的 tokens,用于判断哪些位置还是 MASK current_block_tokens = current_sequence[:, block_start_abs:block_end_abs] is_mask_in_block = (current_block_tokens == mask_token_id) # 如果当前块已经没有 MASK,可以提前结束此块的迭代 if not is_mask_in_block.any(): if debug: logger.info(" [信息] 当前块已填满,提前进入下一块。") break # d. 【块内生成】从块的 logits 中生成候选 token if do_sample: # (此处采样逻辑与原版相同,但作用域是 block_logits) probs = torch.softmax(block_logits / temperature_mlm, dim=-1) block_candidate_tokens = torch.multinomial(probs.view(-1, probs.size(-1)), 1).view(probs.shape[0], probs.shape[1]) else: block_candidate_tokens = torch.argmax(block_logits, dim=-1) # e. 【块内更新】只解码块内置信度最高的 top-k 个 MASK 位置 # 计算块内所有位置的置信度 (即模型预测的token的概率) probs = torch.softmax(block_logits, dim=-1) confidence_scores, _ = torch.max(probs, dim=-1) # Shape: (batch_size, current_block_len) # 关键:将被填充过的位置的置信度设为-1,确保它们不被 topk 选中 masked_confidence_scores = torch.where( is_mask_in_block, confidence_scores, -1.0 ) # 决定这次要更新多少个token num_masks_in_block = is_mask_in_block.sum(dim=1).min().item() # 取batch中最小的mask数,保证安全 k = min(decode_top_k_positions, num_masks_in_block) if k <= 0: if debug: logger.info(" [信息] 无可用MASK位置更新,跳过此迭代。") continue # 找到置信度最高的 k 个 MASK 位置的索引 _, top_k_indices_in_block = torch.topk(masked_confidence_scores, k=k, dim=1) # 创建只在这些 top-k 位置为 True 的更新掩码 block_update_mask = torch.zeros_like(confidence_scores, dtype=torch.bool, device=device) block_update_mask.scatter_(1, top_k_indices_in_block, True) # 安全校验:确保只更新原先是 MASK 的位置 block_update_mask = block_update_mask & is_mask_in_block if debug: logger.info(f" [解码] 计划更新 {block_update_mask.sum().item()} 个位置。") # f. 【更新序列】使用掩码,将候选 token 更新到当前块 updated_block_tokens = torch.where( block_update_mask, block_candidate_tokens, current_block_tokens ) # 将更新后的块放回完整序列中 prev_sequence_for_debug = current_sequence.clone() current_sequence[:, block_start_abs:block_end_abs] = updated_block_tokens if debug: self._debug_block_step_changes( block_idx + 1, step + 1, prev_sequence_for_debug, current_sequence, block_start_abs, block_end_abs, tokenizer, mask_token_id ) if debug: logger.info("\n" + "=" * 80) logger.info("🎉 分块扩散生成完成!") for batch_idx in range(batch_size): final_text = self._tokens_to_text(current_sequence[batch_idx], tokenizer, mask_token_id) logger.info(f"📝 Batch {batch_idx} 最终序列: {final_text}") logger.info("=" * 80) return current_sequence # --- 辅助调试函数 --- # 新增一个用于分块调试的函数 def _debug_block_step_changes(self, block_num, step_num, prev_seq, curr_seq, block_start, block_end, tokenizer, mask_token_id): prev_block = prev_seq[:, block_start:block_end] curr_block = curr_seq[:, block_start:block_end] changes = prev_block != curr_block if not changes.any(): logger.info(f" [结果] Block {block_num} Step {step_num}: 无变化。") return for batch_idx in range(curr_seq.shape[0]): changed_indices = torch.where(changes[batch_idx])[0] if not len(changed_indices): continue logger.info(f" [结果] B{batch_idx} | 发生变化的位置: {[idx.item() + block_start for idx in changed_indices]}") # 只展示当前块的变化细节 for idx in changed_indices: prev_token_id = prev_block[batch_idx, idx].item() curr_token_id = curr_block[batch_idx, idx].item() prev_text = self._token_to_text(prev_token_id, tokenizer, mask_token_id) curr_text = self._token_to_text(curr_token_id, tokenizer, mask_token_id) logger.info(f" - Pos {idx.item() + block_start}: {prev_text} -> {curr_text}") # 展示当前整个序列的状态 if curr_seq.shape[0] == 1: # 如果batch_size为1,直接显示 full_text = self._tokens_to_text(curr_seq[batch_idx], tokenizer, mask_token_id) logger.info(f" [序列] {full_text}") # 旧的调试函数可以保留或删除,这里我保留并重命名以示区别 def _debug_full_step_changes(self, *args, **kwargs): # ... (这个函数逻辑可以保持不变,但现在可能不太常用了) pass def _tokens_to_text(self, tokens: torch.Tensor, tokenizer, mask_token_id: int) -> str: # ... (这个函数逻辑保持不变) if tokenizer is None: token_strs = [] for token_id in tokens.tolist(): if token_id == mask_token_id: token_strs.append("[MASK]") else: token_strs.append(f"<{token_id}>") return " ".join(token_strs) else: try: temp_tokens = tokens.clone() temp_tokens[temp_tokens == mask_token_id] = tokenizer.mask_token_id if hasattr(tokenizer, 'mask_token_id') else -1 text = tokenizer.decode(temp_tokens, skip_special_tokens=False) # return text.replace(tokenizer.decode([-1]), "[MASK]") return text except Exception as e: logger.warning(f"Tokenizer解码失败: {e}") import pdb pdb.set_trace() return self._tokens_to_text(tokens, None, mask_token_id) def _token_to_text(self, token_id: int, tokenizer, mask_token_id: int) -> str: # ... (这个函数逻辑保持不变) if token_id == mask_token_id: return "[MASK]" if tokenizer is None: return f"<{token_id}>" else: try: return f"'{tokenizer.decode([token_id], skip_special_tokens=False)}'" except: return f"<{token_id}>" # Register the model so that it is available for transformer pipelines, auto-loading, etc. # AutoModel.register(LLaDAConfig, LLaDAModelLM)