from dataclasses import dataclass
from typing import Any, Callable, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.generation import GenerationMixin
from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.processing_utils import Unpack
from transformers.utils import auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
from transformers.utils.deprecation import deprecate_kwarg
from transformers.models.qwen2.modeling_qwen2 import Qwen2RMSNorm

from .configuration import Fast_dVLMConfig, Fast_dVLMTextConfig, Fast_dVLMVisionConfig

from torch.nn.attention.flex_attention import flex_attention, create_block_mask
from functools import partial
import random
import math

logger = logging.get_logger(__name__)


# @torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs")
# @torch.compile()
def fused_flex_attention(q, k, v, mask=None):
    return flex_attention(q, k, v, block_mask=mask, enable_gqa=True)


def block_diff_mask(b, h, q_idx, kv_idx, block_size=None, n=None):
    """
    Constructs the specialized block diffusion attention mask for training, composed of three masks:
    - **Block Diagonal Mask (M_BD)**: Self-attention within noised blocks
    - **Offset Block Causal Mask (M_OBC)**: Cross-attention for conditional context
    - **Block Causal Mask (M_BC)**: Attention to update x0

    Args:
        b, h: Batch and head indices (ignored for mask logic).
        q_idx, kv_idx: Query and Key indices.
        n: Length of one copy of the sequence; positions `< n` belong to the noised x_t half and
            positions `>= n` to the clean x_0 half of the concatenated input.
        block_size: Defines the block structure.

    Returns:
        A boolean attention mask.
    """
    # Indicate whether token belongs to xt or x0
    x0_flag_q = (q_idx >= n)
    x0_flag_kv = (kv_idx >= n)

    # Compute block indices
    block_q = torch.where(x0_flag_q == 1, (q_idx - n) // block_size, q_idx // block_size)
    block_kv = torch.where(x0_flag_kv == 1, (kv_idx - n) // block_size, kv_idx // block_size)

    # **1. Block Diagonal Mask (M_BD) **
    block_diagonal = (block_q == block_kv) & (x0_flag_q == x0_flag_kv)

    # **2. Offset Block-Causal Mask (M_OBC) **
    offset_block_causal = (
        (block_q > block_kv)
        & (x0_flag_kv == 1)
        & (x0_flag_q == 0)
    )

    # **3. Block-Causal Mask (M_BC) **
    block_causal = (block_q >= block_kv) & (x0_flag_kv == 1) & (x0_flag_q == 1)

    # **4. Combine Masks **
    return block_diagonal | offset_block_causal | block_causal


def block_causal_mask(b, h, q_idx, kv_idx, block_size=None, n=None):
    # Indicate whether token belongs to xt or x0
    x0_flag_q = (q_idx >= n)
    x0_flag_kv = (kv_idx >= n)

    # Compute block indices
    block_q = torch.where(x0_flag_q == 1, (q_idx - n) // block_size, q_idx // block_size)
    block_kv = torch.where(x0_flag_kv == 1, (kv_idx - n) // block_size, kv_idx // block_size)

    # **1. Block Diagonal Mask (M_BD) **
    block_diagonal = (block_q == block_kv) & (x0_flag_q == x0_flag_kv)

    # **2. Offset Block-Causal Mask (M_OBC) **
    offset_block_causal = (
        (block_q > block_kv)
        & (x0_flag_kv == 1)
        & (x0_flag_q == 0)
    )

    # **3. Block-Causal Mask (M_BC) **
    block_causal = (q_idx >= kv_idx) & (x0_flag_kv == 1) & (x0_flag_q == 1)

    # **4. Combine Masks **
    return block_diagonal | offset_block_causal | block_causal


def hybrid_block_causal_mask_multiturn(b, h, q_idx, kv_idx, response_block_idx=None, turn_idx=None, n=None):
    """
    Multi-turn hybrid mask: Prompt uses causal, Response uses block causal.

    Args:
        response_block_idx: [seq_len] tensor, -1 for prompt, >=0 for response block index
        turn_idx: [seq_len] tensor, turn index for each position (0, 1, 2, ...)
        n: sequence length (half of total)

    Rules:
        - Each token can see all previous turns
        - Within current turn: prompt uses causal, response uses block causal
        - x_t response sees x_0: only tokens from current turn and before
        - x_0: standard causal mask

    Example for [prompt1, response1, prompt2, response2]:
        - prompt1 (turn 0): causal within turn 0 prompt
        - response1 (turn 0): sees prompt1 + block causal within response1
        - prompt2 (turn 1): sees all of turn 0 + causal within turn 1 prompt
        - response2 (turn 1): sees all of turn 0 + prompt2 + block causal within response2
    """
    x0_flag_q = (q_idx >= n)
    x0_flag_kv = (kv_idx >= n)

    pos_q = torch.where(x0_flag_q, q_idx - n, q_idx)
    pos_kv = torch.where(x0_flag_kv, kv_idx - n, kv_idx)

    block_q = response_block_idx[pos_q]
    block_kv = response_block_idx[pos_kv]
    turn_q = turn_idx[pos_q]
    turn_kv = turn_idx[pos_kv]

    is_prompt_q = (block_q < 0)
    is_prompt_kv = (block_kv < 0)

    # x_t region rules:
    # 1. Can see all previous turns: turn_q > turn_kv
    # 2. Within same turn, prompt: causal (turn same + is prompt + pos satisfies causal)
    # 3. Within same turn, response: sees all prompt in same turn + block causal for response
    # xt_same_turn_prompt_causal = ~x0_flag_q & ~x0_flag_kv & (turn_q == turn_kv) & is_prompt_q & (pos_q >= pos_kv)
    # xt_same_turn_response = ~x0_flag_q & ~x0_flag_kv & (turn_q == turn_kv) & ~is_prompt_q & (
    #     ~is_prompt_kv
    # )
    block_diagonal = ~x0_flag_q & ~x0_flag_kv & (turn_q == turn_kv)

    # **2. Offset Block-Causal Mask (M_OBC) **
    offset_block_causal = (
        (turn_q > turn_kv)
        & (x0_flag_kv == 1)
        & (x0_flag_q == 0)
    )

    # x_0 region: standard causal
    x0_causal = x0_flag_q & x0_flag_kv & (pos_q >= pos_kv)

    return (block_diagonal | offset_block_causal | x0_causal)


def eval_block_diff_mask(q_idx, kv_idx, block_size=None):
    # Compute block indices
    block_q = q_idx // block_size
    block_kv = kv_idx // block_size
    return torch.ones_like(block_q >= block_kv)


def eval_causal_mask(q_idx, kv_idx):
    return q_idx >= kv_idx


class Fast_dVLMMLP(nn.Module):
    def __init__(self, config, bias: bool = False):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))


class Fast_dVLMVisionPatchEmbed(nn.Module):
    def __init__(
        self,
        patch_size: int = 14,
        temporal_patch_size: int = 2,
        in_channels: int = 3,
        embed_dim: int = 1152,
    ) -> None:
        super().__init__()
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.in_channels = in_channels
        self.embed_dim = embed_dim

        kernel_size = [temporal_patch_size, patch_size, patch_size]
        self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        target_dtype = self.proj.weight.dtype
        hidden_states = hidden_states.view(
            -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size
        )
        hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim)
        return hidden_states


class Fast_dVLMVisionRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        super().__init__()
        inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def forward(self, seqlen: int) -> torch.Tensor:
        seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        freqs = torch.outer(seq, self.inv_freq)
        return freqs


class Fast_dVLMPatchMerger(nn.Module):
    def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2) -> None:
        super().__init__()
        self.hidden_size = context_dim * (spatial_merge_size**2)
        self.ln_q = Qwen2RMSNorm(context_dim, eps=1e-6)
        self.mlp = nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.GELU(),
            nn.Linear(self.hidden_size, dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.mlp(self.ln_q(x).view(-1, self.hidden_size))
        return x


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb_vision(
    q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
    orig_q_dtype = q.dtype
    orig_k_dtype = k.dtype
    q, k = q.float(), k.float()
    cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    q_embed = q_embed.to(orig_q_dtype)
    k_embed = k_embed.to(orig_k_dtype)
    return q_embed, k_embed
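# Illustrative sketch (an assumption, not part of the model code below): the `mask_mod`
# helpers defined at the top of this module (`block_diff_mask`, `block_causal_mask`, ...)
# are meant to be compiled into a flex-attention `BlockMask` and passed as `mask` to
# `fused_flex_attention` during training, roughly as follows. The helper name and the
# chosen `block_size` are hypothetical.
def _example_block_diff_block_mask(n: int, block_size: int, device: str = "cuda"):
    """Sketch: build a BlockMask for a concatenated [x_t, x_0] sequence of length 2 * n."""
    mask_mod = partial(block_diff_mask, block_size=block_size, n=n)
    # B=None / H=None broadcast the mask over batch and heads; queries and keys span
    # both halves of the concatenated sequence, hence 2 * n.
    return create_block_mask(mask_mod, B=None, H=None, Q_LEN=2 * n, KV_LEN=2 * n, device=device)


# Example usage (sketch): attn_out = fused_flex_attention(q, k, v, mask=_example_block_diff_block_mask(n, 32))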
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class Fast_dVLMVisionAttention(nn.Module): def __init__(self, config: Fast_dVLMVisionConfig) -> None: super().__init__() self.dim = config.hidden_size self.num_heads = config.num_heads self.head_dim = self.dim // self.num_heads self.num_key_value_groups = 1 # needed for eager attention self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True) self.proj = nn.Linear(self.dim, self.dim) self.scaling = self.head_dim**-0.5 self.config = config self.attention_dropout = 0.0 self.is_causal = False def forward( self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs, ) -> torch.Tensor: seq_length = hidden_states.shape[0] query_states, key_states, value_states = ( self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) ) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin) query_states = query_states.transpose(0, 1).unsqueeze(0) key_states = key_states.transpose(0, 1).unsqueeze(0) value_states = value_states.transpose(0, 1).unsqueeze(0) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] if self.config._attn_implementation == "flash_attention_2": # Flash Attention 2: Use cu_seqlens for variable length attention max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() attn_output, _ = attention_interface( self, query_states, key_states, value_states, attention_mask=None, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, cu_seq_lens_q=cu_seqlens, cu_seq_lens_k=cu_seqlens, max_length_q=max_seqlen, max_length_k=max_seqlen, is_causal=False, **kwargs, ) else: # Other implementations: Process each chunk separately lengths = cu_seqlens[1:] - cu_seqlens[:-1] splits = [ torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states) ] attn_outputs = [ attention_interface( self, q, k, 
v, attention_mask=None, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, is_causal=False, **kwargs, )[0] for q, k, v in zip(*splits) ] attn_output = torch.cat(attn_outputs, dim=1) attn_output = attn_output.reshape(seq_length, -1).contiguous() attn_output = self.proj(attn_output) return attn_output class Fast_dVLMVisionBlock(GradientCheckpointingLayer): def __init__(self, config, attn_implementation: str = "sdpa") -> None: super().__init__() self.norm1 = Qwen2RMSNorm(config.hidden_size, eps=1e-6) self.norm2 = Qwen2RMSNorm(config.hidden_size, eps=1e-6) self.attn = Fast_dVLMVisionAttention(config=config) self.mlp = Fast_dVLMMLP(config, bias=True) def forward( self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs, ) -> torch.Tensor: hidden_states = hidden_states + self.attn( self.norm1(hidden_states), cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb, position_embeddings=position_embeddings, **kwargs, ) hidden_states = hidden_states + self.mlp(self.norm2(hidden_states)) return hidden_states @auto_docstring class Fast_dVLMPreTrainedModel(PreTrainedModel): config: Fast_dVLMConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Fast_dVLMDecoderLayer", "Fast_dVLMVisionBlock"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True _supports_attention_backend = True def gradient_checkpointing_enable( self, gradient_checkpointing_kwargs: Optional[dict[str, Any]] = None, ) -> None: """ Ensure non-reentrant checkpointing when the trainers call into Transformers' gradient checkpointing helper. Flash attention kernels used by MDM do not support reentrant checkpointing, so we request the safer path by default. 
""" if gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {} else: gradient_checkpointing_kwargs = dict(gradient_checkpointing_kwargs) gradient_checkpointing_kwargs.setdefault("use_reentrant", False) super().gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) class Fast_dVLMVisionTransformerPretrainedModel(Fast_dVLMPreTrainedModel): config: Fast_dVLMVisionConfig _no_split_modules = ["Fast_dVLMVisionBlock"] def __init__(self, config, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.spatial_merge_size = config.spatial_merge_size self.patch_size = config.patch_size self.fullatt_block_indexes = config.fullatt_block_indexes self.window_size = config.window_size self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size self.patch_embed = Fast_dVLMVisionPatchEmbed( patch_size=config.patch_size, temporal_patch_size=config.temporal_patch_size, in_channels=config.in_channels, embed_dim=config.hidden_size, ) head_dim = config.hidden_size // config.num_heads self.rotary_pos_emb = Fast_dVLMVisionRotaryEmbedding(head_dim // 2) self.blocks = nn.ModuleList([Fast_dVLMVisionBlock(config) for _ in range(config.depth)]) self.merger = Fast_dVLMPatchMerger( dim=config.out_hidden_size, context_dim=config.hidden_size, spatial_merge_size=config.spatial_merge_size, ) self.gradient_checkpointing = False def rot_pos_emb(self, grid_thw): pos_ids = [] for t, h, w in grid_thw: hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) hpos_ids = hpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) hpos_ids = hpos_ids.permute(0, 2, 1, 3) hpos_ids = hpos_ids.flatten() wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) wpos_ids = wpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) wpos_ids = wpos_ids.permute(0, 2, 1, 3) wpos_ids = wpos_ids.flatten() pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) pos_ids = torch.cat(pos_ids, dim=0) max_grid_size = grid_thw[:, 1:].max() rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) return rotary_pos_emb def get_window_index(self, grid_thw): window_index: list = [] cu_window_seqlens: list = [0] window_index_id = 0 vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size for grid_t, grid_h, grid_w in grid_thw: llm_grid_h, llm_grid_w = ( grid_h // self.spatial_merge_size, grid_w // self.spatial_merge_size, ) index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w) pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100) index_padded = index_padded.reshape( grid_t, num_windows_h, vit_merger_window_size, num_windows_w, vit_merger_window_size, ) index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape( grid_t, num_windows_h * num_windows_w, vit_merger_window_size, vit_merger_window_size, ) seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) index_padded = index_padded.reshape(-1) index_new = index_padded[index_padded != -100] window_index.append(index_new + window_index_id) cu_seqlens_tmp = seqlens.cumsum(0) * 
                self.spatial_merge_unit + cu_window_seqlens[-1]
            cu_window_seqlens.extend(cu_seqlens_tmp.tolist())
            window_index_id += (grid_t * llm_grid_h * llm_grid_w).item()

        window_index = torch.cat(window_index, dim=0)
        return window_index, cu_window_seqlens

    def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.Tensor` of shape `(seq_len, in_channels * temporal_patch_size * patch_size * patch_size)`):
                The flattened pixel patches to embed; they are projected by `patch_embed` before entering the blocks.
            grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
                The temporal, height and width of feature shape of each image in LLM.

        Returns:
            `torch.Tensor`: The merged visual features.
        """
        hidden_states = self.patch_embed(hidden_states)
        rotary_pos_emb = self.rot_pos_emb(grid_thw)
        window_index, cu_window_seqlens = self.get_window_index(grid_thw)
        cu_window_seqlens = torch.tensor(
            cu_window_seqlens,
            device=hidden_states.device,
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)

        seq_len, _ = hidden_states.size()
        hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
        hidden_states = hidden_states[window_index, :, :]
        hidden_states = hidden_states.reshape(seq_len, -1)
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
        rotary_pos_emb = rotary_pos_emb[window_index, :, :]
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
        emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
        position_embeddings = (emb.cos(), emb.sin())

        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            # - FA2 requires that cu_seqlens_q must have dtype int32
            # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)

        for layer_num, blk in enumerate(self.blocks):
            if layer_num in self.fullatt_block_indexes:
                cu_seqlens_now = cu_seqlens
            else:
                cu_seqlens_now = cu_window_seqlens

            hidden_states = blk(
                hidden_states,
                cu_seqlens=cu_seqlens_now,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.merger(hidden_states)
        reverse_indices = torch.argsort(window_index)
        hidden_states = hidden_states[reverse_indices, :]

        return hidden_states


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for Fast_dVLM model outputs, with hidden states and attentions.
    """
)
class Fast_dVLMModelOutputWithPast(ModelOutput):
    r"""
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`)

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
        The rope index difference between sequence length and multimodal rope.
""" last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None class Fast_dVLMRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: Fast_dVLMTextConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): # In contrast to other models, Fast_dVLM has different position ids for the grids # So we expand the inv_freq to shape (3, ...) inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1) position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions) device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) class Qwen2MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1): """Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/). Explanation: Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately. Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding. For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal, height and width) of text embedding is always the same, so the text embedding rotary position embedding has no difference with modern LLMs. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. 
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offsetted position ids when working with a KV-cache.
        mrope_section(`List(int)`):
            Multimodal rope section is for channel dimension of temporal, height and width in rope calculation.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.

    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    mrope_section = mrope_section * 2
    cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
        unsqueeze_dim
    )
    sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
        unsqueeze_dim
    )
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class Fast_dVLMAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention:
    Longformer and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: Fast_dVLMTextConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead "
                "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.is_causal = True
        self.attention_dropout = config.attention_dropout
        self.rope_scaling = config.rope_scaling
        self.scaling = self.head_dim**-0.5

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None self.rotary_emb = Fast_dVLMRotaryEmbedding(config=config) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC update_kv_cache: bool = False, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) cos, sin = position_embeddings if self.training: #split q into two parts q_1 = query_states[:,:,:query_states.shape[2]//2] q_2 = query_states[:,:,query_states.shape[2]//2:] #split k into two parts k_1 = key_states[:,:,:key_states.shape[2]//2] k_2 = key_states[:,:,key_states.shape[2]//2:] q_1, k_1 = apply_multimodal_rotary_pos_emb(q_1, k_1, cos, sin, self.rope_scaling["mrope_section"]) q_2, k_2 = apply_multimodal_rotary_pos_emb(q_2, k_2, cos, sin, self.rope_scaling["mrope_section"]) query_states = torch.cat((q_1, q_2), dim=-2) key_states = torch.cat((k_1, k_2), dim=-2) else: query_states, key_states = apply_multimodal_rotary_pos_emb( query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] ) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models if update_kv_cache: key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) # elif len(past_key_values) > self.layer_idx: elif len(past_key_values) > self.layer_idx and past_key_values[self.layer_idx][0] is not None: key_states = torch.cat((past_key_values[self.layer_idx][0], key_states), dim=-2) value_states = torch.cat((past_key_values[self.layer_idx][1], value_states), dim=-2) if self.training: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() attn_output = fused_flex_attention(query_states, key_states, value_states, mask=attention_mask) attn_output = attn_output.transpose(1, 2).contiguous() attn_weights = None else: attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, 
sliding_window=self.sliding_window, position_ids=position_ids, # pass positions for FA2 **kwargs, ) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Fast_dVLMDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Fast_dVLMTextConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size if config.use_sliding_window and config._attn_implementation != "flash_attention_2": logger.warning_once( f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; " "unexpected results may be encountered." ) self.self_attn = Fast_dVLMAttention(config, layer_idx) self.mlp = Qwen2MLP(config) self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.attention_type = config.layer_types[layer_idx] @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC update_kv_cache: bool = False, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. 
kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, update_kv_cache=update_kv_cache, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs @auto_docstring class Fast_dVLMTextModel(Fast_dVLMPreTrainedModel): config: Fast_dVLMTextConfig def __init__(self, config: Fast_dVLMTextConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [Fast_dVLMDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = Fast_dVLMRotaryEmbedding(config=config) self.has_sliding_layers = "sliding_attention" in self.config.layer_types self.gradient_checkpointing = True # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, update_kv_cache: bool = False, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: r""" update_kv_cache (`bool`, *optional*, defaults to `False`): Whether to update the KV cache with the current forward pass outputs. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
                )
                use_cache = False

        # torch.jit.trace() doesn't support cache objects in the output
        if use_cache and past_key_values is None and not torch.jit.is_tracing():
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        # the hard coded `3` is for temporal, height and width.
        if position_ids is None:
            position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
        elif position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)

        # NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions
        # where each dim indicates visual spatial positions for temporal/height/width grids.
        # There are two scenarios when FA2-like packed masking might be activated.
        # 1. User specifically passed packed `position_ids` and no attention mask.
        #    In this case we expect the user to create correct position ids for all 3 grids
        #    and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
        # 2. User runs forward with no attention mask and no position ids. In this case, position ids
        #    are prepared by the model (`get_rope_index`) as a `[4, bs, seq-len]` tensor. Text-only positions are
        #    prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass
        #    text-only positions will cause incorrect mask construction; do not change `prepare_inputs_for_generation`
        if position_ids.ndim == 3 and position_ids.shape[0] == 4:
            text_position_ids = position_ids[0]
            position_ids = position_ids[1:]
        else:
            # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
            text_position_ids = None

        # It may already have been prepared by e.g.
`generate` # if not isinstance(causal_mask_mapping := attention_mask, dict): # # Prepare mask arguments # mask_kwargs = { # "config": self.config, # "input_embeds": inputs_embeds, # "attention_mask": attention_mask, # "cache_position": cache_position, # "past_key_values": past_key_values, # "position_ids": text_position_ids, # } # # Create the masks # causal_mask_mapping = { # "full_attention": create_causal_mask(**mask_kwargs), # } # # The sliding window alternating layers are not always activated depending on the config # if self.has_sliding_layers: # causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask.to(device=hidden_states.device), position_ids=text_position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, update_kv_cache=update_kv_cache, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) @auto_docstring class Fast_dVLMModel(Fast_dVLMPreTrainedModel): base_model_prefix = "" _checkpoint_conversion_mapping = {"^model": "language_model"} # Reference: fix gemma3 grad acc #37208 accepts_loss_kwargs = False config: Fast_dVLMConfig _no_split_modules = ["Fast_dVLMDecoderLayer", "Fast_dVLMVisionBlock"] def __init__(self, config): super().__init__(config) self.visual = Fast_dVLMVisionTransformerPretrainedModel._from_config(config.vision_config) self.language_model = Fast_dVLMTextModel._from_config(config.text_config) self.rope_deltas = None # cache rope_deltas here self.use_block_causal_mask = config.use_block_causal_mask # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_decoder(self, decoder): self.language_model = decoder def get_decoder(self): return self.language_model def get_rope_index( self, input_ids: Optional[torch.LongTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, second_per_grid_ts: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Calculate the 3D rope index based on image and video's temporal, height and width in LLM. Explanation: Each embedding sequence contains vision embedding and text embedding or just contains text embedding. For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs. 
            Examples:
                input_ids: [T T T T T], here T is for text.
                temporal position_ids: [0, 1, 2, 3, 4]
                height position_ids: [0, 1, 2, 3, 4]
                width position_ids: [0, 1, 2, 3, 4]

            For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
            and 1D rotary position embedding for text part.
            Examples:
                Temporal (Time): 3 patches, representing different segments of the video in time.
                Height: 2 patches, dividing each frame vertically.
                Width: 2 patches, dividing each frame horizontally.
                We also have some important parameters:
                fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
                tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens"
                    are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per
                    second. So each second of the video will be represented with 25 separate time points. It essentially
                    defines the temporal granularity.
                temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
                interval: The step size for the temporal position IDs, calculated as
                    tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each
                    temporal patch will have a difference of 50 in the temporal position IDs.
                input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
                vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
                vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
                vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
                text temporal position_ids: [101, 102, 103, 104, 105]
                text height position_ids: [101, 102, 103, 104, 105]
                text width position_ids: [101, 102, 103, 104, 105]
                Here we calculate the text start position_ids as the max vision position_ids plus 1.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
                The temporal, height and width of feature shape of each image in LLM.
            video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
                The temporal, height and width of feature shape of each video in LLM.
            second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*):
                The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
Returns: position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) """ spatial_merge_size = self.config.vision_config.spatial_merge_size image_token_id = self.config.image_token_id video_token_id = self.config.video_token_id vision_start_token_id = self.config.vision_start_token_id mrope_position_deltas = [] if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): total_input_ids = input_ids if attention_mask is not None: attention_mask = attention_mask == 1 position_ids = torch.ones( 3, input_ids.shape[0], input_ids.shape[1], dtype=input_ids.dtype, device=input_ids.device, ) image_index, video_index = 0, 0 for i, input_ids in enumerate(total_input_ids): if attention_mask is not None: input_ids = input_ids[attention_mask[i]] image_nums, video_nums = 0, 0 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1) vision_tokens = input_ids[vision_start_indices + 1] image_nums = (vision_tokens == image_token_id).sum() video_nums = (vision_tokens == video_token_id).sum() input_tokens = input_ids.tolist() llm_pos_ids_list: list = [] st = 0 remain_images, remain_videos = image_nums, video_nums if image_nums + video_nums == 0: image_index += 1 video_index += 1 continue for _ in range(image_nums + video_nums): if image_token_id in input_tokens and remain_images > 0: ed_image = input_tokens.index(image_token_id, st) else: ed_image = len(input_tokens) + 1 if video_token_id in input_tokens and remain_videos > 0: ed_video = input_tokens.index(video_token_id, st) else: ed_video = len(input_tokens) + 1 if ed_image < ed_video: t, h, w = ( image_grid_thw[image_index][0], image_grid_thw[image_index][1], image_grid_thw[image_index][2], ) second_per_grid_t = 0 image_index += 1 remain_images -= 1 ed = ed_image else: t, h, w = ( video_grid_thw[video_index][0], video_grid_thw[video_index][1], video_grid_thw[video_index][2], ) if second_per_grid_ts is not None: second_per_grid_t = second_per_grid_ts[video_index] else: second_per_grid_t = 1.0 video_index += 1 remain_videos -= 1 ed = ed_video llm_grid_t, llm_grid_h, llm_grid_w = ( t.item(), h.item() // spatial_merge_size, w.item() // spatial_merge_size, ) text_len = ed - st st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) range_tensor = torch.arange(llm_grid_t).view(-1, 1) expanded_range = range_tensor.expand(-1, llm_grid_h * llm_grid_w) ## normalize type, send to device. 
second_per_grid_t = torch.as_tensor( second_per_grid_t, dtype=range_tensor.dtype, device=range_tensor.device ) time_tensor = expanded_range * second_per_grid_t * self.config.vision_config.tokens_per_second time_tensor_long = time_tensor.long() t_index = time_tensor_long.flatten() h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx) st = ed + llm_grid_t * llm_grid_h * llm_grid_w if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) if attention_mask is not None: position_ids[..., i, attention_mask[i]] = llm_positions.to(position_ids.device) else: position_ids[..., i, :] = llm_positions.to(position_ids.device) mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i])) mrope_position_deltas = torch.tensor(mrope_position_deltas).unsqueeze(1).to(device=input_ids.device) return position_ids, mrope_position_deltas else: # if attention_mask is not None: # position_ids = attention_mask.long().cumsum(-1) - 1 # position_ids.masked_fill_(attention_mask == 0, 1) # position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) # max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] # mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1] # else: if self.training: position_ids = ( torch.arange(input_ids.shape[1] // 2, device=input_ids.device) .view(1, 1, -1) .expand(3, input_ids.shape[0], -1) ) else: if attention_mask is not None: position_ids = (attention_mask.long().cumsum(-1) - 1)[-1] position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1] else: position_ids = ( torch.arange(input_ids.shape[1], device=input_ids.device) .view(1, 1, -1) .expand(3, input_ids.shape[0], -1) ) mrope_position_deltas = torch.zeros( [input_ids.shape[0], 1], device=input_ids.device, dtype=input_ids.dtype, ) return position_ids, mrope_position_deltas def get_video_features( self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None ): """ Encodes videos into continuous embeddings that can be forwarded to the language model. Args: pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input videos. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. """ pixel_values_videos = pixel_values_videos.type(self.visual.dtype) video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) split_sizes = (video_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist() video_embeds = torch.split(video_embeds, split_sizes) return video_embeds def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None): """ Encodes images into continuous embeddings that can be forwarded to the language model. 
Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. """ pixel_values = pixel_values.type(self.visual.dtype) image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw) split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist() image_embeds = torch.split(image_embeds, split_sizes) return image_embeds def get_placeholder_mask( self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: Optional[torch.FloatTensor] = None, video_features: Optional[torch.FloatTensor] = None, ): """ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. """ if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) special_video_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_video_mask = special_video_mask.all(-1) else: special_image_mask = input_ids == self.config.image_token_id special_video_mask = input_ids == self.config.video_token_id n_image_tokens = special_image_mask.sum() special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel(): raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}" ) n_video_tokens = special_video_mask.sum() special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel(): raise ValueError( f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" ) return special_image_mask, special_video_mask def eval_mask(self, seqlen, block_size, cache_seq_len, update_kv_cache=False, use_block_causal_mask=False): q_indices = torch.arange(seqlen, device=self.device) + cache_seq_len k_indices = torch.arange(seqlen + cache_seq_len, device=self.device) if use_block_causal_mask and update_kv_cache: mask = eval_causal_mask(q_indices[:, None], k_indices[None, :]) else: mask = eval_block_diff_mask( q_idx=q_indices[:, None], kv_idx=k_indices[None, :], block_size=block_size ) return mask @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, cache_position: 
Optional[torch.LongTensor] = None, second_per_grid_ts: Optional[torch.Tensor] = None, update_kv_cache: bool = False, bd_size: Optional[int] = None, **kwargs, ) -> Union[tuple, Fast_dVLMModelOutputWithPast]: r""" image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*): The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. update_kv_cache (`bool`, *optional*, defaults to `False`): Whether to update the KV cache with the current forward pass outputs. bd_size (`int`, *optional*): Block diffusion size to use for this forward pass. Overrides the model default when set. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if pixel_values is not None: image_embeds = self.get_image_features(pixel_values, image_grid_thw) image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) image_mask, _ = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds ) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) elif self.training and pixel_values_videos is None: # Dummy forward through vision encoder to keep FSDP/DeepSpeed gradient # sync alive when this rank has a text-only batch but other ranks have images. # Mirrors VILA's __embed_media_tokens dummy trick (llava_arch.py L519-526). _merge = self.visual.spatial_merge_size # typically 2 _t = self.visual.patch_embed.temporal_patch_size _p = self.visual.patch_embed.patch_size _c = self.visual.patch_embed.in_channels # 3 # grid_thw [1, _merge, _merge] → seq_len = _merge*_merge = spatial_merge_unit dummy_pixel = torch.zeros( _merge * _merge, _t * _p * _p * _c, dtype=inputs_embeds.dtype, device=inputs_embeds.device, ) dummy_grid = torch.tensor([[1, _merge, _merge]], dtype=torch.long, device=inputs_embeds.device) dummy_embeds = self.visual(dummy_pixel, grid_thw=dummy_grid) inputs_embeds = inputs_embeds + dummy_embeds.sum() * 0 if pixel_values_videos is not None: video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw) video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) _, video_mask = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds ) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) if position_ids is None: # Calculate RoPE index once per generation in the pre-fill stage only. 
# When compiling, we can't check tensor values, thus we check only input length # It is safe to assume that `length!=1` means we're in pre-fill because compiled # models currently cannot do assisted decoding prefill_compiled_stage = is_torchdynamo_compiling() and ( (input_ids is not None and input_ids.shape[1] != 1) or (inputs_embeds is not None and inputs_embeds.shape[1] != 1) ) prefill_noncompiled_stage = not is_torchdynamo_compiling() and ( (cache_position is not None and cache_position[0] == 0) or (past_key_values is None or past_key_values.get_seq_length() == 0) ) if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None: position_ids, rope_deltas = self.get_rope_index( input_ids, image_grid_thw, video_grid_thw, second_per_grid_ts=second_per_grid_ts, attention_mask=attention_mask, ) self.rope_deltas = rope_deltas else: batch_size, seq_length, _ = inputs_embeds.shape if self.training and pixel_values is None and pixel_values_videos is None: # only train on text seq_length = seq_length // 2 position_ids = torch.arange(seq_length, device=inputs_embeds.device) position_ids = position_ids.view(1, 1, -1).expand(3, batch_size, -1) # if cache_position is not None: # delta = (cache_position[0] + self.rope_deltas).to(inputs_embeds.device) if past_key_values is not None: delta = (past_key_values.get_seq_length() + self.rope_deltas).to(inputs_embeds.device) else: delta = torch.zeros((batch_size, seq_length), device=inputs_embeds.device) delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=1) position_ids = position_ids + delta.to(position_ids.device) position_ids = position_ids.to(inputs_embeds.device) if not self.training: attention_mask = self.eval_mask(inputs_embeds.shape[1], self.bd_size if bd_size is None else bd_size, 0 if past_key_values is None else past_key_values.get_seq_length(), update_kv_cache=update_kv_cache, use_block_causal_mask=self.use_block_causal_mask).to(inputs_embeds.device) outputs = self.language_model( input_ids=None, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, update_kv_cache=update_kv_cache, **kwargs, ) output = Fast_dVLMModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=self.rope_deltas, ) return output if return_dict else output.to_tuple() @dataclass @auto_docstring( custom_intro=""" Base class for Fast_dVLM causal language model (or autoregressive) outputs. """ ) class Fast_dVLMCausalLMOutputWithPast(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None class Fast_dVLMForConditionalGeneration(Fast_dVLMPreTrainedModel, GenerationMixin): config_class = Fast_dVLMConfig _checkpoint_conversion_mapping = { "^visual": "model.visual", r"^model(?!\.(language_model|visual))": "model.language_model", } _tied_weights_keys = ["lm_head.weight"] # Reference: fix gemma3 grad acc #37208 accepts_loss_kwargs = True def __init__(self, config): super().__init__(config) self.model = Fast_dVLMModel(config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.bd_size = config.bd_size self.model.bd_size = self.bd_size self.complementary_mask = getattr(config, 'complementary_mask', False) self.always_mask_im_end = getattr(config, 'always_mask_im_end', False) self.flexible_bd_size = getattr(config, 'flexible_bd_size', False) self.use_block_causal_mask = getattr(config, 'use_block_causal_mask', False) self.anneal_block_size = getattr(config, 'anneal_block_size', False) self.enable_efficient_vision_embed = getattr(config, 'enable_efficient_vision_embed', False) self.minimum_noise_level = getattr(config, 'minimum_noise_level', 0.0) self.entropy_loss = getattr(config, 'entropy_loss', False) self.entropy_loss_weight = getattr(config, 'entropy_loss_weight', 1.0) self.block_causal_no_dynamic = getattr(config, 'block_causal_no_dynamic', False) self.im_end_token_id = 151645 # <|im_end|> token id # self.max_context_length = 4096 # Vision-to-text aligner (if vision output dim != text hidden dim) vision_out_dim = config.vision_config.out_hidden_size text_hidden = config.text_config.hidden_size if vision_out_dim != text_hidden: self.vision_to_text_proj = nn.Linear(vision_out_dim, text_hidden, bias=False) for p in self.vision_to_text_proj.parameters(): p.requires_grad = False logger.info(f"Vision-to-text aligner: {vision_out_dim} -> {text_hidden}") else: self.vision_to_text_proj = None self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def set_decoder(self, decoder): self.model.set_decoder(decoder) def get_decoder(self): return self.model.get_decoder() def get_video_features( self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None ): return self.model.get_video_features(pixel_values_videos, video_grid_thw) def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None): return self.model.get_image_features(pixel_values, image_grid_thw) # Make modules available through conditional class for BC @property def language_model(self): return 
self.model.language_model @property def visual(self): return self.model.visual def gen_mask(self, seqlen, block_size, B, H): # flex_attention requires variables captured by the mask closure to be Tensors, # so convert the ints to Tensors on the matching device block_size_t = torch.tensor(block_size, device=self.device, dtype=torch.int32) n_t = torch.tensor(seqlen, device=self.device, dtype=torch.int32) mask = create_block_mask( # pass the Tensor versions instead of the raw block_size/seqlen ints partial(block_diff_mask, block_size=block_size_t, n=n_t), B=B, H=H, Q_LEN=seqlen*2, KV_LEN=seqlen*2 ) return mask def gen_block_causal_mask(self, seqlen, block_size, B, H): block_size_t = torch.tensor(block_size, device=self.device, dtype=torch.int32) n_t = torch.tensor(seqlen, device=self.device, dtype=torch.int32) mask = create_block_mask( partial(block_causal_mask, block_size=block_size_t, n=n_t), B=B, H=H, Q_LEN=seqlen*2, KV_LEN=seqlen*2 ) return mask def compute_response_block_idx(self, labels, block_size): """ Compute block index and turn index for each position. Each response segment has independent blocks. Example: prompt1(3) + response1(14) + prompt2(2) + response2(2) - turn_idx: [0,0,0, 0,0,...,0, 1,1, 1,1] (prompt+response = same turn) - response1: 14 tokens → 2 blocks (0, 1) with sizes (8, 6) - response2: 2 tokens → 1 block (2) with size (2) - Total: 3 blocks Returns: response_block_idx: [seq_len] where prompt=-1, response=block_idx turn_idx: [seq_len] turn index for each position n_blocks: total number of blocks """ labels_single = labels[0] # [seq_len] seq_len = labels_single.shape[0] response_mask = (labels_single != -100) response_block_idx = torch.full((seq_len,), -1, device=labels.device, dtype=torch.int64) turn_idx = torch.zeros((seq_len,), device=labels.device, dtype=torch.int64) current_block = 0 in_response = False response_pos_in_segment = 0 # position within current response segment for i in range(seq_len): if response_mask[i]: if not in_response: # Start of new response segment in_response = True response_pos_in_segment = 0 # Block index within this segment + global offset block_in_segment = response_pos_in_segment // block_size response_block_idx[i] = current_block + block_in_segment response_pos_in_segment += 1 else: if in_response: # End of response segment, update current_block and start new turn n_blocks_in_segment = (response_pos_in_segment + block_size - 1) // block_size current_block += n_blocks_in_segment in_response = False for i in range(1, seq_len): if response_block_idx[i] != response_block_idx[i-1]: turn_idx[i] = turn_idx[i-1] + 1 else: turn_idx[i] = turn_idx[i-1] # Handle case where sequence ends with response if in_response: n_blocks_in_segment = (response_pos_in_segment + block_size - 1) // block_size current_block += n_blocks_in_segment n_blocks = current_block return response_block_idx, turn_idx, n_blocks def gen_hybrid_block_causal_mask(self, seqlen, response_block_idx, turn_idx, B, H): """Generate hybrid mask: prompt causal, response block causal.""" n_t = torch.tensor(seqlen, device=self.device, dtype=torch.int32) mask = create_block_mask( partial(hybrid_block_causal_mask_multiturn, response_block_idx=response_block_idx, turn_idx=turn_idx, n=n_t), B=B, H=H, Q_LEN=seqlen*2, KV_LEN=seqlen*2 ) return mask def compute_entropy_loss(self, logits, labels, num_items_in_batch=None): """Compute entropy loss with optional global normalization.
Args: logits: Model logits labels: Ground truth labels (-100 for ignored tokens) num_items_in_batch: Global number of non-ignored tokens for normalization. If provided, uses sum/num_items_in_batch for global norm. If None, uses mean() for micro-batch norm. """ non_ignore_mask = labels != -100 logits = logits[non_ignore_mask] labels = labels[non_ignore_mask] correct_mask = logits.argmax(dim=-1) == labels compute_logits = logits[correct_mask] if correct_mask.sum() == 0: return torch.tensor(0.0, device=logits.device) p = F.softmax(compute_logits, dim=-1) log_p = F.log_softmax(compute_logits, dim=-1) entropy = -torch.sum(p * log_p, dim=-1) if num_items_in_batch is not None: # Global normalization: use same denominator as cross entropy loss return entropy.sum() / num_items_in_batch else: return entropy.mean() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, second_per_grid_ts: Optional[torch.Tensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, mask_id: Optional[int] = 151665, update_kv_cache: bool = False, eval_bd_size: Optional[int] = None, **kwargs, ) -> Union[tuple, Fast_dVLMCausalLMOutputWithPast]: r""" image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*): The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. mask_id (`int`, *optional*, defaults to `151665`): Token id used as the mask placeholder for block diffusion. update_kv_cache (`bool`, *optional*, defaults to `False`): Whether to update the KV cache with the current forward pass outputs. eval_bd_size (`int`, *optional*): Block diffusion size to use during evaluation. Overrides the model default when set. 
""" # input_ids = torch.tensor([[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]]).to(input_ids.device, dtype=input_ids.dtype) # labels = torch.tensor([[-100,-100,3,4,5,6,-100,-100,-100,-100,11,12,13,14,15]]).to(labels.device, dtype=labels.dtype) # pixel_values = None # pixel_values_videos = None # self.bd_size = 2 output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if self.training: if self.anneal_block_size: # Get update_ratio from kwargs (passed by trainer) update_ratio = kwargs.get('update_ratio', 1.0) # Compute possible bd_sizes: [2, 4, 8, ..., target_bd_size] max_power = int(math.log2(self.bd_size)) possible_bd_sizes = [2**i for i in range(2, max_power + 1)] # Start from 4 # sqrt mapping: larger block sizes get more training time scaled_ratio = math.sqrt(update_ratio) idx = min(int(scaled_ratio * len(possible_bd_sizes)), len(possible_bd_sizes) - 1) bd_size = possible_bd_sizes[idx] elif self.flexible_bd_size: max_power = int(math.log2(self.bd_size)) possible_bd_sizes = [2**i for i in range(max_power + 1)] bd_size = random.choice(possible_bd_sizes) else: bd_size = self.bd_size if pixel_values is None and pixel_values_videos is None: # only train on text batch_size, seq_len = input_ids.shape original_labels = labels.clone() original_input_ids = input_ids.clone() # Compute response block index: -1 for prompt, >=0 for response # Each response segment has independent blocks response_mask = (labels != -100) # [B, seq_len] eps = self.minimum_noise_level if self.use_block_causal_mask and not self.block_causal_no_dynamic: response_block_idx, turn_idx, n_blocks = self.compute_response_block_idx(labels, bd_size) # Sample t for each block: [n_blocks] # random sample t for each block from [self.minimum_noise_level, 1] t = torch.rand((n_blocks,), device=input_ids.device) p_mask_per_block = (1 - eps) * t + eps # Create mask_indices: [B, seq_len] mask_indices = torch.zeros_like(labels, dtype=torch.bool) for i in range(seq_len): block_i = response_block_idx[i].item() if block_i >= 0: # response token mask_indices[:, i] = torch.rand((batch_size,), device=input_ids.device) < p_mask_per_block[block_i] else: input_ids = input_ids.reshape(input_ids.shape[0] * input_ids.shape[1] // bd_size, bd_size) b, l = input_ids.shape t = torch.rand((b,), device=input_ids.device) p_mask = (1 - eps) * t + eps p_mask = p_mask[:, None].repeat(1, l) mask_indices = torch.rand((b, l), device=input_ids.device) < p_mask mask_indices = mask_indices.reshape(labels.shape) & response_mask input_ids = input_ids.reshape(labels.shape) # Always mask <|im_end|> in response if self.always_mask_im_end: im_end_mask = (input_ids == self.im_end_token_id) & response_mask mask_indices = mask_indices | im_end_mask # Apply mask only to response noisy_input_ids = input_ids.clone() noisy_input_ids[mask_indices] = mask_id # Update labels: only predict masked response tokens labels = labels.clone() labels[~mask_indices] = -100 # Concatenate [noisy | clean] input_ids = torch.cat([noisy_input_ids, original_input_ids], dim=1) # Complementary version if self.complementary_mask: complementary_mask_indices = response_mask & ~mask_indices if self.always_mask_im_end: im_end_mask = (original_input_ids == self.im_end_token_id) & response_mask complementary_mask_indices = complementary_mask_indices | im_end_mask complementary_noisy_input_ids = original_input_ids.clone() 
complementary_noisy_input_ids[complementary_mask_indices] = mask_id complementary_labels = original_labels.clone() complementary_labels[~complementary_mask_indices] = -100 complementary_input_ids = torch.cat([complementary_noisy_input_ids, original_input_ids], dim=1) input_ids = torch.cat([input_ids, complementary_input_ids], dim=0) labels = torch.cat([labels, complementary_labels], dim=0) if self.use_block_causal_mask: if self.block_causal_no_dynamic: attention_mask = self.gen_block_causal_mask(seq_len, bd_size, input_ids.shape[0], self.config.num_attention_heads) else: attention_mask = self.gen_hybrid_block_causal_mask(seq_len, response_block_idx, turn_idx, input_ids.shape[0], self.config.num_attention_heads) else: attention_mask = self.gen_mask(seq_len, bd_size, input_ids.shape[0], self.config.num_attention_heads) else: # 多模态 block diffusion # Phase A: Embed + masked scatter vision if inputs_embeds is None: inputs_embeds = self.model.get_input_embeddings()(input_ids) if pixel_values is not None: image_embeds = self.model.get_image_features(pixel_values, image_grid_thw) image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) if self.vision_to_text_proj is not None: image_embeds = self.vision_to_text_proj(image_embeds) image_mask, _ = self.model.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds ) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) if pixel_values_videos is not None: video_embeds = self.model.get_video_features(pixel_values_videos, video_grid_thw) video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) if self.vision_to_text_proj is not None: video_embeds = self.vision_to_text_proj(video_embeds) _, video_mask = self.model.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds ) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) # Phase B: 生成 3D position_ids(在扩倍前,基于原长) if position_ids is None: position_ids, rope_deltas = self.model.get_rope_index( input_ids=input_ids, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, second_per_grid_ts=second_per_grid_ts, attention_mask=attention_mask, ) # Phase C: Block diffusion (保护 vision token 位置) batch_size = input_ids.shape[0] L = input_ids.shape[1] seq_len = L # if L > self.max_context_length: # L = self.max_context_length # input_ids = input_ids[:, :self.max_context_length] # labels = labels[:, :self.max_context_length] # position_ids = position_ids[:, :self.max_context_length] # attention_mask = attention_mask[:, :self.max_context_length] # inputs_embeds = inputs_embeds[:, :self.max_context_length] hidden_size = inputs_embeds.shape[-1] original_labels = labels.clone() original_input_ids = input_ids.clone() original_embeds = inputs_embeds.clone() original_position_ids = position_ids.clone() # 保存原长 position [3, B, L] # 识别 vision tokens(不加噪声) image_token_id = self.config.image_token_id video_token_id = self.config.video_token_id vision_start_token_id = self.config.vision_start_token_id vision_token_mask = (input_ids == image_token_id) | (input_ids == video_token_id) | (input_ids == vision_start_token_id) vision_mask_3d = vision_token_mask.unsqueeze(-1).expand(-1, -1, hidden_size) # Block diffusion with multi-turn support # Each response segment has independent blocks response_block_idx, turn_idx, n_blocks = self.compute_response_block_idx(labels, bd_size) # Compute response block index: -1 for prompt, >=0 for response # Each response 
segment has independent blocks response_mask = (labels != -100) # [B, seq_len] eps = self.minimum_noise_level if self.use_block_causal_mask and not self.block_causal_no_dynamic: response_block_idx, turn_idx, n_blocks = self.compute_response_block_idx(labels, bd_size) # Sample t for each block: [n_blocks] # random sample t for each block from [self.minimum_noise_level, 1] t = torch.rand((n_blocks,), device=input_ids.device) p_mask_per_block = (1 - eps) * t + eps # Create mask_indices: [B, seq_len] mask_indices = torch.zeros_like(labels, dtype=torch.bool) for i in range(seq_len): block_i = response_block_idx[i].item() if block_i >= 0: # response token mask_indices[:, i] = torch.rand((batch_size,), device=input_ids.device) < p_mask_per_block[block_i] else: input_ids = input_ids.reshape(input_ids.shape[0] * input_ids.shape[1] // bd_size, bd_size) b, l = input_ids.shape t = torch.rand((b,), device=input_ids.device) p_mask = (1 - eps) * t + eps p_mask = p_mask[:, None].repeat(1, l) mask_indices = torch.rand((b, l), device=input_ids.device) < p_mask mask_indices = mask_indices.reshape(labels.shape) & response_mask input_ids = input_ids.reshape(labels.shape) if self.always_mask_im_end: im_end_mask = (input_ids == self.im_end_token_id) & response_mask mask_indices = mask_indices | im_end_mask noisy_input_ids = input_ids.clone() noisy_input_ids[mask_indices] = mask_id # Noisy embeds(保护 vision) if self.enable_efficient_vision_embed: noisy_embeds = original_embeds.clone() text_mask_3d = mask_indices.unsqueeze(-1).expand(-1, -1, hidden_size) mask_embeds = self.model.language_model.embed_tokens( torch.full_like(input_ids, mask_id) ) noisy_embeds = torch.where(text_mask_3d, mask_embeds, noisy_embeds) else: noisy_embeds_raw = self.model.language_model.embed_tokens(noisy_input_ids) noisy_embeds = torch.where(vision_mask_3d, original_embeds, noisy_embeds_raw) # 更新 labels labels_noisy = labels.clone() labels_noisy[~mask_indices] = -100 # 拼接 [noisy | clean] input_ids_pair1 = torch.cat([noisy_input_ids, original_input_ids], dim=1) embeds_pair1 = torch.cat([noisy_embeds, original_embeds], dim=1) labels_pair1 = labels_noisy position_ids_pair1 = original_position_ids # [3, B, L] input_ids = input_ids_pair1 inputs_embeds = embeds_pair1 labels = labels_pair1 position_ids = position_ids_pair1 # Complementary if self.complementary_mask: complementary_mask_indices = response_mask & ~mask_indices if self.always_mask_im_end: im_end_mask = (original_input_ids == self.im_end_token_id) & response_mask complementary_mask_indices = complementary_mask_indices | im_end_mask complementary_noisy_input_ids = original_input_ids.clone() complementary_noisy_input_ids[complementary_mask_indices] = mask_id if self.enable_efficient_vision_embed: complementary_noisy_embeds = original_embeds.clone() text_mask_3d = complementary_mask_indices.unsqueeze(-1).expand(-1, -1, hidden_size) mask_embeds = self.model.language_model.embed_tokens( torch.full_like(original_input_ids, mask_id) ) complementary_noisy_embeds = torch.where(text_mask_3d, mask_embeds, complementary_noisy_embeds) else: complementary_noisy_embeds_raw = self.model.language_model.embed_tokens(complementary_noisy_input_ids) complementary_noisy_embeds = torch.where(vision_mask_3d, original_embeds, complementary_noisy_embeds_raw) complementary_labels = original_labels.clone() complementary_labels[~complementary_mask_indices] = -100 input_ids_pair2 = torch.cat([complementary_noisy_input_ids, original_input_ids], dim=1) embeds_pair2 = torch.cat([complementary_noisy_embeds, 
original_embeds], dim=1) labels_pair2 = complementary_labels position_ids_pair2 = original_position_ids # Batch 拼接 input_ids = torch.cat([input_ids_pair1, input_ids_pair2], dim=0) inputs_embeds = torch.cat([embeds_pair1, embeds_pair2], dim=0) labels = torch.cat([labels_pair1, labels_pair2], dim=0) position_ids = torch.cat([position_ids_pair1, position_ids_pair2], dim=1) if self.use_block_causal_mask: if self.block_causal_no_dynamic: attention_mask = self.gen_block_causal_mask(L, bd_size, input_ids.shape[0], self.config.num_attention_heads) else: attention_mask = self.gen_hybrid_block_causal_mask(L, response_block_idx, turn_idx, input_ids.shape[0], self.config.num_attention_heads) else: attention_mask = self.gen_mask(L, bd_size, input_ids.shape[0], self.config.num_attention_heads) # 清空 pixel_values(已替换) pixel_values = None pixel_values_videos = None # Phase D: 调用内层(多模态时传 inputs_embeds,纯文本时传 input_ids) if pixel_values is None and pixel_values_videos is None: # 纯文本:传 input_ids(内层会 embed) outputs = self.model( input_ids=input_ids, pixel_values=None, pixel_values_videos=None, image_grid_thw=None, video_grid_thw=None, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, update_kv_cache=update_kv_cache, bd_size=bd_size, **kwargs, ) else: # 多模态:传 inputs_embeds(已 masked_scatter) outputs = self.model.language_model( input_ids=None, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, update_kv_cache=update_kv_cache, bd_size=bd_size, **kwargs, ) else: outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, update_kv_cache=update_kv_cache, bd_size=eval_bd_size, **kwargs, ) hidden_states = outputs[0] loss = None if self.training: mdm_hidden_states = hidden_states[:, :hidden_states.shape[1]//2, :] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(mdm_hidden_states[:, slice_indices, :]) if self.use_block_causal_mask: new_kwargs = { 'num_items_in_batch': 2*kwargs['num_items_in_batch'], } else: new_kwargs = kwargs if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **new_kwargs ) * 0.5 if self.use_block_causal_mask: if self.complementary_mask: causal_hidden_states = hidden_states[:hidden_states.shape[0]//2, hidden_states.shape[1]//2:, :] else: causal_hidden_states = hidden_states[:, :hidden_states.shape[1]//2, :] causal_logits = self.lm_head(causal_hidden_states[:, slice_indices, :]) loss += self.loss_function( logits=causal_logits, labels=original_labels, vocab_size=self.config.text_config.vocab_size, **new_kwargs ) if self.entropy_loss: # Use 
num_items_in_batch for global normalization (consistent with cross entropy) num_items = kwargs.get('num_items_in_batch', None) entropy_loss = self.compute_entropy_loss(logits, labels, num_items_in_batch=num_items) loss += self.entropy_loss_weight * entropy_loss else: slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) return Fast_dVLMCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=outputs.rope_deltas, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, pixel_values_videos=None, image_grid_thw=None, video_grid_thw=None, second_per_grid_ts=None, **kwargs, ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, position_ids=position_ids, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, second_per_grid_ts=second_per_grid_ts, use_cache=use_cache, **kwargs, ) # Qwen2-5-VL position_ids are prepared with rope_deltas if position_ids is None: # Calculate RoPE index once per generation in the pre-fill stage only. # When compiling, we can't check tensor values thus we check only input length # It is safe to assume that `length!=1` means we're in pre-fill because compiled # models currently cannot do assisted decoding if cache_position[0] == 0 or self.model.rope_deltas is None: vision_positions, rope_deltas = self.model.get_rope_index( model_inputs.get("input_ids", None), image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, attention_mask=attention_mask, ) self.model.rope_deltas = rope_deltas # then use the prev pre-calculated rope-deltas to get the correct position ids elif "position_ids" in model_inputs: batch_size, seq_length = model_inputs["position_ids"].shape device = model_inputs["position_ids"].device position_ids = torch.arange(seq_length, device=device) position_ids = position_ids.view(1, 1, -1).expand(3, batch_size, -1) delta = cache_position[0] + self.model.rope_deltas delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) vision_positions = position_ids + delta.expand_as(position_ids) # Concatenate "text + vision" positions into [4, bs, seq-len] text_positions = model_inputs["position_ids"][None, ...] model_inputs["position_ids"] = torch.cat([text_positions, vision_positions], dim=0) if cache_position[0] != 0: model_inputs["pixel_values"] = None model_inputs["pixel_values_videos"] = None return model_inputs def _get_image_nums_and_video_nums( self, input_ids: Optional[torch.LongTensor], inputs_embeds: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Get the number of images and videos for each sample to calculate the separation length of the sample tensor. These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. 
Returns: image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`) video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`) """ image_token_id = self.config.image_token_id video_token_id = self.config.video_token_id vision_start_token_id = self.config.vision_start_token_id if inputs_embeds is not None: vision_start_mask = ( inputs_embeds == self.get_input_embeddings()( torch.tensor(vision_start_token_id, dtype=torch.long, device=inputs_embeds.device) ) )[..., 0] image_mask = ( inputs_embeds == self.get_input_embeddings()( torch.tensor(image_token_id, dtype=torch.long, device=inputs_embeds.device) ) )[..., 0] video_mask = ( inputs_embeds == self.get_input_embeddings()( torch.tensor(video_token_id, dtype=torch.long, device=inputs_embeds.device) ) )[..., 0] else: vision_start_mask = input_ids == vision_start_token_id image_mask = input_ids == image_token_id video_mask = input_ids == video_token_id vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1) image_nums = torch.sum(vision_first_mask & image_mask, dim=1) video_nums = torch.sum(vision_first_mask & video_mask, dim=1) return image_nums, video_nums def _expand_inputs_for_generation( self, expand_size: int = 1, is_encoder_decoder: bool = False, input_ids: Optional[torch.LongTensor] = None, **model_kwargs, ) -> tuple[torch.LongTensor, dict[str, Any]]: # Overwritten -- Support for expanding tensors without a batch size dimension # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t # pixel_values.shape[0] is sum(seqlen_images for samples) # image_grid_thw.shape[0] is sum(num_images for samples) if expand_size == 1: return input_ids, model_kwargs visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw", "second_per_grid_ts"] def _expand_dict_for_generation_visual(dict_to_expand): image_grid_thw = model_kwargs.get("image_grid_thw", None) video_grid_thw = model_kwargs.get("video_grid_thw", None) image_nums, video_nums = self._get_image_nums_and_video_nums( input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None) ) def _repeat_interleave_samples(x, lengths, repeat_times): samples = torch.split(x, lengths) repeat_args = [repeat_times] + [1] * (x.dim() - 1) result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0) return result for key in dict_to_expand: if key == "pixel_values": # split images into samples samples = torch.split(image_grid_thw, list(image_nums)) # compute the sequence length of images for each sample lengths = [torch.prod(sample, dim=1).sum() for sample in samples] dict_to_expand[key] = _repeat_interleave_samples( dict_to_expand[key], lengths=lengths, repeat_times=expand_size ) elif key == "image_grid_thw": # get the num of images for each sample lengths = list(image_nums) dict_to_expand[key] = _repeat_interleave_samples( dict_to_expand[key], lengths=lengths, repeat_times=expand_size ) elif key == "pixel_values_videos": samples = torch.split(video_grid_thw, list(video_nums)) lengths = [torch.prod(sample, dim=1).sum() for sample in samples] dict_to_expand[key] = _repeat_interleave_samples( dict_to_expand[key], lengths=lengths, repeat_times=expand_size ) elif key == "video_grid_thw": lengths = list(video_nums) dict_to_expand[key] = _repeat_interleave_samples( dict_to_expand[key], lengths=lengths, repeat_times=expand_size ) elif key == "second_per_grid_ts": dict_to_expand[key] = _repeat_interleave_samples( dict_to_expand[key], lengths=list(video_nums), repeat_times=expand_size ) return 
dict_to_expand def _expand_dict_for_generation(dict_to_expand): for key in dict_to_expand: if ( key != "cache_position" and dict_to_expand[key] is not None and isinstance(dict_to_expand[key], torch.Tensor) and key not in visual_keys ): dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0) return dict_to_expand model_kwargs = _expand_dict_for_generation_visual(model_kwargs) if input_ids is not None: input_ids = input_ids.repeat_interleave(expand_size, dim=0) model_kwargs = _expand_dict_for_generation(model_kwargs) if is_encoder_decoder: if model_kwargs.get("encoder_outputs") is None: raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) return input_ids, model_kwargs @torch.no_grad() def generate( self, input_ids, tokenizer, block_size=32, max_tokens=1024, pixel_values=None, image_grid_thw=None, mask_id=151665, stop_token=151645, ): """Speculative block-causal parallel decoding for Fast-dVLM. Each iteration: (1) draft a block of masked tokens via one diffusion forward pass — all masked positions are filled at once, (2) verify with an AR forward pass and accept the longest matching prefix, then cache the accepted tokens. Args: input_ids: Prompt token ids ``[1, prompt_len]``. tokenizer: Tokenizer (reserved for future use). block_size: Number of tokens to draft per block. max_tokens: Maximum new tokens to generate. pixel_values: Optional image pixel values for VLM. image_grid_thw: Optional image grid info for VLM. mask_id: Token id used as the ``[MASK]`` placeholder. stop_token: EOS token id for early stopping. Returns: Generated token ids ``[1, prompt_len + gen_len]``. """ self.model.bd_size = block_size original_input_length = input_ids.shape[1] # Prefill: encode prompt and generate first token autoregressively output = self.forward( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, use_cache=True, update_kv_cache=True, ) logits, past_key_values = output.logits, output.past_key_values next_token = logits[:, -1:, :].argmax(dim=-1) input_ids = torch.cat([input_ids, next_token], dim=1) while True: prompt_length = input_ids.shape[1] # Append (block_size - 1) mask tokens as draft placeholders x_init = mask_id * torch.ones( (input_ids.shape[0], block_size - 1), device=self.device, dtype=torch.long ) x_t = torch.cat([input_ids, x_init], dim=1) # Draft: diffusion forward fills all masked positions in one pass current_ids = x_t[:, -block_size:] logits = self.forward( input_ids=current_ids, use_cache=True, past_key_values=past_key_values, update_kv_cache=False, eval_bd_size=block_size, ).logits logits = torch.cat([logits[:, :1, :], logits[:, :-1, :]], dim=1) x_1 = logits.argmax(dim=-1) mask_idx = current_ids == mask_id x_t[:, -block_size:][mask_idx] = x_1[mask_idx] # Verify: AR forward to accept longest correct prefix current_ids = x_t[:, -block_size:] output = self.forward( input_ids=current_ids, use_cache=True, past_key_values=past_key_values, update_kv_cache=True, eval_bd_size=block_size, ) logits, past_key_values = output.logits, output.past_key_values ar_block_token = logits.argmax(dim=-1) accepted_token_num = 1 for i in range(ar_block_token.shape[1] - 1): if (ar_block_token[:, i] == current_ids[:, i + 1]).all(): accepted_token_num += 1 else: break input_ids = torch.cat([input_ids, ar_block_token[:, :accepted_token_num]], dim=1) # Crop KV cache to match accepted length new_past_key_values = [] for 
layer_num in range(len(past_key_values)): layer_past_key_values = () for kv_idx in range(len(past_key_values[layer_num])): layer_past_key_values += ( past_key_values[layer_num][kv_idx][:, :, : input_ids.shape[1] - 1, :], ) new_past_key_values.append(layer_past_key_values) past_key_values = DynamicCache(new_past_key_values) if input_ids.shape[1] - original_input_length > max_tokens: break if stop_token in input_ids[:, prompt_length:]: stop_token_idx = (input_ids[:, prompt_length:] == stop_token).nonzero()[0][1] if (input_ids[:, prompt_length : prompt_length + stop_token_idx] == mask_id).sum() == 0: break # Truncate at stop token if stop_token in input_ids[:, original_input_length:]: stop_token_idx = (input_ids[:, original_input_length:] == stop_token).nonzero()[0][1] input_ids = input_ids[:, : stop_token_idx + original_input_length + 1] return input_ids __all__ = ["Fast_dVLMForConditionalGeneration", "Fast_dVLMModel", "Fast_dVLMPreTrainedModel", "Fast_dVLMTextModel"]
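if __name__ == "__main__":
    # Illustrative usage sketch only, not part of the model definition. The checkpoint path
    # is a placeholder, the tokenizer/device setup is assumed, and vision inputs are omitted
    # (so `pixel_values` stays None); block_size/mask_id/stop_token mirror the defaults of
    # `generate` above. Adjust to your own checkpoint and hardware before running.
    from transformers import AutoTokenizer

    checkpoint = "path/to/fast_dvlm_checkpoint"  # hypothetical local checkpoint directory
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = (
        Fast_dVLMForConditionalGeneration.from_pretrained(checkpoint, torch_dtype=torch.bfloat16)
        .to("cuda")  # assumes a CUDA device is available
        .eval()
    )

    messages = [{"role": "user", "content": "Describe block diffusion decoding in one sentence."}]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

    # Draft blocks of 32 masked tokens per diffusion pass, verify them autoregressively,
    # and stop at <|im_end|> (151645) or after 256 new tokens.
    output_ids = model.generate(
        input_ids,
        tokenizer=tokenizer,
        block_size=32,
        max_tokens=256,
    )
    print(tokenizer.decode(output_ids[0, input_ids.shape[1]:], skip_special_tokens=True))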