# coding=utf-8 # Copyright 2026 Tencent Youtu lab, DeepSeek-AI and The HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import os from functools import partial from typing import Callable, Optional, Tuple, Union, List, Any, Dict import torch import torch.nn.functional as F from torch import nn from transformers.activations import ACT2FN from transformers.cache_utils import Cache, DynamicCache, StaticCache from transformers.generation import GenerationMixin from transformers.modeling_attn_mask_utils import AttentionMaskConverter from transformers.modeling_flash_attention_utils import FlashAttentionKwargs from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from transformers.processing_utils import Unpack from transformers.utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, can_return_tuple, is_torch_flex_attn_available, logging, replace_return_docstrings, is_flash_attn_2_available, ) from transformers.utils.deprecation import deprecate_kwarg from .configuration_youtu_vl import YoutuVLConfig from .modeling_siglip2 import Siglip2VisionModel, Siglip2VisionEmbeddings from .configuration_siglip2 import Siglip2VisionConfig if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from transformers.integrations.flex_attention import make_flex_block_causal_mask is_aiter_available = False if is_flash_attn_2_available(): try: from aiter import flash_attn_varlen_func is_aiter_available = True except ImportError: from flash_attn import flash_attn_varlen_func else: flash_attn_varlen_func = None logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "YoutuVLConfig" class YoutuRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class YoutuRotaryEmbedding(nn.Module): def __init__(self, config: YoutuVLConfig, device=None): super().__init__() if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" 
self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update def forward(self, x, position_ids): """ Compute rotary positional embeddings. Args: x (torch.Tensor): Input tensor, shape (batch_size, seq_len, feature_dim) position_ids (torch.LongTensor): Position indices, shape (batch_size, seq_len) Returns: Tuple of (cos, sin) tensors for rotary embedding """ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) class YoutuMLP(nn.Module): def __init__(self, config, hidden_size=None, intermediate_size=None): super().__init__() self.config = config self.hidden_size = config.hidden_size if hidden_size is None else hidden_size self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj def rotate_half(x): """ Rotates half the hidden dims of the input. """ x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
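
    Example (illustrative only; `rotary_emb` is assumed to be a `YoutuRotaryEmbedding` instance whose
    rotary dimension matches `head_dim=64`, and `position_ids` is a `(1, 16)` LongTensor; these names
    are placeholders, not part of this module):

    ```python
    q = torch.randn(1, 8, 16, 64)  # (batch_size, num_heads, seq_len, head_dim)
    k = torch.randn(1, 8, 16, 64)
    cos, sin = rotary_emb(q, position_ids)
    q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)  # same shapes as q and k
    ```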
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights def apply_rotary_pos_emb_interleave(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): r""" Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) b, h, s, d = q.shape q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) b, h, s, d = k.shape k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def yarn_get_mscale(scale=1, mscale=1): if scale <= 1: return 1.0 return 0.1 * mscale * math.log(scale) + 1.0 class YoutuMLAttention(nn.Module): """ Multi-latent attention from 'DeepSeek-V2: A Strong, Economical, and Efficient Mixture-of-Experts Language Model'paper """ def __init__(self, config: YoutuVLConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.num_key_value_groups = 1 # needed for eager attentions self.attention_dropout = config.attention_dropout self.num_heads = config.num_attention_heads self.rope_theta = config.rope_theta self.q_lora_rank = config.q_lora_rank self.qk_rope_head_dim = config.qk_rope_head_dim self.kv_lora_rank = config.kv_lora_rank self.v_head_dim = config.v_head_dim self.qk_nope_head_dim = config.qk_nope_head_dim self.qk_head_dim = config.qk_head_dim self.flash_att_sliding_window = config.flash_att_sliding_window self.is_causal = True if self.q_lora_rank is None: self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.qk_head_dim, bias=False) else: self.q_a_proj = nn.Linear(config.hidden_size, config.q_lora_rank, bias=config.attention_bias) self.q_a_layernorm = YoutuRMSNorm(config.q_lora_rank) self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False) self.kv_a_proj_with_mqa = nn.Linear( config.hidden_size, self.kv_lora_rank + self.qk_rope_head_dim, bias=config.attention_bias, ) self.kv_a_layernorm = YoutuRMSNorm(self.kv_lora_rank) self.kv_b_proj = nn.Linear( self.kv_lora_rank, self.num_heads * (self.qk_nope_head_dim + self.v_head_dim), bias=False, ) self.o_proj = nn.Linear( self.num_heads * self.v_head_dim, config.hidden_size, bias=config.attention_bias, ) self.scaling = self.qk_head_dim ** (-0.5) if self.config.rope_scaling is not None: mscale_all_dim = self.config.rope_scaling.get("mscale_all_dim", 0) scaling_factor = self.config.rope_scaling["factor"] if mscale_all_dim: mscale = yarn_get_mscale(scaling_factor, mscale_all_dim) self.scaling = self.scaling * mscale * mscale def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], instance_length: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: batch_size, seq_length = hidden_states.shape[:-1] query_shape = (batch_size, seq_length, -1, self.qk_head_dim) key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim) if self.q_lora_rank is None: q_states = self.q_proj(hidden_states).view(query_shape).transpose(1, 2) else: q_states = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))).view(query_shape).transpose(1, 2) q_pass, q_rot = torch.split(q_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1) compressed_kv = self.kv_a_proj_with_mqa(hidden_states) k_pass, k_rot = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1) k_pass = self.kv_b_proj(self.kv_a_layernorm(k_pass)).view(key_shape).transpose(1, 2) k_pass, 
value_states = torch.split(k_pass, [self.qk_nope_head_dim, self.v_head_dim], dim=-1) k_rot = k_rot.view(batch_size, 1, seq_length, self.qk_rope_head_dim) cos, sin = position_embeddings if self.config.rope_interleave: # support using interleaved weights for efficiency q_rot, k_rot = apply_rotary_pos_emb_interleave(q_rot, k_rot, cos, sin) else: q_rot, k_rot = apply_rotary_pos_emb(q_rot, k_rot, cos, sin) k_rot = k_rot.expand(*k_pass.shape[:-1], -1) query_states = torch.cat((q_pass, q_rot), dim=-1) key_states = torch.cat((k_pass, k_rot), dim=-1) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) if self.config._attn_implementation == "flash_attention_2" and self.qk_head_dim != self.v_head_dim: value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim]) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support " "`output_attentions=True`. Falling back to eager attention. This warning " 'can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] if instance_length is None or flash_attn_varlen_func is None: attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) if self.config._attn_implementation == "flash_attention_2" and self.qk_head_dim != self.v_head_dim: attn_output = attn_output[:, :, :, : self.v_head_dim] else: instance_length = instance_length.view(-1) query_states = query_states.squeeze(0).transpose(0,1) key_states = key_states.squeeze(0).transpose(0,1) value_states = value_states.squeeze(0).transpose(0,1) max_seqlen_in_batch = instance_length.max().item() cu_seqlens = F.pad(torch.cumsum(instance_length, dim=0, dtype=torch.int32), (1, 0)) if is_aiter_available: attn_output = flash_attn_varlen_func(query_states, key_states, value_states, cu_seqlens, cu_seqlens, max_seqlen_in_batch, max_seqlen_in_batch, dropout_p=0.0 if not self.training else self.attention_dropout, softmax_scale=self.scaling, causal=self.is_causal, return_lse=True)[0] else: attn_output = flash_attn_varlen_func(query_states, key_states, value_states, cu_seqlens, cu_seqlens, max_seqlen_in_batch, max_seqlen_in_batch, dropout_p=0.0 if not self.training else self.attention_dropout, softmax_scale=self.scaling, causal=self.is_causal) attn_output = attn_output.unsqueeze(0) attn_output = attn_output[:, :, :, : self.v_head_dim] attn_weights = None attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class YoutuDecoderLayer(nn.Module): def __init__(self, config: YoutuVLConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = YoutuMLAttention(config=config, layer_idx=layer_idx) self.mlp = YoutuMLP(config) self.input_layernorm = YoutuRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = YoutuRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask:
Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, instance_length: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, instance_length=instance_length, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs YOUTU_VL_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`YoutuVLConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" @add_start_docstrings( "The bare Youtu Model outputting raw hidden-states without any specific head on top.", YOUTU_VL_START_DOCSTRING, ) class YoutuPreTrainedModel(PreTrainedModel): config_class = YoutuVLConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["YoutuDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn_2 = True _supports_sdpa = True _supports_flex_attn = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True _supports_attention_backend = True def init_weights(self): if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) if "-init" in self.name_or_path: self.apply(self._initialize_weights) for name, module in self.named_modules(): if "o_proj" in name or "down_proj" in name: scaled_std = self.config.initializer_range * (1.0 / self.config.num_hidden_layers) ** 0.5 module.weight.data.normal_(mean=0.0, std=scaled_std) self.tie_weights() def _init_weights(self, module): std = self.config.initializer_range embedding_std = self.config.embedding_initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=embedding_std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.Parameter): module.weight.data.normal_(mean=0.0, std=std) elif isinstance(module, YoutuRMSNorm): module.weight.data.fill_(1.0) YOUTU_VL_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. It is a [`~cache_utils.Cache`] instance. 
For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. """ @add_start_docstrings( "The bare Youtu Model outputting raw hidden-states without any specific head on top.", YOUTU_VL_START_DOCSTRING, ) class YoutuModel(YoutuPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"model\.layers\.61.*"] def __init__(self, config: YoutuVLConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [YoutuDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = YoutuRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = YoutuRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @can_return_tuple @add_start_docstrings_to_model_forward(YOUTU_VL_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, instance_length: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **flash_attn_kwargs: Unpack[FlashAttentionKwargs], ) -> BaseModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ 
(inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, instance_length=instance_length, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **flash_attn_kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) if isinstance(attention_mask, BlockMask): return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu"] and not output_attentions ): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return 
causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to place the 4D attention mask on. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask class KwargsForCausalLM(FlashAttentionKwargs): ... 
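
# Worked illustration (comments only, nothing here is executed) of the mask returned by
# `YoutuModel._prepare_4d_causal_attention_mask_with_cache_position` above. With
# `sequence_length=2` new tokens appended after a cache of length 3
# (`cache_position = [3, 4]`, `target_length=5`, `attention_mask=None`), the result has shape
# `(1, 1, 2, 5)`, where 0 marks attendable positions and `m = torch.finfo(dtype).min`:
#
#     [[0, 0, 0, 0, m],
#      [0, 0, 0, 0, 0]]
#
# Each new token can attend to every cached position, to earlier new tokens, and to itself.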
class YoutuForCausalLM(YoutuPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} def __init__(self, config): super().__init__(config) self.model = YoutuModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def get_merge_embedding(self, inputs_embeds, image_embeds, image_mask,**kwargs,): bs, length, dim_size = inputs_embeds.shape if image_embeds is None: return inputs_embeds if bs == 1: image_embeds = image_embeds.unsqueeze(0) init_inputs_embeds = inputs_embeds.clone() inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) cmp_mask = torch.isclose(init_inputs_embeds, inputs_embeds, rtol=1e-05, atol=1e-08) else: assert(bs==1) return inputs_embeds @can_return_tuple @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep") @add_start_docstrings_to_model_forward(YOUTU_VL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[KwargsForCausalLM], ) -> CausalLMOutputWithPast: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class VLPatchMerger(nn.Module): def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2) -> None: super().__init__() self.hidden_size = context_dim * (spatial_merge_size**2) self.ln_q = YoutuRMSNorm(context_dim, eps=1e-06) self.mlp = nn.Sequential( nn.Linear(self.hidden_size, self.hidden_size), nn.GELU(), nn.Linear(self.hidden_size, dim), ) def forward(self, x: torch.Tensor, spatial_shapes: torch.Tensor) -> torch.Tensor: x = self.ln_q(x).view(-1, self.hidden_size) x = self.mlp(x) return x class 
YoutuVLForConditionalGeneration(YoutuPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} def __init__(self, config): super().__init__(config) config.vision_config.out_hidden_size = config.hidden_size config.vision_config.vision_use_head = False self.siglip2 = Siglip2VisionModel._from_config(config.vision_config) self.merger = VLPatchMerger( dim=config.hidden_size, context_dim=config.vision_config.hidden_size, spatial_merge_size=2, ) self.rope_deltas = None self.model = YoutuModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.first_logits = None self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def get_input_idx_embeddings(self, input_ids): inputs_embeds = self.model.embed_tokens(input_ids) return inputs_embeds def get_visiual_features(self, pixel_values, pixel_attention_mask, spatial_shapes): pixel_values = pixel_values.type(self.siglip2.dtype) # Extract image embeddings via vision model image_embeds = self.siglip2(pixel_values, pixel_attention_mask, spatial_shapes).last_hidden_state # Merge image features with the output of vision model image_embeds = self.merger(image_embeds, spatial_shapes) return image_embeds def get_merge_embedding(self, inputs_embeds, image_embeds, image_mask, **kwargs): """ Merge text embeddings with image embeddings using the provided mask. 
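
        Illustrative sketch (shapes are assumptions, not enforced here): with `inputs_embeds` of shape
        `(1, seq_len, hidden_size)`, flattened `image_embeds` of shape `(n_image_tokens, hidden_size)`,
        and a boolean `image_mask` of shape `(1, seq_len, hidden_size)` that is True at image-placeholder
        positions, the merge is equivalent to
        `inputs_embeds.masked_scatter(image_mask, image_embeds.unsqueeze(0))`.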
Args: inputs_embeds: Text input embeddings image_embeds: Image embeddings to merge image_mask: Mask indicating where to place image embeddings **kwargs: Additional keyword arguments Returns: Merged embeddings with image features integrated """ bs, length, dim_size = inputs_embeds.shape if image_embeds is None: return inputs_embeds if bs == 1: image_embeds = image_embeds.unsqueeze(0) init_inputs_embeds = inputs_embeds.clone() inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) cmp_mask = torch.isclose(init_inputs_embeds, inputs_embeds, rtol=1e-05, atol=1e-08) else: raise ValueError("get_merge_embedding currently only supports batch_size == 1.") return inputs_embeds @can_return_tuple @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep") @add_start_docstrings_to_model_forward(YOUTU_VL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, pixel_attention_mask: Optional[torch.LongTensor] = None, spatial_shapes: Optional[torch.LongTensor] = None, instance_length: Optional[torch.LongTensor] = None, coefficients: Optional[torch.FloatTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[KwargsForCausalLM], ) -> CausalLMOutputWithPast: r""" Example: TODO: Add example Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if inputs_embeds is None: inputs_embeds = self.model.embed_tokens(input_ids) if pixel_values is not None: bs, length, dim_size = inputs_embeds.shape pixel_values = pixel_values.type(self.siglip2.dtype) image_embeds = self.siglip2(pixel_values, pixel_attention_mask, spatial_shapes).last_hidden_state image_embeds = self.merger(image_embeds, spatial_shapes) n_image_tokens = (input_ids == self.config.image_token_id).sum().item() n_image_features = image_embeds.shape[0] if n_image_tokens > n_image_features: raise ValueError( "Image features and image tokens do not match: tokens: {}, features {}".format( n_image_tokens, n_image_features ) ) mask = input_ids == self.config.image_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) image_mask = mask_expanded.to(inputs_embeds.device) image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) if bs != 1: raise ValueError("Only batch_size == 1 is currently supported when pixel_values are provided.") image_embeds = image_embeds.unsqueeze(0) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) outputs: BaseModelOutputWithPast = self.model( input_ids=None, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache,
output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, instance_length=instance_length, **kwargs, ) hidden_states = outputs.last_hidden_state logits = self.lm_head(hidden_states) if logits.shape[1] != 1: self.first_logits = logits loss = None return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def truncate_past_key_values( self, past_key_values: Optional[DynamicCache], num_history: int ) -> Optional[DynamicCache]: """Truncate past_key_values to specified history length in-place. Args: past_key_values: Cache object to truncate num_history: Target history length to keep Returns: Truncated cache object or None if input is None """ if past_key_values is None: return None current_length = past_key_values.get_seq_length() if current_length <= num_history: return past_key_values for layer_idx in range(len(past_key_values.key_cache)): if past_key_values.key_cache[layer_idx] is not None: past_key_values.key_cache[layer_idx] = ( past_key_values.key_cache[layer_idx][:, :, :num_history, :].contiguous() ) past_key_values.value_cache[layer_idx] = ( past_key_values.value_cache[layer_idx][:, :, :num_history, :].contiguous() ) return past_key_values def clone_past_key_values( self, past_key_values: Optional[DynamicCache] ) -> Optional[DynamicCache]: """Deep copy past_key_values to avoid shared reference issues. Args: past_key_values: Cache object to clone Returns: Deep copied cache object or None if input is None """ if past_key_values is None: return None new_cache = DynamicCache() for layer_idx in range(len(past_key_values.key_cache)): if past_key_values.key_cache[layer_idx] is not None: new_cache.key_cache.append(past_key_values.key_cache[layer_idx].clone()) new_cache.value_cache.append(past_key_values.value_cache[layer_idx].clone()) return new_cache def concat_token_ids( self, input_ids: torch.Tensor, concat_ids: Optional[List[int]] ) -> torch.Tensor: """Concatenate additional token IDs to input sequence. Args: input_ids: Original input token IDs of shape (batch_size, seq_len) concat_ids: Token IDs to concatenate Returns: Concatenated token IDs tensor """ if concat_ids is None: return input_ids num_gen = len(concat_ids) if num_gen < 2: return input_ids batch_size = input_ids.size(0) concat_token_tensor = torch.tensor( concat_ids, dtype=input_ids.dtype, device=input_ids.device ) concat_tokens = concat_token_tensor.unsqueeze(0).repeat(batch_size, 1) new_input_ids = torch.cat([input_ids, concat_tokens], dim=1) return new_input_ids def create_causal_mask_for_kv_cache( self, kv_cache_len: int, num_new_tokens: int, device: torch.device, dtype: torch.dtype = torch.bfloat16 ) -> torch.Tensor: """Create causal attention mask for KV cache usage. Each new token can only see: 1. All content in KV cache (positions 0 to kv_cache_len-1) 2. 
Previous new tokens and itself (causal masking) Args: kv_cache_len: Length of existing sequence in KV cache num_new_tokens: Number of new tokens being added device: Target device for tensor allocation dtype: Data type for the mask tensor Returns: Attention mask of shape (1, 1, num_new_tokens, kv_cache_len + num_new_tokens) """ total_len = kv_cache_len + num_new_tokens min_val = torch.finfo(dtype).min # Initialize mask with min_val (masked positions) mask = torch.full((num_new_tokens, total_len), min_val, device=device, dtype=dtype) # Set visible positions to 0 for i in range(num_new_tokens): if kv_cache_len > 0: mask[i, :kv_cache_len] = 0 mask[i, kv_cache_len:kv_cache_len + i + 1] = 0 return mask.unsqueeze(0).unsqueeze(0) def create_4d_causal_mask( self, seq_len: int, device: torch.device, dtype: torch.dtype = torch.bfloat16 ) -> torch.Tensor: """Create complete 4D causal attention mask for initial decoding. Args: seq_len: Sequence length device: Target device for tensor allocation dtype: Data type for the mask tensor Returns: Causal attention mask of shape (1, 1, seq_len, seq_len) """ min_val = torch.finfo(dtype).min # Create lower triangular causal mask mask = torch.full((seq_len, seq_len), min_val, device=device, dtype=dtype) mask = torch.triu(mask, diagonal=1) return mask.unsqueeze(0).unsqueeze(0) def _first_decoder( self, new_input_ids: torch.Tensor, past_key_values: Optional[DynamicCache] = None, image_embeds: Optional[torch.Tensor] = None, image_mask: Optional[torch.Tensor] = None, num_gen: int = 32 ) -> Tuple[torch.Tensor, Any]: """Execute decoder pass with causal attention masking. This method performs a single decoder pass with optimized attention masking. On the first decoding step (when past_key_values is None), it processes image embeddings and merges them with text embeddings. 
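
        Illustrative call (a sketch only; `model`, `image_embeds`, and `image_mask` are assumed to come
        from the surrounding generation loop and are not defined here):

        ```python
        predicted_ids, outputs = model._first_decoder(
            new_input_ids,            # (1, prompt_len + num_gen), ending with num_gen mask tokens
            past_key_values=None,     # first step: no cache yet
            image_embeds=image_embeds,
            image_mask=image_mask,
            num_gen=32,
        )
        # predicted_ids: (1, num_gen) draft tokens taken from the last num_gen logit positions
        ```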
Args: new_input_ids: Input token IDs of shape (batch_size, seq_len) past_key_values: Cached key-value pairs from previous decoding steps image_embeds: Image embeddings to merge (only used in first step) image_mask: Mask indicating positions for image embedding placement num_gen: Number of tokens to generate in parallel Returns: Tuple containing: - predicted_token_ids: Predicted token IDs of shape (batch_size, num_gen) - outputs: Model outputs including logits and updated cache """ # Get current sequence position start_position = past_key_values.get_seq_length() if past_key_values is not None else 0 batch_size, seq_len = new_input_ids.shape # Create position IDs directly on GPU to avoid CPU-GPU transfer position_ids = torch.arange( start_position, start_position + seq_len, dtype=torch.long, device=new_input_ids.device ).unsqueeze(0) # Process image embeddings only on first decoding step inputs_embeds = None if start_position == 0: inputs_embeds = self.get_input_idx_embeddings(new_input_ids) if image_embeds is not None: inputs_embeds = self.get_merge_embedding(inputs_embeds, image_embeds, image_mask) # Create 4D causal attention mask attention_mask = None if start_position > 0 and seq_len > 0: # When using KV cache, create mask for new tokens attention_mask = self.create_causal_mask_for_kv_cache( start_position, seq_len, new_input_ids.device, dtype=torch.bfloat16 ) elif start_position == 0 and seq_len > 0: # First decoding, create complete causal mask attention_mask = self.create_4d_causal_mask( seq_len, new_input_ids.device, dtype=torch.bfloat16 ) with torch.no_grad(): if start_position > 0: outputs = self.forward( input_ids=new_input_ids, inputs_embeds=None, attention_mask=None, # Note: attention_mask currently disabled position_ids=position_ids, use_cache=True, cache_position=None, past_key_values=past_key_values, ) else: outputs = self.forward( input_ids=None, inputs_embeds=inputs_embeds, attention_mask=None, # Note: attention_mask currently disabled position_ids=position_ids, use_cache=True, cache_position=None, past_key_values=past_key_values, ) # Extract predicted token IDs from logits predicted_token_ids = outputs.logits[:, -(num_gen + 1):-1].argmax(dim=-1) return predicted_token_ids, outputs def generate_parallel_decoder( self, inputs: Dict[str, torch.Tensor], image_embeds: torch.Tensor, mask_token_id: int, max_new_tokens: int = 8192, num_gen: int = 64, verbose: bool = False ) -> List[int]: """Generate tokens using optimized parallel decoding with dual-pass verification. This method implements a parallel decoding strategy that generates multiple tokens simultaneously and verifies them in a second pass. The algorithm: 1. First pass: Predict tokens with mask tokens 2. Second pass: Verify predictions with actual predicted tokens 3.
Accept verified tokens and continue from the first unverified position Optimizations: - First decoding uses cloned cache to avoid modifying the original - Second decoding updates the original cache in-place - Minimizes CPU-GPU data transfers by operating on GPU - Pre-allocates tensors to avoid repeated creation - Removes debug output from inner loops (controlled by verbose flag) - Entire loop wrapped with torch.no_grad() for efficiency Args: inputs: Input dictionary containing 'input_ids' tensor image_embeds: Image embeddings for multimodal processing mask_token_id: Token ID used for masked positions max_new_tokens: Maximum number of tokens to generate num_gen: Number of tokens to generate in parallel per iteration verbose: If True, print detailed progress information Returns: List of generated token IDs """ if verbose: print("Starting parallel decoder generation") # Constants STOP_TOKEN_ID = 128001 device = self.model.device input_ids = inputs["input_ids"] decoder_idx = [] # Pre-allocate mask tokens tensor to avoid repeated creation mask_tokens = torch.full((1, num_gen), mask_token_id, dtype=torch.long, device=device) # Initialize KV cache prefix_past_key_values = DynamicCache() step = 0 is_exit = False # Cache initial token ID prefix_step_id = input_ids[0, 0].item() with torch.no_grad(): while len(decoder_idx) < max_new_tokens and not is_exit: # ============ First Pass: Predict with mask tokens ============ new_input_ids = torch.cat([input_ids, mask_tokens], dim=1) # Use cloned cache for first pass to preserve original if step == 0: first_cache = DynamicCache() # Create image mask for first step mask = new_input_ids == self.config.image_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand(-1, -1, image_embeds.size(-1)) image_mask = mask_expanded.to(image_embeds.device) else: first_cache = self.clone_past_key_values(prefix_past_key_values) first_predicted_ids, _ = self._first_decoder( new_input_ids, past_key_values=first_cache, image_embeds=image_embeds if step == 0 else None, image_mask=image_mask if step == 0 else None, num_gen=num_gen ) # ============ Second Pass: Verify with predicted tokens ============ new_input_ids = torch.cat([input_ids, first_predicted_ids], dim=1) # Use original cache for second pass (will be updated and retained) if step == 0: second_cache = DynamicCache() else: second_cache = prefix_past_key_values second_predicted_ids, outputs = self._first_decoder( new_input_ids, past_key_values=second_cache, image_embeds=image_embeds if step == 0 else None, image_mask=image_mask if step == 0 else None, num_gen=num_gen ) # ============ Compare predictions and count successes ============ first_pred_list = first_predicted_ids[0].tolist() second_pred_list = second_predicted_ids[0].tolist() if verbose: print(f"First pass predictions: {first_pred_list}") print(f"Second pass predictions: {second_pred_list}") # Compare predictions to find verified tokens success = 0 for idx in range(len(second_pred_list) - 1): first_id = first_pred_list[idx] second_id = second_pred_list[idx] next_second_id = second_pred_list[idx + 1] # Check for stop token if second_id == STOP_TOKEN_ID: is_exit = True break if next_second_id == STOP_TOKEN_ID and idx == len(second_pred_list) - 2: success += 1 is_exit = True break # Verify prediction consistency if first_id == second_id: success += 1 else: break # ============ Update decoded tokens ============ if step == 0: decoder_idx.extend(second_pred_list[:success]) else: if verbose: print(f"Verified {success} tokens: 
{second_pred_list[:success]}") decoder_idx.append(prefix_step_id) decoder_idx.extend(second_pred_list[:success]) if verbose: print(f"Exit status: {is_exit}") print(f"Total decoded tokens: {len(decoder_idx)}") # ============ Truncate KV cache to verified length ============ past_key_values = outputs.past_key_values if past_key_values is not None: current_kv_len = past_key_values.get_seq_length() num_to_keep = current_kv_len - (num_gen - success) prefix_past_key_values = self.truncate_past_key_values( past_key_values, num_to_keep ) else: prefix_past_key_values = None # Update input_ids for next iteration next_token_id = ( second_pred_list[success] if success < len(second_pred_list) else prefix_step_id ) input_ids = torch.tensor( [[next_token_id]], dtype=torch.long, device=device ) prefix_step_id = next_token_id step += 1 if verbose: print(f"Step {step} completed, success rate: {success}/{num_gen}\n") return decoder_idx __all__ = ["YoutuPreTrainedModel", "YoutuModel", "YoutuVLForConditionalGeneration"]
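
# ---------------------------------------------------------------------------
# Usage sketch (comments only; the checkpoint path, preprocessing step, and the
# mask-token id are placeholders, not values defined by this module):
#
#     model = YoutuVLForConditionalGeneration.from_pretrained(
#         "path/to/youtu-vl-checkpoint", torch_dtype=torch.bfloat16, device_map="cuda"
#     ).eval()
#     # pixel_values / pixel_attention_mask / spatial_shapes come from the image processor
#     image_embeds = model.get_visiual_features(pixel_values, pixel_attention_mask, spatial_shapes)
#     token_ids = model.generate_parallel_decoder(
#         inputs={"input_ids": input_ids},   # (1, seq_len) prompt containing image placeholder tokens
#         image_embeds=image_embeds,
#         mask_token_id=MASK_TOKEN_ID,       # placeholder; taken from the tokenizer/config
#         max_new_tokens=1024,
#         num_gen=64,
#     )
# ---------------------------------------------------------------------------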