| from transformers import Olmo2Model, Olmo2ForCausalLM, AutoTokenizer, logging |
| from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES |
| from transformers.modeling_outputs import ( |
| CausalLMOutputWithPast, |
| BaseModelOutputWithPast, |
| ) |
| import numpy as np |
| import math |
| from torch import nn |
| import pandas as pd |
| from transformers.cache_utils import Cache, DynamicCache, StaticCache |
| from dataclasses import dataclass |
|
|
| |
| from transformers.models.olmo2.modeling_olmo2 import Olmo2RotaryEmbedding, Olmo2Attention, Olmo2MLP, Olmo2RMSNorm, apply_rotary_pos_emb, eager_attention_forward, Olmo2DecoderLayer |
| from transformers.models.olmo2.configuration_olmo2 import Olmo2Config |
| from transformers.processing_utils import Unpack |
| from transformers.modeling_flash_attention_utils import FlashAttentionKwargs |
| from transformers.utils import LossKwargs |
| from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS |
|
|
| from torch.nn.functional import cosine_similarity |
| import pdb |
| from dataset import * |
| import torch |
| import torch.nn.functional as F |
| import functools |
| import torch.distributed as dist |
| from torch.distributed.fsdp import ( |
| FullyShardedDataParallel as FSDP, |
| MixedPrecision, |
| BackwardPrefetch, |
| ShardingStrategy, |
| FullStateDictConfig, |
| StateDictType, |
| ) |
| from torch.distributed.fsdp.wrap import ( |
| transformer_auto_wrap_policy, |
| enable_wrap, |
| wrap, |
| ) |
| from functools import partial |
| from torch.utils.data import DataLoader |
| from pathlib import Path |
| from typing import Type, List, Optional, Tuple, Union, Callable, Dict, Any |
|
|
|
|
| |
| import inspect |
| from transformers.generation.configuration_utils import ( |
| NEED_SETUP_CACHE_CLASSES_MAPPING, |
| QUANT_BACKEND_CLASSES_MAPPING, |
| GenerationConfig, |
| GenerationMode, |
| ) |
| from transformers.generation.logits_process import LogitsProcessorList |
| from transformers.generation.stopping_criteria import StoppingCriteriaList |
| from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled |
| from transformers.integrations.fsdp import is_fsdp_managed_module |
|
|
| from transformers.generation.utils import ( |
| is_torchdynamo_compiling, ModelOutput, GenerateDecoderOnlyOutput, |
| GenerateEncoderDecoderOutput, GenerateBeamDecoderOnlyOutput, |
| GenerateBeamEncoderDecoderOutput, GreedySearchDecoderOnlyOutput, |
| ContrastiveSearchDecoderOnlyOutput, SampleDecoderOnlyOutput, |
| ContrastiveSearchEncoderDecoderOutput, GreedySearchEncoderDecoderOutput, |
| SampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput, |
| BeamSampleDecoderOnlyOutput, BeamSearchEncoderDecoderOutput, |
| BeamSampleEncoderDecoderOutput, GreedySearchOutput, SampleOutput, |
| BeamSearchOutput, BeamSampleOutput, ContrastiveSearchOutput, |
| GenerateNonBeamOutput, GenerateBeamOutput, GenerateOutput) |
| from transformers.generation.stopping_criteria import ( |
| ConfidenceCriteria, |
| EosTokenCriteria, |
| MaxLengthCriteria, |
| MaxTimeCriteria, |
| StoppingCriteria, |
| StoppingCriteriaList, |
| StopStringCriteria, |
| ) |
|
|
| from transformers.generation.stopping_criteria import STOPPING_CRITERIA_INPUTS_DOCSTRING |
| from transformers.pytorch_utils import isin_mps_friendly |
| from transformers.utils import add_start_docstrings |
|
|
|
|
class EosTokenCriteriaForSemiNAT(StoppingCriteria):
    """
    Stop generation as soon as an "end-of-sequence" token appears anywhere in
    the last `last_k` generated tokens (a Semi-NAT step can emit several
    tokens at once, so a plain last-token check is not enough).

    Args:
        eos_token_id (`Union[int, List[int], torch.Tensor]`):
            The id(s) of the *end-of-sequence* token.
    """

    def __init__(self, eos_token_id: Union[int, List[int], torch.Tensor]):
        # Normalise scalars / lists into a 1-D tensor once, up front.
        if isinstance(eos_token_id, torch.Tensor):
            self.eos_token_id = eos_token_id
        else:
            ids = [eos_token_id] if isinstance(eos_token_id, int) else eos_token_id
            self.eos_token_id = torch.tensor(ids)

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, last_k: int, **kwargs) -> torch.BoolTensor:
        # Keep the EOS ids on the same device as the sequences being checked.
        self.eos_token_id = self.eos_token_id.to(input_ids.device)
        # Inspect only the trailing `last_k` tokens of every sequence.
        eos_hits = isin_mps_friendly(input_ids[:, -last_k:], self.eos_token_id)
        return torch.any(eos_hits, dim=1)
|
|
|
|
|
|
| |
|
|
|
|
# Marker type combining flash-attention kwargs with loss kwargs, used to
# type-annotate the **kwargs accepted by the causal-LM forward pass.
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
|
|
|
|
@dataclass
class ModelOutputWithPastForSemiNAT(BaseModelOutputWithPast):
    """Base model output extended with the Semi-NAT specific tensors returned
    by `Olmo2ModelForSemiNAT.forward`."""

    # Per-chunk hidden states produced by the autoregressive backbone.
    chunk_hidden_state: torch.FloatTensor = None
    # Gold chunk lengths (-100 marks padded/skipped slots) — training only.
    length_ground_truth: Optional[torch.FloatTensor] = None
    # Logits of the chunk-length predictor head.
    length_logits: Optional[torch.FloatTensor] = None
    # Rotary (cos, sin) embeddings computed for the token-level positions.
    position_embeddings: Optional[torch.FloatTensor] = None
    # Hidden states of the non-autoregressive decoder (None at inference).
    nar_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
|
|
|
|
|
|
|
|
class TwoLayerMLP(nn.Module):
    """Two-layer feed-forward block (H -> 4H -> H) with GELU and dropout.

    Works on inputs with any number of leading batch dimensions.
    """

    def __init__(self, hidden_size: int, dropout_rate: float = 0.1):
        """
        Args:
            hidden_size (int): size of the last (feature) dimension.
            dropout_rate (float): dropout probability, default 0.1.
        """
        super().__init__()
        self.fc1 = nn.Linear(hidden_size, 4 * hidden_size)
        self.fc2 = nn.Linear(4 * hidden_size, hidden_size)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.activation = nn.GELU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): input of shape (..., hidden_size); any leading
                batch dimensions are supported.

        Returns:
            torch.Tensor: output with the same shape as the input.
        """
        # nn.Linear already applies to the last dimension of an N-D tensor,
        # so the flatten/unflatten round-trip of the original is unnecessary.
        x = self.fc1(x)
        x = self.activation(x)
        x = self.dropout(x)
        return self.fc2(x)
|
|
|
|
|
|
|
|
class Olmo2ConfigForSemiNAT(Olmo2Config):
    """Olmo2 configuration extended with Semi-NAT specific options."""

    def __init__(self, chunk_size_limit: int = 5, decoder_layers: int = 1, encoder_layer: int = 1, mlp: bool = False, position_embedding_type: str = "absolute", attn_implementation: str = "sdpa", length_loss_type: str = "ce", **kwargs):
        super().__init__(**kwargs)
        # Maximum number of tokens a single chunk may hold.
        self.chunk_size_limit = chunk_size_limit
        # Depth of the NAT decoder / encoder stacks.
        self.decoder_layers = decoder_layers
        self.encoder_layer = encoder_layer
        # Whether NAT inputs are passed through an extra two-layer MLP.
        self.mlp = mlp
        # "absolute" (sinusoidal add) or "relative" (rotary) NAT positions.
        self.position_embedding_type = position_embedding_type
        self._attn_implementation = attn_implementation
        # "ce" (classification) or "mse" (regression) length loss.
        self.length_loss_type = length_loss_type
| |
|
|
|
|
class Olmo2AttentionForSemiNAT(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Olmo2ConfigForSemiNAT, layer_idx: Optional[int] = None, is_causal: bool = True):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Fall back to hidden_size // num_heads when the config carries no
        # explicit head_dim attribute.
        self.head_dim = getattr(
            config, "head_dim",
            config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = is_causal

        self.q_proj = nn.Linear(config.hidden_size,
                                config.num_attention_heads * self.head_dim,
                                bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size,
                                config.num_key_value_heads * self.head_dim,
                                bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size,
                                config.num_key_value_heads * self.head_dim,
                                bias=config.attention_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim,
                                config.hidden_size,
                                bias=config.attention_bias)
        # Olmo2 normalises Q and K *before* the head split (norm-after arch).
        self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim,
                                   config.rms_norm_eps)
        self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim,
                                   config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor],
               Optional[Tuple[torch.Tensor]]]:
        """Project, (optionally) rotate, cache and attend.

        Returns (attn_output, attn_weights).
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)

        # (batch, seq, heads, head_dim) -> (batch, heads, seq, head_dim)
        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        # FIX: keep cos/sin defined even when no rotary embeddings are given;
        # the original raised a NameError in the cache-update branch whenever
        # position_embeddings was None but a KV cache was passed.
        cos = sin = None
        if position_embeddings is not None:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(
                query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin/cos needed by some cache implementations (RoPE-aware caches).
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "cache_position": cache_position
            }
            key_states, value_states = past_key_value.update(
                key_states, value_states, self.layer_idx, cache_kwargs)

        # NOTE(review): "sdpa" is hard-coded here, so config._attn_implementation
        # is ignored by this module — confirm that is intended.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS["sdpa"]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            is_causal=self.is_causal,
            **kwargs,
        )

        # Merge heads back and project to the model dimension.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
|
|
|
|
|
|
class Olmo2DecoderLayerForSemiNAT(nn.Module):
    """A single Olmo2-style (norm-after) transformer layer built on the
    Semi-NAT attention module; `is_causal` toggles causal masking."""

    def __init__(
        self,
        config: Olmo2ConfigForSemiNAT,
        layer_idx: int,
        is_causal: bool = True,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Olmo2AttentionForSemiNAT(
            config=config, layer_idx=layer_idx, is_causal=is_causal)
        self.mlp = Olmo2MLP(config)
        # Olmo2 applies RMSNorm *after* each sub-block, before the residual add.
        self.post_attention_layernorm = Olmo2RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = Olmo2RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor,
                                            torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
                                                 torch.FloatTensor]]]:
        # --- attention sub-block: attend, normalise, residual add ---
        attn_out, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + self.post_attention_layernorm(attn_out)

        # --- feed-forward sub-block, same norm-then-residual pattern ---
        ff_out = self.post_feedforward_layernorm(self.mlp(hidden_states))
        hidden_states = hidden_states + ff_out

        if output_attentions:
            return (hidden_states, self_attn_weights)
        return (hidden_states, )
|
|
|
|
class NATEncoderForSemiNAT(nn.Module):
    """Stack of `num_layer` Semi-NAT layers (causal masking enabled by
    default) used to encode token embeddings before chunk pooling."""

    def __init__(self, config: Olmo2ConfigForSemiNAT, num_layer: int = 1):
        super().__init__()
        self.num_layer = num_layer
        self.encoder_layers = nn.ModuleList(
            Olmo2DecoderLayerForSemiNAT(config, idx)
            for idx in range(self.num_layer))

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor,
                                            torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
                                                 torch.FloatTensor]]]:
        # Feed the hidden states through each layer in turn; only the
        # hidden-state output (index 0) is propagated.
        for encoder_layer in self.encoder_layers:
            hidden_states = encoder_layer(
                hidden_states=hidden_states,
                output_attentions=output_attentions,
                position_embeddings=position_embeddings,
                attention_mask=attention_mask,
            )[0]
        return hidden_states
|
|
|
|
class NATDecoderForSemiNAT(nn.Module):
    """Stack of `num_layer` non-causal Semi-NAT layers that decode the
    tokens of a chunk in parallel (is_causal=False)."""

    def __init__(self, config: Olmo2ConfigForSemiNAT, num_layer: int = 1):
        super().__init__()
        self.num_layer = num_layer
        self.decoder_layers = nn.ModuleList(
            Olmo2DecoderLayerForSemiNAT(config, idx, False)
            for idx in range(self.num_layer))

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor,
                                            torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
                                                 torch.FloatTensor]]]:
        # Run each bidirectional layer, keeping only the hidden states.
        for decoder_layer in self.decoder_layers:
            hidden_states = decoder_layer(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                position_embeddings=position_embeddings,
            )[0]
        return hidden_states
|
|
|
|
| class Olmo2ModelForSemiNAT(Olmo2Model): |
|
|
    def __init__(self, config):
        """Build the Semi-NAT backbone: the standard Olmo2 decoder stack plus
        a NAT encoder (token -> chunk) and NAT decoder (chunk -> tokens)."""
        super().__init__(config)
        # Standard autoregressive Olmo2 decoder layers (operate on chunks here).
        self.layers = nn.ModuleList([
            Olmo2DecoderLayer(config, layer_idx)
            for layer_idx in range(config.num_hidden_layers)
        ])

        # Non-autoregressive chunk decoder / token encoder stacks.
        self.decoder = NATDecoderForSemiNAT(config, config.decoder_layers)
        self.encoder = NATEncoderForSemiNAT(config, config.encoder_layer)

        # Maximum tokens per chunk; also the size of the length classifier.
        self.chunk_size_limit = config.chunk_size_limit
        self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Olmo2RotaryEmbedding(config=config)
        # Sinusoidal positions for the "absolute" NAT position mode.
        self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
        self.gradient_checkpointing = False
        # Re-created after super().__init__ — presumably to reset the base
        # model's embedding table; self.padding_idx is set by the base class.
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size,
                                         self.padding_idx)

        # Classifier head predicting each chunk's length (1..chunk_size_limit).
        self.length_predictor = nn.Linear(config.hidden_size,
                                          self.chunk_size_limit)
        self.mlp = config.mlp
        if self.mlp:
            # Optional projection applied to NAT decoder inputs.
            self.linear_projection = TwoLayerMLP(config.hidden_size)

        self.position_embedding_type = config.position_embedding_type
|
|
|
|
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        slice_pos: torch.Tensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        inference: Optional[bool] = None,
        padding: Optional[torch.Tensor] = None,
        is_prefill: Optional[bool] = False,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Semi-NAT forward pass.

        Pipeline: encode tokens with the NAT encoder, average-pool each chunk
        (per `slice_pos`) into one chunk embedding, run the autoregressive
        Olmo2 layers over chunks, predict each next chunk's length, and (in
        training) expand chunk states back to token slots and decode them in
        parallel with the NAT decoder.

        Args:
            slice_pos: (bs, num_slices) cut positions; entry p means "cut after
                token p", -1 marks padding slots.
            inference: None during training; truthy at generation time.
            is_prefill: generation-time flag — the first (prompt) step goes
                through the full training-style chunking path.
            padding: pad token id forwarded by the wrapper (unused here).

        Returns:
            ModelOutputWithPastForSemiNAT with chunk states, length logits and
            (training only) length ground truth + NAT decoder hidden states.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (output_hidden_states
                                if output_hidden_states is not None else
                                self.config.output_hidden_states)
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            # NOTE(review): `logger` is not defined at module level (only
            # `logging` is imported from transformers) — this line would raise
            # NameError if ever reached; confirm and add a module logger.
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length(
            ) if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens,
                                          past_seen_tokens +
                                          inputs_embeds.shape[1],
                                          device=inputs_embeds.device)

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # At generation time position_ids always follow the cache position.
        if inference is not None:
            position_ids = cache_position.unsqueeze(0)

        # Token-level rotary embeddings (used by the NAT encoder).
        position_embeddings = self.rotary_emb(inputs_embeds, position_ids)
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None
        # Largest number of chunks across the batch (valid slice_pos entries).
        max_chunk_num = (slice_pos != -1).sum(dim=1).max()

        length_ground_truth = None
        if not inference or is_prefill:
            # --- training / prefill: pool token states into chunk states ---
            # M_avg is a (bs, max_chunk, seq) averaging matrix; attn_mask
            # restricts encoder attention to within-chunk blocks.
            M_avg, attn_mask, length_ground_truth, chunk_attention_mask, slice_num = self.build_slice_matrix(input_ids, slice_pos)
            encoded_input = self.encoder(inputs_embeds, position_embeddings=position_embeddings, attention_mask=attn_mask)

            M_avg = M_avg.contiguous()
            encoded_input = encoded_input.contiguous()
            # NOTE(review): hard cast to bfloat16 — assumes bf16 training;
            # confirm this matches the configured compute dtype.
            M_avg = M_avg.to(torch.bfloat16)
            encoded_input = encoded_input.to(torch.bfloat16)

            # Chunk embedding = mean of its token states (matmul with M_avg).
            chunk_inputs_embeds = torch.matmul(M_avg, encoded_input)
            # Total chunks minus batch size; sizes the relative-position table.
            accumu_num = sum(slice_num) - encoded_input.shape[0]

            # Trim every per-chunk tensor to the batch-wide max chunk count.
            chunk_inputs_embeds = chunk_inputs_embeds[:, :max_chunk_num, :]
            chunk_attention_mask = chunk_attention_mask[:, :max_chunk_num]
            length_ground_truth = length_ground_truth[:, :max_chunk_num]
            chunk_position_ids = position_ids[:, :max_chunk_num]
            chunk_cache_position = cache_position[:max_chunk_num]
        else:
            # --- incremental decoding: pool only the newest chunk's tokens ---
            encoded_input = self.encoder(inputs_embeds[:, position_ids.squeeze(0)], position_embeddings=position_embeddings)
            chunk_inputs_embeds = torch.mean(encoded_input, dim=1).unsqueeze(0)
            # Map the token-level cache position to a chunk index by counting
            # how many cuts precede it (assumes batch size 1 — squeeze(0)).
            chunk_cache_position = torch.searchsorted(slice_pos.squeeze(0), cache_position - 1, right=True)[-1].unsqueeze(0)
            chunk_attention_mask = torch.ones(1, cache_position[0])
            chunk_position_ids = chunk_cache_position.unsqueeze(0)

        # Chunk-level rotary embeddings for the autoregressive stack.
        chunk_position_embeddings = self.rotary_emb(
            chunk_inputs_embeds, chunk_position_ids
        )

        hidden_states = chunk_inputs_embeds

        causal_mask = self._update_causal_mask(chunk_attention_mask,
                                               chunk_inputs_embeds,
                                               chunk_cache_position,
                                               past_key_values,
                                               output_attentions)

        # --- autoregressive Olmo2 layers over chunk embeddings ---
        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states, )
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    chunk_cache_position,
                    chunk_position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=chunk_cache_position,
                    position_embeddings=chunk_position_embeddings,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1], )

        if output_hidden_states:
            all_hidden_states += (hidden_states, )

        hidden_states = self.norm(
            hidden_states)

        # NOTE(review): next_decoder_cache is never assigned, so next_cache is
        # always None; the DynamicCache is still updated in place — confirm
        # callers rely on the in-place cache rather than this return value.
        next_cache = next_decoder_cache if use_cache else None

        # Predict the next chunk's length from each chunk state.
        self.length_predictor = self.length_predictor.to(
            hidden_states.device).to(hidden_states.dtype)
        length_logits = self.length_predictor(
            hidden_states.to(
                hidden_states.device))

        nar_hidden_states = None
        if inference is None:
            # --- training only: parallel token decoding per chunk ---
            bs, length, hidden_size = hidden_states.size()
            assert length == max_chunk_num

            # Expand chunk state i into ground-truth-many token slots (for the
            # *next* chunk), zero-padded up to chunk_size_limit.
            nat_input_embeddings, nat_attention_mask = self.repeat_with_limit_and_pad(
                hidden_states, length_ground_truth, self.chunk_size_limit, skip_val=-100)

            if self.mlp:
                nat_input_embeddings = self.linear_projection(nat_input_embeddings)

            # Full (non-causal) visibility among valid slots of a chunk.
            mask_nat_attention_mask = self.nat_prepare_4d_full_attention_mask_without_causal(
                attention_mask=nat_attention_mask,
                dtype=nat_attention_mask.dtype,
                device=nat_attention_mask.device)

            self.decoder = self.decoder.to(dtype=nat_input_embeddings.dtype)
            if self.position_embedding_type == "relative":
                # Rotary positions 0..chunk_size_limit-1 for every chunk row.
                nar_chunk_position = torch.arange(
                    0, self.chunk_size_limit).unsqueeze(0).repeat(
                        accumu_num,
                        1).to(hidden_states.device)
                pos = self.rotary_emb(nat_attention_mask, nar_chunk_position)

            elif self.position_embedding_type == "absolute":
                # Sinusoidal positions are added directly to the embeddings.
                nat_input_embeddings = self.pos_encoder(nat_input_embeddings)
                pos = None

            nar_hidden_states = self.decoder(
                nat_input_embeddings,
                attention_mask=mask_nat_attention_mask,
                position_embeddings=pos,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=None,
            )
            nar_hidden_states = self.norm(
                nar_hidden_states)

        return ModelOutputWithPastForSemiNAT(
            chunk_hidden_state=hidden_states,
            length_ground_truth=length_ground_truth,
            length_logits=length_logits,
            position_embeddings=position_embeddings,
            nar_hidden_state=nar_hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
|
|
| |
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
|
|
|
|
| def repeat_with_limit_and_pad(self, x: torch.Tensor, repeat_counts: torch.Tensor, chunk_limit: int, skip_val: int = -100): |
| """ |
| 对 x 中的每个位置复制若干次(最多 chunk_limit 次),不足则 padding,跳过 repeat=-100 的项。 |
| |
| 参数: |
| - x: Tensor of shape (bs, length, hidden) |
| - repeat_counts: Tensor of shape (bs, length),每个位置的复制次数,-100 表示跳过 |
| - chunk_limit: int,每个位置最多复制的次数,不足则 padding |
| - skip_val: int,跳过标记值,默认 -100 |
| |
| 返回: |
| - Tensor of shape (chunk_num, chunk_limit, hidden) |
| """ |
| bs, length, hidden = x.shape |
| device = x.device |
|
|
|
|
| x = x[:,:-1,:] |
| repeat_counts = repeat_counts[:,1:] |
|
|
| |
| x_flat = x.reshape(-1, hidden) |
| repeat_flat = repeat_counts.reshape(-1) |
|
|
| valid_mask = repeat_flat != skip_val |
| x_valid = x_flat[valid_mask] |
| repeat_valid = repeat_flat[valid_mask].clamp_max(chunk_limit) |
|
|
| |
| |
| repeated = x_valid.unsqueeze(1).expand(-1, chunk_limit, -1) |
|
|
| |
| range_k = torch.arange(chunk_limit, device=device).unsqueeze(0) |
| mask = (range_k < repeat_valid.unsqueeze(1)).unsqueeze(-1) |
|
|
| |
| out = repeated * mask |
|
|
| mask = mask.squeeze(-1).to(x.dtype) |
| |
| return out, mask |
|
|
|
|
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
| |
|
|
| def build_slice_matrix(self, input_ids, slice_pos: torch.Tensor): |
| bs, num_slices = slice_pos.shape |
| seq_len = input_ids.size(1) |
|
|
| |
| slice_pos_clipped = slice_pos.clone() |
| slice_pos_clipped[slice_pos_clipped == -1] = 0 |
|
|
| |
| prevs = torch.cat([ |
| torch.zeros((bs, 1), device=slice_pos.device, dtype=slice_pos.dtype), |
| slice_pos_clipped[:, :-1] + 1 |
| ], dim=1) |
| currents = slice_pos_clipped + 1 |
|
|
| |
| valid_mask = (slice_pos != -1) |
| lengths = currents - prevs |
| lengths[lengths <= 0] = -100 |
|
|
| |
| slice_num = (lengths != -100).sum(dim=1).tolist() |
|
|
| |
| chunk_mask = torch.zeros_like(lengths, dtype=torch.long) |
| for i in range(lengths.size(0)): |
| chunk_mask[i, :slice_num[i]] = 1 |
| values = torch.zeros_like(lengths, dtype=torch.float) |
| values[valid_mask] = 1.0 / lengths[valid_mask] |
|
|
| chunk_nums = valid_mask.sum(dim=1) |
| max_chunk_num = chunk_nums.max().item() |
|
|
| |
| M = torch.zeros((bs, max_chunk_num, seq_len), device=slice_pos.device) |
| |
| |
| |
|
|
| |
| attn_mask = torch.eye(seq_len, dtype=torch.bool, device=slice_pos.device) |
| attn_mask = attn_mask.unsqueeze(0).unsqueeze(0).expand(bs, 1, seq_len, seq_len) |
|
|
|
|
| |
| for b in range(bs): |
| a_b = prevs[b] |
| b_b = currents[b] |
| v_b = values[b] |
|
|
| for i in range(num_slices): |
| if not valid_mask[b, i]: |
| continue |
| a = a_b[i].item() |
| b_ = b_b[i].item() |
| if b_ > a: |
| |
| M[b, i, a:b_] = v_b[i] |
| |
| attn_mask[b, :, a:b_, a:b_] = True |
| |
| return M, attn_mask, lengths, chunk_mask, slice_num |
|
|
|
|
| def nat_prepare_4d_full_attention_mask_without_causal( |
| self, |
| attention_mask: torch.Tensor, |
| dtype: torch.dtype, |
| device: torch.device, |
| mask_val: float = -1e4, |
| ) -> torch.Tensor: |
| """ |
| - 对于 query 为有效 token (attention_mask==1) 的行: |
| 仅允许观看 key 也是有效 token 的列 -> 完全互看 |
| - 对于 query 为 padding 的行: |
| 采用 causal 下三角 (j <= i) -> 避免整行 -inf |
| 返回 shape = (bs, 1, L, L) 的 additive mask |
| """ |
| if attention_mask.dim() != 2: |
| raise ValueError( |
| "Expected 2-D attention_mask with shape (batch, seq_len)" |
| ) |
|
|
| bs, L = attention_mask.shape |
| attn_mask_f = attention_mask.to(device=device, dtype=torch.float32) |
|
|
| |
| |
| valid2valid = attn_mask_f[:, :, None] * attn_mask_f[:, None, :] |
|
|
| |
| |
| lower_tri = torch.tril(torch.ones(L, L, device=device)) |
| |
| query_is_pad = (1.0 - attn_mask_f)[:, :, None] |
| causal_part = query_is_pad * lower_tri |
|
|
| |
| visible = torch.clamp(valid2valid + causal_part, 0.0, 1.0) |
|
|
| |
| additive_mask = (1.0 - visible) * mask_val |
| additive_mask = additive_mask[:, None, :, :] |
|
|
| return additive_mask.to(dtype=dtype) |
|
|
|
|
|
|
def compute_chunk_lengths(slice_pos: torch.Tensor, pad_value: int = -100):
    """Derive per-chunk token counts from cut positions.

    Args:
        slice_pos: [B, L] tensor; entry p means "cut after token p",
            -1 marks padding slots.
        pad_value: fill value for unused slots (default -100).

    Returns:
        [B, L] long tensor whose leading entries in each row are the chunk
        lengths and whose remainder is `pad_value`.
    """
    _, width = slice_pos.shape
    dev = slice_pos.device

    rows = []
    for row in slice_pos:
        # Convert cut positions to exclusive chunk end offsets.
        ends = row[row != -1] + 1
        boundaries = torch.cat([
            torch.tensor([0], device=dev),
            ends,
        ])
        # Consecutive boundary differences are the chunk lengths.
        chunk_lens = boundaries[1:] - boundaries[:-1]

        filled = torch.full((width,), pad_value, device=dev, dtype=torch.long)
        filled[:chunk_lens.shape[0]] = chunk_lens
        rows.append(filled)

    return torch.stack(rows)
|
|
|
|
|
|
|
|
| class Olmo2ForCausalLMForSemiNAT(Olmo2ForCausalLM): |
|
|
    def __init__(self, config, *args, **kwargs):
        """Wrap the Semi-NAT backbone with an LM head and loss plumbing."""
        super().__init__(config, *args, **kwargs)
        # Sinusoidal positions used for the NAT decoder at generation time.
        self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.chunk_size_limit = config.chunk_size_limit
        # Replace the base-class backbone with the Semi-NAT variant.
        self.model = Olmo2ModelForSemiNAT(config)
        # NOTE(review): duplicate of the assignment above — harmless, but one
        # of the two self.vocab_size lines could be removed.
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size,
                                 config.vocab_size,
                                 bias=False)

        # Re-run weight init / tying after swapping in the new submodules.
        self.post_init()
|
|
| def forward( |
| self, |
| input_ids: torch.LongTensor = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| slice_pos: Optional[torch.Tensor] = None, |
| slice_label: Optional[torch.Tensor] = None, |
| past_key_values: Optional[List[torch.FloatTensor]] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| logits_to_keep: Union[int, torch.Tensor] = 0, |
| is_prefill: Optional[bool] = False, |
| |
| **kwargs: Unpack[KwargsForCausalLM], |
| ) -> Union[Tuple, CausalLMOutputWithPast]: |
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = (output_hidden_states |
| if output_hidden_states is not None else |
| self.config.output_hidden_states) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| |
|
|
| |
|
|
|
|
| if labels is not None: |
| outputs = self.model( |
| input_ids=input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| slice_pos=slice_pos, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| cache_position=cache_position, |
| padding=self.padding_idx, |
| is_prefill=is_prefill, |
| **kwargs, |
| ) |
| else: |
| outputs = self.model( |
| input_ids=input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| slice_pos=slice_pos, |
| past_key_values=past_key_values, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| cache_position=cache_position, |
| padding=self.padding_idx, |
| inference=True, |
| is_prefill=is_prefill, |
| ) |
|
|
| |
|
|
| chunk_hidden_states = outputs.chunk_hidden_state |
| bs, length, hidden_size = chunk_hidden_states.size() |
|
|
|
|
| |
| loss = None |
| loss1 = None |
| loss2 = None |
| |
|
|
| if labels is not None: |
| length_ground_truth = outputs.length_ground_truth |
| length_logits = outputs.length_logits |
|
|
| new_length_ground_truth = torch.where( |
| length_ground_truth != -100, |
| length_ground_truth - 1, |
| length_ground_truth |
| ) |
|
|
| shift_length_logits = length_logits[:, :-1, :] |
| shift_new_length_ground_truth = new_length_ground_truth[:, 1:] |
|
|
| logits_flat = shift_length_logits.reshape(-1, self.chunk_size_limit) |
| labels_flat = shift_new_length_ground_truth.reshape(-1) |
|
|
| shift_slice_label = slice_label[:, 1:length_logits.size(1)] |
| slice_label_flat = shift_slice_label.reshape(-1) |
| mask = (slice_label_flat == -1) |
| labels_flat[mask] = -100 |
|
|
| length_loss_type = getattr(self.config, "length_loss_type", "ce") |
| if length_loss_type == "mse": |
| logits_softmax = torch.nn.functional.softmax(logits_flat, dim=-1) |
| predicted_lengths = torch.sum( |
| logits_softmax * torch.arange(self.chunk_size_limit).to( |
| chunk_hidden_states.device).to(chunk_hidden_states.dtype), |
| dim=1 |
| ) |
| loss1 = torch.mean((predicted_lengths[labels_flat != -100] - |
| labels_flat[labels_flat != -100].float()) ** 2) |
| elif length_loss_type == "ce": |
| loss1 = F.cross_entropy( |
| logits_flat[labels_flat != -100], |
| labels_flat[labels_flat != -100] |
| ) |
|
|
| |
|
|
| nar_hidden_state = outputs.nar_hidden_state |
|
|
| |
|
|
| nar_labels = torch.full( |
| (nar_hidden_state.size(0), nar_hidden_state.size(1)), |
| -100).to(nar_hidden_state.device) |
|
|
| nar_labels = self.update_nar_labels(nar_labels, labels, slice_pos, |
| length_ground_truth, input_ids, |
| self.chunk_size_limit) |
|
|
| |
| slice_indices = slice(-logits_to_keep, None) if isinstance( |
| logits_to_keep, int) else logits_to_keep |
| logits = self.lm_head( |
| nar_hidden_state[:, slice_indices, :]) |
| |
| |
| |
|
|
|
|
| loss2 = self.loss_function_seminat( |
| logits, |
| nar_labels, |
| self.vocab_size, |
| ) |
| |
| |
| |
| |
| |
| |
|
|
| else: |
| softmaxed = torch.softmax(outputs.length_logits[:, -1, :], dim=-1) |
| length = torch.argmax(softmaxed, dim=-1).item() + 1 |
| |
|
|
| |
| |
| |
| |
| nat_input_embeddings = torch.zeros( |
| 1, length, hidden_size).to(input_ids.device).to( |
| outputs.chunk_hidden_state.dtype) |
| |
| |
|
|
|
|
| |
|
|
| nat_input_embeddings[:, : length, :] = outputs.chunk_hidden_state[:, -1, :].expand( |
| length, -1).to(input_ids.device).to( |
| outputs.chunk_hidden_state.dtype) |
|
|
| |
| if self.config.mlp: |
| nat_input_embeddings = self.linear_projection(nat_input_embeddings) |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
| nat_input_embeddings = self.pos_encoder(nat_input_embeddings) |
| |
| |
| nar_hidden_states = self.model.decoder( |
| nat_input_embeddings, |
| |
| attention_mask=None, |
| |
| position_embeddings=None, |
| output_attentions=output_attentions, |
| use_cache=False, |
| cache_position=None, |
| ) |
|
|
| nar_hidden_states = self.model.norm(nar_hidden_states) |
| |
| |
| logits = self.lm_head(nar_hidden_states[:, :, :]) |
|
|
|
|
| |
| return CausalLMOutputWithPast( |
| loss=(loss1, loss2), |
| logits=logits, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
| |
|
|
| |
| |
| |
| |
| |
| |
| return CausalLMOutputWithPast( |
| loss=(loss1, loss2), |
| logits=logits, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
|
|
|
|
|
|
|
|
|
|
| def update_nar_labels(self, nar_labels, labels, slice_pos, |
| length_ground_truth, input_ids, chunk_size_limit): |
| bs, length = input_ids.size() |
| chunk = 0 |
| for b in range(bs): |
| last_cut = slice_pos[b][0] |
| for i in range(1, length): |
| if slice_pos[b, i] != -1: |
| |
| try: |
| nar_labels[chunk, :length_ground_truth[b, i]] = labels[ |
| b, last_cut + 1:slice_pos[b, i] + 1] |
| except: |
| pdb.set_trace() |
| last_cut = slice_pos[b, i] |
| chunk += 1 |
| else: |
| break |
| |
| return nar_labels |
|
|
| def fixed_cross_entropy(self, |
| source, |
| target, |
| num_items_in_batch: int = None, |
| ignore_index: int = -100, |
| **kwargs): |
| reduction = "sum" if num_items_in_batch is not None else "mean" |
| loss = F.cross_entropy(source, |
| target, |
| ignore_index=ignore_index, |
| reduction=reduction) |
| if torch.isnan(loss): |
| |
| pdb.set_trace() |
| |
| if reduction == "sum": |
| loss = loss / num_items_in_batch |
| return loss |
|
|
| def loss_function_seminat(self, |
| logits, |
| labels, |
| vocab_size: int, |
| num_items_in_batch: int = None, |
| ignore_index: int = -100, |
| **kwargs): |
| |
| |
|
|
|
|
| logits = logits.float() |
| labels = labels.to(logits.device) |
|
|
| |
| logits = logits.view(-1, vocab_size) |
| labels = labels.view(-1) |
|
|
| |
| labels = labels.to(logits.device) |
|
|
| |
| loss = self.fixed_cross_entropy(logits, labels, num_items_in_batch, |
| ignore_index, **kwargs) |
| return loss |
|
|
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor],
                                                    List[int]]] = None,
        synced_gpus: Optional[bool] = None,
        assistant_model: Optional["PreTrainedModel"] = None,
        streamer: Optional["BaseStreamer"] = None,
        negative_prompt_ids: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        prefilling_length: int = 0,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        """Entry point for chunk-wise (semi-NAT) generation.

        Follows the structure of ``GenerationMixin.generate`` (validation,
        config merging, input/attention-mask/cache preparation) but always
        dispatches decoding to ``_sampleforseminat`` and builds stopping
        criteria via ``_get_stopping_criteria_for_seminat``.

        Args:
            prefilling_length: prompt length handed to
                ``_sampleforseminat`` to seed the chunk boundary positions
                (``slice_pos``).
            (remaining arguments mirror ``GenerationMixin.generate``)
        """
        # 1. Validate the model class and pull tokenizer-related kwargs
        # that are only consumed by stopping criteria / assistant checks.
        self._validate_model_class()
        tokenizer = kwargs.pop(
            "tokenizer",
            None)
        assistant_tokenizer = kwargs.pop(
            "assistant_tokenizer", None)

        # 2. Merge user kwargs into a concrete GenerationConfig; leftover
        # kwargs become model_kwargs for the forward pass.
        generation_config, model_kwargs = self._prepare_generation_config(
            generation_config, **kwargs)
        self._validate_model_kwargs(model_kwargs.copy())
        self._validate_assistant(assistant_model, tokenizer,
                                 assistant_tokenizer)

        # 3. Under multi-rank DeepSpeed ZeRO-3 / FSDP every rank must keep
        # stepping until all ranks finish, so default synced_gpus on.
        if synced_gpus is None:
            synced_gpus = (
                is_deepspeed_zero3_enabled()
                or is_fsdp_managed_module(self)) and dist.get_world_size() > 1

        # 4. Default to empty processor / criteria lists.
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList(
        )
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList(
        )

        # Figure out whether an attention mask can/should be synthesized.
        accepts_attention_mask = "attention_mask" in set(
            inspect.signature(self.forward).parameters.keys())
        requires_attention_mask = "encoder_outputs" not in model_kwargs
        kwargs_has_attention_mask = model_kwargs.get("attention_mask",
                                                     None) is not None

        # 5. Resolve the actual model input (input_ids or inputs_embeds).
        inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
            inputs, generation_config.bos_token_id, model_kwargs)
        batch_size = inputs_tensor.shape[0]

        device = inputs_tensor.device
        self._prepare_special_tokens(generation_config,
                                     kwargs_has_attention_mask,
                                     device=device)

        # Decoder-only models must be left-padded for correct generation;
        # warn if right-padding is detected (pad token in the last column).
        if not self.config.is_encoder_decoder and not is_torchdynamo_compiling(
        ):
            if (generation_config._pad_token_tensor is not None
                    and batch_size > 1 and len(inputs_tensor.shape) == 2
                    and torch.sum(inputs_tensor[:, -1] ==
                                  generation_config._pad_token_tensor) > 0):
                logger.warning(
                    "A decoder-only architecture is being used, but right-padding was detected! For correct "
                    "generation results, please set `padding_side='left'` when initializing the tokenizer."
                )

        # Generating from inputs_embeds on a decoder-only model requires
        # the cache to carry the prompt context.
        if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds":
            generation_config.use_cache = True

        # 6. Synthesize an attention mask when we can, otherwise validate
        # the user-provided one.
        if not kwargs_has_attention_mask and requires_attention_mask and accepts_attention_mask:
            model_kwargs[
                "attention_mask"] = self._prepare_attention_mask_for_generation(
                    inputs_tensor, generation_config, model_kwargs)
        elif kwargs_has_attention_mask:
            if model_input_name == "input_ids" and len(
                    model_kwargs["attention_mask"].shape) > 2:
                raise ValueError(
                    "`attention_mask` passed to `generate` must be 2D.")

        # Run the encoder once up front for encoder-decoder models.
        if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
            model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
                inputs_tensor, model_kwargs, model_input_name,
                generation_config)

        # 7. Materialize the decoder-side input_ids.
        if self.config.is_encoder_decoder:
            input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
                batch_size=batch_size,
                model_input_name=model_input_name,
                model_kwargs=model_kwargs,
                decoder_start_token_id=generation_config.
                _decoder_start_token_tensor,
                device=inputs_tensor.device,
            )
        else:
            input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop(
                "input_ids")

        if generation_config.token_healing:
            input_ids = self.heal_tokens(input_ids, tokenizer)

        # Echo the prompt to the streamer before decoding starts.
        if streamer is not None:
            streamer.put(input_ids.cpu())

        # 8. Resolve max/min generation lengths relative to the prompt.
        input_ids_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get(
            "max_length") is None and generation_config.max_length is not None
        has_default_min_length = kwargs.get(
            "min_length") is None and generation_config.min_length is not None
        generation_config = self._prepare_generated_length(
            generation_config=generation_config,
            has_default_max_length=has_default_max_length,
            has_default_min_length=has_default_min_length,
            model_input_name=model_input_name,
            inputs_tensor=inputs_tensor,
            input_ids_length=input_ids_length,
        )

        # Only the last position's logits are needed during decoding.
        if self._supports_logits_to_keep(
        ) and "logits_to_keep" not in model_kwargs:
            model_kwargs["logits_to_keep"] = 1

        self._validate_generated_length(generation_config, input_ids_length,
                                        has_default_max_length)

        # 9. Size and allocate the KV cache.
        max_cache_length = generation_config.max_length - 1
        if (inputs_tensor.shape[1] != input_ids_length
                and model_input_name == "inputs_embeds"
                and not self.config.is_encoder_decoder):
            max_cache_length += inputs_tensor.shape[1]
        self._prepare_cache_for_generation(generation_config, model_kwargs,
                                           assistant_model, batch_size,
                                           max_cache_length, device)

        # NOTE(review): generation_mode is computed but never used to
        # dispatch — decoding always goes through _sampleforseminat.
        generation_mode = generation_config.get_generation_mode(
            assistant_model)

        if streamer is not None and (generation_config.num_beams > 1):
            raise ValueError(
                "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
            )

        # Warn on model/input device-type mismatch (slow transfers).
        if not is_torchdynamo_compiling(
        ) and self.device.type != input_ids.device.type:
            warnings.warn(
                "You are calling .generate() with the `input_ids` being on a device type different"
                f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
                f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
                " Please make sure that you have put `input_ids` to the"
                f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
                " running `.generate()`.",
                UserWarning,
            )

        # 10. Build logits processors and the semi-NAT stopping criteria.
        prepared_logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_length,
            encoder_input_ids=inputs_tensor,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            logits_processor=logits_processor,
            device=inputs_tensor.device,
            model_kwargs=model_kwargs,
            negative_prompt_ids=negative_prompt_ids,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
        )
        prepared_stopping_criteria = self._get_stopping_criteria_for_seminat(
            generation_config=generation_config,
            stopping_criteria=stopping_criteria,
            tokenizer=tokenizer,
            **kwargs)

        model_kwargs["use_cache"] = generation_config.use_cache

        # Expand inputs for num_return_sequences > 1.
        input_ids, model_kwargs = self._expand_inputs_for_generation(
            input_ids=input_ids,
            expand_size=generation_config.num_return_sequences,
            is_encoder_decoder=self.config.is_encoder_decoder,
            **model_kwargs,
        )

        # 11. Run the chunk-wise decoding loop.
        result = self._sampleforseminat(
            input_ids,
            logits_processor=prepared_logits_processor,
            stopping_criteria=prepared_stopping_criteria,
            generation_config=generation_config,
            synced_gpus=synced_gpus,
            streamer=streamer,
            prefilling_length=prefilling_length,
            **model_kwargs,
        )

        # 12. Optionally convert the cache back to the legacy tuple format.
        # NOTE(review): getattr without a default raises AttributeError when
        # the cache lacks `to_legacy_cache` — confirm all cache types here
        # expose it.
        if (generation_config.return_legacy_cache is True
                and not is_torchdynamo_compiling()
                and hasattr(result, "past_key_values") and getattr(
                    result.past_key_values, "to_legacy_cache") is not None):
            result.past_key_values = result.past_key_values.to_legacy_cache()
        return result
|
|
| def _get_stopping_criteria_for_seminat( |
| self, |
| generation_config: GenerationConfig, |
| stopping_criteria: Optional[StoppingCriteriaList], |
| tokenizer: Optional["PreTrainedTokenizerBase"] = None, |
| **kwargs, |
| ) -> StoppingCriteriaList: |
| criteria = StoppingCriteriaList() |
| if generation_config.max_length is not None: |
| max_position_embeddings = getattr(self.config, "max_position_embeddings", None) |
| criteria.append( |
| MaxLengthCriteria( |
| max_length=generation_config.max_length, |
| max_position_embeddings=max_position_embeddings, |
| ) |
| ) |
| if generation_config.max_time is not None: |
| criteria.append(MaxTimeCriteria(max_time=generation_config.max_time)) |
| if generation_config.stop_strings is not None: |
| if tokenizer is None: |
| raise ValueError( |
| "There are one or more stop strings, either in the arguments to `generate` or in the " |
| "model's generation config, but we could not locate a tokenizer. When generating with " |
| "stop strings, you must pass the model's tokenizer to the `tokenizer` argument of `generate`." |
| ) |
| criteria.append(StopStringCriteria(stop_strings=generation_config.stop_strings, tokenizer=tokenizer)) |
| if generation_config._eos_token_tensor is not None: |
| criteria.append(EosTokenCriteriaForSemiNAT(eos_token_id=generation_config._eos_token_tensor)) |
| if ( |
| generation_config.is_assistant |
| and generation_config.assistant_confidence_threshold is not None |
| and generation_config.assistant_confidence_threshold > 0 |
| ): |
| criteria.append( |
| ConfidenceCriteria(assistant_confidence_threshold=generation_config.assistant_confidence_threshold) |
| ) |
| criteria = self._merge_criteria_processor_list(criteria, stopping_criteria) |
| return criteria |
|
|
|
|
    def _sampleforseminat(
        self,
        input_ids: torch.LongTensor,
        logits_processor: LogitsProcessorList,
        stopping_criteria: StoppingCriteriaList,
        generation_config: GenerationConfig,
        synced_gpus: bool,
        streamer: Optional["BaseStreamer"],
        prefilling_length: int,
        **model_kwargs,
    ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
        """Chunk-wise greedy/sampling decode loop for semi-NAT generation.

        Each forward call emits a whole chunk of logits
        (``outputs.logits`` with a chunk-length dimension); the chunk is
        appended to ``input_ids`` at once and ``slice_pos`` is extended with
        the new chunk boundary. Otherwise mirrors
        ``GenerationMixin._sample``.
        """
        # Unpack frequently used generation options.
        pad_token_id = generation_config._pad_token_tensor
        output_attentions = generation_config.output_attentions
        output_hidden_states = generation_config.output_hidden_states
        output_scores = generation_config.output_scores
        output_logits = generation_config.output_logits
        return_dict_in_generate = generation_config.return_dict_in_generate
        max_length = generation_config.max_length
        has_eos_stopping_criteria = any(
            hasattr(criteria, "eos_token_id")
            for criteria in stopping_criteria)
        do_sample = generation_config.do_sample

        # Optional per-step traces, collected only when a dict is returned.
        scores = () if (return_dict_in_generate and output_scores) else None
        raw_logits = () if (return_dict_in_generate
                            and output_logits) else None
        decoder_attentions = () if (return_dict_in_generate
                                    and output_attentions) else None
        cross_attentions = () if (return_dict_in_generate
                                  and output_attentions) else None
        decoder_hidden_states = () if (return_dict_in_generate
                                       and output_hidden_states) else None

        # Encoder-side traces for encoder-decoder models.
        if return_dict_in_generate and self.config.is_encoder_decoder:
            encoder_attentions = model_kwargs["encoder_outputs"].get(
                "attentions") if output_attentions else None
            encoder_hidden_states = (
                model_kwargs["encoder_outputs"].get("hidden_states")
                if output_hidden_states else None)

        # Loop bookkeeping: per-sequence finished flags and cache positions.
        batch_size, cur_len = input_ids.shape
        this_peer_finished = False
        unfinished_sequences = torch.ones(
            batch_size, dtype=torch.long,
            device=input_ids.device)
        model_kwargs = self._get_initial_cache_position(
            input_ids, model_kwargs)

        model_forward = self.__call__
        # Optionally compile the forward pass when a compileable cache is
        # in use (same heuristic as GenerationMixin._sample).
        if isinstance(model_kwargs.get("past_key_values"), Cache):
            is_compileable = model_kwargs[
                "past_key_values"].is_compileable and self._supports_static_cache
            is_compileable = is_compileable and not self.generation_config.disable_compile
            if is_compileable and (
                    self.device.type == "cuda"
                    or generation_config.compile_config._compile_all_devices):
                os.environ["TOKENIZERS_PARALLELISM"] = "0"
                model_forward = self.get_compiled_call(
                    generation_config.compile_config)

        # Seed chunk boundaries over the prompt: a boundary every
        # `prefilling_length` tokens, with the final one clamped to the
        # last prompt position.
        start = prefilling_length-1
        chunk_length = prefilling_length

        s_pos = [start]
        while True:
            start += chunk_length
            if start >= input_ids.shape[1] - 1:
                s_pos.append(input_ids.shape[1] - 1)
                break
            else:
                s_pos.append(start)

        # NOTE(review): slice_pos is built with batch dimension 1; batched
        # prompts (batch_size > 1) do not get per-row boundaries — confirm
        # this path is only used with batch size 1.
        slice_pos = torch.tensor(s_pos).unsqueeze(0).to(
            input_ids.device)

        model_kwargs['slice_pos'] = slice_pos
        count = (slice_pos != -1).sum().item()

        is_prefill = True
        while self._has_unfinished_sequences(
                this_peer_finished,
                synced_gpus,
                device=input_ids.device,
                cur_len=cur_len,
                max_length=max_length):
            model_inputs = self.prepare_inputs_for_generation(
                input_ids, **model_kwargs
            )
            # The chunked forward needs the full token history, so override
            # whatever slice prepare_inputs_for_generation kept.
            model_inputs.update({"input_ids": input_ids})

            model_inputs.update({"output_attentions": output_attentions}
                                if output_attentions else {})
            model_inputs.update({"output_hidden_states": output_hidden_states}
                                if output_hidden_states else {})

            if is_prefill:
                # First step runs the uncompiled forward over the prompt.
                outputs = self.forward(**model_inputs, return_dict=True, is_prefill=True)
                is_prefill = False
            else:
                outputs = model_forward(**model_inputs, return_dict=True, is_prefill=False)

            # Advance cache / attention mask / cache_position by the whole
            # chunk that was just produced.
            model_kwargs = self._update_model_kwargs_for_generation_for_seminat(
                outputs,
                model_kwargs,
                is_encoder_decoder=self.config.is_encoder_decoder,
                num_new_tokens=outputs.logits.size(1))
            if synced_gpus and this_peer_finished:
                continue

            # Logits for the freshly generated chunk; clone + fp32 before
            # the logits processors mutate them.
            next_token_logits = outputs.logits[:, :, :].clone().float(
            )
            next_token_logits = next_token_logits.to(input_ids.device)

            next_token_scores = logits_processor(input_ids, next_token_logits)

            if do_sample:
                probs = nn.functional.softmax(next_token_scores, dim=-1)
                # NOTE(review): torch.multinomial expects a 1-D/2-D input;
                # with a chunk dimension in `probs` this call would fail —
                # confirm do_sample is unused, or flatten the chunk dim.
                next_tokens = torch.multinomial(probs,
                                                num_samples=1).squeeze(1)
            else:
                next_tokens = torch.argmax(
                    next_token_scores,
                    dim=-1)

            # Record the new chunk boundary: the previous boundary shifted
            # by the emitted chunk length.
            count = (model_kwargs['slice_pos'] != -1).sum().item()
            new_slice_pos = model_kwargs['slice_pos'][:, count - 1] + outputs.logits.size(1)
            model_kwargs['slice_pos'] = torch.cat([model_kwargs['slice_pos'], new_slice_pos.unsqueeze(1)], dim=-1)

            # Once a sequence has finished, overwrite its tokens with pad.
            if has_eos_stopping_criteria:
                next_tokens = next_tokens * unfinished_sequences + pad_token_id * (
                    1 - unfinished_sequences
                )

            # Append the whole chunk at once.
            input_ids = torch.cat([input_ids, next_tokens], dim=-1)
            if streamer is not None:
                streamer.put(next_tokens.cpu())

            # `last_k` tells the semi-NAT EOS criterion how many tokens
            # were just appended in this step.
            unfinished_sequences = unfinished_sequences & ~stopping_criteria(
                input_ids, scores, last_k=next_tokens.size(1))
            this_peer_finished = unfinished_sequences.max() == 0
            cur_len += outputs.logits.size(1)

            # Free the step outputs before the next iteration.
            del outputs

        if streamer is not None:
            streamer.end()

        if return_dict_in_generate:
            if self.config.is_encoder_decoder:
                return GenerateEncoderDecoderOutput(
                    sequences=input_ids,
                    scores=scores,
                    logits=raw_logits,
                    encoder_attentions=encoder_attentions,
                    encoder_hidden_states=encoder_hidden_states,
                    decoder_attentions=decoder_attentions,
                    cross_attentions=cross_attentions,
                    decoder_hidden_states=decoder_hidden_states,
                    past_key_values=model_kwargs.get("past_key_values"),
                )
            else:
                return GenerateDecoderOnlyOutput(
                    sequences=input_ids,
                    scores=scores,
                    logits=raw_logits,
                    attentions=decoder_attentions,
                    hidden_states=decoder_hidden_states,
                    past_key_values=model_kwargs.get("past_key_values"),
                )
        else:
            return input_ids
|
|
| def _update_model_kwargs_for_generation_for_seminat( |
| self, |
| outputs: ModelOutput, |
| model_kwargs: Dict[str, Any], |
| is_encoder_decoder: bool = False, |
| num_new_tokens: int = 1, |
| ) -> Dict[str, Any]: |
| ALL_CACHE_NAMES = [ |
| "past_key_values", |
| "cache_params", |
| "state", |
| "mems", |
| "past_buckets_states", |
| ] |
| |
| for possible_cache_name in ALL_CACHE_NAMES: |
| if possible_cache_name in outputs: |
| |
| if possible_cache_name in ("past_buckets_states", "mems"): |
| cache_name = "past_key_values" |
| else: |
| cache_name = possible_cache_name |
| model_kwargs[cache_name] = getattr(outputs, |
| possible_cache_name) |
| break |
|
|
| |
|
|
| |
| |
| if "token_type_ids" in model_kwargs: |
| token_type_ids = model_kwargs["token_type_ids"] |
| model_kwargs["token_type_ids"] = torch.cat( |
| [token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) |
|
|
| if not is_encoder_decoder: |
| |
| |
| |
| if "attention_mask" in model_kwargs: |
| attention_mask = model_kwargs["attention_mask"] |
| model_kwargs["attention_mask"] = torch.cat( |
| [ |
| attention_mask, |
| attention_mask.new_ones( |
| (attention_mask.shape[0], num_new_tokens |
| )) |
| ], |
| dim=-1) |
| else: |
| |
| if "decoder_attention_mask" in model_kwargs: |
| decoder_attention_mask = model_kwargs["decoder_attention_mask"] |
| model_kwargs["decoder_attention_mask"] = torch.cat( |
| [ |
| decoder_attention_mask, |
| decoder_attention_mask.new_ones( |
| (decoder_attention_mask.shape[0], 1)) |
| ], |
| dim=-1, |
| ) |
|
|
| |
| if model_kwargs.get("use_cache", True): |
| model_kwargs["cache_position"] = torch.arange(model_kwargs["cache_position"][-1:].item() + 1, model_kwargs["cache_position"][-1:].item() + num_new_tokens + 1, dtype=model_kwargs["cache_position"].dtype).to(model_kwargs["cache_position"].device) |
| |
| |
| |
| else: |
| past_positions = model_kwargs.pop("cache_position") |
| new_positions = torch.arange( |
| past_positions[-1] + 1, |
| past_positions[-1] + num_new_tokens + 1, |
| dtype=past_positions.dtype).to(past_positions.device) |
| model_kwargs["cache_position"] = torch.cat( |
| (past_positions, new_positions)) |
| return model_kwargs |
|
|
class AbsolutePositionalEncoding(nn.Module):
    """Fixed sinusoidal absolute positional encoding.

    Precomputes the classic sin/cos table and adds it to the input
    activations in ``forward``.
    """

    def __init__(self, hidden_size: int, max_len: int = 2048):
        """
        Args:
            hidden_size: embedding dimension (size of the last axis).
            max_len: maximum sequence length the table covers.
        """
        super().__init__()

        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        frequencies = torch.exp(
            torch.arange(0, hidden_size, 2).float()
            * (-math.log(10000.0) / hidden_size))

        # Even feature indices carry sin, odd indices carry cos.
        table = torch.zeros(max_len, hidden_size)
        table[:, 0::2] = torch.sin(positions * frequencies)
        table[:, 1::2] = torch.cos(positions * frequencies)

        # Stored as a (1, max_len, hidden_size) buffer so it follows the
        # module across devices without being a trainable parameter.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Add positional encodings to ``x``.

        Args:
            x: tensor of shape (batch_size, seq_len, hidden_size).

        Returns:
            Tensor of the same shape with position encodings added.
        """
        return x + self.pe[:, :x.size(1)]
|