# sft-v5 / modelforseminat_v5.py
from transformers import Olmo2Model, Olmo2ForCausalLM, AutoTokenizer, logging
from transformers.modeling_outputs import (
CausalLMOutputWithPast,
BaseModelOutputWithPast,
)
import numpy as np
import math
from torch import nn
import pandas as pd
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from dataclasses import dataclass
# Olmo2
from transformers.models.olmo2.modeling_olmo2 import Olmo2RotaryEmbedding, Olmo2Attention, Olmo2MLP, Olmo2RMSNorm, apply_rotary_pos_emb, eager_attention_forward, Olmo2DecoderLayer
from transformers.models.olmo2.configuration_olmo2 import Olmo2Config
from transformers.processing_utils import Unpack
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.utils import LossKwargs
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
from torch.nn.functional import cosine_similarity
import pdb
from dataset import *  # local module; provides AbsolutePositionalEncoding used below
import torch
import torch.nn.functional as F
import functools
import torch.distributed as dist
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
FullStateDictConfig,
StateDictType,
)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy,
enable_wrap,
wrap,
)
from functools import partial
from torch.utils.data import DataLoader
from pathlib import Path
from typing import Type, List, Optional, Tuple, Union, Callable, Dict, Any
############ specially for generate() #################
import inspect
from transformers.generation.configuration_utils import (
NEED_SETUP_CACHE_CLASSES_MAPPING,
QUANT_BACKEND_CLASSES_MAPPING,
GenerationConfig,
GenerationMode,
)
from transformers.generation.logits_process import LogitsProcessorList
from transformers.generation.stopping_criteria import StoppingCriteriaList
from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
from transformers.integrations.fsdp import is_fsdp_managed_module
from transformers.generation.utils import (
is_torchdynamo_compiling, ModelOutput, GenerateDecoderOnlyOutput,
GenerateEncoderDecoderOutput, GenerateBeamDecoderOnlyOutput,
GenerateBeamEncoderDecoderOutput, GreedySearchDecoderOnlyOutput,
ContrastiveSearchDecoderOnlyOutput, SampleDecoderOnlyOutput,
ContrastiveSearchEncoderDecoderOutput, GreedySearchEncoderDecoderOutput,
SampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput,
BeamSampleDecoderOnlyOutput, BeamSearchEncoderDecoderOutput,
BeamSampleEncoderDecoderOutput, GreedySearchOutput, SampleOutput,
BeamSearchOutput, BeamSampleOutput, ContrastiveSearchOutput,
GenerateNonBeamOutput, GenerateBeamOutput, GenerateOutput)
from transformers.generation.stopping_criteria import (
ConfidenceCriteria,
EosTokenCriteria,
MaxLengthCriteria,
MaxTimeCriteria,
StoppingCriteria,
StoppingCriteriaList,
StopStringCriteria,
)
from transformers.generation.stopping_criteria import STOPPING_CRITERIA_INPUTS_DOCSTRING
from transformers.pytorch_utils import isin_mps_friendly
from transformers.utils import add_start_docstrings
import os
import warnings
logger = logging.get_logger(__name__)
class EosTokenCriteriaForSemiNAT(StoppingCriteria):
"""
This class can be used to stop generation whenever the "end-of-sequence" token is generated.
By default, it uses the `model.generation_config.eos_token_id`.
Args:
eos_token_id (`Union[int, List[int], torch.Tensor]`):
The id(s) of the *end-of-sequence* token.
"""
def __init__(self, eos_token_id: Union[int, List[int], torch.Tensor]):
if not isinstance(eos_token_id, torch.Tensor):
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
eos_token_id = torch.tensor(eos_token_id)
self.eos_token_id = eos_token_id
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, last_k: int, **kwargs) -> torch.BoolTensor:
        self.eos_token_id = self.eos_token_id.to(input_ids.device)
        # A chunk can emit several tokens per step, so scan the last `last_k`
        # positions for EOS instead of only the final token.
        token_is_eos = isin_mps_friendly(input_ids[:, -last_k:], self.eos_token_id)
is_done = torch.any(token_is_eos, dim=1)
return is_done
############ specially for generate() #################
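# Illustrative usage sketch (not part of the model); the token ids and last_k
# values below are made up for the example. Because a chunk emits several tokens
# per step, the criterion scans a window of `last_k` positions.
def _demo_eos_criterion():
    criterion = EosTokenCriteriaForSemiNAT(eos_token_id=2)
    ids = torch.tensor([[5, 2, 7, 9],
                        [5, 7, 8, 9]])
    done_k2 = criterion(ids, scores=None, last_k=2)  # tensor([False, False])
    done_k3 = criterion(ids, scores=None, last_k=3)  # tensor([ True, False])
    return done_k2, done_k3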
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
@dataclass
class ModelOutputWithPastForSemiNAT(BaseModelOutputWithPast):
chunk_hidden_state: torch.FloatTensor = None
length_ground_truth: Optional[torch.FloatTensor] = None
length_logits: Optional[torch.FloatTensor] = None
    position_embeddings: Optional[torch.FloatTensor] = None  # rotary (cos, sin) for the token-level sequence
    nar_hidden_state: torch.FloatTensor = None  # NAR decoder outputs per chunk
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
class TwoLayerMLP(nn.Module):
    def __init__(self, hidden_size: int, dropout_rate: float = 0.1):
        """
        Two-layer MLP that supports arbitrary leading batch dimensions.
        Args:
            hidden_size (int): hidden dimension
            dropout_rate (float): dropout rate, 0.1 by default
        """
        super().__init__()
        self.fc1 = nn.Linear(hidden_size, 4 * hidden_size)  # expand 4x
        self.fc2 = nn.Linear(4 * hidden_size, hidden_size)  # project back
        self.dropout = nn.Dropout(p=dropout_rate)
        self.activation = nn.GELU()
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass; any leading dimensions are preserved.
        Args:
            x (torch.Tensor): input of shape (..., hidden_size)
        Returns:
            torch.Tensor: output with the same shape as the input
        """
        original_shape = x.shape
        hidden_size = original_shape[-1]
        # Flatten all leading dimensions into a single batch dimension.
        x_2d = x.view(-1, hidden_size)
        # First layer: linear -> GELU -> dropout
        x_2d = self.fc1(x_2d)
        x_2d = self.activation(x_2d)
        x_2d = self.dropout(x_2d)
        # Second layer: linear
        x_2d = self.fc2(x_2d)
        # Restore the original shape.
        x = x_2d.view(*original_shape)
        return x
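# Illustrative sketch: the MLP is shape-preserving over arbitrary leading
# dimensions. Sizes below are made up for the example.
def _demo_two_layer_mlp():
    mlp = TwoLayerMLP(hidden_size=8)
    mlp.eval()  # disable dropout for a deterministic sketch
    x = torch.randn(2, 3, 8)        # (batch, seq, hidden)
    assert mlp(x).shape == x.shape  # GELU MLP: 8 -> 32 -> 8
    return mlp(x)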
class Olmo2ConfigForSemiNAT(Olmo2Config):
    def __init__(self, chunk_size_limit: int = 5, decoder_layers: int = 1,
                 encoder_layer: int = 1, mlp: bool = False,
                 position_embedding_type: str = "absolute",
                 attn_implementation: str = "sdpa",
                 length_loss_type: str = "ce", **kwargs):
        super().__init__(**kwargs)
        self.chunk_size_limit = chunk_size_limit
        self.decoder_layers = decoder_layers
        self.encoder_layer = encoder_layer
        self.mlp = mlp
        self.position_embedding_type = position_embedding_type
        self._attn_implementation = attn_implementation
        self.length_loss_type = length_loss_type
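# Illustrative sketch: the SemiNAT-specific fields ride on top of the usual
# Olmo2Config kwargs. The tiny sizes below are made up for the example.
def _demo_seminat_config():
    return Olmo2ConfigForSemiNAT(
        chunk_size_limit=5,        # max tokens decoded per chunk
        decoder_layers=1,          # NAR decoder depth
        encoder_layer=1,           # chunk encoder depth
        position_embedding_type="absolute",
        hidden_size=64, intermediate_size=128,
        num_attention_heads=4, num_key_value_heads=4,
        num_hidden_layers=2, vocab_size=128,
    )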
class Olmo2AttentionForSemiNAT(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Olmo2ConfigForSemiNAT, layer_idx: Optional[int] = None, is_causal: bool = True):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(
config, "head_dim",
config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = is_causal
self.q_proj = nn.Linear(config.hidden_size,
config.num_attention_heads * self.head_dim,
bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size,
config.num_key_value_heads * self.head_dim,
bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size,
config.num_key_value_heads * self.head_dim,
bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim,
config.hidden_size,
bias=config.attention_bias)
self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim,
config.rms_norm_eps)
self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim,
config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor],
Optional[Tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
if position_embeddings is not None:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(
query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {
"sin": sin,
"cos": cos,
"cache_position": cache_position
}
key_states, value_states = past_key_value.update(
key_states, value_states, self.layer_idx, cache_kwargs)
        # The upstream implementation selects the attention backend from
        # config._attn_implementation; here SDPA is used unconditionally as the
        # new setting for both the encoder and the decoder.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS["sdpa"]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
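# Illustrative sketch: one SemiNAT attention block on dummy inputs, reusing the
# tiny config sketch above. Shapes are made up for the example.
def _demo_seminat_attention():
    cfg = _demo_seminat_config()
    attn = Olmo2AttentionForSemiNAT(cfg, layer_idx=0, is_causal=False)
    x = torch.randn(1, 6, cfg.hidden_size)
    rotary = Olmo2RotaryEmbedding(config=cfg)
    cos_sin = rotary(x, torch.arange(6).unsqueeze(0))  # rotary (cos, sin) pair
    out, _ = attn(x, position_embeddings=cos_sin, attention_mask=None)
    return out.shape  # torch.Size([1, 6, 64])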
class Olmo2DecoderLayerForSemiNAT(nn.Module):
def __init__(
self,
config: Olmo2ConfigForSemiNAT,
layer_idx: int,
is_causal: bool = True,
):
super().__init__()
self.hidden_size = config.hidden_size
# pdb.set_trace()
self.self_attn = Olmo2AttentionForSemiNAT(config=config,
layer_idx=layer_idx,
is_causal=is_causal)
self.mlp = Olmo2MLP(config)
self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size,
eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size,
eps=config.rms_norm_eps)
# pdb.set_trace()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor,
torch.Tensor]] = None,
**kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
torch.FloatTensor]]]:
residual = hidden_states
# pdb.set_trace()
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
# pdb.set_trace()
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states, )
if output_attentions:
outputs += (self_attn_weights, )
return outputs
class NATEncoderForSemiNAT(nn.Module):
def __init__(self, config: Olmo2ConfigForSemiNAT, num_layer: int = 1):
super().__init__()
self.num_layer = num_layer
        self.encoder_layers = nn.ModuleList([
            # TODO: check whether is_causal=False is needed here; an explicit
            # attn_mask takes precedence over is_causal in the SDPA backend.
            Olmo2DecoderLayerForSemiNAT(config, layer_idx)
            for layer_idx in range(self.num_layer)
        ])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor,
torch.Tensor]] = None,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
torch.FloatTensor]]]:
        for layer in self.encoder_layers:
            outputs = layer(hidden_states=hidden_states,
                            output_attentions=output_attentions,
                            position_embeddings=position_embeddings,
                            attention_mask=attention_mask)
            hidden_states = outputs[0]
        # Only the last layer's attn_weights / present_key_value are kept.
        # Mean-pooling across each chunk happens in the caller, not here.
        return hidden_states
class NATDecoderForSemiNAT(nn.Module):
def __init__(self, config: Olmo2ConfigForSemiNAT, num_layer: int = 1):
super().__init__()
self.num_layer = num_layer
self.decoder_layers = nn.ModuleList([
Olmo2DecoderLayerForSemiNAT(config, layer_idx, False)
for layer_idx in range(self.num_layer)
])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor,
torch.Tensor]] = None,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
torch.FloatTensor]]]:
for layer in self.decoder_layers:
# pdb.set_trace()
outputs = layer(hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
position_embeddings=position_embeddings)
hidden_states = outputs[0]
return hidden_states
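# Illustrative sketch: the encoder and the NAR decoder are stacks of the same
# layer type; the decoder is built with is_causal=False so tokens within a chunk
# attend bidirectionally. Tiny shapes below are made up for the example.
def _demo_nat_blocks():
    cfg = _demo_seminat_config()
    enc = NATEncoderForSemiNAT(cfg, num_layer=1)
    dec = NATDecoderForSemiNAT(cfg, num_layer=1)
    x = torch.randn(2, 5, cfg.hidden_size)
    rotary = Olmo2RotaryEmbedding(config=cfg)
    cos_sin = rotary(x, torch.arange(5).unsqueeze(0))
    return (enc(x, position_embeddings=cos_sin).shape,   # torch.Size([2, 5, 64])
            dec(x, position_embeddings=cos_sin).shape)   # torch.Size([2, 5, 64])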
class Olmo2ModelForSemiNAT(Olmo2Model):
def __init__(self, config):
super().__init__(config)
self.layers = nn.ModuleList([
Olmo2DecoderLayer(config, layer_idx)
for layer_idx in range(config.num_hidden_layers)
])
self.decoder = NATDecoderForSemiNAT(config, config.decoder_layers)
self.encoder = NATEncoderForSemiNAT(config, config.encoder_layer)
# pdb.set_trace()
self.chunk_size_limit = config.chunk_size_limit
self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Olmo2RotaryEmbedding(config=config)
self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
self.gradient_checkpointing = False
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size,
self.padding_idx)
self.length_predictor = nn.Linear(config.hidden_size,
self.chunk_size_limit)
self.mlp = config.mlp
if self.mlp:
self.linear_projection = TwoLayerMLP(config.hidden_size)
# pdb.set_trace()
self.position_embedding_type = config.position_embedding_type
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
slice_pos: torch.Tensor = None,
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
inference: Optional[bool] = None,
padding: Optional[torch.Tensor] = None,
is_prefill: Optional[bool] = False,
**flash_attn_kwargs: Unpack[FlashAttentionKwargs],
) -> Union[Tuple, CausalLMOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (output_hidden_states
if output_hidden_states is not None else
self.config.output_hidden_states)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError(
"You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
# pdb.set_trace()
if use_cache and past_key_values is None:
past_key_values = DynamicCache()
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length(
) if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens,
past_seen_tokens +
inputs_embeds.shape[1],
device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
if inference is not None:
position_ids = cache_position.unsqueeze(0)
position_embeddings = self.rotary_emb(inputs_embeds, position_ids)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = None
max_chunk_num = (slice_pos != -1).sum(dim=1).max()
        ################################ parallel path #################################
        length_ground_truth = None
        if not inference or is_prefill:
            # M_avg averages token states within each chunk; attn_mask is the
            # block-diagonal chunk mask fed to the encoder.
            M_avg, attn_mask, length_ground_truth, chunk_attention_mask, slice_num = self.build_slice_matrix(input_ids, slice_pos)  # (bs, max_chunk_num, seq_len)
            encoded_input = self.encoder(inputs_embeds, position_embeddings=position_embeddings, attention_mask=attn_mask)  # (bs, seq_len, hidden)
            M_avg = M_avg.contiguous().to(torch.bfloat16)
            encoded_input = encoded_input.contiguous().to(torch.bfloat16)
            # Chunk embeddings = within-chunk average of the encoded token states.
            chunk_inputs_embeds = torch.matmul(M_avg, encoded_input)
            accumu_num = sum(slice_num) - encoded_input.shape[0]
            # Crop to the longest chunk count in the batch to speed things up.
            chunk_inputs_embeds = chunk_inputs_embeds[:, :max_chunk_num, :]
            chunk_attention_mask = chunk_attention_mask[:, :max_chunk_num]
            length_ground_truth = length_ground_truth[:, :max_chunk_num]
            chunk_position_ids = position_ids[:, :max_chunk_num]
            chunk_cache_position = cache_position[:max_chunk_num]
        else:
            # Decode step: encode only the tokens of the chunk being generated
            # and mean-pool them into a single chunk embedding.
            encoded_input = self.encoder(inputs_embeds[:, position_ids.squeeze(0)], position_embeddings=position_embeddings)
            chunk_inputs_embeds = torch.mean(encoded_input, dim=1).unsqueeze(0)
            # After prefill, recover the chunk-level cache position from the token-level one.
            chunk_cache_position = torch.searchsorted(slice_pos.squeeze(0), cache_position - 1, right=True)[-1].unsqueeze(0)
            chunk_attention_mask = torch.ones(1, cache_position[0])
            chunk_position_ids = chunk_cache_position.unsqueeze(0)
        ################################ parallel path #################################
        # Rotary embeddings at chunk granularity: a (cos, sin) tuple, each of
        # shape (bs, max_chunk_num, head_dim).
        chunk_position_embeddings = self.rotary_emb(
            chunk_inputs_embeds, chunk_position_ids
        )
        hidden_states = chunk_inputs_embeds  # (bs, max_chunk_num, hidden)
        # TODO: the inference path still needs checking.
causal_mask = self._update_causal_mask(chunk_attention_mask,
chunk_inputs_embeds,
chunk_cache_position,
past_key_values,
output_attentions)
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states, )
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(
decoder_layer.__call__,
hidden_states,
causal_mask,
position_ids,
past_key_values,
output_attentions,
use_cache,
chunk_cache_position,
chunk_position_embeddings,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=chunk_cache_position,
position_embeddings=chunk_position_embeddings,
**flash_attn_kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1], )
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states, )
        hidden_states = self.norm(
            hidden_states)  # (bs, max_chunk_num, hidden): all chunk hiddens
        next_cache = next_decoder_cache if use_cache else None  # DynamicCache()
        # Length-prediction head. Cast to the hidden states' device/dtype (bf16
        # during training) before projecting to chunk_size_limit logits.
        self.length_predictor = self.length_predictor.to(
            hidden_states.device).to(hidden_states.dtype)
        length_logits = self.length_predictor(
            hidden_states)  # (bs, max_chunk_num, chunk_size_limit)
        nar_hidden_states = None
        if inference is None:
            # NAR decoder (training): replicate each chunk's hidden state
            # "next-chunk length" times (capped at chunk_size_limit) and pad.
            bs, length, hidden_size = hidden_states.size()
            assert length == max_chunk_num
            # shapes: (chunk_num, chunk_size_limit, hidden_size) / (chunk_num, chunk_size_limit)
            nat_input_embeddings, nat_attention_mask = self.repeat_with_limit_and_pad(
                hidden_states, length_ground_truth, self.chunk_size_limit, skip_val=-100)
if self.mlp:
nat_input_embeddings = self.linear_projection(nat_input_embeddings)
            # Build a non-causal attention mask so tokens within a chunk see
            # each other while padding positions stay masked.
mask_nat_attention_mask = self.nat_prepare_4d_full_attention_mask_without_causal(
attention_mask=nat_attention_mask,
dtype=nat_attention_mask.dtype,
device=nat_attention_mask.device)
            self.decoder = self.decoder.to(dtype=nat_input_embeddings.dtype)
            if self.position_embedding_type == "relative":
                nar_chunk_position = torch.arange(
                    0, self.chunk_size_limit).unsqueeze(0).repeat(
                        accumu_num, 1).to(hidden_states.device)
                pos = self.rotary_emb(nat_attention_mask, nar_chunk_position)
            elif self.position_embedding_type == "absolute":
                # Add absolute positional encodings within each chunk.
                nat_input_embeddings = self.pos_encoder(nat_input_embeddings)
                pos = None
            nar_hidden_states = self.decoder(
                nat_input_embeddings,
                attention_mask=mask_nat_attention_mask,  # TODO: revisit this padding mask
                position_embeddings=pos,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=None,
            )
            nar_hidden_states = self.norm(nar_hidden_states)
return ModelOutputWithPastForSemiNAT(
chunk_hidden_state=hidden_states,
length_ground_truth=length_ground_truth,
length_logits=length_logits,
position_embeddings=position_embeddings,
nar_hidden_state=nar_hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
    def repeat_with_limit_and_pad(self, x: torch.Tensor, repeat_counts: torch.Tensor, chunk_limit: int, skip_val: int = -100):
        """
        Repeat each position of x a given number of times (capped at chunk_limit),
        padding the remainder and skipping positions whose count equals skip_val.
        Args:
            x: tensor of shape (bs, length, hidden)
            repeat_counts: tensor of shape (bs, length); per-position repeat count, skip_val = skip
            chunk_limit: max repeats per position; shorter chunks are zero-padded
            skip_val: sentinel marking positions to skip, -100 by default
        Returns:
            (out, mask): out of shape (chunk_num, chunk_limit, hidden) and mask of
            shape (chunk_num, chunk_limit) with 1 marking real positions.
        """
        bs, length, hidden = x.shape
        device = x.device
        # Chunk i+1 is decoded from chunk i's hidden state, so drop the last
        # hidden state and the first length.
        x = x[:, :-1, :]
        repeat_counts = repeat_counts[:, 1:]
        # Step 1: flatten and keep valid positions only.
        x_flat = x.reshape(-1, hidden)           # (bs * (length-1), hidden)
        repeat_flat = repeat_counts.reshape(-1)  # (bs * (length-1),)
        valid_mask = repeat_flat != skip_val
        x_valid = x_flat[valid_mask]             # (chunk_num, hidden)
        repeat_valid = repeat_flat[valid_mask].clamp_max(chunk_limit)  # (chunk_num,)
        # Step 2: broadcast each vector chunk_limit times.
        repeated = x_valid.unsqueeze(1).expand(-1, chunk_limit, -1)  # (chunk_num, chunk_limit, hidden)
        # Step 3: build the validity mask.
        range_k = torch.arange(chunk_limit, device=device).unsqueeze(0)  # (1, chunk_limit)
        mask = (range_k < repeat_valid.unsqueeze(1)).unsqueeze(-1)       # (chunk_num, chunk_limit, 1)
        # Step 4: zero out the padded positions.
        out = repeated * mask
        mask = mask.squeeze(-1).to(x.dtype)
        return out, mask  # shape: (chunk_num, chunk_limit, hidden)
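    # Illustrative walk-through (made-up numbers) with chunk_limit=3: for one
    # sample with chunk hiddens [h0, h1, h2] and repeat_counts [-100, 2, 3], the
    # shift pairs h0 with count 2 and h1 with count 3 (next-chunk lengths), so
    # out.shape == (2, 3, hidden) and mask == [[1, 1, 0], [1, 1, 1]].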
    def build_slice_matrix(self, input_ids, slice_pos: torch.Tensor):
        bs, num_slices = slice_pos.shape
        seq_len = input_ids.size(1)
        # Replace -1 with 0 so the prev computation stays in range.
        slice_pos_clipped = slice_pos.clone()
        slice_pos_clipped[slice_pos_clipped == -1] = 0
        # prevs (a) and currents (b): chunk i spans [prevs[i], currents[i]).
        prevs = torch.cat([
            torch.zeros((bs, 1), device=slice_pos.device, dtype=slice_pos.dtype),
            slice_pos_clipped[:, :-1] + 1
        ], dim=1)
        currents = slice_pos_clipped + 1
        # valid mask
        valid_mask = (slice_pos != -1)
        lengths = currents - prevs  # (bs, num_slices)
        lengths[lengths <= 0] = -100  # invalid values
        # Number of valid (non -100) entries per row.
        slice_num = (lengths != -100).sum(dim=1).tolist()
        # chunk mask
        chunk_mask = torch.zeros_like(lengths, dtype=torch.long)
        for i in range(lengths.size(0)):
            chunk_mask[i, :slice_num[i]] = 1
        values = torch.zeros_like(lengths, dtype=torch.float)
        values[valid_mask] = 1.0 / lengths[valid_mask]
        chunk_nums = valid_mask.sum(dim=1)
        max_chunk_num = chunk_nums.max().item()
        # Output averaging matrix M.
        M = torch.zeros((bs, max_chunk_num, seq_len), device=slice_pos.device)
        # Attention mask (bs, 1, seq_len, seq_len): start from the identity so
        # padding tokens can at least see themselves.
        attn_mask = torch.eye(seq_len, dtype=torch.bool, device=slice_pos.device)  # [seq_len, seq_len]
        # clone() materializes the expanded view so the per-sample writes below are legal
        attn_mask = attn_mask.unsqueeze(0).unsqueeze(0).expand(bs, 1, seq_len, seq_len).clone()  # [bs, 1, seq_len, seq_len]
        # Fill M and the attention mask chunk by chunk.
        for b in range(bs):
            a_b = prevs[b]
            b_b = currents[b]
            v_b = values[b]
            for i in range(num_slices):
                if not valid_mask[b, i]:
                    continue
                a = a_b[i].item()
                b_ = b_b[i].item()
                if b_ > a:
                    # Averaging weights for this chunk.
                    M[b, i, a:b_] = v_b[i]
                    # Tokens within a chunk may attend to each other (True = visible).
                    attn_mask[b, :, a:b_, a:b_] = True
        return M, attn_mask, lengths, chunk_mask, slice_num
    def nat_prepare_4d_full_attention_mask_without_causal(
            self,
            attention_mask: torch.Tensor,  # (bs, L) 1=real, 0=pad
            dtype: torch.dtype,            # torch.float32 / bfloat16
            device: torch.device,
            mask_val: float = -1e4,        # additive value for masked positions
    ) -> torch.Tensor:
        """
        - Rows whose query is a real token (attention_mask==1):
          may only attend to keys that are also real -> full mutual visibility.
        - Rows whose query is padding:
          fall back to a causal lower triangle (j <= i) so no row is all -inf.
        Returns an additive mask of shape (bs, 1, L, L).
        """
        if attention_mask.dim() != 2:
            raise ValueError(
                "Expected 2-D attention_mask with shape (batch, seq_len)"
            )
        bs, L = attention_mask.shape
        attn_mask_f = attention_mask.to(device=device, dtype=torch.float32)  # float for broadcasting
        # ---------- (1) mutual visibility among real tokens ----------
        # valid2valid[b, i, j] = 1  iff  query_i and key_j are both real
        valid2valid = attn_mask_f[:, :, None] * attn_mask_f[:, None, :]  # (bs, L, L)
        # ---------- (2) causal lower triangle for padding rows ----------
        # lower_tri[i, j] = 1  iff  j <= i
        lower_tri = torch.tril(torch.ones(L, L, device=device))
        # query_is_pad: (bs, L, 1), 1 = pad
        query_is_pad = (1.0 - attn_mask_f)[:, :, None]
        causal_part = query_is_pad * lower_tri  # (bs, L, L)
        # ---------- (3) combine both parts ----------
        visible = torch.clamp(valid2valid + causal_part, 0.0, 1.0)  # (bs, L, L)
        # ---------- (4) convert to an additive mask: 0 -> 0.0, masked -> mask_val ----------
        additive_mask = (1.0 - visible) * mask_val
        additive_mask = additive_mask[:, None, :, :]  # (bs, 1, L, L)
        return additive_mask.to(dtype=dtype)
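    # Illustrative walk-through (made-up numbers): for attention_mask = [[1, 1, 0]]
    # the two real tokens see each other (additive 0.0) and not the pad key
    # (mask_val); the pad row falls back to the lower triangle, so no softmax row
    # is fully masked.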
    @staticmethod
    def compute_chunk_lengths(slice_pos: torch.Tensor, pad_value: int = -100):
        """
        Args:
            slice_pos: [B, L] cut positions; a value i means "cut after token i",
                and -1 marks padding.
        Returns:
            length_gt: [B, max_chunk_num]; length of each chunk, padded with pad_value.
        """
        B, L = slice_pos.shape
        device = slice_pos.device
        length_ground_truth = []
        for b in range(B):
            pos = slice_pos[b]
            pos = pos[pos != -1] + 1  # valid cut points, +1 because the cut falls after the token
            cuts = torch.cat([
                torch.tensor([0], device=device),  # start of the sequence
                pos,
            ])
            lens = cuts[1:] - cuts[:-1]  # per-chunk lengths
            # Pad out to max_chunk_num (= L).
            padded = torch.full((L,), pad_value, device=device, dtype=torch.long)
            padded[:lens.shape[0]] = lens
            length_ground_truth.append(padded)
        return torch.stack(length_ground_truth)  # [B, L]
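# Illustrative sketch of the slice bookkeeping (made-up numbers): slice_pos value
# i means "cut after token i" and -1 is padding, so cutting a 6-token sample
# after tokens 1 and 4 yields chunks [0:2] and [2:5] of lengths 2 and 3.
def _demo_slice_lengths():
    slice_pos = torch.tensor([[1, 4, -1, -1, -1, -1]])
    lengths = Olmo2ModelForSemiNAT.compute_chunk_lengths(slice_pos)
    return lengths  # tensor([[2, 3, -100, -100, -100, -100]])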
class Olmo2ForCausalLMForSemiNAT(Olmo2ForCausalLM):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.chunk_size_limit = config.chunk_size_limit
self.model = Olmo2ModelForSemiNAT(config)
self.lm_head = nn.Linear(config.hidden_size,
config.vocab_size,
bias=False)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
slice_pos: Optional[torch.Tensor] = None,
slice_label: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
is_prefill: Optional[bool] = False,
# padding: Optional[torch.Tensor] = None,
**kwargs: Unpack[KwargsForCausalLM],
) -> Union[Tuple, CausalLMOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (output_hidden_states
if output_hidden_states is not None else
self.config.output_hidden_states)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
outputs = self.model(
input_ids=input_ids, # bs * length
attention_mask=attention_mask, # bs * length
position_ids=position_ids,
slice_pos=slice_pos,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
padding=self.padding_idx,
is_prefill=is_prefill,
**kwargs,
)
else:
outputs = self.model(
input_ids=input_ids, # bs * length
attention_mask=attention_mask, # bs * length
position_ids=position_ids,
slice_pos=slice_pos,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
padding=self.padding_idx,
inference=True,
is_prefill=is_prefill,
)
# pdb.set_trace()
chunk_hidden_states = outputs.chunk_hidden_state
bs, length, hidden_size = chunk_hidden_states.size()
        ############################# loss: two parts #############################
        loss = None
        loss1 = None
        loss2 = None
        ############### part 1: length-prediction loss over chunk_size_limit classes ###############
if labels is not None:
length_ground_truth = outputs.length_ground_truth
length_logits = outputs.length_logits
            # Map lengths 1..chunk_size_limit to class ids 0..chunk_size_limit-1.
            new_length_ground_truth = torch.where(
                length_ground_truth != -100,
                length_ground_truth - 1,
                length_ground_truth
            )
shift_length_logits = length_logits[:, :-1, :]
shift_new_length_ground_truth = new_length_ground_truth[:, 1:]
logits_flat = shift_length_logits.reshape(-1, self.chunk_size_limit)
labels_flat = shift_new_length_ground_truth.reshape(-1)
shift_slice_label = slice_label[:, 1:length_logits.size(1)]
slice_label_flat = shift_slice_label.reshape(-1)
mask = (slice_label_flat == -1)
labels_flat[mask] = -100
length_loss_type = getattr(self.config, "length_loss_type", "ce")
if length_loss_type == "mse":
logits_softmax = torch.nn.functional.softmax(logits_flat, dim=-1)
predicted_lengths = torch.sum(
logits_softmax * torch.arange(self.chunk_size_limit).to(
chunk_hidden_states.device).to(chunk_hidden_states.dtype),
dim=1
)
loss1 = torch.mean((predicted_lengths[labels_flat != -100] -
labels_flat[labels_flat != -100].float()) ** 2)
elif length_loss_type == "ce": # cross entropy
loss1 = F.cross_entropy(
logits_flat[labels_flat != -100],
labels_flat[labels_flat != -100]
)
            nar_hidden_state = outputs.nar_hidden_state
            ############### part 2: recover each chunk's tokens from its hidden state and score against the ground truth ###############
nar_labels = torch.full(
(nar_hidden_state.size(0), nar_hidden_state.size(1)),
-100).to(nar_hidden_state.device) # bs * length
nar_labels = self.update_nar_labels(nar_labels, labels, slice_pos,
length_ground_truth, input_ids,
self.chunk_size_limit)
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(
logits_to_keep, int) else logits_to_keep
logits = self.lm_head(
nar_hidden_state[:, slice_indices, :]) # 1* seq_len * 50304
            loss2 = self.loss_function_seminat(
                logits,
                nar_labels,
                self.vocab_size,
            )
        else:  # inference
            # Predict the next chunk's length from the last chunk position.
            softmaxed = torch.softmax(outputs.length_logits[:, -1, :], dim=-1)
            length = torch.argmax(softmaxed, dim=-1).item() + 1
            # Replicate the last chunk hidden state `length` times as NAR decoder input.
            nat_input_embeddings = torch.zeros(
                1, length, hidden_size).to(input_ids.device).to(
                    outputs.chunk_hidden_state.dtype)
            nat_input_embeddings[:, :length, :] = outputs.chunk_hidden_state[:, -1, :].expand(
                length, -1).to(input_ids.device).to(
                    outputs.chunk_hidden_state.dtype)
            if self.config.mlp:
                # The projection lives on the inner model, not on the CausalLM wrapper.
                nat_input_embeddings = self.model.linear_projection(nat_input_embeddings)
            # Add absolute positional encodings within the chunk.
            nat_input_embeddings = self.pos_encoder(nat_input_embeddings)
            nar_hidden_states = self.model.decoder(
                nat_input_embeddings,
                attention_mask=None,
                position_embeddings=None,
                output_attentions=output_attentions,
                use_cache=False,
                cache_position=None,
            )
            nar_hidden_states = self.model.norm(nar_hidden_states)
            logits = self.lm_head(nar_hidden_states[:, :, :])
return CausalLMOutputWithPast(
loss=(loss1, loss2),
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
        ############################# end loss computation #############################
return CausalLMOutputWithPast(
loss=(loss1, loss2),
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
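    # Note: forward returns loss as a (length_loss, token_loss) tuple rather than
    # a single scalar, so a training loop is expected to combine them itself,
    # e.g. (hypothetical weighting)  loss = out.loss[0] + out.loss[1].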
    def update_nar_labels(self, nar_labels, labels, slice_pos,
                          length_ground_truth, input_ids, chunk_size_limit):
        bs, length = input_ids.size()
        chunk = 0
        for b in range(bs):
            last_cut = slice_pos[b][0]  # position of the first cut
            for i in range(1, length):
                if slice_pos[b, i] != -1:
                    # Each chunk's NAR labels are the tokens between consecutive cuts.
                    nar_labels[chunk, :length_ground_truth[b, i]] = labels[
                        b, last_cut + 1:slice_pos[b, i] + 1]
                    last_cut = slice_pos[b, i]
                    chunk += 1
                else:
                    break
        return nar_labels
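    # Illustrative walk-through (made-up numbers): with slice_pos = [[1, 4, -1, ...]],
    # chunk 0 spans tokens [0:2] and chunk 1 spans [2:5]; row 0 of nar_labels
    # receives labels[2:5], i.e. each chunk's hidden state is trained to emit the
    # *next* chunk's tokens.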
    def fixed_cross_entropy(self,
                            source,
                            target,
                            num_items_in_batch: int = None,
                            ignore_index: int = -100,
                            **kwargs):
        reduction = "sum" if num_items_in_batch is not None else "mean"
        loss = F.cross_entropy(source,
                               target,
                               ignore_index=ignore_index,
                               reduction=reduction)
        if torch.isnan(loss):
            # Drop into the debugger if the loss goes NaN (training-time guard).
            pdb.set_trace()
        if reduction == "sum":
            loss = loss / num_items_in_batch
        return loss
def loss_function_seminat(self,
logits,
labels,
vocab_size: int,
num_items_in_batch: int = None,
ignore_index: int = -100,
**kwargs):
        # logits: (B, L, V); labels: (B, L)
        logits = logits.float()
        # Flatten the tokens. No causal shift here: NAR labels are already
        # aligned position-for-position with the decoder outputs.
        logits = logits.view(-1, vocab_size)         # (B*L, V)
        labels = labels.view(-1).to(logits.device)   # (B*L,)
        # Compute loss
        loss = self.fixed_cross_entropy(logits, labels, num_items_in_batch,
                                        ignore_index, **kwargs)
return loss
def generate(
self,
inputs: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor],
List[int]]] = None,
synced_gpus: Optional[bool] = None,
assistant_model: Optional["PreTrainedModel"] = None,
streamer: Optional["BaseStreamer"] = None,
negative_prompt_ids: Optional[torch.Tensor] = None,
negative_prompt_attention_mask: Optional[torch.Tensor] = None,
prefilling_length: int = 0,
**kwargs,
) -> Union[GenerateOutput, torch.LongTensor]:
        # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
        self._validate_model_class()  # only classes that support generation may proceed
        tokenizer = kwargs.pop(
            "tokenizer",
            None)  # Pull this out first, we only use it for stopping criteria
        assistant_tokenizer = kwargs.pop(
            "assistant_tokenizer", None)  # only used for assisted generation
generation_config, model_kwargs = self._prepare_generation_config(
generation_config, **kwargs)
        # e.g. generation_config = GenerationConfig {
        #   "eos_token_id": 50279,
        #   "max_length": 2048,
        #   "pad_token_id": 1
        # }
        # model_kwargs: {input_ids: ..., attention_mask: ...}
        self._validate_model_kwargs(model_kwargs.copy())  # sanity-check the remaining kwargs
self._validate_assistant(assistant_model, tokenizer,
assistant_tokenizer)
        # 2. Set generation parameters if not already defined
        # Sync generation across GPUs when the model is sharded (DeepSpeed ZeRO-3 or FSDP).
        if synced_gpus is None:
            synced_gpus = (
                is_deepspeed_zero3_enabled()
                or is_fsdp_managed_module(self)) and dist.get_world_size() > 1
        # Initialize logits processors and stopping criteria.
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList(
        )  # rules that rewrite the logits (ban repeats, force tokens, ...)
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList(
        )  # conditions that end generation (max length, EOS, ...)
accepts_attention_mask = "attention_mask" in set(
inspect.signature(self.forward).parameters.keys()) # True
requires_attention_mask = "encoder_outputs" not in model_kwargs # True
kwargs_has_attention_mask = model_kwargs.get("attention_mask",
None) is not None # True
# pdb.set_trace()
# 3. Define model inputs
inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
inputs, generation_config.bos_token_id, model_kwargs)
batch_size = inputs_tensor.shape[0]
# inputs_tensor bs * input_length; model_input_name:"input_ids";model_kwargs: attention_mask
device = inputs_tensor.device
self._prepare_special_tokens(generation_config,
kwargs_has_attention_mask,
device=device)
        # decoder-only models must use left-padding for batched generation.
if not self.config.is_encoder_decoder and not is_torchdynamo_compiling(
):
# If `input_ids` was given, check if the last id in any sequence is `pad_token_id`
# Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.
if (generation_config._pad_token_tensor is not None
and batch_size > 1 and len(inputs_tensor.shape) == 2
and torch.sum(inputs_tensor[:, -1] ==
generation_config._pad_token_tensor) > 0):
logger.warning(
"A decoder-only architecture is being used, but right-padding was detected! For correct "
"generation results, please set `padding_side='left'` when initializing the tokenizer."
)
        # 4. Define other model kwargs
        # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are
        # generating the first new token or not, and we only want to use the embeddings for the first new token)
        if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds":
            generation_config.use_cache = True
        # Build an attention mask if the caller did not provide one.
if not kwargs_has_attention_mask and requires_attention_mask and accepts_attention_mask:
model_kwargs[
"attention_mask"] = self._prepare_attention_mask_for_generation(
inputs_tensor, generation_config, model_kwargs)
        # An attention mask was passed in; check that it is well-formed.
elif kwargs_has_attention_mask:
# TODO (joao): generalize this check with other types of inputs
if model_input_name == "input_ids" and len(
model_kwargs["attention_mask"].shape) > 2:
raise ValueError(
"`attention_mask` passed to `generate` must be 2D.")
        # encoder-decoder models: prepare encoder outputs
if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
# if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
inputs_tensor, model_kwargs, model_input_name,
generation_config)
# 5. Prepare `input_ids` which will be used for auto-regressive generation
# encoder-decoder model
if self.config.is_encoder_decoder:
input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
batch_size=batch_size,
model_input_name=model_input_name,
model_kwargs=model_kwargs,
decoder_start_token_id=generation_config.
_decoder_start_token_tensor,
device=inputs_tensor.device,
)
else:
input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop(
"input_ids") # torch.Size([1, 25]) # torch.Size([1, 25])
        # Heal incomplete trailing tokens if requested.
        if generation_config.token_healing:
            input_ids = self.heal_tokens(input_ids, tokenizer)
        # Streaming output.
        if streamer is not None:
            streamer.put(input_ids.cpu())
# 6. Prepare `max_length` depending on other stopping criteria.
input_ids_length = input_ids.shape[-1]
has_default_max_length = kwargs.get(
"max_length") is None and generation_config.max_length is not None
has_default_min_length = kwargs.get(
"min_length") is None and generation_config.min_length is not None
        # min_length defaults to 0.
        # Resolve the final generation-length settings.
generation_config = self._prepare_generated_length(
generation_config=generation_config,
has_default_max_length=has_default_max_length,
has_default_min_length=has_default_min_length,
            model_input_name=model_input_name,  # "input_ids"
            inputs_tensor=inputs_tensor,
            input_ids_length=input_ids_length,  # prompt length
)
# If the model supports `logits_to_keep` in forward(), set it to 1 to avoid computing the whole
# logit matrix. This can save a lot of memory during the first forward pass. Note that assisted decoding
# dynamically overrides this value as it can need more than the last token logits
if self._supports_logits_to_keep(
) and "logits_to_keep" not in model_kwargs:
model_kwargs["logits_to_keep"] = 1
        # Keeping only the last token's logits avoids materializing the full
        # (seq_len, vocab) matrix on the first forward pass; assisted decoding
        # (e.g. beam width 5) overrides logits_to_keep when it needs logits for
        # several candidate tokens.
        # Validate the requested generation length.
        self._validate_generated_length(generation_config, input_ids_length,
                                        has_default_max_length)
# 7. Prepare the cache.
# - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
# - different models have a different cache name expected by the model (default = "past_key_values")
# - `max_length`, prepared above, is used to determine the maximum cache length
        max_cache_length = generation_config.max_length - 1  # cache at most max_length - 1 tokens
        # If the model input is embeddings, extend the cache accordingly.
if (inputs_tensor.shape[1] != input_ids_length
and model_input_name == "inputs_embeds"
and not self.config.is_encoder_decoder):
max_cache_length += inputs_tensor.shape[1]
self._prepare_cache_for_generation(generation_config, model_kwargs,
assistant_model, batch_size,
max_cache_length, device)
        # 8. determine generation mode
        generation_mode = generation_config.get_generation_mode(
            assistant_model)  # accounts for assisted decoding
if streamer is not None and (generation_config.num_beams > 1):
raise ValueError(
"`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
)
        # Device consistency check.
if not is_torchdynamo_compiling(
) and self.device.type != input_ids.device.type:
warnings.warn(
"You are calling .generate() with the `input_ids` being on a device type different"
f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
" Please make sure that you have put `input_ids` to the"
f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
" running `.generate()`.",
UserWarning,
)
        # 9. prepare logits processors and stopping criteria
prepared_logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_length,
encoder_input_ids=inputs_tensor,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
logits_processor=logits_processor,
device=inputs_tensor.device,
model_kwargs=model_kwargs,
negative_prompt_ids=negative_prompt_ids,
negative_prompt_attention_mask=negative_prompt_attention_mask,
)
prepared_stopping_criteria = self._get_stopping_criteria_for_seminat(
generation_config=generation_config,
stopping_criteria=stopping_criteria,
tokenizer=tokenizer,
**kwargs)
# Set model_kwargs `use_cache` so we can use it later in forward runs
model_kwargs["use_cache"] = generation_config.use_cache
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_return_sequences, # 1
is_encoder_decoder=self.config.is_encoder_decoder, # false
**model_kwargs,
)
        result = self._sampleforseminat(
            input_ids,
            logits_processor=prepared_logits_processor,  # rewrites the logits before each step (repetition bans, forced tokens, ...)
            stopping_criteria=prepared_stopping_criteria,
            generation_config=generation_config,
            synced_gpus=synced_gpus,  # multi-GPU sync
streamer=streamer,
prefilling_length=prefilling_length,
**model_kwargs,
)
# Convert to legacy cache format if requested
if (generation_config.return_legacy_cache is True
and not is_torchdynamo_compiling()
and hasattr(result, "past_key_values") and getattr(
result.past_key_values, "to_legacy_cache") is not None):
result.past_key_values = result.past_key_values.to_legacy_cache()
return result
def _get_stopping_criteria_for_seminat(
self,
generation_config: GenerationConfig,
stopping_criteria: Optional[StoppingCriteriaList],
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
**kwargs,
) -> StoppingCriteriaList:
criteria = StoppingCriteriaList()
if generation_config.max_length is not None:
max_position_embeddings = getattr(self.config, "max_position_embeddings", None)
criteria.append(
MaxLengthCriteria(
max_length=generation_config.max_length,
max_position_embeddings=max_position_embeddings,
)
)
if generation_config.max_time is not None:
criteria.append(MaxTimeCriteria(max_time=generation_config.max_time))
if generation_config.stop_strings is not None:
if tokenizer is None:
raise ValueError(
"There are one or more stop strings, either in the arguments to `generate` or in the "
"model's generation config, but we could not locate a tokenizer. When generating with "
"stop strings, you must pass the model's tokenizer to the `tokenizer` argument of `generate`."
)
criteria.append(StopStringCriteria(stop_strings=generation_config.stop_strings, tokenizer=tokenizer))
if generation_config._eos_token_tensor is not None:
criteria.append(EosTokenCriteriaForSemiNAT(eos_token_id=generation_config._eos_token_tensor))
if (
generation_config.is_assistant
and generation_config.assistant_confidence_threshold is not None
and generation_config.assistant_confidence_threshold > 0
):
criteria.append(
ConfidenceCriteria(assistant_confidence_threshold=generation_config.assistant_confidence_threshold)
)
criteria = self._merge_criteria_processor_list(criteria, stopping_criteria)
return criteria
def _sampleforseminat(
self,
input_ids: torch.LongTensor,
logits_processor: LogitsProcessorList,
stopping_criteria: StoppingCriteriaList,
generation_config: GenerationConfig,
synced_gpus: bool,
streamer: Optional["BaseStreamer"],
prefilling_length: int,
**model_kwargs,
) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
        # init values
        pad_token_id = generation_config._pad_token_tensor
        output_attentions = generation_config.output_attentions
        output_hidden_states = generation_config.output_hidden_states
        output_scores = generation_config.output_scores
        output_logits = generation_config.output_logits
        return_dict_in_generate = generation_config.return_dict_in_generate
        max_length = generation_config.max_length
        has_eos_stopping_criteria = any(
            hasattr(criteria, "eos_token_id")
            for criteria in stopping_criteria)  # is any stop condition EOS-based?
        do_sample = generation_config.do_sample
        # init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
raw_logits = () if (return_dict_in_generate
and output_logits) else None
decoder_attentions = () if (return_dict_in_generate
and output_attentions) else None
cross_attentions = () if (return_dict_in_generate
and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate
and output_hidden_states) else None
        # Encoder-decoder models (not used here): retrieve encoder attention
        # weights and hidden states.
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get(
"attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states")
if output_hidden_states else None)
# pdb.set_trace()
# 初始化序列跟踪
# keep track of which sequences are already finished
batch_size, cur_len = input_ids.shape
this_peer_finished = False
unfinished_sequences = torch.ones(
batch_size, dtype=torch.long,
device=input_ids.device) # 初始化未完成序列标记 torch.Size([1])
model_kwargs = self._get_initial_cache_position(
input_ids, model_kwargs) # initialize cache positions
model_forward = self.__call__ # default forward entry point
if isinstance(model_kwargs.get("past_key_values"), Cache):
is_compileable = model_kwargs[
"past_key_values"].is_compileable and self._supports_static_cache # torch.compile eligibility
is_compileable = is_compileable and not self.generation_config.disable_compile
if is_compileable and (
self.device.type == "cuda"
or generation_config.compile_config._compile_all_devices):
import os  # local import; `os` does not appear in the module-level imports (assumption)
os.environ["TOKENIZERS_PARALLELISM"] = "0"
model_forward = self.get_compiled_call(
generation_config.compile_config)
############ prefilling ############
start = prefilling_length-1
chunk_length = prefilling_length
s_pos = [start]
while True:
start += chunk_length
if start >= input_ids.shape[1] - 1:
s_pos.append(input_ids.shape[1] - 1)
break
else:
s_pos.append(start)
slice_pos = torch.tensor(s_pos).unsqueeze(0).to(
input_ids.device)
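# Worked example (comment only): with prefilling_length = 4 and a 10-token
# prompt, start = 3 and chunk_length = 4, so the loop above produces
# s_pos = [3, 7, 9] (the last boundary is clamped to the final prompt index)
# and slice_pos = tensor([[3, 7, 9]]).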
model_kwargs['slice_pos'] = slice_pos
count = (slice_pos != -1).sum().item() # number of active chunk boundaries (informational here)
############ prefilling ############
is_prefill = True
while self._has_unfinished_sequences(
this_peer_finished,
synced_gpus,
device=input_ids.device,
cur_len=cur_len,
max_length=max_length): # loop until all sequences are finished
# prepare model inputs
# model_kwargs.keys(): dict_keys(['attention_mask', 'logits_to_keep', 'past_key_values', 'use_cache', 'cache_position', 'nar_kv_cache', 'slice_pos'])
model_inputs = self.prepare_inputs_for_generation( # adds position_ids and input_ids
input_ids, **model_kwargs
) # -> dict_keys(['cache_position', 'past_key_values', 'input_ids', 'inputs_embeds', 'position_ids', 'attention_mask', 'logits_to_keep', 'use_cache'])
model_inputs.update({"input_ids": input_ids})
# prepare variable output controls (note: some models won't accept all output controls)
model_inputs.update({"output_attentions": output_attentions}
if output_attentions else {})
model_inputs.update({"output_hidden_states": output_hidden_states}
if output_hidden_states else {})
if is_prefill:
# prefill pass over the whole prompt with the custom forward
outputs = self.forward(**model_inputs, return_dict=True, is_prefill=True) # NOTE: the position_ids here are incorrect
is_prefill = False
else:
outputs = model_forward(**model_inputs, return_dict=True, is_prefill=False)
# synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
model_kwargs = self._update_model_kwargs_for_generation_for_seminat(
outputs,
model_kwargs,
is_encoder_decoder=self.config.is_encoder_decoder,
num_new_tokens=outputs.logits.size(1))
if synced_gpus and this_peer_finished:
continue
# Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
# (the clone itself is always small)
next_token_logits = outputs.logits.clone().float() # logits for all k newly generated tokens
next_token_logits = next_token_logits.to(input_ids.device)
# pre-process distribution
next_token_scores = logits_processor(input_ids, next_token_logits)
# token selection
if do_sample:
probs = nn.functional.softmax(next_token_scores, dim=-1)
# TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
# torch.multinomial expects 2-D input, so flatten the k new positions first
probs_2d = probs.reshape(-1, probs.size(-1))
next_tokens = torch.multinomial(
probs_2d, num_samples=1).reshape(probs.size(0), probs.size(1))
else:
next_tokens = torch.argmax(
next_token_scores,
dim=-1) # token ids, shape [batch, k]
# update slice_pos with the boundary of the newly generated chunk
count = (model_kwargs['slice_pos'] != -1).sum().item()
new_slice_pos = model_kwargs['slice_pos'][:, count - 1] + outputs.logits.size(1)
model_kwargs['slice_pos'] = torch.cat([model_kwargs['slice_pos'], new_slice_pos.unsqueeze(1)], dim=-1)
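# e.g. slice_pos = [[3, 7, 9]] and a step emitting k = outputs.logits.size(1) = 4
# tokens appends boundary 9 + 4 = 13, giving slice_pos = [[3, 7, 9, 13]]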
# finished sentences should have their next token be a padding token
if has_eos_stopping_criteria:
# once a sequence finishes its flag is 0, so every new position receives padding;
# next_tokens is [batch, k], so broadcast the per-sequence flags over the k tokens
next_tokens = next_tokens * unfinished_sequences[:, None] + pad_token_id * (
1 - unfinished_sequences[:, None])
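# e.g. unfinished_sequences = [1, 0], next_tokens = [[5, 8], [9, 2]], pad = 1
# -> [[5, 8], [1, 1]]: the finished sequence only receives padding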
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens], dim=-1)
if streamer is not None:
streamer.put(next_tokens.cpu())
# update finished flags using the k tokens emitted this step
unfinished_sequences = unfinished_sequences & ~stopping_criteria(
input_ids, scores, last_k=next_tokens.size(1))
this_peer_finished = unfinished_sequences.max() == 0
cur_len += outputs.logits.size(1) # advance length by the k new tokens
# This is needed to properly delete outputs.logits which may be very large for first iteration
# Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
del outputs
if streamer is not None:
streamer.end()
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return GenerateEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
logits=raw_logits,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
past_key_values=model_kwargs.get("past_key_values"),
)
else:
return GenerateDecoderOnlyOutput(
sequences=input_ids,
scores=scores,
logits=raw_logits,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
past_key_values=model_kwargs.get("past_key_values"),
)
else:
return input_ids
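# Hedged usage sketch (assumes `generate` dispatches to `_sampleforseminat` and
# forwards `prefilling_length`; the call below is illustrative, not a fixed API):
#
#     out = model.generate(
#         input_ids,
#         max_new_tokens=128,
#         do_sample=False,
#         prefilling_length=4,  # chunk size used to slice the prompt
#     )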
def _update_model_kwargs_for_generation_for_seminat(
self,
outputs: ModelOutput,
model_kwargs: Dict[str, Any],
is_encoder_decoder: bool = False,
num_new_tokens: int = 1,
) -> Dict[str, Any]:
ALL_CACHE_NAMES = [
"past_key_values", # default
"cache_params", # mamba-based models
"state", # rwkv
"mems", # xlnet
"past_buckets_states", # reformer
]
# update past_key_values keeping its naming used in model code
for possible_cache_name in ALL_CACHE_NAMES:
if possible_cache_name in outputs:
# TODO (joao): remove output/input mismatch when these old models (xlnet, reformer) are deprecated
if possible_cache_name in ("past_buckets_states", "mems"):
cache_name = "past_key_values"
else:
cache_name = possible_cache_name
model_kwargs[cache_name] = getattr(outputs,
possible_cache_name)
break
# update token_type_ids with last value
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = torch.cat(
[token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
if not is_encoder_decoder:
# update attention mask
# key difference from the stock implementation: grow the mask by num_new_tokens at once
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = torch.cat(
[
attention_mask,
attention_mask.new_ones(
(attention_mask.shape[0], num_new_tokens
)) # append num_new_tokens mask positions at once instead of 1
],
dim=-1)
else:
# update decoder attention mask
if "decoder_attention_mask" in model_kwargs:
decoder_attention_mask = model_kwargs["decoder_attention_mask"]
model_kwargs["decoder_attention_mask"] = torch.cat(
[
decoder_attention_mask,
decoder_attention_mask.new_ones(
(decoder_attention_mask.shape[0], 1))
],
dim=-1,
)
if model_kwargs.get("use_cache", True):
model_kwargs["cache_position"] = torch.arange(model_kwargs["cache_position"][-1:].item() + 1, model_kwargs["cache_position"][-1:].item() + num_new_tokens + 1, dtype=model_kwargs["cache_position"].dtype).to(model_kwargs["cache_position"].device)
# model_kwargs["cache_position"] = torch.tensor([
# model_kwargs["cache_position"][-1:].item() + 1
# ]).to(model_kwargs["cache_position"].device)
else:
past_positions = model_kwargs.pop("cache_position")
new_positions = torch.arange(
past_positions[-1] + 1,
past_positions[-1] + num_new_tokens + 1,
dtype=past_positions.dtype).to(past_positions.device)
model_kwargs["cache_position"] = torch.cat(
(past_positions, new_positions))
return model_kwargs
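# Worked example (comment only): with use_cache=True, cache_position = [5] and
# num_new_tokens = 3, the update yields torch.arange(6, 9) = [6, 7, 8];
# with use_cache=False the old positions are kept: [..., 5, 6, 7, 8]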
class AbsolutePositionalEncoding(nn.Module):
def __init__(self, hidden_size: int, max_len: int = 2048):
"""
初始化绝对位置编码
参数:
hidden_size (int): 隐藏层维度
max_len (int): 最大序列长度
"""
super().__init__()
# build the positional-encoding table
pe = torch.zeros(max_len, hidden_size)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, hidden_size, 2).float() * (-math.log(10000.0) / hidden_size))
# even dimensions use sin, odd dimensions use cos
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0) # [1, max_len, hidden_size]
# register as a buffer (not a trainable parameter)
self.register_buffer('pe', pe)
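# The table built above implements the standard sinusoidal encoding:
#   pe[pos, 2i]   = sin(pos / 10000^(2i / hidden_size))
#   pe[pos, 2i+1] = cos(pos / 10000^(2i / hidden_size))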
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
添加位置编码到输入张量
参数:
x (torch.Tensor): 输入张量,形状为 (batch_size, seq_len, hidden_size)
返回:
torch.Tensor: 添加位置编码后的张量,形状与输入相同
"""
seq_len = x.size(1)
return x + self.pe[:, :seq_len]
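# Minimal usage sketch (comment only; shapes are illustrative):
#
#     ape = AbsolutePositionalEncoding(hidden_size=512, max_len=2048)
#     x = torch.randn(2, 16, 512)   # (batch, seq_len, hidden)
#     y = ape(x)                    # same shape as x, with pe[:, :16] added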