ykzhang721 committed on
Commit c4b413a · verified · 1 Parent(s): 9dd3447

Upload modelforseminat_v6.py with huggingface_hub

Files changed (1)
  1. modelforseminat_v6.py +1918 -0
modelforseminat_v6.py ADDED
@@ -0,0 +1,1918 @@
from transformers import Olmo2Model, Olmo2ForCausalLM, AutoTokenizer, logging
from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES
from transformers.modeling_outputs import (
    CausalLMOutputWithPast,
    BaseModelOutputWithPast,
)
import numpy as np
import math
from torch import nn
import pandas as pd
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from dataclasses import dataclass

# Olmo2
from transformers.models.olmo2.modeling_olmo2 import Olmo2RotaryEmbedding, Olmo2Attention, Olmo2MLP, Olmo2RMSNorm, apply_rotary_pos_emb, eager_attention_forward, Olmo2DecoderLayer
from transformers.models.olmo2.configuration_olmo2 import Olmo2Config
from transformers.processing_utils import Unpack
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.utils import LossKwargs
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS

from torch.nn.functional import cosine_similarity
import pdb
import warnings  # used by generate() below
from dataset import *
import torch
import torch.nn.functional as F
import functools
import torch.distributed as dist
from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
    MixedPrecision,
    BackwardPrefetch,
    ShardingStrategy,
    FullStateDictConfig,
    StateDictType,
)
from torch.distributed.fsdp.wrap import (
    transformer_auto_wrap_policy,
    enable_wrap,
    wrap,
)
from functools import partial
from torch.utils.data import DataLoader
from pathlib import Path
from typing import Type, List, Optional, Tuple, Union, Callable, Dict, Any

logger = logging.get_logger(__name__)  # used by the warnings emitted in forward() and generate()


############ specially for generate() #################
import inspect
from transformers.generation.configuration_utils import (
    NEED_SETUP_CACHE_CLASSES_MAPPING,
    QUANT_BACKEND_CLASSES_MAPPING,
    GenerationConfig,
    GenerationMode,
)
from transformers.generation.logits_process import LogitsProcessorList
from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
from transformers.integrations.fsdp import is_fsdp_managed_module

from transformers.generation.utils import (
    is_torchdynamo_compiling, ModelOutput, GenerateDecoderOnlyOutput,
    GenerateEncoderDecoderOutput, GenerateBeamDecoderOnlyOutput,
    GenerateBeamEncoderDecoderOutput, GreedySearchDecoderOnlyOutput,
    ContrastiveSearchDecoderOnlyOutput, SampleDecoderOnlyOutput,
    ContrastiveSearchEncoderDecoderOutput, GreedySearchEncoderDecoderOutput,
    SampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput,
    BeamSampleDecoderOnlyOutput, BeamSearchEncoderDecoderOutput,
    BeamSampleEncoderDecoderOutput, GreedySearchOutput, SampleOutput,
    BeamSearchOutput, BeamSampleOutput, ContrastiveSearchOutput,
    GenerateNonBeamOutput, GenerateBeamOutput, GenerateOutput)
from transformers.generation.stopping_criteria import (
    STOPPING_CRITERIA_INPUTS_DOCSTRING,
    ConfidenceCriteria,
    EosTokenCriteria,
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteria,
    StoppingCriteriaList,
    StopStringCriteria,
)
from transformers.pytorch_utils import isin_mps_friendly
from transformers.utils import add_start_docstrings

class EosTokenCriteriaForSemiNAT(StoppingCriteria):
    """
    This class can be used to stop generation whenever the "end-of-sequence" token is generated.
    By default, it uses the `model.generation_config.eos_token_id`.

    Args:
        eos_token_id (`Union[int, List[int], torch.Tensor]`):
            The id(s) of the *end-of-sequence* token.
    """

    def __init__(self, eos_token_id: Union[int, List[int], torch.Tensor]):
        if not isinstance(eos_token_id, torch.Tensor):
            if isinstance(eos_token_id, int):
                eos_token_id = [eos_token_id]
            eos_token_id = torch.tensor(eos_token_id)
        self.eos_token_id = eos_token_id

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, last_k: int, **kwargs) -> torch.BoolTensor:
        # A chunk emits up to `last_k` tokens at once, so check the last
        # `last_k` positions rather than only the final one.
        self.eos_token_id = self.eos_token_id.to(input_ids.device)
        token_is_eos = isin_mps_friendly(input_ids[:, -last_k:], self.eos_token_id)
        is_done = torch.any(token_is_eos, dim=1)
        return is_done
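
# --- Illustrative sketch (not part of the original upload): generation stops
# as soon as any of the last `last_k` emitted tokens is an EOS id. The token
# ids below are made up.
def _demo_eos_criteria():
    criteria = EosTokenCriteriaForSemiNAT(eos_token_id=50279)
    ids = torch.tensor([[11, 22, 50279, 33],   # EOS inside the last chunk -> done
                        [11, 22, 33, 44]])     # no EOS -> keep generating
    done = criteria(ids, scores=None, last_k=3)
    assert done.tolist() == [True, False]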


############ specially for generate() #################


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@dataclass
class ModelOutputWithPastForSemiNAT(BaseModelOutputWithPast):

    chunk_hidden_state: torch.FloatTensor = None
    length_ground_truth: Optional[torch.FloatTensor] = None
    length_logits: Optional[torch.FloatTensor] = None
    position_embeddings: Optional[torch.FloatTensor] = None  # ?
    nar_hidden_state: torch.FloatTensor = None  # ?
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
class CausalLMOutputWithPastForSemiNAT(CausalLMOutputWithPast):
    nat_new_tokens: torch.LongTensor = None
    nat_logits: torch.FloatTensor = None

class TwoLayerMLP(nn.Module):
    def __init__(self, hidden_size: int, dropout_rate: float = 0.1):
        """
        Two-layer MLP that supports arbitrary leading batch dimensions.

        Args:
            hidden_size (int): hidden dimension
            dropout_rate (float): dropout rate, defaults to 0.1
        """
        super().__init__()

        self.fc1 = nn.Linear(hidden_size, 4 * hidden_size)  # first layer expands the dimension 4x
        self.fc2 = nn.Linear(4 * hidden_size, hidden_size)  # second layer projects back to hidden_size
        self.dropout = nn.Dropout(p=dropout_rate)
        self.activation = nn.GELU()  # GELU activation

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass supporting arbitrary leading batch dimensions.

        Args:
            x (torch.Tensor): input of shape (..., hidden_size); any leading dims are allowed

        Returns:
            torch.Tensor: output with the same shape as the input
        """
        # remember the original shape
        original_shape = x.shape
        hidden_size = original_shape[-1]

        # flatten to 2-D: (-1, hidden_size), folding all leading dimensions together
        x_2d = x.view(-1, hidden_size)

        # first layer: linear -> activation -> dropout
        x_2d = self.fc1(x_2d)
        x_2d = self.activation(x_2d)
        x_2d = self.dropout(x_2d)

        # second layer: linear
        x_2d = self.fc2(x_2d)
        # restore the original shape
        x = x_2d.view(*original_shape)
        return x
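
# --- Illustrative sketch (not part of the original upload): TwoLayerMLP keeps
# the trailing hidden dimension and folds any leading dims, so chunk-shaped
# inputs pass through with their shape unchanged. Sizes below are made up.
def _demo_two_layer_mlp():
    mlp = TwoLayerMLP(hidden_size=8)
    x = torch.randn(2, 3, 8)  # (batch, chunk, hidden)
    y = mlp(x)
    assert y.shape == x.shape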


class Olmo2ConfigForSemiNAT(Olmo2Config):
    def __init__(self, chunk_size_limit: int = 5, decoder_layers: int = 1, encoder_layer: int = 1, mlp: bool = False, position_embedding_type: str = "absolute", attn_implementation: str = "sdpa", length_loss_type: str = "ce", **kwargs):
        super().__init__(**kwargs)
        self.chunk_size_limit = chunk_size_limit
        self.decoder_layers = decoder_layers
        self.encoder_layer = encoder_layer
        self.mlp = mlp
        self.position_embedding_type = position_embedding_type
        self._attn_implementation = attn_implementation
        self.length_loss_type = length_loss_type
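
# --- Illustrative sketch (not part of the original upload): the config only
# adds the semi-NAT knobs on top of Olmo2Config; everything else falls back to
# the Olmo2 defaults. Values below are made up.
def _demo_config():
    cfg = Olmo2ConfigForSemiNAT(chunk_size_limit=5, mlp=True, length_loss_type="mse")
    assert cfg.chunk_size_limit == 5 and cfg.length_loss_type == "mse"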


class Olmo2AttentionForSemiNAT(nn.Module):
    """Multi-headed attention from the 'Attention Is All You Need' paper."""

    def __init__(self, config: Olmo2ConfigForSemiNAT, layer_idx: Optional[int] = None, is_causal: bool = True):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(
            config, "head_dim",
            config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = is_causal

        self.q_proj = nn.Linear(config.hidden_size,
                                config.num_attention_heads * self.head_dim,
                                bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size,
                                config.num_key_value_heads * self.head_dim,
                                bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size,
                                config.num_key_value_heads * self.head_dim,
                                bias=config.attention_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim,
                                config.hidden_size,
                                bias=config.attention_bias)
        self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim,
                                   config.rms_norm_eps)
        self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim,
                                   config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor],
               Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        if position_embeddings is not None:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(
                query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "cache_position": cache_position
            }
            key_states, value_states = past_key_value.update(
                key_states, value_states, self.layer_idx, cache_kwargs)

        # fixed to SDPA for both the NAT encoder and the NAT decoder
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS["sdpa"]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            is_causal=self.is_causal,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Olmo2DecoderLayerForSemiNAT(nn.Module):

    def __init__(
        self,
        config: Olmo2ConfigForSemiNAT,
        layer_idx: int,
        is_causal: bool = True,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Olmo2AttentionForSemiNAT(config=config,
                                                  layer_idx=layer_idx,
                                                  is_causal=is_causal)
        self.mlp = Olmo2MLP(config)
        self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size,
                                                     eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size,
                                                       eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor,
                                            torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
                                                 torch.FloatTensor]]]:
        residual = hidden_states

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )

        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, )
        if output_attentions:
            outputs += (self_attn_weights, )

        return outputs


class NATEncoderForSemiNAT(nn.Module):

    def __init__(self, config: Olmo2ConfigForSemiNAT, num_layer: int = 1):
        super().__init__()
        self.num_layer = num_layer
        self.encoder_layers = nn.ModuleList([
            # TODO: check whether is_causal=False is needed here; an explicit
            # attn_mask takes precedence over is_causal anyway.
            Olmo2DecoderLayerForSemiNAT(config, layer_idx)
            for layer_idx in range(self.num_layer)
        ])

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor,
                                            torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
                                                 torch.FloatTensor]]]:
        for layer in self.encoder_layers:
            outputs = layer(hidden_states=hidden_states,
                            output_attentions=output_attentions,
                            position_embeddings=position_embeddings,
                            attention_mask=attention_mask)
            hidden_states = outputs[0]
        # only the last layer's attn_weights and present_key_value are stored;
        # mean pooling over each chunk happens in the caller
        # hidden_states = torch.mean(hidden_states, dim=1)
        return hidden_states


class NATDecoderForSemiNAT(nn.Module):

    def __init__(self, config: Olmo2ConfigForSemiNAT, num_layer: int = 1):
        super().__init__()
        self.num_layer = num_layer
        self.decoder_layers = nn.ModuleList([
            Olmo2DecoderLayerForSemiNAT(config, layer_idx, False)
            for layer_idx in range(self.num_layer)
        ])

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor,
                                            torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
                                                 torch.FloatTensor]]]:

        for layer in self.decoder_layers:
            outputs = layer(hidden_states=hidden_states,
                            attention_mask=attention_mask,
                            output_attentions=output_attentions,
                            position_embeddings=position_embeddings)
            hidden_states = outputs[0]
        return hidden_states


class Olmo2ModelForSemiNAT(Olmo2Model):

    def __init__(self, config):
        super().__init__(config)
        self.layers = nn.ModuleList([
            Olmo2DecoderLayer(config, layer_idx)
            for layer_idx in range(config.num_hidden_layers)
        ])

        self.decoder = NATDecoderForSemiNAT(config, config.decoder_layers)
        self.encoder = NATEncoderForSemiNAT(config, config.encoder_layer)

        self.chunk_size_limit = config.chunk_size_limit
        self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Olmo2RotaryEmbedding(config=config)
        self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
        self.gradient_checkpointing = False
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size,
                                         self.padding_idx)

        self.length_predictor = nn.Linear(config.hidden_size,
                                          self.chunk_size_limit)
        self.mlp = config.mlp
        if self.mlp:
            self.linear_projection = TwoLayerMLP(config.hidden_size)
        self.position_embedding_type = config.position_embedding_type

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        slice_pos: torch.Tensor = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        inference: Optional[bool] = None,
        padding: Optional[torch.Tensor] = None,
        is_prefill: Optional[bool] = False,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (output_hidden_states
                                if output_hidden_states is not None else
                                self.config.output_hidden_states)
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length(
            ) if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens,
                                          past_seen_tokens +
                                          inputs_embeds.shape[1],
                                          device=inputs_embeds.device)

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        if inference is not None:
            position_ids = cache_position.unsqueeze(0)

        position_embeddings = self.rotary_emb(inputs_embeds, position_ids)
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None
        max_chunk_num = (slice_pos != -1).sum(dim=1).max()

        ############ parallel chunk processing ############

        # Note: the incoming token-level attention_mask is not used below; the
        # encoder builds its own chunk-local mask instead.

        length_ground_truth = None
        if not inference or is_prefill:
            M_avg, attn_mask, length_ground_truth, chunk_attention_mask, slice_num = self.build_slice_matrix(input_ids, slice_pos)  # e.g. torch.Size([1, 111, 512])
            encoded_input = self.encoder(inputs_embeds, position_embeddings=position_embeddings, attention_mask=attn_mask)  # e.g. torch.Size([1, 512, 2048])

            M_avg = M_avg.contiguous()
            encoded_input = encoded_input.contiguous()
            # force bf16 here to match the training dtype
            M_avg = M_avg.to(torch.bfloat16)
            encoded_input = encoded_input.to(torch.bfloat16)

            chunk_inputs_embeds = torch.matmul(M_avg, encoded_input)
            accumu_num = sum(slice_num) - encoded_input.shape[0]

            chunk_inputs_embeds = chunk_inputs_embeds[:, :max_chunk_num, :]
            chunk_attention_mask = chunk_attention_mask[:, :max_chunk_num]
            length_ground_truth = length_ground_truth[:, :max_chunk_num]
            chunk_position_ids = position_ids[:, :max_chunk_num]
            chunk_cache_position = cache_position[:max_chunk_num]
        else:
            encoded_input = self.encoder(inputs_embeds[:, position_ids.squeeze(0)], position_embeddings=position_embeddings)
            chunk_inputs_embeds = torch.mean(encoded_input, dim=1).unsqueeze(0)
            # after prefill, update the chunk-level cache_position
            chunk_cache_position = torch.searchsorted(slice_pos.squeeze(0), cache_position - 1, right=True)[-1].unsqueeze(0)
            chunk_attention_mask = torch.ones(1, int(cache_position[0]))
            chunk_position_ids = chunk_cache_position.unsqueeze(0)

        chunk_position_embeddings = self.rotary_emb(
            chunk_inputs_embeds, chunk_position_ids
        )  # (cos, sin) tuple; e.g. each torch.Size([1, 256, 128]), where the last dim is hidden_size / num_heads

        hidden_states = chunk_inputs_embeds  # bs * max_chunk_num * hidden_size

        causal_mask = self._update_causal_mask(chunk_attention_mask,
                                               chunk_inputs_embeds,
                                               chunk_cache_position,
                                               past_key_values,
                                               output_attentions)

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states, )
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    chunk_cache_position,
                    chunk_position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=chunk_cache_position,
                    position_embeddings=chunk_position_embeddings,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1], )

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states, )

        hidden_states = self.norm(
            hidden_states)  # bs * max_chunk_num * hidden_size: hidden states of all chunks

        next_cache = next_decoder_cache if use_cache else None  # DynamicCache()

        # length-prediction head
        self.length_predictor = self.length_predictor.to(
            hidden_states.device).to(hidden_states.dtype)  # cast to bf16 to match training
        length_logits = self.length_predictor(
            hidden_states.to(
                hidden_states.device))  # bs * length * chunk_size_limit

        nar_hidden_states = None
        if inference is None:
            # NAR decoder
            bs, length, hidden_size = hidden_states.size()
            assert length == max_chunk_num

            nat_input_embeddings, nat_attention_mask = self.repeat_with_limit_and_pad_chunk_ar(
                hidden_states, inputs_embeds, accumu_num, length_ground_truth, self.chunk_size_limit, skip_val=-100)

            if self.mlp:
                nat_input_embeddings = self.linear_projection(nat_input_embeddings)

            # build the NAT attention mask
            mask_nat_attention_mask = self.nat_prepare_4d_full_attention_mask_with_causal(
                attention_mask=nat_attention_mask,
                dtype=nat_attention_mask.dtype,
                device=nat_attention_mask.device)

            self.decoder = self.decoder.to(dtype=nat_input_embeddings.dtype)
            if self.position_embedding_type == "relative":
                nar_chunk_position = torch.arange(
                    0, self.chunk_size_limit).unsqueeze(0).repeat(
                        accumu_num,
                        1).to(hidden_states.device)
                pos = self.rotary_emb(nat_attention_mask, nar_chunk_position)

            elif self.position_embedding_type == "absolute":
                nat_input_embeddings = self.pos_encoder(nat_input_embeddings)  # add absolute positional encoding
                pos = None

            nar_hidden_states = self.decoder(
                nat_input_embeddings,
                attention_mask=mask_nat_attention_mask,  # TODO: revisit the padding mask
                position_embeddings=pos,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=None,
            )
            nar_hidden_states = self.norm(
                nar_hidden_states)

        return ModelOutputWithPastForSemiNAT(
            chunk_hidden_state=hidden_states,
            length_ground_truth=length_ground_truth,
            length_logits=length_logits,
            position_embeddings=position_embeddings,
            nar_hidden_state=nar_hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def repeat_with_limit_and_pad_chunk_ar(self, x: torch.Tensor, inputs_embeds: torch.Tensor, accumu_num: int, repeat_counts: torch.Tensor, chunk_limit: int, skip_val: int = -100):
        # For every chunk after the first, build a NAT decoder input of length
        # chunk_limit: position 0 holds the previous chunk's hidden state and
        # positions 1..len hold the chunk's ground-truth token embeddings.
        bs, length, hidden_size = x.size()
        acc_num = 0
        out_tensor = torch.zeros(accumu_num, chunk_limit, hidden_size).to(x.device).to(x.dtype)
        for b in range(bs):
            start = repeat_counts[b, 0].item()
            for i in range(1, length):
                if repeat_counts[b, i] != -100:
                    out_tensor[acc_num, 0, :] = x[b, i - 1, :]
                    end = min(repeat_counts[b, i], chunk_limit - 1)
                    out_tensor[acc_num, 1:end + 1, :] = inputs_embeds[b, start:start + end, :]
                    acc_num += 1
                    start += repeat_counts[b, i]
                else:
                    break
        assert acc_num == accumu_num

        # Compute mask: shape (accumu_num, chunk_limit); positions that stayed
        # all-zero are treated as padding.
        mask = (out_tensor.abs().sum(dim=-1) != 0).to(torch.bfloat16)
        return out_tensor, mask

    def build_slice_matrix(self, input_ids, slice_pos: torch.Tensor):
        bs, num_slices = slice_pos.shape
        seq_len = input_ids.size(1)

        # replace -1 with 0 for the prev computation
        slice_pos_clipped = slice_pos.clone()
        slice_pos_clipped[slice_pos_clipped == -1] = 0

        # prevs (a) and currents (b)
        prevs = torch.cat([
            torch.zeros((bs, 1), device=slice_pos.device, dtype=slice_pos.dtype),
            slice_pos_clipped[:, :-1] + 1
        ], dim=1)
        currents = slice_pos_clipped + 1

        # valid mask
        valid_mask = (slice_pos != -1)
        lengths = currents - prevs  # (bs, num_slices)
        lengths[lengths <= 0] = -100  # invalid values

        # number of non -100 entries per row
        slice_num = (lengths != -100).sum(dim=1).tolist()

        # chunk mask
        chunk_mask = torch.zeros_like(lengths, dtype=torch.long)
        for i in range(lengths.size(0)):
            chunk_mask[i, :slice_num[i]] = 1
        values = torch.zeros_like(lengths, dtype=torch.float)
        values[valid_mask] = 1.0 / lengths[valid_mask]

        chunk_nums = valid_mask.sum(dim=1)
        max_chunk_num = chunk_nums.max().item()

        # initialize the output matrix M
        M = torch.zeros((bs, max_chunk_num, seq_len), device=slice_pos.device)

        # boolean visibility mask (bs, 1, seq_len, seq_len); True = may attend.
        # Start from the identity so padding tokens can at least see themselves.
        # (repeat rather than expand, so the in-place writes below do not alias
        # the same storage across the batch)
        attn_mask = torch.eye(seq_len, dtype=torch.bool, device=slice_pos.device)  # [seq_len, seq_len]
        attn_mask = attn_mask.unsqueeze(0).unsqueeze(0).repeat(bs, 1, 1, 1)  # [bs, 1, seq_len, seq_len]

        # fill M and the attention mask
        for b in range(bs):
            a_b = prevs[b]
            b_b = currents[b]
            v_b = values[b]

            for i in range(num_slices):
                if not valid_mask[b, i]:
                    continue
                a = a_b[i].item()
                b_ = b_b[i].item()
                if b_ > a:
                    # fill the chunk-average matrix
                    M[b, i, a:b_] = v_b[i]
                    # allow full attention within the chunk
                    attn_mask[b, :, a:b_, a:b_] = True
        return M, attn_mask, lengths, chunk_mask, slice_num

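    # --- Illustrative sketch (not part of the original upload). The method
    # never touches `self`, so it can be exercised unbound. `slice_pos` holds
    # the index of each chunk's last token; row i of M mean-pools chunk i.
    @staticmethod
    def _demo_build_slice_matrix():
        input_ids = torch.arange(6).unsqueeze(0)    # (1, 6)
        slice_pos = torch.tensor([[1, 3, 5, -1]])   # chunks [0,1], [2,3], [4,5]
        M, attn_mask, lengths, chunk_mask, slice_num = \
            Olmo2ModelForSemiNAT.build_slice_matrix(None, input_ids, slice_pos)
        assert M.shape == (1, 3, 6) and slice_num == [3]
        assert torch.allclose(M[0, 0, :2], torch.tensor([0.5, 0.5]))  # mean over chunk 0
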
    def nat_prepare_4d_full_attention_mask_with_causal(
        self,
        attention_mask: torch.Tensor,  # (bs, L) 1=real, 0=pad
        dtype: torch.dtype,
        device: torch.device,
        mask_val: float = -1e4,
    ) -> torch.Tensor:
        """
        - Rows whose query is a real token (attention_mask == 1):
          may only attend to keys that are also real tokens and satisfy causality (j <= i).
        - Rows whose query is padding:
          use the causal lower triangle (j <= i), again restricted to real keys,
          so that no row ends up entirely -inf.
        Returns an additive mask of shape (bs, 1, L, L).
        """
        if attention_mask.dim() != 2:
            raise ValueError("Expected 2-D attention_mask with shape (batch, seq_len)")

        bs, L = attention_mask.shape
        attn_mask_f = attention_mask.to(device=device, dtype=torch.float32)

        # lower-triangular causal mask
        lower_tri = torch.tril(torch.ones(L, L, device=device))

        # causal visibility for real tokens
        valid_query = attn_mask_f[:, :, None]  # (bs, L, 1)
        valid_key = attn_mask_f[:, None, :]  # (bs, 1, L)
        valid_causal = valid_query * lower_tri * valid_key  # (bs, L, L)

        # causal visibility for padding rows
        query_is_pad = (1.0 - attn_mask_f)[:, :, None]
        pad_causal = query_is_pad * lower_tri * valid_key  # (bs, L, L)

        # combine
        visible = torch.clamp(valid_causal + pad_causal, 0.0, 1.0)

        additive_mask = (1.0 - visible) * mask_val
        additive_mask = additive_mask[:, None, :, :]  # (bs, 1, L, L)

        return additive_mask.to(dtype=dtype)

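    # --- Illustrative sketch (not part of the original upload): the resulting
    # additive mask is the causal lower triangle restricted to real keys, with
    # blocked positions set to mask_val. `self` is unused, so call unbound.
    @staticmethod
    def _demo_nat_mask():
        attention_mask = torch.tensor([[1, 1, 0]], dtype=torch.float32)  # last token is padding
        m = Olmo2ModelForSemiNAT.nat_prepare_4d_full_attention_mask_with_causal(
            None, attention_mask, dtype=torch.float32, device=torch.device("cpu"))
        assert m.shape == (1, 1, 3, 3)
        assert m[0, 0, 2, 0] == 0.0    # a padding row may still see real past keys
        assert m[0, 0, 1, 2] == -1e4   # nobody attends to a padding key
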
    # Note: the original definition had no `self` parameter, so it is exposed
    # here as a static method.
    @staticmethod
    def compute_chunk_lengths(slice_pos: torch.Tensor, pad_value: int = -100):
        """
        Args:
            slice_pos: [B, L] cut points; a value means "cut after this token", -1 is padding
        Returns:
            length_gt: [B, max_chunk_num], the length of every chunk, padded with pad_value
        """
        B, L = slice_pos.shape
        device = slice_pos.device

        length_ground_truth = []

        for b in range(B):
            pos = slice_pos[b]
            pos = pos[pos != -1] + 1  # keep valid cut points; +1 because the cut falls after the token
            cuts = torch.cat([
                torch.tensor([0], device=device),  # start point
                pos,
            ])
            lens = cuts[1:] - cuts[:-1]  # per-segment lengths

            # pad up to max_chunk_num (L)
            padded = torch.full((L,), pad_value, device=device, dtype=torch.long)
            padded[:lens.shape[0]] = lens
            length_ground_truth.append(padded)

        return torch.stack(length_ground_truth)  # [B, L]

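    # --- Illustrative sketch (not part of the original upload): chunk lengths
    # recovered from cut positions; the tail is padded with -100.
    @staticmethod
    def _demo_compute_chunk_lengths():
        slice_pos = torch.tensor([[1, 3, -1]])  # cuts after tokens 1 and 3
        lens = Olmo2ModelForSemiNAT.compute_chunk_lengths(slice_pos)
        assert lens.tolist() == [[2, 2, -100]]
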


class Olmo2ForCausalLMForSemiNAT(Olmo2ForCausalLM):

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.pos_encoder = AbsolutePositionalEncoding(config.hidden_size)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.chunk_size_limit = config.chunk_size_limit
        self.model = Olmo2ModelForSemiNAT(config)
        self.lm_head = nn.Linear(config.hidden_size,
                                 config.vocab_size,
                                 bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        slice_pos: Optional[torch.Tensor] = None,
        slice_label: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        is_prefill: Optional[bool] = False,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (output_hidden_states
                                if output_hidden_states is not None else
                                self.config.output_hidden_states)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            outputs = self.model(
                input_ids=input_ids,  # bs * length
                attention_mask=attention_mask,  # bs * length
                position_ids=position_ids,
                slice_pos=slice_pos,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                cache_position=cache_position,
                padding=self.padding_idx,
                is_prefill=is_prefill,
                **kwargs,
            )
        else:
            outputs = self.model(
                input_ids=input_ids,  # bs * length
                attention_mask=attention_mask,  # bs * length
                position_ids=position_ids,
                slice_pos=slice_pos,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                cache_position=cache_position,
                padding=self.padding_idx,
                inference=True,
                is_prefill=is_prefill,
            )

        chunk_hidden_states = outputs.chunk_hidden_state
        bs, length, hidden_size = chunk_hidden_states.size()

        ############ loss computation, in two parts ############
        loss = None
        loss1 = None
        loss2 = None
        ######## part 1: length-prediction loss from the MLP head (over chunk_size_limit classes) ########

        if labels is not None:
            length_ground_truth = outputs.length_ground_truth
            length_logits = outputs.length_logits

            new_length_ground_truth = torch.where(
                length_ground_truth != -100,
                length_ground_truth - 1,
                length_ground_truth
            )

            shift_length_logits = length_logits[:, :-1, :]
            shift_new_length_ground_truth = new_length_ground_truth[:, 1:]

            logits_flat = shift_length_logits.reshape(-1, self.chunk_size_limit)
            labels_flat = shift_new_length_ground_truth.reshape(-1)

            shift_slice_label = slice_label[:, 1:length_logits.size(1)]
            slice_label_flat = shift_slice_label.reshape(-1)
            mask = (slice_label_flat == -1)
            labels_flat[mask] = -100

            length_loss_type = getattr(self.config, "length_loss_type", "ce")
            if length_loss_type == "mse":
                logits_softmax = torch.nn.functional.softmax(logits_flat, dim=-1)
                predicted_lengths = torch.sum(
                    logits_softmax * torch.arange(self.chunk_size_limit).to(
                        chunk_hidden_states.device).to(chunk_hidden_states.dtype),
                    dim=1
                )
                loss1 = torch.mean((predicted_lengths[labels_flat != -100] -
                                    labels_flat[labels_flat != -100].float()) ** 2)
            elif length_loss_type == "ce":  # cross entropy
                loss1 = F.cross_entropy(
                    logits_flat[labels_flat != -100],
                    labels_flat[labels_flat != -100]
                )

            nar_hidden_state = outputs.nar_hidden_state

            ######## part 2: recover every token from the chunk hiddens and score against the ground truth ########

            nar_labels = torch.full(
                (nar_hidden_state.size(0), nar_hidden_state.size(1)),
                -100).to(nar_hidden_state.device)  # bs * length

            nar_labels = self.update_nar_labels(nar_labels, labels, slice_pos,
                                                length_ground_truth, input_ids,
                                                self.chunk_size_limit)

            # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
            slice_indices = slice(-logits_to_keep, None) if isinstance(
                logits_to_keep, int) else logits_to_keep
            logits = self.lm_head(
                nar_hidden_state[:, slice_indices, :])  # 1 * seq_len * 50304

            loss2 = self.loss_function_seminat(
                logits,
                nar_labels,
                self.vocab_size,
            )

        else:  # for inference
            softmaxed = torch.softmax(outputs.length_logits[:, -1, :], dim=-1)
            length = torch.argmax(softmaxed, dim=-1).item() + 1

            nat_input_embeddings = outputs.chunk_hidden_state[:, -1:, :]
            if self.config.mlp:
                nat_input_embeddings = self.model.linear_projection(nat_input_embeddings)

            nat_input_embeddings = self.pos_encoder(nat_input_embeddings, 0)  # add absolute positional encoding

            # Chunk-internal decoding: the predicted length says how many tokens
            # this chunk holds; they are decoded one by one from the chunk hidden.
            all_tokens = []
            all_logits = []
            for i in range(length):
                nar_hidden_states = self.model.decoder(
                    nat_input_embeddings,
                    attention_mask=None,
                    position_embeddings=None,
                    output_attentions=output_attentions,
                    use_cache=False,
                    cache_position=None,
                )
                nar_hidden_states = self.model.norm(nar_hidden_states)
                logits = self.lm_head(nar_hidden_states[:, :, :])
                next_token_logits = logits[:, -1:, :].clone().float().to(logits.device)
                next_tokens = torch.argmax(
                    next_token_logits,
                    dim=-1)
                all_tokens.append(next_tokens)
                all_logits.append(logits)
                nat_input_embeddings = torch.cat([nat_input_embeddings, self.pos_encoder(self.model.embed_tokens(next_tokens), i + 1)], dim=1)
            nat_new_tokens = torch.cat(all_tokens, dim=1)
            nat_logits = torch.cat(all_logits, dim=1)

            return CausalLMOutputWithPastForSemiNAT(
                loss=(loss1, loss2),
                logits=nat_logits,
                nat_new_tokens=nat_new_tokens,
                past_key_values=outputs.past_key_values,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )

        return CausalLMOutputWithPast(
            loss=(loss1, loss2),
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def update_nar_labels(self, nar_labels, labels, slice_pos,
                          length_ground_truth, input_ids, chunk_size_limit):
        bs, length = input_ids.size()
        chunk = 0
        for b in range(bs):
            last_cut = slice_pos[b][0]  # position of the first cut
            for i in range(1, length):
                if slice_pos[b, i] != -1:
                    try:
                        nar_labels[chunk, :length_ground_truth[b, i]] = labels[
                            b, last_cut + 1:slice_pos[b, i] + 1]
                    except Exception:
                        pdb.set_trace()  # drop into the debugger on misaligned slices
                    last_cut = slice_pos[b, i]
                    chunk += 1
                else:
                    break
        return nar_labels

    def fixed_cross_entropy(self,
                            source,
                            target,
                            num_items_in_batch: int = None,
                            ignore_index: int = -100,
                            **kwargs):
        reduction = "sum" if num_items_in_batch is not None else "mean"
        loss = F.cross_entropy(source,
                               target,
                               ignore_index=ignore_index,
                               reduction=reduction)
        if torch.isnan(loss):
            pdb.set_trace()  # loss is NaN; drop into the debugger
        if reduction == "sum":
            loss = loss / num_items_in_batch
        return loss

    def loss_function_seminat(self,
                              logits,
                              labels,
                              vocab_size: int,
                              num_items_in_batch: int = None,
                              ignore_index: int = -100,
                              **kwargs):
        # logits: (B, L, V)
        # labels: (B, L)

        logits = logits.float()
        labels = labels.to(logits.device)  # ensure device alignment

        # Flatten the tokens (no shift: position i is supervised by label i)
        logits = logits.view(-1, vocab_size)  # (B*L, V)
        labels = labels.view(-1)  # (B*L)

        # Compute loss
        loss = self.fixed_cross_entropy(logits, labels, num_items_in_batch,
                                        ignore_index, **kwargs)
        return loss

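    # --- Illustrative sketch (not part of the original upload): unlike the
    # standard causal-LM loss there is no one-position shift here; position i
    # of the NAT logits is scored directly against label i, and -100 labels
    # (padded chunk positions) are ignored.
    @staticmethod
    def _demo_seminat_loss():
        logits = torch.randn(1, 4, 7)                # (B, L, V)
        labels = torch.tensor([[3, 5, -100, -100]])  # padded positions ignored
        ref = F.cross_entropy(logits.view(-1, 7), labels.view(-1), ignore_index=-100)
        assert torch.isfinite(ref)
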
1225
+ def generate(
1226
+ self,
1227
+ inputs: Optional[torch.Tensor] = None,
1228
+ generation_config: Optional[GenerationConfig] = None,
1229
+ logits_processor: Optional[LogitsProcessorList] = None,
1230
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1231
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor],
1232
+ List[int]]] = None,
1233
+ synced_gpus: Optional[bool] = None,
1234
+ assistant_model: Optional["PreTrainedModel"] = None,
1235
+ streamer: Optional["BaseStreamer"] = None,
1236
+ negative_prompt_ids: Optional[torch.Tensor] = None,
1237
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
1238
+ prefilling_length: int = 0,
1239
+ **kwargs,
1240
+ ) -> Union[GenerateOutput, torch.LongTensor]:
1241
+
1242
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
1243
+ self._validate_model_class() #能进行generate的模型
1244
+ tokenizer = kwargs.pop(
1245
+ "tokenizer",
1246
+ None) # Pull this out first, we only use it for stopping criteria
1247
+ assistant_tokenizer = kwargs.pop(
1248
+ "assistant_tokenizer", None) # only used for assisted generation
1249
+
1250
+ # pdb.set_trace()
1251
+ generation_config, model_kwargs = self._prepare_generation_config(
1252
+ generation_config, **kwargs)
1253
+
1254
+ # GenerationConfig {
1255
+ # "eos_token_id": 50279,
1256
+ # "max_length": 2048,
1257
+ # "pad_token_id": 1
1258
+ # }
1259
+
1260
+ # model_kwargs: {input_ids: , attention_mask:}
1261
+
1262
+
1263
+ # pdb.set_trace()
1264
+ self._validate_model_kwargs(model_kwargs.copy()) # 检查模型参数是否正确
1265
+ self._validate_assistant(assistant_model, tokenizer,
1266
+ assistant_tokenizer) # 没用
1267
+
1268
+ # 2. Set generation parameters if not already defined
1269
+ # 判断是否在多GPU环境下同步生成(如DeepSpeed ZeRO-3或FSDP)
1270
+ if synced_gpus is None:
1271
+ synced_gpus = (
1272
+ is_deepspeed_zero3_enabled()
1273
+ or is_fsdp_managed_module(self)) and dist.get_world_size() > 1
1274
+
1275
+ # 初始化logits处理器和停止条件
1276
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList(
1277
+ ) # 定义对模型输出logits的修改规则(如禁止重复词、强制特定token等)。logits后处理
1278
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList(
1279
+ ) # 定义生成停止条件(如达到最大长度、检测到终止符等)。
1280
+
1281
+ accepts_attention_mask = "attention_mask" in set(
1282
+ inspect.signature(self.forward).parameters.keys()) # True,是否有attention mask
1283
+ requires_attention_mask = "encoder_outputs" not in model_kwargs # True,是否需要mask
1284
+ kwargs_has_attention_mask = model_kwargs.get("attention_mask",
1285
+ None) is not None # True,传没传mask
1286
+
1287
+ # pdb.set_trace()
1288
+
1289
+ # 3. Define model inputs
1290
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
1291
+ inputs, generation_config.bos_token_id, model_kwargs)
1292
+ # 处理成input_ids
1293
+
1294
+ batch_size = inputs_tensor.shape[0] # batch decoding
1295
+
1296
+ # inputs_tensor bs * input_length; model_input_name:"input_ids";model_kwargs: attention_mask
1297
+
1298
+ device = inputs_tensor.device
1299
+ self._prepare_special_tokens(generation_config,
1300
+ kwargs_has_attention_mask,
1301
+ device=device) # 在生成开始前,把所有特殊 token 参数规范化成张量形式、补全缺失值、做必要的警告检查,并存到 generation_config 里,保证后续生成逻辑在各种模型类型(decoder-only / encoder-decoder)下都能正常工作。
1302
+
1303
+ # decoder-only models must use left-padding for batched generation.
1304
+ # batch generation用的
1305
+ if not self.config.is_encoder_decoder and not is_torchdynamo_compiling(
1306
+ ):
1307
+ # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`
1308
+ # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.
1309
+ if (generation_config._pad_token_tensor is not None
1310
+ and batch_size > 1 and len(inputs_tensor.shape) == 2
1311
+ and torch.sum(inputs_tensor[:, -1] ==
1312
+ generation_config._pad_token_tensor) > 0):
1313
+ logger.warning(
1314
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
1315
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
1316
+ )
1317
+ # pdb.set_trace()
1318
+
1319
+
1320
+ # 4. Define other model kwargs
1321
+ # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are
1322
+ # generating the first new token or not, and we only want to use the embeddings for the first new token)
1323
+ if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds":
1324
+ generation_config.use_cache = True
1325
+ # 生成第一个新token时需要依赖缓存判断是否处于生成阶段,后续token生成依赖缓存加速。
1326
+
1327
+ # 生成attention mask(没有的话,一般都有的
1328
+ if not kwargs_has_attention_mask and requires_attention_mask and accepts_attention_mask:
1329
+ model_kwargs[
1330
+ "attention_mask"] = self._prepare_attention_mask_for_generation(
1331
+ inputs_tensor, generation_config, model_kwargs)
1332
+
1333
+ # 输入了attention,检查一下对不对
1334
+ elif kwargs_has_attention_mask:
1335
+ # TODO (joao): generalize this check with other types of inputs
1336
+ if model_input_name == "input_ids" and len(
1337
+ model_kwargs["attention_mask"].shape) > 2:
1338
+ raise ValueError(
1339
+ "`attention_mask` passed to `generate` must be 2D.")
1340
+
1341
+ # encoder-decoder model设定
1342
+ if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
1343
+ # if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
1344
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
1345
+ inputs_tensor, model_kwargs, model_input_name,
1346
+ generation_config)
1347
+
1348
+ # pdb.set_trace()
1349
+
1350
+ # 5. Prepare `input_ids` which will be used for auto-regressive generation
1351
+ # encoder-decoder model
1352
+ if self.config.is_encoder_decoder:
1353
+ input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
1354
+ batch_size=batch_size,
1355
+ model_input_name=model_input_name,
1356
+ model_kwargs=model_kwargs,
1357
+ decoder_start_token_id=generation_config.
1358
+ _decoder_start_token_tensor,
1359
+ device=inputs_tensor.device,
1360
+ )
1361
+ else:
1362
+ input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop(
1363
+ "input_ids") # torch.Size([1, 25]) # torch.Size([1, 25])
1364
+
1365
+ # 修复不完整的token
1366
+ if generation_config.token_healing:
1367
+ input_ids = self.heal_tokens(input_ids, tokenizer)
1368
+
1369
+ # 流式输出
1370
+ if streamer is not None:
1371
+ streamer.put(input_ids.cpu())
1372
+
1373
+ # pdb.set_trace()
1374
+
1375
+ # 6. Prepare `max_length` depending on other stopping criteria.
1376
+ input_ids_length = input_ids.shape[-1]
1377
+ has_default_max_length = kwargs.get(
1378
+ "max_length") is None and generation_config.max_length is not None
1379
+ has_default_min_length = kwargs.get(
1380
+ "min_length") is None and generation_config.min_length is not None
1381
+ # min_length是0
1382
+
1383
+ # 生成的一些config
1384
+ generation_config = self._prepare_generated_length(
1385
+ generation_config=generation_config,
1386
+ has_default_max_length=has_default_max_length,
1387
+ has_default_min_length=has_default_min_length,
1388
+ model_input_name=model_input_name, # "input_ids"
1389
+ inputs_tensor=inputs_tensor,
1390
+ input_ids_length=input_ids_length, #输入长度
1391
+ )
1392
+
1393
+ # If the model supports `logits_to_keep` in forward(), set it to 1 to avoid computing the whole
1394
+ # logit matrix. This can save a lot of memory during the first forward pass. Note that assisted decoding
1395
+ # dynamically overrides this value as it can need more than the last token logits
1396
+ if self._supports_logits_to_keep(
1397
+ ) and "logits_to_keep" not in model_kwargs:
1398
+ model_kwargs["logits_to_keep"] = 1
1399
+ # 模型在计算时仅保留最后一个 token 的 logits,从而大幅降低内存占用。若使用束搜索宽度为 5,辅助解码会覆盖 logits_to_keep=5,保留多个候选 token 的 logits 以支持多路径探索。
1400
+
1401
+ # 检查生成长度
1402
+ self._validate_generated_length(generation_config, input_ids_length,
1403
+ has_default_max_length)
1404
+
1405
+
1406
+ # pdb.set_trace()
1407
+ # 7. Prepare the cache.
1408
+ # - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
1409
+ # - different models have a different cache name expected by the model (default = "past_key_values")
1410
+ # - `max_length`, prepared above, is used to determine the maximum cache length
1411
+ max_cache_length = generation_config.max_length - 1 #存最长length-1个token cache
1412
+
1413
+ # 如果输入是emb
1414
+ if (inputs_tensor.shape[1] != input_ids_length
1415
+ and model_input_name == "inputs_embeds"
1416
+ and not self.config.is_encoder_decoder):
1417
+ max_cache_length += inputs_tensor.shape[1]
1418
+ self._prepare_cache_for_generation(generation_config, model_kwargs,
1419
+ assistant_model, batch_size,
1420
+ max_cache_length, device)
1421
+
1422
+        # 8. determine generation mode (e.g. assisted decoding when an assistant model is passed)
+        generation_mode = generation_config.get_generation_mode(assistant_model)
+
+        if streamer is not None and (generation_config.num_beams > 1):
+            raise ValueError(
+                "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
+            )
+
+        # device check
+        if not is_torchdynamo_compiling() and self.device.type != input_ids.device.type:
+            warnings.warn(
+                "You are calling .generate() with the `input_ids` being on a device type different"
+                f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
+                f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
+                " Please make sure that you have put `input_ids` to the"
+                f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
+                " running `.generate()`.",
+                UserWarning,
+            )
+
+        # 9. prepare logits processors and stopping criteria
+        prepared_logits_processor = self._get_logits_processor(
+            generation_config=generation_config,
+            input_ids_seq_length=input_ids_length,
+            encoder_input_ids=inputs_tensor,
+            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
+            logits_processor=logits_processor,
+            device=inputs_tensor.device,
+            model_kwargs=model_kwargs,
+            negative_prompt_ids=negative_prompt_ids,
+            negative_prompt_attention_mask=negative_prompt_attention_mask,
+        )  # a list of logits post-processing steps (temperature, repetition penalty, banned words, ...)
+
+        prepared_stopping_criteria = self._get_stopping_criteria_for_seminat(
+            generation_config=generation_config,
+            stopping_criteria=stopping_criteria,
+            tokenizer=tokenizer,
+            **kwargs)  # likewise a list; several stopping conditions can be active at once
+
+        # Set model_kwargs `use_cache` so we can use it later in forward runs
+        model_kwargs["use_cache"] = generation_config.use_cache
+
+        input_ids, model_kwargs = self._expand_inputs_for_generation(
+            input_ids=input_ids,
+            expand_size=generation_config.num_return_sequences,  # 1
+            is_encoder_decoder=self.config.is_encoder_decoder,  # False
+            **model_kwargs,
+        )  # when num_return_sequences > 1, the inputs are tiled accordingly
+
+        result = self._sampleforseminat(
+            input_ids,
+            logits_processor=prepared_logits_processor,  # rewrites the logits before each selection step to
+            # steer generation: avoid repetition, force certain tokens, or mask others out
+            stopping_criteria=prepared_stopping_criteria,
+            generation_config=generation_config,
+            synced_gpus=synced_gpus,  # multi-GPU synchronisation
+            streamer=streamer,
+            prefilling_length=prefilling_length,
+            **model_kwargs,
+        )
+
+        # Convert to legacy cache format if requested
+        if (generation_config.return_legacy_cache is True
+                and not is_torchdynamo_compiling()
+                and hasattr(result, "past_key_values")
+                and hasattr(result.past_key_values, "to_legacy_cache")):
+            result.past_key_values = result.past_key_values.to_legacy_cache()
+        return result
+
+    def _get_stopping_criteria_for_seminat(
+        self,
+        generation_config: GenerationConfig,
+        stopping_criteria: Optional[StoppingCriteriaList],
+        tokenizer: Optional["PreTrainedTokenizerBase"] = None,
+        **kwargs,
+    ) -> StoppingCriteriaList:
+        criteria = StoppingCriteriaList()
+        if generation_config.max_length is not None:
+            max_position_embeddings = getattr(self.config, "max_position_embeddings", None)
+            criteria.append(
+                MaxLengthCriteria(
+                    max_length=generation_config.max_length,
+                    max_position_embeddings=max_position_embeddings,
+                )
+            )
+        if generation_config.max_time is not None:
+            criteria.append(MaxTimeCriteria(max_time=generation_config.max_time))
+        if generation_config.stop_strings is not None:
+            if tokenizer is None:
+                raise ValueError(
+                    "There are one or more stop strings, either in the arguments to `generate` or in the "
+                    "model's generation config, but we could not locate a tokenizer. When generating with "
+                    "stop strings, you must pass the model's tokenizer to the `tokenizer` argument of `generate`."
+                )
+            criteria.append(StopStringCriteria(stop_strings=generation_config.stop_strings, tokenizer=tokenizer))
+        if generation_config._eos_token_tensor is not None:
+            # SemiNAT variant: the EOS check must inspect the last k freshly generated tokens, not only the last one
+            criteria.append(EosTokenCriteriaForSemiNAT(eos_token_id=generation_config._eos_token_tensor))
+        if (
+            generation_config.is_assistant
+            and generation_config.assistant_confidence_threshold is not None
+            and generation_config.assistant_confidence_threshold > 0
+        ):
+            criteria.append(
+                ConfidenceCriteria(assistant_confidence_threshold=generation_config.assistant_confidence_threshold)
+            )
+        criteria = self._merge_criteria_processor_list(criteria, stopping_criteria)
+        return criteria
+
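+    # Illustrative usage sketch (values assumed; mirrors the call made in `_sampleforseminat` below):
+    #   criteria = model._get_stopping_criteria_for_seminat(generation_config, None, tokenizer=tok)
+    #   done = criteria(input_ids, scores, last_k=3)  # per-sequence flags over the 3 newest tokens
+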
+    def _sampleforseminat(
+        self,
+        input_ids: torch.LongTensor,
+        logits_processor: LogitsProcessorList,
+        stopping_criteria: StoppingCriteriaList,
+        generation_config: GenerationConfig,
+        synced_gpus: bool,
+        streamer: Optional["BaseStreamer"],
+        prefilling_length: int,
+        **model_kwargs,
+    ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
+
+        # init values
+        pad_token_id = generation_config._pad_token_tensor  # id of the padding token
+        output_attentions = generation_config.output_attentions  # return attention weights?
+        output_hidden_states = generation_config.output_hidden_states  # return hidden states?
+        output_scores = generation_config.output_scores  # return processed scores?
+        output_logits = generation_config.output_logits  # return raw logits?
+        return_dict_in_generate = generation_config.return_dict_in_generate  # return a structured output?
+        max_length = generation_config.max_length  # maximum generation length
+        has_eos_stopping_criteria = any(
+            hasattr(criteria, "eos_token_id")
+            for criteria in stopping_criteria)  # does any stopping criterion watch an EOS token?
+        do_sample = generation_config.do_sample  # sample instead of decoding greedily?
+
+        # init attention / hidden states / scores tuples
+        scores = () if (return_dict_in_generate and output_scores) else None  # per-step token scores after logits_processor
+        raw_logits = () if (return_dict_in_generate
+                            and output_logits) else None  # raw logits (before softmax)
+        decoder_attentions = () if (return_dict_in_generate
+                                    and output_attentions) else None  # decoder self-attention weights
+        cross_attentions = () if (return_dict_in_generate
+                                  and output_attentions) else None  # only meaningful for encoder-decoder models (T5, BART, ...)
+        decoder_hidden_states = () if (return_dict_in_generate
+                                       and output_hidden_states) else None  # per-layer decoder hidden states
+
+        # encoder-decoder special case (not used here):
+        # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+        if return_dict_in_generate and self.config.is_encoder_decoder:
+            encoder_attentions = model_kwargs["encoder_outputs"].get(
+                "attentions") if output_attentions else None
+            encoder_hidden_states = (
+                model_kwargs["encoder_outputs"].get("hidden_states")
+                if output_hidden_states else None)
+
+        # keep track of which sequences are already finished
+        batch_size, cur_len = input_ids.shape
+        this_peer_finished = False
+        unfinished_sequences = torch.ones(
+            batch_size, dtype=torch.long,
+            device=input_ids.device)  # shape [batch_size]; 1 = still generating
+        model_kwargs = self._get_initial_cache_position(
+            input_ids, model_kwargs)  # initial cache_position, derived from the prompt length
+
+        model_forward = self.__call__  # forward entry point
+        ############ alternative: call the new forward directly
+        # model_forward = self.forward
+        if isinstance(model_kwargs.get("past_key_values"), Cache):
+            is_compileable = model_kwargs[
+                "past_key_values"].is_compileable and self._supports_static_cache  # compile optimisation
+            is_compileable = is_compileable and not self.generation_config.disable_compile  # usually False
+            if is_compileable and (
+                    self.device.type == "cuda"
+                    or generation_config.compile_config._compile_all_devices):
+                os.environ["TOKENIZERS_PARALLELISM"] = "0"
+                model_forward = self.get_compiled_call(
+                    generation_config.compile_config)
+
+        ############ prefilling ############
+        # Carve the prompt into chunks of `prefilling_length` tokens; `s_pos` records the index of
+        # the last token of each chunk and always ends at the final prompt position.
+        start = prefilling_length - 1
+        chunk_length = prefilling_length
+
+        s_pos = [start]
+        while True:
+            start += chunk_length
+            if start >= input_ids.shape[1] - 1:
+                s_pos.append(input_ids.shape[1] - 1)
+                break
+            else:
+                s_pos.append(start)
+
+        slice_pos = torch.tensor(s_pos).unsqueeze(0).to(input_ids.device)
+
+        model_kwargs['slice_pos'] = slice_pos
+        count = (slice_pos != -1).sum().item()
+        ############ prefilling ############
+
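+        # Illustrative sketch (assumed values): with prefilling_length == 4 and a 10-token prompt,
+        # s_pos starts as [3]; the loop appends 7, then 9 (== 10 - 1) and stops, so
+        # slice_pos == tensor([[3, 7, 9]]): the chunk boundaries over the prompt.
+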
+        is_prefill = True
+        while self._has_unfinished_sequences(
+                this_peer_finished,
+                synced_gpus,
+                device=input_ids.device,
+                cur_len=cur_len,
+                max_length=max_length):  # loop until every sequence has finished
+            # prepare model inputs
+            # model_kwargs.keys(): dict_keys(['attention_mask', 'logits_to_keep', 'past_key_values', 'use_cache', 'cache_position', 'nar_kv_cache', 'slice_pos'])
+            model_inputs = self.prepare_inputs_for_generation(  # adds position_ids and input_ids
+                input_ids, **model_kwargs
+            )  # dict_keys(['cache_position', 'past_key_values', 'input_ids', 'inputs_embeds', 'position_ids', 'attention_mask', 'logits_to_keep', 'use_cache'])
+            # This mainly does the following: (1) puts cache_position into the kwargs; (2) puts
+            # past_key_values into the kwargs (the remaining branches are rarely triggered);
+            # (3) slices input_ids so the memory layout stays consistent from step to step;
+            # (4) adds position_ids; (5) trims the tail of position_ids; (6) hands the attention
+            # mask to model_inputs as-is (a DynamicCache is used here); (7) copies the remaining
+            # kwargs over unchanged.
+
+            model_inputs.update({"input_ids": input_ids})
+
+            # prepare variable output controls (note: some models won't accept all output controls)
+            model_inputs.update({"output_attentions": output_attentions}
+                                if output_attentions else {})  # return attention weights?
+            model_inputs.update({"output_hidden_states": output_hidden_states}
+                                if output_hidden_states else {})  # return per-layer hidden states?
+
+            if is_prefill:
+                # dict_keys(['cache_position', 'past_key_values', 'input_ids', 'inputs_embeds', 'position_ids', 'attention_mask', 'logits_to_keep', 'use_cache'])
+                outputs = self.forward(**model_inputs, return_dict=True, is_prefill=True)  # the position_ids passed here are not meaningful
+                is_prefill = False
+            else:
+                outputs = model_forward(**model_inputs, return_dict=True, is_prefill=False)
+
+            ################ seminat ###########################
+            # model_kwargs['slice_pos'] = outputs.slice_pos
+            ################ seminat ###########################
+
+            # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
+            model_kwargs = self._update_model_kwargs_for_generation_for_seminat(
+                outputs,
+                model_kwargs,
+                is_encoder_decoder=self.config.is_encoder_decoder,
+                num_new_tokens=outputs.nat_new_tokens.size(1))
+            # (1) renames the cache entry; (2) decoder-only case (not is_encoder_decoder): if an
+            # attention_mask is present, appends ones at the end; with several new tokens the number
+            # of appended ones must match, and batch decoding needs to single out the masked (0) rows;
+            # (3) advances cache_position by the number of newly generated tokens
+
+            if synced_gpus and this_peer_finished:
+                continue
+
+            # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
+            # (the clone itself is always small)
+
+            # Unlike standard sampling, no per-token post-processing or sampling happens here: the
+            # forward pass already returns the k freshly generated tokens (k is decided inside
+            # forward). The usual single-token path is kept below for reference; it is useful e.g.
+            # for RL, where per-step scores and logits are needed (enable them in generation_config).
+            # next_token_logits = outputs.logits[:, :, :].clone().float()
+            # next_token_scores = logits_processor(input_ids, next_token_logits)  # post-process the new token logits
+            # if return_dict_in_generate:
+            #     if output_scores:
+            #         scores += (next_token_scores,)
+            #     if output_logits:
+            #         raw_logits += (next_token_logits,)
+            #     if output_attentions:
+            #         decoder_attentions += (
+            #             (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
+            #         )
+            #         if self.config.is_encoder_decoder:
+            #             cross_attentions += (outputs.cross_attentions,)
+            #     if output_hidden_states:
+            #         decoder_hidden_states += (
+            #             (outputs.decoder_hidden_states,)
+            #             if self.config.is_encoder_decoder
+            #             else (outputs.hidden_states,)
+            #         )
+            # token selection
+            # if do_sample:
+            #     probs = nn.functional.softmax(next_token_scores, dim=-1)
+            #     next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
+            # else:
+            #     next_tokens = torch.argmax(next_token_scores, dim=-1)  # greedy: take the argmax, no softmax needed
+            next_tokens = outputs.nat_new_tokens
+
+            # update slice_pos: the new chunk ends outputs.logits.size(1) positions after the previous one
+            count = (model_kwargs['slice_pos'] != -1).sum().item()
+            new_slice_pos = model_kwargs['slice_pos'][:, count - 1] + outputs.logits.size(1)
+            model_kwargs['slice_pos'] = torch.cat([model_kwargs['slice_pos'], new_slice_pos.unsqueeze(1)], dim=-1)
+
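+            # Illustrative sketch (assumed values): with slice_pos == tensor([[3, 7, 9]]) and 4 newly
+            # generated tokens, new_slice_pos == 13 and slice_pos becomes tensor([[3, 7, 9, 13]]).
+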
+            # finished sentences should have their next token be a padding token
+            if has_eos_stopping_criteria:
+                # next_tokens is [batch, k]; unsqueeze the mask so it broadcasts over the k new tokens
+                next_tokens = next_tokens * unfinished_sequences[:, None] + pad_token_id * (
+                    1 - unfinished_sequences[:, None]
+                )  # in batch decoding, sequences that finished early keep receiving padding until all are done
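+            # Illustrative sketch (assumed values): with unfinished_sequences == tensor([1, 0]),
+            # pad_token_id == 0 and next_tokens == tensor([[5, 7], [9, 2]]), the result is
+            # tensor([[5, 7], [0, 0]]): the finished second sequence only receives padding.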
+
+            # update generated ids, model inputs, and length for next step
+            # input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)  # single-token variant
+            input_ids = torch.cat([input_ids, next_tokens], dim=-1)  # append the k new tokens
+            if streamer is not None:
+                streamer.put(next_tokens.cpu())
+
+            # update the finished-sequence bookkeeping
+            unfinished_sequences = unfinished_sequences & ~stopping_criteria(
+                input_ids, scores, last_k=next_tokens.size(1))  # flag sequences that just finished
+            this_peer_finished = unfinished_sequences.max() == 0  # are all sequences done?
+            cur_len += outputs.logits.size(1)  # length grows by k
+
+            # This is needed to properly delete outputs.logits which may be very large for first iteration
+            # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
+            del outputs
+
+        if streamer is not None:
+            streamer.end()
+
+        if return_dict_in_generate:
+            if self.config.is_encoder_decoder:
+                return GenerateEncoderDecoderOutput(
+                    sequences=input_ids,
+                    scores=scores,
+                    logits=raw_logits,
+                    encoder_attentions=encoder_attentions,
+                    encoder_hidden_states=encoder_hidden_states,
+                    decoder_attentions=decoder_attentions,
+                    cross_attentions=cross_attentions,
+                    decoder_hidden_states=decoder_hidden_states,
+                    past_key_values=model_kwargs.get("past_key_values"),
+                )
+            else:
+                return GenerateDecoderOnlyOutput(
+                    sequences=input_ids,
+                    scores=scores,
+                    logits=raw_logits,
+                    attentions=decoder_attentions,
+                    hidden_states=decoder_hidden_states,
+                    past_key_values=model_kwargs.get("past_key_values"),
+                )
+        else:
+            return input_ids
+
+    def _update_model_kwargs_for_generation_for_seminat(
+        self,
+        outputs: ModelOutput,
+        model_kwargs: Dict[str, Any],
+        is_encoder_decoder: bool = False,
+        num_new_tokens: int = 1,
+    ) -> Dict[str, Any]:
+        ALL_CACHE_NAMES = [
+            "past_key_values",  # default
+            "cache_params",  # mamba-based models
+            "state",  # rwkv
+            "mems",  # xlnet
+            "past_buckets_states",  # reformer
+        ]
+        # update past_key_values keeping its naming used in model code
+        for possible_cache_name in ALL_CACHE_NAMES:
+            if possible_cache_name in outputs:
+                # TODO (joao): remove output/input mismatch when these old models (xlnet, reformer) are deprecated
+                if possible_cache_name in ("past_buckets_states", "mems"):
+                    cache_name = "past_key_values"
+                else:
+                    cache_name = possible_cache_name
+                model_kwargs[cache_name] = getattr(outputs, possible_cache_name)
+                break
+
+        # update token_type_ids with last value (usually not present here)
+        if "token_type_ids" in model_kwargs:
+            token_type_ids = model_kwargs["token_type_ids"]
+            model_kwargs["token_type_ids"] = torch.cat(
+                [token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
+
+        if not is_encoder_decoder:
+            # update attention mask: this is the branch that matters here
+            if "attention_mask" in model_kwargs:
+                attention_mask = model_kwargs["attention_mask"]
+                model_kwargs["attention_mask"] = torch.cat(
+                    [
+                        attention_mask,
+                        attention_mask.new_ones(
+                            (attention_mask.shape[0], num_new_tokens
+                             ))  # 1 -> num_new_tokens: attend to all freshly generated tokens at once
+                    ],
+                    dim=-1)
+        else:
+            # update decoder attention mask
+            if "decoder_attention_mask" in model_kwargs:
+                decoder_attention_mask = model_kwargs["decoder_attention_mask"]
+                model_kwargs["decoder_attention_mask"] = torch.cat(
+                    [
+                        decoder_attention_mask,
+                        decoder_attention_mask.new_ones(
+                            (decoder_attention_mask.shape[0], 1))
+                    ],
+                    dim=-1,
+                )
+
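+        # Illustrative sketch (assumed shapes): a decoder-only attention_mask of shape [1, 40] grows
+        # to [1, 43] when num_new_tokens == 3; the three appended ones mark the new positions as visible.
+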
+        if model_kwargs.get("use_cache", True):
+            # with a cache, cache_position only needs to cover the freshly generated positions
+            last_position = model_kwargs["cache_position"][-1:].item()
+            model_kwargs["cache_position"] = torch.arange(
+                last_position + 1,
+                last_position + num_new_tokens + 1,
+                dtype=model_kwargs["cache_position"].dtype).to(
+                    model_kwargs["cache_position"].device)
+        else:
+            past_positions = model_kwargs.pop("cache_position")
+            new_positions = torch.arange(
+                past_positions[-1] + 1,
+                past_positions[-1] + num_new_tokens + 1,
+                dtype=past_positions.dtype).to(past_positions.device)
+            model_kwargs["cache_position"] = torch.cat(
+                (past_positions, new_positions))
+        return model_kwargs
+
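+    # Illustrative sketch (assumed values): if cache_position ended at 41 and num_new_tokens == 3,
+    # the cached branch yields tensor([42, 43, 44]), while the uncached branch appends those three
+    # positions to the full position history instead.
+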
+class AbsolutePositionalEncoding(nn.Module):
+    def __init__(self, hidden_size: int, max_len: int = 2048):
+        """
+        Initialise sinusoidal absolute positional encodings.
+
+        Args:
+            hidden_size (int): hidden dimension
+            max_len (int): maximum sequence length
+        """
+        super().__init__()
+
+        # build the positional encoding matrix
+        pe = torch.zeros(max_len, hidden_size)
+        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+        div_term = torch.exp(torch.arange(0, hidden_size, 2).float() * (-math.log(10000.0) / hidden_size))
+
+        # interleave sine and cosine components
+        pe[:, 0::2] = torch.sin(position * div_term)
+        pe[:, 1::2] = torch.cos(position * div_term)
+        pe = pe.unsqueeze(0)  # [1, max_len, hidden_size]
+
+        # register as a buffer (not trained)
+        self.register_buffer('pe', pe)
+
+    def forward(self, x: torch.Tensor, position_ids: Optional[torch.Tensor] = None) -> torch.Tensor:
+        """
+        Add positional encodings to the input tensor.
+
+        Args:
+            x (torch.Tensor): input of shape (batch_size, seq_len, hidden_size)
+            position_ids (torch.Tensor, optional): position indices of shape (batch_size, seq_len),
+                with values 0, 1, 2, ...
+
+        Returns:
+            torch.Tensor: the input with positional encodings added, same shape as `x`
+        """
+        if position_ids is None:
+            seq_len = x.size(1)
+            pos_emb = self.pe[:, :seq_len]  # [1, seq_len, hidden_size], broadcasts over the batch
+        else:
+            # index the buffer on its length dimension so the result is [batch_size, seq_len,
+            # hidden_size] and broadcasts against `x` (pe[:, position_ids] would add a spurious axis)
+            pos_emb = self.pe[0, position_ids]
+
+        pos = x + pos_emb
+        return pos
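+
+# Illustrative usage sketch (hypothetical values, not part of the uploaded model):
+#   enc = AbsolutePositionalEncoding(hidden_size=64, max_len=2048)
+#   x = torch.randn(2, 10, 64)                        # (batch, seq, hidden)
+#   y = enc(x)                                        # positions 0..9, shape (2, 10, 64)
+#   pos = torch.arange(10).unsqueeze(0).repeat(2, 1)
+#   y2 = enc(x, position_ids=pos)                     # identical to y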