ykzhang721 committed on
Commit
fcc4ce0
·
verified ·
1 Parent(s): f8c941f

Upload modelforseminat_v5.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. modelforseminat_v5.py +50 -62
modelforseminat_v5.py CHANGED
@@ -185,14 +185,16 @@ class TwoLayerMLP(nn.Module):
185
 
186
 
187
  class Olmo2ConfigForSemiNAT(Olmo2Config):
188
- def __init__(self, chunk_size_limit: int = 5, decoder_layers: int = 1, encoder_layer: int = 1, mlp: bool = False, position_embedding_type: str = "absolute", **kwargs):
189
  super().__init__(**kwargs)
190
  self.chunk_size_limit = chunk_size_limit
191
  self.decoder_layers = decoder_layers
192
  self.encoder_layer = encoder_layer
193
  self.mlp = mlp
194
  self.position_embedding_type = position_embedding_type
195
-
 
 
196
 
197
 
198
  class Olmo2AttentionForSemiNAT(nn.Module):
@@ -265,23 +267,26 @@ class Olmo2AttentionForSemiNAT(nn.Module):
265
  key_states, value_states = past_key_value.update(
266
  key_states, value_states, self.layer_idx, cache_kwargs)
267
 
268
- attention_interface: Callable = eager_attention_forward
269
-
270
 
 
271
  # pdb.set_trace()
272
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
273
 
274
- self.config._attn_implementation = "sdpa"
275
- if self.config._attn_implementation != "eager":
276
- if self.config._attn_implementation == "sdpa" and kwargs.get(
277
- "output_attentions", False):
278
- logger.warning_once(
279
- "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
280
- 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
281
- )
282
- else:
283
- attention_interface = ALL_ATTENTION_FUNCTIONS[
284
- self.config._attn_implementation]
285
 
286
  # pdb.set_trace()
287
  attn_output, attn_weights = attention_interface(
@@ -292,6 +297,7 @@ class Olmo2AttentionForSemiNAT(nn.Module):
292
  attention_mask,
293
  dropout=0.0 if not self.training else self.attention_dropout,
294
  scaling=self.scaling,
 
295
  **kwargs,
296
  )
297
  # pdb.set_trace()
@@ -377,7 +383,7 @@ class NATEncoderForSemiNAT(nn.Module):
377
  super().__init__()
378
  self.num_layer = num_layer
379
  self.encoder_layers = nn.ModuleList([
380
- Olmo2DecoderLayerForSemiNAT(config, layer_idx)
381
  for layer_idx in range(self.num_layer)
382
  ])
383
 
@@ -567,7 +573,7 @@ class Olmo2ModelForSemiNAT(Olmo2Model):
567
  length_ground_truth = length_ground_truth[:,:max_chunk_num]
568
  chunk_position_ids = position_ids[:,:max_chunk_num]
569
  chunk_cache_position = cache_position[:max_chunk_num]
570
-
571
  else:
572
 
573
  encoded_input = self.encoder(inputs_embeds[:,position_ids.squeeze(0)],position_embeddings=position_embeddings)
@@ -805,27 +811,25 @@ class Olmo2ModelForSemiNAT(Olmo2Model):
805
  nar_chunk_position = torch.arange(
806
  0, self.chunk_size_limit).unsqueeze(0).repeat(
807
  accumu_num,
808
- 1).to(hidden_states.device)
809
-
810
  pos = self.rotary_emb(nat_attention_mask, nar_chunk_position)
811
 
812
  elif self.position_embedding_type == "absolute":
813
  nat_input_embeddings = self.pos_encoder(nat_input_embeddings) # 加上绝对位置编码
814
  pos = None
815
 
816
-
817
  nar_hidden_states = self.decoder(
818
  nat_input_embeddings,
819
- attention_mask=mask_nat_attention_mask,
820
  # attention_mask=None,
821
  position_embeddings=pos,
822
- # position_embeddings=None, #使��绝对位置,不传相对位置
823
  output_attentions=output_attentions,
824
  use_cache=use_cache,
825
  cache_position=None,
826
  )
827
  nar_hidden_states = self.norm(
828
- nar_hidden_states) # bs * max_chunk_num * hidden_size
829
  # pdb.set_trace()
830
 
831
  return ModelOutputWithPastForSemiNAT(
@@ -1235,53 +1239,37 @@ class Olmo2ForCausalLMForSemiNAT(Olmo2ForCausalLM):
1235
  length_logits = outputs.length_logits
1236
 
1237
  new_length_ground_truth = torch.where(
1238
- length_ground_truth != -100, # 条件:不等于 -100
1239
- length_ground_truth - 1, # 如果条件为真,执行 labels - 1
1240
- length_ground_truth # 否则保持原值
1241
  )
1242
 
1243
- # pdb.set_trace()
1244
-
1245
  shift_length_logits = length_logits[:, :-1, :]
1246
  shift_new_length_ground_truth = new_length_ground_truth[:, 1:]
1247
 
1248
- logits_flat = shift_length_logits.reshape(
1249
- -1,
1250
- self.chunk_size_limit) # 形状变为 [bs * length, chunk_size_limit]
1251
- labels_flat = shift_new_length_ground_truth.reshape(
1252
- -1) # [bs * length]
1253
-
1254
- # softmax logits to get probability
1255
- logits_flat = torch.nn.functional.softmax(logits_flat, dim=-1)
1256
-
1257
- # 修改 loss 为 MSE: 首先根据 logits 加权得到预测长度(注意不是 argmax),之后与 label 计算 MSE
1258
-
1259
- # pdb.set_trace()
1260
- # 计算预测长度
1261
- predicted_lengths = torch.sum(
1262
- logits_flat * torch.arange(self.chunk_size_limit).to(
1263
- chunk_hidden_states.device).to(chunk_hidden_states.dtype),
1264
- dim=1)
1265
- # 计算预测长度与真实长度之间的均方误差
1266
-
1267
-
1268
-
1269
 
1270
- shift_slice_label = slice_label[:, 1:length_logits.size(1)] #用最大chunk数阶段
1271
  slice_label_flat = shift_slice_label.reshape(-1)
1272
-
1273
- # 对应 labels_flat 的 global indices
1274
- indices = torch.arange(0, labels_flat.size(0), device=labels_flat.device)
1275
  mask = (slice_label_flat == -1)
1276
-
1277
- # pdb.set_trace()
1278
- # labels_not_ignored = (labels_flat[indices] != -100)
1279
- # final_mask = mask & labels_not_ignored
1280
- labels_flat[indices[mask]] = -100
1281
-
1282
-
1283
- loss1 = torch.mean((predicted_lengths[labels_flat != -100] -
1284
- labels_flat[labels_flat != -100].float())**2)
 
 
 
 
 
 
 
 
1285
 
1286
  # pdb.set_trace()
1287
 
 
185
 
186
 
187
class Olmo2ConfigForSemiNAT(Olmo2Config):
    """Configuration for the semi-non-autoregressive (SemiNAT) Olmo2 variant.

    Extends ``Olmo2Config`` with chunk-decoding and length-prediction
    hyperparameters used by the SemiNAT encoder/decoder stacks.
    """

    def __init__(
        self,
        chunk_size_limit: int = 5,
        decoder_layers: int = 1,
        encoder_layer: int = 1,
        mlp: bool = False,
        position_embedding_type: str = "absolute",
        attn_implementation: str = "sdpa",
        length_loss_type: str = "ce",
        **kwargs,
    ):
        """
        Args:
            chunk_size_limit: Maximum number of tokens produced per chunk;
                also the width of the length-prediction logits.
            decoder_layers: Number of layers in the NAT decoder stack.
            encoder_layer: Number of layers in the NAT encoder stack.
            mlp: Whether the auxiliary two-layer MLP is enabled.
            position_embedding_type: Chunk position-embedding scheme;
                "absolute" uses a learned/sinusoidal encoder, any other
                value is assumed rotary — confirm against the model code.
            attn_implementation: Attention backend identifier (e.g. "sdpa").
            length_loss_type: Length-loss flavor: "ce" (cross-entropy over
                chunk sizes) or "mse" (soft expected-length regression).
            **kwargs: Forwarded unchanged to ``Olmo2Config``.
        """
        super().__init__(**kwargs)
        self.chunk_size_limit = chunk_size_limit
        self.decoder_layers = decoder_layers
        self.encoder_layer = encoder_layer
        self.mlp = mlp
        self.position_embedding_type = position_embedding_type
        # NOTE(review): writes the private HF attribute directly; recent
        # transformers versions manage `_attn_implementation` via a property /
        # kwargs — verify this assignment still takes effect after upgrades.
        self._attn_implementation = attn_implementation
        self.length_loss_type = length_loss_type
198
 
199
 
200
  class Olmo2AttentionForSemiNAT(nn.Module):
 
267
  key_states, value_states = past_key_value.update(
268
  key_states, value_states, self.layer_idx, cache_kwargs)
269
 
270
+ # attention_interface: Callable = eager_attention_forward
 
271
 
272
+
273
  # pdb.set_trace()
274
 
275
+
276
+
277
+
278
+ # if self.config._attn_implementation != "eager":
279
+ # if self.config._attn_implementation == "sdpa" and kwargs.get(
280
+ # "output_attentions", False):
281
+ # logger.warning_once(
282
+ # "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
283
+ # 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
284
+ # )
285
+ # else:
286
+ # attention_interface = ALL_ATTENTION_FUNCTIONS[
287
+ # self.config._attn_implementation]
288
+ attention_interface: Callable = ALL_ATTENTION_FUNCTIONS["sdpa"] #针对encoder和decoder的新设定
289
 
 
 
 
 
 
 
 
 
 
 
 
290
 
291
  # pdb.set_trace()
292
  attn_output, attn_weights = attention_interface(
 
297
  attention_mask,
298
  dropout=0.0 if not self.training else self.attention_dropout,
299
  scaling=self.scaling,
300
+ is_causal=self.is_causal,
301
  **kwargs,
302
  )
303
  # pdb.set_trace()
 
383
  super().__init__()
384
  self.num_layer = num_layer
385
  self.encoder_layers = nn.ModuleList([
386
+ Olmo2DecoderLayerForSemiNAT(config, layer_idx) #check下需不需要is causal false,但attn_mask优先级高于is_causal
387
  for layer_idx in range(self.num_layer)
388
  ])
389
 
 
573
  length_ground_truth = length_ground_truth[:,:max_chunk_num]
574
  chunk_position_ids = position_ids[:,:max_chunk_num]
575
  chunk_cache_position = cache_position[:max_chunk_num]
576
+ # pdb.set_trace()
577
  else:
578
 
579
  encoded_input = self.encoder(inputs_embeds[:,position_ids.squeeze(0)],position_embeddings=position_embeddings)
 
811
  nar_chunk_position = torch.arange(
812
  0, self.chunk_size_limit).unsqueeze(0).repeat(
813
  accumu_num,
814
+ 1).to(hidden_states.device)
 
815
  pos = self.rotary_emb(nat_attention_mask, nar_chunk_position)
816
 
817
  elif self.position_embedding_type == "absolute":
818
  nat_input_embeddings = self.pos_encoder(nat_input_embeddings) # 加上绝对位置编码
819
  pos = None
820
 
821
+ # pdb.set_trace()
822
  nar_hidden_states = self.decoder(
823
  nat_input_embeddings,
824
+ attention_mask=mask_nat_attention_mask, #改下padding mask
825
  # attention_mask=None,
826
  position_embeddings=pos,
 
827
  output_attentions=output_attentions,
828
  use_cache=use_cache,
829
  cache_position=None,
830
  )
831
  nar_hidden_states = self.norm(
832
+ nar_hidden_states)
833
  # pdb.set_trace()
834
 
835
  return ModelOutputWithPastForSemiNAT(
 
1239
  length_logits = outputs.length_logits
1240
 
1241
  new_length_ground_truth = torch.where(
1242
+ length_ground_truth != -100,
1243
+ length_ground_truth - 1,
1244
+ length_ground_truth
1245
  )
1246
 
 
 
1247
  shift_length_logits = length_logits[:, :-1, :]
1248
  shift_new_length_ground_truth = new_length_ground_truth[:, 1:]
1249
 
1250
+ logits_flat = shift_length_logits.reshape(-1, self.chunk_size_limit)
1251
+ labels_flat = shift_new_length_ground_truth.reshape(-1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1252
 
1253
+ shift_slice_label = slice_label[:, 1:length_logits.size(1)]
1254
  slice_label_flat = shift_slice_label.reshape(-1)
 
 
 
1255
  mask = (slice_label_flat == -1)
1256
+ labels_flat[mask] = -100
1257
+
1258
+ length_loss_type = getattr(self.config, "length_loss_type", "ce")
1259
+ if length_loss_type == "mse":
1260
+ logits_softmax = torch.nn.functional.softmax(logits_flat, dim=-1)
1261
+ predicted_lengths = torch.sum(
1262
+ logits_softmax * torch.arange(self.chunk_size_limit).to(
1263
+ chunk_hidden_states.device).to(chunk_hidden_states.dtype),
1264
+ dim=1
1265
+ )
1266
+ loss1 = torch.mean((predicted_lengths[labels_flat != -100] -
1267
+ labels_flat[labels_flat != -100].float()) ** 2)
1268
+ elif length_loss_type == "ce": # cross entropy
1269
+ loss1 = F.cross_entropy(
1270
+ logits_flat[labels_flat != -100],
1271
+ labels_flat[labels_flat != -100]
1272
+ )
1273
 
1274
  # pdb.set_trace()
1275