sheep33333 committed on
Commit
4c09c34
·
verified ·
1 Parent(s): 63b4d72

Upload modelforseminat_v5.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. modelforseminat_v5.py +39 -34
modelforseminat_v5.py CHANGED
@@ -211,7 +211,14 @@ class TwoLayerMLP(nn.Module):
211
 
212
 
213
 
214
-
 
 
 
 
 
 
 
215
 
216
 
217
 
@@ -224,7 +231,7 @@ class TwoLayerMLP(nn.Module):
224
  class Olmo2AttentionForSemiNAT(nn.Module):
225
  """Multi-headed attention from 'Attention Is All You Need' paper"""
226
 
227
- def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None, is_causal: bool = True):
228
  super().__init__()
229
  self.config = config
230
  self.layer_idx = layer_idx
@@ -330,7 +337,7 @@ class Olmo2DecoderLayerForSemiNAT(nn.Module):
330
 
331
  def __init__(
332
  self,
333
- config: Olmo2Config,
334
  layer_idx: int,
335
  is_causal: bool = True,
336
  ):
@@ -395,7 +402,7 @@ class Olmo2DecoderLayerForSemiNAT(nn.Module):
395
 
396
  class NATEncoderForSemiNAT(nn.Module):
397
 
398
- def __init__(self, config: Olmo2Config, num_layer: int = 1):
399
  super().__init__()
400
  self.num_layer = num_layer
401
  self.encoder_layers = nn.ModuleList([
@@ -431,7 +438,7 @@ class NATEncoderForSemiNAT(nn.Module):
431
 
432
  class NATDecoderForSemiNAT(nn.Module):
433
 
434
- def __init__(self, config: Olmo2Config, num_layer: int = 1):
435
  super().__init__()
436
  self.num_layer = num_layer
437
  self.decoder_layers = nn.ModuleList([
@@ -471,8 +478,8 @@ class Olmo2ModelForSemiNAT(Olmo2Model):
471
  for layer_idx in range(config.num_hidden_layers)
472
  ])
473
 
474
- self.decoder = NATDecoderForSemiNAT(config, 1)
475
- self.encoder = NATEncoderForSemiNAT(config, 1)
476
 
477
 
478
  # pdb.set_trace()
@@ -487,8 +494,10 @@ class Olmo2ModelForSemiNAT(Olmo2Model):
487
 
488
  self.length_predictor = nn.Linear(config.hidden_size,
489
  self.chunk_size_limit)
490
-
491
- # self.linear_projection = TwoLayerMLP(config.hidden_size)
 
 
492
 
493
 
494
  def forward(
@@ -780,13 +789,10 @@ class Olmo2ModelForSemiNAT(Olmo2Model):
780
  hidden_states, length_ground_truth, self.chunk_size_limit, skip_val=-100)
781
 
782
 
783
-
784
-
785
-
786
-
787
-
788
-
789
-
790
  # pdb.set_trace()
791
  # for b in range(bs):
792
  # for i in range(slice_num[b]):
@@ -812,22 +818,8 @@ class Olmo2ModelForSemiNAT(Olmo2Model):
812
 
813
 
814
  # pdb.set_trace()
815
- # nar_chunk_position = torch.arange(
816
- # 0, self.chunk_size_limit).unsqueeze(0).repeat(
817
- # accumu_num,
818
- # 1).to(hidden_states.device) # bs * max_chunk_num
819
-
820
- # nar_position_embeddings = self.rotary_emb(nat_attention_mask,
821
- # nar_chunk_position)
822
-
823
-
824
- # pdb.set_trace()
825
-
826
- nat_input_embeddings = self.pos_encoder(nat_input_embeddings) # 加上绝对位置编码
827
 
828
 
829
- self.decoder = self.decoder.to(dtype=nat_input_embeddings.dtype)
830
-
831
 
832
  # 处理attention
833
  mask_nat_attention_mask = self.nat_prepare_4d_full_attention_mask_without_causal(
@@ -835,19 +827,32 @@ class Olmo2ModelForSemiNAT(Olmo2Model):
835
  dtype=nat_attention_mask.dtype,
836
  device=nat_attention_mask.device)
837
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
838
 
839
  # pdb.set_trace()
840
  nar_hidden_states = self.decoder(
841
  nat_input_embeddings,
842
  attention_mask=mask_nat_attention_mask,
843
  # attention_mask=None,
844
- # position_embeddings=nar_position_embeddings,
845
- position_embeddings=None, #使用绝对位置,不传相对位置
846
  output_attentions=output_attentions,
847
  use_cache=use_cache,
848
  cache_position=None,
849
  )
850
-
851
  nar_hidden_states = self.norm(
852
  nar_hidden_states) # bs * max_chunk_num * hidden_size
853
 
@@ -1895,7 +1900,7 @@ class Olmo2ForCausalLMForSemiNAT(Olmo2ForCausalLM):
1895
  model_kwargs[
1896
  'cache_position'] = new_cache_position # 更新一下cache position
1897
 
1898
- # pdb.set_trace()
1899
  ############ prefilling ############
1900
 
1901
  is_prefill = True
 
211
 
212
 
213
 
214
class Olmo2ConfigForSemiNAT(Olmo2Config):
    """Configuration for the semi-non-autoregressive (SemiNAT) Olmo2 variant.

    Extends ``Olmo2Config`` with the SemiNAT-specific knobs; all remaining
    keyword arguments are forwarded unchanged to the base config.

    Args:
        chunk_size_limit: Maximum number of tokens per NAT chunk.
        decoder_layers: Number of layers in the NAT decoder stack.
        encoder_layer: Number of layers in the NAT encoder stack.
            (NOTE(review): singular name is inconsistent with
            ``decoder_layers`` but is kept for caller compatibility.)
        mlp: Whether to apply a two-layer MLP projection to the NAT inputs.
        position_embedding_type: ``"absolute"`` or ``"relative"`` positional
            encoding for the NAT decoder.
    """

    def __init__(
        self,
        chunk_size_limit: int = 5,
        decoder_layers: int = 1,
        encoder_layer: int = 1,
        mlp: bool = False,
        position_embedding_type: str = "absolute",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # SemiNAT-specific settings, stored verbatim on the config object.
        self.chunk_size_limit = chunk_size_limit
        self.decoder_layers = decoder_layers
        self.encoder_layer = encoder_layer
        self.mlp = mlp
        self.position_embedding_type = position_embedding_type
222
 
223
 
224
 
 
231
  class Olmo2AttentionForSemiNAT(nn.Module):
232
  """Multi-headed attention from 'Attention Is All You Need' paper"""
233
 
234
+ def __init__(self, config: Olmo2ConfigForSemiNAT, layer_idx: Optional[int] = None, is_causal: bool = True):
235
  super().__init__()
236
  self.config = config
237
  self.layer_idx = layer_idx
 
337
 
338
  def __init__(
339
  self,
340
+ config: Olmo2ConfigForSemiNAT,
341
  layer_idx: int,
342
  is_causal: bool = True,
343
  ):
 
402
 
403
  class NATEncoderForSemiNAT(nn.Module):
404
 
405
+ def __init__(self, config: Olmo2ConfigForSemiNAT, num_layer: int = 1):
406
  super().__init__()
407
  self.num_layer = num_layer
408
  self.encoder_layers = nn.ModuleList([
 
438
 
439
  class NATDecoderForSemiNAT(nn.Module):
440
 
441
+ def __init__(self, config: Olmo2ConfigForSemiNAT, num_layer: int = 1):
442
  super().__init__()
443
  self.num_layer = num_layer
444
  self.decoder_layers = nn.ModuleList([
 
478
  for layer_idx in range(config.num_hidden_layers)
479
  ])
480
 
481
+ self.decoder = NATDecoderForSemiNAT(config, config.decoder_layers)
482
+ self.encoder = NATEncoderForSemiNAT(config, config.encoder_layer)
483
 
484
 
485
  # pdb.set_trace()
 
494
 
495
  self.length_predictor = nn.Linear(config.hidden_size,
496
  self.chunk_size_limit)
497
+ self.mlp = config.mlp
498
+ if self.mlp:
499
+ self.linear_projection = TwoLayerMLP(config.hidden_size)
500
+ self.position_embedding_type = config.position_embedding_type
501
 
502
 
503
  def forward(
 
789
  hidden_states, length_ground_truth, self.chunk_size_limit, skip_val=-100)
790
 
791
 
792
+ if self.mlp:
793
+ nat_input_embeddings = self.linear_projection(nat_input_embeddings)
794
+
795
+
 
 
 
796
  # pdb.set_trace()
797
  # for b in range(bs):
798
  # for i in range(slice_num[b]):
 
818
 
819
 
820
  # pdb.set_trace()
 
 
 
 
 
 
 
 
 
 
 
 
821
 
822
 
 
 
823
 
824
  # 处理attention
825
  mask_nat_attention_mask = self.nat_prepare_4d_full_attention_mask_without_causal(
 
827
  dtype=nat_attention_mask.dtype,
828
  device=nat_attention_mask.device)
829
 
830
+ # pdb.set_trace()
831
+
832
+ self.decoder = self.decoder.to(dtype=nat_input_embeddings.dtype)
833
+ if self.position_embedding_type == "relative":
834
+ nar_chunk_position = torch.arange(
835
+ 0, self.chunk_size_limit).unsqueeze(0).repeat(
836
+ accumu_num,
837
+ 1).to(hidden_states.device)
838
+
839
+ pos = self.rotary_emb(nat_attention_mask, nar_chunk_position)
840
+
841
+ elif self.position_embedding_type == "absolute":
842
+ nat_input_embeddings = self.pos_encoder(nat_input_embeddings) # 加上绝对位置编码
843
+ pos = None
844
 
845
  # pdb.set_trace()
846
  nar_hidden_states = self.decoder(
847
  nat_input_embeddings,
848
  attention_mask=mask_nat_attention_mask,
849
  # attention_mask=None,
850
+ position_embeddings=pos,
851
+ # position_embeddings=None, #使用绝对位置,不传相对位置
852
  output_attentions=output_attentions,
853
  use_cache=use_cache,
854
  cache_position=None,
855
  )
 
856
  nar_hidden_states = self.norm(
857
  nar_hidden_states) # bs * max_chunk_num * hidden_size
858
 
 
1900
  model_kwargs[
1901
  'cache_position'] = new_cache_position # 更新一下cache position
1902
 
1903
+ pdb.set_trace()
1904
  ############ prefilling ############
1905
 
1906
  is_prefill = True