davda54 committed
Commit 2ba6c68 · verified · 1 Parent(s): 1d3b2df

Update modeling_norbert.py

Files changed (1)
  1. modeling_norbert.py +0 -3
modeling_norbert.py CHANGED
@@ -57,7 +57,6 @@ class MaskClassifier(nn.Module):
             nn.Dropout(config.hidden_dropout_prob),
             nn.Linear(subword_embedding.size(1), subword_embedding.size(0))
         )
-        self.initialize(config.hidden_size, subword_embedding)
 
     def forward(self, x, masked_lm_labels=None):
         if masked_lm_labels is not None:
@@ -97,7 +96,6 @@ class FeedForward(nn.Module):
             nn.Linear(config.intermediate_size, config.hidden_size, bias=False),
             nn.Dropout(config.hidden_dropout_prob)
         )
-        self.initialize(config.hidden_size)
 
     def forward(self, x):
         return self.mlp(x)
@@ -148,7 +146,6 @@ class Attention(nn.Module):
 
         self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
         self.scale = 1.0 / math.sqrt(3 * self.head_size)
-        self.initialize()
 
     def make_log_bucket_position(self, relative_pos, bucket_size, max_position):
         sign = torch.sign(relative_pos)
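
For context, all three hunks remove the same pattern: a submodule calling its own `self.initialize(...)` at the end of `__init__`. The sketch below illustrates that pattern around the `FeedForward` hunk; it is a minimal illustration, not the NorBERT source. The `initialize` body, the activation choice, and the constructor signature are assumptions made for the example; after this commit, weight initialization is presumably performed elsewhere (e.g. once, by the top-level model) rather than per submodule.

```python
import math

import torch
import torch.nn as nn


class FeedForward(nn.Module):
    """Minimal sketch of a submodule that initialized its own weights."""

    def __init__(self, hidden_size, intermediate_size, hidden_dropout_prob):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(hidden_size, intermediate_size, bias=False),
            nn.GELU(),  # assumption; the actual activation may differ
            nn.Linear(intermediate_size, hidden_size, bias=False),
            nn.Dropout(hidden_dropout_prob),
        )
        # Before commit 2ba6c68, the module initialized itself here:
        # self.initialize(hidden_size)

    def initialize(self, hidden_size):
        # Hypothetical body: truncated-normal init with a width-dependent std.
        std = math.sqrt(2.0 / (5.0 * hidden_size))
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.trunc_normal_(
                    module.weight, mean=0.0, std=std, a=-2.0 * std, b=2.0 * std
                )

    def forward(self, x):
        return self.mlp(x)


if __name__ == "__main__":
    ff = FeedForward(hidden_size=768, intermediate_size=2048, hidden_dropout_prob=0.1)
    out = ff(torch.randn(2, 4, 768))
    print(out.shape)  # torch.Size([2, 4, 768])
```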
 