Update modeling_norbert.py

modeling_norbert.py  CHANGED  (+12 -47)
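This commit consolidates weight initialization: the per-module initialize() helpers on MaskClassifier, FeedForward, Attention, and Embedding, the Classifier._init_weights() helper, and the post_init/_init_weights overrides on the sequence- and token-classification heads are all removed in favor of a single type-dispatched NorbertPreTrainedModel._init_weights(module) hook, the standard Hugging Face Transformers pattern. Note that the self.initialize(...) call sites in MaskClassifier and FeedForward remain as unchanged context lines in the hunks below.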
@@ -59,13 +59,6 @@ class MaskClassifier(nn.Module):
         )
         self.initialize(config.hidden_size, subword_embedding)
 
-    def initialize(self, hidden_size, embedding):
-        std = math.sqrt(2.0 / (5.0 * hidden_size))
-        nn.init.trunc_normal_(self.nonlinearity[1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
-        self.nonlinearity[-1].weight = embedding
-        self.nonlinearity[1].bias.data.zero_()
-        self.nonlinearity[-1].bias.data.zero_()
-
     def forward(self, x, masked_lm_labels=None):
         if masked_lm_labels is not None:
             x = torch.index_select(x.flatten(0, 1), 0, torch.nonzero(masked_lm_labels.flatten() != -100).squeeze())
@@ -106,11 +99,6 @@ class FeedForward(nn.Module):
         )
         self.initialize(config.hidden_size)
 
-    def initialize(self, hidden_size):
-        std = math.sqrt(2.0 / (5.0 * hidden_size))
-        nn.init.trunc_normal_(self.mlp[1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
-        nn.init.trunc_normal_(self.mlp[-2].weight, mean=0.0, std=std, a=-2*std, b=2*std)
-
     def forward(self, x):
         return self.mlp(x)
 
@@ -170,15 +158,6 @@ class Attention(nn.Module):
         bucket_pos = torch.where(abs_pos <= mid, relative_pos, log_pos * sign).long()
         return bucket_pos
 
-    def initialize(self):
-        std = math.sqrt(2.0 / (5.0 * self.hidden_size))
-        nn.init.trunc_normal_(self.in_proj_qk.weight, mean=0.0, std=std, a=-2*std, b=2*std)
-        nn.init.trunc_normal_(self.in_proj_v.weight, mean=0.0, std=std, a=-2*std, b=2*std)
-        nn.init.trunc_normal_(self.out_proj.weight, mean=0.0, std=std, a=-2*std, b=2*std)
-        self.in_proj_qk.bias.data.zero_()
-        self.in_proj_v.bias.data.zero_()
-        self.out_proj.bias.data.zero_()
-
     def compute_attention_scores(self, hidden_states, relative_embedding):
         key_len, batch_size, _ = hidden_states.size()
         query_len = key_len
@@ -246,13 +225,6 @@ class Embedding(nn.Module):
         self.relative_embedding = nn.Parameter(torch.empty(2 * config.position_bucket_size - 1, config.hidden_size))
         self.relative_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
 
-        self.initialize()
-
-    def initialize(self):
-        std = math.sqrt(2.0 / (5.0 * self.hidden_size))
-        nn.init.trunc_normal_(self.relative_embedding, mean=0.0, std=std, a=-2*std, b=2*std)
-        nn.init.trunc_normal_(self.word_embedding.weight, mean=0.0, std=std, a=-2*std, b=2*std)
-
     def forward(self, input_ids):
         word_embedding = self.dropout(self.word_layer_norm(self.word_embedding(input_ids)))
         relative_embeddings = self.relative_layer_norm(self.relative_embedding)
@@ -273,7 +245,18 @@ class NorbertPreTrainedModel(PreTrainedModel):
         module.activation_checkpointing = value
 
     def _init_weights(self, module):
-
+        std = math.sqrt(2.0 / (5.0 * self.hidden_size))
+
+        if isinstance(module, nn.Linear):
+            nn.init.trunc_normal_(module.weight.data, mean=0.0, std=std, a=-2*std, b=2*std)
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            nn.init.trunc_normal_(module.weight.data, mean=0.0, std=std, a=-2*std, b=2*std)
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
 
 
 class NorbertModel(NorbertPreTrainedModel):
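For context, here is a minimal, self-contained sketch of the pattern the new hook relies on; MiniModel is a hypothetical stand-in, not part of this commit. nn.Module.apply(fn) visits every submodule recursively and passes each one to the hook, which is why a single isinstance dispatch can replace the per-module initialize() helpers (Transformers' PreTrainedModel.post_init() triggers the same traversal). The sketch omits the normal_() call from the hunk above, which as written resamples and overwrites the truncated-normal draw for nn.Linear weights.

import math
import torch.nn as nn

# Minimal sketch (hypothetical MiniModel, not part of this commit): one
# type-dispatched hook replaces the per-module initialize() helpers because
# nn.Module.apply(fn) visits every submodule recursively.
class MiniModel(nn.Module):
    def __init__(self, hidden_size=32, vocab_size=100):
        super().__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.linear = nn.Linear(hidden_size, hidden_size)
        self.norm = nn.LayerNorm(hidden_size)
        # PreTrainedModel.post_init() relies on the same apply() traversal.
        self.apply(self._init_weights)

    def _init_weights(self, module):
        std = math.sqrt(2.0 / (5.0 * self.hidden_size))  # ~0.112 for hidden_size=32
        if isinstance(module, nn.Linear):
            nn.init.trunc_normal_(module.weight.data, mean=0.0, std=std, a=-2*std, b=2*std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            nn.init.trunc_normal_(module.weight.data, mean=0.0, std=std, a=-2*std, b=2*std)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

model = MiniModel()  # every Linear and Embedding weight is now truncated-normal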
@@ -414,15 +397,6 @@ class Classifier(nn.Module):
             nn.Dropout(drop_out),
             nn.Linear(config.hidden_size, num_labels)
         )
-        self.hidden_size = config.hidden_size
-        self._init_weights()
-
-    def _init_weights(self):
-        std = math.sqrt(2.0 / (5.0 * self.hidden_size))
-        nn.init.trunc_normal_(self.nonlinearity[1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
-        nn.init.trunc_normal_(self.nonlinearity[-1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
-        self.nonlinearity[1].bias.data.zero_()
-        self.nonlinearity[-1].bias.data.zero_()
 
     def forward(self, x):
         x = self.nonlinearity(x)
@@ -439,12 +413,6 @@ class NorbertForSequenceClassification(NorbertModel):
         self.num_labels = config.num_labels
         self.head = Classifier(config, self.num_labels)
 
-    def post_init(self):
-        self.head._init_weights()
-
-    def _init_weights(self):
-        self.head._init_weights()
-
     def forward(
         self,
         input_ids: Optional[torch.Tensor] = None,
@@ -511,9 +479,6 @@ class NorbertForTokenClassification(NorbertModel):
         self.num_labels = config.num_labels
         self.head = Classifier(config, self.num_labels)
 
-    def post_init(self):
-        self.head._init_weights()
-
     def forward(
         self,
         input_ids: Optional[torch.Tensor] = None,
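With initialization centralized in NorbertPreTrainedModel._init_weights, the classification heads no longer need their own post_init/_init_weights overrides: the standard post_init() path applies the hook to every submodule, so the Classifier head's nn.Linear layers are covered by the nn.Linear branch. For a sense of scale, assuming hidden_size = 768, std = sqrt(2 / (5 * 768)) ≈ 0.0228, with samples clipped to ±2 std. One caveat: bare parameters such as Embedding.relative_embedding (an nn.Parameter created with torch.empty, not a submodule) do not match any isinstance branch, so the new hook does not re-initialize them.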