Subi003 committed on
Commit bcfbd3b (verified)
1 Parent(s): b0c47b2

Update modeling.py

Files changed (1)
  1. modeling.py +51 -51
modeling.py CHANGED
@@ -1,51 +1,51 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from transformers import PreTrainedModel, AutoModel, AutoConfig
-
-
- class Encoder(nn.Module):
-     def __init__(self, base_encoder):
-         super().__init__()
-         self.encoder = base_encoder
-
-     def forward(self, inputs):
-         outputs = self.encoder(**inputs, output_hidden_states=True)
-         last_hidden = outputs.hidden_states[-1]
-         mask = inputs["attention_mask"].unsqueeze(-1).float()
-         pooled = (last_hidden * mask).sum(1) / mask.sum(1).clamp(min=1e-9)
-         return F.normalize(pooled, p=2, dim=1)
-
-
- class Classifier(nn.Module):
-     def __init__(self, input_dim=768, num_classes=28):
-         super().__init__()
-         self.mlp = nn.Sequential(
-             nn.Linear(input_dim, 512),
-             nn.LayerNorm(512),
-             nn.GELU(),
-             nn.Dropout(0.25),
-             nn.Linear(512, num_classes),
-         )
-
-     def forward(self, x):
-         return self.mlp(x)
-
-
- class RobertaEmoPillars(PreTrainedModel):
-     config_class = AutoConfig
-
-     def __init__(self, config):
-         super().__init__(config)
-         base_encoder = AutoModel.from_pretrained(config._name_or_path)
-         self.encoder = Encoder(base_encoder)
-         self.classifier = Classifier(
-             input_dim=base_encoder.config.hidden_size,
-             num_classes=config.num_labels,
-         )
-
-     def forward(self, input_ids=None, attention_mask=None):
-         inputs = {"input_ids": input_ids, "attention_mask": attention_mask}
-         emb = self.encoder(inputs)
-         logits = self.classifier(emb)
-         return logits
 
+ import torch
+ import torch.nn as nn
+ from transformers.modeling_outputs import SequenceClassifierOutput
+ from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel, RobertaModel
+
+
+ class RobertaForSequenceClassification(RobertaPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+
+         self.num_labels = config.num_labels
+         self.roberta = RobertaModel(config)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+         self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+         # Load weights
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         token_type_ids=None,
+         labels=None,
+         **kwargs
+     ):
+         outputs = self.roberta(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+         )
+
+         pooled_output = outputs[1]  # pooler output (CLS hidden state passed through the pooler)
+         pooled_output = self.dropout(pooled_output)
+         logits = self.classifier(pooled_output)
+
+         loss = None
+         if labels is not None:
+             if self.num_labels == 1:
+                 loss_fct = nn.MSELoss()
+                 loss = loss_fct(logits.squeeze(), labels.squeeze())
+             else:
+                 loss_fct = nn.BCEWithLogitsLoss()
+                 loss = loss_fct(logits, labels.float())
+
+         return SequenceClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
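
For reference, a minimal usage sketch of the new class, under stated assumptions: the checkpoint id ("roberta-base" here) is a placeholder for this repository's actual checkpoint, num_labels=28 mirrors the default from the removed code, and the 0.5 threshold is illustrative. None of this is part of the commit; it only shows that a head trained with BCEWithLogitsLoss is scored per class with an independent sigmoid at inference time.

import torch
from transformers import AutoTokenizer

from modeling import RobertaForSequenceClassification  # the class defined in this commit

model_id = "roberta-base"  # placeholder; swap in the real repo id carrying the fine-tuned head
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = RobertaForSequenceClassification.from_pretrained(model_id, num_labels=28)
model.eval()

inputs = tokenizer("I can't believe this actually worked!", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)

# Multi-label head: sigmoid per class, threshold each class independently.
probs = torch.sigmoid(out.logits)
predicted = (probs > 0.5).nonzero(as_tuple=True)[1].tolist()
print(predicted)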