# ---------------------------
# Model
# ---------------------------
import torch.nn as nn
from transformers import AutoModel
class BertEmotionClassifier(nn.Module):
    """Transformer encoder with a small MLP head for emotion classification.

    The pretrained encoder (default ``roberta-base``) produces token-level
    hidden states; the first token's final hidden state (``<s>`` for RoBERTa,
    ``[CLS]`` for BERT) is projected to ``hidden_dim``, passed through a ReLU
    and dropout, then mapped to ``num_labels`` raw logits.

    Args:
        model_name: HuggingFace model identifier passed to
            ``AutoModel.from_pretrained``.
        num_labels: Number of emotion classes (size of the output logits).
        dropout: Dropout probability applied to the activated hidden features.
        hidden_dim: Width of the intermediate projection layer
            (previously hard-coded to 128; the default preserves old behavior).
    """

    def __init__(self, model_name: str = "roberta-base", num_labels: int = 5,
                 dropout: float = 0.3, hidden_dim: int = 128):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(model_name)
        self.dropout = nn.Dropout(dropout)
        # Attribute names `classifier` / `classifier1` are kept as-is so that
        # state dicts saved from the original implementation still load.
        self.classifier = nn.Linear(self.encoder.config.hidden_size, hidden_dim)
        self.classifier1 = nn.Linear(hidden_dim, num_labels)

    def forward(self, input_ids, attention_mask):
        """Return raw (unnormalized) logits of shape ``(batch, num_labels)``.

        Args:
            input_ids: Token id tensor — presumably ``(batch, seq_len)``,
                as produced by the matching tokenizer (TODO confirm caller).
            attention_mask: Padding mask of the same shape as ``input_ids``.
        """
        out = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        # First token's final hidden state as the sequence representation.
        cls = out.last_hidden_state[:, 0, :]
        # BUG FIX: the original stacked two nn.Linear layers with no
        # nonlinearity between them, which collapses to a single linear map
        # and makes the intermediate layer useless. Insert a ReLU; dropout
        # then regularizes the activated hidden features.
        hidden = self.classifier(cls).relu()
        hidden = self.dropout(hidden)
        logits = self.classifier1(hidden)
        return logits