| import torch.nn as nn |
| from transformers import AutoModel |
|
|
class MultitaskModel(nn.Module):
    """Shared-encoder model with two classification heads (sentiment + emotion).

    A pretrained transformer encoder produces token representations; the
    first-token ([CLS]) vector is dropout-regularized and fed to two
    independent linear heads that are trained jointly.

    Args:
        model_name: Hugging Face model id or path, passed to
            ``AutoModel.from_pretrained``.
        num_sentiment_labels: Output dimension of the sentiment head.
        num_emotion_labels: Output dimension of the emotion head.
        dropout_prob: Dropout probability applied to the pooled vector.
            Defaults to 0.1, matching the previously hard-coded value,
            so existing callers are unaffected.
    """

    def __init__(self, model_name, num_sentiment_labels, num_emotion_labels,
                 dropout_prob=0.1):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(model_name)
        # Look up the encoder width once; both heads share it.
        hidden_size = self.encoder.config.hidden_size
        self.dropout = nn.Dropout(dropout_prob)
        self.classifier_sent = nn.Linear(hidden_size, num_sentiment_labels)
        self.classifier_emo = nn.Linear(hidden_size, num_emotion_labels)

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None):
        """Compute logits for both tasks.

        Args:
            input_ids: Token id tensor for the encoder.
            attention_mask: Padding mask forwarded to the encoder.
            token_type_ids: Optional segment ids; forwarded to the encoder
                only when provided, so encoders without segment embeddings
                (e.g. RoBERTa-style models) keep working unchanged.

        Returns:
            Tuple ``(sentiment_logits, emotion_logits)``.
        """
        encoder_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask}
        if token_type_ids is not None:
            encoder_kwargs["token_type_ids"] = token_type_ids
        outputs = self.encoder(**encoder_kwargs)
        # First-token ([CLS]) pooling over the final hidden states.
        pooled = self.dropout(outputs.last_hidden_state[:, 0])
        return self.classifier_sent(pooled), self.classifier_emo(pooled)
|
|