import os

import torch
import torch.nn as nn
from transformers import RobertaModel, RobertaPreTrainedModel
from transformers.modeling_outputs import SequenceClassifierOutput


class TransformerForABSA(RobertaPreTrainedModel):
    """Shared RoBERTa encoder with one sentiment classification head per
    aspect (multi-aspect ABSA)."""

    base_model_prefix = "roberta"
    def __init__(self, config):
        super().__init__(config)
        self.roberta = RobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # One linear head per aspect, each predicting num_sentiments + 1
        # classes (in ABSA the extra class commonly encodes "aspect not
        # mentioned"; see save_pretrained, which subtracts it back out).
        self.sentiment_classifiers = nn.ModuleList([
            nn.Linear(config.hidden_size, config.num_sentiments + 1)
            for _ in range(config.num_aspects)
        ])
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.roberta(input_ids, attention_mask=attention_mask, return_dict=return_dict)
        # Index 1 is the pooler output in both the tuple and the
        # ModelOutput return formats; outputs.pooler_output would fail
        # when return_dict=False.
        pooled = self.dropout(outputs[1])
        # Shape: (batch_size, num_aspects, num_sentiments + 1).
        all_logits = torch.stack([cls(pooled) for cls in self.sentiment_classifiers], dim=1)
        loss = None
        if labels is not None:
            # Flatten (batch, aspects) into one dimension so a single
            # cross-entropy covers every (sample, aspect) pair; labels is
            # expected to have shape (batch_size, num_aspects).
            logits_flat = all_logits.view(-1, all_logits.size(-1))
            targets_flat = labels.view(-1)
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits_flat, targets_flat)

        if not return_dict:
            output = (all_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=all_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def save_pretrained(self, save_directory: str, **kwargs):
        """
        The HuggingFace Trainer sometimes passes state_dict=..., so we
        accept **kwargs to avoid errors.
        """
        # Pop state_dict so it is not forwarded to the backbone save,
        # whose parameter names do not match the full model's.
        state_dict = kwargs.pop("state_dict", None)

        self.roberta.save_pretrained(save_directory, **kwargs)

        # Record the head hyperparameters so the model can be rebuilt
        # from the saved config.
        config = self.roberta.config
        config.num_aspects = len(self.sentiment_classifiers)
        # Each head has num_sentiments + 1 outputs (see __init__), so
        # subtract the extra class to recover num_sentiments.
        config.num_sentiments = self.sentiment_classifiers[0].out_features - 1
        config.auto_map = {"AutoModel": "models.TransformerForABSA"}
        config.save_pretrained(save_directory, **kwargs)

        # Overwrite the backbone-only checkpoint with the full model
        # weights (encoder plus classification heads).
        sd = state_dict if state_dict is not None else self.state_dict()
        torch.save(sd, os.path.join(save_directory, "pytorch_model.bin"))
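

# Minimal usage sketch (not part of the original module): shows how the
# model is constructed, run on a dummy batch, and saved. The checkpoint
# name "roberta-base", the aspect/sentiment counts, and the output
# directory below are illustrative assumptions, not values from this repo.
if __name__ == "__main__":
    from transformers import AutoConfig

    config = AutoConfig.from_pretrained("roberta-base")
    config.num_aspects = 8       # hypothetical number of aspects
    config.num_sentiments = 3    # e.g. negative / neutral / positive
    model = TransformerForABSA.from_pretrained("roberta-base", config=config)

    # Dummy batch: labels hold one class index per (sample, aspect).
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    attention_mask = torch.ones_like(input_ids)
    labels = torch.randint(0, config.num_sentiments + 1, (2, config.num_aspects))

    out = model(input_ids, attention_mask=attention_mask, labels=labels)
    print(out.loss.item(), tuple(out.logits.shape))  # scalar loss, (2, 8, 4)

    # save_pretrained writes config.json and pytorch_model.bin; since
    # config.auto_map points at models.TransformerForABSA, reloading via
    # AutoModel requires trust_remote_code=True and models.py shipped
    # alongside the checkpoint.
    model.save_pretrained("./absa-checkpoint")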