import torch
from torch import nn
import torch.nn.functional as F
from transformers import BertModel, PreTrainedModel, PretrainedConfig
from transformers.modeling_outputs import SequenceClassifierOutput


class BertABSAConfig(PretrainedConfig):
    model_type = "BertCNNForSequenceClassification"

    def __init__(self,
                 num_classes=2,
                 embed_dim=768,
                 conv_out_channels=256,
                 conv_kernel_size=3,
                 fc_hidden=128,
                 dropout_rate=0.1,
                 num_layers=12,
                 **kwargs):
        super().__init__(**kwargs)
        self.num_classes = num_classes
        self.embed_dim = embed_dim
        self.conv_out_channels = conv_out_channels
        self.conv_kernel_size = conv_kernel_size
        self.fc_hidden = fc_hidden
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.id2label = {
            0: "fake",
            1: "true",
        }
        self.label2id = {
            "fake": 0,
            "true": 1,
        }
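
    # Illustrative note (not from the original source): inheriting from
    # PretrainedConfig gives this class JSON round-tripping for free, which
    # is what later makes save_pretrained/from_pretrained work, e.g.
    #
    #     cfg = BertABSAConfig(num_classes=2)
    #     cfg.save_pretrained("./bert-cnn-absa")   # writes config.json
    #     cfg = BertABSAConfig.from_pretrained("./bert-cnn-absa")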


class BertCNNForSequenceClassification(PreTrainedModel):
    config_class = BertABSAConfig

    def __init__(self, config):
        super().__init__(config)
        self.num_classes = config.num_classes
        self.embed_dim = config.embed_dim
        self.num_layers = config.num_layers
        self.conv_out_channels = config.conv_out_channels
        self.conv_kernel_size = config.conv_kernel_size
        self.dropout = nn.Dropout(config.dropout_rate)
        # Pretrained backbone; output_hidden_states=True exposes the
        # per-layer hidden states that the convolution below consumes.
        self.bert = BertModel.from_pretrained('bert-base-uncased',
                                              output_hidden_states=True,
                                              output_attentions=False)
        print("BERT Model Loaded")
        # Conv1d treats the embedding dimension as channels and slides a
        # kernel across the stacked per-layer [CLS] vectors.
        self.conv1d = nn.Conv1d(in_channels=self.embed_dim,
                                out_channels=self.conv_out_channels,
                                kernel_size=self.conv_kernel_size)
        self.fc = nn.Linear(self.conv_out_channels, self.num_classes)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, labels=None):
        bert_output = self.bert(input_ids=input_ids,
                                attention_mask=attention_mask,
                                token_type_ids=token_type_ids)
        # Tuple of num_layers + 1 tensors of shape (batch, seq_len,
        # embed_dim): the embedding output followed by one entry per
        # transformer layer.
        hidden_states = bert_output["hidden_states"]

        # Take the [CLS] vector from each transformer layer (index 0 is the
        # embedding output) and stack them along a trailing axis, giving the
        # channels-first layout (batch, embed_dim, num_layers) that Conv1d
        # expects.
        cls_states = torch.stack(
            [hidden_states[layer_i][:, 0, :] for layer_i in range(1, self.num_layers + 1)],
            dim=-1,
        )
        conv_output = self.conv1d(cls_states)
        conv_output = F.relu(conv_output)
        # Global max pool over the layer axis -> (batch, conv_out_channels).
        conv_output = F.max_pool1d(conv_output, kernel_size=conv_output.size(2))
        conv_output = conv_output.squeeze(-1)
        conv_output = self.dropout(conv_output)
        logits = self.fc(conv_output)

        loss = None
        if labels is not None:
            loss = F.cross_entropy(logits, labels)
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=bert_output.hidden_states,
            attentions=bert_output.attentions,
        )
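

# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the original module: registers the custom
# config/model pair with the Auto* machinery and runs a dummy forward pass.
# Assumes a transformers version with Auto* registration support (v4.12+);
# the example texts and labels are illustrative placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

    # Register the pair so AutoConfig/AutoModel can resolve the custom
    # "BertCNNForSequenceClassification" model_type.
    AutoConfig.register("BertCNNForSequenceClassification", BertABSAConfig)
    AutoModelForSequenceClassification.register(BertABSAConfig, BertCNNForSequenceClassification)

    config = BertABSAConfig()
    model = BertCNNForSequenceClassification(config)
    model.eval()

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    batch = tokenizer(
        ["an example headline", "another example headline"],
        padding=True,
        truncation=True,
        return_tensors="pt",
    )

    with torch.no_grad():
        out = model(**batch, labels=torch.tensor([0, 1]))
    print(out.logits.shape)  # torch.Size([2, config.num_classes])
    print(config.id2label[out.logits.argmax(dim=-1)[0].item()])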