from transformers import AutoModel
import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin

from tasks import SECONDARY_TASKS


class BertMultiTask(nn.Module, PyTorchModelHubMixin):
    """BERT backbone with a regression head plus one classification head per secondary task."""

    def __init__(
        self, model_name, extra_layer_sizes=None, dropout_rate=0.1, finetune: bool = False
    ):
        super().__init__()
        self.model_name = model_name
        # Avoid a mutable default argument; no extra layers unless requested.
        self.extra_layer_sizes = extra_layer_sizes if extra_layer_sizes is not None else []
        self.dropout_rate = dropout_rate
        self.finetune = finetune
        self.bert = AutoModel.from_pretrained(model_name)
        self.layers = nn.ModuleList()

        suffix = "_".join(map(str, self.extra_layer_sizes))
        if not finetune:
            # Freeze the backbone; only the extra layers and heads are trained.
            self.name = f"{model_name.split('/')[-1]}_all_tasks_{suffix}"
            for param in self.bert.parameters():
                param.requires_grad = False
        else:
            self.name = f"{model_name.split('/')[-1]}_finetune_all_tasks_{suffix}"
            for param in self.bert.parameters():
                param.requires_grad = True

        # Optional MLP stack between the pooled BERT output and the heads.
        prev_size = self.bert.config.hidden_size
        for size in self.extra_layer_sizes:
            self.layers.append(nn.Linear(prev_size, size))
            self.layers.append(nn.ReLU())
            self.layers.append(nn.Dropout(dropout_rate))
            prev_size = size

        self.reg_head = nn.Linear(prev_size, 1)  # for education value
        # One classification head per secondary task, sized by its label map.
        self.classification_heads = nn.ModuleDict()
        for task_name, id_map in SECONDARY_TASKS.items():
            self.classification_heads[task_name] = nn.Linear(prev_size, len(id_map))

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # pooler_output assumes a BERT-style backbone that exposes a pooling layer.
        x = outputs.pooler_output
        for layer in self.layers:
            x = layer(x)
        reg_output = self.reg_head(x).squeeze(-1)
        classes_outputs = {task: head(x) for task, head in self.classification_heads.items()}
        return reg_output, classes_outputs

    def model_unique_name(self) -> str:
        return self.name
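

# Minimal usage sketch: builds the model with one extra hidden layer and runs a
# forward pass on a toy batch. "bert-base-uncased" and the layer size 256 are
# illustrative assumptions; any BERT-style checkpoint with a pooler should work,
# and SECONDARY_TASKS must map task names to label-id dicts as imported above.
# Because the class mixes in PyTorchModelHubMixin, model.save_pretrained(...)
# and BertMultiTask.from_pretrained(...) are also available for checkpointing.
if __name__ == "__main__":
    import torch
    from transformers import AutoTokenizer

    model = BertMultiTask("bert-base-uncased", extra_layer_sizes=[256], finetune=False)
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    batch = tokenizer(
        ["An example document.", "Another document."],
        padding=True,
        truncation=True,
        return_tensors="pt",
    )
    with torch.no_grad():
        reg_scores, task_logits = model(batch["input_ids"], batch["attention_mask"])
    print(reg_scores.shape)  # (batch_size,): one education-value score per document
    print({task: logits.shape for task, logits in task_logits.items()})  # (batch_size, num_labels) per task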