| import torch | |
| import torch.nn as nn | |
| from transformers import BertModel | |
class PersonaAssigner(nn.Module):
    """Two-layer MLP that maps an input feature vector to persona logits.

    Architecture: Linear(input_dim -> hidden_dim) -> ReLU ->
    Linear(hidden_dim -> output_dim). Returns raw (unnormalized) scores.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        """Build the two linear layers.

        Args:
            input_dim: size of the incoming feature vector.
            hidden_dim: width of the hidden layer.
            output_dim: number of persona classes (logit dimension).
        """
        super().__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """Return persona logits of shape (..., output_dim)."""
        hidden = self.fc1(x).relu()
        return self.fc2(hidden)
class PreferencePredictor(nn.Module):
    """MLP head that predicts preference scores from a feature vector.

    Architecture: Linear(input_dim -> hidden_dim) -> ReLU ->
    Linear(hidden_dim -> num_classes). Returns raw logits.

    The hidden width (256) and number of classes (3) were previously
    hard-coded; they are now keyword parameters with the same defaults,
    so existing callers are unaffected.
    """

    def __init__(self, input_dim, hidden_dim=256, num_classes=3):
        """Build the two linear layers.

        Args:
            input_dim: size of the incoming feature vector.
            hidden_dim: width of the hidden layer (default 256, as before).
            num_classes: logit dimension (default 3, as before).
        """
        super().__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        """Return preference logits of shape (..., num_classes)."""
        x = torch.relu(self.fc1(x))
        return self.fc2(x)
class BERTEncoder(nn.Module):
    """Wraps a pretrained BERT and returns one pooled vector per sequence.

    Pooling is a mean over the token embeddings of *real* tokens only,
    as indicated by ``attention_mask``. The previous implementation
    averaged over every position including padding, which diluted the
    embeddings of short sequences with pad-token vectors.
    """

    def __init__(self, model_name='bert-base-uncased'):
        """Load the pretrained BERT weights identified by ``model_name``."""
        super().__init__()
        self.bert = BertModel.from_pretrained(model_name)

    def forward(self, input_ids, attention_mask, token_type_ids=None):
        """Return a (batch, hidden_size) tensor of masked-mean embeddings.

        Args:
            input_ids: token id tensor, (batch, seq_len).
            attention_mask: 1 for real tokens, 0 for padding, (batch, seq_len).
            token_type_ids: optional segment ids, forwarded to BERT.
        """
        outputs = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
        )
        hidden = outputs.last_hidden_state  # (batch, seq_len, hidden_size)
        # Zero out padded positions, then divide by the number of real
        # tokens rather than by seq_len. clamp guards against an all-zero
        # mask producing a division by zero.
        mask = attention_mask.unsqueeze(-1).to(hidden.dtype)
        summed = (hidden * mask).sum(dim=1)
        counts = mask.sum(dim=1).clamp(min=1e-9)
        return summed / counts