Spaces:
Sleeping
Sleeping
| import torch | |
| import numpy as np | |
| import copy | |
| import torch.nn.functional as F | |
class ScoreFunctionConv(torch.nn.Module):
    """Score a batch of feature vectors with a 1x1 Conv1d projection
    followed by a small sigmoid head.

    Forward contract: input of shape ``(batch, in_features)`` ->
    output of shape ``(batch, 1)`` with values in ``(0, 1)``.
    """

    def __init__(self, in_features: int = 10, d_model: int = 64,
                 n_head: int = 4) -> None:
        """Build the scoring network.

        Args:
            in_features: size of each input feature vector. Defaults to 10,
                the value previously hard-coded.
            d_model: hidden width of the conv projection. Defaults to 64,
                the value previously hard-coded. Must be divisible by
                ``n_head`` (MultiheadAttention requirement).
            n_head: head count for the (currently unused) attention layer.
        """
        super().__init__()
        self.d_model = d_model
        self.n_head = n_head

        # Layers actually used by forward():
        # 1x1 conv over a length-1 "sequence" — effectively a linear
        # projection in_features -> d_model applied per sample.
        self.conv1 = torch.nn.Conv1d(in_features, self.d_model, 1, stride=1)
        self.Norm = torch.nn.BatchNorm1d(self.d_model)
        self.Activation1 = torch.nn.ReLU()
        self.Linear5 = torch.nn.Linear(d_model, 1)
        self.Activation2 = torch.nn.Sigmoid()

        # NOTE(review): the layers below are constructed but never used in
        # forward(). They are kept so existing checkpoints (state_dict keys)
        # and any external code touching these attributes keep working —
        # confirm with callers before removing.
        self.norm = torch.nn.LayerNorm(self.d_model)
        self.Attention = torch.nn.MultiheadAttention(self.d_model,
                                                     self.n_head, dropout=0)
        self.Linear1 = torch.nn.Linear(in_features, d_model)
        self.Linear2 = torch.nn.Linear(d_model, 32)
        self.Linear3 = torch.nn.Linear(32, 16)
        self.Linear4 = torch.nn.Linear(16, 1)
        self.Linear6 = torch.nn.Linear(in_features, 1)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Compute sigmoid scores for a batch of feature vectors.

        Args:
            input: tensor of shape ``(batch, in_features)``.

        Returns:
            Tensor of shape ``(batch, 1)`` with scores in ``(0, 1)``.
        """
        # (batch, C) -> (batch, C, 1) so Conv1d sees C channels over a
        # length-1 sequence, then drop the length dim again.
        output = self.conv1(input.unsqueeze(dim=2)).squeeze(dim=2)
        # BatchNorm1d cannot compute batch statistics for a single sample
        # in training mode, so it is skipped when batch size is 1
        # (original behavior preserved).
        if output.shape[0] > 1:
            output = self.Norm(output)
        output = self.Activation1(output)
        output = self.Linear5(output)
        output = self.Activation2(output)
        return output