|
|
import torch |
|
|
import torch.nn as nn |
|
|
import torch.nn.functional as F |
|
|
|
|
|
class CNNED_Protein(nn.Module):
    """Convolutional sequence encoder with a triplet (anchor/positive/negative) forward.

    Maps inputs of shape (batch, alphabet_size, length) to fixed-size
    embeddings of shape (batch, target_size) via a stack of Conv1d blocks,
    global average pooling, and a two-layer projection head.

    Args:
        alphabet_size: number of input channels (one per sequence symbol).
        target_size: dimensionality of the output embedding.
        channel: working channel width of every convolutional stage.
        depth: total number of Conv1d/BN/ReLU stages; stages after the first
            each halve the temporal resolution with AvgPool1d(2).
        kernel_size: convolution kernel width ("same" length is preserved
            only for odd kernel sizes, since padding = kernel_size // 2).
        l2norm: if True, L2-normalize the embedding along the last dim.
    """

    def __init__(self, alphabet_size: int, target_size: int,
                 channel: int, depth: int, kernel_size: int, l2norm: bool = True):
        super().__init__()
        width = channel
        padding = kernel_size // 2  # "same" padding for odd kernel sizes

        # Stage 1 lifts the alphabet channels up to the working width.
        layers = [
            nn.Conv1d(alphabet_size, width, kernel_size,
                      stride=1, padding=padding, bias=False),
            nn.BatchNorm1d(width),
            nn.ReLU(inplace=True),
        ]
        # Each remaining stage keeps the width but halves the sequence length.
        for _ in range(depth - 1):
            layers.extend([
                nn.Conv1d(width, width, kernel_size,
                          stride=1, padding=padding, bias=False),
                nn.BatchNorm1d(width),
                nn.ReLU(inplace=True),
                nn.AvgPool1d(2),
            ])
        self.conv = nn.Sequential(*layers)

        # Collapse whatever temporal length remains to a single position.
        self.pool = nn.AdaptiveAvgPool1d(1)

        # Small MLP head projecting pooled features to the embedding space.
        self.proj = nn.Sequential(
            nn.Linear(width, width),
            nn.ReLU(inplace=True),
            nn.Linear(width, target_size),
        )
        self.l2norm = l2norm

    def encode(self, x: torch.Tensor):
        """Encode one batch.

        Args:
            x: tensor of shape (batch, alphabet_size, length).

        Returns:
            Tuple of (embedding, pooled_features): the projected (optionally
            L2-normalized) embedding and the pre-projection pooled features.
        """
        feats = self.pool(self.conv(x)).squeeze(-1)
        emb = self.proj(feats)
        if self.l2norm:
            emb = F.normalize(emb, dim=-1)
        return emb, feats

    def forward(self, a: torch.Tensor, p: torch.Tensor, n: torch.Tensor):
        """Encode a triplet; returns the three embeddings (features dropped)."""
        anchor, _ = self.encode(a)
        positive, _ = self.encode(p)
        negative, _ = self.encode(n)
        return anchor, positive, negative
|
|
|