import torch
import torch.nn as nn
import torch.nn.functional as F

# assumed to implement drop path (stochastic depth); only the
# (num_channels, drop_prob) constructor used below is relied on
from .transformer import AffineDropPath

class SGPBlock(nn.Module):
    """
    A simple conv block, similar to the basic block used in ResNet.
    """

    def __init__(
        self,
        n_embd,                 # input feature dimension
        kernel_size=3,          # conv kernel size (must be odd)
        n_ds_stride=1,          # downsampling stride (1 = no downsampling)
        k=1.5,                  # size expansion factor for the large-kernel branch
        group=1,                # group count for the MLP 1x1 convs
        n_out=None,             # output dimension (defaults to n_embd)
        n_hidden=None,          # hidden MLP dimension (defaults to 4 * n_embd)
        path_pdrop=0.0,         # drop-path probability
        act_layer=nn.GELU,      # activation used in the MLP
        downsample_type="max",  # "max" or "avg" temporal downsampling
        init_conv_vars=1,       # std of the Gaussian init for the branch convs
    ):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = n_ds_stride

        if n_out is None:
            n_out = n_embd

        self.ln = nn.LayerNorm(n_embd)
        self.gn = nn.GroupNorm(16, n_embd)

        assert kernel_size % 2 == 1

        # kernel size of the large-kernel branch, rounded up to an odd number
        # so that padding = up_size // 2 preserves the temporal length
        up_size = round((kernel_size + 1) * k)
        up_size = up_size + 1 if up_size % 2 == 0 else up_size
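        # e.g. with the defaults kernel_size=3, k=1.5: round(4 * 1.5) = 6,
        # which is even, so up_size becomes 7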

        # depthwise branches: fc/psi produce gating signals, convw/convkw mix
        # features over a small and a large temporal window
        self.psi = nn.Conv1d(n_embd, n_embd, kernel_size, stride=1, padding=kernel_size // 2, groups=n_embd)
        self.fc = nn.Conv1d(n_embd, n_embd, 1, stride=1, padding=0, groups=n_embd)
        self.convw = nn.Conv1d(n_embd, n_embd, kernel_size, stride=1, padding=kernel_size // 2, groups=n_embd)
        self.convkw = nn.Conv1d(n_embd, n_embd, up_size, stride=1, padding=up_size // 2, groups=n_embd)
        self.global_fc = nn.Conv1d(n_embd, n_embd, 1, stride=1, padding=0, groups=n_embd)
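        # forward() combines these branches as
        #   out = fc(out) * phi + (convw(out) + convkw(out)) * psi + out
        # where phi is a ReLU-gated global (temporal-mean) feature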

        if n_ds_stride > 1:
            if downsample_type == "max":
                kernel_size, stride, padding = n_ds_stride + 1, n_ds_stride, (n_ds_stride + 1) // 2
                self.downsample = nn.MaxPool1d(kernel_size, stride=stride, padding=padding)
                self.stride = stride
            elif downsample_type == "avg":
                self.downsample = nn.Sequential(
                    nn.AvgPool1d(n_ds_stride, stride=n_ds_stride, padding=0),
                    nn.Conv1d(n_embd, n_embd, 1, 1, 0),
                )
                self.stride = n_ds_stride
            else:
                raise NotImplementedError("downsample type error")
        else:
            self.downsample = nn.Identity()
            self.stride = 1

        if n_hidden is None:
            n_hidden = 4 * n_embd

        # position-wise two-layer MLP (1x1 convs); n_out was already
        # defaulted to n_embd above
        self.mlp = nn.Sequential(
            nn.Conv1d(n_embd, n_hidden, 1, groups=group),
            act_layer(),
            nn.Conv1d(n_hidden, n_out, 1, groups=group),
        )

        # drop path (stochastic depth) on the two residual connections
        if path_pdrop > 0.0:
            self.drop_path_out = AffineDropPath(n_embd, drop_prob=path_pdrop)
            self.drop_path_mlp = AffineDropPath(n_out, drop_prob=path_pdrop)
        else:
            self.drop_path_out = nn.Identity()
            self.drop_path_mlp = nn.Identity()

        self.act = act_layer()
        self.reset_params(init_conv_vars=init_conv_vars)

    def reset_params(self, init_conv_vars=0):
        # zero-mean Gaussian init for the branch weights, zero biases
        torch.nn.init.normal_(self.psi.weight, 0, init_conv_vars)
        torch.nn.init.normal_(self.fc.weight, 0, init_conv_vars)
        torch.nn.init.normal_(self.convw.weight, 0, init_conv_vars)
        torch.nn.init.normal_(self.convkw.weight, 0, init_conv_vars)
        torch.nn.init.normal_(self.global_fc.weight, 0, init_conv_vars)
        torch.nn.init.constant_(self.psi.bias, 0)
        torch.nn.init.constant_(self.fc.bias, 0)
        torch.nn.init.constant_(self.convw.bias, 0)
        torch.nn.init.constant_(self.convkw.bias, 0)
        torch.nn.init.constant_(self.global_fc.bias, 0)
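        # note: with init_conv_vars=0 every branch outputs zero at init, so the
        # combination step in forward() passes the LayerNorm output straight through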

    def forward(self, x, mask):
        # x: (B, C, T) features, mask: (B, T) boolean validity mask
        B, C, T = x.shape
        x = self.downsample(x)

        # downsample the mask to match the new temporal length
        out_mask = F.interpolate(
            mask.unsqueeze(1).to(x.dtype),
            size=torch.div(T, self.stride, rounding_mode="trunc"),
            mode="nearest",
        ).detach()

        out = self.ln(x.permute(0, 2, 1)).permute(0, 2, 1)
        psi = self.psi(out)
        fc = self.fc(out)
        convw = self.convw(out)
        convkw = self.convkw(out)
        # phi: global context from the temporal mean, broadcast over T
        phi = torch.relu(self.global_fc(out.mean(dim=-1, keepdim=True)))
        # instant-level (fc * phi) + window-level ((convw + convkw) * psi) + residual
        out = fc * phi + (convw + convkw) * psi + out

        # masked residual connection with drop path
        out = x * out_mask + self.drop_path_out(out)

        # MLP sub-block with its own residual and drop path
        out = out + self.drop_path_mlp(self.mlp(self.gn(out)))

        return out, out_mask.squeeze(1).bool()
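

# A minimal usage sketch (hypothetical shapes, assuming the package provides
# transformer.AffineDropPath for the relative import above):
#
#   block = SGPBlock(n_embd=512, n_ds_stride=2, path_pdrop=0.1)
#   x = torch.randn(2, 512, 256)                 # (B, C, T)
#   mask = torch.ones(2, 256, dtype=torch.bool)  # valid time steps
#   out, out_mask = block(x, mask)               # out: (2, 512, 128), out_mask: (2, 128)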