import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.fft

from timm.layers import DropPath


def get_dwconv(dim, kernel, bias):
    # Depthwise convolution: one filter per channel (groups=dim); the padding
    # keeps the spatial resolution unchanged for odd kernel sizes.
    return nn.Conv2d(dim, dim, kernel_size=kernel, padding=(kernel - 1) // 2, bias=bias, groups=dim)


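# Example usage (illustrative sketch; sizes below are arbitrary): a 7x7 depthwise
# conv from get_dwconv preserves both the channel count and the spatial size.
def _dwconv_shape_check():
    dw = get_dwconv(dim=32, kernel=7, bias=True)
    x = torch.randn(1, 32, 14, 14)
    return dw(x).shape  # torch.Size([1, 32, 14, 14])

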
class gnconv(nn.Module):
    """ Recursive gated convolution (gnConv). """
    def __init__(self, dim, order=5, gflayer=None, h=14, w=8, s=1.0):
        super().__init__()
        self.order = order
        # Channel widths per order, from narrowest to full width:
        # [dim / 2**(order-1), ..., dim / 2, dim]
        self.dims = [dim // 2 ** i for i in range(order)]
        self.dims.reverse()
        self.proj_in = nn.Conv2d(dim, 2 * dim, 1)

        if gflayer is None:
            self.dwconv = get_dwconv(sum(self.dims), 7, True)
        else:
            self.dwconv = gflayer(sum(self.dims), h=h, w=w)

        self.proj_out = nn.Conv2d(dim, dim, 1)

        self.pws = nn.ModuleList(
            [nn.Conv2d(self.dims[i], self.dims[i + 1], 1) for i in range(order - 1)]
        )

        self.scale = s
        print('[gnconv]', order, 'order with dims=', self.dims, 'scale=%.4f' % self.scale)

    def forward(self, x, mask=None, dummy=False):
        fused_x = self.proj_in(x)
        # Split the projection into the first-order branch (pwa) and the stacked
        # branches that feed the depthwise (or global-filter) convolution (abc).
        pwa, abc = torch.split(fused_x, (self.dims[0], sum(self.dims)), dim=1)

        dw_abc = self.dwconv(abc) * self.scale

        dw_list = torch.split(dw_abc, self.dims, dim=1)
        x = pwa * dw_list[0]

        # Recursive gating: project to the next channel width, then modulate with
        # the matching convolution output.
        for i in range(self.order - 1):
            x = self.pws[i](x) * dw_list[i + 1]

        x = self.proj_out(x)

        return x


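# Example usage (illustrative sketch): gnconv preserves the (B, C, H, W) shape.
# dim=64, order=5, and the 14x14 map are arbitrary choices; dim should be
# divisible by 2 ** (order - 1) so the channel split adds up to dim.
def _gnconv_shape_check():
    layer = gnconv(dim=64, order=5)
    x = torch.randn(2, 64, 14, 14)
    with torch.no_grad():
        y = layer(x)
    return y.shape  # torch.Size([2, 64, 14, 14])

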
class LayerNorm(nn.Module):
    r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
    channels_last corresponds to inputs with shape (batch_size, height, width, channels),
    while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    """
    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError
        self.normalized_shape = (normalized_shape, )

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == "channels_first":
            # Normalize over the channel dimension of a (B, C, H, W) tensor.
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
            return x


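# Example usage (illustrative sketch): both data formats normalize over the
# channel dimension, so permuting a channels_first tensor to channels_last
# should give numerically close results.
def _layernorm_format_check():
    ln_first = LayerNorm(32, data_format="channels_first")
    ln_last = LayerNorm(32, data_format="channels_last")
    x = torch.randn(2, 32, 7, 7)
    a = ln_first(x)
    b = ln_last(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
    return torch.allclose(a, b, atol=1e-5)

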
class HorBlock(nn.Module):
    """ HorNet block """

    def __init__(self, dim, order=4, mlp_ratio=4, drop_path=0., init_value=1e-6, gnconv=gnconv):
        super().__init__()

        self.norm1 = LayerNorm(dim, eps=1e-6, data_format='channels_first')
        self.gnconv = gnconv(dim, order)
        self.norm2 = LayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, int(mlp_ratio * dim))
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(int(mlp_ratio * dim), dim)
        # Layer-scale parameters for the gnconv branch and the MLP branch.
        self.gamma1 = nn.Parameter(init_value * torch.ones(dim), requires_grad=True)
        self.gamma2 = nn.Parameter(init_value * torch.ones(dim), requires_grad=True)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        # Spatial mixing: gnconv branch with layer scale and stochastic depth.
        gamma1 = self.gamma1.view(C, 1, 1)
        x = x + self.drop_path(gamma1 * self.gnconv(self.norm1(x)))

        # Channel mixing: pointwise MLP applied in channels_last layout.
        input = x
        x = x.permute(0, 2, 3, 1)  # (B, C, H, W) -> (B, H, W, C)
        x = self.norm2(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma2 is not None:
            x = self.gamma2 * x
        x = x.permute(0, 3, 1, 2)  # (B, H, W, C) -> (B, C, H, W)

        x = input + self.drop_path(x)
        return x


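# Example usage (illustrative sketch): run a HorBlock on a dummy feature map and
# check that the residual block keeps the (B, C, H, W) shape. dim=64 and the
# 14x14 resolution are arbitrary; dim only needs to be divisible by 2 ** (order - 1).
if __name__ == '__main__':
    block = HorBlock(dim=64, order=4, drop_path=0.1)
    feats = torch.randn(2, 64, 14, 14)
    out = block(feats)
    print(out.shape)  # torch.Size([2, 64, 14, 14])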