import torch
import torch.nn as nn


class GELU(nn.Module):
    """GELU activation.

    ``torch.nn.functional.gelu`` has no in-place variant, so ``inplace`` is
    accepted only to keep a uniform constructor signature and is ignored in
    ``forward``.
    """

    def __init__(self, inplace=True):
        super(GELU, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return torch.nn.functional.gelu(x)


class Swish(nn.Module):
    """Swish (a.k.a. SiLU) activation: ``x * sigmoid(x)``."""

    def __init__(self, inplace=True):
        super(Swish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            # Saves an allocation but mutates ``x``: avoid when ``x`` is
            # reused elsewhere or must stay intact for autograd.
            x.mul_(torch.sigmoid(x))
            return x
        else:
            return x * torch.sigmoid(x)


class Activation(nn.Module):
    """Factory module that maps an activation name to its implementation."""

    def __init__(self, act_type, inplace=True):
        super(Activation, self).__init__()
        act_type = act_type.lower()
        if act_type == 'relu':
            self.act = nn.ReLU(inplace=inplace)
        elif act_type == 'relu6':
            self.act = nn.ReLU6(inplace=inplace)
        elif act_type == 'sigmoid':
            self.act = nn.Sigmoid()
        elif act_type == 'hard_sigmoid':
            self.act = nn.Hardsigmoid(inplace=inplace)
        elif act_type == 'hard_swish':
            self.act = nn.Hardswish(inplace=inplace)
        elif act_type == 'leakyrelu':
            self.act = nn.LeakyReLU(inplace=inplace)
        elif act_type == 'gelu':
            self.act = GELU(inplace=inplace)
        elif act_type == 'swish':
            self.act = Swish(inplace=inplace)
        else:
            raise NotImplementedError(
                f'Unsupported activation type: {act_type}')

    def forward(self, inputs):
        return self.act(inputs)
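

# Usage sketch (illustrative only): the factory keeps call sites uniform
# across activation choices.
#
#     act = Activation('hard_swish', inplace=False)
#     y = act(torch.randn(2, 16))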


def drop_path(x,
              drop_prob: float = 0.0,
              training: bool = False,
              scale_by_keep: bool = True):
    """Drop paths (Stochastic Depth) per sample (when applied in the main
    path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet,
    etc. networks; however, the original name is misleading, as 'Drop
    Connect' is a different form of dropout from a separate paper. See the
    discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956
    I've opted to change the layer and argument names to 'drop path' rather
    than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
    if keep_prob > 0.0 and scale_by_keep:
        # Rescale so the expected activation magnitude is unchanged.
        random_tensor.div_(keep_prob)
    return x * random_tensor
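

# Shape sketch (illustrative): for a (B, N, C) input the mask shape is
# (B, 1, 1), so an entire residual branch is kept or zeroed per sample,
# and the 1/keep_prob rescale keeps E[drop_path(x)] == x.
#
#     x = torch.randn(4, 196, 768)
#     y = drop_path(x, drop_prob=0.1, training=True)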


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main
    path of residual blocks)."""

    def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)

    def extra_repr(self):
        return f'drop_prob={self.drop_prob:0.3f}'
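

# Usage note (illustrative): DropPath wraps the residual branch, never the
# identity path, e.g. ``x = x + self.drop_path(self.mlp(self.norm2(x)))``
# as in ``Block.forward`` below.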


class Identity(nn.Module):
    """No-op placeholder module (mirrors ``nn.Identity``)."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x


class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> Linear,
    with dropout after each projection."""

    def __init__(
            self,
            in_features,
            hidden_features=None,
            out_features=None,
            act_layer=nn.GELU,
            drop=0.0,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
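

# Shape sketch (illustrative): Mlp acts on the last dimension only, so a
# (B, N, C) token tensor stays (B, N, C) when out_features defaults to
# in_features.
#
#     mlp = Mlp(in_features=768, hidden_features=3072)
#     y = mlp(torch.randn(4, 196, 768))  # -> (4, 196, 768)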


class Attention(nn.Module):
    """Multi-head self-attention with a fused qkv projection."""

    def __init__(self,
                 dim,
                 num_heads=8,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.0,
                 proj_drop=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        # (B, N, 3C) -> (3, B, num_heads, N, head_dim)
        qkv = (self.qkv(x).reshape(B, N, 3, self.num_heads,
                                   C // self.num_heads).permute(2, 0, 3, 1, 4))
        q, k, v = qkv[0], qkv[1], qkv[2]

        # Scaled dot-product attention weights: (B, num_heads, N, N).
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        # Weighted sum of values, then merge heads back to (B, N, C).
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
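

# Usage sketch (illustrative): ``dim`` must be divisible by ``num_heads``.
#
#     attn = Attention(dim=768, num_heads=8, qkv_bias=True)
#     y = attn(torch.randn(4, 196, 768))  # -> (4, 196, 768)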


class Block(nn.Module):
    """Pre-norm Transformer encoder block: attention and MLP sub-blocks,
    each wrapped in a residual connection with optional DropPath."""

    def __init__(
            self,
            dim,
            num_heads,
            mlp_ratio=4.0,
            qkv_bias=False,
            qk_scale=None,
            drop=0.0,
            attn_drop=0.0,
            drop_path=0.0,
            act_layer=nn.GELU,
            norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )

        self.drop_path = DropPath(
            drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim,
                       hidden_features=mlp_hidden_dim,
                       act_layer=act_layer,
                       drop=drop)

    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
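

# Usage sketch (illustrative): a ViT-Base-style block.
#
#     block = Block(dim=768, num_heads=12, mlp_ratio=4.0, drop_path=0.1)
#     y = block(torch.randn(4, 196, 768))  # -> (4, 196, 768)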


class PatchEmbed(nn.Module):
    """Image to Patch Embedding."""

    def __init__(self,
                 img_size=[32, 128],
                 patch_size=[4, 4],
                 in_chans=3,
                 embed_dim=768):
        super().__init__()
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] //
                                                        patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        # Non-overlapping patchify: a conv whose kernel and stride both
        # equal the patch size.
        self.proj = nn.Conv2d(in_chans,
                              embed_dim,
                              kernel_size=patch_size,
                              stride=patch_size)

    def forward(self, x):
        B, C, H, W = x.shape

        assert (
            H == self.img_size[0] and W == self.img_size[1]
        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # (B, C, H, W) -> (B, embed_dim, H/ph, W/pw) -> (B, num_patches, embed_dim)
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
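

# Minimal smoke test (illustrative sketch, run only when executed directly).
# The 32x128 image size and 4x4 patches are the module defaults; the block
# configuration below is an assumed example, not a prescribed setting.
if __name__ == '__main__':
    embed = PatchEmbed(img_size=[32, 128], patch_size=[4, 4], embed_dim=768)
    block = Block(dim=768, num_heads=12, drop_path=0.1)
    imgs = torch.randn(2, 3, 32, 128)
    tokens = embed(imgs)  # -> (2, 256, 768): (32/4) * (128/4) patches
    out = block(tokens)   # shape preserved: (2, 256, 768)
    print(tokens.shape, out.shape)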