import math
import warnings

import torch
from torch import nn


class SimVPV2_Model(nn.Module):
    r"""SimVP model with the gSTA translator and the encoder-decoder skip
    connection removed.

    Based on `SimVP: Simpler yet Better Video Prediction
    <https://arxiv.org/abs/2206.05099>`_.
    """

    def __init__(self, shape_in, shape_out, hid_S=16, hid_T=256, N_S=4, N_T=4,
                 mlp_ratio=8., drop=0.0, drop_path=0.0, spatio_kernel_enc=3,
                 spatio_kernel_dec=3, last_activation='none', act_inplace=True,
                 recursive=False, **kwargs):
        super(SimVPV2_Model, self).__init__()
        T, C, H, W = shape_in
        T2, C2, H2, W2 = shape_out
        assert C == C2 and H == H2 and W == W2, \
            'Input and output must have the same image shape'
        self.T2 = T2
        self.T = T

        # Latent spatial resolution after the N_S/2 stride-2 encoder stages.
        H, W = int(H / 2**(N_S/2)), int(W / 2**(N_S/2))
        act_inplace = False

        self.enc = Encoder(C, hid_S, N_S)
        self.dec = Decoder(hid_S, C, N_S, last_activation)

        self.recursive = recursive
        if not self.recursive:
            # Translate T input frames directly into T2 output frames in latent space.
            self.hid = MidMetaNet(T*hid_S, T2*hid_S, hid_T, N_T,
                                  input_resolution=(H, W), model_type='gsta',
                                  mlp_ratio=mlp_ratio, drop=drop, drop_path=drop_path)
        else:
            # Keep T frames per step and roll the translator forward in the forward pass.
            self.hid = MidMetaNet(T*hid_S, T*hid_S, hid_T, N_T,
                                  input_resolution=(H, W), model_type='gsta',
                                  mlp_ratio=mlp_ratio, drop=drop, drop_path=drop_path)
        self.last_activation = last_activation

    def forward(self, x_raw, **kwargs):
        B, T, C, H, W = x_raw.shape

        # Encode every frame independently: (B*T, C, H, W) -> (B*T, C_, H_, W_).
        x = x_raw.reshape(B*T, C, H, W)

        embed = self.enc(x)
        _, C_, H_, W_ = embed.shape

        z = embed.view(B, T, C_, H_, W_)

        if not self.recursive:
            hid, conds_ = self.hid(z)
        else:
            # Apply the translator repeatedly until at least T2 latent frames exist,
            # then truncate to exactly T2.
            no = self.T2 // self.T
            if self.T2 % self.T != 0:
                no += 1
            hid = []
            for i in range(no):
                z, _ = self.hid(z)
                hid.append(z)
            hid = torch.cat(hid, dim=1)
            hid = hid[:, :self.T2]
            conds_ = hid.reshape(-1, C_, H_, W_)

        # Decode every predicted latent frame back to pixel space.
        hid = hid.reshape(B*self.T2, C_, H_, W_)

        Y = self.dec(hid)
        Y = Y.reshape(B, self.T2, C, H, W)
        return Y, conds_, hid.reshape(B, -1, C_, H_, W_)

    def recon_loss(self, x, y):
        X = torch.cat((x, y), dim=1)
        B, T, C, H, W = X.shape
        X = X.reshape(-1, C, H, W)
        recon = self.dec(self.enc(X))
        return nn.MSELoss()(recon, X)
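

# Minimal usage sketch (the shapes below are illustrative assumptions, not taken
# from this file): encode 10 frames of 1x64x64, translate, and decode 10 frames.
#
#     model = SimVPV2_Model(shape_in=(10, 1, 64, 64), shape_out=(10, 1, 64, 64))
#     x = torch.randn(2, 10, 1, 64, 64)      # (B, T, C, H, W)
#     y, conds, hid = model(x)               # y: (2, 10, 1, 64, 64)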


class MidMetaNet(nn.Module):
    """The hidden translator of MetaFormer for SimVP."""

    def __init__(self, channel_in, channel_out, channel_hid, N2,
                 input_resolution=None, model_type=None,
                 mlp_ratio=4., drop=0.0, drop_path=0.1):
        super(MidMetaNet, self).__init__()
        assert N2 >= 2 and mlp_ratio > 1
        self.N2 = N2
        # Stochastic-depth rates grow linearly across the N2 blocks.
        dpr = [x.item() for x in torch.linspace(1e-2, drop_path, self.N2)]

        # First block maps channel_in -> channel_hid.
        enc_layers = [MetaBlock(
            channel_in, channel_hid, input_resolution, model_type,
            mlp_ratio, drop, drop_path=dpr[0], layer_i=0)]

        # Middle blocks keep channel_hid.
        for i in range(1, N2-1):
            enc_layers.append(MetaBlock(
                channel_hid, channel_hid, input_resolution, model_type,
                mlp_ratio, drop, drop_path=dpr[i], layer_i=i))

        # Last block maps channel_hid -> channel_out.
        enc_layers.append(MetaBlock(
            channel_hid, channel_out, input_resolution, model_type,
            mlp_ratio, drop, drop_path=drop_path, layer_i=N2-1))
        self.enc = nn.Sequential(*enc_layers)

    def forward(self, x):
        B, T, C, H, W = x.shape
        # Fold time into channels so the translator operates on 2D feature maps.
        x = x.reshape(B, T*C, H, W)

        z = x
        conds = [z]
        for i in range(self.N2):
            z = self.enc[i](z)
            conds.append(z)

        y = z.reshape(B, -1, C, H, W)
        return y, y.reshape(-1, C, H, W)
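

# Shape sketch for MidMetaNet (values are illustrative assumptions): with
# channel_in = T*C_ and channel_out = T2*C_, an input of shape (B, T, C_, H_, W_)
# is flattened to (B, T*C_, H_, W_), passed through the N2 MetaBlocks, and the
# resulting (B, T2*C_, H_, W_) tensor is unfolded back to (B, T2, C_, H_, W_);
# the second return value is the same tensor viewed as (B*T2, C_, H_, W_).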


class MetaBlock(nn.Module):
    """A single MetaFormer (gSTA) block of the hidden translator."""

    def __init__(self, in_channels, out_channels, input_resolution=None, model_type=None,
                 mlp_ratio=8., drop=0.0, drop_path=0.0, layer_i=0):
        super(MetaBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        model_type = model_type.lower() if model_type is not None else 'gsta'

        if model_type == 'gsta':
            self.block = GASubBlock(
                in_channels, kernel_size=21, mlp_ratio=mlp_ratio,
                drop=drop, drop_path=drop_path, act_layer=nn.GELU)
        else:
            assert False, 'Invalid model_type in SimVP'

        if in_channels != out_channels:
            # 1x1 conv to change the channel dimension after the block.
            self.reduction = nn.Conv2d(
                in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        z = self.block(x)
        return z if self.in_channels == self.out_channels else self.reduction(z)


class GASubBlock(nn.Module):
    """A GABlock (gSTA) for SimVP."""

    def __init__(self, dim, kernel_size=21, mlp_ratio=4.,
                 drop=0., drop_path=0.1, init_value=1e-2, act_layer=nn.GELU):
        super().__init__()
        self.norm1 = nn.BatchNorm2d(dim)
        self.attn = SpatialAttention(dim, kernel_size)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = nn.BatchNorm2d(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = MixMlp(
            in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        self.layer_scale_1 = nn.Parameter(init_value * torch.ones((dim)), requires_grad=True)
        self.layer_scale_2 = nn.Parameter(init_value * torch.ones((dim)), requires_grad=True)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'layer_scale_1', 'layer_scale_2'}

    def forward(self, x):
        x = x + self.drop_path(
            self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * self.attn(self.norm1(x)))
        x = x + self.drop_path(
            self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * self.mlp(self.norm2(x)))
        return x
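

# Restating the forward pass above: each GASubBlock applies two layer-scaled
# residual updates,
#     x <- x + DropPath(layer_scale_1 * SpatialAttention(BatchNorm(x)))
#     x <- x + DropPath(layer_scale_2 * MixMlp(BatchNorm(x)))
# where layer_scale_1 and layer_scale_2 are per-channel parameters initialised
# to init_value.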


class SpatialAttention(nn.Module):
    """A spatial attention block for SimVP."""

    def __init__(self, d_model, kernel_size=21, attn_shortcut=True):
        super().__init__()

        self.proj_1 = nn.Conv2d(d_model, d_model, 1)
        self.activation = nn.GELU()
        self.spatial_gating_unit = AttentionModule(d_model, kernel_size)
        self.proj_2 = nn.Conv2d(d_model, d_model, 1)
        self.attn_shortcut = attn_shortcut

    def forward(self, x):
        if self.attn_shortcut:
            shortcut = x.clone()
        x = self.proj_1(x)
        x = self.activation(x)
        x = self.spatial_gating_unit(x)
        x = self.proj_2(x)
        if self.attn_shortcut:
            x = x + shortcut
        return x


class AttentionModule(nn.Module):
    """Large-kernel attention for SimVP."""

    def __init__(self, dim, kernel_size, dilation=3):
        super().__init__()
        d_k = 2 * dilation - 1
        d_p = (d_k - 1) // 2
        dd_k = kernel_size // dilation + ((kernel_size // dilation) % 2 - 1)
        dd_p = (dilation * (dd_k - 1) // 2)

        self.conv0 = nn.Conv2d(dim, dim, d_k, padding=d_p, groups=dim)
        self.conv_spatial = nn.Conv2d(
            dim, dim, dd_k, stride=1, padding=dd_p, groups=dim, dilation=dilation)
        self.conv1 = nn.Conv2d(dim, 2*dim, 1)

    def forward(self, x):
        u = x.clone()
        attn = self.conv0(x)
        attn = self.conv_spatial(attn)

        f_g = self.conv1(attn)
        split_dim = f_g.shape[1] // 2
        f_x, g_x = torch.split(f_g, split_dim, dim=1)
        return torch.sigmoid(g_x) * f_x
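

# Kernel decomposition sketch: with the defaults kernel_size=21 and dilation=3,
# d_k = 2*3 - 1 = 5 (a 5x5 depthwise conv, padding 2) and
# dd_k = 21//3 + ((21//3) % 2 - 1) = 7 (a 7x7 depthwise conv with dilation 3,
# padding 9), followed by a 1x1 conv. The cascade approximates a single large
# ~21x21 kernel at a fraction of the parameters, in the spirit of large-kernel
# attention; here the 1x1 conv outputs 2*dim channels that are split into a
# value branch f_x and a sigmoid gate g_x.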


class DWConv(nn.Module):
    """3x3 depthwise convolution used inside MixMlp."""

    def __init__(self, dim=768):
        super(DWConv, self).__init__()
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, x):
        x = self.dwconv(x)
        return x


class MixMlp(nn.Module):
    """MLP of the gSTA block: 1x1 conv -> 3x3 depthwise conv -> GELU -> 1x1 conv."""

    def __init__(self,
                 in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        x = self.fc1(x)
        x = self.dwconv(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
""" |
|
|
From TIMM repo: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py |
|
|
""" |
|
|
def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): |
|
|
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). |
|
|
|
|
|
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, |
|
|
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... |
|
|
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for |
|
|
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use |
|
|
'survival rate' as the argument. |
|
|
|
|
|
""" |
|
|
if drop_prob == 0. or not training: |
|
|
return x |
|
|
keep_prob = 1 - drop_prob |
|
|
shape = (x.shape[0],) + (1,) * (x.ndim - 1) |
|
|
random_tensor = x.new_empty(shape).bernoulli_(keep_prob) |
|
|
if keep_prob > 0.0 and scale_by_keep: |
|
|
random_tensor.div_(keep_prob) |
|
|
return x * random_tensor |
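

# Behaviour sketch (a restatement of drop_path above): with drop_prob=0.25 and
# training=True, each sample in the batch keeps its residual branch with
# probability 0.75; kept samples are scaled by 1/0.75 so the expected value is
# unchanged. At evaluation time (training=False) the input passes through untouched.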


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)

    def extra_repr(self):
        return f'drop_prob={round(self.drop_prob,3):0.3f}'


def _trunc_normal_(tensor, mean, std, a, b):
    # In-place truncated normal initialisation: sample uniformly in the CDF
    # range of the truncation bounds, then map back through the inverse CDF.

    def norm_cdf(x):
        # Standard normal cumulative distribution function.
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    # Upper and lower CDF values of the truncation bounds.
    l = norm_cdf((a - mean) / std)
    u = norm_cdf((b - mean) / std)

    # Fill the tensor uniformly in [2l-1, 2u-1], then apply the inverse error
    # function to obtain a truncated standard normal.
    tensor.uniform_(2 * l - 1, 2 * u - 1)
    tensor.erfinv_()

    # Scale and shift to the requested std and mean.
    tensor.mul_(std * math.sqrt(2.))
    tensor.add_(mean)

    # Clamp to make sure the values stay within the bounds.
    tensor.clamp_(min=a, max=b)
    return tensor
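

# The _init_weights methods in GASubBlock and MixMlp call trunc_normal_, which is
# not defined in this file. A thin no-grad wrapper over _trunc_normal_, in the
# style of the timm helper, is assumed here.
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    with torch.no_grad():
        return _trunc_normal_(tensor, mean, std, a, b)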


class Encoder(nn.Module):
    def __init__(self, C_in, C_hid, N_S):
        super(Encoder, self).__init__()
        strides = stride_generator(N_S)
        self.enc = nn.Sequential(
            ConvSC(C_in, C_hid, stride=strides[0]),
            *[ConvSC(C_hid, C_hid, stride=s) for s in strides[1:]]
        )

    def forward(self, x):
        enc1 = self.enc[0](x)
        latent = enc1
        for i in range(1, len(self.enc)):
            latent = self.enc[i](latent)
        return latent


class Decoder(nn.Module):
    def __init__(self, C_hid, C_out, N_S, last_activation='sigmoid'):
        super(Decoder, self).__init__()
        strides = stride_generator(N_S, reverse=True)
        self.dec = nn.Sequential(
            *[ConvSC(C_hid, C_hid, stride=s, transpose=True) for s in strides[:-1]],
            ConvSC(C_hid, C_hid, stride=strides[-1], transpose=True)
        )
        self.readout = nn.Conv2d(C_hid, C_out, 1)
        if last_activation == 'sigmoid':
            self.last = nn.Sigmoid()
        else:
            self.last = nn.Identity()

    def forward(self, hid):
        for i in range(0, len(self.dec)-1):
            hid = self.dec[i](hid)
        Y = self.dec[-1](hid)
        Y = self.readout(Y)
        return self.last(Y)


class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, transpose=False, act_norm=False):
        super(BasicConv2d, self).__init__()
        self.act_norm = act_norm
        if not transpose:
            self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                                  stride=stride, padding=padding)
        else:
            self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size,
                                           stride=stride, padding=padding, output_padding=stride // 2)
        self.norm = nn.GroupNorm(2, out_channels)
        self.act = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        y = self.conv(x)
        if self.act_norm:
            y = self.act(self.norm(y))
        return y


class ConvSC(nn.Module):
    def __init__(self, C_in, C_out, stride, transpose=False, act_norm=True):
        super(ConvSC, self).__init__()
        if stride == 1:
            transpose = False
        self.conv = BasicConv2d(C_in, C_out, kernel_size=3, stride=stride,
                                padding=1, transpose=transpose, act_norm=act_norm)

    def forward(self, x):
        y = self.conv(x)
        return y


def stride_generator(N, reverse=False):
    strides = [1, 2] * 10
    if reverse:
        return list(reversed(strides[:N]))
    else:
        return strides[:N]
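

# Stride sketch (a restatement of stride_generator above): stride_generator(4)
# returns [1, 2, 1, 2] for the encoder and [2, 1, 2, 1] when reverse=True for
# the decoder, i.e. N_S/2 stride-2 stages, which matches the 2**(N_S/2) spatial
# downsampling factor assumed in SimVPV2_Model.__init__.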