import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module, Dropout


GRAD_CLIP = 0.1


class GradClip(torch.autograd.Function):
    """Identity in the forward pass; zeroes NaN gradients and clips the rest in the backward pass."""

    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad_x):
        # Replace NaN gradients with zeros, then clamp to keep the backward pass stable.
        grad_x = torch.where(torch.isnan(grad_x), torch.zeros_like(grad_x), grad_x)
        return grad_x.clamp(min=-0.01, max=0.01)


class GradientClip(nn.Module):
    """nn.Module wrapper around GradClip so it can be used inside nn.Sequential."""
    def __init__(self):
        super(GradientClip, self).__init__()

    def forward(self, x):
        return GradClip.apply(x)
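

# Illustrative sketch (not from the original source): GradientClip can be
# appended after a prediction head so that gradients flowing back through that
# head are de-NaN'ed and clipped; the channel sizes below are arbitrary.
def _example_gradient_clip_head(in_channels=128, out_channels=2):
    head = nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
        GradientClip(),
    )
    x = torch.randn(1, in_channels, 16, 16)
    return head(x)  # forward pass is unchanged; clipping only affects backward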


def _make_divisible(v, divisor, min_value=None):
    """Round v to the nearest multiple of divisor, never dropping below min_value."""
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure the rounding does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
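

# A quick worked example (illustrative, not from the original source):
# _make_divisible(37, 8) rounds 37 to 40, the nearest multiple of 8, while
# _make_divisible(9, 8) first rounds down to 8, which is more than a 10% drop
# from 9, so the divisor is added back and the result is 16.
def _example_make_divisible():
    assert _make_divisible(37, 8) == 40
    assert _make_divisible(9, 8) == 16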


class ConvNextBlock(nn.Module):
    r""" ConvNeXt Block. There are two equivalent implementations:
    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
    (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
    We use (2) as we find it slightly faster in PyTorch. A final 1x1 convolution
    projects the residual output from dim to output_dim channels.

    Args:
        dim (int): Number of input channels.
        output_dim (int): Number of output channels produced by the final 1x1 convolution.
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
    """
    def __init__(self, dim, output_dim, layer_scale_init_value=1e-6):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
        self.norm = LayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * output_dim)  # pointwise convs, implemented as linear layers
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * output_dim, dim)
        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim),
                                  requires_grad=True) if layer_scale_init_value > 0 else None
        self.final = nn.Conv2d(dim, output_dim, kernel_size=1, padding=0)

    def forward(self, x):
        input = x
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.norm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
        x = self.final(input + x)  # residual connection, then project to output_dim
        return x
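

# Minimal shape sketch (illustrative, not from the original source): a
# ConvNextBlock keeps the spatial resolution and maps dim -> output_dim
# channels; the sizes below are arbitrary. Call this after the module is fully
# defined, since ConvNextBlock uses the LayerNorm class declared below.
def _example_convnext_block():
    block = ConvNextBlock(dim=64, output_dim=128)
    x = torch.randn(2, 64, 32, 32)            # (N, C, H, W)
    y = block(x)
    assert y.shape == (2, 128, 32, 32)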


class LayerNorm(nn.Module):
    r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
    The data format specifies the ordering of the dimensions in the inputs: channels_last
    corresponds to inputs with shape (batch_size, height, width, channels), while
    channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    """
    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError
        self.normalized_shape = (normalized_shape, )

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == "channels_first":
            # Normalize over the channel dimension manually, since F.layer_norm
            # expects the normalized dimensions to be the trailing ones.
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
            return x
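

# Equivalence sketch (illustrative, not from the original source): normalizing
# over the channel dimension gives the same result whichever data_format is
# used, as long as the tensor is permuted accordingly.
def _example_layernorm_formats():
    x = torch.randn(2, 8, 4, 4)                     # (N, C, H, W)
    ln_first = LayerNorm(8, data_format="channels_first")
    ln_last = LayerNorm(8, data_format="channels_last")
    y_first = ln_first(x)
    y_last = ln_last(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
    assert torch.allclose(y_first, y_last, atol=1e-5)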


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution without padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0)


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1)


class BasicBlock(nn.Module):
    """Standard two-layer residual block with an optional 1x1 downsampling shortcut."""
    def __init__(self, in_planes, planes, stride=1, norm_layer=nn.BatchNorm2d):
        super().__init__()

        self.conv1 = conv3x3(in_planes, planes, stride)
        self.conv2 = conv3x3(planes, planes)
        self.bn1 = norm_layer(planes)
        self.bn2 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        if stride == 1 and in_planes == planes:
            # Identity shortcut: the input already matches the output shape.
            self.downsample = None
        else:
            # Project the shortcut with a strided 1x1 conv so shapes match for the residual sum.
            self.bn3 = norm_layer(planes)
            self.downsample = nn.Sequential(
                conv1x1(in_planes, planes, stride=stride),
                self.bn3
            )

    def forward(self, x):
        y = x
        y = self.relu(self.bn1(self.conv1(y)))
        y = self.relu(self.bn2(self.conv2(y)))
        if self.downsample is not None:
            x = self.downsample(x)
        return self.relu(x + y)
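

# Minimal shape sketch (illustrative, not from the original source): with
# stride=2 the block halves the spatial resolution and the 1x1 shortcut keeps
# the residual sum well-formed; the sizes below are arbitrary.
def _example_basic_block():
    block = BasicBlock(in_planes=32, planes=64, stride=2)
    x = torch.randn(2, 32, 64, 64)              # (N, C, H, W)
    y = block(x)
    assert y.shape == (2, 64, 32, 32)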