import torch
import torch.nn as nn
from lambda_networks import LambdaLayer

from . import common


def build_model(args):
    return ResNet(args)


class ConvGRU(nn.Module):
    """Convolutional GRU cell with 3x3 gate convolutions."""

    def __init__(self, hidden_dim=128, input_dim=192 + 128):
        super(ConvGRU, self).__init__()
        self.convz = nn.Conv2d(hidden_dim + input_dim, hidden_dim, 3, padding=1)
        self.convr = nn.Conv2d(hidden_dim + input_dim, hidden_dim, 3, padding=1)
        self.convq = nn.Conv2d(hidden_dim + input_dim, hidden_dim, 3, padding=1)

    def forward(self, h, x):
        hx = torch.cat([h, x], dim=1)

        z = torch.sigmoid(self.convz(hx))  # update gate
        r = torch.sigmoid(self.convr(hx))  # reset gate
        q = torch.tanh(self.convq(torch.cat([r * h, x], dim=1)))  # candidate state

        return (1 - z) * h + z * q
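
# Usage sketch for ConvGRU (illustrative, not from the original file): one step
# maps a (B, hidden_dim, H, W) state and a (B, input_dim, H, W) input to an
# updated state with the same shape as h.
#
#   gru = ConvGRU(hidden_dim=128, input_dim=320)
#   h = gru(torch.zeros(2, 128, 32, 32), torch.zeros(2, 320, 32, 32))
#   h.shape  # torch.Size([2, 128, 32, 32])
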
class SepConvGRU(nn.Module):
    """Separable convolutional GRU cell: a full GRU update with 1x5 gates
    (along width), followed by a second update with 5x1 gates (along height)."""

    def __init__(self, hidden_dim=128, input_dim=192 + 128):
        super(SepConvGRU, self).__init__()
        self.convz1 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convr1 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convq1 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))

        self.convz2 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convr2 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convq2 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))

    def forward(self, h, x):
        # horizontal pass
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(self.convz1(hx))
        r = torch.sigmoid(self.convr1(hx))
        q = torch.tanh(self.convq1(torch.cat([r * h, x], dim=1)))
        h = (1 - z) * h + z * q

        # vertical pass
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(self.convz2(hx))
        r = torch.sigmoid(self.convr2(hx))
        q = torch.tanh(self.convq2(torch.cat([r * h, x], dim=1)))
        h = (1 - z) * h + z * q

        return h


class ResNet(nn.Module):
    """Residual network: conv head, ResBlock body with a LambdaLayer in the
    middle, and a conv tail mapping back to image space."""

    def __init__(
        self,
        args,
        in_channels=3,
        out_channels=3,
        n_feats=None,
        kernel_size=None,
        n_resblocks=None,
        mean_shift=True,
    ):
        super(ResNet, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.n_feats = args.n_feats if n_feats is None else n_feats
        self.kernel_size = args.kernel_size if kernel_size is None else kernel_size
        self.n_resblocks = args.n_resblocks if n_resblocks is None else n_resblocks

        self.mean_shift = mean_shift
        self.rgb_range = args.rgb_range
        self.mean = self.rgb_range / 2

        # head: shallow feature extraction (in_channels -> n_feats)
        m_head = [common.default_conv(self.in_channels, self.n_feats, self.kernel_size)]
        for _ in range(3):
            m_head.append(common.ResBlock(self.n_feats, self.kernel_size))

        # body: residual blocks with a LambdaLayer in the middle
        m_body = []
        for _ in range(self.n_resblocks // 2):
            m_body.append(common.ResBlock(self.n_feats, self.kernel_size))
        m_body.append(
            LambdaLayer(
                dim=self.n_feats, dim_out=self.n_feats, r=23, dim_k=16, heads=4, dim_u=4
            )
        )
        for _ in range(self.n_resblocks // 2):
            m_body.append(common.ResBlock(self.n_feats, self.kernel_size))

        # tail: reconstruction (n_feats -> out_channels)
        m_tail = []
        for _ in range(3):
            m_tail.append(common.ResBlock(self.n_feats, self.kernel_size))
        m_tail.append(
            common.default_conv(self.n_feats, self.out_channels, self.kernel_size)
        )

        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)
        # recurrent refinement cell; instantiated but not used in forward() below
        self.gru = SepConvGRU(hidden_dim=self.n_feats, input_dim=self.n_feats)

    def forward(self, input):
        if self.mean_shift:
            input = input - self.mean

        # head -> body -> tail: the head lifts the image to n_feats channels,
        # which the body's ResBlocks expect, and the tail maps back to
        # out_channels
        x = self.head(input)
        output = self.tail(self.body(x))

        if self.mean_shift:
            output = output + self.mean

        return output
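

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the original module. The args
    # fields below are assumptions inferred from how ResNet.__init__ reads
    # them, and the expected shapes assume common.default_conv/ResBlock
    # preserve spatial size (as in EDSR-style repos). Because of the relative
    # import above, run this as a module, e.g. `python -m <package>.<module>`.
    from types import SimpleNamespace

    args = SimpleNamespace(n_feats=64, kernel_size=3, n_resblocks=8, rgb_range=255)
    model = build_model(args)

    y = model(torch.zeros(1, 3, 64, 64))
    print(y.shape)  # expected: torch.Size([1, 3, 64, 64])

    # One SepConvGRU step on matching-shape hidden/input feature maps.
    gru = SepConvGRU(hidden_dim=64, input_dim=64)
    h = gru(torch.zeros(1, 64, 32, 32), torch.zeros(1, 64, 32, 32))
    print(h.shape)  # expected: torch.Size([1, 64, 32, 32])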