import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
class Decoder_Identity(nn.Module):
    """Decoder that reconstructs a 3-channel image from a 64-channel feature
    map, upsampling it 4x overall via two transposed-convolution stages.
    """

    def __init__(self):
        super(Decoder_Identity, self).__init__()

        # First upsampling stage: 2x spatial, 64 -> 32 channels.
        self.conv_up_2 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=3, stride=2, padding=1, output_padding=1, dilation=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=True),
            nn.ReLU()
        )

        # Second upsampling stage: 2x spatial, 32 -> 16 channels.
        self.conv_up_1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=3, stride=2, padding=1, output_padding=1, dilation=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1, bias=True),
            nn.ReLU()
        )

        # 1x1 projection to a 3-channel RGB output.
        self.conv_last = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=3, kernel_size=1, bias=True),
            nn.ReLU()
        )

    def forward(self, feat):
        featmap_2 = self.conv_up_2(feat)
        featmap_1 = self.conv_up_1(featmap_2)
        out = self.conv_last(featmap_1)

        return out
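
# Illustrative shape check for Decoder_Identity (not from the original file;
# the batch and spatial sizes below are assumptions for demonstration only).
def _example_decoder_identity():
    decoder = Decoder_Identity()
    feat = torch.randn(1, 64, 32, 32)
    out = decoder(feat)
    # Each transposed-conv stage doubles the resolution: 32 -> 64 -> 128.
    assert out.shape == (1, 3, 128, 128)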
class Decoder_SR(nn.Module):
    """Decoder that reconstructs a super-resolved 3-channel image from a
    64-channel feature map. Two transposed-convolution stages first upsample
    4x; nearest-neighbor upsampling then enlarges the result by a further
    factor of `scale` (2 or 4).
    """

    def __init__(self, scale=4):
        super(Decoder_SR, self).__init__()

        self.scale = scale

        # First upsampling stage: 2x spatial, 64 -> 32 channels.
        self.conv_up_2 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=3, stride=2, padding=1, output_padding=1, dilation=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, bias=True),
            nn.ReLU()
        )

        # Second upsampling stage: 2x spatial, 32 -> 16 channels.
        self.conv_up_1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=3, stride=2, padding=1, output_padding=1, dilation=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1, bias=True),
            nn.ReLU()
        )

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # Convolutions applied after each nearest-neighbor upsampling step.
        self.upsample_1 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1, bias=True)
        self.upsample_2 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1, bias=True)

        self.HR_conv = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1, bias=True)
        self.conv_last = nn.Conv2d(in_channels=16, out_channels=3, kernel_size=3, padding=1, bias=True)

    def forward(self, feat):
        featmap_2 = self.conv_up_2(feat)
        featmap_1 = self.conv_up_1(featmap_2)

        if self.scale == 4:
            featmap = self.lrelu(self.upsample_1(F.interpolate(featmap_1, scale_factor=2, mode='nearest')))
            featmap = self.lrelu(self.upsample_2(F.interpolate(featmap, scale_factor=2, mode='nearest')))
        elif self.scale == 2:
            featmap = self.lrelu(self.upsample_1(F.interpolate(featmap_1, scale_factor=2, mode='nearest')))
        else:
            # Guard against an undefined `featmap` for unsupported scales.
            raise ValueError(f'Unsupported scale: {self.scale} (expected 2 or 4).')

        out = self.conv_last(self.lrelu(self.HR_conv(featmap)))

        return out
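
# Illustrative shape check for Decoder_SR (not from the original file; sizes
# are assumptions). With scale=4 the transposed convolutions upsample 4x and
# the nearest-neighbor stages another 4x, so a 16x16 feature map yields 256x256.
def _example_decoder_sr():
    decoder = Decoder_SR(scale=4)
    feat = torch.randn(1, 64, 16, 16)
    assert decoder(feat).shape == (1, 3, 256, 256)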
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
    """Initialize network weights.

    Args:
        module_list (list[nn.Module] | nn.Module): Modules to be initialized.
        scale (float): Scale initialized weights, especially for residual
            blocks. Default: 1.
        bias_fill (float): The value to fill bias. Default: 0.
        kwargs (dict): Other arguments for initialization function.
    """
    if not isinstance(module_list, list):
        module_list = [module_list]
    for module in module_list:
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, _BatchNorm):
                init.constant_(m.weight, 1)
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
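
# Usage sketch for default_init_weights (illustrative; 0.1 mirrors the scale
# that ResidualDenseBlock below passes for its residual-branch convolutions).
def _example_default_init_weights():
    conv = nn.Conv2d(16, 16, 3, padding=1)
    default_init_weights(conv, scale=0.1)  # Kaiming init, weights then scaled by 0.1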
def make_layer(basic_block, num_basic_block, **kwarg):
    """Make layers by stacking the same blocks.

    Args:
        basic_block (nn.Module): nn.Module class for basic block.
        num_basic_block (int): Number of blocks.
        kwarg (dict): Keyword arguments forwarded to each block's constructor.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    layers = []
    for _ in range(num_basic_block):
        layers.append(basic_block(**kwarg))
    return nn.Sequential(*layers)
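
# Usage sketch for make_layer (illustrative). ResidualDenseBlock is defined
# further below; the reference resolves when the function is called.
def _example_make_layer():
    trunk = make_layer(ResidualDenseBlock, 3, num_feat=64, num_grow_ch=32)
    feat = torch.randn(1, 64, 8, 8)
    assert trunk(feat).shape == feat.shape  # each block is shape-preserving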
class ResidualDenseBlock(nn.Module):
    """Residual Dense Block.

    Used in RRDB block in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat=64, num_grow_ch=32):
        super(ResidualDenseBlock, self).__init__()
        # Each conv receives the concatenation of the block input and all
        # preceding intermediate outputs (dense connectivity).
        self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
        self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # Initialize the dense convolutions with a small scale (0.1), which
        # eases training of the residual branch.
        default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)

    def forward(self, x):
        x1 = self.lrelu(self.conv1(x))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # Residual scaling by 0.2, as in ESRGAN.
        return x5 * 0.2 + x
class RRDB(nn.Module):
    """Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat, num_grow_ch=32):
        super(RRDB, self).__init__()
        self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)

    def forward(self, x):
        out = self.rdb1(x)
        out = self.rdb2(out)
        out = self.rdb3(out)
        # Residual scaling by 0.2, as in ESRGAN.
        return out * 0.2 + x
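
# Illustrative check: an RRDB is shape-preserving, which is what lets the
# decoders below stack num_block of them with make_layer.
def _example_rrdb():
    block = RRDB(num_feat=64, num_grow_ch=32)
    feat = torch.randn(1, 64, 8, 8)
    assert block(feat).shape == feat.shape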
class Decoder_Id_RRDB(nn.Module):
    """RRDB-based decoder that keeps the input resolution (no upsampling path).

    Note: the `scale` argument is currently unused.
    """

    def __init__(self, num_in_ch, num_out_ch=3, scale=4, num_feat=64, num_block=10, num_grow_ch=32):
        super(Decoder_Id_RRDB, self).__init__()

        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
        self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)

        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        feat = self.conv_first(x)
        body_feat = self.conv_body(self.body(feat))
        # Long skip connection around the RRDB trunk.
        feat = feat + body_feat

        out = self.conv_last(self.lrelu(self.conv_hr(feat)))
        return out
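
# Illustrative shape check (sizes are assumptions): Decoder_Id_RRDB preserves
# the input resolution, so an (N, 64, H, W) input yields an (N, 3, H, W) output.
def _example_decoder_id_rrdb():
    decoder = Decoder_Id_RRDB(num_in_ch=64, num_block=2)  # small trunk for speed
    feat = torch.randn(1, 64, 32, 32)
    assert decoder(feat).shape == (1, 3, 32, 32)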
class Decoder_SR_RRDB(nn.Module):
    """RRDB-based super-resolution decoder.

    Note: the forward pass always applies two fixed 2x nearest-neighbor
    upsampling steps (4x overall); the `scale` argument is currently unused.
    """

    def __init__(self, num_in_ch, num_out_ch=3, scale=4, num_feat=64, num_block=23, num_grow_ch=32):
        super(Decoder_SR_RRDB, self).__init__()
        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
        self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)

        self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        feat = self.conv_first(x)
        body_feat = self.conv_body(self.body(feat))
        # Long skip connection around the RRDB trunk.
        feat = feat + body_feat

        # Fixed 4x upsampling: two nearest-neighbor 2x steps, each followed by a conv.
        feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))
        feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest')))
        out = self.conv_last(self.lrelu(self.conv_hr(feat)))
        return out
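
# Illustrative shape check (sizes are assumptions): Decoder_SR_RRDB always
# upsamples 4x regardless of the `scale` argument.
def _example_decoder_sr_rrdb():
    decoder = Decoder_SR_RRDB(num_in_ch=64, num_block=2)  # small trunk for speed
    feat = torch.randn(1, 64, 16, 16)
    assert decoder(feat).shape == (1, 3, 64, 64)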