code stringlengths 17 6.64M |
|---|
class CSFI2(nn.Module):
    """Cross-Scale Feature Integration between 1x and 2x features.

    Cross-Scale Feature Integration in Texture Transformer Network for
    Image Super-Resolution. Features are exchanged between the two scales:
    down-sampling (2x -> 1x) uses a stride-2 conv, while up-sampling
    (1x -> 2x) uses bicubic interpolation followed by a conv layer.

    Args:
        mid_channels (int): Channel number of intermediate features.
    """

    def __init__(self, mid_channels):
        super().__init__()
        self.conv1to2 = _conv1x1_layer(mid_channels, mid_channels)
        self.conv2to1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
        self.conv_merge1 = _conv3x3_layer(mid_channels * 2, mid_channels)
        self.conv_merge2 = _conv3x3_layer(mid_channels * 2, mid_channels)

    def forward(self, x1, x2):
        """Forward function.

        Args:
            x1 (Tensor): Input tensor with shape (n, c, h, w).
            x2 (Tensor): Input tensor with shape (n, c, 2h, 2w).

        Returns:
            tuple(Tensor): Updated (x1, x2) with the same shapes as the
                inputs.
        """
        # 1x -> 2x: bicubic upsample, then conv.
        up_1to2 = F.interpolate(
            x1, scale_factor=2, mode='bicubic', align_corners=False)
        up_1to2 = F.relu(self.conv1to2(up_1to2))
        # 2x -> 1x: stride-2 conv.
        down_2to1 = F.relu(self.conv2to1(x2))
        # Merge each scale with the feature arriving from the other scale.
        merged1 = F.relu(self.conv_merge1(torch.cat((x1, down_2to1), dim=1)))
        merged2 = F.relu(self.conv_merge2(torch.cat((x2, up_1to2), dim=1)))
        return merged1, merged2
|
class CSFI3(nn.Module):
    """Cross-Scale Feature Integration between 1x, 2x, and 4x features.

    Cross-Scale Feature Integration in Texture Transformer Network for
    Image Super-Resolution. Features are exchanged among all three scales;
    e.g. `conv2to1` maps the 2x feature to the 1x scale. Down-sampling
    uses stride-2 convs (applied twice for 4x -> 1x) and up-sampling uses
    bicubic interpolation followed by a conv layer.

    Args:
        mid_channels (int): Channel number of intermediate features.
    """

    def __init__(self, mid_channels):
        super().__init__()
        self.conv1to2 = _conv1x1_layer(mid_channels, mid_channels)
        self.conv1to4 = _conv1x1_layer(mid_channels, mid_channels)
        self.conv2to1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
        self.conv2to4 = _conv1x1_layer(mid_channels, mid_channels)
        self.conv4to1_1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
        self.conv4to1_2 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
        self.conv4to2 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
        self.conv_merge1 = _conv3x3_layer(mid_channels * 3, mid_channels)
        self.conv_merge2 = _conv3x3_layer(mid_channels * 3, mid_channels)
        self.conv_merge4 = _conv3x3_layer(mid_channels * 3, mid_channels)

    def forward(self, x1, x2, x4):
        """Forward function.

        Args:
            x1 (Tensor): Input tensor with shape (n, c, h, w).
            x2 (Tensor): Input tensor with shape (n, c, 2h, 2w).
            x4 (Tensor): Input tensor with shape (n, c, 4h, 4w).

        Returns:
            tuple(Tensor): Updated (x1, x2, x4) with the same shapes as
                the inputs.
        """

        def _upsample(feat, factor, conv):
            # Bicubic upsample followed by conv + ReLU.
            resized = F.interpolate(
                feat, scale_factor=factor, mode='bicubic',
                align_corners=False)
            return F.relu(conv(resized))

        up_1to2 = _upsample(x1, 2, self.conv1to2)
        up_1to4 = _upsample(x1, 4, self.conv1to4)
        down_2to1 = F.relu(self.conv2to1(x2))
        up_2to4 = _upsample(x2, 2, self.conv2to4)
        # 4x -> 1x needs two stride-2 convs.
        down_4to1 = F.relu(self.conv4to1_2(F.relu(self.conv4to1_1(x4))))
        down_4to2 = F.relu(self.conv4to2(x4))
        merged1 = F.relu(
            self.conv_merge1(torch.cat((x1, down_2to1, down_4to1), dim=1)))
        merged2 = F.relu(
            self.conv_merge2(torch.cat((x2, up_1to2, down_4to2), dim=1)))
        merged4 = F.relu(
            self.conv_merge4(torch.cat((x4, up_1to4, up_2to4), dim=1)))
        return merged1, merged2, merged4
|
class MergeFeatures(nn.Module):
    """Merge 1x, 2x, and 4x features into the final output image.

    Final module of Texture Transformer Network for Image Super-Resolution.
    The 1x and 2x features are bicubically upsampled to the 4x scale,
    merged with the 4x feature, and projected to the output channels.

    Args:
        mid_channels (int): Channel number of intermediate features.
        out_channels (int): Number of channels in the output image.
    """

    def __init__(self, mid_channels, out_channels):
        super().__init__()
        self.conv1to4 = _conv1x1_layer(mid_channels, mid_channels)
        self.conv2to4 = _conv1x1_layer(mid_channels, mid_channels)
        self.conv_merge = _conv3x3_layer(mid_channels * 3, mid_channels)
        self.conv_last1 = _conv3x3_layer(mid_channels, mid_channels // 2)
        self.conv_last2 = _conv1x1_layer(mid_channels // 2, out_channels)

    def forward(self, x1, x2, x4):
        """Forward function.

        Args:
            x1 (Tensor): Input tensor with shape (n, c, h, w).
            x2 (Tensor): Input tensor with shape (n, c, 2h, 2w).
            x4 (Tensor): Input tensor with shape (n, c, 4h, 4w).

        Returns:
            Tensor: Output tensor with shape (n, c_out, 4h, 4w), clamped
                to [-1, 1].
        """
        up_1to4 = F.interpolate(
            x1, scale_factor=4, mode='bicubic', align_corners=False)
        up_1to4 = F.relu(self.conv1to4(up_1to4))
        up_2to4 = F.interpolate(
            x2, scale_factor=2, mode='bicubic', align_corners=False)
        up_2to4 = F.relu(self.conv2to4(up_2to4))
        merged = F.relu(
            self.conv_merge(torch.cat((x4, up_1to4, up_2to4), dim=1)))
        out = self.conv_last2(self.conv_last1(merged))
        return torch.clamp(out, -1, 1)
|
@BACKBONES.register_module()
class TTSRNet(nn.Module):
    """TTSR network structure (main-net) for reference-based super-resolution.

    Paper: Learning Texture Transformer Network for Image Super-Resolution

    Adapted from 'https://github.com/researchmm/TTSR.git'
    'https://github.com/researchmm/TTSR'
    Copyright permission at 'https://github.com/researchmm/TTSR/issues/38'.

    Args:
        in_channels (int): Number of channels in the input image.
        out_channels (int): Number of channels in the output image.
        mid_channels (int): Channel number of intermediate features.
            Default: 64
        texture_channels (int): Channel number of the transferred texture
            at the largest (4x) scale; the 2x and 1x textures carry
            ``2 * texture_channels`` and ``4 * texture_channels`` channels
            respectively (see ``conv_first1/2/3`` below). Default: 64
        num_blocks (tuple[int]): Block numbers in the trunk network.
            Default: (16, 16, 8, 4)
        res_scale (float): Used to scale the residual in residual block.
            Default: 1.
    """

    def __init__(self, in_channels, out_channels, mid_channels=64, texture_channels=64, num_blocks=(16, 16, 8, 4), res_scale=1.0):
        super().__init__()
        self.texture_channels = texture_channels
        # Shallow feature extraction on the LR input.
        self.sfe = SFE(in_channels, mid_channels, num_blocks[0], res_scale)
        # Stage 1 (1x scale): fuse the 1x texture (4 * texture_channels
        # channels), refine with residual blocks, then upsample to 2x.
        self.conv_first1 = _conv3x3_layer(((4 * texture_channels) + mid_channels), mid_channels)
        self.res_block1 = make_layer(ResidualBlockNoBN, num_blocks[1], mid_channels=mid_channels, res_scale=res_scale)
        self.conv_last1 = _conv3x3_layer(mid_channels, mid_channels)
        self.up1 = PixelShufflePack(in_channels=mid_channels, out_channels=mid_channels, scale_factor=2, upsample_kernel=3)
        # Stage 2 (2x scale): fuse the 2x texture and exchange information
        # between the 1x and 2x branches via CSFI2, then upsample to 4x.
        self.conv_first2 = _conv3x3_layer(((2 * texture_channels) + mid_channels), mid_channels)
        self.csfi2 = CSFI2(mid_channels)
        self.res_block2_1 = make_layer(ResidualBlockNoBN, num_blocks[2], mid_channels=mid_channels, res_scale=res_scale)
        self.res_block2_2 = make_layer(ResidualBlockNoBN, num_blocks[2], mid_channels=mid_channels, res_scale=res_scale)
        self.conv_last2_1 = _conv3x3_layer(mid_channels, mid_channels)
        self.conv_last2_2 = _conv3x3_layer(mid_channels, mid_channels)
        self.up2 = PixelShufflePack(in_channels=mid_channels, out_channels=mid_channels, scale_factor=2, upsample_kernel=3)
        # Stage 3 (4x scale): fuse the 4x texture and exchange information
        # among all three branches via CSFI3.
        self.conv_first3 = _conv3x3_layer((texture_channels + mid_channels), mid_channels)
        self.csfi3 = CSFI3(mid_channels)
        self.res_block3_1 = make_layer(ResidualBlockNoBN, num_blocks[3], mid_channels=mid_channels, res_scale=res_scale)
        self.res_block3_2 = make_layer(ResidualBlockNoBN, num_blocks[3], mid_channels=mid_channels, res_scale=res_scale)
        self.res_block3_3 = make_layer(ResidualBlockNoBN, num_blocks[3], mid_channels=mid_channels, res_scale=res_scale)
        self.conv_last3_1 = _conv3x3_layer(mid_channels, mid_channels)
        self.conv_last3_2 = _conv3x3_layer(mid_channels, mid_channels)
        self.conv_last3_3 = _conv3x3_layer(mid_channels, mid_channels)
        # Final module: merge the three scales into the output image.
        self.merge_features = MergeFeatures(mid_channels, out_channels)

    def forward(self, x, soft_attention, textures):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            soft_attention (Tensor): Soft-Attention tensor with shape
                (n, 1, h, w).
            textures (Tuple[Tensor]): Transferred HR texture tensors,
                ordered from 1x to 4x scale:
                [(N, C, H, W), (N, C/2, 2H, 2W), ...].

        Returns:
            Tensor: Forward results.
        """
        assert (textures[(- 1)].shape[1] == self.texture_channels)
        x1 = self.sfe(x)
        # Stage 1 (1x): texture fusion, weighted by the soft-attention map.
        x1_res = torch.cat((x1, textures[0]), dim=1)
        x1_res = self.conv_first1(x1_res)
        x1 = (x1 + (x1_res * soft_attention))
        x1_res = self.res_block1(x1)
        x1_res = self.conv_last1(x1_res)
        x1 = (x1 + x1_res)
        # Stage 2 (2x): the soft-attention map is upsampled to match.
        x21 = x1
        x22 = self.up1(x1)
        x22 = F.relu(x22)
        x22_res = torch.cat((x22, textures[1]), dim=1)
        x22_res = self.conv_first2(x22_res)
        x22_res = (x22_res * F.interpolate(soft_attention, scale_factor=2, mode='bicubic', align_corners=False))
        x22 = (x22 + x22_res)
        # Cross-scale feature integration between the 1x and 2x branches.
        (x21_res, x22_res) = self.csfi2(x21, x22)
        x21_res = self.res_block2_1(x21_res)
        x22_res = self.res_block2_2(x22_res)
        x21_res = self.conv_last2_1(x21_res)
        x22_res = self.conv_last2_2(x22_res)
        x21 = (x21 + x21_res)
        x22 = (x22 + x22_res)
        # Stage 3 (4x).
        x31 = x21
        x32 = x22
        x33 = self.up2(x22)
        x33 = F.relu(x33)
        x33_res = torch.cat((x33, textures[2]), dim=1)
        x33_res = self.conv_first3(x33_res)
        x33_res = (x33_res * F.interpolate(soft_attention, scale_factor=4, mode='bicubic', align_corners=False))
        x33 = (x33 + x33_res)
        # Cross-scale feature integration among all three branches.
        (x31_res, x32_res, x33_res) = self.csfi3(x31, x32, x33)
        x31_res = self.res_block3_1(x31_res)
        x32_res = self.res_block3_2(x32_res)
        x33_res = self.res_block3_3(x33_res)
        x31_res = self.conv_last3_1(x31_res)
        x32_res = self.conv_last3_2(x32_res)
        x33_res = self.conv_last3_3(x33_res)
        x31 = (x31 + x31_res)
        x32 = (x32 + x32_res)
        x33 = (x33 + x33_res)
        x = self.merge_features(x31, x32, x33)
        return x

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained
                model. Defaults to True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif (pretrained is None):
            pass
        else:
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
|
class BaseModel(nn.Module, metaclass=ABCMeta):
    """Base model.

    All models should subclass it. Subclasses should overwrite:

    ``init_weights``, supporting to initialize models.

    ``forward_train``, supporting to forward when training.

    ``forward_test``, supporting to forward when testing.

    ``train_step``, supporting to train one step when training.
    """

    @abstractmethod
    def init_weights(self):
        """Abstract method for initializing weight.

        All subclasses should overwrite it.
        """

    @abstractmethod
    def forward_train(self, imgs, labels):
        """Abstract method for training forward.

        All subclasses should overwrite it.
        """

    @abstractmethod
    def forward_test(self, imgs):
        """Abstract method for testing forward.

        All subclasses should overwrite it.
        """

    def forward(self, imgs, labels, test_mode, **kwargs):
        """Dispatch to the train or test forward path.

        Args:
            imgs (Tensor): Input image(s).
            labels (Tensor): Ground-truth label(s).
            test_mode (bool): Whether in test mode.
            kwargs (dict): Other arguments.

        Returns:
            Tensor: Forward results.
        """
        if not test_mode:
            return self.forward_train(imgs, labels, **kwargs)
        return self.forward_test(imgs, **kwargs)

    @abstractmethod
    def train_step(self, data_batch, optimizer):
        """Abstract method for one training step.

        All subclasses should overwrite it.
        """

    def val_step(self, data_batch, **kwargs):
        """Run one validation step through the test forward path."""
        return self.forward_test(**data_batch, **kwargs)

    def parse_losses(self, losses):
        """Parse a loss dict into a total loss and scalar log variables.

        Args:
            losses (dict): Loss dict; values are tensors or lists of
                tensors.

        Returns:
            tuple: (loss, log_vars). ``loss`` is the sum of entries whose
                key contains 'loss'; ``log_vars`` maps every entry (plus
                'loss') to a python float.
        """
        log_vars = OrderedDict()
        for name, value in losses.items():
            if isinstance(value, torch.Tensor):
                log_vars[name] = value.mean()
            elif isinstance(value, list):
                log_vars[name] = sum(item.mean() for item in value)
            else:
                raise TypeError(f'{name} is not a tensor or list of tensors')
        # Only keys containing 'loss' contribute to the optimized total.
        total = sum(value for key, value in log_vars.items() if 'loss' in key)
        log_vars['loss'] = total
        for key in log_vars:
            log_vars[key] = log_vars[key].item()
        return total, log_vars
|
def build(cfg, registry, default_args=None):
    """Build module function.

    Args:
        cfg (dict | list[dict]): Configuration for building modules. A
            list of configs builds each entry and wraps the results in
            ``nn.Sequential``.
        registry (obj): ``registry`` object.
        default_args (dict, optional): Default arguments. Defaults to None.
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    modules = (build_from_cfg(item, registry, default_args) for item in cfg)
    return nn.Sequential(*modules)
|
def build_backbone(cfg):
    """Build a backbone from config.

    Args:
        cfg (dict): Configuration for building backbone.
    """
    return build(cfg, BACKBONES)
|
def build_component(cfg):
    """Build a component from config.

    Args:
        cfg (dict): Configuration for building component.
    """
    return build(cfg, COMPONENTS)
|
def build_loss(cfg):
    """Build a loss from config.

    Args:
        cfg (dict): Configuration for building loss.
    """
    return build(cfg, LOSSES)
|
def build_model(cfg, train_cfg=None, test_cfg=None):
    """Build a model from config.

    Args:
        cfg (dict): Configuration for building model.
        train_cfg (dict): Training configuration. Default: None.
        test_cfg (dict): Testing configuration. Default: None.
    """
    extra_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return build(cfg, MODELS, extra_args)
|
class ASPPPooling(nn.Sequential):
    """Image-level pooling branch of ASPP.

    Global average pooling followed by a 1x1 conv; the result is
    bilinearly resized back to the input's spatial size.
    """

    def __init__(self, in_channels, out_channels, conv_cfg, norm_cfg, act_cfg):
        super().__init__(
            nn.AdaptiveAvgPool2d(1),
            ConvModule(
                in_channels,
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))

    def forward(self, x):
        out_size = x.shape[-2:]
        out = x
        for layer in self:
            out = layer(out)
        # Restore the spatial resolution lost by global pooling.
        return F.interpolate(
            out, size=out_size, mode='bilinear', align_corners=False)
|
class ASPP(nn.Module):
    """ASPP module from DeepLabV3.

    The code is adopted from
    https://github.com/pytorch/vision/blob/master/torchvision/models/
    segmentation/deeplabv3.py

    For more information about the module:
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    Args:
        in_channels (int): Input channels of the module.
        out_channels (int): Output channels of the module.
        mid_channels (int): Output channels of the intermediate ASPP conv
            modules.
        dilations (Sequence[int]): Dilation rates of the three dilated
            ASPP conv modules. Default: (12, 24, 36).
        conv_cfg (dict): Config dict for convolution layer. If "None",
            nn.Conv2d will be applied. Default: None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        separable_conv (bool): Whether to replace normal conv with
            depthwise separable conv, which is faster. Default: False.
    """

    def __init__(self, in_channels, out_channels=256, mid_channels=256, dilations=(12, 24, 36), conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), separable_conv=False):
        super().__init__()
        conv_module = (
            DepthwiseSeparableConvModule if separable_conv else ConvModule)
        # Branch 1: plain 1x1 conv.
        branches = [
            ConvModule(
                in_channels,
                mid_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        ]
        # Branches 2-4: dilated 3x3 convs.
        branches.extend(
            conv_module(
                in_channels,
                mid_channels,
                3,
                padding=dilation,
                dilation=dilation,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for dilation in dilations)
        # Branch 5: image-level pooling.
        branches.append(
            ASPPPooling(in_channels, mid_channels, conv_cfg, norm_cfg,
                        act_cfg))
        self.convs = nn.ModuleList(branches)
        # Project the concatenation of all five branches.
        self.project = nn.Sequential(
            ConvModule(
                5 * mid_channels,
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg), nn.Dropout(0.5))

    def forward(self, x):
        """Forward function for ASPP module.

        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).

        Returns:
            Tensor: Output tensor.
        """
        feats = torch.cat([branch(x) for branch in self.convs], dim=1)
        return self.project(feats)
|
class ContextualAttentionModule(nn.Module):
    """Contextual attention module.

    The details of this module can be found in:
    Generative Image Inpainting with Contextual Attention

    Args:
        unfold_raw_kernel_size (int): Kernel size used in unfolding raw
            feature. Default: 4.
        unfold_raw_stride (int): Stride used in unfolding raw feature.
            Default: 2.
        unfold_raw_padding (int): Padding used in unfolding raw feature.
            Default: 1.
        unfold_corr_kernel_size (int): Kernel size used in unfolding
            context for computing correlation maps. Default: 3.
        unfold_corr_stride (int): Stride used in unfolding context for
            computing correlation maps. Default: 1.
        unfold_corr_dilation (int): Dilation used in unfolding context for
            computing correlation maps. Default: 1.
        unfold_corr_padding (int): Padding used in unfolding context for
            computing correlation maps. Default: 1.
        scale (float): The rescale factor used in resizing input features.
            Default: 0.5.
        fuse_kernel_size (int): The kernel size used in fusion module.
            Default: 3.
        softmax_scale (float): The scale factor for softmax function.
            Default: 10.
        return_attention_score (bool): If True, the attention score will be
            returned. Default: True.
    """

    def __init__(self, unfold_raw_kernel_size=4, unfold_raw_stride=2, unfold_raw_padding=1, unfold_corr_kernel_size=3, unfold_corr_stride=1, unfold_corr_dilation=1, unfold_corr_padding=1, scale=0.5, fuse_kernel_size=3, softmax_scale=10, return_attention_score=True):
        super().__init__()
        self.unfold_raw_kernel_size = unfold_raw_kernel_size
        self.unfold_raw_stride = unfold_raw_stride
        self.unfold_raw_padding = unfold_raw_padding
        self.unfold_corr_kernel_size = unfold_corr_kernel_size
        self.unfold_corr_stride = unfold_corr_stride
        self.unfold_corr_dilation = unfold_corr_dilation
        self.unfold_corr_padding = unfold_corr_padding
        self.scale = scale
        self.fuse_kernel_size = fuse_kernel_size
        # Fusion only makes sense for kernels larger than 1x1.
        self.with_fuse_correlation = (fuse_kernel_size > 1)
        self.softmax_scale = softmax_scale
        self.return_attention_score = return_attention_score
        if self.with_fuse_correlation:
            assert ((fuse_kernel_size % 2) == 1)
            # Identity ('eye') kernel used by `fuse_correlation_map` to
            # reward diagonally-consistent runs of matching patches.
            fuse_kernel = torch.eye(fuse_kernel_size).view(1, 1, fuse_kernel_size, fuse_kernel_size)
            self.register_buffer('fuse_kernel', fuse_kernel)
            padding = int(((fuse_kernel_size - 1) // 2))
            self.fuse_conv = partial(F.conv2d, padding=padding, stride=1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x, context, mask=None):
        """Forward Function.

        Args:
            x (torch.Tensor): Tensor with shape (n, c, h, w).
            context (torch.Tensor): Tensor with shape (n, c, h, w).
            mask (torch.Tensor): Tensor with shape (n, 1, h, w).
                Default: None.

        Returns:
            tuple(torch.Tensor): Features after contextual attention.
        """
        # Keep a full-resolution copy of the context; its patches are the
        # deconv filters used to copy texture back into the output.
        raw_context = context
        raw_context_cols = self.im2col(raw_context, kernel_size=self.unfold_raw_kernel_size, stride=self.unfold_raw_stride, padding=self.unfold_raw_padding, normalize=False, return_cols=True)
        # Correlation is computed on down-scaled features.
        x = F.interpolate(x, scale_factor=self.scale)
        context = F.interpolate(context, scale_factor=self.scale)
        context_cols = self.im2col(context, kernel_size=self.unfold_corr_kernel_size, stride=self.unfold_corr_stride, padding=self.unfold_corr_padding, dilation=self.unfold_corr_dilation, normalize=True, return_cols=True)
        (h_unfold, w_unfold) = self.calculate_unfold_hw(context.size()[(- 2):], kernel_size=self.unfold_corr_kernel_size, stride=self.unfold_corr_stride, padding=self.unfold_corr_padding, dilation=self.unfold_corr_dilation)
        # Fold the batch dimension into the filter dimension for the
        # grouped convolution in `patch_correlation`.
        context_cols = context_cols.reshape((- 1), *context_cols.shape[2:])
        correlation_map = self.patch_correlation(x, context_cols)
        if self.with_fuse_correlation:
            correlation_map = self.fuse_correlation_map(correlation_map, h_unfold, w_unfold)
        correlation_map = self.mask_correlation_map(correlation_map, mask=mask)
        attention_score = self.softmax((correlation_map * self.softmax_scale))
        raw_context_filter = raw_context_cols.reshape((- 1), *raw_context_cols.shape[2:])
        output = self.patch_copy_deconv(attention_score, raw_context_filter)
        # Deconv patches overlap; normalize by how many patches cover each
        # output pixel.
        overlap_factor = self.calculate_overlap_factor(attention_score)
        output /= overlap_factor
        if self.return_attention_score:
            (n, _, h_s, w_s) = attention_score.size()
            attention_score = attention_score.view(n, h_unfold, w_unfold, h_s, w_s)
            return (output, attention_score)
        return output

    def patch_correlation(self, x, kernel):
        """Calculate patch correlation.

        Each sample's context patches are applied to that sample only, by
        folding the batch into the channel dimension and using a grouped
        convolution.

        Args:
            x (torch.Tensor): Input tensor.
            kernel (torch.Tensor): Kernel tensor.

        Returns:
            torch.Tensor: Tensor with shape of (n, l, h, w).
        """
        (n, _, h_in, w_in) = x.size()
        patch_corr = F.conv2d(x.view(1, (- 1), h_in, w_in), kernel, stride=self.unfold_corr_stride, padding=self.unfold_corr_padding, dilation=self.unfold_corr_dilation, groups=n)
        (h_out, w_out) = patch_corr.size()[(- 2):]
        return patch_corr.view(n, (- 1), h_out, w_out)

    def patch_copy_deconv(self, attention_score, context_filter):
        """Copy patches using deconv.

        The attention scores act as (transposed-conv) weights that paste
        the raw context patches into the output, again using a grouped
        operation with the batch folded into the channel dimension.

        Args:
            attention_score (torch.Tensor): Tensor with shape of
                (n, l, h, w).
            context_filter (torch.Tensor): Filter kernel.

        Returns:
            torch.Tensor: Tensor with shape of (n, c, h, w).
        """
        (n, _, h, w) = attention_score.size()
        attention_score = attention_score.view(1, (- 1), h, w)
        output = F.conv_transpose2d(attention_score, context_filter, stride=self.unfold_raw_stride, padding=self.unfold_raw_padding, groups=n)
        (h_out, w_out) = output.size()[(- 2):]
        return output.view(n, (- 1), h_out, w_out)

    def fuse_correlation_map(self, correlation_map, h_unfold, w_unfold):
        """Fuse correlation map.

        This operation is to fuse correlation map for increasing large
        consistent correlation regions.

        A standard 'eye' (identity) matrix is applied as a filter on the
        correlation map in horizontal and vertical direction. The input
        correlation map has shape (n, h_unfold*w_unfold, h, w); for each
        fusing pass it is reshaped to (n, 1, h_unfold*w_unfold, h*w) so
        the eye kernel slides along the matched diagonal.
        """
        (n, _, h_map, w_map) = correlation_map.size()
        map_ = correlation_map.permute(0, 2, 3, 1)
        map_ = map_.reshape(n, (h_map * w_map), (h_unfold * w_unfold), 1)
        map_ = map_.permute(0, 3, 1, 2).contiguous()
        # First pass: fuse along one direction.
        map_ = self.fuse_conv(map_, self.fuse_kernel)
        correlation_map = map_.view(n, h_unfold, w_unfold, h_map, w_map)
        # Swap axes and fuse along the other direction.
        map_ = correlation_map.permute(0, 2, 1, 4, 3).reshape(n, 1, (h_unfold * w_unfold), (h_map * w_map))
        map_ = self.fuse_conv(map_, self.fuse_kernel)
        correlation_map = map_.view(n, w_unfold, h_unfold, w_map, h_map).permute(0, 4, 3, 2, 1)
        correlation_map = correlation_map.reshape(n, (- 1), h_unfold, w_unfold)
        return correlation_map

    def calculate_unfold_hw(self, input_size, kernel_size=3, stride=1, dilation=1, padding=0):
        """Calculate (h, w) after unfolding.

        The official implementation of `unfold` in pytorch will put the
        dimension (h, w) into `L`. Thus, this function is just to calculate
        the (h, w) according to the equation in:
        https://pytorch.org/docs/stable/nn.html#torch.nn.Unfold
        """
        (h_in, w_in) = input_size
        h_unfold = int((((((h_in + (2 * padding)) - (dilation * (kernel_size - 1))) - 1) / stride) + 1))
        w_unfold = int((((((w_in + (2 * padding)) - (dilation * (kernel_size - 1))) - 1) / stride) + 1))
        return (h_unfold, w_unfold)

    def calculate_overlap_factor(self, attention_score):
        """Calculate the overlap factor after applying deconv.

        Args:
            attention_score (torch.Tensor): The attention score with shape
                of (n, c, h, w).

        Returns:
            torch.Tensor: The overlap factor will be returned.
        """
        (h, w) = attention_score.shape[(- 2):]
        kernel_size = self.unfold_raw_kernel_size
        # Deconvolving an all-ones map counts how many patches cover each
        # output pixel.
        ones_input = torch.ones(1, 1, h, w).to(attention_score)
        ones_filter = torch.ones(1, 1, kernel_size, kernel_size).to(attention_score)
        overlap = F.conv_transpose2d(ones_input, ones_filter, stride=self.unfold_raw_stride, padding=self.unfold_raw_padding)
        # Avoid division by zero where no patch contributes.
        overlap[(overlap == 0)] = 1.0
        return overlap

    def mask_correlation_map(self, correlation_map, mask):
        """Add mask weight for correlation map.

        Add a negative infinity number to the masked regions so that the
        softmax function will result in 'zero' in those regions.

        Args:
            correlation_map (torch.Tensor): Correlation map with shape of
                (n, h_unfold*w_unfold, h_map, w_map).
            mask (torch.Tensor): Mask tensor with shape of (n, c, h, w).
                '1' in the mask indicates masked region while '0'
                indicates valid region.

        Returns:
            torch.Tensor: Updated correlation map with mask.
        """
        if (mask is not None):
            mask = F.interpolate(mask, scale_factor=self.scale)
            mask_cols = self.im2col(mask, kernel_size=self.unfold_corr_kernel_size, stride=self.unfold_corr_stride, padding=self.unfold_corr_padding, dilation=self.unfold_corr_dilation)
            # A patch is masked if any pixel in it is masked.
            mask_cols = (mask_cols.sum(dim=1, keepdim=True) > 0).float()
            mask_cols = mask_cols.permute(0, 2, 1).reshape(mask.size(0), (- 1), 1, 1)
            # -inf makes the subsequent softmax assign zero weight here.
            mask_cols[(mask_cols == 1)] = (- float('inf'))
            correlation_map += mask_cols
        return correlation_map

    def im2col(self, img, kernel_size, stride=1, padding=0, dilation=1, normalize=False, return_cols=False):
        """Reshape image-style feature to columns.

        This function is used to unfold feature maps to columns. The
        details of this function can be found in:
        https://pytorch.org/docs/1.1.0/nn.html?highlight=unfold#torch.nn.Unfold

        Args:
            img (torch.Tensor): Features to be unfolded. The shape of this
                feature should be (n, c, h, w).
            kernel_size (int): In this function, we only support square
                kernel with same height and width.
            stride (int): Stride number in unfolding. Default: 1.
            padding (int): Padding number in unfolding. Default: 0.
            dilation (int): Dilation number in unfolding. Default: 1.
            normalize (bool): If True, the unfolded feature will be
                L2-normalized (per column). Default: False.
            return_cols (bool): The official implementation in PyTorch of
                unfolding will return features with shape of
                (n, c*$prod{kernel_size}$, L). If True, the features will
                be reshaped to (n, L, c, kernel_size, kernel_size).
                Otherwise, the results will maintain the shape as the
                official implementation.

        Returns:
            torch.Tensor: Unfolded columns. If `return_cols` is True, the
                shape of the output tensor is
                `(n, L, c, kernel_size, kernel_size)`. Otherwise, the
                shape will be `(n, c*$prod{kernel_size}$, L)`.
        """
        img_unfold = F.unfold(img, kernel_size, stride=stride, padding=padding, dilation=dilation)
        if normalize:
            norm = torch.sqrt((img_unfold ** 2).sum(dim=1, keepdim=True))
            # Clamp the norm away from zero before dividing.
            eps = torch.tensor([0.0001]).to(img)
            img_unfold = (img_unfold / torch.max(norm, eps))
        if return_cols:
            img_unfold_ = img_unfold.permute(0, 2, 1)
            (n, num_cols) = img_unfold_.size()[:2]
            img_cols = img_unfold_.view(n, num_cols, img.size(1), kernel_size, kernel_size)
            return img_cols
        return img_unfold
|
def pixel_unshuffle(x, scale):
    """Down-sample by pixel unshuffle.

    Rearranges each ``scale x scale`` spatial block into the channel
    dimension, so a (b, c, h, w) tensor becomes
    (b, c*scale*scale, h//scale, w//scale).

    Args:
        x (Tensor): Input tensor.
        scale (int): Scale factor.

    Returns:
        Tensor: Output tensor.

    Raises:
        AssertionError: If ``h`` or ``w`` is not divisible by ``scale``.
    """
    b, c, h, w = x.shape
    if h % scale != 0 or w % scale != 0:
        raise AssertionError(f'Invalid scale ({scale}) of pixel unshuffle for tensor with shape: {x.shape}')
    out_h = h // scale
    out_w = w // scale
    # Split each spatial dim into (blocks, scale), then move the two
    # intra-block axes next to the channel axis.
    blocks = x.view(b, c, out_h, scale, out_w, scale)
    blocks = blocks.permute(0, 1, 3, 5, 2, 4)
    return blocks.reshape(b, -1, out_h, out_w)
|
class SpatialTemporalEnsemble(nn.Module):
    """Apply spatial (and optionally temporal) self-ensemble to a model.

    The input is augmented by flips and transposition, each variant is run
    through the model, the outputs are mapped back with the inverse
    transforms, and the results are averaged.

    Args:
        is_temporal_ensemble (bool, optional): Whether to also ensemble
            temporally. If True, the sequence will also be flipped
            temporally. If the input is an image, this argument must be
            set to False. Default: False.
    """

    def __init__(self, is_temporal_ensemble=False):
        super().__init__()
        self.is_temporal_ensemble = is_temporal_ensemble

    def _transform(self, imgs, mode):
        """Apply one spatial transform (flip or transpose) to the images.

        Args:
            imgs (torch.Tensor): Images of shape (n, t, c, h, w) or
                (n, c, h, w).
            mode (str): One of 'vertical', 'horizontal', or 'transpose',
                corresponding to a width flip, a height flip, and an h/w
                swap, respectively.

        Returns:
            torch.Tensor: Transformed images.
        """
        was_image = imgs.ndim == 4
        if was_image:
            if self.is_temporal_ensemble:
                raise ValueError('"is_temporal_ensemble" must be False if the input is an image.')
            # Work in 5D so dim indices below are uniform.
            imgs = imgs.unsqueeze(1)
        if mode == 'vertical':
            imgs = imgs.flip(4).clone()
        elif mode == 'horizontal':
            imgs = imgs.flip(3).clone()
        elif mode == 'transpose':
            imgs = imgs.permute(0, 1, 2, 4, 3).clone()
        return imgs.squeeze(1) if was_image else imgs

    def spatial_ensemble(self, imgs, model):
        """Apply spatial ensemble.

        Args:
            imgs (torch.Tensor): The images to be processed by the model.
                Its size should be either (n, t, c, h, w) or (n, c, h, w).
            model (nn.Module): The model to process the images.

        Returns:
            torch.Tensor: Output of the model with spatial ensemble
                applied.
        """
        # Build all 8 transform combinations by doubling the list once per
        # mode: [x] -> [x, V] -> [x, V, H, HV] -> [..., T, TV, TH, THV].
        variants = [imgs.cpu()]
        for mode in ('vertical', 'horizontal', 'transpose'):
            variants += [self._transform(v, mode) for v in variants]
        results = [model(v.to(imgs.device)).cpu() for v in variants]
        # Undo the transforms in reverse order of application.
        for idx, res in enumerate(results):
            if idx > 3:
                res = self._transform(res, 'transpose')
            if idx % 4 > 1:
                res = self._transform(res, 'horizontal')
            if idx % 4 % 2 == 1:
                res = self._transform(res, 'vertical')
            results[idx] = res
        averaged = torch.stack(results, dim=0).mean(dim=0, keepdim=False)
        return averaged.to(imgs.device)

    def forward(self, imgs, model):
        """Apply spatial and temporal ensemble.

        Args:
            imgs (torch.Tensor): The images to be processed by the model.
                Its size should be either (n, t, c, h, w) or (n, c, h, w).
            model (nn.Module): The model to process the images.

        Returns:
            torch.Tensor: Output of the model with ensemble applied.
        """
        outputs = self.spatial_ensemble(imgs, model)
        if self.is_temporal_ensemble:
            # Average with the temporally-reversed pass.
            outputs = outputs + self.spatial_ensemble(imgs.flip(1), model).flip(1)
            outputs = outputs * 0.5
        return outputs
|
class SimpleGatedConvModule(nn.Module):
    """Simple Gated Convolutional Module.

    This module is a simple gated convolutional module. The detailed
    formula is:

    .. math::
        y = \\phi(conv1(x)) * \\sigma(conv2(x)),

    where `phi` is the feature activation function and `sigma` is the gate
    activation function. By default, the gate activation is sigmoid.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): The number of channels of the output feature.
            Note that `out_channels` in the conv module is doubled, since
            this module contains two convolutions for feature and gate
            separately.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        feat_act_cfg (dict): Config dict for feature activation layer.
        gate_act_cfg (dict): Config dict for gate activation layer.
        kwargs (keyword arguments): Same as `ConvModule`.
    """

    def __init__(self, in_channels, out_channels, kernel_size, feat_act_cfg=dict(type='ELU'), gate_act_cfg=dict(type='Sigmoid'), **kwargs):
        super().__init__()
        # Disable ConvModule's own activation; activations are applied
        # separately to the feature and gate halves.
        conv_kwargs = copy.deepcopy(kwargs)
        conv_kwargs['act_cfg'] = None
        self.with_feat_act = feat_act_cfg is not None
        self.with_gate_act = gate_act_cfg is not None
        # One conv produces both halves: feature and gate.
        self.conv = ConvModule(in_channels, out_channels * 2, kernel_size,
                               **conv_kwargs)
        if self.with_feat_act:
            self.feat_act = build_activation_layer(feat_act_cfg)
        if self.with_gate_act:
            self.gate_act = build_activation_layer(gate_act_cfg)

    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        out = self.conv(x)
        feat, gate = torch.split(out, out.size(1) // 2, dim=1)
        if self.with_feat_act:
            feat = self.feat_act(feat)
        if self.with_gate_act:
            gate = self.gate_act(gate)
        return feat * gate
|
class GCAModule(nn.Module):
    """Guided Contextual Attention Module.

    From https://arxiv.org/pdf/2001.04069.pdf.
    Based on https://github.com/nbei/Deep-Flow-Guided-Video-Inpainting.
    This module uses the image feature map to augment the alpha feature map
    with a guided contextual attention score.

    Image feature and alpha feature are unfolded to small patches and later
    used as conv kernels. Thus, we refer to the unfolding size as kernel
    size. Image feature patches have a default kernel size 3 while the
    kernel size of alpha feature patches could be specified by ``rate`` (see
    ``rate`` below). The image feature patches are used to convolve with the
    image feature itself to calculate the contextual attention. Then the
    attention feature map is convolved by the alpha feature patches to
    obtain the attention alpha feature. At last, the attention alpha
    feature is added to the input alpha feature.

    Args:
        in_channels (int): Input channels of the guided contextual attention
            module.
        out_channels (int): Output channels of the guided contextual
            attention module.
        kernel_size (int): Kernel size of image feature patches. Default 3.
        stride (int): Stride when unfolding the image feature. Default 1.
        rate (int): The downsample rate of the image feature map. The
            corresponding kernel size and stride of alpha feature patches
            will be ``rate x 2`` and ``rate``. It could be regarded as the
            granularity of the gca module. Default: 2.
        pad_args (dict): Parameters of padding when convolving image feature
            with image feature patches or alpha feature patches. Allowed
            keys are ``mode`` and ``value``. See torch.nn.functional.pad()
            for more information. Default: dict(mode='reflect').
        interpolation (str): Interpolation method in upsampling and
            downsampling.
        penalty (float): Punishment hyperparameter to avoid a large
            correlation between each unknown patch and itself.
        eps (float): A small number to avoid dividing by 0 when calculating
            the normed image feature patch. Default: 1e-4.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 rate=2,
                 pad_args=dict(mode='reflect'),
                 interpolation='nearest',
                 penalty=-10000.0,
                 eps=0.0001):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.rate = rate
        self.pad_args = pad_args
        self.interpolation = interpolation
        self.penalty = penalty
        self.eps = eps
        # Reduce channel number of the guidance feature before computing
        # the attention score.
        self.guidance_conv = nn.Conv2d(in_channels, in_channels // 2, 1)
        self.out_conv = ConvModule(
            out_channels,
            out_channels,
            1,
            norm_cfg=dict(type='BN'),
            act_cfg=None)
        self.init_weights()

    def init_weights(self):
        """Initialize the conv layers (xavier) and out-conv norm (const)."""
        xavier_init(self.guidance_conv, distribution='uniform')
        xavier_init(self.out_conv.conv, distribution='uniform')
        constant_init(self.out_conv.norm, 0.001)

    def forward(self, img_feat, alpha_feat, unknown=None, softmax_scale=1.0):
        """Forward function of GCAModule.

        Args:
            img_feat (Tensor): Image feature map of shape
                (N, ori_c, ori_h, ori_w).
            alpha_feat (Tensor): Alpha feature map of shape
                (N, alpha_c, ori_h, ori_w).
            unknown (Tensor, optional): Unknown area map generated by
                trimap. If specified, this tensor should have shape
                (N, 1, ori_h, ori_w).
            softmax_scale (float, optional): The softmax scale of the
                attention if unknown area is not provided in forward.
                Default: 1.

        Returns:
            Tensor: The augmented alpha feature.

        Raises:
            ValueError: If alpha feature or unknown mask spatial size does
                not match the image feature.
        """
        if alpha_feat.shape[2:4] != img_feat.shape[2:4]:
            raise ValueError(
                'image feature size does not align with alpha feature size: '
                f'image feature size {img_feat.shape[2:4]}, '
                f'alpha feature size {alpha_feat.shape[2:4]}')
        if unknown is not None and unknown.shape[2:4] != img_feat.shape[2:4]:
            raise ValueError(
                'image feature size does not align with unknown mask size: '
                f'image feature size {img_feat.shape[2:4]}, '
                f'unknown mask size {unknown.shape[2:4]}')
        # Downsample the guidance feature to the attention granularity.
        img_feat = self.guidance_conv(img_feat)
        img_feat = F.interpolate(
            img_feat, scale_factor=1 / self.rate, mode=self.interpolation)
        unknown, softmax_scale = self.process_unknown_mask(
            unknown, img_feat, softmax_scale)
        img_ps, alpha_ps, unknown_ps = self.extract_feature_maps_patches(
            img_feat, alpha_feat, unknown)
        # Mask punishing the correlation between each patch and itself.
        self_mask = self.get_self_correlation_mask(img_feat)
        # Process each sample in the batch independently.
        img_groups = torch.split(img_feat, 1, dim=0)
        img_ps_groups = torch.split(img_ps, 1, dim=0)
        alpha_ps_groups = torch.split(alpha_ps, 1, dim=0)
        unknown_ps_groups = torch.split(unknown_ps, 1, dim=0)
        scale_groups = torch.split(softmax_scale, 1, dim=0)
        groups = (img_groups, img_ps_groups, alpha_ps_groups,
                  unknown_ps_groups, scale_groups)
        out = []
        for img_i, img_ps_i, alpha_ps_i, unknown_ps_i, scale_i in zip(*groups):
            similarity_map = self.compute_similarity_map(img_i, img_ps_i)
            gca_score = self.compute_guided_attention_score(
                similarity_map, unknown_ps_i, scale_i, self_mask)
            out_i = self.propagate_alpha_feature(gca_score, alpha_ps_i)
            out.append(out_i)
        out = torch.cat(out, dim=0)
        # Fix: `reshape_as` is out-of-place; the original discarded its
        # return value, making the statement a no-op.
        out = out.reshape_as(alpha_feat)
        out = self.out_conv(out) + alpha_feat
        return out

    def extract_feature_maps_patches(self, img_feat, alpha_feat, unknown):
        """Extract image feature, alpha feature and unknown patches.

        Args:
            img_feat (Tensor): Image feature map of shape
                (N, img_c, img_h, img_w).
            alpha_feat (Tensor): Alpha feature map of shape
                (N, alpha_c, ori_h, ori_w).
            unknown (Tensor, optional): Unknown area map generated by trimap
                of shape (N, 1, img_h, img_w).

        Returns:
            tuple: 3-tuple of

                ``Tensor``: Image feature patches of shape
                    (N, img_h*img_w, img_c, img_ks, img_ks).

                ``Tensor``: Guided contextual attention alpha feature map
                    of shape (N, img_h*img_w, alpha_c, alpha_ks, alpha_ks).

                ``Tensor``: Unknown mask of shape (N, img_h*img_w, 1, 1).
        """
        img_ks = self.kernel_size
        img_ps = self.extract_patches(img_feat, img_ks, self.stride)
        alpha_ps = self.extract_patches(alpha_feat, self.rate * 2, self.rate)
        unknown_ps = self.extract_patches(unknown, img_ks, self.stride)
        unknown_ps = unknown_ps.squeeze(dim=2)
        # Fraction of unknown pixels within each patch.
        unknown_ps = unknown_ps.mean(dim=[2, 3], keepdim=True)
        return img_ps, alpha_ps, unknown_ps

    def compute_similarity_map(self, img_feat, img_ps):
        """Compute similarity between image feature patches.

        Args:
            img_feat (Tensor): Image feature map of shape
                (1, img_c, img_h, img_w).
            img_ps (Tensor): Image feature patches tensor of shape
                (1, img_h*img_w, img_c, img_ks, img_ks).

        Returns:
            Tensor: Similarity map between image feature patches with shape
                (1, img_h*img_w, img_h, img_w).
        """
        img_ps = img_ps[0]
        # Prevent division by zero when normalizing patches.
        escape_NaN = torch.FloatTensor([self.eps]).to(img_feat)
        img_ps_normed = img_ps / torch.max(self.l2_norm(img_ps), escape_NaN)
        img_feat = self.pad(img_feat, self.kernel_size, self.stride)
        # Use the normalized patches as conv kernels over the feature map.
        similarity_map = F.conv2d(img_feat, img_ps_normed)
        return similarity_map

    def compute_guided_attention_score(self, similarity_map, unknown_ps,
                                       scale, self_mask):
        """Compute guided attention score.

        Args:
            similarity_map (Tensor): Similarity map of image feature with
                shape (1, img_h*img_w, img_h, img_w).
            unknown_ps (Tensor): Unknown area patches tensor of shape
                (1, img_h*img_w, 1, 1).
            scale (Tensor): Softmax scale of known and unknown area:
                [unknown_scale, known_scale].
            self_mask (Tensor): Self correlation mask of shape
                (1, img_h*img_w, img_h, img_w). At (1, i*i, i, i) mask value
                equals -1e4 for i in [1, img_h*img_w] and other area is all
                zero.

        Returns:
            Tensor: Guided attention score map with shape
                (1, img_h*img_w, img_h, img_w).
        """
        unknown_scale, known_scale = scale[0]
        # Apply a different softmax temperature to known/unknown patches.
        out = similarity_map * (
            unknown_scale * unknown_ps.gt(0.0).float() +
            known_scale * unknown_ps.le(0.0).float())
        # Punish self-correlation, weighted by the unknown ratio.
        out = out + self_mask * unknown_ps
        gca_score = F.softmax(out, dim=1)
        return gca_score

    def propagate_alpha_feature(self, gca_score, alpha_ps):
        """Propagate alpha feature based on guided attention score.

        Args:
            gca_score (Tensor): Guided attention score map of shape
                (1, img_h*img_w, img_h, img_w).
            alpha_ps (Tensor): Alpha feature patches tensor of shape
                (1, img_h*img_w, alpha_c, alpha_ks, alpha_ks).

        Returns:
            Tensor: Propagated alpha feature map of shape
                (1, alpha_c, alpha_h, alpha_w).
        """
        alpha_ps = alpha_ps[0]
        if self.rate == 1:
            gca_score = self.pad(gca_score, kernel_size=2, stride=1)
            alpha_ps = alpha_ps.permute(1, 0, 2, 3)
            out = F.conv2d(gca_score, alpha_ps) / 4.0
        else:
            # Transposed conv upsamples the attention back to alpha scale.
            out = F.conv_transpose2d(
                gca_score, alpha_ps, stride=self.rate, padding=1) / 4.0
        return out

    def process_unknown_mask(self, unknown, img_feat, softmax_scale):
        """Process unknown mask.

        Args:
            unknown (Tensor, optional): Unknown area map generated by trimap
                of shape (N, 1, ori_h, ori_w).
            img_feat (Tensor): The interpolated image feature map of shape
                (N, img_c, img_h, img_w).
            softmax_scale (float, optional): The softmax scale of the
                attention if unknown area is not provided in forward.
                Default: 1.

        Returns:
            tuple: 2-tuple of

                ``Tensor``: Interpolated unknown area map of shape
                    (N, 1, img_h, img_w).

                ``Tensor``: Softmax scale tensor of known and unknown area
                    of shape (N, 2).
        """
        n, _, h, w = img_feat.shape
        if unknown is not None:
            unknown = unknown.clone()
            unknown = F.interpolate(
                unknown, scale_factor=1 / self.rate, mode=self.interpolation)
            unknown_mean = unknown.mean(dim=[2, 3])
            known_mean = 1 - unknown_mean
            # Balance the softmax temperature by the unknown/known ratio,
            # clamped to keep the scales in a sane range.
            unknown_scale = torch.clamp(
                torch.sqrt(unknown_mean / known_mean), 0.1, 10).to(img_feat)
            known_scale = torch.clamp(
                torch.sqrt(known_mean / unknown_mean), 0.1, 10).to(img_feat)
            softmax_scale = torch.cat([unknown_scale, known_scale], dim=1)
        else:
            # Without a trimap, treat the whole image as unknown and use
            # the user-provided scale for both areas.
            unknown = torch.ones((n, 1, h, w)).to(img_feat)
            softmax_scale = torch.FloatTensor(
                [softmax_scale,
                 softmax_scale]).view(1, 2).repeat(n, 1).to(img_feat)
        return unknown, softmax_scale

    def extract_patches(self, x, kernel_size, stride):
        """Extract feature patches.

        The feature map will be padded automatically to make sure the
        number of patches is equal to ``(H / stride) * (W / stride)``.

        Args:
            x (Tensor): Feature map of shape (N, C, H, W).
            kernel_size (int): Size of each patch.
            stride (int): Stride between patches.

        Returns:
            Tensor: Extracted patches of shape
                (N, (H / stride) * (W / stride), C, kernel_size,
                kernel_size).
        """
        n, c, _, _ = x.shape
        x = self.pad(x, kernel_size, stride)
        x = F.unfold(x, (kernel_size, kernel_size), stride=(stride, stride))
        x = x.permute(0, 2, 1)
        x = x.reshape(n, -1, c, kernel_size, kernel_size)
        return x

    def pad(self, x, kernel_size, stride):
        """Pad the feature map so unfolding covers every output position."""
        left = (kernel_size - stride + 1) // 2
        right = (kernel_size - stride) // 2
        pad = (left, right, left, right)
        return F.pad(x, pad, **self.pad_args)

    def get_self_correlation_mask(self, img_feat):
        """Build a (1, h*w, h, w) mask marking each patch's own position
        with ``self.penalty`` and zero elsewhere."""
        _, _, h, w = img_feat.shape
        # One-hot over spatial positions: channel i is 1 only at position i.
        self_mask = F.one_hot(
            torch.arange(h * w).view(h, w), num_classes=int(h * w))
        self_mask = self_mask.permute(2, 0, 1).view(1, h * w, h, w)
        self_mask = self_mask * self.penalty
        return self_mask.to(img_feat)

    @staticmethod
    def l2_norm(x):
        """Per-patch L2 norm, reducing over all but the first dimension."""
        x = x**2
        x = x.sum(dim=[1, 2, 3], keepdim=True)
        return torch.sqrt(x)
|
def generation_init_weights(module, init_type='normal', init_gain=0.02):
    """Default initialization of network weights for image generation.

    By default, we use normal init, but xavier and kaiming might work
    better for some applications.

    Args:
        module (nn.Module): Module to be initialized.
        init_type (str): The name of an initialization method:
            normal | xavier | kaiming | orthogonal.
        init_gain (float): Scaling factor for normal, xavier and
            orthogonal.

    Raises:
        NotImplementedError: If ``init_type`` is not a supported method.
    """

    def init_func(m):
        """Initialization function applied to every submodule.

        Args:
            m (nn.Module): Module to be initialized.
        """
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1
                                     or classname.find('Linear') != -1):
            if init_type == 'normal':
                normal_init(m, 0.0, init_gain)
            elif init_type == 'xavier':
                xavier_init(m, gain=init_gain, distribution='normal')
            elif init_type == 'kaiming':
                kaiming_init(
                    m,
                    a=0,
                    mode='fan_in',
                    nonlinearity='leaky_relu',
                    distribution='normal')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight, gain=init_gain)
                # Guard against layers built with bias=False, which would
                # otherwise crash on `m.bias.data`.
                if m.bias is not None:
                    init.constant_(m.bias.data, 0.0)
            else:
                raise NotImplementedError(
                    f"Initialization method '{init_type}' is not implemented")
        elif classname.find('BatchNorm2d') != -1:
            # BatchNorm affine weight is centered around 1, not 0.
            normal_init(m, 1.0, init_gain)

    module.apply(init_func)
|
class GANImageBuffer():
    """An image buffer storing previously generated images.

    The buffer lets the discriminator be updated with a history of
    generated images instead of only the newest generator outputs, which
    reduces model oscillation.

    Args:
        buffer_size (int): The size of image buffer. If buffer_size = 0,
            no buffer will be created.
        buffer_ratio (float): The chance / possibility to use the images
            previously stored in the buffer.
    """

    def __init__(self, buffer_size, buffer_ratio=0.5):
        self.buffer_size = buffer_size
        # Only allocate the buffer bookkeeping when it is actually used.
        if self.buffer_size > 0:
            self.img_num = 0
            self.image_buffer = []
        self.buffer_ratio = buffer_ratio

    def query(self, images):
        """Query current image batch using a history of generated images.

        Args:
            images (Tensor): Current image batch without history
                information.
        """
        if self.buffer_size == 0:
            return images
        returned = []
        for img in images:
            img = torch.unsqueeze(img.data, 0)
            # Fill the buffer first; every early image is passed through.
            if self.img_num < self.buffer_size:
                self.img_num = self.img_num + 1
                self.image_buffer.append(img)
                returned.append(img)
                continue
            if np.random.random() < self.buffer_ratio:
                # Swap: emit a stored image and keep the new one instead.
                slot = np.random.randint(0, self.buffer_size)
                stored = self.image_buffer[slot].clone()
                self.image_buffer[slot] = img
                returned.append(stored)
            else:
                returned.append(img)
        return torch.cat(returned, 0)
|
class UnetSkipConnectionBlock(nn.Module):
    """Construct a Unet submodule with skip connections, with the following
    structure: downsampling - `submodule` - upsampling.

    Args:
        outer_channels (int): Number of channels at the outer conv layer.
        inner_channels (int): Number of channels at the inner conv layer.
        in_channels (int): Number of channels in input images/features. If
            is None, equals to `outer_channels`. Default: None.
        submodule (UnetSkipConnectionBlock): Previously constructed
            submodule. Default: None.
        is_outermost (bool): Whether this module is the outermost module.
            Default: False.
        is_innermost (bool): Whether this module is the innermost module.
            Default: False.
        norm_cfg (dict): Config dict to build norm layer. Default:
            `dict(type='BN')`.
        use_dropout (bool): Whether to use dropout layers. Default: False.
    """

    def __init__(self,
                 outer_channels,
                 inner_channels,
                 in_channels=None,
                 submodule=None,
                 is_outermost=False,
                 is_innermost=False,
                 norm_cfg=dict(type='BN'),
                 use_dropout=False):
        super().__init__()
        # Fix: original messages were missing spaces ("Trueat", "butgot").
        assert not (is_outermost and is_innermost), (
            "'is_outermost' and 'is_innermost' cannot be True "
            'at the same time.')
        self.is_outermost = is_outermost
        assert isinstance(norm_cfg, dict), ("'norm_cfg' should be dict, but "
                                            f'got {type(norm_cfg)}')
        assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
        # IN has no learnable bias, so the conv layers need their own bias.
        use_bias = norm_cfg['type'] == 'IN'
        kernel_size = 4
        stride = 2
        padding = 1
        if in_channels is None:
            in_channels = outer_channels
        down_conv_cfg = dict(type='Conv2d')
        down_norm_cfg = norm_cfg
        down_act_cfg = dict(type='LeakyReLU', negative_slope=0.2)
        up_conv_cfg = dict(type='Deconv')
        up_norm_cfg = norm_cfg
        up_act_cfg = dict(type='ReLU')
        # The upsampling conv consumes the concatenation of the submodule
        # output with its skip input, hence doubled channels.
        up_in_channels = inner_channels * 2
        up_bias = use_bias
        middle = [submodule]
        upper = []
        if is_outermost:
            down_act_cfg = None
            down_norm_cfg = None
            up_bias = True
            up_norm_cfg = None
            upper = [nn.Tanh()]
        elif is_innermost:
            down_norm_cfg = None
            up_in_channels = inner_channels
            middle = []
        else:
            upper = [nn.Dropout(0.5)] if use_dropout else []
        down = [
            ConvModule(
                in_channels=in_channels,
                out_channels=inner_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                bias=use_bias,
                conv_cfg=down_conv_cfg,
                norm_cfg=down_norm_cfg,
                act_cfg=down_act_cfg,
                order=('act', 'conv', 'norm'))
        ]
        up = [
            ConvModule(
                in_channels=up_in_channels,
                out_channels=outer_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                bias=up_bias,
                conv_cfg=up_conv_cfg,
                norm_cfg=up_norm_cfg,
                act_cfg=up_act_cfg,
                order=('act', 'conv', 'norm'))
        ]
        model = down + middle + up + upper
        self.model = nn.Sequential(*model)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        if self.is_outermost:
            return self.model(x)
        # Concatenate input with output along channels for the skip link.
        return torch.cat([x, self.model(x)], 1)
|
class ResidualBlockWithDropout(nn.Module):
    """Define a Residual Block with dropout layers.

    Ref:
    Deep Residual Learning for Image Recognition

    A residual block is a conv block with skip connections. A dropout layer
    is added between two common conv modules.

    Args:
        channels (int): Number of channels in the conv layer.
        padding_mode (str): The name of padding layer:
            'reflect' | 'replicate' | 'zeros'.
        norm_cfg (dict): Config dict to build norm layer. Default:
            `dict(type='BN')`.
        use_dropout (bool): Whether to use dropout layers. Default: True.
    """

    def __init__(self,
                 channels,
                 padding_mode,
                 norm_cfg=dict(type='BN'),
                 use_dropout=True):
        super().__init__()
        # Fix: original message was missing a space ("butgot").
        assert isinstance(norm_cfg, dict), ("'norm_cfg' should be dict, but "
                                            f'got {type(norm_cfg)}')
        assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
        # IN has no learnable bias, so the conv layers need their own bias.
        use_bias = norm_cfg['type'] == 'IN'
        block = [
            ConvModule(
                in_channels=channels,
                out_channels=channels,
                kernel_size=3,
                padding=1,
                bias=use_bias,
                norm_cfg=norm_cfg,
                padding_mode=padding_mode)
        ]
        if use_dropout:
            block += [nn.Dropout(0.5)]
        # Second conv has no activation so the sum below stays un-ReLUed.
        block += [
            ConvModule(
                in_channels=channels,
                out_channels=channels,
                kernel_size=3,
                padding=1,
                bias=use_bias,
                norm_cfg=norm_cfg,
                act_cfg=None,
                padding_mode=padding_mode)
        ]
        self.block = nn.Sequential(*block)

    def forward(self, x):
        """Forward function. Add skip connections without final ReLU.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        out = x + self.block(x)
        return out
|
class ImgNormalize(nn.Conv2d):
    """Normalize images with the given mean and std value.

    Implemented as a frozen 1x1 Conv2d layer so it can run on GPU together
    with the rest of the network.

    Args:
        pixel_range (float): Pixel range of feature.
        img_mean (Tuple[float]): Image mean of each channel.
        img_std (Tuple[float]): Image std of each channel.
        sign (int): Sign of bias. Default -1.
    """

    def __init__(self, pixel_range, img_mean, img_std, sign=-1):
        assert len(img_mean) == len(img_std)
        num_channels = len(img_mean)
        super().__init__(num_channels, num_channels, kernel_size=1)
        std = torch.Tensor(img_std)
        mean = torch.Tensor(img_mean)
        # An identity kernel divided by std performs per-channel scaling.
        kernel = torch.eye(num_channels).view(num_channels, num_channels, 1, 1)
        self.weight.data = kernel / std.view(num_channels, 1, 1, 1)
        # The bias shifts by the (scaled) mean; `sign` selects direction.
        self.bias.data = sign * pixel_range * mean / std
        # This layer is a fixed transform, never trained.
        self.weight.requires_grad = False
        self.bias.requires_grad = False
|
class LinearModule(nn.Module):
    """A linear block that contains linear/norm/activation layers.

    For low level vision, we add spectral norm and padding layer.

    Args:
        in_features (int): Same as nn.Linear.
        out_features (int): Same as nn.Linear.
        bias (bool): Same as nn.Linear.
        act_cfg (dict): Config dict for activation layer, "relu" by
            default.
        inplace (bool): Whether to use inplace mode for activation.
        with_spectral_norm (bool): Whether use spectral norm in linear
            module.
        order (tuple[str]): The order of linear/activation layers. It is a
            sequence of "linear", "norm" and "act". Examples are
            ("linear", "act") and ("act", "linear").
    """

    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 act_cfg=dict(type='ReLU'),
                 inplace=True,
                 with_spectral_norm=False,
                 order=('linear', 'act')):
        super().__init__()
        assert act_cfg is None or isinstance(act_cfg, dict)
        assert isinstance(order, tuple) and len(order) == 2
        assert set(order) == set(['linear', 'act'])
        self.act_cfg = act_cfg
        self.inplace = inplace
        self.with_spectral_norm = with_spectral_norm
        self.order = order
        self.with_activation = act_cfg is not None
        self.with_bias = bias
        self.linear = nn.Linear(in_features, out_features, bias=bias)
        # Export the effective feature sizes of the wrapped layer.
        self.in_features = self.linear.in_features
        self.out_features = self.linear.out_features
        if self.with_spectral_norm:
            self.linear = nn.utils.spectral_norm(self.linear)
        if self.with_activation:
            cfg = act_cfg.copy()
            # The caller's dict takes priority over the `inplace` argument.
            cfg.setdefault('inplace', inplace)
            self.activate = build_activation_layer(cfg)
        self.init_weights()

    def init_weights(self):
        """Kaiming-init the linear layer matched to the activation type."""
        if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
            nonlinearity = 'leaky_relu'
            slope = self.act_cfg.get('negative_slope', 0.01)
        else:
            nonlinearity = 'relu'
            slope = 0
        kaiming_init(self.linear, a=slope, nonlinearity=nonlinearity)

    def forward(self, x, activate=True):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of :math:`(n, *, c)`.
                Same as ``torch.nn.Linear``.
            activate (bool, optional): Whether to use activation layer.
                Defaults to True.

        Returns:
            torch.Tensor: Same as ``torch.nn.Linear``.
        """
        for name in self.order:
            if name == 'linear':
                x = self.linear(x)
            elif name == 'act' and activate and self.with_activation:
                x = self.activate(x)
        return x
|
class MaskConvModule(ConvModule):
    """Mask convolution module.

    This is a simple wrapper for mask convolution like: 'partial conv'.
    Convolutions in this module always need a mask as extra input.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias will be set as True if norm_cfg is None,
            otherwise False.
        conv_cfg (dict): Config dict for convolution layer.
        norm_cfg (dict): Config dict for normalization layer.
        act_cfg (dict): Config dict for activation layer, "relu" by default.
        inplace (bool): Whether to use inplace mode for activation.
        with_spectral_norm (bool): Whether use spectral norm in conv module.
        padding_mode (str): If the `padding_mode` has not been supported by
            current `Conv2d` in Pytorch, we will use our own padding layer
            instead. Currently, we support ['zeros', 'circular'] with
            official implementation and ['reflect'] with our own
            implementation. Default: 'zeros'.
        order (tuple[str]): The order of conv/norm/activation layers. It is
            a sequence of "conv", "norm" and "act". Examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
    """

    # Only mask-aware conv types are allowed as the underlying conv.
    supported_conv_list = ['PConv']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert (self.conv_cfg['type'] in self.supported_conv_list)
        self.init_weights()

    def forward(self, x, mask=None, activate=True, norm=True, return_mask=True):
        """Forward function for partial conv2d.

        Args:
            x (torch.Tensor): Tensor with shape of (n, c, h, w).
            mask (torch.Tensor): Tensor with shape of (n, c, h, w) or
                (n, 1, h, w). If mask is not given, the function will
                work as standard conv2d. Default: None.
            activate (bool): Whether use activation layer.
            norm (bool): Whether use norm layer.
            return_mask (bool): If True and mask is not None, the updated
                mask will be returned. Default: True.

        Returns:
            Tensor or tuple: Result Tensor or 2-tuple of

                ``Tensor``: Results after partial conv.

                ``Tensor``: Updated mask will be returned if mask is given
                    and `return_mask` is True.
        """
        for layer in self.order:
            if (layer == 'conv'):
                if self.with_explicit_padding:
                    # NOTE(review): `mask` is padded unconditionally here,
                    # which would fail for mask=None — presumably callers
                    # always pass a mask when explicit padding is enabled;
                    # verify against call sites.
                    x = self.padding_layer(x)
                    mask = self.padding_layer(mask)
                if return_mask:
                    (x, updated_mask) = self.conv(x, mask, return_mask=return_mask)
                else:
                    x = self.conv(x, mask, return_mask=False)
            elif ((layer == 'norm') and norm and self.with_norm):
                x = self.norm(x)
            elif ((layer == 'act') and activate and self.with_activation):
                x = self.activate(x)
        if return_mask:
            return (x, updated_mask)
        return x
|
@CONV_LAYERS.register_module(name='PConv')
class PartialConv2d(nn.Conv2d):
    """Implementation for partial convolution.

    Image Inpainting for Irregular Holes Using Partial Convolutions
    [https://arxiv.org/abs/1804.07723]

    Args:
        multi_channel (bool): If True, the mask is multi-channel. Otherwise,
            the mask is single-channel.
        eps (float): Need to be changed for mixed precision training.
            For mixed precision training, you need change 1e-8 to 1e-6.
    """

    def __init__(self, *args, multi_channel=False, eps=1e-08, **kwargs):
        super().__init__(*args, **kwargs)
        self.multi_channel = multi_channel
        self.eps = eps
        # All-ones kernel used to count valid mask pixels per window.
        if self.multi_channel:
            mask_out, mask_in = self.out_channels, self.in_channels
        else:
            mask_out, mask_in = 1, 1
        self.register_buffer(
            'weight_mask_updater',
            torch.ones(mask_out, mask_in, self.kernel_size[0],
                       self.kernel_size[1]))
        # Total number of elements one mask window covers.
        self.mask_kernel_numel = int(
            np.prod(self.weight_mask_updater.shape[1:4]))

    def forward(self, input, mask=None, return_mask=True):
        """Forward function for partial conv2d.

        Args:
            input (torch.Tensor): Tensor with shape of (n, c, h, w).
            mask (torch.Tensor): Tensor with shape of (n, c, h, w) or
                (n, 1, h, w). If mask is not given, the function will
                work as standard conv2d. Default: None.
            return_mask (bool): If True and mask is not None, the updated
                mask will be returned. Default: True.

        Returns:
            torch.Tensor : Results after partial conv.
            torch.Tensor : Updated mask, returned if mask is given and
                ``return_mask`` is True.
        """
        assert input.dim() == 4
        if mask is not None:
            assert mask.dim() == 4
            expected_channels = input.shape[1] if self.multi_channel else 1
            assert mask.shape[1] == expected_channels
            with torch.no_grad():
                # Count valid pixels per output window; the ratio rescales
                # the output so partially-masked windows are compensated.
                updated_mask = F.conv2d(
                    mask,
                    self.weight_mask_updater,
                    bias=None,
                    stride=self.stride,
                    padding=self.padding,
                    dilation=self.dilation)
                mask_ratio = self.mask_kernel_numel / (updated_mask + self.eps)
                updated_mask = torch.clamp(updated_mask, 0, 1)
                mask_ratio = mask_ratio * updated_mask
            # Zero out masked-off input pixels before the convolution.
            input = input * mask
        raw_out = super().forward(input)
        if mask is None:
            # No mask: behave exactly like a standard conv2d.
            return raw_out
        if self.bias is None:
            output = raw_out * mask_ratio
        else:
            # Rescale only the conv response, keeping the bias unscaled.
            bias_view = self.bias.view(1, self.out_channels, 1, 1)
            output = (raw_out - bias_view) * mask_ratio + bias_view
        output = output * updated_mask
        if return_mask:
            return output, updated_mask
        return output
|
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution module.

    See https://arxiv.org/pdf/1704.04861.pdf for details.

    This module can replace a ConvModule with the conv block replaced by
    two conv blocks: a depthwise conv block and a pointwise conv block. The
    depthwise conv block contains depthwise-conv/norm/activation layers.
    The pointwise conv block contains pointwise-conv/norm/activation
    layers. Note that there will be norm/activation layers in the depthwise
    conv block as well if ``norm_cfg`` and ``act_cfg`` are specified.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d. Default: 1.
        padding (int or tuple[int]): Same as nn.Conv2d. Default: 0.
        dilation (int or tuple[int]): Same as nn.Conv2d. Default: 1.
        norm_cfg (dict): Default norm config for both depthwise ConvModule
            and pointwise ConvModule. Default: None.
        act_cfg (dict): Default activation config for both depthwise
            ConvModule and pointwise ConvModule. Default: dict(type='ReLU').
        dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
            'default', it will be the same as ``norm_cfg``. Default:
            'default'.
        dw_act_cfg (dict): Activation config of depthwise ConvModule. If it
            is 'default', it will be the same as ``act_cfg``. Default:
            'default'.
        pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
            'default', it will be the same as ``norm_cfg``. Default:
            'default'.
        pw_act_cfg (dict): Activation config of pointwise ConvModule. If it
            is 'default', it will be the same as ``act_cfg``. Default:
            'default'.
        kwargs (optional): Other shared arguments for depthwise and
            pointwise ConvModule. See ConvModule for reference.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 dw_norm_cfg='default',
                 dw_act_cfg='default',
                 pw_norm_cfg='default',
                 pw_act_cfg='default',
                 **kwargs):
        super().__init__()
        assert 'groups' not in kwargs, 'groups should not be specified'
        # 'default' is a sentinel meaning "inherit the shared config".
        if dw_norm_cfg == 'default':
            dw_norm_cfg = norm_cfg
        if dw_act_cfg == 'default':
            dw_act_cfg = act_cfg
        if pw_norm_cfg == 'default':
            pw_norm_cfg = norm_cfg
        if pw_act_cfg == 'default':
            pw_act_cfg = act_cfg
        # Depthwise: one filter per input channel (groups=in_channels).
        self.depthwise_conv = ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            norm_cfg=dw_norm_cfg,
            act_cfg=dw_act_cfg,
            **kwargs)
        # Pointwise: 1x1 conv mixing information across channels.
        self.pointwise_conv = ConvModule(
            in_channels,
            out_channels,
            1,
            norm_cfg=pw_norm_cfg,
            act_cfg=pw_act_cfg,
            **kwargs)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).

        Returns:
            Tensor: Output tensor.
        """
        return self.pointwise_conv(self.depthwise_conv(x))
|
def default_init_weights(module, scale=1):
    """Initialize network weights.

    Conv2d and Linear layers get Kaiming init with their weights scaled by
    ``scale`` afterwards (useful for residual blocks); BatchNorm layers get
    constant weight 1 and bias 0.

    Args:
        module (nn.Module): Module to be initialized.
        scale (float): Scale initialized weights, especially for residual
            blocks. Default: 1.
    """
    for m in module.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            kaiming_init(m, a=0, mode='fan_in', bias=0)
            m.weight.data *= scale
        elif isinstance(m, _BatchNorm):
            # Fix: pass the module itself. `constant_init` expects a module
            # and sets both its weight and bias; the original passed
            # `m.weight` (a Parameter), making the call a silent no-op.
            constant_init(m, val=1, bias=0)
|
def make_layer(block, num_blocks, **kwarg):
    """Make layers by stacking the same blocks.

    Args:
        block (nn.module): nn.module class for basic block.
        num_blocks (int): number of blocks.
        **kwarg: Keyword arguments forwarded to every block constructor.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    return nn.Sequential(*(block(**kwarg) for _ in range(num_blocks)))
|
class ResidualBlockNoBN(nn.Module):
'Residual block without BN.\n\n It has a style of:\n\n ::\n\n ---Conv-ReLU-Conv-+-\n |________________|\n\n Args:\n mid_channels (int): Channel number of intermediate features.\n Default: 64.\n res_scale (float): Used to scale the residual before addition.\n Default: 1.0.\n '
def __init__(self, mid_channels=64, res_scale=1.0):
super().__init__()
self.res_scale = res_scale
self.conv1 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)
self.relu = nn.ReLU(inplace=True)
if (res_scale == 1.0):
self.init_weights()
def init_weights(self):
'Initialize weights for ResidualBlockNoBN.\n\n Initialization methods like `kaiming_init` are for VGG-style\n modules. For modules with residual paths, using smaller std is\n better for stability and performance. We empirically use 0.1.\n See more details in "ESRGAN: Enhanced Super-Resolution Generative\n Adversarial Networks"\n '
for m in [self.conv1, self.conv2]:
default_init_weights(m, 0.1)
def forward(self, x):
'Forward function.\n\n Args:\n x (Tensor): Input tensor with shape (n, c, h, w).\n\n Returns:\n Tensor: Forward results.\n '
identity = x
out = self.conv2(self.relu(self.conv1(x)))
return (identity + (out * self.res_scale))
|
class PixelShufflePack(nn.Module):
    """Pixel Shuffle upsample layer.

    Expands channels with a conv layer, then rearranges them into spatial
    positions via ``F.pixel_shuffle``.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of Conv layer to expand
            channels.

    Returns:
        Upsampled feature map.
    """

    def __init__(self, in_channels, out_channels, scale_factor,
                 upsample_kernel):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # The conv expands channels by scale_factor**2 so pixel_shuffle
        # can fold them into a (scale_factor x scale_factor) spatial grid.
        self.upsample_conv = nn.Conv2d(
            self.in_channels,
            self.out_channels * scale_factor * scale_factor,
            self.upsample_kernel,
            padding=(self.upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        """Initialize weights for PixelShufflePack."""
        default_init_weights(self, 1)

    def forward(self, x):
        """Forward function for PixelShufflePack.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        return F.pixel_shuffle(self.upsample_conv(x), self.scale_factor)
|
@COMPONENTS.register_module()
class DeepFillv1Discriminators(nn.Module):
    """Discriminators used in DeepFillv1 model.

    In DeepFillv1 model, the discriminators are independent without any
    concatenation like Global&Local model. Thus, we call this model
    `DeepFillv1Discriminators`. There exist a global discriminator and a
    local discriminator with global and local input respectively.

    The details can be found in:
    Generative Image Inpainting with Contextual Attention.

    Args:
        global_disc_cfg (dict): Config dict for global discriminator.
        local_disc_cfg (dict): Config dict for local discriminator.
    """

    def __init__(self, global_disc_cfg, local_disc_cfg):
        super().__init__()
        self.global_disc = build_component(global_disc_cfg)
        self.local_disc = build_component(local_disc_cfg)

    def forward(self, x):
        """Forward function.

        Args:
            x (tuple[torch.Tensor]): Contains global image and the local
                image patch.

        Returns:
            tuple[torch.Tensor]: Contains the prediction from
                discriminators in global image and local image patch.
        """
        global_img, local_img = x
        # The two discriminators run independently on their own inputs.
        return self.global_disc(global_img), self.local_disc(local_img)

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If
                given None, pretrained weights will not be loaded.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            load_checkpoint(
                self, pretrained, strict=False, logger=get_root_logger())
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    normal_init(m, 0, std=0.02)
                elif isinstance(m, nn.Conv2d):
                    normal_init(m, 0.0, std=0.02)
        else:
            raise TypeError(f'pretrained must be a str or None but got{type(pretrained)} instead.')
|
@COMPONENTS.register_module()
class GLDiscs(nn.Module):
    """Discriminators in Global&Local.

    This discriminator contains a local discriminator and a global
    discriminator as described in the original paper:
    Globally and locally Consistent Image Completion

    Args:
        global_disc_cfg (dict): Config dict to build global discriminator.
        local_disc_cfg (dict): Config dict to build local discriminator.
    """

    def __init__(self, global_disc_cfg, local_disc_cfg):
        super().__init__()
        self.global_disc = MultiLayerDiscriminator(**global_disc_cfg)
        self.local_disc = MultiLayerDiscriminator(**local_disc_cfg)
        # Fuses the concatenated global/local features into a single score.
        self.fc = nn.Linear(2048, 1, bias=True)

    def forward(self, x):
        """Forward function.

        Args:
            x (tuple[torch.Tensor]): Contains the global image and the local
                image patch.

        Returns:
            torch.Tensor: Fused prediction computed from both the global and
                the local discriminator features.
        """
        img_global, img_local = x
        feat_global = self.global_disc(img_global)
        feat_local = self.local_disc(img_local)
        return self.fc(torch.cat([feat_global, feat_local], dim=1))

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if pretrained is None:
            # Default: N(0, 0.02) init for linear layers only; conv layers
            # keep the defaults of MultiLayerDiscriminator.
            for module in self.modules():
                if isinstance(module, nn.Linear):
                    nn.init.normal_(module.weight.data, 0.0, 0.02)
                    nn.init.constant_(module.bias.data, 0.0)
            return
        if not isinstance(pretrained, str):
            raise TypeError('pretrained must be a str or None')
        load_checkpoint(self, pretrained, strict=False, logger=get_root_logger())
|
class MaxFeature(nn.Module):
    """Conv2d or Linear layer followed by a max-feature selector.

    The wrapped layer produces ``2 * out_channels`` features; the output is
    the element-wise maximum of the two halves (split along dim 1).

    Args:
        in_channels (int): Channel number of inputs.
        out_channels (int): Channel number of outputs.
        kernel_size (int or tuple): Size of the convolving kernel.
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 1
        filter_type (str): Type of filter. Options are 'conv2d' and 'linear'.
            Default: 'conv2d'.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, filter_type='conv2d'):
        super().__init__()
        self.out_channels = out_channels
        kind = filter_type.lower()
        if kind == 'linear':
            # kernel/stride/padding are ignored for the linear variant.
            self.filter = nn.Linear(in_channels, 2 * out_channels)
        elif kind == 'conv2d':
            self.filter = nn.Conv2d(
                in_channels,
                2 * out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding)
        else:
            raise ValueError(
                f"'filter_type' should be 'conv2d' or 'linear', but got {kind}")

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor.

        Returns:
            Tensor: Element-wise maximum of the two feature halves.
        """
        doubled = self.filter(x)
        first_half, second_half = torch.chunk(doubled, chunks=2, dim=1)
        return torch.max(first_half, second_half)
|
@COMPONENTS.register_module()
class LightCNN(nn.Module):
    """LightCNN discriminator with input size 128 x 128.

    It is used to train DICGAN.

    Args:
        in_channels (int): Channel number of inputs.
    """

    def __init__(self, in_channels):
        super().__init__()
        pool_cfg = dict(kernel_size=2, stride=2, ceil_mode=True)
        # Four pooling stages reduce a 128x128 input to 8x8.
        feature_layers = [
            MaxFeature(in_channels, 48, 5, 1, 2),
            nn.MaxPool2d(**pool_cfg),
            MaxFeature(48, 48, 1, 1, 0),
            MaxFeature(48, 96, 3, 1, 1),
            nn.MaxPool2d(**pool_cfg),
            MaxFeature(96, 96, 1, 1, 0),
            MaxFeature(96, 192, 3, 1, 1),
            nn.MaxPool2d(**pool_cfg),
            MaxFeature(192, 192, 1, 1, 0),
            MaxFeature(192, 128, 3, 1, 1),
            MaxFeature(128, 128, 1, 1, 0),
            MaxFeature(128, 128, 3, 1, 1),
            nn.MaxPool2d(**pool_cfg),
        ]
        self.features = nn.Sequential(*feature_layers)
        self.classifier = nn.Sequential(
            MaxFeature(8 * 8 * 128, 256, filter_type='linear'),
            nn.LeakyReLU(0.2, True),
            nn.Linear(256, 1))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor.

        Returns:
            Tensor: Forward results.
        """
        feat = self.features(x)
        flat = feat.view(feat.size(0), -1)
        return self.classifier(flat)

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        load_checkpoint(self, pretrained, strict=strict, logger=get_root_logger())
|
@COMPONENTS.register_module()
class ModifiedVGG(nn.Module):
    """A modified VGG discriminator with input size 128 x 128.

    It is used to train SRGAN and ESRGAN.

    Args:
        in_channels (int): Channel number of inputs. Default: 3.
        mid_channels (int): Channel number of base intermediate features.
            Default: 64.
    """

    def __init__(self, in_channels, mid_channels):
        super().__init__()
        # Stage 0: plain conv (with bias, no norm), then a stride-2 conv + BN
        # that halves the spatial size.
        self.conv0_0 = nn.Conv2d(in_channels, mid_channels, 3, 1, 1, bias=True)
        self.conv0_1 = nn.Conv2d(mid_channels, mid_channels, 4, 2, 1, bias=False)
        self.bn0_1 = nn.BatchNorm2d(mid_channels, affine=True)
        # Stages 1-4: conv(3x3, stride 1) + BN then conv(4x4, stride 2) + BN;
        # channel widths grow as 2x, 4x, 8x, 8x of mid_channels. Modules are
        # registered under the same names as the hand-written original so
        # pretrained state dicts remain loadable.
        in_ch = mid_channels
        for stage, mult in enumerate((2, 4, 8, 8), start=1):
            out_ch = mid_channels * mult
            self.add_module(f'conv{stage}_0',
                            nn.Conv2d(in_ch, out_ch, 3, 1, 1, bias=False))
            self.add_module(f'bn{stage}_0', nn.BatchNorm2d(out_ch, affine=True))
            self.add_module(f'conv{stage}_1',
                            nn.Conv2d(out_ch, out_ch, 4, 2, 1, bias=False))
            self.add_module(f'bn{stage}_1', nn.BatchNorm2d(out_ch, affine=True))
            in_ch = out_ch
        # Five stride-2 convs reduce 128x128 input to 4x4.
        self.linear1 = nn.Linear(mid_channels * 8 * 4 * 4, 100)
        self.linear2 = nn.Linear(100, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, 128, 128).

        Returns:
            Tensor: Forward results.
        """
        assert x.size(2) == 128 and x.size(3) == 128, f'Input spatial size must be 128x128, but received {x.size()}.'
        feat = self.lrelu(self.conv0_0(x))
        feat = self.lrelu(self.bn0_1(self.conv0_1(feat)))
        for stage in range(1, 5):
            for sub in range(2):
                conv = getattr(self, f'conv{stage}_{sub}')
                bn = getattr(self, f'bn{stage}_{sub}')
                feat = self.lrelu(bn(conv(feat)))
        feat = feat.view(feat.size(0), -1)
        feat = self.lrelu(self.linear1(feat))
        return self.linear2(feat)

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if pretrained is None:
            # Layers keep their default PyTorch initialization.
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        load_checkpoint(self, pretrained, strict=strict, logger=get_root_logger())
|
@COMPONENTS.register_module()
class MultiLayerDiscriminator(nn.Module):
    """Multilayer Discriminator.

    This is a commonly used structure with stacked multiply convolution
    layers.

    Args:
        in_channels (int): Input channel of the first input convolution.
        max_channels (int): The maximum channel number in this structure.
        num_convs (int): Number of stacked intermediate convs (including input
            conv but excluding output conv). Default to 5.
        fc_in_channels (int | None): Input dimension of the fully connected
            layer. If `fc_in_channels` is None, the fully connected layer will
            be removed. Default to None.
        fc_out_channels (int): Output dimension of the fully connected layer.
            Default to 1024.
        kernel_size (int): Kernel size of the conv modules. Default to 5.
        conv_cfg (dict): Config dict to build conv layer.
        norm_cfg (dict): Config dict to build norm layer.
        act_cfg (dict): Config dict for activation layer, "relu" by default.
        out_act_cfg (dict): Config dict for output activation, "relu" by
            default.
        with_input_norm (bool): Whether add normalization after the input
            conv. Default to True.
        with_out_convs (bool): Whether add output convs to the discriminator.
            The output convs contain two convs. The first out conv has the
            same setting as the intermediate convs but a stride of 1 instead
            of 2. The second out conv is a conv similar to the first out conv
            but reduces the number of channels to 1 and has no activation
            layer. Default to False.
        with_spectral_norm (bool): Whether use spectral norm after the conv
            layers. Default to False.
        kwargs (keyword arguments): Extra arguments passed to each ConvModule.
    """

    def __init__(self, in_channels, max_channels, num_convs=5, fc_in_channels=None, fc_out_channels=1024, kernel_size=5, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), out_act_cfg=dict(type='ReLU'), with_input_norm=True, with_out_convs=False, with_spectral_norm=False, **kwargs):
        super().__init__()
        if (fc_in_channels is not None):
            assert (fc_in_channels > 0)
        self.max_channels = max_channels
        self.with_fc = (fc_in_channels is not None)
        self.num_convs = num_convs
        self.with_out_act = (out_act_cfg is not None)
        self.with_out_convs = with_out_convs
        cur_channels = in_channels
        # Stacked stride-2 convs: channel width doubles each layer starting
        # from 64, capped at `max_channels`.
        for i in range(num_convs):
            out_ch = min((64 * (2 ** i)), max_channels)
            norm_cfg_ = norm_cfg
            act_cfg_ = act_cfg
            if ((i == 0) and (not with_input_norm)):
                # Optionally skip normalization right after the input conv.
                norm_cfg_ = None
            elif ((i == (num_convs - 1)) and (not self.with_fc) and (not self.with_out_convs)):
                # The last stacked conv acts as the output layer when neither
                # the fc head nor the out convs follow: drop the norm and use
                # the output activation instead of the intermediate one.
                norm_cfg_ = None
                act_cfg_ = out_act_cfg
            self.add_module(f'conv{(i + 1)}', ConvModule(cur_channels, out_ch, kernel_size=kernel_size, stride=2, padding=(kernel_size // 2), norm_cfg=norm_cfg_, act_cfg=act_cfg_, with_spectral_norm=with_spectral_norm, **kwargs))
            cur_channels = out_ch
        if self.with_out_convs:
            # Two extra stride-1 convs; the second one reduces the output to
            # a single channel and has no activation.
            cur_channels = min((64 * (2 ** (num_convs - 1))), max_channels)
            out_ch = min((64 * (2 ** num_convs)), max_channels)
            self.add_module(f'conv{(num_convs + 1)}', ConvModule(cur_channels, out_ch, kernel_size, stride=1, padding=(kernel_size // 2), norm_cfg=norm_cfg, act_cfg=act_cfg, with_spectral_norm=with_spectral_norm, **kwargs))
            self.add_module(f'conv{(num_convs + 2)}', ConvModule(out_ch, 1, kernel_size, stride=1, padding=(kernel_size // 2), act_cfg=None, with_spectral_norm=with_spectral_norm, **kwargs))
        if self.with_fc:
            self.fc = LinearModule(fc_in_channels, fc_out_channels, bias=True, act_cfg=out_act_cfg, with_spectral_norm=with_spectral_norm)

    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w') or
                (n, c) when the fully connected head is enabled.
        """
        input_size = x.size()
        # `with_out_convs` adds two extra conv layers after the stack.
        num_convs = (self.num_convs + (2 * self.with_out_convs))
        for i in range(num_convs):
            x = getattr(self, f'conv{(i + 1)}')(x)
        if self.with_fc:
            # Flatten conv features before the fully connected head.
            x = x.view(input_size[0], (- 1))
            x = self.fc(x)
        return x

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            # Default init: only linear layers get an explicit N(0, 0.02)
            # init here; conv layers keep ConvModule's own defaults.
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    nn.init.normal_(m.weight.data, 0.0, 0.02)
                    nn.init.constant_(m.bias.data, 0.0)
        else:
            raise TypeError('pretrained must be a str or None')
|
@COMPONENTS.register_module()
class PatchDiscriminator(nn.Module):
    """A PatchGAN discriminator.

    Args:
        in_channels (int): Number of channels in input images.
        base_channels (int): Number of channels at the first conv layer.
            Default: 64.
        num_conv (int): Number of stacked intermediate convs (excluding input
            and output conv). Default: 3.
        norm_cfg (dict): Config dict to build norm layer. Default:
            ``dict(type='BN')``.
        init_cfg (dict): Config dict for initialization.
            `type`: The name of our initialization method. Default: 'normal'.
            `gain`: Scaling factor for normal, xavier and orthogonal.
            Default: 0.02.
    """

    def __init__(self, in_channels, base_channels=64, num_conv=3, norm_cfg=dict(type='BN'), init_cfg=dict(type='normal', gain=0.02)):
        super().__init__()
        # Fix: original assert message read "butgot" (missing space).
        assert isinstance(norm_cfg, dict), f"'norm_cfg' should be dict, but got {type(norm_cfg)}"
        assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
        # InstanceNorm has no affine shift by default, so keep the conv bias.
        use_bias = (norm_cfg['type'] == 'IN')
        kernel_size = 4
        padding = 1
        # Input conv: no norm, LeakyReLU activation.
        sequence = [ConvModule(in_channels=in_channels, out_channels=base_channels, kernel_size=kernel_size, stride=2, padding=padding, bias=True, norm_cfg=None, act_cfg=dict(type='LeakyReLU', negative_slope=0.2))]
        multiple_now = 1
        multiple_prev = 1
        # Stacked stride-2 convs; channel multiplier doubles, capped at 8x.
        for n in range(1, num_conv):
            multiple_prev = multiple_now
            multiple_now = min(2 ** n, 8)
            sequence += [ConvModule(in_channels=(base_channels * multiple_prev), out_channels=(base_channels * multiple_now), kernel_size=kernel_size, stride=2, padding=padding, bias=use_bias, norm_cfg=norm_cfg, act_cfg=dict(type='LeakyReLU', negative_slope=0.2))]
        # One more stride-1 conv before the 1-channel patch prediction.
        multiple_prev = multiple_now
        multiple_now = min(2 ** num_conv, 8)
        sequence += [ConvModule(in_channels=(base_channels * multiple_prev), out_channels=(base_channels * multiple_now), kernel_size=kernel_size, stride=1, padding=padding, bias=use_bias, norm_cfg=norm_cfg, act_cfg=dict(type='LeakyReLU', negative_slope=0.2))]
        sequence += [build_conv_layer(dict(type='Conv2d'), (base_channels * multiple_now), 1, kernel_size=kernel_size, stride=1, padding=padding)]
        self.model = nn.Sequential(*sequence)
        self.init_type = 'normal' if init_cfg is None else init_cfg.get('type', 'normal')
        self.init_gain = 0.02 if init_cfg is None else init_cfg.get('gain', 0.02)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results (per-patch real/fake scores).
        """
        return self.model(x)

    def init_weights(self, pretrained=None):
        """Initialize weights for the model.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Default: None.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            generation_init_weights(self, init_type=self.init_type, init_gain=self.init_gain)
        else:
            raise TypeError(f"'pretrained' must be a str or None. But received {type(pretrained)}.")
|
@COMPONENTS.register_module()
class SoftMaskPatchDiscriminator(nn.Module):
    """A Soft Mask-Guided PatchGAN discriminator.

    Args:
        in_channels (int): Number of channels in input images.
        base_channels (int, optional): Number of channels at the
            first conv layer. Default: 64.
        num_conv (int, optional): Number of stacked intermediate convs
            (excluding input and output conv). Default: 3.
        norm_cfg (dict, optional): Config dict to build norm layer.
            Default: None.
        init_cfg (dict, optional): Config dict for initialization.
            `type`: The name of our initialization method. Default: 'normal'.
            `gain`: Scaling factor for normal, xavier and orthogonal.
            Default: 0.02.
        with_spectral_norm (bool, optional): Whether use spectral norm
            after the conv layers. Default: False.
    """

    def __init__(self, in_channels, base_channels=64, num_conv=3, norm_cfg=None, init_cfg=dict(type='normal', gain=0.02), with_spectral_norm=False):
        super().__init__()
        kernel_size = 4
        padding = 1
        # Input conv followed by stacked stride-2 convs; the channel
        # multiplier doubles each layer and is capped at 8x.
        layers = [ConvModule(in_channels=in_channels, out_channels=base_channels, kernel_size=kernel_size, stride=2, padding=padding, bias=False, norm_cfg=norm_cfg, act_cfg=dict(type='LeakyReLU', negative_slope=0.2), with_spectral_norm=with_spectral_norm)]
        mult_in = 1
        mult_out = 1
        for layer_idx in range(1, num_conv):
            mult_in, mult_out = mult_out, min(2 ** layer_idx, 8)
            layers.append(ConvModule(in_channels=(base_channels * mult_in), out_channels=(base_channels * mult_out), kernel_size=kernel_size, stride=2, padding=padding, bias=False, norm_cfg=norm_cfg, act_cfg=dict(type='LeakyReLU', negative_slope=0.2), with_spectral_norm=with_spectral_norm))
        # Final stride-1 conv stage, then the 1-channel patch prediction.
        mult_in, mult_out = mult_out, min(2 ** num_conv, 8)
        layers.append(ConvModule(in_channels=(base_channels * mult_in), out_channels=(base_channels * mult_out), kernel_size=kernel_size, stride=1, padding=padding, bias=False, norm_cfg=norm_cfg, act_cfg=dict(type='LeakyReLU', negative_slope=0.2), with_spectral_norm=with_spectral_norm))
        layers.append(nn.Conv2d((base_channels * mult_out), 1, kernel_size=kernel_size, stride=1, padding=padding))
        self.model = nn.Sequential(*layers)
        if init_cfg is None:
            self.init_type, self.init_gain = 'normal', 0.02
        else:
            self.init_type = init_cfg.get('type', 'normal')
            self.init_gain = init_cfg.get('gain', 0.02)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results (per-patch real/fake scores).
        """
        return self.model(x)

    def init_weights(self, pretrained=None):
        """Initialize weights for the model.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Default: None.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            load_checkpoint(self, pretrained, strict=False, logger=get_root_logger())
        elif pretrained is None:
            generation_init_weights(self, init_type=self.init_type, init_gain=self.init_gain)
        else:
            raise TypeError(f"'pretrained' must be a str or None. But received {type(pretrained)}.")
|
@COMPONENTS.register_module()
class TTSRDiscriminator(nn.Module):
    """A discriminator for TTSR.

    Args:
        in_channels (int): Channel number of inputs. Default: 3.
        in_size (int): Size of input image. Default: 160.
    """

    def __init__(self, in_channels=3, in_size=160):
        super().__init__()
        # Ten 3x3 convs with alternating strides (1, 2, 1, 2, ...); each
        # stride-2 conv halves the spatial size, 32x reduction in total.
        channel_pairs = ((in_channels, 32), (32, 32), (32, 64), (64, 64),
                         (64, 128), (128, 128), (128, 256), (256, 256),
                         (256, 512), (512, 512))
        body_layers = []
        for idx, (c_in, c_out) in enumerate(channel_pairs):
            body_layers.append(nn.Conv2d(c_in, c_out, 3, 2 if idx % 2 else 1, 1))
            body_layers.append(nn.LeakyReLU(0.2))
        self.body = nn.Sequential(*body_layers)
        # Same arithmetic as the original (left-to-right evaluation); exact
        # only when `in_size` is a multiple of 32.
        flat_dim = in_size // 32 * in_size // 32 * 512
        self.last = nn.Sequential(
            nn.Linear(flat_dim, 1024), nn.LeakyReLU(0.2), nn.Linear(1024, 1))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        feat = self.body(x)
        return self.last(feat.view(feat.size(0), -1))

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        load_checkpoint(self, pretrained, strict=strict, logger=get_root_logger())
|
@COMPONENTS.register_module()
class UNetDiscriminatorWithSpectralNorm(nn.Module):
    """A U-Net discriminator with spectral normalization.

    Args:
        in_channels (int): Channel number of the input.
        mid_channels (int, optional): Channel number of the intermediate
            features. Default: 64.
        skip_connection (bool, optional): Whether to use skip connection.
            Default: True.
    """

    def __init__(self, in_channels, mid_channels=64, skip_connection=True):
        super().__init__()
        self.skip_connection = skip_connection
        mid = mid_channels
        # Input conv (no spectral norm), then a 3-level strided encoder.
        self.conv_0 = nn.Conv2d(in_channels, mid, kernel_size=3, stride=1, padding=1)
        self.conv_1 = spectral_norm(nn.Conv2d(mid, mid * 2, 4, 2, 1, bias=False))
        self.conv_2 = spectral_norm(nn.Conv2d(mid * 2, mid * 4, 4, 2, 1, bias=False))
        self.conv_3 = spectral_norm(nn.Conv2d(mid * 4, mid * 8, 4, 2, 1, bias=False))
        # Decoder convs, each applied after bilinear upsampling.
        self.conv_4 = spectral_norm(nn.Conv2d(mid * 8, mid * 4, 3, 1, 1, bias=False))
        self.conv_5 = spectral_norm(nn.Conv2d(mid * 4, mid * 2, 3, 1, 1, bias=False))
        self.conv_6 = spectral_norm(nn.Conv2d(mid * 2, mid, 3, 1, 1, bias=False))
        # Two refinement convs and the final 1-channel prediction conv.
        self.conv_7 = spectral_norm(nn.Conv2d(mid, mid, 3, 1, 1, bias=False))
        self.conv_8 = spectral_norm(nn.Conv2d(mid, mid, 3, 1, 1, bias=False))
        self.conv_9 = nn.Conv2d(mid, 1, 3, 1, 1)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, img):
        """Forward function.

        Args:
            img (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        act = self.lrelu
        # Encoder.
        enc0 = act(self.conv_0(img))
        enc1 = act(self.conv_1(enc0))
        enc2 = act(self.conv_2(enc1))
        enc3 = act(self.conv_3(enc2))
        # Decoder with optional U-Net skip connections.
        dec = act(self.conv_4(self.upsample(enc3)))
        if self.skip_connection:
            dec = dec + enc2
        dec = act(self.conv_5(self.upsample(dec)))
        if self.skip_connection:
            dec = dec + enc1
        dec = act(self.conv_6(self.upsample(dec)))
        if self.skip_connection:
            dec = dec + enc0
        out = act(self.conv_8(act(self.conv_7(dec))))
        return self.conv_9(out)

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        load_checkpoint(self, pretrained, strict=strict, logger=get_root_logger())
|
@COMPONENTS.register_module()
class DeepFillRefiner(nn.Module):
    """Refiner used in DeepFill model.

    This implementation follows:
    Generative Image Inpainting with Contextual Attention.

    Args:
        encoder_attention (dict): Config dict for encoder used in branch
            with contextual attention module.
        encoder_conv (dict): Config dict for encoder used in branch with
            just convolutional operation.
        dilation_neck (dict): Config dict for dilation neck in branch with
            just convolutional operation.
        contextual_attention (dict): Config dict for contextual attention
            neck.
        decoder (dict): Config dict for decoder used to fuse and decode
            features.
    """

    def __init__(self, encoder_attention=dict(type='DeepFillEncoder', encoder_type='stage2_attention'), encoder_conv=dict(type='DeepFillEncoder', encoder_type='stage2_conv'), dilation_neck=dict(type='GLDilationNeck', in_channels=128, act_cfg=dict(type='ELU')), contextual_attention=dict(type='ContextualAttentionNeck', in_channels=128), decoder=dict(type='DeepFillDecoder', in_channels=256)):
        super().__init__()
        self.encoder_attention = build_component(encoder_attention)
        self.encoder_conv = build_component(encoder_conv)
        self.contextual_attention_neck = build_component(contextual_attention)
        self.dilation_neck = build_component(dilation_neck)
        self.decoder = build_component(decoder)

    def forward(self, x, mask):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).
            mask (torch.Tensor): Input tensor with shape of (n, 1, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        # Conv-only branch: encode then dilate.
        conv_branch = self.dilation_neck(self.encoder_conv(x)['out'])
        # Attention branch: encode, then apply contextual attention guided by
        # the mask resized to the feature resolution.
        attn_feat = self.encoder_attention(x)['out']
        mask_small = F.interpolate(mask, size=attn_feat.shape[-2:])
        attn_feat, offset = self.contextual_attention_neck(attn_feat, mask_small)
        # Fuse the two branches and decode.
        merged = torch.cat([conv_branch, attn_feat], dim=1)
        out = self.decoder(dict(out=merged))
        return (out, offset)
|
@COMPONENTS.register_module()
class MLPRefiner(nn.Module):
    """Multilayer perceptrons (MLPs), refiner used in LIIF.

    Args:
        in_dim (int): Input dimension.
        out_dim (int): Output dimension.
        hidden_list (list[int]): List of hidden dimensions.
    """

    def __init__(self, in_dim, out_dim, hidden_list):
        super().__init__()
        modules = []
        prev_dim = in_dim
        # Linear + ReLU for every hidden width, then a final linear head.
        for hidden_dim in hidden_list:
            modules.extend([nn.Linear(prev_dim, hidden_dim), nn.ReLU()])
            prev_dim = hidden_dim
        modules.append(nn.Linear(prev_dim, out_dim))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): The input of MLP.

        Returns:
            Tensor: The output of MLP, same leading dimensions as the input.
        """
        # Flatten all leading dimensions, run the MLP, then restore them.
        batch_shape = x.shape[:-1]
        out = self.layers(x.view(-1, x.shape[-1]))
        return out.view(*batch_shape, -1)

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        load_checkpoint(self, pretrained, strict=strict, logger=get_root_logger())
|
@COMPONENTS.register_module()
class PlainRefiner(nn.Module):
    """Simple refiner from Deep Image Matting.

    Args:
        conv_channels (int): Number of channels produced by the three main
            convolutional layers. Default: 64.
        pretrained (str): Name of pretrained model. Must be None (loading a
            pretrained refiner is not supported yet). Default: None.
    """

    def __init__(self, conv_channels=64, pretrained=None):
        super().__init__()
        assert pretrained is None, 'pretrained not supported yet'
        # Input is 4 channels: merged image (3) + raw alpha (1) —
        # TODO(review): confirm against the caller.
        self.refine_conv1 = nn.Conv2d(4, conv_channels, kernel_size=3, padding=1)
        self.refine_conv2 = nn.Conv2d(conv_channels, conv_channels, kernel_size=3, padding=1)
        self.refine_conv3 = nn.Conv2d(conv_channels, conv_channels, kernel_size=3, padding=1)
        self.refine_pred = nn.Conv2d(conv_channels, 1, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def init_weights(self):
        """Initialize all conv layers with Xavier initialization."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                xavier_init(module)

    def forward(self, x, raw_alpha):
        """Forward function.

        Args:
            x (Tensor): The input feature map of refiner.
            raw_alpha (Tensor): The raw predicted alpha matte.

        Returns:
            Tensor: The refined alpha matte.
        """
        feat = x
        for conv in (self.refine_conv1, self.refine_conv2, self.refine_conv3):
            feat = self.relu(conv(feat))
        # Predict a residual correction on top of the raw alpha.
        residual = self.refine_pred(feat)
        return torch.sigmoid(raw_alpha + residual)
|
def get_module_device(module):
    """Get the device of a module.

    Args:
        module (nn.Module): A module that contains at least one parameter.

    Returns:
        torch.device | int: ``torch.device('cpu')`` for a CPU module, or the
            CUDA device ordinal (as returned by ``Tensor.get_device()``) for
            a CUDA module, matching the original behavior.

    Raises:
        ValueError: If the module has no parameters.
    """
    # Fetch the first parameter once instead of re-iterating `parameters()`
    # for every check, as the original did.
    try:
        param = next(module.parameters())
    except StopIteration:
        # Suppress the StopIteration context: it is an implementation detail.
        raise ValueError('The input module should contain parameters.') from None
    if param.is_cuda:
        return param.get_device()
    return torch.device('cpu')
|
@torch.no_grad()
def get_mean_latent(generator, num_samples=4096, bs_per_repeat=1024):
    """Get mean latent of W space in Style-based GANs.

    Args:
        generator (nn.Module): Generator of a Style-based GAN.
        num_samples (int, optional): Number of sample times. Defaults to 4096.
        bs_per_repeat (int, optional): Batch size of noises per sample.
            Defaults to 1024.

    Returns:
        Tensor: Mean latent of this generator.
    """
    device = get_module_device(generator)
    n_repeat = num_samples // bs_per_repeat
    # `num_samples` must be an exact multiple of the per-repeat batch size.
    assert n_repeat * bs_per_repeat == num_samples
    accum = None
    for _ in range(n_repeat):
        noise = torch.randn(bs_per_repeat, generator.style_channels).to(device)
        batch_mean = generator.style_mapping(noise).mean(0, keepdim=True)
        accum = batch_mean if accum is None else accum + batch_mean
    return accum / float(n_repeat)
|
@torch.no_grad()
def style_mixing(generator, n_source, n_target, inject_index=1, truncation_latent=None, truncation=0.7, style_channels=512, **kwargs):
    """Build a style-mixing image grid from a Style-based GAN generator.

    The first row holds a blank cell followed by the source images; each
    following row holds one target image followed by images that mix that
    target's styles (up to ``inject_index``) with each source's styles.

    Args:
        generator (nn.Module): Style-based GAN generator.
        n_source (int): Number of source samples (grid columns).
        n_target (int): Number of target samples (grid rows).
        inject_index (int, optional): Layer index where styles switch from
            target to source codes. Defaults to 1.
        truncation_latent (Tensor, optional): Mean latent for truncation.
            Defaults to None.
        truncation (float, optional): Truncation factor. Defaults to 0.7.
        style_channels (int, optional): Dimension of style codes.
            Defaults to 512.
        **kwargs: Extra arguments forwarded to the generator.

    Returns:
        Tensor: All grid cells concatenated along the batch dimension.
    """
    device = get_module_device(generator)
    # Keep the RNG-consuming calls in the original order for reproducibility.
    source_code = torch.randn(n_source, style_channels).to(device)
    target_code = torch.randn(n_target, style_channels).to(device)
    source_image = generator(source_code, truncation_latent=truncation_latent, truncation=truncation, **kwargs)
    height, width = source_image.shape[-2:]
    # Top-left placeholder cell: an all -1 (blank) image.
    cells = [torch.ones(1, 3, height, width).to(device) * -1]
    target_image = generator(target_code, truncation_latent=truncation_latent, truncation=truncation, **kwargs)
    cells.append(source_image)
    for row in range(n_target):
        mixed = generator(
            [target_code[row].unsqueeze(0).repeat(n_source, 1), source_code],
            truncation_latent=truncation_latent,
            truncation=truncation,
            inject_index=inject_index,
            **kwargs)
        cells.append(target_image[row].unsqueeze(0))
        cells.append(mixed)
    return torch.cat(cells, 0)
|
@COMPONENTS.register_module()
class LTE(nn.Module):
    """Learnable Texture Extractor.

    Based on pretrained VGG19. Generate features in 3 levels.

    Args:
        requires_grad (bool): Require grad or not. Default: True.
        pixel_range (float): Pixel range of feature. Default: 1.
        pretrained (str): Path for pretrained model. Default: None.
        load_pretrained_vgg (bool): Load pretrained VGG from torchvision.
            Default: True.
            Train: must load pretrained VGG.
            Eval: needn't load pretrained VGG, because we will load pretrained
            LTE.
    """

    def __init__(self, requires_grad=True, pixel_range=1.0, pretrained=None, load_pretrained_vgg=True):
        super().__init__()
        # ImageNet statistics, scaled to the working pixel range.
        vgg_mean = (0.485, 0.456, 0.406)
        vgg_std = (0.229 * pixel_range, 0.224 * pixel_range, 0.225 * pixel_range)
        self.img_normalize = ImgNormalize(pixel_range=pixel_range, img_mean=vgg_mean, img_std=vgg_std)
        vgg_features = models.vgg19(pretrained=load_pretrained_vgg).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        # Split the VGG19 feature stack into three consecutive slices,
        # keeping the original layer indices as module names.
        for stage, start, stop in ((self.slice1, 0, 2), (self.slice2, 2, 7), (self.slice3, 7, 12)):
            for layer_idx in range(start, stop):
                stage.add_module(str(layer_idx), vgg_features[layer_idx])
        if not requires_grad:
            # Freeze the VGG slices (img_normalize is left untouched).
            for stage in (self.slice1, self.slice2, self.slice3):
                for param in stage.parameters():
                    param.requires_grad = False
        if pretrained:
            self.init_weights(pretrained)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, 3, h, w).

        Returns:
            Tuple[Tensor]: Forward results in 3 levels.
                x_level3: Forward results in level 3 (n, 256, h/4, w/4).
                x_level2: Forward results in level 2 (n, 128, h/2, w/2).
                x_level1: Forward results in level 1 (n, 64, h, w).
        """
        normalized = self.img_normalize(x)
        level1 = self.slice1(normalized)
        level2 = self.slice2(level1)
        level3 = self.slice3(level2)
        return [level3, level2, level1]

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        load_checkpoint(self, pretrained, strict=strict, logger=get_root_logger())
|
@MODELS.register_module()
class AOTInpaintor(OneStageInpaintor):
    """Inpaintor for AOT-GAN method.

    This inpaintor is implemented according to the paper: Aggregated
    Contextual Transformations for High-Resolution Image Inpainting.
    """

    def forward_train_d(self, data_batch, is_real, is_disc, mask):
        """Forward function in discriminator training step.

        In this function, we compute the prediction for each data batch (real
        or fake). Meanwhile, the standard gan loss will be computed with
        several proposed losses for stable training.

        Args:
            data_batch (torch.Tensor): Batch of real data or fake data.
            is_real (bool): If True, the gan loss will regard this batch as
                real data. Otherwise, the gan loss will regard this batch as
                fake data.
            is_disc (bool): If True, this function is called in discriminator
                training step. Otherwise, this function is called in
                generator training step. This will help us to compute
                different types of adversarial loss, like LSGAN.
            mask (torch.Tensor): Mask of data.

        Returns:
            dict: Contains the loss items computed in this function.
        """
        pred = self.disc(data_batch)
        loss_ = self.loss_gan(pred, is_real, is_disc, mask=mask)
        # Key name tells the caller which pass (real/fake) produced the loss.
        loss = (dict(real_loss=loss_) if is_real else dict(fake_loss=loss_))
        if self.with_disc_shift_loss:
            loss_d_shift = self.loss_disc_shift(loss_)
            # 0.5 balances the shift term computed on both real and fake passes.
            loss.update(loss_disc_shift=(loss_d_shift * 0.5))
        return loss

    def generator_loss(self, fake_res, fake_img, data_batch):
        """Forward function in generator training step.

        In this function, we mainly compute the loss items for generator with
        the given (fake_res, fake_img). In general, the `fake_res` is the
        direct output of the generator and the `fake_img` is the composition
        of direct output and ground-truth image.

        Args:
            fake_res (torch.Tensor): Direct output of the generator.
            fake_img (torch.Tensor): Composition of `fake_res` and
                ground-truth image.
            data_batch (dict): Contain other elements for computing losses.

        Returns:
            tuple(dict): Dict contains the results computed within this
                function for visualization and dict contains the loss items
                computed in this function.
        """
        gt = data_batch['gt_img']
        mask = data_batch['mask']
        masked_img = data_batch['masked_img']
        loss = dict()
        # Adversarial loss on the composited image.
        if self.with_gan:
            pred = self.disc(fake_img)
            loss_g_fake = self.loss_gan(pred, True, False, mask=mask)
            loss['loss_g_fake'] = loss_g_fake
        # L1 loss between direct generator output and ground truth.
        if self.with_l1_valid_loss:
            loss_l1_valid = self.loss_l1_valid(fake_res, gt)
            loss['loss_l1_valid'] = loss_l1_valid
        # Perceptual / style losses on the direct output.
        if self.with_out_percep_loss:
            (loss_out_percep, loss_out_style) = self.loss_percep(fake_res, gt)
            if (loss_out_percep is not None):
                loss['loss_out_percep'] = loss_out_percep
            if (loss_out_style is not None):
                loss['loss_out_style'] = loss_out_style
        res = dict(
            gt_img=gt.cpu(),
            masked_img=masked_img.cpu(),
            fake_res=fake_res.cpu(),
            fake_img=fake_img.cpu())
        return (res, loss)

    def forward_test(self, masked_img, mask, save_image=False, save_path=None, iteration=None, **kwargs):
        """Forward function for testing.

        Args:
            masked_img (torch.Tensor): Tensor with shape of (n, 3, h, w).
            mask (torch.Tensor): Tensor with shape of (n, 1, h, w).
            save_image (bool, optional): If True, results will be saved as
                image. Default: False.
            save_path (str, optional): If given a valid str, the results will
                be saved in this path. Default: None.
            iteration (int, optional): Iteration number. Default: None.

        Returns:
            dict: Contain output results and eval metrics (if exist).
        """
        # NOTE(review): adding the mask marks the hole pixels in the input;
        # presumably hole pixels in `masked_img` are zero so this sets them
        # to the mask value — confirm against the data pipeline.
        masked_img = (masked_img.float() + mask)
        input_x = torch.cat([masked_img, mask], dim=1)
        fake_res = self.generator(input_x)
        # Composite: generator output inside the hole, input elsewhere.
        fake_img = ((fake_res * mask) + (masked_img * (1.0 - mask)))
        output = dict()
        eval_results = {}
        if self.eval_with_metrics:
            gt_img = kwargs['gt_img']
            data_dict = dict(gt_img=gt_img, fake_res=fake_res, fake_img=fake_img, mask=None)
            for metric_name in self.test_cfg['metrics']:
                if (metric_name in ['ssim', 'psnr']):
                    # Image-based metrics work on images converted from the
                    # [-1, 1] tensor range.
                    eval_results[metric_name] = self._eval_metrics[metric_name](
                        tensor2img(fake_img, min_max=((- 1), 1)),
                        tensor2img(gt_img, min_max=((- 1), 1)))
                else:
                    eval_results[metric_name] = self._eval_metrics[metric_name]()(data_dict).item()
            output['eval_results'] = eval_results
        else:
            output['fake_res'] = fake_res
            output['fake_img'] = fake_img
        output['meta'] = (None if ('meta' not in kwargs) else kwargs['meta'][0])
        if save_image:
            assert (save_image and (save_path is not None)), 'Save path should been given'
            assert (output['meta'] is not None), 'Meta information should be given to save image.'
            # Output file is named after the ground-truth image, optionally
            # tagged with the current iteration.
            tmp_filename = output['meta']['gt_img_path']
            filestem = Path(tmp_filename).stem
            if (iteration is not None):
                filename = f'{filestem}_{iteration}.png'
            else:
                filename = f'{filestem}.png'
            mmcv.mkdir_or_exist(save_path)
            img_list = ([kwargs['gt_img']] if ('gt_img' in kwargs) else [])
            img_list.extend([masked_img, mask.expand_as(masked_img), fake_res, fake_img])
            # Concatenate along width for a side-by-side visualization.
            img = torch.cat(img_list, dim=3).cpu()
            self.save_visualization(img, osp.join(save_path, filename))
            output['save_img_path'] = osp.abspath(osp.join(save_path, filename))
        return output

    def train_step(self, data_batch, optimizer):
        """Train step function.

        In this function, the inpaintor will finish the train step following
        the pipeline:
        1. get fake res/image
        2. compute reconstruction losses for generator
        3. compute adversarial loss for discriminator
        4. optimize generator
        5. optimize discriminator

        Args:
            data_batch (torch.Tensor): Batch of data as input.
            optimizer (dict[torch.optim.Optimizer]): Dict with optimizers for
                generator and discriminator (if exist).

        Returns:
            dict: Dict with loss, information for logger, the number of
                samples and results for visualization.
        """
        log_vars = {}
        gt_img = data_batch['gt_img']
        mask = data_batch['mask']
        masked_img = data_batch['masked_img']
        # See forward_test: mark hole pixels in the input.
        masked_img = (masked_img.float() + mask)
        input_x = torch.cat([masked_img, mask], dim=1)
        fake_res = self.generator(input_x)
        # Composite against the ground truth (not the masked input) here.
        fake_img = ((gt_img * (1.0 - mask)) + (fake_res * mask))
        # Discriminator training step.
        if (self.train_cfg.disc_step > 0):
            set_requires_grad(self.disc, True)
            disc_losses_real = self.forward_train_d(gt_img, True, True, mask=mask)
            disc_losses_fake = self.forward_train_d(fake_img.detach(), False, True, mask=mask)
            disc_losses_ = (disc_losses_real['real_loss'] + disc_losses_fake['fake_loss'])
            disc_losses = dict(disc_losses=disc_losses_)
            (loss_disc, log_vars_d) = self.parse_losses(disc_losses)
            log_vars.update(log_vars_d)
            optimizer['disc'].zero_grad()
            loss_disc.backward()
            optimizer['disc'].step()
            # Only update the generator once every `disc_step` iterations.
            self.disc_step_count = ((self.disc_step_count + 1) % self.train_cfg.disc_step)
            if (self.disc_step_count != 0):
                results = dict(
                    gt_img=gt_img.cpu(),
                    masked_img=masked_img.cpu(),
                    fake_res=fake_res.cpu(),
                    fake_img=fake_img.cpu())
                outputs = dict(
                    log_vars=log_vars,
                    num_samples=len(data_batch['gt_img'].data),
                    results=results)
                return outputs
        # Generator training step.
        if self.with_gan:
            set_requires_grad(self.disc, False)
        (results, g_losses) = self.generator_loss(fake_res, fake_img, data_batch)
        (loss_g, log_vars_g) = self.parse_losses(g_losses)
        log_vars.update(log_vars_g)
        optimizer['generator'].zero_grad()
        loss_g.backward()
        optimizer['generator'].step()
        outputs = dict(
            log_vars=log_vars,
            num_samples=len(data_batch['gt_img'].data),
            results=results)
        return outputs
|
@MODELS.register_module()
class DeepFillv1Inpaintor(TwoStageInpaintor):
    """Two-stage inpaintor for the DeepFillv1 method.

    Trains a coarse (stage1) and a refinement (stage2) generator together
    with a discriminator that has separate global and local branches.
    """

    def get_module(self, model, module_name):
        """Get an inner module from model.

        Since we will wrapper DDP for some model, we have to judge whether
        the module can be indexed directly.

        Args:
            model (nn.Module): This model may wrapped with DDP or not.
            module_name (str): The name of specific module.

        Return:
            nn.Module: Returned sub module.
        """
        if isinstance(model, (DataParallel, DistributedDataParallel)):
            # DDP/DP wraps the real model under `.module`.
            return getattr(model.module, module_name)
        return getattr(model, module_name)

    def forward_train_d(self, data_batch, is_real, is_disc):
        """Forward function in discriminator training step.

        In this function, we modify the default implementation with only one
        discriminator. In DeepFillv1 model, they use two separated
        discriminators for global and local consistency.

        Args:
            data_batch (torch.Tensor): Batch of real data or fake data.
            is_real (bool): If True, the gan loss will regard this batch as
                real data. Otherwise, the gan loss will regard this batch as
                fake data.
            is_disc (bool): If True, this function is called in discriminator
                training step. Otherwise, this function is called in
                generator training step. This will help us to compute
                different types of adversarial loss, like LSGAN.

        Returns:
            dict: Contains the loss items computed in this function.
        """
        (global_pred, local_pred) = self.disc(data_batch)
        loss_global = self.loss_gan(global_pred, is_real, is_disc)
        loss_local = self.loss_gan(local_pred, is_real, is_disc)
        if is_real:
            loss = dict(real_loss_global=loss_global, real_loss_local=loss_local)
        else:
            loss = dict(fake_loss_global=loss_global, fake_loss_local=loss_local)
        if self.with_disc_shift_loss:
            loss_d_shift_global = self.loss_disc_shift(loss_global)
            loss_d_shift_local = self.loss_disc_shift(loss_local)
            # 0.5 balances the shift term computed on both real and fake passes.
            loss.update(loss_disc_shift_global=(loss_d_shift_global * 0.5))
            loss.update(loss_disc_shift_local=(loss_d_shift_local * 0.5))
        return loss

    def two_stage_loss(self, stage1_data, stage2_data, data_batch):
        """Calculate two-stage loss.

        Args:
            stage1_data (dict): Contain stage1 results.
            stage2_data (dict): Contain stage2 results.
            data_batch (dict): Contain data needed to calculate loss.

        Returns:
            dict: Contain losses with name.
        """
        gt = data_batch['gt_img']
        mask = data_batch['mask']
        masked_img = data_batch['masked_img']
        loss = dict()
        results = dict(gt_img=gt.cpu(), mask=mask.cpu(), masked_img=masked_img.cpu())
        # Losses on the coarse (stage1) output.
        if (self.stage1_loss_type is not None):
            fake_res = stage1_data['fake_res']
            fake_img = stage1_data['fake_img']
            for type_key in self.stage1_loss_type:
                tmp_loss = self.calculate_loss_with_type(
                    type_key, fake_res, fake_img, gt, mask, prefix='stage1_')
                loss.update(tmp_loss)
        results.update(
            dict(
                stage1_fake_res=stage1_data['fake_res'].cpu(),
                stage1_fake_img=stage1_data['fake_img'].cpu()))
        # Losses on the refined (stage2) output, including the local patch.
        if (self.stage2_loss_type is not None):
            fake_res = stage2_data['fake_res']
            fake_img = stage2_data['fake_img']
            fake_local = stage2_data['fake_local']
            for type_key in self.stage2_loss_type:
                tmp_loss = self.calculate_loss_with_type(
                    type_key, fake_res, fake_img, gt, mask, prefix='stage2_',
                    fake_local=fake_local)
                loss.update(tmp_loss)
        results.update(
            dict(
                stage2_fake_res=stage2_data['fake_res'].cpu(),
                stage2_fake_img=stage2_data['fake_img'].cpu()))
        return (results, loss)

    def calculate_loss_with_type(self, loss_type, fake_res, fake_img, gt, mask, prefix='stage1_', fake_local=None):
        """Calculate multiple types of losses.

        Args:
            loss_type (str): Type of the loss.
            fake_res (torch.Tensor): Direct results from model.
            fake_img (torch.Tensor): Composited results from model.
            gt (torch.Tensor): Ground-truth tensor.
            mask (torch.Tensor): Mask tensor.
            prefix (str, optional): Prefix for loss name.
                Defaults to 'stage1_'.
            fake_local (torch.Tensor, optional): Local results from model.
                Defaults to None.

        Returns:
            dict: Contain loss value with its name.
        """
        loss_dict = dict()
        if (loss_type == 'loss_gan'):
            # Adversarial loss from both the global and local discriminators.
            (g_fake_global_pred, g_fake_local_pred) = self.disc((fake_img, fake_local))
            loss_g_fake_global = self.loss_gan(g_fake_global_pred, True, is_disc=False)
            loss_g_fake_local = self.loss_gan(g_fake_local_pred, True, is_disc=False)
            loss_dict[(prefix + 'loss_g_fake')] = (loss_g_fake_global + loss_g_fake_local)
        elif ('percep' in loss_type):
            (loss_pecep, loss_style) = self.loss_percep(fake_img, gt)
            if (loss_pecep is not None):
                loss_dict[(prefix + loss_type)] = loss_pecep
            if (loss_style is not None):
                # Replace the trailing 'percep' (6 chars) with 'style',
                # e.g. 'loss_percep' -> 'loss_style'.
                loss_dict[((prefix + loss_type[:(- 6)]) + 'style')] = loss_style
        elif ('tv' in loss_type):
            loss_tv = self.loss_tv(fake_img, mask=mask)
            loss_dict[(prefix + loss_type)] = loss_tv
        elif ('l1' in loss_type):
            # 'valid' variants weight the unmasked region, others the hole.
            weight = ((1.0 - mask) if ('valid' in loss_type) else mask)
            loss_l1 = getattr(self, loss_type)(fake_res, gt, weight=weight)
            loss_dict[(prefix + loss_type)] = loss_l1
        else:
            raise NotImplementedError(f'Please check your loss type {loss_type} and the config dict in init function. We cannot find the related loss function.')
        return loss_dict

    def train_step(self, data_batch, optimizer):
        """Train step function.

        In this function, the inpaintor will finish the train step following
        the pipeline:

        1. get fake res/image
        2. optimize discriminator (if have)
        3. optimize generator

        If `self.train_cfg.disc_step > 1`, the train step will contain
        multiple iterations for optimizing discriminator with different input
        data and only one iteration for optimizing generator after
        `disc_step` iterations for discriminator.

        Args:
            data_batch (torch.Tensor): Batch of data as input.
            optimizer (dict[torch.optim.Optimizer]): Dict with optimizers for
                generator and discriminator (if have).

        Returns:
            dict: Dict with loss, information for logger, the number of
                samples and results for visualization.
        """
        log_vars = {}
        gt_img = data_batch['gt_img']
        mask = data_batch['mask']
        masked_img = data_batch['masked_img']
        bbox_tensor = data_batch['mask_bbox']
        # Optionally feed an all-ones channel alongside the mask.
        if self.input_with_ones:
            tmp_ones = torch.ones_like(mask)
            input_x = torch.cat([masked_img, tmp_ones, mask], dim=1)
        else:
            input_x = torch.cat([masked_img, mask], dim=1)
        (stage1_fake_res, stage2_fake_res) = self.generator(input_x)
        stage1_fake_img = ((masked_img * (1.0 - mask)) + (stage1_fake_res * mask))
        stage2_fake_img = ((masked_img * (1.0 - mask)) + (stage2_fake_res * mask))
        # Crop the local patch around the mask bbox for the local critic.
        (stage2_fake_local, bbox_new) = extract_around_bbox(
            stage2_fake_img, bbox_tensor, self.train_cfg.local_size)
        gt_local = extract_bbox_patch(bbox_new, gt_img)
        # Stack fake/gt local patches vertically for visualization.
        fake_gt_local = torch.cat([stage2_fake_local, gt_local], dim=2)
        # Discriminator training step.
        if ((self.train_cfg.disc_step > 0) and self.with_gan):
            set_requires_grad(self.disc, True)
            fake_data = (stage2_fake_img.detach(), stage2_fake_local.detach())
            real_data = (gt_img, gt_local)
            disc_losses = self.forward_train_d(fake_data, False, is_disc=True)
            (loss_disc, log_vars_d) = self.parse_losses(disc_losses)
            log_vars.update(log_vars_d)
            optimizer['disc'].zero_grad()
            loss_disc.backward()
            disc_losses = self.forward_train_d(real_data, True, is_disc=True)
            (loss_disc, log_vars_d) = self.parse_losses(disc_losses)
            log_vars.update(log_vars_d)
            loss_disc.backward()
            # Gradient penalty for both discriminator branches.
            if self.with_gp_loss:
                loss_gp_global = self.loss_gp(
                    self.get_module(self.disc, 'global_disc'), gt_img,
                    stage2_fake_img, mask=mask)
                loss_gp_local = self.loss_gp(
                    self.get_module(self.disc, 'local_disc'), gt_local,
                    stage2_fake_local)
                (loss_disc, log_vars_d) = self.parse_losses(
                    dict(loss_gp_global=loss_gp_global, loss_gp_local=loss_gp_local))
                log_vars.update(log_vars_d)
                loss_disc.backward()
            optimizer['disc'].step()
            # Only update the generator once every `disc_step` iterations.
            self.disc_step_count = ((self.disc_step_count + 1) % self.train_cfg.disc_step)
            if (self.disc_step_count != 0):
                results = dict(
                    gt_img=gt_img.cpu(),
                    masked_img=masked_img.cpu(),
                    stage1_fake_res=stage1_fake_res.cpu(),
                    stage1_fake_img=stage1_fake_img.cpu(),
                    stage2_fake_res=stage2_fake_res.cpu(),
                    stage2_fake_img=stage2_fake_img.cpu(),
                    fake_gt_local=fake_gt_local.cpu(),
                    fake_res=stage2_fake_res.cpu(),
                    fake_img=stage2_fake_img.cpu())
                outputs = dict(
                    log_vars=log_vars,
                    num_samples=len(data_batch['gt_img'].data),
                    results=results)
                return outputs
        # Generator training step.
        stage1_results = dict(fake_res=stage1_fake_res, fake_img=stage1_fake_img)
        stage2_results = dict(fake_res=stage2_fake_res, fake_img=stage2_fake_img, fake_local=stage2_fake_local)
        if self.with_gan:
            set_requires_grad(self.disc, False)
        (results, two_stage_losses) = self.two_stage_loss(stage1_results, stage2_results, data_batch)
        (loss_two_stage, log_vars_two_stage) = self.parse_losses(two_stage_losses)
        log_vars.update(log_vars_two_stage)
        optimizer['generator'].zero_grad()
        loss_two_stage.backward()
        optimizer['generator'].step()
        results['fake_gt_local'] = fake_gt_local.cpu()
        outputs = dict(
            log_vars=log_vars,
            num_samples=len(data_batch['gt_img'].data),
            results=results)
        return outputs
|
@MODELS.register_module()
class GLInpaintor(OneStageInpaintor):
    """Inpaintor for global&local method.

    This inpaintor is implemented according to the paper:
    Globally and Locally Consistent Image Completion

    Importantly, this inpaintor is an example for using custom training
    schedule based on `OneStageInpaintor`.

    The training pipeline of global&local is as following:

    .. code-block:: python

        if cur_iter < iter_tc:
            update generator with only l1 loss
        else:
            update discriminator
            if cur_iter > iter_td:
                update generator with l1 loss and adversarial loss

    The new attribute `cur_iter` is added for recording current number of
    iteration. The `train_cfg` contains the setting of the training schedule:

    .. code-block:: python

        train_cfg = dict(
            start_iter=0,
            disc_step=1,
            iter_tc=90000,
            iter_td=100000
        )

    `iter_tc` and `iter_td` correspond to the notation :math:`T_C` and
    :math:`T_D` of the original paper.

    Args:
        generator (dict): Config for encoder-decoder style generator.
        disc (dict): Config for discriminator.
        loss_gan (dict): Config for adversarial loss.
        loss_gp (dict): Config for gradient penalty loss.
        loss_disc_shift (dict): Config for discriminator shift loss.
        loss_composed_percep (dict): Config for perceptural and style loss
            with composed image as input.
        loss_out_percep (dict): Config for perceptural and style loss with
            direct output as input.
        loss_l1_hole (dict): Config for l1 loss in the hole.
        loss_l1_valid (dict): Config for l1 loss in the valid region.
        loss_tv (dict): Config for total variation loss.
        train_cfg (dict): Configs for training scheduler. `disc_step` must be
            contained for indicates the discriminator updating steps in each
            training step.
        test_cfg (dict): Configs for testing scheduler.
        pretrained (str): Path for pretrained model. Default None.
    """

    def __init__(self, encdec, disc=None, loss_gan=None, loss_gp=None, loss_disc_shift=None, loss_composed_percep=None, loss_out_percep=False, loss_l1_hole=None, loss_l1_valid=None, loss_tv=None, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__(
            encdec,
            disc=disc,
            loss_gan=loss_gan,
            loss_gp=loss_gp,
            loss_disc_shift=loss_disc_shift,
            loss_composed_percep=loss_composed_percep,
            loss_out_percep=loss_out_percep,
            loss_l1_hole=loss_l1_hole,
            loss_l1_valid=loss_l1_valid,
            loss_tv=loss_tv,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)
        # Iteration counter drives the T_C / T_D training schedule.
        if (self.train_cfg is not None):
            self.cur_iter = self.train_cfg.start_iter

    def generator_loss(self, fake_res, fake_img, fake_local, data_batch):
        """Forward function in generator training step.

        In this function, we mainly compute the loss items for generator with
        the given (fake_res, fake_img). In general, the `fake_res` is the
        direct output of the generator and the `fake_img` is the composition
        of direct output and ground-truth image.

        Args:
            fake_res (torch.Tensor): Direct output of the generator.
            fake_img (torch.Tensor): Composition of `fake_res` and
                ground-truth image.
            fake_local (torch.Tensor): Local patch cropped from `fake_img`.
            data_batch (dict): Contain other elements for computing losses.

        Returns:
            tuple[dict]: A tuple containing two dictionaries. The first one
                is the result dict, which contains the results computed
                within this function for visualization. The second one is the
                loss dict, containing loss items computed in this function.
        """
        gt = data_batch['gt_img']
        mask = data_batch['mask']
        masked_img = data_batch['masked_img']
        loss = dict()
        # Adversarial loss only after the T_D milestone.
        if (self.with_gan and (self.cur_iter > self.train_cfg.iter_td)):
            g_fake_pred = self.disc((fake_img, fake_local))
            loss_g_fake = self.loss_gan(g_fake_pred, True, False)
            loss['loss_g_fake'] = loss_g_fake
        if self.with_l1_hole_loss:
            loss_l1_hole = self.loss_l1_hole(fake_res, gt, weight=mask)
            loss['loss_l1_hole'] = loss_l1_hole
        if self.with_l1_valid_loss:
            loss_l1_valid = self.loss_l1_valid(fake_res, gt, weight=(1.0 - mask))
            loss['loss_l1_valid'] = loss_l1_valid
        res = dict(
            gt_img=gt.cpu(),
            masked_img=masked_img.cpu(),
            fake_res=fake_res.cpu(),
            fake_img=fake_img.cpu())
        return (res, loss)

    def train_step(self, data_batch, optimizer):
        """Train step function.

        In this function, the inpaintor will finish the train step following
        the pipeline:

        1. get fake res/image
        2. optimize discriminator (if in current schedule)
        3. optimize generator (if in current schedule)

        If ``self.train_cfg.disc_step > 1``, the train step will contain
        multiple iterations for optimizing discriminator with different input
        data and only one iteration for optimizing generator after
        `disc_step` iterations for discriminator.

        Args:
            data_batch (torch.Tensor): Batch of data as input.
            optimizer (dict[torch.optim.Optimizer]): Dict with optimizers for
                generator and discriminator (if have).

        Returns:
            dict: Dict with loss, information for logger, the number of
                samples and results for visualization.
        """
        log_vars = {}
        gt_img = data_batch['gt_img']
        mask = data_batch['mask']
        masked_img = data_batch['masked_img']
        bbox_tensor = data_batch['mask_bbox']
        input_x = torch.cat([masked_img, mask], dim=1)
        fake_res = self.generator(input_x)
        fake_img = ((gt_img * (1.0 - mask)) + (fake_res * mask))
        # Crop the local patch around the mask bbox for the local critic.
        (fake_local, bbox_new) = extract_around_bbox(fake_img, bbox_tensor, self.train_cfg.local_size)
        gt_local = extract_bbox_patch(bbox_new, gt_img)
        # Stack fake/gt local patches vertically for visualization.
        fake_gt_local = torch.cat([fake_local, gt_local], dim=2)
        # Discriminator is only trained after the T_C milestone.
        if ((self.train_cfg.disc_step > 0) and (self.cur_iter > self.train_cfg.iter_tc)):
            set_requires_grad(self.disc, True)
            fake_data = (fake_img.detach(), fake_local.detach())
            real_data = (gt_img, gt_local)
            disc_losses = self.forward_train_d(fake_data, False, True)
            (loss_disc, log_vars_d) = self.parse_losses(disc_losses)
            log_vars.update(log_vars_d)
            optimizer['disc'].zero_grad()
            loss_disc.backward()
            disc_losses = self.forward_train_d(real_data, True, True)
            (loss_disc, log_vars_d) = self.parse_losses(disc_losses)
            log_vars.update(log_vars_d)
            loss_disc.backward()
            optimizer['disc'].step()
            self.disc_step_count = ((self.disc_step_count + 1) % self.train_cfg.disc_step)
            # Before T_D, only the discriminator is updated in this phase.
            if ((self.disc_step_count != 0) or (self.cur_iter <= self.train_cfg.iter_td)):
                results = dict(
                    gt_img=gt_img.cpu(),
                    masked_img=masked_img.cpu(),
                    fake_res=fake_res.cpu(),
                    fake_img=fake_img.cpu(),
                    fake_gt_local=fake_gt_local.cpu())
                outputs = dict(
                    log_vars=log_vars,
                    num_samples=len(data_batch['gt_img'].data),
                    results=results)
                self.cur_iter += 1
                return outputs
        # Generator training step: either the pure-L1 phase (<= T_C) or the
        # joint phase (> T_D). NOTE(review): the condition is always true at
        # this point given the early return above.
        set_requires_grad(self.disc, False)
        if ((self.cur_iter <= self.train_cfg.iter_tc) or (self.cur_iter > self.train_cfg.iter_td)):
            (results, g_losses) = self.generator_loss(fake_res, fake_img, fake_local, data_batch)
            (loss_g, log_vars_g) = self.parse_losses(g_losses)
            log_vars.update(log_vars_g)
            optimizer['generator'].zero_grad()
            loss_g.backward()
            optimizer['generator'].step()
            results.update(fake_gt_local=fake_gt_local.cpu())
            outputs = dict(
                log_vars=log_vars,
                num_samples=len(data_batch['gt_img'].data),
                results=results)
        self.cur_iter += 1
        return outputs
|
@LOSSES.register_module()
class L1CompositionLoss(nn.Module):
    """L1 composition loss.

    Args:
        loss_weight (float): Loss weight for L1 loss. Default: 1.0.
        reduction (str): Specifies the reduction to apply to the output.
            Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and
            `weight` (argument of `forward()`) is not None. It will first
            reduces loss with 'mean' per-sample, and then it means over all
            the samples. Default: False.
    """

    def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
        super().__init__()
        if reduction not in ['none', 'mean', 'sum']:
            raise ValueError(f'Unsupported reduction mode: {reduction}. '
                             f'Supported ones are: {_reduction_modes}')
        self.loss_weight = loss_weight
        self.reduction = reduction
        self.sample_wise = sample_wise

    def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
        """
        Args:
            pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
            fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
            bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin
                merged image before normalized by ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicating matrix: weight[trimap == 128] = 1. Default: None.
        """
        # Re-composite the image from the predicted alpha matte.
        pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
        if weight is not None:
            # Broadcast the single-channel weight over the 3 color channels.
            weight = weight.expand(-1, 3, -1, -1)
        loss = l1_loss(
            pred_merged,
            ori_merged,
            weight,
            reduction=self.reduction,
            sample_wise=self.sample_wise)
        return self.loss_weight * loss
|
@LOSSES.register_module()
class MSECompositionLoss(nn.Module):
    """MSE (L2) composition loss.

    Args:
        loss_weight (float): Loss weight for MSE loss. Default: 1.0.
        reduction (str): Specifies the reduction to apply to the output.
            Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and
            `weight` (argument of `forward()`) is not None. It will first
            reduces loss with 'mean' per-sample, and then it means over all
            the samples. Default: False.
    """

    def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
        super().__init__()
        if reduction not in ['none', 'mean', 'sum']:
            raise ValueError(f'Unsupported reduction mode: {reduction}. '
                             f'Supported ones are: {_reduction_modes}')
        self.loss_weight = loss_weight
        self.reduction = reduction
        self.sample_wise = sample_wise

    def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
        """
        Args:
            pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
            fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
            bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin
                merged image before normalized by ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicating matrix: weight[trimap == 128] = 1. Default: None.
        """
        # Re-composite the image from the predicted alpha matte.
        pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
        if weight is not None:
            # Broadcast the single-channel weight over the 3 color channels.
            weight = weight.expand(-1, 3, -1, -1)
        loss = mse_loss(
            pred_merged,
            ori_merged,
            weight,
            reduction=self.reduction,
            sample_wise=self.sample_wise)
        return self.loss_weight * loss
|
@LOSSES.register_module()
class CharbonnierCompLoss(nn.Module):
    """Charbonnier composition loss.

    Args:
        loss_weight (float): Loss weight for L1 loss. Default: 1.0.
        reduction (str): Specifies the reduction to apply to the output.
            Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and
            `weight` (argument of `forward()`) is not None. It will first
            reduces loss with 'mean' per-sample, and then it means over all
            the samples. Default: False.
        eps (float): A value used to control the curvature near zero.
            Default: 1e-12.
    """

    def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False, eps=1e-12):
        super().__init__()
        if reduction not in ['none', 'mean', 'sum']:
            raise ValueError(f'Unsupported reduction mode: {reduction}. '
                             f'Supported ones are: {_reduction_modes}')
        self.loss_weight = loss_weight
        self.reduction = reduction
        self.sample_wise = sample_wise
        self.eps = eps

    def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
        """
        Args:
            pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
            fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
            bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin
                merged image before normalized by ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicating matrix: weight[trimap == 128] = 1. Default: None.
        """
        # Re-composite the image from the predicted alpha matte.
        pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
        if weight is not None:
            # Broadcast the single-channel weight over the 3 color channels.
            weight = weight.expand(-1, 3, -1, -1)
        loss = charbonnier_loss(
            pred_merged,
            ori_merged,
            weight,
            eps=self.eps,
            reduction=self.reduction,
            sample_wise=self.sample_wise)
        return self.loss_weight * loss
|
class LightCNNFeature(nn.Module):
    """Feature extractor of LightCNN.

    It is used to train DICGAN.
    """

    def __init__(self) -> None:
        super().__init__()
        # Take only the convolutional feature stages of LightCNN and
        # freeze them: this module is used as a fixed feature extractor.
        backbone = LightCNN(3)
        self.features = nn.Sequential(*backbone.features.children())
        self.features.requires_grad_(False)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor.

        Returns:
            Tensor: Forward results.
        """
        return self.features(x)

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
        load_checkpoint(self, pretrained, strict=strict, logger=get_root_logger())
|
@LOSSES.register_module()
class LightCNNFeatureLoss(nn.Module):
    """Feature loss of DICGAN, based on LightCNN.

    Args:
        pretrained (str): Path for pretrained weights.
        loss_weight (float): Loss weight. Default: 1.0.
        criterion (str): Criterion type. Options are 'l1' and 'mse'.
            Default: 'l1'.
    """

    def __init__(self, pretrained, loss_weight=1.0, criterion='l1'):
        super().__init__()
        self.model = LightCNNFeature()
        # The extractor must come with pretrained weights to be meaningful.
        assert isinstance(pretrained, str), 'Model must be pretrained'
        self.model.init_weights(pretrained)
        self.model.eval()
        self.loss_weight = loss_weight
        if criterion == 'l1':
            self.criterion = torch.nn.L1Loss()
        elif criterion == 'mse':
            self.criterion = torch.nn.MSELoss()
        else:
            raise ValueError(f"'criterion' should be 'l1' or 'mse', but got {criterion}")

    def forward(self, pred, gt):
        """Forward function.

        Args:
            pred (Tensor): Predicted tensor.
            gt (Tensor): GT tensor.

        Returns:
            Tensor: Forward results.
        """
        # The feature extractor must stay frozen in eval mode.
        assert self.model.training is False
        feat_pred = self.model(pred)
        feat_gt = self.model(gt).detach()
        return self.loss_weight * self.criterion(feat_pred, feat_gt)
|
@LOSSES.register_module()
class GANLoss(nn.Module):
    """Define GAN loss.

    Args:
        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.
        real_label_val (float): The value for real label. Default: 1.0.
        fake_label_val (float): The value for fake label. Default: 0.0.
        loss_weight (float): Loss weight. Default: 1.0.
            Note that loss_weight is only for generators; and it is always 1.0
            for discriminators.
    """

    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0,
                 loss_weight=1.0):
        super().__init__()
        self.gan_type = gan_type
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val
        self.loss_weight = loss_weight
        # smgan additionally needs a Gaussian blur to soften fake labels.
        if gan_type == 'smgan':
            self.gaussian_blur = GaussianBlur()
        if gan_type == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_type in ('lsgan', 'smgan'):
            self.loss = nn.MSELoss()
        elif gan_type == 'wgan':
            self.loss = self._wgan_loss
        elif gan_type == 'hinge':
            self.loss = nn.ReLU()
        else:
            raise NotImplementedError(
                f'GAN type {self.gan_type} is not implemented.')

    def _wgan_loss(self, input, target):
        """wgan loss.

        Args:
            input (Tensor): Input tensor.
            target (bool): Target label.

        Returns:
            Tensor: wgan loss.
        """
        mean = input.mean()
        return -mean if target else mean

    def get_target_label(self, input, target_is_real):
        """Get target label.

        Args:
            input (Tensor): Input tensor.
            target_is_real (bool): Whether the target is real or fake.

        Returns:
            (bool | Tensor): Target tensor. Return bool for wgan, otherwise,
            return Tensor.
        """
        if self.gan_type == 'wgan':
            # wgan compares against a boolean flag, not a label tensor.
            return target_is_real
        value = self.real_label_val if target_is_real else self.fake_label_val
        return input.new_ones(input.size()) * value

    def forward(self, input, target_is_real, is_disc=False, mask=None):
        """
        Args:
            input (Tensor): The input for the loss module, i.e., the network
                prediction.
            target_is_real (bool): Whether the target is real or fake.
            is_disc (bool): Whether the loss for discriminators or not.
                Default: False.
            mask (Tensor, optional): Spatial mask, only used by 'smgan'.

        Returns:
            Tensor: GAN loss value.
        """
        target_label = self.get_target_label(input, target_is_real)
        if self.gan_type == 'hinge':
            if not is_disc:
                # Generator side: simply push predictions up.
                loss = -input.mean()
            else:
                if target_is_real:
                    input = -input
                loss = self.loss(1 + input).mean()
        elif self.gan_type == 'smgan':
            in_h, in_w = input.shape[2:]
            mask_h, mask_w = mask.shape[2:]
            if (in_h, in_w) != (mask_h, mask_w):
                # Resize prediction to the mask resolution and refresh labels.
                input = F.interpolate(
                    input,
                    size=(mask_h, mask_w),
                    mode='bilinear',
                    align_corners=True)
                target_label = self.get_target_label(input, target_is_real)
            if is_disc:
                if not target_is_real:
                    # Fake targets are replaced by a blurred mask (soft mask).
                    blurred = self.gaussian_blur(mask).detach()
                    target_label = blurred.cuda() if mask.is_cuda \
                        else blurred.cpu()
                loss = self.loss(input, target_label)
            else:
                loss = self.loss(input, target_label) * mask / mask.mean()
                loss = loss.mean()
        else:
            loss = self.loss(input, target_label)
        # loss_weight applies to generators only.
        return loss if is_disc else loss * self.loss_weight
|
@LOSSES.register_module()
class GaussianBlur(nn.Module):
    """Blur a tensor with a two-dimensional Gaussian kernel.

    The kernel is convolved along each channel independently (grouped
    convolution), and batched input is supported.

    This function is modified from kornia.filters.gaussian:
    `<https://kornia.readthedocs.io/en/latest/_modules/kornia/filters/gaussian.html>`.

    Args:
        kernel_size (tuple[int]): The size of the kernel. Default: (71, 71).
        sigma (tuple[float]): The standard deviation of the kernel.
            Default (10.0, 10.0)

    Returns:
        Tensor: The Gaussian-blurred tensor.

    Shape:
        - input: Tensor with shape of (n, c, h, w)
        - output: Tensor with shape of (n, c, h, w)
    """

    def __init__(self, kernel_size=(71, 71), sigma=(10.0, 10.0)):
        super().__init__()
        self.kernel_size = kernel_size
        self.sigma = sigma
        self.padding = self.compute_zero_padding(kernel_size)
        self.kernel = self.get_2d_gaussian_kernel(kernel_size, sigma)

    @staticmethod
    def compute_zero_padding(kernel_size):
        """Compute zero padding tuple."""
        return tuple((size - 1) // 2 for size in kernel_size)

    def get_2d_gaussian_kernel(self, kernel_size, sigma):
        """Get the two-dimensional Gaussian filter matrix coefficients.

        Args:
            kernel_size (tuple[int]): Kernel filter size in the x and y
                direction. The kernel sizes should be odd and positive.
            sigma (tuple[int]): Gaussian standard deviation in the x and y
                direction.

        Returns:
            Tensor: A 2D torch tensor with gaussian filter matrix
            coefficients.
        """
        if not isinstance(kernel_size, tuple) or len(kernel_size) != 2:
            raise TypeError(
                'kernel_size must be a tuple of length two. Got {}'.format(
                    kernel_size))
        if not isinstance(sigma, tuple) or len(sigma) != 2:
            raise TypeError(
                'sigma must be a tuple of length two. Got {}'.format(sigma))
        ksize_x, ksize_y = kernel_size
        sigma_x, sigma_y = sigma
        kernel_x = self.get_1d_gaussian_kernel(ksize_x, sigma_x)
        kernel_y = self.get_1d_gaussian_kernel(ksize_y, sigma_y)
        # Outer product of the two 1D kernels yields the separable 2D kernel.
        return torch.matmul(kernel_x.unsqueeze(-1), kernel_y.unsqueeze(-1).t())

    def get_1d_gaussian_kernel(self, kernel_size, sigma):
        """Get the Gaussian filter coefficients in one dimension.

        Args:
            kernel_size (int): Kernel filter size in x or y direction.
                Should be odd and positive.
            sigma (float): Gaussian standard deviation in x or y direction.

        Returns:
            Tensor: A 1D torch tensor with gaussian filter coefficients.
        """
        if (not isinstance(kernel_size, int) or kernel_size % 2 == 0
                or kernel_size <= 0):
            raise TypeError(
                'kernel_size must be an odd positive integer. Got {}'.format(
                    kernel_size))
        return self.gaussian(kernel_size, sigma)

    def gaussian(self, kernel_size, sigma):
        """Sample a Gaussian centered in the window and normalize to sum 1."""
        center = kernel_size // 2

        def log_density(x):
            return -((x - center) ** 2) / float(2 * (sigma ** 2))

        gauss = torch.stack(
            [torch.exp(torch.tensor(log_density(x)))
             for x in range(kernel_size)])
        return gauss / gauss.sum()

    def forward(self, x):
        if not torch.is_tensor(x):
            raise TypeError(
                'Input x type is not a torch.Tensor. Got {}'.format(type(x)))
        if not (len(x.shape) == 4):
            raise ValueError(
                'Invalid input shape, we expect BxCxHxW. Got: {}'.format(
                    x.shape))
        num_channels = x.shape[1]
        kernel = self.kernel.to(x.device).to(x.dtype)
        kernel = kernel.repeat(num_channels, 1, 1, 1)
        # groups=c applies the same kernel to every channel independently.
        return conv2d(
            x, kernel, padding=self.padding, stride=1, groups=num_channels)
|
def gradient_penalty_loss(discriminator, real_data, fake_data, mask=None):
'Calculate gradient penalty for wgan-gp.\n\n Args:\n discriminator (nn.Module): Network for the discriminator.\n real_data (Tensor): Real input data.\n fake_data (Tensor): Fake input data.\n mask (Tensor): Masks for inpainting. Default: None.\n\n Returns:\n Tensor: A tensor for gradient penalty.\n '
batch_size = real_data.size(0)
alpha = torch.rand(batch_size, 1, 1, 1).to(real_data)
interpolates = ((alpha * real_data) + ((1.0 - alpha) * fake_data))
interpolates = autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = discriminator(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates, grad_outputs=torch.ones_like(disc_interpolates), create_graph=True, retain_graph=True, only_inputs=True)[0]
if (mask is not None):
gradients = (gradients * mask)
gradients_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
if (mask is not None):
gradients_penalty /= torch.mean(mask)
return gradients_penalty
|
@LOSSES.register_module()
class GradientPenaltyLoss(nn.Module):
    """Gradient penalty loss for wgan-gp.

    Thin module wrapper around :func:`gradient_penalty_loss` that scales the
    penalty by a configurable weight.

    Args:
        loss_weight (float): Loss weight. Default: 1.0.
    """

    def __init__(self, loss_weight=1.0):
        super().__init__()
        self.loss_weight = loss_weight

    def forward(self, discriminator, real_data, fake_data, mask=None):
        """Forward function.

        Args:
            discriminator (nn.Module): Network for the discriminator.
            real_data (Tensor): Real input data.
            fake_data (Tensor): Fake input data.
            mask (Tensor): Masks for inpainting. Default: None.

        Returns:
            Tensor: Loss.
        """
        penalty = gradient_penalty_loss(
            discriminator, real_data, fake_data, mask=mask)
        return self.loss_weight * penalty
|
@LOSSES.register_module()
class DiscShiftLoss(nn.Module):
    """Disc shift loss.

    Penalizes the mean of the squared discriminator output, scaled by
    ``loss_weight``.

    Args:
        loss_weight (float, optional): Loss weight. Defaults to 0.1.
    """

    def __init__(self, loss_weight=0.1):
        super().__init__()
        self.loss_weight = loss_weight

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Tensor with shape (n, c, h, w).

        Returns:
            Tensor: Loss.
        """
        loss = torch.mean((x ** 2))
        return (loss * self.loss_weight)
|
@LOSSES.register_module()
class GradientLoss(nn.Module):
    """Gradient loss.

    Compares horizontal and vertical Sobel gradients of prediction and
    target with an L1 penalty.

    Args:
        loss_weight (float): Loss weight for L1 loss. Default: 1.0.
        reduction (str): Specifies the reduction to apply to the output.
            Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
    """

    def __init__(self, loss_weight=1.0, reduction='mean'):
        super().__init__()
        self.loss_weight = loss_weight
        self.reduction = reduction
        if self.reduction not in ['none', 'mean', 'sum']:
            raise ValueError(f'Unsupported reduction mode: {self.reduction}. Supported ones are: {_reduction_modes}')

    def forward(self, pred, target, weight=None):
        """
        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
            weight (Tensor, optional): of shape (N, C, H, W). Element-wise
                weights. Default: None.
        """
        # Sobel kernels for x and y gradients, on the target's device/dtype.
        kx = torch.Tensor([[1, 0, -1], [2, 0, -2],
                           [1, 0, -1]]).view(1, 1, 3, 3).to(target)
        ky = torch.Tensor([[1, 2, 1], [0, 0, 0],
                           [-1, -2, -1]]).view(1, 1, 3, 3).to(target)
        loss = 0
        # Accumulate the L1 gradient difference for each direction.
        for kernel in (kx, ky):
            grad_pred = F.conv2d(pred, kernel, padding=1)
            grad_target = F.conv2d(target, kernel, padding=1)
            loss = loss + l1_loss(
                grad_pred, grad_target, weight, reduction=self.reduction)
        return loss * self.loss_weight
|
class PerceptualVGG(nn.Module):
    """VGG network used in calculating perceptual loss.

    In this implementation, we allow users to choose whether use normalization
    in the input feature and the type of vgg network. Note that the pretrained
    path must fit the vgg type.

    Args:
        layer_name_list (list[str]): According to the name in this list,
            forward function will return the corresponding features. This
            list contains the name each layer in `vgg.feature`. An example
            of this list is ['4', '10'].
        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
        use_input_norm (bool): If True, normalize the input image.
            Importantly, the input feature must in the range [0, 1].
            Default: True.
        pretrained (str): Path for pretrained weights. Default:
            'torchvision://vgg19'
    """

    def __init__(self, layer_name_list, vgg_type='vgg19',
                 use_input_norm=True, pretrained='torchvision://vgg19'):
        super().__init__()
        if pretrained.startswith('torchvision://'):
            # The checkpoint name must match the requested architecture.
            assert vgg_type in pretrained
        self.layer_name_list = layer_name_list
        self.use_input_norm = use_input_norm

        _vgg = getattr(vgg, vgg_type)()
        self.init_weights(_vgg, pretrained)
        # Keep only the prefix of `features` up to the deepest requested
        # layer; deeper layers are never used.
        num_layers = max(map(int, layer_name_list)) + 1
        assert len(_vgg.features) >= num_layers
        self.vgg_layers = _vgg.features[:num_layers]

        if self.use_input_norm:
            # ImageNet channel statistics; inputs are expected in [0, 1].
            self.register_buffer(
                'mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            self.register_buffer(
                'std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        # The feature extractor is frozen.
        for param in self.vgg_layers.parameters():
            param.requires_grad = False

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            dict[str, Tensor]: Features keyed by layer name.
        """
        if self.use_input_norm:
            x = (x - self.mean) / self.std

        output = {}
        for name, module in self.vgg_layers.named_children():
            x = module(x)
            if name in self.layer_name_list:
                output[name] = x.clone()
        return output

    def init_weights(self, model, pretrained):
        """Load pretrained weights into ``model``.

        Args:
            model (nn.Module): Models to be inited.
            pretrained (str): Path for pretrained weights.
        """
        logger = get_root_logger()
        load_checkpoint(model, pretrained, logger=logger)
|
@LOSSES.register_module()
class PerceptualLoss(nn.Module):
    """Perceptual loss with commonly used style loss.

    Args:
        layer_weights (dict): The weight for each layer of vgg feature for
            perceptual loss. Here is an example: {'4': 1., '9': 1., '18': 1.},
            which means the 5th, 10th and 18th feature layer will be
            extracted with weight 1.0 in calculating losses.
        layer_weights_style (dict): The weight for each layer of vgg feature
            for style loss. If set to 'None', the weights are set equal to
            the weights for perceptual loss. Default: None.
        vgg_type (str): The type of vgg network used as feature extractor.
            Default: 'vgg19'.
        use_input_norm (bool): If True, normalize the input image in vgg.
            Default: True.
        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
            loss will be calculated and the loss will multiplied by the
            weight. Default: 1.0.
        style_weight (float): If `style_weight > 0`, the style loss will be
            calculated and the loss will multiplied by the weight.
            Default: 1.0.
        norm_img (bool): If True, the image will be normed to [0, 1]. Note
            that this is different from the `use_input_norm` which norm the
            input in forward function of vgg according to the statistics of
            dataset. Importantly, the input image must be in range [-1, 1].
        pretrained (str): Path for pretrained weights. Default:
            'torchvision://vgg19'.
        criterion (str): Criterion type. Options are 'l1' and 'mse'.
            Default: 'l1'.
    """

    def __init__(self, layer_weights, layer_weights_style=None, vgg_type='vgg19', use_input_norm=True, perceptual_weight=1.0, style_weight=1.0, norm_img=True, pretrained='torchvision://vgg19', criterion='l1'):
        super().__init__()
        self.norm_img = norm_img
        self.perceptual_weight = perceptual_weight
        self.style_weight = style_weight
        self.layer_weights = layer_weights
        self.layer_weights_style = layer_weights_style

        self.vgg = PerceptualVGG(layer_name_list=list(self.layer_weights.keys()), vgg_type=vgg_type, use_input_norm=use_input_norm, pretrained=pretrained)

        # Only build a second VGG when the style layers differ from the
        # perceptual layers; otherwise reuse the same feature extractor.
        if ((self.layer_weights_style is not None) and (self.layer_weights_style != self.layer_weights)):
            self.vgg_style = PerceptualVGG(layer_name_list=list(self.layer_weights_style.keys()), vgg_type=vgg_type, use_input_norm=use_input_norm, pretrained=pretrained)
        else:
            self.layer_weights_style = self.layer_weights
            self.vgg_style = None

        criterion = criterion.lower()
        if (criterion == 'l1'):
            self.criterion = torch.nn.L1Loss()
        elif (criterion == 'mse'):
            self.criterion = torch.nn.MSELoss()
        else:
            raise NotImplementedError(f'{criterion} criterion has not been supported in this version.')

    def forward(self, x, gt):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).

        Returns:
            tuple(Tensor | None, Tensor | None): Perceptual loss and style
            loss; each is None when its weight is not positive.
        """
        if self.norm_img:
            # Map images from [-1, 1] to [0, 1] as expected by the VGG input
            # normalization.
            x = ((x + 1.0) * 0.5)
            gt = ((gt + 1.0) * 0.5)
        x_features = self.vgg(x)
        gt_features = self.vgg(gt.detach())

        if (self.perceptual_weight > 0):
            # Weighted sum of per-layer feature distances.
            percep_loss = 0
            for k in x_features.keys():
                percep_loss += (self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k])
            percep_loss *= self.perceptual_weight
        else:
            percep_loss = None

        if (self.style_weight > 0):
            if (self.vgg_style is not None):
                # Style layers differ: re-extract features with the style VGG.
                x_features = self.vgg_style(x)
                gt_features = self.vgg_style(gt.detach())
            # Style loss compares Gram matrices of the features.
            style_loss = 0
            for k in x_features.keys():
                style_loss += (self.criterion(self._gram_mat(x_features[k]), self._gram_mat(gt_features[k])) * self.layer_weights_style[k])
            style_loss *= self.style_weight
        else:
            style_loss = None

        return (percep_loss, style_loss)

    def _gram_mat(self, x):
        """Calculate Gram matrix.

        Args:
            x (torch.Tensor): Tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Gram matrix, normalized by c * h * w.
        """
        (n, c, h, w) = x.size()
        features = x.view(n, c, (w * h))
        features_t = features.transpose(1, 2)
        gram = (features.bmm(features_t) / ((c * h) * w))
        return gram
|
@LOSSES.register_module()
class TransferalPerceptualLoss(nn.Module):
    """Transferal perceptual loss.

    Args:
        loss_weight (float): Loss weight. Default: 1.0.
        use_attention (bool): If True, use soft-attention tensor. Default: True
        criterion (str): Criterion type. Options are 'l1' and 'mse'.
            Default: 'l1'.
    """

    def __init__(self, loss_weight=1.0, use_attention=True, criterion='mse'):
        super().__init__()
        self.use_attention = use_attention
        self.loss_weight = loss_weight
        criterion = criterion.lower()
        if criterion == 'l1':
            self.loss_function = torch.nn.L1Loss()
        elif criterion == 'mse':
            self.loss_function = torch.nn.MSELoss()
        else:
            raise ValueError(
                f"criterion should be 'l1' or 'mse', but got {criterion}")

    def forward(self, maps, soft_attention, textures):
        """Forward function.

        Args:
            maps (Tuple[Tensor]): Input tensors.
            soft_attention (Tensor): Soft-attention tensor.
            textures (Tuple[Tensor]): Ground-truth tensors.

        Returns:
            Tensor: Forward results.
        """
        if self.use_attention:
            h, w = soft_attention.shape[-2:]
            # First scale uses a sigmoid of the attention; the remaining
            # scales use the raw attention upsampled by powers of two.
            softs = [torch.sigmoid(soft_attention)]
            for i in range(1, len(maps)):
                scale = 2 ** i
                softs.append(
                    F.interpolate(
                        soft_attention,
                        size=(h * scale, w * scale),
                        mode='bicubic',
                        align_corners=False))
        else:
            # No attention: weight every scale uniformly.
            softs = [1.0, 1.0, 1.0]

        total = 0
        for feature, soft, texture in zip(maps, softs, textures):
            total = total + self.loss_function(feature * soft, texture * soft)
        return total * self.loss_weight
|
def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.

    Raises:
        ValueError: If ``reduction`` is not a supported mode.
    """
    # Handle the reduction mode explicitly instead of going through the
    # private ``F._Reduction.get_enum`` helper, so the behavior does not
    # depend on internal PyTorch APIs. The error message matches the one
    # ``get_enum`` raises for unknown modes.
    if reduction == 'none':
        return loss
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    raise ValueError(f'{reduction} is not a valid value for reduction')
|
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights. Default: None.
        reduction (str): Same as built-in losses of PyTorch. Options are
            "none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduces loss
            with 'mean' per-sample, and then it means over all the samples.
            Default: False.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        # Weight must align with the loss; channel dim may be 1 (broadcast)
        # or match exactly.
        assert weight.dim() == loss.dim()
        assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight

    # Without a weight, or for 'sum', the plain reduction applies.
    if weight is None or reduction == 'sum':
        return reduce_loss(loss, reduction)

    if reduction == 'mean':
        eps = 1e-12  # guards against division by an all-zero weight
        if weight.size(1) == 1:
            weight = weight.expand_as(loss)
        if sample_wise:
            # Mean per sample first, then mean over the batch.
            weight = weight.sum(dim=[1, 2, 3], keepdim=True)
            return (loss / (weight + eps)).sum() / weight.size(0)
        # Global weighted mean.
        return loss.sum() / (weight.sum() + eps)

    # reduction == 'none' with a weight: return the weighted elementwise loss.
    return loss
|
def masked_loss(loss_func):
    """Create a masked version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    sample_wise=False, **kwargs)`.

    :Example:

    >>> import torch
    >>> @masked_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.5000)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, reduction='sum')
    tensor(3.)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
                sample_wise=False, **kwargs):
        # Compute the raw elementwise loss, then delegate weighting and
        # reduction to the shared helper.
        raw = loss_func(pred, target, **kwargs)
        return mask_reduce_loss(raw, weight, reduction, sample_wise)

    return wrapper
|
@MODELS.register_module()
class GCA(BaseMattor):
    """Guided Contextual Attention image matting model.

    https://arxiv.org/abs/2001.04069

    Args:
        backbone (dict): Config of backbone.
        train_cfg (dict): Config of training. In ``train_cfg``,
            ``train_backbone`` should be specified. If the model has a
            refiner, ``train_refiner`` should be specified.
        test_cfg (dict): Config of testing. In ``test_cfg``, If the model has
            a refiner, ``train_refiner`` should be specified.
        pretrained (str): Path of the pretrained model.
        loss_alpha (dict): Config of the alpha prediction loss. Default: None.
    """

    def __init__(self, backbone, train_cfg=None, test_cfg=None, pretrained=None, loss_alpha=None):
        super().__init__(backbone, None, train_cfg, test_cfg, pretrained)
        self.loss_alpha = build_loss(loss_alpha)
        # fp16 is off by default; presumably toggled externally by the fp16
        # training hooks that work with ``auto_fp16`` — confirm.
        self.fp16_enabled = False

    @auto_fp16(apply_to=('x',))
    def _forward(self, x):
        """Run the backbone and map its tanh output to [0, 1] alpha."""
        raw_alpha = self.backbone(x)
        # tanh output lies in (-1, 1); rescale to (0, 1).
        pred_alpha = ((raw_alpha.tanh() + 1.0) / 2.0)
        return pred_alpha

    def forward_dummy(self, inputs):
        """Dummy forward (e.g. for FLOPs computation)."""
        return self._forward(inputs)

    def forward_train(self, merged, trimap, meta, alpha):
        """Forward function for training GCA model.

        Args:
            merged (Tensor): with shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            trimap (Tensor): with shape (N, C', H, W). Tensor of trimap. C'
                might be 1 or 3.
            meta (list[dict]): Meta data about the current data batch.
            alpha (Tensor): with shape (N, 1, H, W). Tensor of alpha.

        Returns:
            dict: Contains the loss items and batch information.
        """
        pred_alpha = self._forward(torch.cat((merged, trimap), 1))
        # NOTE(review): name suggests this weights the loss over the unknown
        # region of the trimap — confirm against get_unknown_tensor.
        weight = get_unknown_tensor(trimap, meta)
        losses = {'loss': self.loss_alpha(pred_alpha, alpha, weight)}
        return {'losses': losses, 'num_samples': merged.size(0)}

    def forward_test(self, merged, trimap, meta, save_image=False, save_path=None, iteration=None):
        """Defines the computation performed at every test call.

        Args:
            merged (Tensor): Image to predict alpha matte.
            trimap (Tensor): Trimap of the input image.
            meta (list[dict]): Meta data about the current data batch.
                Currently only batch_size 1 is supported. It may contain
                information needed to calculate metrics (``ori_alpha`` and
                ``ori_trimap``) or save predicted alpha matte
                (``merged_path``).
            save_image (bool, optional): Whether save predicted alpha matte.
                Defaults to False.
            save_path (str, optional): The directory to save predicted alpha
                matte. Defaults to None.
            iteration (int, optional): If given as None, the saved alpha matte
                will have the same file name with ``merged_path`` in meta
                dict. If given as an int, the saved alpha matte would named
                with postfix ``_{iteration}.png``. Defaults to None.

        Returns:
            dict: Contains the predicted alpha and evaluation result.
        """
        pred_alpha = self._forward(torch.cat((merged, trimap), 1))
        # Move prediction to numpy and restore the original image shape
        # before evaluation/saving.
        pred_alpha = pred_alpha.detach().cpu().numpy().squeeze()
        pred_alpha = self.restore_shape(pred_alpha, meta)
        eval_result = self.evaluate(pred_alpha, meta)
        if save_image:
            self.save_image(pred_alpha, meta, save_path, iteration)
        return {'pred_alpha': pred_alpha, 'eval_result': eval_result}
|
@MODELS.register_module()
class IndexNet(BaseMattor):
    """IndexNet matting model.

    This implementation follows:
    Indices Matter: Learning to Index for Deep Image Matting

    Args:
        backbone (dict): Config of backbone.
        train_cfg (dict): Config of training. In 'train_cfg',
            'train_backbone' should be specified.
        test_cfg (dict): Config of testing.
        pretrained (str): path of pretrained model.
        loss_alpha (dict): Config of the alpha prediction loss. Default: None.
        loss_comp (dict): Config of the composition loss. Default: None.
    """

    def __init__(self, backbone, train_cfg=None, test_cfg=None,
                 pretrained=None, loss_alpha=None, loss_comp=None):
        super().__init__(backbone, None, train_cfg, test_cfg, pretrained)
        # Both losses are optional; either may be disabled via None.
        self.loss_alpha = None if loss_alpha is None else build_loss(loss_alpha)
        self.loss_comp = None if loss_comp is None else build_loss(loss_comp)
        self.fp16_enabled = False

    def forward_dummy(self, inputs):
        """Dummy forward (e.g. for FLOPs computation)."""
        return self.backbone(inputs)

    @auto_fp16(apply_to=('merged', 'trimap'))
    def forward_train(self, merged, trimap, meta, alpha, ori_merged, fg, bg):
        """Forward function for training IndexNet model.

        Args:
            merged (Tensor): Input images tensor with shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            trimap (Tensor): Tensor of trimap with shape (N, 1, H, W).
            meta (list[dict]): Meta data about the current data batch.
            alpha (Tensor): Tensor of alpha with shape (N, 1, H, W).
            ori_merged (Tensor): Tensor of origin merged images (not
                normalized) with shape (N, C, H, W).
            fg (Tensor): Tensor of foreground with shape (N, C, H, W).
            bg (Tensor): Tensor of background with shape (N, C, H, W).

        Returns:
            dict: Contains the loss items and batch information.
        """
        pred_alpha = self.backbone(torch.cat((merged, trimap), 1))

        weight = get_unknown_tensor(trimap, meta)
        losses = {}
        if self.loss_alpha is not None:
            losses['loss_alpha'] = self.loss_alpha(pred_alpha, alpha, weight)
        if self.loss_comp is not None:
            losses['loss_comp'] = self.loss_comp(pred_alpha, fg, bg,
                                                 ori_merged, weight)
        return {'losses': losses, 'num_samples': merged.size(0)}

    def forward_test(self, merged, trimap, meta, save_image=False,
                     save_path=None, iteration=None):
        """Defines the computation performed at every test call.

        Args:
            merged (Tensor): Image to predict alpha matte.
            trimap (Tensor): Trimap of the input image.
            meta (list[dict]): Meta data about the current data batch.
                Currently only batch_size 1 is supported. It may contain
                information needed to calculate metrics (``ori_alpha`` and
                ``ori_trimap``) or save predicted alpha matte
                (``merged_path``).
            save_image (bool, optional): Whether save predicted alpha matte.
                Defaults to False.
            save_path (str, optional): The directory to save predicted alpha
                matte. Defaults to None.
            iteration (int, optional): If given as None, the saved alpha
                matte will have the same file name with ``merged_path`` in
                meta dict. If given as an int, the saved alpha matte would
                named with postfix ``_{iteration}.png``. Defaults to None.

        Returns:
            dict: Contains the predicted alpha and evaluation result.
        """
        alpha_pred = self.backbone(torch.cat((merged, trimap), 1))
        # To numpy, then restore the original spatial shape before
        # evaluation and optional saving.
        alpha_pred = self.restore_shape(alpha_pred.cpu().numpy().squeeze(),
                                        meta)
        eval_result = self.evaluate(alpha_pred, meta)
        if save_image:
            self.save_image(alpha_pred, meta, save_path, iteration)
        return {'pred_alpha': alpha_pred, 'eval_result': eval_result}
|
@MODELS.register_module()
class BasicRestorer(BaseModel):
    """Basic model for image restoration.

    It must contain a generator that takes an image as inputs and outputs a
    restored image. It also has a pixel-wise loss for training.

    The subclasses should overwrite the function `forward_train`,
    `forward_test` and `train_step`.

    Args:
        generator (dict): Config for the generator structure.
        pixel_loss (dict): Config for pixel-wise loss.
        train_cfg (dict): Config for training. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    # Maps metric names (as listed in ``test_cfg.metrics``) to the functions
    # that compute them in ``evaluate``.
    allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}

    def __init__(self, generator, pixel_loss, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__()
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        # fp16 is off by default; presumably toggled externally by fp16
        # hooks cooperating with ``auto_fp16`` — confirm.
        self.fp16_enabled = False
        self.generator = build_backbone(generator)
        self.init_weights(pretrained)
        self.pixel_loss = build_loss(pixel_loss)

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
        """
        self.generator.init_weights(pretrained)

    @auto_fp16(apply_to=('lq',))
    def forward(self, lq, gt=None, test_mode=False, **kwargs):
        """Forward function.

        Args:
            lq (Tensor): Input lq images.
            gt (Tensor): Ground-truth image. Default: None.
            test_mode (bool): Whether in test mode or not. Default: False.
            kwargs (dict): Other arguments.
        """
        if test_mode:
            return self.forward_test(lq, gt, **kwargs)
        return self.forward_train(lq, gt)

    def forward_train(self, lq, gt):
        """Training forward function.

        Args:
            lq (Tensor): LQ Tensor with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w).

        Returns:
            dict: Losses, sample count, and detached input/output tensors.
        """
        losses = dict()
        output = self.generator(lq)
        loss_pix = self.pixel_loss(output, gt)
        losses['loss_pix'] = loss_pix
        outputs = dict(losses=losses, num_samples=len(gt.data), results=dict(lq=lq.cpu(), gt=gt.cpu(), output=output.cpu()))
        return outputs

    def evaluate(self, output, gt):
        """Evaluation function.

        Args:
            output (Tensor): Model output with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w).

        Returns:
            dict: Evaluation results keyed by metric name.
        """
        crop_border = self.test_cfg.crop_border
        # Metrics operate on images, so convert tensors first.
        output = tensor2img(output)
        gt = tensor2img(gt)
        eval_result = dict()
        for metric in self.test_cfg.metrics:
            eval_result[metric] = self.allowed_metrics[metric](output, gt, crop_border)
        return eval_result

    def forward_test(self, lq, gt=None, meta=None, save_image=False, save_path=None, iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ Tensor with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w). Default: None.
            meta (list[dict]): Meta data; ``meta[0]['lq_path']`` is used to
                derive the saved file name when ``save_image`` is True.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results.
        """
        output = self.generator(lq)
        # With metrics configured, return evaluation results; otherwise
        # return the raw tensors.
        if ((self.test_cfg is not None) and self.test_cfg.get('metrics', None)):
            assert (gt is not None), 'evaluation with metrics must have gt images.'
            results = dict(eval_result=self.evaluate(output, gt))
        else:
            results = dict(lq=lq.cpu(), output=output.cpu())
            if (gt is not None):
                results['gt'] = gt.cpu()
        if save_image:
            lq_path = meta[0]['lq_path']
            folder_name = osp.splitext(osp.basename(lq_path))[0]
            # An integer iteration is appended to the file name; None keeps
            # the plain name; anything else is rejected.
            if isinstance(iteration, numbers.Number):
                save_path = osp.join(save_path, folder_name, f'{folder_name}-{(iteration + 1):06d}.png')
            elif (iteration is None):
                save_path = osp.join(save_path, f'{folder_name}.png')
            else:
                raise ValueError(f'iteration should be number or None, but got {type(iteration)}')
            mmcv.imwrite(tensor2img(output), save_path)
        return results

    def forward_dummy(self, img):
        """Used for computing network FLOPs.

        Args:
            img (Tensor): Input image.

        Returns:
            Tensor: Output image.
        """
        out = self.generator(img)
        return out

    def train_step(self, data_batch, optimizer):
        """Train step.

        Runs forward, parses the losses, and performs one optimizer update
        on the generator.

        Args:
            data_batch (dict): A batch of data.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output.
        """
        outputs = self(**data_batch, test_mode=False)
        (loss, log_vars) = self.parse_losses(outputs.pop('losses'))
        # Standard optimize loop: zero grads, backprop, step.
        optimizer['generator'].zero_grad()
        loss.backward()
        optimizer['generator'].step()
        outputs.update({'log_vars': log_vars})
        return outputs

    def val_step(self, data_batch, **kwargs):
        """Validation step.

        Args:
            data_batch (dict): A batch of data.
            kwargs (dict): Other arguments for ``val_step``.

        Returns:
            dict: Returned output.
        """
        output = self.forward_test(**data_batch, **kwargs)
        return output
|
@MODELS.register_module()
class DIC(BasicRestorer):
    """DIC model for Face Super-Resolution.

    Paper: Deep Face Super-Resolution with Iterative Collaboration between
    Attentive Recovery and Landmark Estimation.

    Args:
        generator (dict): Config for the generator.
        pixel_loss (dict): Config for the pixel loss.
        align_loss (dict): Config for the align loss.
        discriminator (dict): Config for the discriminator. Default: None.
        gan_loss (dict): Config for the gan loss. Default: None.
        feature_loss (dict): Config for the feature loss. Default: None.
        train_cfg (dict): Config for train. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def __init__(self, generator, pixel_loss, align_loss, discriminator=None,
                 gan_loss=None, feature_loss=None, train_cfg=None,
                 test_cfg=None, pretrained=None):
        # Skip BasicRestorer.__init__ on purpose: DIC builds its own
        # components (extra losses, optional discriminator) instead of the
        # base-class recipe.
        super(BasicRestorer, self).__init__()

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        self.generator = build_backbone(generator)
        # Maps network output back to the displayable image range
        # (sign=1 adds the mean back, i.e. de-normalization).
        self.img_denormalize = ImgNormalize(
            pixel_range=1,
            img_mean=(0.509, 0.424, 0.378),
            img_std=(1.0, 1.0, 1.0),
            sign=1)

        self.pixel_loss = build_loss(pixel_loss)
        self.align_loss = build_loss(align_loss)
        self.feature_loss = build_loss(feature_loss) if feature_loss else None

        # The GAN branch is enabled only when both configs are given.
        if discriminator and gan_loss:
            self.discriminator = build_component(discriminator)
            self.gan_loss = build_loss(gan_loss)
        else:
            self.discriminator = None
            self.gan_loss = None

        self.init_weights(pretrained)

        # Buffer so the counter is saved/restored with checkpoints.
        self.register_buffer('step_counter', torch.zeros(1))
        self.fix_iter = train_cfg.get('fix_iter', 0) if train_cfg else 0
        self.disc_steps = train_cfg.get('disc_steps', 1) if train_cfg else 1

    def forward(self, lq, gt=None, test_mode=False, **kwargs):
        """Forward function.

        Args:
            lq (Tensor): Input lq images.
            gt (Tensor): Ground-truth image. Default: None.
            test_mode (bool): Whether in test mode or not. Default: False.
            kwargs (dict): Other arguments.
        """
        if test_mode:
            return self.forward_test(lq, gt=gt, **kwargs)
        return self.generator.forward(lq)

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data, which requires
                'lq', 'gt', 'heatmap'.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output, which includes:
                log_vars, num_samples, results (lq, gt and pred).
        """
        lq = data_batch['lq']
        gt = data_batch['gt']
        gt_heatmap = data_batch['heatmap']

        sr_list, heatmap_list = self(**data_batch, test_mode=False)
        pred = sr_list[-1]

        losses = dict()
        log_vars = dict()

        # Freeze the discriminator while updating the generator.
        set_requires_grad(self.discriminator, False)

        # Supervise every iterative recovery step of the generator.
        loss_pix = 0.0
        loss_align = 0.0
        for step, (sr, heatmap) in enumerate(zip(sr_list, heatmap_list)):
            losses[f'loss_pixel_v{step}'] = self.pixel_loss(sr, gt)
            loss_pix += losses[f'loss_pixel_v{step}']
            # BUGFIX: landmark heatmaps must be supervised with the
            # alignment loss; previously `pixel_loss` was applied here by
            # mistake and `align_loss` (built in __init__) was never used.
            losses[f'loss_align_v{step}'] = self.align_loss(
                heatmap, gt_heatmap)
            loss_align += losses[f'loss_align_v{step}']

        # Feature/GAN losses are enabled only after the warm-up stage.
        if self.step_counter >= self.fix_iter:
            if self.feature_loss:
                losses['loss_feature'] = self.feature_loss(pred, gt)
            if self.gan_loss:
                fake_g_pred = self.discriminator(pred)
                losses['loss_gan'] = self.gan_loss(
                    fake_g_pred, target_is_real=True, is_disc=False)

        loss_g, log_vars_g = self.parse_losses(losses)
        log_vars.update(log_vars_g)
        optimizer['generator'].zero_grad()
        loss_g.backward()
        optimizer['generator'].step()

        # Discriminator update(s) after the warm-up stage.
        if self.discriminator and self.step_counter >= self.fix_iter:
            set_requires_grad(self.discriminator, True)
            for _ in range(self.disc_steps):
                # Real branch.
                real_d_pred = self.discriminator(gt)
                loss_d_real = self.gan_loss(
                    real_d_pred, target_is_real=True, is_disc=True)
                loss_d, log_vars_d = self.parse_losses(
                    dict(loss_d_real=loss_d_real))
                optimizer['discriminator'].zero_grad()
                loss_d.backward()
                log_vars.update(log_vars_d)
                # Fake branch (generator output detached from the graph).
                fake_d_pred = self.discriminator(pred.detach())
                loss_d_fake = self.gan_loss(
                    fake_d_pred, target_is_real=False, is_disc=True)
                loss_d, log_vars_d = self.parse_losses(
                    dict(loss_d_fake=loss_d_fake))
                loss_d.backward()
                log_vars.update(log_vars_d)
                optimizer['discriminator'].step()

        # 'loss' is the parse_losses total; individual terms are logged.
        log_vars.pop('loss')
        outputs = dict(
            log_vars=log_vars,
            num_samples=len(gt.data),
            results=dict(lq=lq.cpu(), gt=gt.cpu(), output=pred.cpu()))
        self.step_counter += 1
        return outputs

    def forward_test(self, lq, gt=None, meta=None, save_image=False,
                     save_path=None, iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ image.
            gt (Tensor): GT image.
            meta (list[dict]): Meta data, such as path of GT file.
                Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results, which contain either key(s)
                1. 'eval_result'.
                2. 'lq', 'pred'.
                3. 'lq', 'pred', 'gt'.
        """
        with torch.no_grad():
            sr_list, _ = self.generator.forward(lq)
            pred = sr_list[-1]
            pred = self.img_denormalize(pred)
            if gt is not None:
                gt = self.img_denormalize(gt)

        if self.test_cfg is not None and self.test_cfg.get('metrics', None):
            assert gt is not None, (
                'evaluation with metrics must have gt images.')
            results = dict(eval_result=self.evaluate(pred, gt))
        else:
            results = dict(lq=lq.cpu(), output=pred.cpu())
            if gt is not None:
                results['gt'] = gt.cpu()

        if save_image:
            # Prefer the GT filename for naming; fall back to the LQ path.
            if 'gt_path' in meta[0]:
                pred_path = meta[0]['gt_path']
            else:
                pred_path = meta[0]['lq_path']
            folder_name = osp.splitext(osp.basename(pred_path))[0]
            if isinstance(iteration, numbers.Number):
                save_path = osp.join(save_path, folder_name,
                                     f'{folder_name}-{(iteration + 1):06d}.png')
            elif iteration is None:
                save_path = osp.join(save_path, f'{folder_name}.png')
            else:
                raise ValueError('iteration should be number or None, '
                                 f'but got {type(iteration)}')
            mmcv.imwrite(tensor2img(pred), save_path)

        return results

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if isinstance(pretrained, str):
            self.generator.init_weights(pretrained, strict)
            if self.discriminator:
                self.discriminator.init_weights(pretrained, strict)
        elif pretrained is not None:
            raise TypeError('"pretrained" must be a str or None. '
                            f'But received {type(pretrained)}.')
|
@MODELS.register_module()
class EDVR(BasicRestorer):
    """EDVR model for video super-resolution.

    EDVR: Video Restoration with Enhanced Deformable Convolutional Networks.

    Args:
        generator (dict): Config for the generator structure.
        pixel_loss (dict): Config for pixel-wise loss.
        train_cfg (dict): Config for training. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def __init__(self, generator, pixel_loss, train_cfg=None, test_cfg=None,
                 pretrained=None):
        super().__init__(generator, pixel_loss, train_cfg, test_cfg,
                         pretrained)
        self.with_tsa = generator.get('with_tsa', False)
        self.step_counter = 0

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output.
        """
        # TSA warm-up: at the very first step, freeze everything except the
        # fusion module.
        if self.step_counter == 0 and self.with_tsa:
            if self.train_cfg is None or 'tsa_iter' not in self.train_cfg:
                raise KeyError(
                    'In TSA mode, train_cfg must contain "tsa_iter".')
            for name, param in self.generator.named_parameters():
                if 'fusion' not in name:
                    param.requires_grad = False

        # Release the whole generator once the warm-up is over.
        if self.with_tsa and self.step_counter == self.train_cfg.tsa_iter:
            for param in self.generator.parameters():
                param.requires_grad = True

        outputs = self(**data_batch, test_mode=False)
        loss, log_vars = self.parse_losses(outputs.pop('losses'))

        gen_optim = optimizer['generator']
        gen_optim.zero_grad()
        loss.backward()
        gen_optim.step()

        self.step_counter += 1

        outputs['log_vars'] = log_vars
        return outputs

    def forward_dummy(self, imgs):
        """Used for computing network FLOPs.

        Args:
            imgs (Tensor): Input images.

        Returns:
            Tensor: Restored image.
        """
        return self.generator(imgs)

    def forward_test(self, lq, gt=None, meta=None, save_image=False,
                     save_path=None, iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ Tensor with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w). Default: None.
            meta (list[dict]): Meta data of the batch. Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results.
        """
        output = self.generator(lq)

        if self.test_cfg is not None and self.test_cfg.get('metrics', None):
            assert gt is not None, (
                'evaluation with metrics must have gt images.')
            results = dict(eval_result=self.evaluate(output, gt))
        else:
            results = dict(lq=lq.cpu(), output=output.cpu())
            if gt is not None:
                results['gt'] = gt.cpu()

        if save_image:
            gt_path = meta[0]['gt_path'][0]
            folder_name = meta[0]['key'].split('/')[0]
            frame_name = osp.splitext(osp.basename(gt_path))[0]
            if iteration is None:
                save_path = osp.join(save_path, folder_name,
                                     f'{frame_name}.png')
            elif isinstance(iteration, numbers.Number):
                save_path = osp.join(save_path, folder_name,
                                     f'{frame_name}-{(iteration + 1):06d}.png')
            else:
                raise ValueError('iteration should be number or None, '
                                 f'but got {type(iteration)}')
            mmcv.imwrite(tensor2img(output), save_path)

        return results
|
@MODELS.register_module()
class ESRGAN(SRGAN):
    """Enhanced SRGAN model for single image super-resolution.

    Ref:
    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.
    It uses RaGAN for GAN updates:
    The relativistic discriminator: a key element missing from standard GAN.

    Args:
        generator (dict): Config for the generator.
        discriminator (dict): Config for the discriminator. Default: None.
        gan_loss (dict): Config for the gan loss.
            Note that the loss weight in gan loss is only for the generator.
        pixel_loss (dict): Config for the pixel loss. Default: None.
        perceptual_loss (dict): Config for the perceptual loss. Default: None.
        train_cfg (dict): Config for training. Default: None.
            You may change the training of gan by setting:
            `disc_steps`: how many discriminator updates after one generate
            update;
            `disc_init_steps`: how many discriminator updates at the start of
            the training.
            These two keys are useful when training with WGAN.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output.
        """
        lq = data_batch['lq']
        gt = data_batch['gt']

        # generator forward
        fake_g_output = self.generator(lq)

        losses = dict()
        log_vars = dict()

        # Freeze the discriminator while updating the generator.
        set_requires_grad(self.discriminator, False)
        # Generator update runs every `disc_steps` iterations, after the
        # initial discriminator-only warm-up of `disc_init_steps` iterations.
        if (((self.step_counter % self.disc_steps) == 0) and (self.step_counter >= self.disc_init_steps)):
            if self.pixel_loss:
                losses['loss_pix'] = self.pixel_loss(fake_g_output, gt)
            if self.perceptual_loss:
                (loss_percep, loss_style) = self.perceptual_loss(fake_g_output, gt)
                if (loss_percep is not None):
                    losses['loss_perceptual'] = loss_percep
                if (loss_style is not None):
                    losses['loss_style'] = loss_style

            # RaGAN generator loss: each prediction is compared against the
            # mean of the opposite set. Real predictions are detached so no
            # discriminator gradients flow during the generator update.
            real_d_pred = self.discriminator(gt).detach()
            fake_g_pred = self.discriminator(fake_g_output)
            loss_gan_fake = self.gan_loss((fake_g_pred - torch.mean(real_d_pred)), target_is_real=True, is_disc=False)
            loss_gan_real = self.gan_loss((real_d_pred - torch.mean(fake_g_pred)), target_is_real=False, is_disc=False)
            losses['loss_gan'] = ((loss_gan_fake + loss_gan_real) / 2)

            (loss_g, log_vars_g) = self.parse_losses(losses)
            log_vars.update(log_vars_g)
            optimizer['generator'].zero_grad()
            loss_g.backward()
            optimizer['generator'].step()

        # Discriminator update: real and fake branches are run as two
        # separate backward passes.
        set_requires_grad(self.discriminator, True)
        # Real branch (fake predictions detached).
        fake_d_pred = self.discriminator(fake_g_output).detach()
        real_d_pred = self.discriminator(gt)
        loss_d_real = (self.gan_loss((real_d_pred - torch.mean(fake_d_pred)), target_is_real=True, is_disc=True) * 0.5)
        (loss_d, log_vars_d) = self.parse_losses(dict(loss_d_real=loss_d_real))
        optimizer['discriminator'].zero_grad()
        loss_d.backward()
        log_vars.update(log_vars_d)
        # Fake branch (generator output detached).
        fake_d_pred = self.discriminator(fake_g_output.detach())
        loss_d_fake = (self.gan_loss((fake_d_pred - torch.mean(real_d_pred.detach())), target_is_real=False, is_disc=True) * 0.5)
        (loss_d, log_vars_d) = self.parse_losses(dict(loss_d_fake=loss_d_fake))
        loss_d.backward()
        log_vars.update(log_vars_d)
        optimizer['discriminator'].step()

        self.step_counter += 1

        # 'loss' is the parse_losses total; per-term entries are kept instead.
        log_vars.pop('loss')
        outputs = dict(log_vars=log_vars, num_samples=len(gt.data), results=dict(lq=lq.cpu(), gt=gt.cpu(), output=fake_g_output.cpu()))
        return outputs
|
@MODELS.register_module()
class GLEAN(SRGAN):
    """GLEAN model for single image super-resolution.

    This model is identical to SRGAN except that the output images are
    transformed from [-1, 1] to [0, 1].

    Paper:
        GLEAN: Generative Latent Bank for Large-Factor Image
        Super-Resolution. CVPR, 2021.
    """

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
        """
        self.generator.init_weights(pretrained=pretrained)

    def forward_test(self, lq, gt=None, meta=None, save_image=False,
                     save_path=None, iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ Tensor with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w). Default: None.
            meta (list[dict]): Meta data of the batch. Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results.
        """
        # The generator works in [-1, 1]; map its output to [0, 1].
        output = (self.generator(lq) + 1) / 2.0

        if self.test_cfg is not None and self.test_cfg.get('metrics', None):
            assert gt is not None, (
                'evaluation with metrics must have gt images.')
            # GT is stored in [-1, 1] as well; map it before evaluation.
            gt = (gt + 1) / 2.0
            results = dict(eval_result=self.evaluate(output, gt))
        else:
            results = dict(lq=lq.cpu(), output=output.cpu())
            if gt is not None:
                results['gt'] = gt.cpu()

        if save_image:
            basename = osp.splitext(osp.basename(meta[0]['lq_path']))[0]
            if iteration is None:
                save_path = osp.join(save_path, f'{basename}.png')
            elif isinstance(iteration, numbers.Number):
                save_path = osp.join(save_path, basename,
                                     f'{basename}-{(iteration + 1):06d}.png')
            else:
                raise ValueError('iteration should be number or None, '
                                 f'but got {type(iteration)}')
            mmcv.imwrite(tensor2img(output), save_path)

        return results
|
@MODELS.register_module()
class LIIF(BasicRestorer):
    """LIIF model for single image super-resolution.

    Paper: Learning Continuous Image Representation with
    Local Implicit Image Function

    Args:
        generator (dict): Config for the generator.
        pixel_loss (dict): Config for the pixel loss.
        rgb_mean (tuple[float]): Data mean.
            Default: (0.5, 0.5, 0.5).
        rgb_std (tuple[float]): Data std.
            Default: (0.5, 0.5, 0.5).
        train_cfg (dict): Config for train. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def __init__(self, generator, pixel_loss, rgb_mean=(0.5, 0.5, 0.5),
                 rgb_std=(0.5, 0.5, 0.5), train_cfg=None, test_cfg=None,
                 pretrained=None):
        super().__init__(generator, pixel_loss, train_cfg=train_cfg,
                         test_cfg=test_cfg, pretrained=pretrained)
        rgb_mean = torch.FloatTensor(rgb_mean)
        rgb_std = torch.FloatTensor(rgb_std)
        # LQ statistics broadcast over (n, c, h, w) image tensors.
        self.lq_mean = rgb_mean.view(1, -1, 1, 1)
        self.lq_std = rgb_std.view(1, -1, 1, 1)
        # GT statistics broadcast over (n, num_coords, c) sample tensors.
        self.gt_mean = rgb_mean.view(1, 1, -1)
        self.gt_std = rgb_std.view(1, 1, -1)

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data, which requires
                'coord', 'lq', 'gt', 'cell'.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output, which includes:
                log_vars, num_samples, results (lq, gt and pred).
        """
        coord = data_batch['coord']
        cell = data_batch['cell']
        lq = data_batch['lq']
        gt = data_batch['gt']

        # Move normalization constants to the data device/dtype, then
        # normalize inputs and targets.
        self.lq_mean = self.lq_mean.to(lq)
        self.lq_std = self.lq_std.to(lq)
        self.gt_mean = self.gt_mean.to(gt)
        self.gt_std = self.gt_std.to(gt)
        lq = (lq - self.lq_mean) / self.lq_std
        gt = (gt - self.gt_mean) / self.gt_std

        pred = self.generator(lq, coord, cell)

        losses = dict()
        losses['loss_pix'] = self.pixel_loss(pred, gt)
        loss, log_vars = self.parse_losses(losses)

        # BUGFIX: the runner passes a dict of optimizers keyed by submodule
        # name (see every other restorer in this file); calling
        # `optimizer.zero_grad()` on that dict would raise AttributeError.
        optimizer['generator'].zero_grad()
        loss.backward()
        optimizer['generator'].step()

        log_vars.pop('loss')
        outputs = dict(
            log_vars=log_vars,
            num_samples=len(gt.data),
            results=dict(lq=lq.cpu(), gt=gt.cpu(), output=pred.cpu()))
        return outputs

    def forward_test(self, lq, gt, coord, cell, meta=None, save_image=False,
                     save_path=None, iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ image.
            gt (Tensor): GT image.
            coord (Tensor): Coord tensor.
            cell (Tensor): Cell tensor.
            meta (list[dict]): Meta data, such as path of GT file.
                Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results, which contain either key(s)
                1. 'eval_result'.
                2. 'lq', 'pred'.
                3. 'lq', 'pred', 'gt'.
        """
        # Normalize LQ with the same statistics used in training.
        self.lq_mean = self.lq_mean.to(lq)
        self.lq_std = self.lq_std.to(lq)
        lq = (lq - self.lq_mean) / self.lq_std
        with torch.no_grad():
            pred = self.generator(lq, coord, cell, test_mode=True)
            # De-normalize predictions back to [0, 1].
            self.gt_mean = self.gt_mean.to(pred)
            self.gt_std = self.gt_std.to(pred)
            pred = pred * self.gt_std + self.gt_mean
            pred.clamp_(0, 1)

        # Reshape the flat per-coordinate predictions back to an image grid;
        # `s` is the (assumed uniform) upscaling factor implied by `coord`.
        ih, iw = lq.shape[-2:]
        s = math.sqrt(coord.shape[1] / (ih * iw))
        shape = [lq.shape[0], round(ih * s), round(iw * s), 3]
        pred = pred.view(*shape).permute(0, 3, 1, 2).contiguous()
        if gt is not None:
            gt = gt.view(*shape).permute(0, 3, 1, 2).contiguous()

        if self.test_cfg is not None and self.test_cfg.get('metrics', None):
            assert gt is not None, (
                'evaluation with metrics must have gt images.')
            results = dict(eval_result=self.evaluate(pred, gt))
        else:
            results = dict(lq=lq.cpu(), output=pred.cpu())
            if gt is not None:
                results['gt'] = gt.cpu()

        if save_image:
            gt_path = meta[0]['gt_path']
            folder_name = osp.splitext(osp.basename(gt_path))[0]
            if isinstance(iteration, numbers.Number):
                save_path = osp.join(save_path, folder_name,
                                     f'{folder_name}-{(iteration + 1):06d}.png')
            elif iteration is None:
                save_path = osp.join(save_path, f'{folder_name}.png')
            else:
                raise ValueError('iteration should be number or None, '
                                 f'but got {type(iteration)}')
            mmcv.imwrite(tensor2img(pred), save_path)

        return results

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        self.generator.init_weights(pretrained, strict)
|
@MODELS.register_module()
class MFQEv2Restorer(BasicRestorer):
    """MFQEv2 model for video quality enhancement.

    Args:
        generator (dict): Config for the generator structure.
        pixel_loss (dict): Config for pixel-wise loss.
        train_cfg (dict): Config for training. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def __init__(self, generator, pixel_loss, train_cfg=None, test_cfg=None,
                 pretrained=None):
        super().__init__(generator, pixel_loss, train_cfg, test_cfg,
                         pretrained)
        # Number of initial iterations during which the flow ('spynet')
        # parameters stay frozen.
        self.fix_spynet_iter = (
            train_cfg.get('fix_spynet_iter', 0) if train_cfg else 0)
        self.is_weight_fixed = False
        # Buffer so the counter is saved/restored with checkpoints.
        self.register_buffer('step_counter', torch.zeros(1))

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output.
        """
        # Freeze flow weights for the first `fix_spynet_iter` steps (done
        # once), then release every generator parameter afterwards.
        if self.step_counter < self.fix_spynet_iter:
            if not self.is_weight_fixed:
                self.is_weight_fixed = True
                for name, param in self.generator.named_parameters():
                    if 'spynet' in name:
                        param.requires_grad_(False)
        elif self.step_counter == self.fix_spynet_iter:
            self.generator.requires_grad_(True)

        outputs = self(**data_batch, test_mode=False)
        loss, log_vars = self.parse_losses(outputs.pop('losses'))

        gen_optim = optimizer['generator']
        gen_optim.zero_grad()
        loss.backward()
        gen_optim.step()

        self.step_counter += 1

        outputs['log_vars'] = log_vars
        return outputs

    def forward_test(self, lq, gt=None, meta=None, save_image=False,
                     save_path=None, iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ Tensor with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w). Default: None.
            meta (list[dict]): Meta data of the batch. Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results.
        """
        output = self.generator(lq)

        if self.test_cfg is not None and self.test_cfg.get('metrics', None):
            assert gt is not None, (
                'evaluation with metrics must have gt images.')
            results = dict(eval_result=self.evaluate(output, gt))
        else:
            results = dict(lq=lq.cpu(), output=output.cpu())
            if gt is not None:
                results['gt'] = gt.cpu()

        if save_image:
            gt_path = meta[0]['gt_path'][0]
            folder_name = meta[0]['key'].split('/')[0]
            frame_name = osp.splitext(osp.basename(gt_path))[0]
            if iteration is None:
                save_path = osp.join(save_path, folder_name,
                                     f'{frame_name}.png')
            elif isinstance(iteration, numbers.Number):
                save_path = osp.join(save_path, folder_name,
                                     f'{frame_name}-{(iteration + 1):06d}.png')
            else:
                raise ValueError('iteration should be number or None, '
                                 f'but got {type(iteration)}')
            mmcv.imwrite(tensor2img(output), save_path)

        return results
|
@MODELS.register_module()
class RealBasicVSR(RealESRGAN):
    """RealBasicVSR model for real-world video super-resolution.

    Ref:
    Investigating Tradeoffs in Real-World Video Super-Resolution, arXiv

    Args:
        generator (dict): Config for the generator.
        discriminator (dict, optional): Config for the discriminator.
            Default: None.
        gan_loss (dict, optional): Config for the gan loss.
            Note that the loss weight in gan loss is only for the generator.
        pixel_loss (dict, optional): Config for the pixel loss. Default: None.
        cleaning_loss (dict, optional): Config for the image cleaning loss.
            Default: None.
        perceptual_loss (dict, optional): Config for the perceptual loss.
            Default: None.
        is_use_sharpened_gt_in_pixel (bool, optional): Whether to use the
            image sharpened by unsharp masking as the GT for pixel loss.
            Default: False.
        is_use_sharpened_gt_in_percep (bool, optional): Whether to use the
            image sharpened by unsharp masking as the GT for perceptual loss.
            Default: False.
        is_use_sharpened_gt_in_gan (bool, optional): Whether to use the
            image sharpened by unsharp masking as the GT for adversarial
            loss. Default: False.
        is_use_ema (bool, optional): When to apply exponential moving average
            on the network weights. Default: True.
        train_cfg (dict): Config for training. Default: None.
            You may change the training of gan by setting:
            `disc_steps`: how many discriminator updates after one generate
            update;
            `disc_init_steps`: how many discriminator updates at the start of
            the training.
            These two keys are useful when training with WGAN.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def __init__(self, generator, discriminator=None, gan_loss=None,
                 pixel_loss=None, cleaning_loss=None, perceptual_loss=None,
                 is_use_sharpened_gt_in_pixel=False,
                 is_use_sharpened_gt_in_percep=False,
                 is_use_sharpened_gt_in_gan=False, is_use_ema=True,
                 train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__(generator, discriminator, gan_loss, pixel_loss,
                         perceptual_loss, is_use_sharpened_gt_in_pixel,
                         is_use_sharpened_gt_in_percep,
                         is_use_sharpened_gt_in_gan, is_use_ema, train_cfg,
                         test_cfg, pretrained)
        # Optional loss on the cleaned low-resolution intermediate output.
        self.cleaning_loss = (build_loss(cleaning_loss) if cleaning_loss else None)

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output.
        """
        # At `start_iter`, reset the generator weights from the EMA copy.
        if ((self.step_counter == self.start_iter) and (self.generator_ema is not None)):
            if is_module_wrapper(self.generator):
                self.generator.module.load_state_dict(self.generator_ema.module.state_dict())
            else:
                self.generator.load_state_dict(self.generator_ema.state_dict())
        lq = data_batch['lq']
        gt = data_batch['gt']
        # Each loss may use either the plain GT or its unsharp-masked
        # variant, selected independently per loss.
        (gt_pixel, gt_percep, gt_gan) = (gt.clone(), gt.clone(), gt.clone())
        if self.is_use_sharpened_gt_in_pixel:
            gt_pixel = data_batch['gt_unsharp']
        if self.is_use_sharpened_gt_in_percep:
            gt_percep = data_batch['gt_unsharp']
        if self.is_use_sharpened_gt_in_gan:
            gt_gan = data_batch['gt_unsharp']
        if self.cleaning_loss:
            # Target for the cleaning branch: 4x area-downsampled GT frames.
            (n, t, c, h, w) = gt.size()
            gt_clean = gt_pixel.view((- 1), c, h, w)
            gt_clean = F.interpolate(gt_clean, scale_factor=0.25, mode='area')
            gt_clean = gt_clean.view(n, t, c, (h // 4), (w // 4))
        # `return_lqs=True` also yields the cleaned LQ sequence.
        (fake_g_output, fake_g_lq) = self.generator(lq, return_lqs=True)
        losses = dict()
        log_vars = dict()
        # Flatten the temporal dimension into the batch for image losses.
        (c, h, w) = gt.shape[2:]
        gt_pixel = gt_pixel.view((- 1), c, h, w)
        gt_percep = gt_percep.view((- 1), c, h, w)
        gt_gan = gt_gan.view((- 1), c, h, w)
        fake_g_output = fake_g_output.view((- 1), c, h, w)
        if self.gan_loss:
            # Freeze the discriminator while updating the generator.
            set_requires_grad(self.discriminator, False)
        # Generator update runs every `disc_steps` iterations, after the
        # initial discriminator-only warm-up of `disc_init_steps` iterations.
        if (((self.step_counter % self.disc_steps) == 0) and (self.step_counter >= self.disc_init_steps)):
            if self.pixel_loss:
                losses['loss_pix'] = self.pixel_loss(fake_g_output, gt_pixel)
            if self.cleaning_loss:
                losses['loss_clean'] = self.cleaning_loss(fake_g_lq, gt_clean)
            if self.perceptual_loss:
                (loss_percep, loss_style) = self.perceptual_loss(fake_g_output, gt_percep)
                if (loss_percep is not None):
                    losses['loss_perceptual'] = loss_percep
                if (loss_style is not None):
                    losses['loss_style'] = loss_style
            if self.gan_loss:
                fake_g_pred = self.discriminator(fake_g_output)
                losses['loss_gan'] = self.gan_loss(fake_g_pred, target_is_real=True, is_disc=False)
            (loss_g, log_vars_g) = self.parse_losses(losses)
            log_vars.update(log_vars_g)
            optimizer['generator'].zero_grad()
            loss_g.backward()
            optimizer['generator'].step()
        if self.gan_loss:
            # Discriminator update: real and fake branches run as two
            # separate backward passes.
            set_requires_grad(self.discriminator, True)
            # Real branch.
            real_d_pred = self.discriminator(gt_gan)
            loss_d_real = self.gan_loss(real_d_pred, target_is_real=True, is_disc=True)
            (loss_d, log_vars_d) = self.parse_losses(dict(loss_d_real=loss_d_real))
            optimizer['discriminator'].zero_grad()
            loss_d.backward()
            log_vars.update(log_vars_d)
            # Fake branch (generator output detached from the graph).
            fake_d_pred = self.discriminator(fake_g_output.detach())
            loss_d_fake = self.gan_loss(fake_d_pred, target_is_real=False, is_disc=True)
            (loss_d, log_vars_d) = self.parse_losses(dict(loss_d_fake=loss_d_fake))
            loss_d.backward()
            log_vars.update(log_vars_d)
            optimizer['discriminator'].step()
        self.step_counter += 1
        # 'loss' is the parse_losses total; per-term entries are kept instead.
        log_vars.pop('loss')
        outputs = dict(log_vars=log_vars, num_samples=len(gt.data), results=dict(lq=lq.cpu(), gt=gt.cpu(), output=fake_g_output.cpu()))
        return outputs
|
@MODELS.register_module()
class RealESRGAN(SRGAN):
'Real-ESRGAN model for single image super-resolution.\n\n Ref:\n Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure\n Synthetic Data, 2021.\n\n Args:\n generator (dict): Config for the generator.\n discriminator (dict, optional): Config for the discriminator.\n Default: None.\n gan_loss (dict, optional): Config for the gan loss.\n Note that the loss weight in gan loss is only for the generator.\n pixel_loss (dict, optional): Config for the pixel loss. Default: None.\n perceptual_loss (dict, optional): Config for the perceptual loss.\n Default: None.\n is_use_sharpened_gt_in_pixel (bool, optional): Whether to use the image\n sharpened by unsharp masking as the GT for pixel loss.\n Default: False.\n is_use_sharpened_gt_in_percep (bool, optional): Whether to use the\n image sharpened by unsharp masking as the GT for perceptual loss.\n Default: False.\n is_use_sharpened_gt_in_gan (bool, optional): Whether to use the\n image sharpened by unsharp masking as the GT for adversarial loss.\n Default: False.\n is_use_ema (bool, optional): When to apply exponential moving average\n on the network weights. Default: True.\n train_cfg (dict): Config for training. Default: None.\n You may change the training of gan by setting:\n `disc_steps`: how many discriminator updates after one generate\n update;\n `disc_init_steps`: how many discriminator updates at the start of\n the training.\n These two keys are useful when training with WGAN.\n test_cfg (dict): Config for testing. Default: None.\n pretrained (str): Path for pretrained model. Default: None.\n '
def __init__(self, generator, discriminator=None, gan_loss=None, pixel_loss=None, perceptual_loss=None, is_use_sharpened_gt_in_pixel=False, is_use_sharpened_gt_in_percep=False, is_use_sharpened_gt_in_gan=False, is_use_ema=True, train_cfg=None, test_cfg=None, pretrained=None):
super().__init__(generator, discriminator, gan_loss, pixel_loss, perceptual_loss, train_cfg, test_cfg, pretrained)
self.is_use_sharpened_gt_in_pixel = is_use_sharpened_gt_in_pixel
self.is_use_sharpened_gt_in_percep = is_use_sharpened_gt_in_percep
self.is_use_sharpened_gt_in_gan = is_use_sharpened_gt_in_gan
self.is_use_ema = is_use_ema
if is_use_ema:
self.generator_ema = deepcopy(self.generator)
else:
self.generator_ema = None
del self.step_counter
self.register_buffer('step_counter', torch.zeros(1))
if (train_cfg is not None):
self.start_iter = train_cfg.get('start_iter', (- 1))
else:
self.start_iter = (- 1)
def train_step(self, data_batch, optimizer):
'Train step.\n\n Args:\n data_batch (dict): A batch of data.\n optimizer (obj): Optimizer.\n\n Returns:\n dict: Returned output.\n '
if ((self.step_counter == self.start_iter) and (self.generator_ema is not None)):
if is_module_wrapper(self.generator):
self.generator.module.load_state_dict(self.generator_ema.module.state_dict())
else:
self.generator.load_state_dict(self.generator_ema.state_dict())
lq = data_batch['lq']
gt = data_batch['gt']
(gt_pixel, gt_percep, gt_gan) = (gt.clone(), gt.clone(), gt.clone())
if self.is_use_sharpened_gt_in_pixel:
gt_pixel = data_batch['gt_unsharp']
if self.is_use_sharpened_gt_in_percep:
gt_percep = data_batch['gt_unsharp']
if self.is_use_sharpened_gt_in_gan:
gt_gan = data_batch['gt_unsharp']
fake_g_output = self.generator(lq)
losses = dict()
log_vars = dict()
if self.gan_loss:
set_requires_grad(self.discriminator, False)
if (((self.step_counter % self.disc_steps) == 0) and (self.step_counter >= self.disc_init_steps)):
if self.pixel_loss:
losses['loss_pix'] = self.pixel_loss(fake_g_output, gt_pixel)
if self.perceptual_loss:
(loss_percep, loss_style) = self.perceptual_loss(fake_g_output, gt_percep)
if (loss_percep is not None):
losses['loss_perceptual'] = loss_percep
if (loss_style is not None):
losses['loss_style'] = loss_style
if self.gan_loss:
fake_g_pred = self.discriminator(fake_g_output)
losses['loss_gan'] = self.gan_loss(fake_g_pred, target_is_real=True, is_disc=False)
(loss_g, log_vars_g) = self.parse_losses(losses)
log_vars.update(log_vars_g)
optimizer['generator'].zero_grad()
loss_g.backward()
optimizer['generator'].step()
if self.gan_loss:
set_requires_grad(self.discriminator, True)
real_d_pred = self.discriminator(gt_gan)
loss_d_real = self.gan_loss(real_d_pred, target_is_real=True, is_disc=True)
(loss_d, log_vars_d) = self.parse_losses(dict(loss_d_real=loss_d_real))
optimizer['discriminator'].zero_grad()
loss_d.backward()
log_vars.update(log_vars_d)
fake_d_pred = self.discriminator(fake_g_output.detach())
loss_d_fake = self.gan_loss(fake_d_pred, target_is_real=False, is_disc=True)
(loss_d, log_vars_d) = self.parse_losses(dict(loss_d_fake=loss_d_fake))
loss_d.backward()
log_vars.update(log_vars_d)
optimizer['discriminator'].step()
self.step_counter += 1
log_vars.pop('loss')
outputs = dict(log_vars=log_vars, num_samples=len(gt.data), results=dict(lq=lq.cpu(), gt=gt.cpu(), output=fake_g_output.cpu()))
return outputs
def forward_test(self, lq, gt=None, meta=None, save_image=False, save_path=None, iteration=None):
    """Testing forward function.

    Runs the (optionally EMA-averaged) generator on ``lq``, evaluates
    against ``gt`` when metrics are configured, and optionally writes the
    restored image to disk.

    Args:
        lq (Tensor): LQ Tensor with shape (n, c, h, w).
        gt (Tensor): GT Tensor with shape (n, c, h, w). Default: None.
        meta (list[dict]): Meta data, e.g. the LQ image path. Default: None.
        save_image (bool): Whether to save image. Default: False.
        save_path (str): Path to save image. Default: None.
        iteration (int): Iteration for the saving image name.
            Default: None.

    Returns:
        dict: Output results.
    """
    # Prefer the exponential-moving-average weights when enabled.
    restorer = self.generator if not self.is_use_ema else self.generator_ema
    output = restorer(lq)
    wants_eval = (self.test_cfg is not None) and self.test_cfg.get('metrics', None) and (gt is not None)
    if wants_eval:
        results = dict(eval_result=self.evaluate(output, gt))
    else:
        results = dict(lq=lq.cpu(), output=output.cpu())
    if save_image:
        # Name the saved file after the LQ image of the first sample.
        folder_name = osp.splitext(osp.basename(meta[0]['lq_path']))[0]
        if iteration is None:
            save_path = osp.join(save_path, f'{folder_name}.png')
        elif isinstance(iteration, numbers.Number):
            save_path = osp.join(save_path, folder_name, f'{folder_name}-{(iteration + 1):06d}.png')
        else:
            raise ValueError(f'iteration should be number or None, but got {type(iteration)}')
        mmcv.imwrite(tensor2img(output), save_path)
    return results
|
@MODELS.register_module()
class SRGAN(BasicRestorer):
    """SRGAN model for single image super-resolution.

    Ref:
    Photo-Realistic Single Image Super-Resolution Using a Generative
    Adversarial Network.

    Args:
        generator (dict): Config for the generator.
        discriminator (dict): Config for the discriminator. Default: None.
        gan_loss (dict): Config for the gan loss.
            Note that the loss weight in gan loss is only for the generator.
        pixel_loss (dict): Config for the pixel loss. Default: None.
        perceptual_loss (dict): Config for the perceptual loss. Default: None.
        train_cfg (dict): Config for training. Default: None.
            You may change the training of gan by setting:
            `disc_steps`: how many discriminator updates after one generator
            update;
            `disc_init_steps`: how many discriminator updates at the start of
            the training.
            These two keys are useful when training with WGAN.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def __init__(self, generator, discriminator=None, gan_loss=None, pixel_loss=None, perceptual_loss=None, train_cfg=None, test_cfg=None, pretrained=None):
        # NOTE: ``super(BasicRestorer, self)`` deliberately skips
        # BasicRestorer.__init__ — SRGAN builds its own networks/losses below.
        super(BasicRestorer, self).__init__()
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        # Networks: the generator is mandatory; the discriminator is only
        # needed for adversarial training.
        self.generator = build_backbone(generator)
        self.discriminator = (build_component(discriminator) if discriminator else None)
        self.fp16_enabled = False
        # Losses (each optional).
        self.gan_loss = (build_loss(gan_loss) if gan_loss else None)
        self.pixel_loss = (build_loss(pixel_loss) if pixel_loss else None)
        self.perceptual_loss = (build_loss(perceptual_loss) if perceptual_loss else None)
        # GAN schedule: update the generator once every ``disc_steps`` steps,
        # and only after ``disc_init_steps`` discriminator-only warm-up steps.
        self.disc_steps = (1 if (self.train_cfg is None) else self.train_cfg.get('disc_steps', 1))
        self.disc_init_steps = (0 if (self.train_cfg is None) else self.train_cfg.get('disc_init_steps', 0))
        # Number of completed calls to ``train_step``.
        self.step_counter = 0
        self.init_weights(pretrained)

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
        """
        self.generator.init_weights(pretrained=pretrained)
        if self.discriminator:
            self.discriminator.init_weights(pretrained=pretrained)

    @auto_fp16(apply_to=('lq',))
    def forward(self, lq, gt=None, test_mode=False, **kwargs):
        """Forward function.

        Args:
            lq (Tensor): Input lq images.
            gt (Tensor): Ground-truth image. Default: None.
            test_mode (bool): Whether in test mode or not. Default: False.
            kwargs (dict): Other arguments.
        """
        if test_mode:
            return self.forward_test(lq, gt, **kwargs)
        # GAN training requires separate generator/discriminator updates, so
        # training must go through ``train_step`` instead of ``forward``.
        raise ValueError('SRGAN model does not support `forward_train` function.')

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output.
        """
        lq = data_batch['lq']
        gt = data_batch['gt']
        fake_g_output = self.generator(lq)
        losses = dict()
        log_vars = dict()
        # ---------- generator update ----------
        # Freeze the discriminator so generator gradients do not touch it.
        set_requires_grad(self.discriminator, False)
        if (((self.step_counter % self.disc_steps) == 0) and (self.step_counter >= self.disc_init_steps)):
            if self.pixel_loss:
                losses['loss_pix'] = self.pixel_loss(fake_g_output, gt)
            if self.perceptual_loss:
                (loss_percep, loss_style) = self.perceptual_loss(fake_g_output, gt)
                if (loss_percep is not None):
                    losses['loss_perceptual'] = loss_percep
                if (loss_style is not None):
                    losses['loss_style'] = loss_style
            # Adversarial term: the generator wants fakes judged as real.
            fake_g_pred = self.discriminator(fake_g_output)
            losses['loss_gan'] = self.gan_loss(fake_g_pred, target_is_real=True, is_disc=False)
            (loss_g, log_vars_g) = self.parse_losses(losses)
            log_vars.update(log_vars_g)
            optimizer['generator'].zero_grad()
            loss_g.backward()
            optimizer['generator'].step()
        # ---------- discriminator update ----------
        set_requires_grad(self.discriminator, True)
        # Real samples.
        real_d_pred = self.discriminator(gt)
        loss_d_real = self.gan_loss(real_d_pred, target_is_real=True, is_disc=True)
        (loss_d, log_vars_d) = self.parse_losses(dict(loss_d_real=loss_d_real))
        optimizer['discriminator'].zero_grad()
        loss_d.backward()
        log_vars.update(log_vars_d)
        # Fake samples; detached so no gradient reaches the generator.
        fake_d_pred = self.discriminator(fake_g_output.detach())
        loss_d_fake = self.gan_loss(fake_d_pred, target_is_real=False, is_disc=True)
        (loss_d, log_vars_d) = self.parse_losses(dict(loss_d_fake=loss_d_fake))
        loss_d.backward()
        log_vars.update(log_vars_d)
        optimizer['discriminator'].step()
        self.step_counter += 1
        # Drop the aggregate 'loss' entry added by parse_losses; individual
        # loss terms remain in the logs.
        log_vars.pop('loss')
        outputs = dict(log_vars=log_vars, num_samples=len(gt.data), results=dict(lq=lq.cpu(), gt=gt.cpu(), output=fake_g_output.cpu()))
        return outputs
|
@MODELS.register_module()
class STDF(BasicRestorer):
    """STDF model for video restoration.

    It must contain a generator that takes an image as inputs and outputs a
    restored image. It also has a pixel-wise loss for training.

    The subclasses should overwrite the function `forward_train`,
    `forward_test` and `train_step`.

    Args:
        generator (dict): Config for the generator structure.
        pixel_loss (dict): Config for pixel-wise loss.
        train_cfg (dict): Config for training. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    # Metrics that may be requested via ``test_cfg['metrics']``.
    allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}

    def __init__(self, generator, pixel_loss, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__(generator, pixel_loss, train_cfg, test_cfg, pretrained)

    def forward_test(self, lq, gt=None, meta=None, save_image=False, save_path=None, iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ Tensor with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w). Default: None.
            meta (list[dict]): Meta data, such as path of GT file.
                Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results.
        """
        restored = self.generator(lq)
        wants_metrics = (self.test_cfg is not None) and self.test_cfg.get('metrics', None)
        if wants_metrics:
            assert gt is not None, 'evaluation with metrics must have gt images.'
            results = dict(eval_result=self.evaluate(restored, gt))
        else:
            results = dict(lq=lq.cpu(), output=restored.cpu())
            if gt is not None:
                results['gt'] = gt.cpu()
        if save_image:
            # Saved frames are grouped by clip ('key' prefix) and named after
            # the GT frame of the first sample.
            clip_name = meta[0]['key'].split('/')[0]
            frame_name = osp.splitext(osp.basename(meta[0]['gt_path'][0]))[0]
            if iteration is None:
                save_path = osp.join(save_path, clip_name, f'{frame_name}.png')
            elif isinstance(iteration, numbers.Number):
                save_path = osp.join(save_path, clip_name, f'{frame_name}-{(iteration + 1):06d}.png')
            else:
                raise ValueError(f'iteration should be number or None, but got {type(iteration)}')
            mmcv.imwrite(tensor2img(restored), save_path)
        return results
|
@MODELS.register_module()
class TTSR(BasicRestorer):
    """TTSR model for Reference-based Image Super-Resolution.

    Paper: Learning Texture Transformer Network for Image Super-Resolution.

    Args:
        generator (dict): Config for the generator.
        extractor (dict): Config for the extractor.
        transformer (dict): Config for the transformer.
        pixel_loss (dict): Config for the pixel loss.
        discriminator (dict): Config for the discriminator. Default: None.
        perceptual_loss (dict): Config for the perceptual loss. Default: None.
        transferal_perceptual_loss (dict): Config for the transferal perceptual
            loss. Default: None.
        gan_loss (dict): Config for the GAN loss. Default: None
        train_cfg (dict): Config for train. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def __init__(self, generator, extractor, transformer, pixel_loss, discriminator=None, perceptual_loss=None, transferal_perceptual_loss=None, gan_loss=None, train_cfg=None, test_cfg=None, pretrained=None):
        # NOTE: ``super(BasicRestorer, self)`` deliberately skips
        # BasicRestorer.__init__ — TTSR assembles its own networks below.
        super(BasicRestorer, self).__init__()
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.generator = build_backbone(generator)
        self.transformer = build_component(transformer)
        self.extractor = build_component(extractor)
        # Adversarial training is enabled only when BOTH a discriminator and
        # a GAN loss are configured.
        if (discriminator and gan_loss):
            self.discriminator = build_component(discriminator)
            self.gan_loss = build_loss(gan_loss)
        else:
            self.discriminator = None
            self.gan_loss = None
        self.pixel_loss = build_loss(pixel_loss)
        self.perceptual_loss = (build_loss(perceptual_loss) if perceptual_loss else None)
        if transferal_perceptual_loss:
            self.transferal_perceptual_loss = build_loss(transferal_perceptual_loss)
        else:
            self.transferal_perceptual_loss = None
        self.init_weights(pretrained)
        # Registered as a buffer so the step count is saved in checkpoints.
        self.register_buffer('step_counter', torch.zeros(1))
        # For the first ``fix_iter`` steps, only the pixel loss is applied
        # (see ``train_step``).
        self.fix_iter = (train_cfg.get('fix_iter', 0) if train_cfg else 0)
        self.disc_steps = (train_cfg.get('disc_steps', 1) if train_cfg else 1)

    def forward_dummy(self, lq, lq_up, ref, ref_downup, only_pred=True):
        """Forward of networks.

        Args:
            lq (Tensor): LQ image.
            lq_up (Tensor): Upsampled LQ image.
            ref (Tensor): Reference image.
            ref_downup (Tensor): Image generated by sequentially applying
                bicubic down-sampling and up-sampling on reference image.
            only_pred (bool): Only return predicted results or not.
                Default: True.

        Returns:
            pred (Tensor): Predicted super-resolution results (n, 3, 4h, 4w).
            soft_attention (Tensor): Soft-Attention tensor with shape
                (n, 1, h, w).
            textures (Tuple[Tensor]): Transferred GT textures.
                [(N, C, H, W), (N, C/2, 2H, 2W), ...]
        """
        # Only the first returned feature level is needed for lq_up and
        # ref_downup; all levels of the reference are kept.
        (lq_up, _, _) = self.extractor(lq_up)
        (ref_downup, _, _) = self.extractor(ref_downup)
        refs = self.extractor(ref)
        # Texture transformer: matches lq_up against ref_downup and transfers
        # multi-scale textures from the reference image.
        (soft_attention, textures) = self.transformer(lq_up, ref_downup, refs)
        pred = self.generator(lq, soft_attention, textures)
        if only_pred:
            return pred
        return (pred, soft_attention, textures)

    def forward(self, lq, gt=None, test_mode=False, **kwargs):
        """Forward function.

        Args:
            lq (Tensor): Input lq images.
            gt (Tensor): Ground-truth image. Default: None.
            test_mode (bool): Whether in test mode or not. Default: False.
            kwargs (dict): Other arguments.
        """
        if test_mode:
            return self.forward_test(lq, gt=gt, **kwargs)
        return self.forward_dummy(lq, **kwargs)

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data, which requires
                'lq', 'gt', 'lq_up', 'ref', 'ref_downup'
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output, which includes:
                log_vars, num_samples, results (lq, gt and pred).
        """
        lq = data_batch['lq']
        lq_up = data_batch['lq_up']
        gt = data_batch['gt']
        ref = data_batch['ref']
        ref_downup = data_batch['ref_downup']
        (pred, soft_attention, textures) = self(lq, lq_up=lq_up, ref=ref, ref_downup=ref_downup, only_pred=False)
        losses = dict()
        log_vars = dict()
        # ---------- generator update ----------
        # Freeze the discriminator for the generator pass.
        set_requires_grad(self.discriminator, False)
        losses['loss_pix'] = self.pixel_loss(pred, gt)
        # The extra losses kick in only after the pixel-only warm-up phase.
        if (self.step_counter >= self.fix_iter):
            if self.perceptual_loss:
                (loss_percep, loss_style) = self.perceptual_loss(pred, gt)
                if (loss_percep is not None):
                    losses['loss_perceptual'] = loss_percep
                if (loss_style is not None):
                    losses['loss_style'] = loss_style
            if self.transferal_perceptual_loss:
                # The extractor is frozen while computing SR textures.
                set_requires_grad(self.extractor, False)
                # Map pred from [-1, 1] to [0, 1] before feature extraction.
                sr_textures = self.extractor(((pred + 1.0) / 2.0))
                losses['loss_transferal'] = self.transferal_perceptual_loss(sr_textures, soft_attention, textures)
            if self.gan_loss:
                fake_g_pred = self.discriminator(pred)
                losses['loss_gan'] = self.gan_loss(fake_g_pred, target_is_real=True, is_disc=False)
        (loss_g, log_vars_g) = self.parse_losses(losses)
        log_vars.update(log_vars_g)
        optimizer['generator'].zero_grad()
        loss_g.backward()
        optimizer['generator'].step()
        # ---------- discriminator update ----------
        if (self.discriminator and (self.step_counter >= self.fix_iter)):
            set_requires_grad(self.discriminator, True)
            for _ in range(self.disc_steps):
                # Real samples.
                real_d_pred = self.discriminator(gt)
                loss_d_real = self.gan_loss(real_d_pred, target_is_real=True, is_disc=True)
                (loss_d, log_vars_d) = self.parse_losses(dict(loss_d_real=loss_d_real))
                optimizer['discriminator'].zero_grad()
                loss_d.backward()
                log_vars.update(log_vars_d)
                # Fake samples; detached so no gradient reaches the generator.
                fake_d_pred = self.discriminator(pred.detach())
                loss_d_fake = self.gan_loss(fake_d_pred, target_is_real=False, is_disc=True)
                (loss_d, log_vars_d) = self.parse_losses(dict(loss_d_fake=loss_d_fake))
                loss_d.backward()
                log_vars.update(log_vars_d)
                optimizer['discriminator'].step()
        # Drop the aggregate 'loss' entry added by parse_losses.
        log_vars.pop('loss')
        outputs = dict(log_vars=log_vars, num_samples=len(gt.data), results=dict(lq=lq.cpu(), gt=gt.cpu(), ref=ref.cpu(), output=pred.cpu()))
        self.step_counter += 1
        return outputs

    def forward_test(self, lq, lq_up, ref, ref_downup, gt=None, meta=None, save_image=False, save_path=None, iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ image
            gt (Tensor): GT image
            lq_up (Tensor): Upsampled LQ image
            ref (Tensor): Reference image
            ref_downup (Tensor): Image generated by sequentially applying
                bicubic down-sampling and up-sampling on reference image
            meta (list[dict]): Meta data, such as path of GT file.
                Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results, which contain either key(s)
                1. 'eval_result'.
                2. 'lq', 'pred'.
                3. 'lq', 'pred', 'gt'.
        """
        with torch.no_grad():
            pred = self.forward_dummy(lq=lq, lq_up=lq_up, ref=ref, ref_downup=ref_downup)
        # Map network output (and GT) from [-1, 1] to [0, 1] for evaluation
        # and saving.
        pred = ((pred + 1.0) / 2.0)
        if (gt is not None):
            gt = ((gt + 1.0) / 2.0)
        if ((self.test_cfg is not None) and self.test_cfg.get('metrics', None)):
            assert (gt is not None), 'evaluation with metrics must have gt images.'
            results = dict(eval_result=self.evaluate(pred, gt))
        else:
            results = dict(lq=lq.cpu(), output=pred.cpu())
            if (gt is not None):
                results['gt'] = gt.cpu()
        if save_image:
            # Prefer the GT path for naming; fall back to the LQ path.
            if ('gt_path' in meta[0]):
                the_path = meta[0]['gt_path']
            else:
                the_path = meta[0]['lq_path']
            folder_name = osp.splitext(osp.basename(the_path))[0]
            if isinstance(iteration, numbers.Number):
                save_path = osp.join(save_path, folder_name, f'{folder_name}-{(iteration + 1):06d}.png')
            elif (iteration is None):
                save_path = osp.join(save_path, f'{folder_name}.png')
            else:
                raise ValueError(f'iteration should be number or None, but got {type(iteration)}')
            mmcv.imwrite(tensor2img(pred), save_path)
        return results

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if isinstance(pretrained, str):
            if self.generator:
                self.generator.init_weights(pretrained, strict)
            if self.extractor:
                self.extractor.init_weights(pretrained, strict)
            if self.transformer:
                self.transformer.init_weights(pretrained, strict)
        elif (pretrained is not None):
            raise TypeError(f'"pretrained" must be a str or None. But received {type(pretrained)}.')
|
@MODELS.register_module()
class CycleGAN(BaseModel):
    """CycleGAN model for unpaired image-to-image translation.

    Ref:
    Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial
    Networks

    Args:
        generator (dict): Config for the generator.
        discriminator (dict): Config for the discriminator.
        gan_loss (dict): Config for the gan loss.
        cycle_loss (dict): Config for the cycle-consistency loss.
        id_loss (dict): Config for the identity loss. Default: None.
        train_cfg (dict): Config for training. Default: None.
            You may change the training of gan by setting:
            `disc_steps`: how many discriminator updates after one generator
            update.
            `disc_init_steps`: how many discriminator updates at the start of
            the training.
            These two keys are useful when training with WGAN.
            `direction`: image-to-image translation direction (the model
            training direction): a2b | b2a.
            `buffer_size`: GAN image buffer size.
        test_cfg (dict): Config for testing. Default: None.
            You may change the testing of gan by setting:
            `direction`: image-to-image translation direction (the model
            training direction): a2b | b2a.
            `show_input`: whether to show input real images.
            `test_direction`: direction in the test mode (the model testing
            direction). CycleGAN has two generators. It decides whether
            to perform forward or backward translation with respect to
            `direction` during testing: a2b | b2a.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def __init__(self, generator, discriminator, gan_loss, cycle_loss, id_loss=None, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__()
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        # The identity loss feeds a domain-B image through generator 'a' (and
        # vice versa), so input/output channel counts must match.
        if ((id_loss is not None) and (id_loss.get('loss_weight') > 0.0)):
            assert (generator.get('in_channels') == generator.get('out_channels'))
        # Two generators: 'a' maps A -> B, 'b' maps B -> A.
        self.generators = nn.ModuleDict()
        self.generators['a'] = build_backbone(generator)
        self.generators['b'] = build_backbone(generator)
        # Two discriminators: 'a' judges domain-B images, 'b' judges
        # domain-A images (see ``backward_discriminators``).
        self.discriminators = nn.ModuleDict()
        self.discriminators['a'] = build_component(discriminator)
        self.discriminators['b'] = build_component(discriminator)
        # History buffers of previously generated images, sampled when
        # updating the discriminators.
        self.image_buffers = dict()
        self.buffer_size = (50 if (self.train_cfg is None) else self.train_cfg.get('buffer_size', 50))
        self.image_buffers['a'] = GANImageBuffer(self.buffer_size)
        self.image_buffers['b'] = GANImageBuffer(self.buffer_size)
        assert (gan_loss is not None)
        self.gan_loss = build_loss(gan_loss)
        assert (cycle_loss is not None)
        self.cycle_loss = build_loss(cycle_loss)
        self.id_loss = (build_loss(id_loss) if id_loss else None)
        # GAN schedule: generators updated once every ``disc_steps`` steps,
        # after ``disc_init_steps`` discriminator-only warm-up steps.
        self.disc_steps = (1 if (self.train_cfg is None) else self.train_cfg.get('disc_steps', 1))
        self.disc_init_steps = (0 if (self.train_cfg is None) else self.train_cfg.get('disc_init_steps', 0))
        # Training direction comes from train_cfg when training, otherwise
        # from test_cfg; defaults to 'a2b'.
        if (self.train_cfg is None):
            self.direction = ('a2b' if (self.test_cfg is None) else self.test_cfg.get('direction', 'a2b'))
        else:
            self.direction = self.train_cfg.get('direction', 'a2b')
        # Number of completed calls to ``train_step``.
        self.step_counter = 0
        self.show_input = (False if (self.test_cfg is None) else self.test_cfg.get('show_input', False))
        if (not self.show_input):
            self.test_direction = ('a2b' if (self.test_cfg is None) else self.test_cfg.get('test_direction', 'a2b'))
            if (self.direction == 'b2a'):
                # ``test_direction`` is relative to ``direction``; flip it
                # when the model itself was trained b2a.
                self.test_direction = ('b2a' if (self.test_direction == 'a2b') else 'a2b')
        self.fp16_enabled = False
        self.init_weights(pretrained)

    def init_weights(self, pretrained=None):
        """Initialize weights for the model.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Default: None.
        """
        self.generators['a'].init_weights(pretrained=pretrained)
        self.generators['b'].init_weights(pretrained=pretrained)
        self.discriminators['a'].init_weights(pretrained=pretrained)
        self.discriminators['b'].init_weights(pretrained=pretrained)

    def get_module(self, module):
        """Get `nn.ModuleDict` to fit the `MMDistributedDataParallel` interface.

        Args:
            module (MMDistributedDataParallel | nn.ModuleDict): The input
                module that needs processing.

        Returns:
            nn.ModuleDict: The ModuleDict of multiple networks.
        """
        if isinstance(module, MMDistributedDataParallel):
            return module.module
        return module

    def setup(self, img_a, img_b, meta):
        """Perform necessary pre-processing steps.

        Args:
            img_a (Tensor): Input image from domain A.
            img_b (Tensor): Input image from domain B.
            meta (list[dict]): Input meta data.

        Returns:
            Tensor, Tensor, list[str]: The real images from domain A/B, and
                the image path as the metadata.
        """
        # When direction is 'b2a' the roles of the two inputs are swapped.
        a2b = (self.direction == 'a2b')
        real_a = (img_a if a2b else img_b)
        real_b = (img_b if a2b else img_a)
        image_path = [v[('img_a_path' if a2b else 'img_b_path')] for v in meta]
        return (real_a, real_b, image_path)

    @auto_fp16(apply_to=('img_a', 'img_b'))
    def forward_train(self, img_a, img_b, meta):
        """Forward function for training.

        Args:
            img_a (Tensor): Input image from domain A.
            img_b (Tensor): Input image from domain B.
            meta (list[dict]): Input meta data.

        Returns:
            dict: Dict of forward results for training.
        """
        (real_a, real_b, _) = self.setup(img_a, img_b, meta)
        generators = self.get_module(self.generators)
        # Full cycle A -> B -> A and B -> A -> B; the reconstructions are
        # used by the cycle-consistency loss.
        fake_b = generators['a'](real_a)
        rec_a = generators['b'](fake_b)
        fake_a = generators['b'](real_b)
        rec_b = generators['a'](fake_a)
        results = dict(real_a=real_a, fake_b=fake_b, rec_a=rec_a, real_b=real_b, fake_a=fake_a, rec_b=rec_b)
        return results

    def forward_test(self, img_a, img_b, meta, save_image=False, save_path=None, iteration=None):
        """Forward function for testing.

        Args:
            img_a (Tensor): Input image from domain A.
            img_b (Tensor): Input image from domain B.
            meta (list[dict]): Input meta data.
            save_image (bool, optional): If True, results will be saved as
                images. Default: False.
            save_path (str, optional): If given a valid str path, the results
                will be saved in this path. Default: None.
            iteration (int, optional): Iteration number. Default: None.

        Returns:
            dict: Dict of forward and evaluation results for testing.
        """
        # Keep the networks in train mode at test time (CycleGAN convention).
        self.train()
        (real_a, real_b, image_path) = self.setup(img_a, img_b, meta)
        generators = self.get_module(self.generators)
        fake_b = generators['a'](real_a)
        fake_a = generators['b'](real_b)
        results = dict(real_a=real_a.cpu(), fake_b=fake_b.cpu(), real_b=real_b.cpu(), fake_a=fake_a.cpu())
        if save_image:
            assert (save_path is not None)
            folder_name = osp.splitext(osp.basename(image_path[0]))[0]
            # NOTE(review): ``if iteration`` treats iteration 0 like None;
            # other models here test isinstance(iteration, numbers.Number) —
            # confirm this asymmetry is intended.
            if self.show_input:
                # Save [real_a | fake_b | real_b | fake_a] side by side.
                if iteration:
                    save_path = osp.join(save_path, folder_name, f'{folder_name}-{(iteration + 1):06d}-ra-fb-rb-fa.png')
                else:
                    save_path = osp.join(save_path, f'{folder_name}-ra-fb-rb-fa.png')
                output = np.concatenate([tensor2img(results['real_a'], min_max=((- 1), 1)), tensor2img(results['fake_b'], min_max=((- 1), 1)), tensor2img(results['real_b'], min_max=((- 1), 1)), tensor2img(results['fake_a'], min_max=((- 1), 1))], axis=1)
            elif (self.test_direction == 'a2b'):
                if iteration:
                    save_path = osp.join(save_path, folder_name, f'{folder_name}-{(iteration + 1):06d}-fb.png')
                else:
                    save_path = osp.join(save_path, f'{folder_name}-fb.png')
                output = tensor2img(results['fake_b'], min_max=((- 1), 1))
            else:
                if iteration:
                    save_path = osp.join(save_path, folder_name, f'{folder_name}-{(iteration + 1):06d}-fa.png')
                else:
                    save_path = osp.join(save_path, f'{folder_name}-fa.png')
                output = tensor2img(results['fake_a'], min_max=((- 1), 1))
            flag = mmcv.imwrite(output, save_path)
            results['saved_flag'] = flag
        return results

    def forward_dummy(self, img):
        """Used for computing network FLOPs.

        Args:
            img (Tensor): Dummy input used to compute FLOPs.

        Returns:
            Tensor: Dummy output produced by forwarding the dummy input.
        """
        generators = self.get_module(self.generators)
        # Run a full A -> B -> A cycle so both generators are counted.
        tmp = generators['a'](img)
        out = generators['b'](tmp)
        return out

    def forward(self, img_a, img_b, meta, test_mode=False, **kwargs):
        """Forward function.

        Args:
            img_a (Tensor): Input image from domain A.
            img_b (Tensor): Input image from domain B.
            meta (list[dict]): Input meta data.
            test_mode (bool): Whether in test mode or not. Default: False.
            kwargs (dict): Other arguments.
        """
        if test_mode:
            return self.forward_test(img_a, img_b, meta, **kwargs)
        return self.forward_train(img_a, img_b, meta)

    def backward_discriminators(self, outputs):
        """Backward function for the discriminators.

        Args:
            outputs (dict): Dict of forward results.

        Returns:
            dict: Loss dict.
        """
        discriminators = self.get_module(self.discriminators)
        log_vars_d = dict()
        # ---- discriminator 'a' (judges domain-B images) ----
        losses = dict()
        # Sample a fake from the history buffer instead of always using the
        # newest one.
        fake_b = self.image_buffers['b'].query(outputs['fake_b'])
        fake_pred = discriminators['a'](fake_b.detach())
        losses['loss_gan_d_a_fake'] = self.gan_loss(fake_pred, target_is_real=False, is_disc=True)
        real_pred = discriminators['a'](outputs['real_b'])
        losses['loss_gan_d_a_real'] = self.gan_loss(real_pred, target_is_real=True, is_disc=True)
        (loss_d_a, log_vars_d_a) = self.parse_losses(losses)
        # Halve the discriminator loss, as in the original CycleGAN
        # implementation.
        loss_d_a *= 0.5
        loss_d_a.backward()
        log_vars_d['loss_gan_d_a'] = (log_vars_d_a['loss'] * 0.5)
        # ---- discriminator 'b' (judges domain-A images) ----
        losses = dict()
        fake_a = self.image_buffers['a'].query(outputs['fake_a'])
        fake_pred = discriminators['b'](fake_a.detach())
        losses['loss_gan_d_b_fake'] = self.gan_loss(fake_pred, target_is_real=False, is_disc=True)
        real_pred = discriminators['b'](outputs['real_a'])
        losses['loss_gan_d_b_real'] = self.gan_loss(real_pred, target_is_real=True, is_disc=True)
        (loss_d_b, log_vars_d_b) = self.parse_losses(losses)
        loss_d_b *= 0.5
        loss_d_b.backward()
        log_vars_d['loss_gan_d_b'] = (log_vars_d_b['loss'] * 0.5)
        return log_vars_d

    def backward_generators(self, outputs):
        """Backward function for the generators.

        Args:
            outputs (dict): Dict of forward results.

        Returns:
            dict: Loss dict.
        """
        generators = self.get_module(self.generators)
        discriminators = self.get_module(self.discriminators)
        losses = dict()
        # Identity loss: feeding a target-domain image through the matching
        # generator should leave it (approximately) unchanged. Its weight is
        # scaled relative to the cycle-loss weight.
        if ((self.id_loss is not None) and (self.id_loss.loss_weight > 0)):
            id_a = generators['a'](outputs['real_b'])
            losses['loss_id_a'] = (self.id_loss(id_a, outputs['real_b']) * self.cycle_loss.loss_weight)
            id_b = generators['b'](outputs['real_a'])
            losses['loss_id_b'] = (self.id_loss(id_b, outputs['real_a']) * self.cycle_loss.loss_weight)
        # Adversarial losses: each generator wants its fakes judged real.
        fake_pred = discriminators['a'](outputs['fake_b'])
        losses['loss_gan_g_a'] = self.gan_loss(fake_pred, target_is_real=True, is_disc=False)
        fake_pred = discriminators['b'](outputs['fake_a'])
        losses['loss_gan_g_b'] = self.gan_loss(fake_pred, target_is_real=True, is_disc=False)
        # Cycle-consistency losses.
        losses['loss_cycle_a'] = self.cycle_loss(outputs['rec_a'], outputs['real_a'])
        losses['loss_cycle_b'] = self.cycle_loss(outputs['rec_b'], outputs['real_b'])
        (loss_g, log_vars_g) = self.parse_losses(losses)
        loss_g.backward()
        return log_vars_g

    def train_step(self, data_batch, optimizer):
        """Training step function.

        Args:
            data_batch (dict): Dict of the input data batch.
            optimizer (dict[torch.optim.Optimizer]): Dict of optimizers for
                the generators and discriminators.

        Returns:
            dict: Dict of loss, information for logger, the number of
                samples and results for visualization.
        """
        img_a = data_batch['img_a']
        img_b = data_batch['img_b']
        meta = data_batch['meta']
        outputs = self.forward(img_a, img_b, meta, test_mode=False)
        log_vars = dict()
        # Discriminators are updated every step.
        set_requires_grad(self.discriminators, True)
        optimizer['discriminators'].zero_grad()
        log_vars.update(self.backward_discriminators(outputs=outputs))
        optimizer['discriminators'].step()
        # Generators are updated every ``disc_steps`` steps, after the
        # ``disc_init_steps`` warm-up phase, with frozen discriminators.
        if (((self.step_counter % self.disc_steps) == 0) and (self.step_counter >= self.disc_init_steps)):
            set_requires_grad(self.discriminators, False)
            optimizer['generators'].zero_grad()
            log_vars.update(self.backward_generators(outputs=outputs))
            optimizer['generators'].step()
        self.step_counter += 1
        # Drop the aggregate 'loss' entry (may be absent when the generator
        # update was skipped, hence the default).
        log_vars.pop('loss', None)
        results = dict(log_vars=log_vars, num_samples=len(outputs['real_a']), results=dict(real_a=outputs['real_a'].cpu(), fake_b=outputs['fake_b'].cpu(), real_b=outputs['real_b'].cpu(), fake_a=outputs['fake_a'].cpu()))
        return results

    def val_step(self, data_batch, **kwargs):
        """Validation step function.

        Args:
            data_batch (dict): Dict of the input data batch.
            kwargs (dict): Other arguments.

        Returns:
            dict: Dict of evaluation results for validation.
        """
        img_a = data_batch['img_a']
        img_b = data_batch['img_b']
        meta = data_batch['meta']
        results = self.forward(img_a, img_b, meta, test_mode=True, **kwargs)
        return results
|
@MODELS.register_module()
class Pix2Pix(BaseModel):
'Pix2Pix model for paired image-to-image translation.\n\n Ref:\n Image-to-Image Translation with Conditional Adversarial Networks\n\n Args:\n generator (dict): Config for the generator.\n discriminator (dict): Config for the discriminator.\n gan_loss (dict): Config for the gan loss.\n pixel_loss (dict): Config for the pixel loss. Default: None.\n train_cfg (dict): Config for training. Default: None.\n You may change the training of gan by setting:\n `disc_steps`: how many discriminator updates after one generator\n update.\n `disc_init_steps`: how many discriminator updates at the start of\n the training.\n These two keys are useful when training with WGAN.\n `direction`: image-to-image translation direction (the model\n training direction): a2b | b2a.\n test_cfg (dict): Config for testing. Default: None.\n You may change the testing of gan by setting:\n `direction`: image-to-image translation direction (the model\n training direction, same as testing direction): a2b | b2a.\n `show_input`: whether to show input real images.\n pretrained (str): Path for pretrained model. Default: None.\n '
def __init__(self, generator, discriminator, gan_loss, pixel_loss=None, train_cfg=None, test_cfg=None, pretrained=None):
    super().__init__()
    self.train_cfg = train_cfg
    self.test_cfg = test_cfg
    self.generator = build_backbone(generator)
    self.discriminator = build_component(discriminator)
    # The GAN loss is mandatory for pix2pix.
    assert (gan_loss is not None)
    self.gan_loss = build_loss(gan_loss)
    self.pixel_loss = (build_loss(pixel_loss) if pixel_loss else None)
    # GAN schedule: generator updated once every ``disc_steps`` steps, after
    # ``disc_init_steps`` discriminator-only warm-up steps.
    self.disc_steps = (1 if (self.train_cfg is None) else self.train_cfg.get('disc_steps', 1))
    self.disc_init_steps = (0 if (self.train_cfg is None) else self.train_cfg.get('disc_init_steps', 0))
    # Translation direction comes from train_cfg when training, otherwise
    # from test_cfg; defaults to 'a2b'.
    if (self.train_cfg is None):
        self.direction = ('a2b' if (self.test_cfg is None) else self.test_cfg.get('direction', 'a2b'))
    else:
        self.direction = self.train_cfg.get('direction', 'a2b')
    # Number of completed train steps.
    self.step_counter = 0
    self.show_input = (False if (self.test_cfg is None) else self.test_cfg.get('show_input', False))
    self.fp16_enabled = False
    self.init_weights(pretrained)
def init_weights(self, pretrained=None):
    """Initialize weights for the model.

    Args:
        pretrained (str, optional): Path for pretrained weights. If given
            None, pretrained weights will not be loaded. Default: None.
    """
    # Both networks are initialized from the same (optional) source.
    for net in (self.generator, self.discriminator):
        net.init_weights(pretrained=pretrained)
def setup(self, img_a, img_b, meta):
    """Perform necessary pre-processing steps.

    Args:
        img_a (Tensor): Input image from domain A.
        img_b (Tensor): Input image from domain B.
        meta (list[dict]): Input meta data.

    Returns:
        Tensor, Tensor, list[str]: The real images from domain A/B, and
            the image path as the metadata.
    """
    # When the direction is 'b2a', the two inputs swap roles.
    if self.direction == 'a2b':
        real_a, real_b = img_a, img_b
        path_key = 'img_a_path'
    else:
        real_a, real_b = img_b, img_a
        path_key = 'img_b_path'
    image_path = [v[path_key] for v in meta]
    return (real_a, real_b, image_path)
@auto_fp16(apply_to=('img_a', 'img_b'))
def forward_train(self, img_a, img_b, meta):
    """Forward function for training.

    Args:
        img_a (Tensor): Input image from domain A.
        img_b (Tensor): Input image from domain B.
        meta (list[dict]): Input meta data.

    Returns:
        dict: Dict of forward results for training.
    """
    real_a, real_b, _ = self.setup(img_a, img_b, meta)
    # Translate A -> B; real_b serves only as the paired target.
    fake_b = self.generator(real_a)
    return dict(real_a=real_a, fake_b=fake_b, real_b=real_b)
def forward_test(self, img_a, img_b, meta, save_image=False, save_path=None, iteration=None):
    """Forward function for testing.

    Args:
        img_a (Tensor): Input image from domain A.
        img_b (Tensor): Input image from domain B.
        meta (list[dict]): Input meta data.
        save_image (bool, optional): If True, results will be saved as
            images. Default: False.
        save_path (str, optional): If given a valid str path, the results
            will be saved in this path. Default: None.
        iteration (int, optional): Iteration number. Default: None.

    Returns:
        dict: Dict of forward and evaluation results for testing.
    """
    # NOTE(review): the model is deliberately put in train mode for
    # inference here — presumably so dropout/BN behave as during training
    # (common for pix2pix-style models); confirm before changing.
    self.train()
    (real_a, real_b, image_path) = self.setup(img_a, img_b, meta)
    fake_b = self.generator(real_a)
    # Move everything to CPU so results can be saved/compared off-device.
    results = dict(real_a=real_a.cpu(), fake_b=fake_b.cpu(), real_b=real_b.cpu())
    if save_image:
        assert (save_path is not None)
        # Output filename is derived from the first input image's basename.
        folder_name = osp.splitext(osp.basename(image_path[0]))[0]
        if self.show_input:
            # Save the triplet [real_a | fake_b | real_b] side by side.
            # NOTE(review): `if iteration:` treats iteration == 0 the same
            # as None (no per-iteration subfolder).
            if iteration:
                save_path = osp.join(save_path, folder_name, f'{folder_name}-{(iteration + 1):06d}-ra-fb-rb.png')
            else:
                save_path = osp.join(save_path, f'{folder_name}-ra-fb-rb.png')
            output = np.concatenate([tensor2img(results['real_a'], min_max=((- 1), 1)), tensor2img(results['fake_b'], min_max=((- 1), 1)), tensor2img(results['real_b'], min_max=((- 1), 1))], axis=1)
        else:
            # Save only the generated image.
            if iteration:
                save_path = osp.join(save_path, folder_name, f'{folder_name}-{(iteration + 1):06d}-fb.png')
            else:
                save_path = osp.join(save_path, f'{folder_name}-fb.png')
            output = tensor2img(results['fake_b'], min_max=((- 1), 1))
        flag = mmcv.imwrite(output, save_path)
        results['saved_flag'] = flag
    return results
def forward_dummy(self, img):
    """Used for computing network FLOPs.

    Args:
        img (Tensor): Dummy input used to compute FLOPs.

    Returns:
        Tensor: Dummy output produced by forwarding the dummy input.
    """
    return self.generator(img)
def forward(self, img_a, img_b, meta, test_mode=False, **kwargs):
    """Forward function.

    Args:
        img_a (Tensor): Input image from domain A.
        img_b (Tensor): Input image from domain B.
        meta (list[dict]): Input meta data.
        test_mode (bool): Whether in test mode or not. Default: False.
        kwargs (dict): Other arguments, forwarded only in test mode.
    """
    if not test_mode:
        return self.forward_train(img_a, img_b, meta)
    return self.forward_test(img_a, img_b, meta, **kwargs)
def backward_discriminator(self, outputs):
    """Compute and backpropagate the discriminator loss.

    Args:
        outputs (dict): Dict of forward results.

    Returns:
        dict: Loss dict.
    """
    losses = dict()
    # Conditional GAN input: channel-wise concat of source and target.
    # Detach the fake so no gradient flows into the generator here.
    fake_ab = torch.cat((outputs['real_a'], outputs['fake_b']), 1)
    fake_pred = self.discriminator(fake_ab.detach())
    losses['loss_gan_d_fake'] = self.gan_loss(fake_pred, target_is_real=False, is_disc=True)
    real_ab = torch.cat((outputs['real_a'], outputs['real_b']), 1)
    real_pred = self.discriminator(real_ab)
    losses['loss_gan_d_real'] = self.gan_loss(real_pred, target_is_real=True, is_disc=True)
    loss_d, log_vars_d = self.parse_losses(losses)
    # Halve the loss so the discriminator learns at half the rate of the
    # generator (standard pix2pix trick); logging uses the unscaled values.
    (0.5 * loss_d).backward()
    return log_vars_d
def backward_generator(self, outputs):
    """Compute and backpropagate the generator loss.

    Args:
        outputs (dict): Dict of forward results.

    Returns:
        dict: Loss dict.
    """
    losses = dict()
    # Adversarial term: fool the discriminator on the (source, fake) pair.
    conditioned_fake = torch.cat((outputs['real_a'], outputs['fake_b']), 1)
    disc_pred = self.discriminator(conditioned_fake)
    losses['loss_gan_g'] = self.gan_loss(disc_pred, target_is_real=True, is_disc=False)
    # Optional pixel-wise reconstruction term.
    if self.pixel_loss:
        losses['loss_pixel'] = self.pixel_loss(outputs['fake_b'], outputs['real_b'])
    loss_g, log_vars_g = self.parse_losses(losses)
    loss_g.backward()
    return log_vars_g
def train_step(self, data_batch, optimizer):
    """Training step function.

    Args:
        data_batch (dict): Dict of the input data batch.
        optimizer (dict[torch.optim.Optimizer]): Dict of optimizers for
            the generator and discriminator.

    Returns:
        dict: Dict of loss, information for logger, the number of samples
            and results for visualization.
    """
    img_a = data_batch['img_a']
    img_b = data_batch['img_b']
    meta = data_batch['meta']
    outputs = self.forward(img_a, img_b, meta, test_mode=False)
    log_vars = dict()
    # Discriminator update: runs every step.
    set_requires_grad(self.discriminator, True)
    optimizer['discriminator'].zero_grad()
    log_vars.update(self.backward_discriminator(outputs=outputs))
    optimizer['discriminator'].step()
    # Generator update: only every `disc_steps` iterations and only after
    # the discriminator has warmed up for `disc_init_steps` iterations.
    # Discriminator gradients are frozen so backward_generator does not
    # accumulate into its parameters.
    if (((self.step_counter % self.disc_steps) == 0) and (self.step_counter >= self.disc_init_steps)):
        set_requires_grad(self.discriminator, False)
        optimizer['generator'].zero_grad()
        log_vars.update(self.backward_generator(outputs=outputs))
        optimizer['generator'].step()
    self.step_counter += 1
    # Drop the aggregate 'loss' entry; the individual terms are logged.
    log_vars.pop('loss', None)
    results = dict(log_vars=log_vars, num_samples=len(outputs['real_a']), results=dict(real_a=outputs['real_a'].cpu(), fake_b=outputs['fake_b'].cpu(), real_b=outputs['real_b'].cpu()))
    return results
def val_step(self, data_batch, **kwargs):
    """Validation step function.

    Args:
        data_batch (dict): Dict of the input data batch.
        kwargs (dict): Other arguments.

    Returns:
        dict: Dict of evaluation results for validation.
    """
    # Validation simply runs the test-mode forward pass on the batch.
    return self.forward(
        data_batch['img_a'],
        data_batch['img_b'],
        data_batch['meta'],
        test_mode=True,
        **kwargs)
|
@COMPONENTS.register_module()
class SearchTransformer(nn.Module):
    """Search texture reference by transformer.

    Include relevance embedding, hard-attention and soft-attention.
    """

    def gather(self, inputs, dim, index):
        """Hard Attention. Gathers values along an axis specified by dim.

        Args:
            inputs (Tensor): The source tensor. (N, C*k*k, H*W)
            dim (int): The axis along which to index.
            index (Tensor): The indices of elements to gather. (N, H*W)

        results:
            outputs (Tensor): The result tensor. (N, C*k*k, H*W)
        """
        # Reshape index to be broadcastable against `inputs`: keep the batch
        # axis, put -1 (the index length) at `dim`, and 1 everywhere else.
        views = ([inputs.size(0)] + [(1 if (i != dim) else (- 1)) for i in range(1, inputs.ndim)])
        # Expand the singleton axes up to the matching sizes of `inputs`
        # (-1 keeps the existing size on the batch axis and on `dim`).
        expansion = [((- 1) if (i in (0, dim)) else d) for (i, d) in enumerate(inputs.size())]
        index = index.view(views).expand(expansion)
        outputs = torch.gather(inputs, dim, index)
        return outputs

    def forward(self, lq_up, ref_downup, refs):
        """Texture transformer.

        Q = LTE(lq_up), K = LTE(ref_downup), V = LTE(ref) for each level.

        Relevance embedding estimates the similarity between Q and K.
        Hard-attention transfers, for each query position, the features from
        the single most relevant position in V. Soft-attention (the returned
        map) weights the transferred textures against the LQ features later
        in the pipeline.

        Args:
            All args are features coming from an extractor (such as LTE)
            at several levels; when upscale_factor=4 the size ratio is
            level3:level2:level1 = 1:2:4.
            lq_up (Tensor): Features of the 4x bicubic-upsampled lq image.
                (N, C, H, W)
            ref_downup (Tensor): Features of ref after bicubic down- and
                up-sampling with factor 4x. (N, C, H, W)
            refs (Tuple[Tensor]): Tuple of ref feature tensors.
                [(N, C, H, W), (N, C/2, 2H, 2W), ...]

        Returns:
            soft_attention (Tensor): Soft-Attention tensor. (N, 1, H, W)
            textures (Tuple[Tensor]): Transferred GT textures.
                [(N, C, H, W), (N, C/2, 2H, 2W), ...]
        """
        levels = len(refs)
        # Extract 3x3 patches as columns: (N, C*3*3, H*W).
        query = F.unfold(lq_up, kernel_size=(3, 3), padding=1)
        key = F.unfold(ref_downup, kernel_size=(3, 3), padding=1)
        key_t = key.permute(0, 2, 1)
        # Patch size/stride scale with the level so patches at every level
        # cover the same spatial region of the reference image.
        values = [F.unfold(refs[i], kernel_size=(3 * pow(2, i)), padding=pow(2, i), stride=pow(2, i)) for i in range(levels)]
        # Normalize so the bmm below computes cosine similarity.
        key_t = F.normalize(key_t, dim=2)
        query = F.normalize(query, dim=1)
        # Relevance embedding: (N, H*W_key, H*W_query).
        rel_embedding = torch.bmm(key_t, query)
        # Hard attention: index of the best-matching key per query position.
        (max_val, max_index) = torch.max(rel_embedding, dim=1)
        textures = [self.gather(value, 2, max_index) for value in values]
        (h, w) = lq_up.size()[(- 2):]
        # Fold patches back to images; with kernel/stride ratio 3, each
        # output pixel receives 9 overlapping contributions, hence /9.
        textures = [(F.fold(textures[i], output_size=((h * pow(2, i)), (w * pow(2, i))), kernel_size=(3 * pow(2, i)), padding=pow(2, i), stride=pow(2, i)) / 9.0) for i in range(levels)]
        # Best similarity per query position, reshaped to a spatial map.
        soft_attention = max_val.view(max_val.size(0), 1, h, w)
        return (soft_attention, textures)
|
@MODELS.register_module()
class CAIN(BasicInterpolator):
    """CAIN model for Video Interpolation.

    Paper: Channel Attention Is All You Need for Video Frame Interpolation
    Ref repo: https://github.com/myungsub/CAIN

    Args:
        generator (dict): Config for the generator structure.
        pixel_loss (dict): Config for pixel-wise loss.
        train_cfg (dict): Config for training. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def forward_train(self, inputs, target):
        """Training forward function.

        Args:
            inputs (Tensor): Tensor of inputs frames with shape
                (n, 2, c, h, w).
            target (Tensor): Tensor of target frame with shape (n, c, h, w).

        Returns:
            dict: Losses, number of samples, and visualization results.
        """
        output = self.generator(inputs, padding_flag=False)
        losses = dict(loss_pix=self.pixel_loss(output, target))
        visuals = dict(inputs=inputs.cpu(), target=target.cpu(), output=output.cpu())
        return dict(losses=losses, num_samples=len(target.data), results=visuals)

    def forward_test(self, inputs, target=None, meta=None, save_image=False, save_path=None, iteration=None):
        """Testing forward function.

        Args:
            inputs (Tensor): The input Tensor with shape (n, 2, c, h, w).
            target (Tensor): The target Tensor with shape (n, c, h, w).
            meta (list[dict]): Meta data, such as path of target file.
                Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results, which contain either key(s)
                1. 'eval_result'.
                2. 'inputs', 'pred'.
                3. 'inputs', 'pred', and 'target'.
        """
        # Inference only; clamp to the valid [0, 1] image range.
        with torch.no_grad():
            pred = self.generator(inputs, padding_flag=True)
            pred = pred.clamp(0, 1)
        metrics_requested = (self.test_cfg is not None) and self.test_cfg.get('metrics', None)
        if metrics_requested:
            assert target is not None, 'evaluation with metrics must have target images.'
            results = dict(eval_result=self.evaluate(pred, target))
        else:
            results = dict(inputs=inputs.cpu(), output=pred.cpu())
            if target is not None:
                results['target'] = target.cpu()
        if save_image:
            self._save_image(meta, iteration, save_path, pred)
        return results
|
def modify_args():
    """Convert deprecated underscore-style CLI options to dash-style.

    Rewrites ``sys.argv`` in place: any ``--foo_bar[=value]`` argument is
    renamed to ``--foo-bar[=value]`` and a ``DeprecationWarning`` is issued.
    Only the option *name* is rewritten; an attached ``=value`` part is left
    untouched (the previous implementation also mangled underscores inside
    the value, e.g. ``--work_dir=my_dir`` became ``--work-dir=my-dir``).
    """
    for i, arg in enumerate(sys.argv):
        if i == 0:
            # sys.argv[0] is the script path.
            assert arg.endswith('.py')
        elif re.match('--\\w+_.*', arg):
            # Split off an attached value so underscores in the value part
            # survive the rename.
            name, sep, value = arg.partition('=')
            new_arg = name.replace('_', '-') + sep + value
            warnings.warn(
                f'command line argument {arg} is deprecated, '
                f'please use {new_arg} instead.',
                category=DeprecationWarning)
            sys.argv[i] = new_arg
|
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: Environment info, with the MMEditing version suffixed by the
            short git hash.
    """
    env_info = collect_base_env()
    mmedit_version = f'{mmedit.__version__}+{get_git_hash()[:7]}'
    env_info['MMEditing'] = mmedit_version
    return env_info
|
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get the root logger.

    The logger will be initialized if it has not been initialized. By default
    a StreamHandler will be added. If `log_file` is specified, a FileHandler
    will also be added. The name of the root logger is the top-level package
    name, e.g., "mmedit".

    Args:
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the root logger.
        log_level (int): The root logger level. Note that only the process of
            rank 0 is affected, while other processes will set the level to
            "Error" and be silent most of the time.

    Returns:
        logging.Logger: The root logger.
    """
    # The root logger is named after the top-level package.
    root_name = __name__.split('.')[0]
    return get_logger(root_name, log_file, log_level)
|
def setup_multi_processes(cfg):
    """Setup multi-processing environment variables.

    Args:
        cfg (mmcv.Config): Config that may carry ``mp_start_method``,
            ``opencv_num_threads`` and ``data.workers_per_gpu``.
    """
    # Set the multiprocessing start method (Windows only supports 'spawn').
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            # Fixed the previously missing space between sentences
            # ("...`.It will be...").
            warnings.warn(
                f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`. '
                f'It will be force set to `{mp_start_method}`. You can '
                'change this behavior by changing `mp_start_method` in '
                'your config.')
        mp.set_start_method(mp_start_method, force=True)
    # Limit opencv threads to avoid oversubscription with dataloader workers.
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)
    # Cap OMP/MKL threads per worker process unless the user set them
    # explicitly (deduplicated; previously two copy-pasted blocks).
    for var in ('OMP_NUM_THREADS', 'MKL_NUM_THREADS'):
        if var not in os.environ and cfg.data.workers_per_gpu > 1:
            num_threads = 1
            warnings.warn(
                f'Setting {var} environment variable for each process to '
                f'be {num_threads} in default, to avoid your system being '
                'overloaded, please further tune the variable for optimal '
                'performance in your application as needed.')
            os.environ[var] = str(num_threads)
|
def parse_version_info(version_str):
    """Parse a dotted version string into a tuple.

    Numeric components become ints; a release-candidate component such as
    '0rc1' becomes the int part followed by the string 'rc1'.
    Example: '1.2.0rc1' -> (1, 2, 0, 'rc1').

    Args:
        version_str (str): Version string like '1.2.3' or '1.2.0rc1'.

    Returns:
        tuple: Parsed version components.
    """
    parsed = []
    for component in version_str.split('.'):
        if component.isdigit():
            parsed.append(int(component))
        elif 'rc' in component:
            num, rc_suffix = component.split('rc')[0], component.split('rc')[1]
            parsed.append(int(num))
            parsed.append(f'rc{rc_suffix}')
    return tuple(parsed)
|
def readme():
    """Read README.md and return its full content as a string."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
|
def get_git_hash():
    """Return the current git commit hash, or 'unknown' when git fails."""

    def _minimal_ext_cmd(cmd):
        # Run `cmd` with a minimal, language-neutral environment so the
        # output is stable regardless of the user's locale.
        env = {
            key: os.environ[key]
            for key in ('SYSTEMROOT', 'PATH', 'HOME')
            if os.environ.get(key) is not None
        }
        env.update(LANGUAGE='C', LANG='C', LC_ALL='C')
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        raw = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = raw.strip().decode('ascii')
    except OSError:
        sha = 'unknown'
    return sha
|
def get_hash():
    """Return a short hash identifying the current source revision."""
    # Prefer the live git checkout, fall back to the recorded version file.
    if os.path.exists('.git'):
        return get_git_hash()[:7]
    if os.path.exists(version_file):
        try:
            from mmedit.version import __version__
        except ImportError:
            raise ImportError('Unable to get git version')
        return __version__.split('+')[-1]
    return 'unknown'
|
def get_version():
    """Read ``__version__`` from ``version_file`` without importing it.

    The previous implementation executed the file and then read
    ``locals()['__version__']``; ``exec`` updating a function's locals is
    CPython-implementation-specific and fragile. Executing into an explicit
    namespace dict is the portable, documented approach.

    Returns:
        str: The version string defined in ``version_file``.
    """
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
|
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Recurse into a referenced requirements file.
            target = line.split(' ')[1]
            yield from parse_require_file(target)
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: the package name follows '#egg='.
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                # Direct git URL requirement: keep the whole line.
                info['package'] = line
            else:
                # Split "pkg<op>version[;platform]" on the first operator.
                pat = '(' + '|'.join(['>=', '<=', '==', '>', '<']) + ')'
                parts = [p.strip() for p in re.split(pat, line, maxsplit=1)]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        version, platform_deps = map(str.strip, rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed info for every non-empty, non-comment line.
        with open(fpath, 'r') as f:
            for raw_line in f.readlines():
                stripped = raw_line.strip()
                if stripped and not stripped.startswith('#'):
                    yield from parse_line(stripped)

    def gen_packages_items():
        if not exists(require_fpath):
            return
        for info in parse_require_file(require_fpath):
            parts = [info['package']]
            if with_version and 'version' in info:
                parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # Platform markers are broken on Python 3.4.
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
            yield ''.join(parts)

    return list(gen_packages_items())
|
def add_mim_extention():
    """Add extra files that are required to support MIM into the package.

    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # Parse the install mode from the setup.py command line.
    if ('develop' in sys.argv):
        # `pip install -e .` -> symlink so edits are picked up live.
        mode = 'symlink'
    elif (('sdist' in sys.argv) or ('bdist_wheel' in sys.argv)):
        # Building a distributable artifact -> real copies.
        mode = 'copy'
    else:
        # Any other command: nothing to do.
        return
    filenames = ['tools', 'configs', 'demo', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmedit', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file/link/directory at the target first.
            if (osp.isfile(tar_path) or osp.islink(tar_path)):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if (mode == 'symlink'):
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                try:
                    os.symlink(src_relpath, tar_path)
                except OSError:
                    # Symlinking can fail (e.g. on Windows without developer
                    # mode); fall back to copying for this and later files.
                    mode = 'copy'
                    warnings.warn(f'Failed to create a symbolic link for {src_relpath}, and it will be copied to {tar_path}')
                else:
                    # Symlink succeeded; move on to the next file.
                    continue
            if (mode == 'copy'):
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
|
class TestGenerationDatasets():
    """Tests for BaseGenerationDataset and the paired/unpaired datasets."""

    @classmethod
    def setup_class(cls):
        # Shared fixture directory: tests/data relative to this file.
        cls.data_prefix = (Path(__file__).parent.parent.parent / 'data')

    def test_base_generation_dataset(self):

        class ToyDataset(BaseGenerationDataset):
            """Toy dataset for testing Generation Dataset."""

            def load_annotations(self):
                pass

        toy_dataset = ToyDataset(pipeline=[])
        file_paths = ['paired/test/3.jpg', 'paired/train/1.jpg', 'paired/train/2.jpg']
        file_paths = [str((self.data_prefix / v)) for v in file_paths]
        # scan_folder accepts both Path and str inputs.
        result = toy_dataset.scan_folder(self.data_prefix)
        assert set(file_paths).issubset(set(result))
        result = toy_dataset.scan_folder(str(self.data_prefix))
        assert set(file_paths).issubset(set(result))
        with pytest.raises(TypeError):
            toy_dataset.scan_folder(123)
        toy_dataset.data_infos = file_paths
        with pytest.raises(TypeError):
            _ = toy_dataset.evaluate(1)
        # evaluate() requires one result dict per entry in data_infos.
        test_results = [dict(saved_flag=True), dict(saved_flag=True)]
        with pytest.raises(AssertionError):
            _ = toy_dataset.evaluate(test_results)
        test_results = [dict(saved_flag=True), dict(saved_flag=True), dict(saved_flag=False)]
        eval_result = toy_dataset.evaluate(test_results)
        assert (eval_result['val_saved_number'] == 2)

    def test_generation_paired_dataset(self):
        # mean/std 0.5 after RescaleToZeroOne normalizes images to [-1, 1].
        img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        pipeline = [dict(type='LoadPairedImageFromFile', io_backend='disk', key='pair', flag='color'), dict(type='Resize', keys=['img_a', 'img_b'], scale=(286, 286), interpolation='bicubic'), dict(type='FixedCrop', keys=['img_a', 'img_b'], crop_size=(256, 256)), dict(type='Flip', keys=['img_a', 'img_b'], direction='horizontal'), dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']), dict(type='Normalize', keys=['img_a', 'img_b'], to_rgb=True, **img_norm_cfg), dict(type='ImageToTensor', keys=['img_a', 'img_b']), dict(type='Collect', keys=['img_a', 'img_b'], meta_keys=['img_a_path', 'img_b_path'])]
        target_keys = ['img_a', 'img_b', 'meta']
        target_meta_keys = ['img_a_path', 'img_b_path']
        pair_folder = (self.data_prefix / 'paired')
        # test_mode=True: only the 'test' split (a single pair) is loaded.
        generation_paried_dataset = GenerationPairedDataset(dataroot=pair_folder, pipeline=pipeline, test_mode=True)
        data_infos = generation_paried_dataset.data_infos
        assert (data_infos == [dict(pair_path=str(((pair_folder / 'test') / '3.jpg')))])
        result = generation_paried_dataset[0]
        assert (len(generation_paried_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
        assert (result['meta'].data['img_a_path'] == str(((pair_folder / 'test') / '3.jpg')))
        assert (result['meta'].data['img_b_path'] == str(((pair_folder / 'test') / '3.jpg')))
        # A str dataroot must behave exactly like a Path dataroot.
        generation_paried_dataset = GenerationPairedDataset(dataroot=str(pair_folder), pipeline=pipeline, test_mode=True)
        data_infos = generation_paried_dataset.data_infos
        assert (data_infos == [dict(pair_path=str(((pair_folder / 'test') / '3.jpg')))])
        result = generation_paried_dataset[0]
        assert (len(generation_paried_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
        assert (result['meta'].data['img_a_path'] == str(((pair_folder / 'test') / '3.jpg')))
        assert (result['meta'].data['img_b_path'] == str(((pair_folder / 'test') / '3.jpg')))
        # test_mode=False: the 'train' split (two pairs) is loaded.
        generation_paried_dataset = GenerationPairedDataset(dataroot=str(pair_folder), pipeline=pipeline, test_mode=False)
        data_infos = generation_paried_dataset.data_infos
        assert (data_infos == [dict(pair_path=str(((pair_folder / 'train') / '1.jpg'))), dict(pair_path=str(((pair_folder / 'train') / '2.jpg')))])
        assert (len(generation_paried_dataset) == 2)
        result = generation_paried_dataset[0]
        assert assert_dict_has_keys(result, target_keys)
        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
        assert (result['meta'].data['img_a_path'] == str(((pair_folder / 'train') / '1.jpg')))
        assert (result['meta'].data['img_b_path'] == str(((pair_folder / 'train') / '1.jpg')))
        result = generation_paried_dataset[1]
        assert assert_dict_has_keys(result, target_keys)
        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
        assert (result['meta'].data['img_a_path'] == str(((pair_folder / 'train') / '2.jpg')))
        assert (result['meta'].data['img_b_path'] == str(((pair_folder / 'train') / '2.jpg')))

    def test_generation_unpaired_dataset(self):
        # mean/std 0.5 after RescaleToZeroOne normalizes images to [-1, 1].
        img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        pipeline = [dict(type='LoadImageFromFile', io_backend='disk', key='img_a', flag='color'), dict(type='LoadImageFromFile', io_backend='disk', key='img_b', flag='color'), dict(type='Resize', keys=['img_a', 'img_b'], scale=(286, 286), interpolation='bicubic'), dict(type='Crop', keys=['img_a', 'img_b'], crop_size=(256, 256), random_crop=True), dict(type='Flip', keys=['img_a'], direction='horizontal'), dict(type='Flip', keys=['img_b'], direction='horizontal'), dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']), dict(type='Normalize', keys=['img_a', 'img_b'], to_rgb=True, **img_norm_cfg), dict(type='ImageToTensor', keys=['img_a', 'img_b']), dict(type='Collect', keys=['img_a', 'img_b'], meta_keys=['img_a_path', 'img_b_path'])]
        target_keys = ['img_a', 'img_b', 'meta']
        target_meta_keys = ['img_a_path', 'img_b_path']
        unpair_folder = (self.data_prefix / 'unpaired')
        # test_mode=True: one image in each of testA/testB.
        generation_unpaired_dataset = GenerationUnpairedDataset(dataroot=unpair_folder, pipeline=pipeline, test_mode=True)
        data_infos_a = generation_unpaired_dataset.data_infos_a
        data_infos_b = generation_unpaired_dataset.data_infos_b
        assert (data_infos_a == [dict(path=str(((unpair_folder / 'testA') / '5.jpg')))])
        assert (data_infos_b == [dict(path=str(((unpair_folder / 'testB') / '6.jpg')))])
        result = generation_unpaired_dataset[0]
        assert (len(generation_unpaired_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
        assert (result['meta'].data['img_a_path'] == str(((unpair_folder / 'testA') / '5.jpg')))
        assert (result['meta'].data['img_b_path'] == str(((unpair_folder / 'testB') / '6.jpg')))
        # A str dataroot must behave exactly like a Path dataroot.
        generation_unpaired_dataset = GenerationUnpairedDataset(dataroot=str(unpair_folder), pipeline=pipeline, test_mode=True)
        data_infos_a = generation_unpaired_dataset.data_infos_a
        data_infos_b = generation_unpaired_dataset.data_infos_b
        assert (data_infos_a == [dict(path=str(((unpair_folder / 'testA') / '5.jpg')))])
        assert (data_infos_b == [dict(path=str(((unpair_folder / 'testB') / '6.jpg')))])
        result = generation_unpaired_dataset[0]
        assert (len(generation_unpaired_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
        assert (result['meta'].data['img_a_path'] == str(((unpair_folder / 'testA') / '5.jpg')))
        assert (result['meta'].data['img_b_path'] == str(((unpair_folder / 'testB') / '6.jpg')))
        # test_mode=False: two images per train domain; img_b is drawn
        # randomly, so only membership (not exact pairing) is asserted.
        generation_unpaired_dataset = GenerationUnpairedDataset(dataroot=str(unpair_folder), pipeline=pipeline, test_mode=False)
        data_infos_a = generation_unpaired_dataset.data_infos_a
        data_infos_b = generation_unpaired_dataset.data_infos_b
        assert (data_infos_a == [dict(path=str(((unpair_folder / 'trainA') / '1.jpg'))), dict(path=str(((unpair_folder / 'trainA') / '2.jpg')))])
        assert (data_infos_b == [dict(path=str(((unpair_folder / 'trainB') / '3.jpg'))), dict(path=str(((unpair_folder / 'trainB') / '4.jpg')))])
        assert (len(generation_unpaired_dataset) == 2)
        img_b_paths = [str(((unpair_folder / 'trainB') / '3.jpg')), str(((unpair_folder / 'trainB') / '4.jpg'))]
        result = generation_unpaired_dataset[0]
        assert assert_dict_has_keys(result, target_keys)
        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
        assert (result['meta'].data['img_a_path'] == str(((unpair_folder / 'trainA') / '1.jpg')))
        assert (result['meta'].data['img_b_path'] in img_b_paths)
        result = generation_unpaired_dataset[1]
        assert assert_dict_has_keys(result, target_keys)
        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
        assert (result['meta'].data['img_a_path'] == str(((unpair_folder / 'trainA') / '2.jpg')))
        assert (result['meta'].data['img_b_path'] in img_b_paths)
|
class TestMattingDatasets():
    """Tests for AdobeComp1kDataset loading and metric aggregation."""

    @classmethod
    def setup_class(cls):
        # Fixture annotation file and a minimal alpha-loading pipeline.
        cls.data_prefix = (Path(__file__).parent.parent.parent / 'data')
        cls.ann_file = osp.join(cls.data_prefix, 'test_list.json')
        cls.pipeline = [dict(type='LoadImageFromFile', key='alpha', flag='grayscale')]

    def test_comp1k_dataset(self):
        comp1k_dataset = AdobeComp1kDataset(self.ann_file, self.pipeline, self.data_prefix)
        first_data = comp1k_dataset[0]
        assert ('alpha' in first_data)
        assert isinstance(first_data['alpha'], np.ndarray)
        # The alpha matte in the fixture data is a 552x800 grayscale image.
        assert (first_data['alpha'].shape == (552, 800))

    def test_comp1k_evaluate(self):
        comp1k_dataset = AdobeComp1kDataset(self.ann_file, self.pipeline, self.data_prefix)
        with pytest.raises(TypeError):
            comp1k_dataset.evaluate('Not a list object')
        results = [{'pred_alpha': None, 'eval_result': {'SAD': 26, 'MSE': 0.006}}, {'pred_alpha': None, 'eval_result': {'SAD': 24, 'MSE': 0.004}}]
        # evaluate() averages each metric over the per-sample results.
        eval_result = comp1k_dataset.evaluate(results)
        assert (set(eval_result.keys()) == set(['SAD', 'MSE']))
        assert (eval_result['SAD'] == 25)
        assert (eval_result['MSE'] == 0.005)
|
def test_repeat_dataset():
    """RepeatDataset tiles the wrapped dataset `times` times."""

    class ToyDataset(Dataset):

        def __init__(self):
            super().__init__()
            self.members = [1, 2, 3, 4, 5]

        def __len__(self):
            return len(self.members)

        def __getitem__(self, idx):
            return self.members[idx % 5]

    repeated = RepeatDataset(ToyDataset(), 2)
    # 5 items repeated twice -> length 10, indices wrap modulo 5.
    assert len(repeated) == 10
    assert repeated[2] == 3
    assert repeated[8] == 4
|
def mock_open(*args, **kwargs):
"unittest.mock_open wrapper.\n\n unittest.mock_open doesn't support iteration. Wrap it to fix this bug.\n Reference: https://stackoverflow.com/a/41656192\n "
import unittest
f_open = unittest.mock.mock_open(*args, **kwargs)
f_open.return_value.__iter__ = (lambda self: iter(self.readline, ''))
return f_open
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.