import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.utils import spectral_norm
import numpy as np


class BaseNetwork(nn.Module):
    """Base class that adds parameter counting and weight initialization."""

    def __init__(self):
        super(BaseNetwork, self).__init__()

    def print_network(self):
        num_params = 0
        for param in self.parameters():
            num_params += param.numel()
        print("Network [{}] was created. Total number of parameters: {:.1f} million. "
              "To see the architecture, do print(network).".format(self.__class__.__name__, num_params / 1000000))

    def init_weights(self, init_type='normal', gain=0.02):
        def init_func(m):
            classname = m.__class__.__name__
            if 'BatchNorm2d' in classname:
                if hasattr(m, 'weight') and m.weight is not None:
                    init.normal_(m.weight.data, 1.0, gain)
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant_(m.bias.data, 0.0)
            elif ('Conv' in classname or 'Linear' in classname) and hasattr(m, 'weight'):
                if init_type == 'normal':
                    init.normal_(m.weight.data, 0.0, gain)
                elif init_type == 'xavier':
                    init.xavier_normal_(m.weight.data, gain=gain)
                elif init_type == 'xavier_uniform':
                    init.xavier_uniform_(m.weight.data, gain=1.0)
                elif init_type == 'kaiming':
                    init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif init_type == 'orthogonal':
                    init.orthogonal_(m.weight.data, gain=gain)
                elif init_type == 'none':  # fall back to PyTorch's default init
                    m.reset_parameters()
                else:
                    raise NotImplementedError("initialization method '{}' is not implemented".format(init_type))
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant_(m.bias.data, 0.0)

        self.apply(init_func)

    def forward(self, *inputs):
        pass


class MaskNorm(nn.Module):
    """Region-wise normalization: foreground and background (as given by a
    binary mask) are normalized independently so their statistics do not mix."""

    def __init__(self, norm_nc):
        super(MaskNorm, self).__init__()
        self.norm_layer = nn.InstanceNorm2d(norm_nc, affine=False)

    def normalize_region(self, region, mask):
        b, c, h, w = region.size()

        num_pixels = mask.sum((2, 3), keepdim=True)  # size: (b, 1, 1, 1)
        num_pixels[num_pixels == 0] = 1
        mu = region.sum((2, 3), keepdim=True) / num_pixels  # size: (b, c, 1, 1)

        # Fill the masked-out area with the region mean, so InstanceNorm2d
        # computes statistics as if only the region were present.
        normalized_region = self.norm_layer(region + (1 - mask) * mu)
        return normalized_region * torch.sqrt(num_pixels / (h * w))

    def forward(self, x, mask):
        mask = mask.detach()
        normalized_foreground = self.normalize_region(x * mask, mask)
        normalized_background = self.normalize_region(x * (1 - mask), 1 - mask)
        return normalized_foreground + normalized_background
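

# Usage sketch for MaskNorm (illustration only; the tensor shapes below are
# assumptions, not values used elsewhere in this file):
#
#     mask_norm = MaskNorm(norm_nc=8)
#     x = torch.randn(2, 8, 16, 16)
#     mask = (torch.rand(2, 1, 16, 16) > 0.5).float()  # 1 = foreground
#     y = mask_norm(x, mask)  # same shape as x: (2, 8, 16, 16)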


class SPADENorm(nn.Module):
    """SPADE-style conditional normalization: a parameter-free normalization
    followed by a spatially varying affine transform predicted from the
    segmentation map."""

    def __init__(self, opt, norm_type, norm_nc, label_nc):
        super(SPADENorm, self).__init__()
        self.param_opt = opt
        self.noise_scale = nn.Parameter(torch.zeros(norm_nc))

        assert norm_type.startswith('alias')
        param_free_norm_type = norm_type[len('alias'):]
        if param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'mask':
            self.param_free_norm = MaskNorm(norm_nc)
        else:
            raise ValueError(
                "'{}' is not a recognized parameter-free normalization type in SPADENorm".format(param_free_norm_type)
            )

        nhidden = 128
        ks = 3
        pw = ks // 2
        self.conv_shared = nn.Sequential(nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw), nn.ReLU())
        self.conv_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
        self.conv_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)

    def forward(self, x, seg, misalign_mask=None):
        # Part 1. Generate parameter-free normalized activations.
        b, c, h, w = x.size()
        if self.param_opt.cuda:
            noise = (torch.randn(b, w, h, 1).cuda() * self.noise_scale).transpose(1, 3)
        else:
            noise = (torch.randn(b, w, h, 1) * self.noise_scale).transpose(1, 3)

        if misalign_mask is None:
            normalized = self.param_free_norm(x + noise)
        else:
            normalized = self.param_free_norm(x + noise, misalign_mask)

        # Part 2. Produce affine parameters conditioned on the segmentation map.
        actv = self.conv_shared(seg)
        gamma = self.conv_gamma(actv)
        beta = self.conv_beta(actv)

        # Apply the affine parameters.
        output = normalized * (1 + gamma) + beta
        return output
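

# In symbols, SPADENorm computes (matching the forward pass above):
#
#     out = Norm(x + noise) * (1 + gamma(seg)) + beta(seg)
#
# where Norm is the parameter-free normalization chosen in __init__, the noise
# is scaled per channel by the learned `noise_scale`, and gamma/beta are
# per-pixel modulation maps predicted from the segmentation.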


class SPADEResBlock(nn.Module):
    """Residual block in which every normalization layer is a SPADENorm
    conditioned on the (optionally misalignment-masked) segmentation map."""

    def __init__(self, opt, input_nc, output_nc, use_mask_norm=True):
        super(SPADEResBlock, self).__init__()
        self.param_opt = opt
        self.learned_shortcut = (input_nc != output_nc)
        middle_nc = min(input_nc, output_nc)

        self.conv_0 = nn.Conv2d(input_nc, middle_nc, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(middle_nc, output_nc, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(input_nc, output_nc, kernel_size=1, bias=False)

        subnorm_type = opt.norm_G
        if subnorm_type.startswith('spectral'):
            subnorm_type = subnorm_type[len('spectral'):]
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        gen_semantic_nc = opt.gen_semantic_nc
        if use_mask_norm:
            subnorm_type = 'aliasmask'
            gen_semantic_nc = gen_semantic_nc + 1

        self.norm_0 = SPADENorm(opt, subnorm_type, input_nc, gen_semantic_nc)
        self.norm_1 = SPADENorm(opt, subnorm_type, middle_nc, gen_semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADENorm(opt, subnorm_type, input_nc, gen_semantic_nc)

        self.relu = nn.LeakyReLU(0.2)

    def shortcut(self, x, seg, misalign_mask):
        if self.learned_shortcut:
            return self.conv_s(self.norm_s(x, seg, misalign_mask))
        else:
            return x

    def forward(self, x, seg, misalign_mask=None):
        seg = F.interpolate(seg, size=x.size()[2:], mode='nearest')
        if misalign_mask is not None:
            misalign_mask = F.interpolate(misalign_mask, size=x.size()[2:], mode='nearest')

        x_s = self.shortcut(x, seg, misalign_mask)
        dx = self.conv_0(self.relu(self.norm_0(x, seg, misalign_mask)))
        dx = self.conv_1(self.relu(self.norm_1(dx, seg, misalign_mask)))
        output = x_s + dx
        return output


class SPADEGenerator(BaseNetwork):
    """SPADE generator that upsamples from a coarse latent grid, concatenating
    a freshly convolved multi-scale copy of the input at every resolution."""

    def __init__(self, opt, input_nc):
        super(SPADEGenerator, self).__init__()
        self.num_upsampling_layers = opt.num_upsampling_layers
        self.param_opt = opt

        self.sh, self.sw = self.compute_latent_vector_size(opt)

        nf = opt.ngf
        self.conv_0 = nn.Conv2d(input_nc, nf * 16, kernel_size=3, padding=1)
        for i in range(1, 8):
            self.add_module('conv_{}'.format(i), nn.Conv2d(input_nc, 16, kernel_size=3, padding=1))

        self.head_0 = SPADEResBlock(opt, nf * 16, nf * 16, use_mask_norm=False)

        self.G_middle_0 = SPADEResBlock(opt, nf * 16 + 16, nf * 16, use_mask_norm=False)
        self.G_middle_1 = SPADEResBlock(opt, nf * 16 + 16, nf * 16, use_mask_norm=False)

        self.up_0 = SPADEResBlock(opt, nf * 16 + 16, nf * 8, use_mask_norm=False)
        self.up_1 = SPADEResBlock(opt, nf * 8 + 16, nf * 4, use_mask_norm=False)
        self.up_2 = SPADEResBlock(opt, nf * 4 + 16, nf * 2, use_mask_norm=False)
        self.up_3 = SPADEResBlock(opt, nf * 2 + 16, nf * 1, use_mask_norm=False)
        if self.num_upsampling_layers == 'most':
            self.up_4 = SPADEResBlock(opt, nf * 1 + 16, nf // 2, use_mask_norm=False)
            nf = nf // 2

        self.conv_img = nn.Conv2d(nf, 3, kernel_size=3, padding=1)

        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        self.relu = nn.LeakyReLU(0.2)
        self.tanh = nn.Tanh()

    def compute_latent_vector_size(self, opt):
        if self.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif self.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif self.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError("opt.num_upsampling_layers '{}' is not recognized".format(self.num_upsampling_layers))

        sh = opt.fine_height // 2**num_up_layers
        sw = opt.fine_width // 2**num_up_layers
        return sh, sw

    def forward(self, x, seg):
        # Build an 8-level pyramid of the input and convolve each level once;
        # features[i] is concatenated to the activations at the matching scale.
        samples = [F.interpolate(x, size=(self.sh * 2**i, self.sw * 2**i), mode='nearest') for i in range(8)]
        features = [self._modules['conv_{}'.format(i)](samples[i]) for i in range(8)]

        x = self.head_0(features[0], seg)
        x = self.up(x)
        x = self.G_middle_0(torch.cat((x, features[1]), 1), seg)
        if self.num_upsampling_layers in ['more', 'most']:
            x = self.up(x)
        x = self.G_middle_1(torch.cat((x, features[2]), 1), seg)

        x = self.up(x)
        x = self.up_0(torch.cat((x, features[3]), 1), seg)
        x = self.up(x)
        x = self.up_1(torch.cat((x, features[4]), 1), seg)
        x = self.up(x)
        x = self.up_2(torch.cat((x, features[5]), 1), seg)
        x = self.up(x)
        x = self.up_3(torch.cat((x, features[6]), 1), seg)
        if self.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(torch.cat((x, features[7]), 1), seg)

        x = self.conv_img(self.relu(x))
        return self.tanh(x)
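

# Note on resolutions, derived from compute_latent_vector_size and forward
# above: the output is (sh * 2**k, sw * 2**k) for k up/skip stages, which
# equals (opt.fine_height, opt.fine_width) only when both are divisible by
# 2**num_up_layers. Also, because this variant concatenates features[2] after
# the conditional second upsample, 'normal' appears to leave features[2] one
# scale larger than x; 'more' and 'most' are the settings whose skip wiring
# lines up.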
| ######################################################################## | |
| ######################################################################## | |


class NLayerDiscriminator(BaseNetwork):
    """PatchGAN discriminator whose layers are grouped so that intermediate
    feature maps can be returned for the GAN feature-matching loss."""

    def __init__(self, opt):
        super().__init__()
        self.no_ganFeat_loss = opt.no_ganFeat_loss

        nf = opt.ndf
        kw = 4
        pw = int(np.ceil((kw - 1.0) / 2))
        norm_layer = get_nonspade_norm_layer(opt.norm_D)
        input_nc = opt.gen_semantic_nc + 3
        # input_nc = opt.gen_semantic_nc + 13

        sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=pw),
                     nn.LeakyReLU(0.2, False)]]
        for n in range(1, opt.n_layers_D):
            nf_prev = nf
            nf = min(nf * 2, 512)
            sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=pw)),
                          nn.LeakyReLU(0.2, False)]]
        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=pw)]]

        # We divide the layers into groups to extract intermediate layer outputs.
        for n in range(len(sequence)):
            self.add_module('model' + str(n), nn.Sequential(*sequence[n]))

    def forward(self, input):
        results = [input]
        for submodel in self.children():
            intermediate_output = submodel(results[-1])
            results.append(intermediate_output)

        get_intermediate_features = not self.no_ganFeat_loss
        if get_intermediate_features:
            return results[1:]
        else:
            return results[-1]


class MultiscaleDiscriminator(BaseNetwork):
    """Runs opt.num_D copies of NLayerDiscriminator on progressively
    downsampled versions of the input."""

    def __init__(self, opt):
        super().__init__()
        self.no_ganFeat_loss = opt.no_ganFeat_loss

        for i in range(opt.num_D):
            subnetD = NLayerDiscriminator(opt)
            self.add_module('discriminator_%d' % i, subnetD)

    def downsample(self, input):
        return F.avg_pool2d(input, kernel_size=3, stride=2, padding=[1, 1], count_include_pad=False)

    # Returns a list of lists of discriminator outputs.
    # The final result is of size opt.num_D x opt.n_layers_D.
    def forward(self, input):
        result = []
        get_intermediate_features = not self.no_ganFeat_loss
        for name, D in self.named_children():
            out = D(input)
            if not get_intermediate_features:
                out = [out]
            result.append(out)
            input = self.downsample(input)
        return result


class GANLoss(nn.Module):
    """GAN objective supporting the 'ls', 'original', 'w', and 'hinge' modes;
    also accepts the (possibly nested) list outputs of MultiscaleDiscriminator."""

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0, tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_tensor = None
        self.fake_label_tensor = None
        self.zero_tensor = None
        self.Tensor = tensor
        self.gan_mode = gan_mode
        if gan_mode not in ('ls', 'original', 'w', 'hinge'):
            raise ValueError('Unexpected gan_mode {}'.format(gan_mode))

    def get_target_tensor(self, input, target_is_real):
        if target_is_real:
            if self.real_label_tensor is None:
                self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
                self.real_label_tensor.requires_grad_(False)
            return self.real_label_tensor.expand_as(input)
        else:
            if self.fake_label_tensor is None:
                self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label)
                self.fake_label_tensor.requires_grad_(False)
            return self.fake_label_tensor.expand_as(input)

    def get_zero_tensor(self, input):
        if self.zero_tensor is None:
            self.zero_tensor = self.Tensor(1).fill_(0)
            self.zero_tensor.requires_grad_(False)
        return self.zero_tensor.expand_as(input)

    def loss(self, input, target_is_real, for_discriminator=True):
        if self.gan_mode == 'original':  # cross-entropy loss
            target_tensor = self.get_target_tensor(input, target_is_real)
            loss = F.binary_cross_entropy_with_logits(input, target_tensor)
            return loss
        elif self.gan_mode == 'ls':
            target_tensor = self.get_target_tensor(input, target_is_real)
            return F.mse_loss(input, target_tensor)
        elif self.gan_mode == 'hinge':
            if for_discriminator:
                if target_is_real:
                    minval = torch.min(input - 1, self.get_zero_tensor(input))
                    loss = -torch.mean(minval)
                else:
                    minval = torch.min(-input - 1, self.get_zero_tensor(input))
                    loss = -torch.mean(minval)
            else:
                assert target_is_real, "The generator's hinge loss must be aiming for real"
                loss = -torch.mean(input)
            return loss
        else:
            # wgan
            if target_is_real:
                return -input.mean()
            else:
                return input.mean()

    def __call__(self, input, target_is_real, for_discriminator=True):
        # Computing the loss is a bit involved because `input` may not be a
        # tensor but a list of tensors, as returned by a multiscale discriminator.
        if isinstance(input, list):
            loss = 0
            for pred_i in input:
                if isinstance(pred_i, list):
                    pred_i = pred_i[-1]
                loss_tensor = self.loss(pred_i, target_is_real, for_discriminator)
                bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
                new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1)
                loss += new_loss
            return loss / len(input)
        else:
            return self.loss(input, target_is_real, for_discriminator)
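

# Usage sketch for GANLoss with MultiscaleDiscriminator (illustration only;
# `pred_fake` and `pred_real` stand for the nested lists a multiscale
# discriminator returns for generated and real inputs):
#
#     criterion = GANLoss('hinge')
#     loss_d_fake = criterion(pred_fake, False, for_discriminator=True)
#     loss_d_real = criterion(pred_real, True, for_discriminator=True)
#     loss_g = criterion(pred_fake, True, for_discriminator=False)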


def get_nonspade_norm_layer(norm_type='instance'):
    # Helper to get the number of output channels of the previous layer.
    def get_out_channel(layer):
        if hasattr(layer, 'out_channels'):
            return getattr(layer, 'out_channels')
        return layer.weight.size(0)

    # This function is returned and applied to each conv layer.
    def add_norm_layer(layer):
        nonlocal norm_type
        subnorm_type = norm_type  # default when there is no 'spectral' prefix
        if norm_type.startswith('spectral'):
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]

        if subnorm_type == 'none' or len(subnorm_type) == 0:
            return layer

        # Remove the bias of the previous layer, which is meaningless
        # since it has no effect after normalization.
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)

        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
        # elif subnorm_type == 'sync_batch':
        #     norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
        else:
            raise ValueError('normalization layer %s is not recognized' % subnorm_type)

        return nn.Sequential(layer, norm_layer)

    return add_norm_layer
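

if __name__ == '__main__':
    # Minimal CPU smoke test, as a sketch only: the option values below are
    # assumptions chosen to exercise the code, not a configuration shipped
    # with this file; they just provide the `opt` attributes the classes read.
    from argparse import Namespace

    opt = Namespace(
        cuda=False,
        norm_G='spectralaliasinstance',
        norm_D='spectralinstance',
        gen_semantic_nc=7,
        num_upsampling_layers='most',   # 'most' => resolutions divisible by 128
        ngf=16,
        ndf=16,
        n_layers_D=3,
        num_D=2,
        no_ganFeat_loss=False,
        fine_height=256,
        fine_width=256,
    )

    netG = SPADEGenerator(opt, input_nc=9)
    netG.init_weights('xavier', 0.02)
    netG.print_network()

    x = torch.randn(1, 9, opt.fine_height, opt.fine_width)
    seg = torch.randn(1, opt.gen_semantic_nc, opt.fine_height, opt.fine_width)
    with torch.no_grad():
        fake = netG(x, seg)                      # (1, 3, 256, 256)

    netD = MultiscaleDiscriminator(opt)
    criterion = GANLoss('hinge')
    pred_fake = netD(torch.cat((seg, fake), 1))  # gen_semantic_nc + 3 channels
    loss_g = criterion(pred_fake, True, for_discriminator=False)
    print(fake.shape, loss_g.item())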