| """ |
| This file defines the core research contribution |
| """ |
import matplotlib
matplotlib.use('Agg')
import math

import torch
import torch.nn.functional as F
from torch import nn

from models.encoders import psp_encoders
from models.stylegan2.model import Generator
from configs.paths_config import model_paths


def get_keys(d, name):
    """Return the entries of a (possibly nested) state dict whose keys start with
    `name`, with the `name.` prefix stripped from each key."""
    if 'state_dict' in d:
        d = d['state_dict']
    d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}
    return d_filt


class pSp(nn.Module):
    """Wraps an encoder and a StyleGAN2 decoder into a single image-to-image network."""

    def __init__(self, opts, ckpt=None):
        super(pSp, self).__init__()
        self.set_opts(opts)
        # Number of style vectors in W+ for the target resolution:
        # 2 * log2(output_size) - 2, e.g. 18 styles for a 1024x1024 generator.
        self.opts.n_styles = int(math.log(self.opts.output_size, 2)) * 2 - 2
        # Define architecture: encoder, StyleGAN2 decoder, and a 256x256 pooling layer.
        self.encoder = self.set_encoder()
        self.decoder = Generator(self.opts.output_size, 512, 8)
        self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        self.load_weights(ckpt)


    def set_encoder(self):
        if self.opts.encoder_type == 'GradualStyleEncoder':
            encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts)
        elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoW':
            encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoW(50, 'ir_se', self.opts)
        elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoWPlus':
            encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoWPlus(50, 'ir_se', self.opts)
        else:
            raise Exception('{} is not a valid encoder type'.format(self.opts.encoder_type))
        return encoder


    def load_weights(self, ckpt=None):
        if self.opts.checkpoint_path is not None:
            # Resume the full pSp model (encoder + decoder) from a single checkpoint.
            print('Loading pSp from checkpoint: {}'.format(self.opts.checkpoint_path))
            if ckpt is None:
                ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
            self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=False)
            self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=False)
            self.__load_latent_avg(ckpt)
        else:
            # Otherwise initialize the encoder from IR-SE50 weights and the decoder
            # from pretrained StyleGAN2 weights.
            print('Loading encoder weights from irse50!')
            encoder_ckpt = torch.load(model_paths['ir_se50'])
            if self.opts.label_nc != 0:
                # Drop the pretrained RGB input layer when the input has a different
                # number of channels (e.g. label maps).
                encoder_ckpt = {k: v for k, v in encoder_ckpt.items() if "input_layer" not in k}
            self.encoder.load_state_dict(encoder_ckpt, strict=False)
            print('Loading decoder weights from pretrained!')
            ckpt = torch.load(self.opts.stylegan_weights)
            self.decoder.load_state_dict(ckpt['g_ema'], strict=False)
            if self.opts.learn_in_w:
                self.__load_latent_avg(ckpt, repeat=1)
            else:
                self.__load_latent_avg(ckpt, repeat=self.opts.n_styles)

        # Optionally overwrite the decoder with toonified StyleGAN2 weights.
        if self.opts.toonify_weights is not None:
            ckpt = torch.load(self.opts.toonify_weights)
            self.decoder.load_state_dict(ckpt['g_ema'], strict=False)
            self.opts.toonify_weights = None


    def forward(self, x1, x2=None, resize=True, latent_mask=None, randomize_noise=True,
                inject_latent=None, return_latents=False, alpha=None, use_feature=True,
                first_layer_feature_ind=0, use_skip=False, zero_noise=False, editing_w=None):
        # Encode the input into style codes; also collect intermediate encoder
        # features (the full set when skip connections are used).
        feats = None
        codes, feats = self.encoder(x1, return_feat=True, return_full=use_skip)
        if x2 is not None:
            # When a second image is given, take the style codes from it instead.
            codes = self.encoder(x2)

        # Shift the predicted codes by the generator's average latent.
        if self.opts.start_from_latent_avg:
            if self.opts.learn_in_w:
                codes = codes + self.latent_avg.repeat(codes.shape[0], 1)
            else:
                codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)

        # Optionally mix in injected latents (weighted by alpha) or zero out
        # the selected style layers.
        if latent_mask is not None:
            for i in latent_mask:
                if inject_latent is not None:
                    if alpha is not None:
                        codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i]
                    else:
                        codes[:, i] = inject_latent[:, i]
                else:
                    codes[:, i] = 0

        # Select encoder features for the decoder: the first-layer features when
        # use_feature is set, plus skip features and the fusion module when skip
        # connections are enabled.
        first_layer_feats, skip_layer_feats, fusion = None, None, None
        if use_feature:
            first_layer_feats = feats[0:2]
            if use_skip:
                skip_layer_feats = feats[2:]
                fusion = self.encoder.fusion

        images, result_latent = self.decoder(
            [codes],
            input_is_latent=True,
            randomize_noise=randomize_noise,
            return_latents=return_latents,
            first_layer_feature=first_layer_feats,
            first_layer_feature_ind=first_layer_feature_ind,
            skip_layer_feature=skip_layer_feats,
            fusion_block=fusion,
            zero_noise=zero_noise,
            editing_w=editing_w)

        if resize:
            # For 1024 outputs, downsample by 4x (to 256); otherwise pool to 256x256.
            if self.opts.output_size == 1024:
                images = F.adaptive_avg_pool2d(images, (images.shape[2] // 4, images.shape[3] // 4))
            else:
                images = self.face_pool(images)

        if return_latents:
            return images, result_latent
        else:
            return images


    def set_opts(self, opts):
        self.opts = opts


    def __load_latent_avg(self, ckpt, repeat=None):
        # Cache the generator's average latent (used when start_from_latent_avg is set),
        # optionally repeated once per style vector.
        if 'latent_avg' in ckpt:
            self.latent_avg = ckpt['latent_avg'].to(self.opts.device)
            if repeat is not None:
                self.latent_avg = self.latent_avg.repeat(repeat, 1)
        else:
            self.latent_avg = None
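

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the training pipeline).
# The attribute names on `opts` below are the ones this module actually reads;
# the concrete values, the checkpoint path, and the 256x256 input resolution
# are assumptions and may need adjusting for a given setup.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from argparse import Namespace

    opts = Namespace(
        encoder_type='GradualStyleEncoder',
        output_size=1024,
        checkpoint_path='pretrained_models/psp_checkpoint.pt',  # hypothetical path
        device='cuda',
        start_from_latent_avg=True,
        learn_in_w=False,
        label_nc=0,
        toonify_weights=None,
    )
    net = pSp(opts).to(opts.device).eval()
    x = torch.randn(1, 3, 256, 256, device=opts.device)  # assumed encoder input size
    with torch.no_grad():
        out = net(x, resize=True, randomize_noise=False)
    print(out.shape)  # expected (1, 3, 256, 256) after resizing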