from torch import nn

import torch.nn.functional as F
import torch
import math

from src.facerender.sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d
from src.facerender.sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d

import torch.nn.utils.spectral_norm as spectral_norm

def kp2gaussian(kp, spatial_size, kp_variance):
    """
    Transform a keypoint into a gaussian-like representation.
    """
    mean = kp['value']

    coordinate_grid = make_coordinate_grid(spatial_size, mean.type())
    number_of_leading_dimensions = len(mean.shape) - 1
    shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
    coordinate_grid = coordinate_grid.view(*shape)
    repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 1)
    coordinate_grid = coordinate_grid.repeat(*repeats)

    # Reshape the mean so it broadcasts against the coordinate grid
    shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 3)
    mean = mean.view(*shape)

    mean_sub = (coordinate_grid - mean)

    out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)

    return out
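
# Illustrative usage (a sketch; the shapes and values are assumptions, not part
# of the module):
#
#   kp = {'value': torch.zeros(2, 15, 3)}  # (batch, num_kp, xyz), coords in [-1, 1]
#   heatmaps = kp2gaussian(kp, spatial_size=(16, 64, 64), kp_variance=0.01)
#   # heatmaps.shape -> (2, 15, 16, 64, 64): one 3D gaussian volume per keypoint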

def make_coordinate_grid_2d(spatial_size, type):
    """
    Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
    """
    h, w = spatial_size
    x = torch.arange(w).type(type)
    y = torch.arange(h).type(type)

    x = (2 * (x / (w - 1)) - 1)
    y = (2 * (y / (h - 1)) - 1)

    yy = y.view(-1, 1).repeat(1, w)
    xx = x.view(1, -1).repeat(h, 1)

    meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)

    return meshed

def make_coordinate_grid(spatial_size, type):
    """
    Create a 3D meshgrid [-1,1] x [-1,1] x [-1,1] of given spatial_size.
    """
    d, h, w = spatial_size
    x = torch.arange(w).type(type)
    y = torch.arange(h).type(type)
    z = torch.arange(d).type(type)

    # Normalize coordinates to [-1, 1]
    x = (2 * (x / (w - 1)) - 1)
    y = (2 * (y / (h - 1)) - 1)
    z = (2 * (z / (d - 1)) - 1)

    yy = y.view(1, -1, 1).repeat(d, 1, w)
    xx = x.view(1, 1, -1).repeat(d, h, 1)
    zz = z.view(-1, 1, 1).repeat(1, h, w)

    meshed = torch.cat([xx.unsqueeze_(3), yy.unsqueeze_(3), zz.unsqueeze_(3)], 3)

    return meshed
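
# Illustrative check (a sketch; the sizes are assumptions):
#
#   grid = make_coordinate_grid((4, 8, 8), 'torch.FloatTensor')
#   # grid.shape -> (4, 8, 8, 3); last axis is (x, y, z), each in [-1, 1]
#   # grid[0, 0, 0] -> tensor([-1., -1., -1.])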

class ResBottleneck(nn.Module):
    def __init__(self, in_features, stride):
        super(ResBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features//4, kernel_size=1)
        self.conv2 = nn.Conv2d(in_channels=in_features//4, out_channels=in_features//4, kernel_size=3, padding=1, stride=stride)
        self.conv3 = nn.Conv2d(in_channels=in_features//4, out_channels=in_features, kernel_size=1)
        self.norm1 = BatchNorm2d(in_features//4, affine=True)
        self.norm2 = BatchNorm2d(in_features//4, affine=True)
        self.norm3 = BatchNorm2d(in_features, affine=True)

        self.stride = stride
        if self.stride != 1:
            # Strided 1x1 projection so the skip connection matches the main path
            self.skip = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=1, stride=stride)
            self.norm4 = BatchNorm2d(in_features, affine=True)

    def forward(self, x):
        out = self.conv1(x)
        out = self.norm1(out)
        out = F.relu(out)
        out = self.conv2(out)
        out = self.norm2(out)
        out = F.relu(out)
        out = self.conv3(out)
        out = self.norm3(out)
        if self.stride != 1:
            x = self.skip(x)
            x = self.norm4(x)
        out += x
        out = F.relu(out)
        return out
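
# Illustrative shapes (a sketch; the feature/stride values are assumptions):
#
#   block = ResBottleneck(in_features=256, stride=2)
#   y = block(torch.randn(1, 256, 64, 64))
#   # y.shape -> (1, 256, 32, 32): channels preserved, spatial size halved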

class ResBlock2d(nn.Module):
    """
    Res block, preserve spatial resolution.
    """

    def __init__(self, in_features, kernel_size, padding):
        super(ResBlock2d, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
                               padding=padding)
        self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
                               padding=padding)
        self.norm1 = BatchNorm2d(in_features, affine=True)
        self.norm2 = BatchNorm2d(in_features, affine=True)

    def forward(self, x):
        out = self.norm1(x)
        out = F.relu(out)
        out = self.conv1(out)
        out = self.norm2(out)
        out = F.relu(out)
        out = self.conv2(out)
        out += x
        return out


class ResBlock3d(nn.Module):
    """
    Res block, preserve spatial resolution.
    """

    def __init__(self, in_features, kernel_size, padding):
        super(ResBlock3d, self).__init__()
        self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
                               padding=padding)
        self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
                               padding=padding)
        self.norm1 = BatchNorm3d(in_features, affine=True)
        self.norm2 = BatchNorm3d(in_features, affine=True)

    def forward(self, x):
        out = self.norm1(x)
        out = F.relu(out)
        out = self.conv1(out)
        out = self.norm2(out)
        out = F.relu(out)
        out = self.conv2(out)
        out += x
        return out


class UpBlock2d(nn.Module):
    """
    Upsampling block for use in decoder.
    """

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(UpBlock2d, self).__init__()

        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
                              padding=padding, groups=groups)
        self.norm = BatchNorm2d(out_features, affine=True)

    def forward(self, x):
        out = F.interpolate(x, scale_factor=2)
        out = self.conv(out)
        out = self.norm(out)
        out = F.relu(out)
        return out


class UpBlock3d(nn.Module):
    """
    Upsampling block for use in decoder.
    """

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(UpBlock3d, self).__init__()

        self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
                              padding=padding, groups=groups)
        self.norm = BatchNorm3d(out_features, affine=True)

    def forward(self, x):
        # Upsample H and W only; keep the depth dimension unchanged
        out = F.interpolate(x, scale_factor=(1, 2, 2))
        out = self.conv(out)
        out = self.norm(out)
        out = F.relu(out)
        return out
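
# Illustrative shapes (a sketch; the sizes are assumptions):
#
#   up = UpBlock3d(in_features=64, out_features=32)
#   y = up(torch.randn(1, 64, 16, 32, 32))   # (batch, C, D, H, W)
#   # y.shape -> (1, 32, 16, 64, 64): depth kept, H and W doubled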

class DownBlock2d(nn.Module):
    """
    Downsampling block for use in encoder.
    """

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(DownBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
                              padding=padding, groups=groups)
        self.norm = BatchNorm2d(out_features, affine=True)
        self.pool = nn.AvgPool2d(kernel_size=(2, 2))

    def forward(self, x):
        out = self.conv(x)
        out = self.norm(out)
        out = F.relu(out)
        out = self.pool(out)
        return out


class DownBlock3d(nn.Module):
    """
    Downsampling block for use in encoder.
    """

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(DownBlock3d, self).__init__()
        '''
        self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
                              padding=padding, groups=groups, stride=(1, 2, 2))
        '''
        self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
                              padding=padding, groups=groups)
        self.norm = BatchNorm3d(out_features, affine=True)
        # Pool H and W only; keep the depth dimension unchanged
        self.pool = nn.AvgPool3d(kernel_size=(1, 2, 2))

    def forward(self, x):
        out = self.conv(x)
        out = self.norm(out)
        out = F.relu(out)
        out = self.pool(out)
        return out


class SameBlock2d(nn.Module):
    """
    Simple block, preserve spatial resolution.
    """

    def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):
        super(SameBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                              kernel_size=kernel_size, padding=padding, groups=groups)
        self.norm = BatchNorm2d(out_features, affine=True)
        if lrelu:
            self.ac = nn.LeakyReLU()
        else:
            self.ac = nn.ReLU()

    def forward(self, x):
        out = self.conv(x)
        out = self.norm(out)
        out = self.ac(out)
        return out


class Encoder(nn.Module):
    """
    Hourglass Encoder
    """

    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Encoder, self).__init__()

        down_blocks = []
        for i in range(num_blocks):
            down_blocks.append(DownBlock3d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),
                                           min(max_features, block_expansion * (2 ** (i + 1))),
                                           kernel_size=3, padding=1))
        self.down_blocks = nn.ModuleList(down_blocks)

    def forward(self, x):
        outs = [x]
        for down_block in self.down_blocks:
            outs.append(down_block(outs[-1]))
        return outs


class Decoder(nn.Module):
    """
    Hourglass Decoder
    """

    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Decoder, self).__init__()

        up_blocks = []

        for i in reversed(range(num_blocks)):
            in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
            out_filters = min(max_features, block_expansion * (2 ** i))
            up_blocks.append(UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))

        self.up_blocks = nn.ModuleList(up_blocks)

        self.out_filters = block_expansion + in_features

        self.conv = nn.Conv3d(in_channels=self.out_filters, out_channels=self.out_filters, kernel_size=3, padding=1)
        self.norm = BatchNorm3d(self.out_filters, affine=True)

    def forward(self, x):
        out = x.pop()

        for up_block in self.up_blocks:
            out = up_block(out)
            skip = x.pop()
            out = torch.cat([out, skip], dim=1)

        out = self.conv(out)
        out = self.norm(out)
        out = F.relu(out)
        return out


class Hourglass(nn.Module):
    """
    Hourglass architecture.
    """

    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Hourglass, self).__init__()
        self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
        self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
        self.out_filters = self.decoder.out_filters

    def forward(self, x):
        return self.decoder(self.encoder(x))
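
# Illustrative shapes (a sketch; the hyperparameters are assumptions):
#
#   hg = Hourglass(block_expansion=32, in_features=4, num_blocks=3, max_features=256)
#   y = hg(torch.randn(1, 4, 16, 64, 64))    # (batch, C, D, H, W)
#   # y.shape -> (1, hg.out_filters, 16, 64, 64), with hg.out_filters == 32 + 4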

class KPHourglass(nn.Module):
    """
    Hourglass architecture for keypoint prediction: a 2D encoder, a reshape
    from 2D features to a 3D volume, and a 3D decoder.
    """

    def __init__(self, block_expansion, in_features, reshape_features, reshape_depth, num_blocks=3, max_features=256):
        super(KPHourglass, self).__init__()

        self.down_blocks = nn.Sequential()
        for i in range(num_blocks):
            self.down_blocks.add_module('down' + str(i), DownBlock2d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),
                                                                     min(max_features, block_expansion * (2 ** (i + 1))),
                                                                     kernel_size=3, padding=1))

        in_filters = min(max_features, block_expansion * (2 ** num_blocks))
        self.conv = nn.Conv2d(in_channels=in_filters, out_channels=reshape_features, kernel_size=1)

        self.up_blocks = nn.Sequential()
        for i in range(num_blocks):
            in_filters = min(max_features, block_expansion * (2 ** (num_blocks - i)))
            out_filters = min(max_features, block_expansion * (2 ** (num_blocks - i - 1)))
            self.up_blocks.add_module('up' + str(i), UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))

        self.reshape_depth = reshape_depth
        self.out_filters = out_filters

    def forward(self, x):
        out = self.down_blocks(x)
        out = self.conv(out)
        bs, c, h, w = out.shape
        # Fold channels into a depth axis: (bs, c, h, w) -> (bs, c/d, d, h, w)
        out = out.view(bs, c // self.reshape_depth, self.reshape_depth, h, w)
        out = self.up_blocks(out)

        return out
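
# Illustrative shapes (a sketch; the hyperparameters are assumptions, chosen so
# that reshape_features / reshape_depth matches the first 3D up block):
#
#   kp_hg = KPHourglass(block_expansion=32, in_features=3, reshape_features=4096,
#                       reshape_depth=16, num_blocks=3, max_features=256)
#   y = kp_hg(torch.randn(1, 3, 256, 256))
#   # after down blocks: (1, 256, 32, 32); after 1x1 conv: (1, 4096, 32, 32)
#   # after reshape:     (1, 256, 16, 32, 32); after up blocks: (1, 32, 16, 256, 256)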

class AntiAliasInterpolation2d(nn.Module):
    """
    Band-limited downsampling, for better preservation of the input signal.
    """
    def __init__(self, channels, scale):
        super(AntiAliasInterpolation2d, self).__init__()
        sigma = (1 / scale - 1) / 2
        kernel_size = 2 * round(sigma * 4) + 1
        self.ka = kernel_size // 2
        self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka

        kernel_size = [kernel_size, kernel_size]
        sigma = [sigma, sigma]

        # Build a separable gaussian kernel as the product of 1D gaussians
        kernel = 1
        meshgrids = torch.meshgrid(
            [
                torch.arange(size, dtype=torch.float32)
                for size in kernel_size
            ]
        )
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))

        # Normalize so the kernel sums to one
        kernel = kernel / torch.sum(kernel)
        # Reshape to depthwise-convolution weight layout: (channels, 1, kh, kw)
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))

        self.register_buffer('weight', kernel)
        self.groups = channels
        self.scale = scale
        inv_scale = 1 / scale
        self.int_inv_scale = int(inv_scale)

    def forward(self, input):
        if self.scale == 1.0:
            return input

        # Gaussian blur (depthwise), then subsample by striding
        out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))
        out = F.conv2d(out, weight=self.weight, groups=self.groups)
        out = out[:, :, ::self.int_inv_scale, ::self.int_inv_scale]

        return out
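
# Illustrative usage (a sketch; the scale value is an assumption):
#
#   down = AntiAliasInterpolation2d(channels=3, scale=0.25)
#   y = down(torch.randn(1, 3, 256, 256))
#   # y.shape -> (1, 3, 64, 64): gaussian-blurred, then subsampled 4x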

class SPADE(nn.Module):
    def __init__(self, norm_nc, label_nc):
        super().__init__()

        self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        nhidden = 128

        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=3, padding=1),
            nn.ReLU())
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)

    def forward(self, x, segmap):
        normalized = self.param_free_norm(x)
        # Predict per-pixel scale and shift from the (resized) segmentation map
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        actv = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)
        out = normalized * (1 + gamma) + beta
        return out
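
# Illustrative usage (a sketch; channel counts are assumptions):
#
#   spade = SPADE(norm_nc=64, label_nc=3)
#   y = spade(torch.randn(2, 64, 32, 32), torch.randn(2, 3, 64, 64))
#   # y.shape -> (2, 64, 32, 32); the 3-channel map is resized to 32x32 internally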

class SPADEResnetBlock(nn.Module):
    def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):
        super().__init__()
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)
        self.use_se = use_se
        # Create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
        # Apply spectral norm if specified
        if 'spectral' in norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)
        # Define normalization layers
        self.norm_0 = SPADE(fin, label_nc)
        self.norm_1 = SPADE(fmiddle, label_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(fin, label_nc)

    def forward(self, x, seg1):
        x_s = self.shortcut(x, seg1)
        dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))
        out = x_s + dx
        return out

    def shortcut(self, x, seg1):
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, seg1))
        else:
            x_s = x
        return x_s

    def actvn(self, x):
        return F.leaky_relu(x, 2e-1)
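
# Illustrative usage (a sketch; the norm_G string and sizes are assumptions):
#
#   block = SPADEResnetBlock(fin=64, fout=32, norm_G='spadespectralinstance', label_nc=3)
#   y = block(torch.randn(2, 64, 32, 32), torch.randn(2, 3, 32, 32))
#   # y.shape -> (2, 32, 32, 32); fin != fout, so a learned 1x1 shortcut is used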

class audio2image(nn.Module):
    def __init__(self, generator, kp_extractor, he_estimator_video, he_estimator_audio, train_params):
        super().__init__()
        # Define networks
        self.generator = generator
        self.kp_extractor = kp_extractor
        self.he_estimator_video = he_estimator_video
        self.he_estimator_audio = he_estimator_audio
        self.train_params = train_params

    def headpose_pred_to_degree(self, pred):
        # Convert a 66-bin classification output into a continuous angle in degrees
        device = pred.device
        idx_tensor = [idx for idx in range(66)]
        idx_tensor = torch.FloatTensor(idx_tensor).to(device)
        pred = F.softmax(pred, dim=1)
        degree = torch.sum(pred * idx_tensor, 1) * 3 - 99

        return degree

    def get_rotation_matrix(self, yaw, pitch, roll):
        # Degrees to radians
        yaw = yaw / 180 * math.pi
        pitch = pitch / 180 * math.pi
        roll = roll / 180 * math.pi

        roll = roll.unsqueeze(1)
        pitch = pitch.unsqueeze(1)
        yaw = yaw.unsqueeze(1)

        roll_mat = torch.cat([torch.ones_like(roll), torch.zeros_like(roll), torch.zeros_like(roll),
                              torch.zeros_like(roll), torch.cos(roll), -torch.sin(roll),
                              torch.zeros_like(roll), torch.sin(roll), torch.cos(roll)], dim=1)
        roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3)

        pitch_mat = torch.cat([torch.cos(pitch), torch.zeros_like(pitch), torch.sin(pitch),
                               torch.zeros_like(pitch), torch.ones_like(pitch), torch.zeros_like(pitch),
                               -torch.sin(pitch), torch.zeros_like(pitch), torch.cos(pitch)], dim=1)
        pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3)

        yaw_mat = torch.cat([torch.cos(yaw), -torch.sin(yaw), torch.zeros_like(yaw),
                             torch.sin(yaw), torch.cos(yaw), torch.zeros_like(yaw),
                             torch.zeros_like(yaw), torch.zeros_like(yaw), torch.ones_like(yaw)], dim=1)
        yaw_mat = yaw_mat.view(yaw_mat.shape[0], 3, 3)

        # Compose rotations: R = R_roll @ R_pitch @ R_yaw
        rot_mat = torch.einsum('bij,bjk,bkm->bim', roll_mat, pitch_mat, yaw_mat)

        return rot_mat
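
    # Illustrative check (a sketch; the angle values are assumptions):
    #
    #   deg = torch.tensor([30.0])
    #   R = self.get_rotation_matrix(deg, torch.zeros(1), torch.zeros(1))
    #   # R.shape -> (1, 3, 3); with pitch = roll = 0 this is a pure yaw rotation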

    def keypoint_transformation(self, kp_canonical, he):
        kp = kp_canonical['value']    # (bs, k, 3)
        yaw, pitch, roll = he['yaw'], he['pitch'], he['roll']
        t, exp = he['t'], he['exp']

        yaw = self.headpose_pred_to_degree(yaw)
        pitch = self.headpose_pred_to_degree(pitch)
        roll = self.headpose_pred_to_degree(roll)

        rot_mat = self.get_rotation_matrix(yaw, pitch, roll)    # (bs, 3, 3)

        # Keypoint rotation
        kp_rotated = torch.einsum('bmp,bkp->bkm', rot_mat, kp)

        # Keypoint translation
        t = t.unsqueeze(1).repeat(1, kp.shape[1], 1)
        kp_t = kp_rotated + t

        # Add expression deviation
        exp = exp.view(exp.shape[0], -1, 3)
        kp_transformed = kp_t + exp

        return {'value': kp_transformed}

    def forward(self, source_image, target_audio):
        pose_source = self.he_estimator_video(source_image)
        pose_generated = self.he_estimator_audio(target_audio)
        kp_canonical = self.kp_extractor(source_image)
        kp_source = self.keypoint_transformation(kp_canonical, pose_source)
        kp_transformed_generated = self.keypoint_transformation(kp_canonical, pose_generated)
        generated = self.generator(source_image, kp_source=kp_source, kp_driving=kp_transformed_generated)
        return generated