"""HiFi-GAN generator and multi-period / multi-scale discriminators,
with an optional NSF-style harmonic source module for pitch conditioning."""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm

from modules.parallel_wavegan.layers import UpsampleNetwork, ConvInUpsampleNetwork
from modules.parallel_wavegan.models.source import SourceModuleHnNSF

LRELU_SLOPE = 0.1

def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)

def apply_weight_norm(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        weight_norm(m)

def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)

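# Illustrative note (not in the original file): at stride 1 this "same"
# padding keeps the sequence length unchanged for odd kernels, e.g.
# get_padding(3, 1) == 1 and get_padding(3, 5) == 5.
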
class ResBlock1(torch.nn.Module):
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.h = h
        # Dilated convolutions; each is paired with a dilation-1 convolution below.
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

    def forward(self, x):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            xt = c2(xt)
            x = xt + x  # residual connection
        return x

    def remove_weight_norm(self):
        # The unqualified name below resolves to torch.nn.utils.remove_weight_norm
        # (imported at module level), not to this method, so there is no recursion.
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)

class ResBlock2(torch.nn.Module):
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.h = h
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            xt = c(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)

class Conv1d1x1(Conv1d):
    """1x1 Conv1d with customized initialization."""

    def __init__(self, in_channels, out_channels, bias):
        """Initialize 1x1 Conv1d module."""
        super(Conv1d1x1, self).__init__(in_channels, out_channels,
                                        kernel_size=1, padding=0,
                                        dilation=1, bias=bias)

class HifiGanGenerator(torch.nn.Module):
    def __init__(self, h, c_out=1):
        super(HifiGanGenerator, self).__init__()
        self.h = h
        self.num_kernels = len(h['resblock_kernel_sizes'])
        self.num_upsamples = len(h['upsample_rates'])

        if h['use_pitch_embed']:
            # NSF-style harmonic source, built only when pitch conditioning is enabled.
            self.harmonic_num = 8
            self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h['upsample_rates']))
            self.m_source = SourceModuleHnNSF(
                sampling_rate=h['audio_sample_rate'],
                harmonic_num=self.harmonic_num)
            self.noise_convs = nn.ModuleList()
        self.conv_pre = weight_norm(Conv1d(80, h['upsample_initial_channel'], 7, 1, padding=3))
        resblock = ResBlock1 if h['resblock'] == '1' else ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(h['upsample_rates'], h['upsample_kernel_sizes'])):
            # Channel count halves at every upsampling stage.
            c_cur = h['upsample_initial_channel'] // (2 ** (i + 1))
            self.ups.append(weight_norm(
                ConvTranspose1d(c_cur * 2, c_cur, k, u, padding=(k - u) // 2)))
            if h['use_pitch_embed']:
                if i + 1 < len(h['upsample_rates']):
                    # Downsample the audio-rate harmonic source to this stage's rate.
                    stride_f0 = np.prod(h['upsample_rates'][i + 1:])
                    self.noise_convs.append(Conv1d(
                        1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
                else:
                    self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = h['upsample_initial_channel'] // (2 ** (i + 1))
            for j, (k, d) in enumerate(zip(h['resblock_kernel_sizes'], h['resblock_dilation_sizes'])):
                self.resblocks.append(resblock(h, ch, k, d))

        self.conv_post = weight_norm(Conv1d(ch, c_out, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)

    def forward(self, x, f0=None):
        if f0 is not None:
            # Build the harmonic source at the audio rate from the frame-level f0.
            f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2)
            har_source, noi_source, uv = self.m_source(f0)
            har_source = har_source.transpose(1, 2)

        x = self.conv_pre(x)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = self.ups[i](x)
            if f0 is not None:
                x_source = self.noise_convs[i](har_source)
                x = x + x_source
            # Average the parallel residual blocks of this stage
            # (the multi-receptive-field fusion of HiFi-GAN).
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)

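# Usage sketch (illustrative, shapes read off the layers above): given a mel
# spectrogram x of shape [B, 80, T_frames], forward() returns audio of shape
# [B, c_out, T_frames * prod(upsample_rates)]. Pass f0 of shape [B, T_frames]
# only when h['use_pitch_embed'] is set; otherwise the source modules are
# never created.
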
class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False, use_cond=False, c_in=1):
        super(DiscriminatorP, self).__init__()
        self.use_cond = use_cond
        if use_cond:
            from utils.hparams import hparams
            t = hparams['hop_size']
            # Upsample the conditioning mel to the audio rate and stack it as a second channel.
            self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2)
            c_in = 2

        self.period = period
        norm_f = weight_norm if not use_spectral_norm else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(c_in, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
        ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x, mel):
        fmap = []
        if self.use_cond:
            x_mel = self.cond_net(mel)
            x = torch.cat([x_mel, x], 1)

        # Pad so the length is divisible by the period, then fold the 1d signal
        # into a 2d map of shape [B, C, T // period, period].
        b, c, t = x.shape
        if t % self.period != 0:
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap

class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, use_cond=False, c_in=1):
        super(MultiPeriodDiscriminator, self).__init__()
        # Prime periods, so the folded views of the waveform overlap as little as possible.
        self.discriminators = nn.ModuleList([
            DiscriminatorP(2, use_cond=use_cond, c_in=c_in),
            DiscriminatorP(3, use_cond=use_cond, c_in=c_in),
            DiscriminatorP(5, use_cond=use_cond, c_in=c_in),
            DiscriminatorP(7, use_cond=use_cond, c_in=c_in),
            DiscriminatorP(11, use_cond=use_cond, c_in=c_in),
        ])

    def forward(self, y, y_hat, mel=None):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y, mel)
            y_d_g, fmap_g = d(y_hat, mel)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs

class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False, use_cond=False, upsample_rates=None, c_in=1):
        super(DiscriminatorS, self).__init__()
        self.use_cond = use_cond
        if use_cond:
            t = np.prod(upsample_rates)
            self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2)
            c_in = 2
        norm_f = weight_norm if not use_spectral_norm else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv1d(c_in, 128, 15, 1, padding=7)),
            norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
            norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
            norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
        ])
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x, mel):
        if self.use_cond:
            x_mel = self.cond_net(mel)
            x = torch.cat([x_mel, x], 1)
        fmap = []
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap

class MultiScaleDiscriminator(torch.nn.Module):
    def __init__(self, use_cond=False, c_in=1):
        super(MultiScaleDiscriminator, self).__init__()
        from utils.hparams import hparams
        self.discriminators = nn.ModuleList([
            DiscriminatorS(use_spectral_norm=True, use_cond=use_cond,
                           upsample_rates=[4, 4, hparams['hop_size'] // 16],
                           c_in=c_in),
            DiscriminatorS(use_cond=use_cond,
                           upsample_rates=[4, 4, hparams['hop_size'] // 32],
                           c_in=c_in),
            DiscriminatorS(use_cond=use_cond,
                           upsample_rates=[4, 4, hparams['hop_size'] // 64],
                           c_in=c_in),
        ])
        self.meanpools = nn.ModuleList([
            AvgPool1d(4, 2, padding=1),
            AvgPool1d(4, 2, padding=1)
        ])

    def forward(self, y, y_hat, mel=None):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            if i != 0:
                y = self.meanpools[i - 1](y)
                y_hat = self.meanpools[i - 1](y_hat)
            y_d_r, fmap_r = d(y, mel)
            y_d_g, fmap_g = d(y_hat, mel)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs

def feature_loss(fmap_r, fmap_g):
    # L1 feature-matching loss over all discriminator activations,
    # weighted by 2 as in the original HiFi-GAN recipe.
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            loss += torch.mean(torch.abs(rl - gl))

    return loss * 2

def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    r_losses = 0
    g_losses = 0
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        r_loss = torch.mean((1 - dr) ** 2)
        g_loss = torch.mean(dg ** 2)
        r_losses += r_loss
        g_losses += g_loss
    r_losses = r_losses / len(disc_real_outputs)
    g_losses = g_losses / len(disc_real_outputs)
    return r_losses, g_losses

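# For reference: these are the least-squares (LSGAN) objectives used by
# HiFi-GAN. discriminator_loss above pushes D(y) toward 1 and D(y_hat) toward
# 0 via E[(1 - D(y))^2] + E[D(y_hat)^2]; generator_loss below pushes
# D(y_hat) toward 1 via E[(1 - D(y_hat))^2].
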
def cond_discriminator_loss(outputs):
    loss = 0
    for dg in outputs:
        g_loss = torch.mean(dg ** 2)
        loss += g_loss
    loss = loss / len(outputs)
    return loss

def generator_loss(disc_outputs):
    loss = 0
    for dg in disc_outputs:
        l = torch.mean((1 - dg) ** 2)
        loss += l
    loss = loss / len(disc_outputs)
    return loss

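# A minimal smoke test, illustrative only: the config values below are
# assumptions in the spirit of a standard HiFi-GAN v1 setup, not values taken
# from this repo's hparams files.
if __name__ == '__main__':
    h = {
        'resblock': '1',
        'resblock_kernel_sizes': [3, 7, 11],
        'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        'upsample_rates': [8, 8, 2, 2],
        'upsample_kernel_sizes': [16, 16, 4, 4],
        'upsample_initial_channel': 512,
        'use_pitch_embed': False,
        'audio_sample_rate': 22050,
    }
    mel = torch.randn(2, 80, 40)  # [B, n_mels, T_frames]
    y = torch.randn(2, 1, 40 * 256)  # 256 == np.prod(h['upsample_rates'])
    y_hat = HifiGanGenerator(h)(mel)
    assert y_hat.shape == y.shape

    mpd = MultiPeriodDiscriminator()
    y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(y, y_hat)
    print(generator_loss(y_d_gs), feature_loss(fmap_rs, fmap_gs))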