import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, Conv2d, ConvTranspose1d
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm

import indextts.BigVGAN.activations as activations
from indextts.BigVGAN.ECAPA_TDNN import ECAPA_TDNN
from indextts.BigVGAN.utils import get_padding, init_weights

LRELU_SLOPE = 0.1
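

# AMPBlock1 (Anti-aliased Multi-Periodicity block, variant 1): three residual
# branches, each applying anti-aliased activation -> dilated conv -> anti-aliased
# activation -> conv (dilation 1), with dilations growing across branches.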
class AMPBlock1(torch.nn.Module):
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None):
        super(AMPBlock1, self).__init__()
        self.h = h

        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

        self.num_layers = len(self.convs1) + len(self.convs2)
        if self.h.get("use_cuda_kernel", False):
            from indextts.BigVGAN.alias_free_activation.cuda.activation1d import Activation1d
        else:
            from indextts.BigVGAN.alias_free_torch import Activation1d
        if activation == 'snake':
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        elif activation == 'snakebeta':
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        else:
            raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")

    def forward(self, x):
        acts1, acts2 = self.activations[::2], self.activations[1::2]
        for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
            xt = a1(x)
            xt = c1(xt)
            xt = a2(xt)
            xt = c2(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)
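

# AMPBlock2: lighter variant with a single anti-aliased activation -> dilated conv
# per residual branch (two branches, dilations (1, 3) by default).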
class AMPBlock2(torch.nn.Module):
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None):
        super(AMPBlock2, self).__init__()
        self.h = h

        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

        self.num_layers = len(self.convs)
        if self.h.get("use_cuda_kernel", False):
            from indextts.BigVGAN.alias_free_activation.cuda.activation1d import Activation1d
        else:
            from indextts.BigVGAN.alias_free_torch import Activation1d

        if activation == 'snake':
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        elif activation == 'snakebeta':
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        else:
            raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")

    def forward(self, x):
        for c, a in zip(self.convs, self.activations):
            xt = a(x)
            xt = c(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)
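

# BigVGAN generator adapted for IndexTTS: projects GPT latent features through a
# stack of transposed-conv upsamplers and AMP residual blocks to a waveform, with
# speaker conditioning from an ECAPA-TDNN embedding of reference mel frames.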
class BigVGAN(torch.nn.Module):
    def __init__(self, h, use_cuda_kernel=False):
        """
        Args:
            h: hyperparameter config (dict-like, supporting both key and attribute access)
            use_cuda_kernel (bool): whether to use the custom CUDA kernel for the anti-aliased activation
        """
        super(BigVGAN, self).__init__()
        self.h = h
        self.h["use_cuda_kernel"] = use_cuda_kernel

        self.num_kernels = len(h.resblock_kernel_sizes)
        self.num_upsamples = len(h.upsample_rates)

        self.feat_upsample = h.feat_upsample
        self.cond_in_each_up_layer = h.cond_d_vector_in_each_upsampling_layer

        # pre-conv: project GPT features to the initial channel width
        self.conv_pre = weight_norm(Conv1d(h.gpt_dim, h.upsample_initial_channel, 7, 1, padding=3))

        # select which AMPBlock variant to use
        resblock = AMPBlock1 if h.resblock == "1" else AMPBlock2

        # transposed-conv upsamplers, halving the channel width at each stage
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
            self.ups.append(nn.ModuleList([
                weight_norm(ConvTranspose1d(h.upsample_initial_channel // (2 ** i),
                                            h.upsample_initial_channel // (2 ** (i + 1)),
                                            k, u, padding=(k - u) // 2))
            ]))

        # residual blocks (AMP) after each upsampling stage
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = h.upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
                self.resblocks.append(resblock(self.h, ch, k, d, activation=h.activation))
        if use_cuda_kernel:
            from indextts.BigVGAN.alias_free_activation.cuda.activation1d import Activation1d
        else:
            from indextts.BigVGAN.alias_free_torch import Activation1d

        # post-conv anti-aliased activation
        if h.activation == "snake":
            activation_post = activations.Snake(ch, alpha_logscale=h.snake_logscale)
            self.activation_post = Activation1d(activation=activation_post)
        elif h.activation == "snakebeta":
            activation_post = activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale)
            self.activation_post = Activation1d(activation=activation_post)
        else:
            raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")

        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))

        # weight initialization
        for i in range(len(self.ups)):
            self.ups[i].apply(init_weights)
        self.conv_post.apply(init_weights)

        # Speaker conditioning: ECAPA-TDNN embedding from reference mels, projected
        # into the generator's channel widths.
        self.speaker_encoder = ECAPA_TDNN(h.num_mels, lin_neurons=h.speaker_embedding_dim)
        self.cond_layer = nn.Conv1d(h.speaker_embedding_dim, h.upsample_initial_channel, 1)
        if self.cond_in_each_up_layer:
            self.conds = nn.ModuleList()
            for i in range(len(self.ups)):
                ch = h.upsample_initial_channel // (2 ** (i + 1))
                self.conds.append(nn.Conv1d(h.speaker_embedding_dim, ch, 1))

        # Learnable temperature for the contrastive loss in forward(). This parameter
        # is required by cal_clip_loss() but was missing here; the init value below
        # is an assumption, using CLIP's standard log(1/0.07).
        self.logit_scale = nn.Parameter(torch.ones([]) * torch.log(torch.tensor(1.0 / 0.07)))
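
    # x: GPT latent feature sequence of shape (B, T, gpt_dim); mel_ref: reference
    # mel frames fed to the ECAPA-TDNN speaker encoder. When speaker_embedding has
    # 2*B entries (presumably two reference chunks per utterance), the two halves
    # are paired for a CLIP-style contrastive loss; otherwise contrastive_loss is None.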
    def forward(self, x, mel_ref, lens=None):
        speaker_embedding = self.speaker_encoder(mel_ref, lens)
        n_batch = x.size(0)
        contrastive_loss = None
        if n_batch * 2 == speaker_embedding.size(0):
            spe_emb_chunk1, spe_emb_chunk2 = speaker_embedding[:n_batch, :, :], speaker_embedding[n_batch:, :, :]
            contrastive_loss = self.cal_clip_loss(spe_emb_chunk1.squeeze(1), spe_emb_chunk2.squeeze(1), self.logit_scale.exp())

        speaker_embedding = speaker_embedding[:n_batch, :, :]
        speaker_embedding = speaker_embedding.transpose(1, 2)

        # optionally upsample the feature sequence 4x along time before the generator
        if self.feat_upsample:
            x = F.interpolate(
                x.transpose(1, 2),
                scale_factor=[4],
                mode="linear",
            ).squeeze(1)
        else:
            x = x.transpose(1, 2)

        x = self.conv_pre(x)
        x = x + self.cond_layer(speaker_embedding)

        for i in range(self.num_upsamples):
            # upsampling
            for i_up in range(len(self.ups[i])):
                x = self.ups[i][i_up](x)

            if self.cond_in_each_up_layer:
                x = x + self.conds[i](speaker_embedding)

            # AMP blocks: average the outputs of the parallel residual blocks
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        # post conv
        x = self.activation_post(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x, contrastive_loss

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            for l_i in l:
                remove_weight_norm(l_i)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)
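
    # Symmetric CLIP-style InfoNCE: cross-entropy over the scaled similarity matrix
    # in both directions, with matching pairs on the diagonal. The image/text naming
    # follows CLIP; here both inputs are speaker embeddings.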
    def cal_clip_loss(self, image_features, text_features, logit_scale):
        device = image_features.device
        logits_per_image, logits_per_text = self.get_logits(image_features, text_features, logit_scale)
        labels = torch.arange(logits_per_image.shape[0], device=device, dtype=torch.long)
        total_loss = (
            F.cross_entropy(logits_per_image, labels) +
            F.cross_entropy(logits_per_text, labels)
        ) / 2
        return total_loss

    def get_logits(self, image_features, text_features, logit_scale):
        logits_per_image = logit_scale * image_features @ text_features.T
        logits_per_text = logit_scale * text_features @ image_features.T
        return logits_per_image, logits_per_text
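

# Multi-Period Discriminator sub-network: folds the 1-D waveform into a 2-D
# (time // period, period) grid and applies stacked 2-D convolutions, so periodic
# artifacts at the given period show up as column-wise patterns.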
class DiscriminatorP(torch.nn.Module):
    def __init__(self, h, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.d_mult = h.discriminator_channel_mult
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, int(32 * self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(int(32 * self.d_mult), int(128 * self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(int(128 * self.d_mult), int(512 * self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(int(512 * self.d_mult), int(1024 * self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(int(1024 * self.d_mult), int(1024 * self.d_mult), (kernel_size, 1), 1, padding=(2, 0))),
        ])
        self.conv_post = norm_f(Conv2d(int(1024 * self.d_mult), 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d: pad to a multiple of the period, then fold time into (t // period, period)
        b, c, t = x.shape
        if t % self.period != 0:
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
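

# Multi-Period Discriminator: an ensemble of DiscriminatorP modules, one per period
# in h.mpd_reshapes, each scoring the real waveform y and the generated y_hat.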
class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, h):
        super(MultiPeriodDiscriminator, self).__init__()
        self.mpd_reshapes = h.mpd_reshapes
        print("mpd_reshapes: {}".format(self.mpd_reshapes))
        discriminators = [DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes]
        self.discriminators = nn.ModuleList(discriminators)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
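

# Multi-Resolution Discriminator sub-network: scores the magnitude STFT of the
# waveform at one (n_fft, hop_length, win_length) resolution with 2-D convolutions.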
class DiscriminatorR(nn.Module):
    def __init__(self, cfg, resolution):
        super().__init__()

        self.resolution = resolution
        assert len(self.resolution) == 3, \
            "MRD layer requires list with len=3, got {}".format(self.resolution)
        self.lrelu_slope = LRELU_SLOPE

        norm_f = spectral_norm if cfg.use_spectral_norm else weight_norm
        if hasattr(cfg, "mrd_use_spectral_norm"):
            print("INFO: overriding MRD use_spectral_norm as {}".format(cfg.mrd_use_spectral_norm))
            norm_f = spectral_norm if cfg.mrd_use_spectral_norm else weight_norm
        self.d_mult = cfg.discriminator_channel_mult
        if hasattr(cfg, "mrd_channel_mult"):
            print("INFO: overriding mrd channel multiplier as {}".format(cfg.mrd_channel_mult))
            self.d_mult = cfg.mrd_channel_mult

        self.convs = nn.ModuleList([
            norm_f(nn.Conv2d(1, int(32 * self.d_mult), (3, 9), padding=(1, 4))),
            norm_f(nn.Conv2d(int(32 * self.d_mult), int(32 * self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
            norm_f(nn.Conv2d(int(32 * self.d_mult), int(32 * self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
            norm_f(nn.Conv2d(int(32 * self.d_mult), int(32 * self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
            norm_f(nn.Conv2d(int(32 * self.d_mult), int(32 * self.d_mult), (3, 3), padding=(1, 1))),
        ])
        self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1)))

    def forward(self, x):
        fmap = []

        x = self.spectrogram(x)
        x = x.unsqueeze(1)
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, self.lrelu_slope)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap

    def spectrogram(self, x):
        n_fft, hop_length, win_length = self.resolution
        x = F.pad(x, (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)), mode='reflect')
        x = x.squeeze(1)
        x = torch.stft(x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False, return_complex=True)
        x = torch.view_as_real(x)
        mag = torch.norm(x, p=2, dim=-1)

        return mag
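

# Multi-Resolution Discriminator: an ensemble of DiscriminatorR modules, one per
# STFT resolution in cfg.resolutions.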
class MultiResolutionDiscriminator(nn.Module):
    def __init__(self, cfg, debug=False):
        super().__init__()
        self.resolutions = cfg.resolutions
        assert len(self.resolutions) == 3, \
            "MRD requires list of list with len=3, each element having a list with len=3. got {}".format(self.resolutions)
        self.discriminators = nn.ModuleList(
            [DiscriminatorR(cfg, resolution) for resolution in self.resolutions]
        )

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []

        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(x=y)
            y_d_g, fmap_g = d(x=y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
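

# Feature-matching loss: L1 distance between real and generated feature maps at
# every discriminator layer, scaled by 2 as in HiFi-GAN.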
def feature_loss(fmap_r, fmap_g):
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            loss += torch.mean(torch.abs(rl - gl))

    return loss * 2
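

# Least-squares GAN discriminator loss: real outputs are pushed toward 1 and
# generated outputs toward 0; per-discriminator scalars are returned for logging.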
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    loss = 0
    r_losses = []
    g_losses = []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        r_loss = torch.mean((1 - dr) ** 2)
        g_loss = torch.mean(dg ** 2)
        loss += (r_loss + g_loss)
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())

    return loss, r_losses, g_losses
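

# Least-squares GAN generator loss: generated outputs are pushed toward 1.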
def generator_loss(disc_outputs):
    loss = 0
    gen_losses = []
    for dg in disc_outputs:
        l = torch.mean((1 - dg) ** 2)
        gen_losses.append(l)
        loss += l

    return loss, gen_losses
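

# Usage sketch (illustrative only; assumes an AttrDict-like config `h` with the
# fields referenced above, and that tensor layouts match ECAPA_TDNN's expectations):
#
#     model = BigVGAN(h, use_cuda_kernel=False)
#     latents = torch.randn(1, 100, h.gpt_dim)   # GPT feature sequence (B, T, C)
#     mel_ref = torch.randn(1, h.num_mels, 200)  # reference mel-spectrogram
#     wav, contrastive_loss = model(latents, mel_ref)
#     model.remove_weight_norm()                 # before inference/export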