# testSpeech / models_mel_style.py
import copy
import math
import torch
from torch import nn
from torch.nn import functional as F
import os
import yaml
import commons
import modules
import attentions
import monotonic_align
import numpy as np
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch, spectrogram_torch
from Attention import MultiHeadedAttention as BaseMultiHeadedAttention
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from commons import init_weights, get_padding
from transformers import AlbertConfig, AlbertModel
from collections import OrderedDict
from text import sequence_to_text
import utils
log_dir = "configs"
config_path = os.path.join(log_dir, "vie_bert.yml")
plbert_config = yaml.safe_load(open(config_path))
# hps = utils.get_hparams()
class StochasticDurationPredictor(nn.Module):
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
super().__init__()
        filter_channels = in_channels  # NOTE: overrides the argument; to be removed in a future version
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.n_flows = n_flows
self.gin_channels = gin_channels
self.log_flow = modules.Log()
self.flows = nn.ModuleList()
self.flows.append(modules.ElementwiseAffine(2))
for i in range(n_flows):
self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
self.flows.append(modules.Flip())
self.post_pre = nn.Conv1d(1, filter_channels, 1)
self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
self.post_flows = nn.ModuleList()
self.post_flows.append(modules.ElementwiseAffine(2))
for i in range(4):
self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
self.post_flows.append(modules.Flip())
self.pre = nn.Conv1d(in_channels, filter_channels, 1)
self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
x = torch.detach(x)
x = self.pre(x)
if g is not None:
g = torch.detach(g)
x = x + self.cond(g)
x = self.convs(x, x_mask)
x = self.proj(x) * x_mask
if not reverse:
flows = self.flows
assert w is not None
logdet_tot_q = 0
h_w = self.post_pre(w)
h_w = self.post_convs(h_w, x_mask)
h_w = self.post_proj(h_w) * x_mask
e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
z_q = e_q
for flow in self.post_flows:
z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
logdet_tot_q += logdet_q
z_u, z1 = torch.split(z_q, [1, 1], 1)
u = torch.sigmoid(z_u) * x_mask
z0 = (w - u) * x_mask
logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
logdet_tot = 0
z0, logdet = self.log_flow(z0, x_mask)
logdet_tot += logdet
z = torch.cat([z0, z1], 1)
for flow in flows:
z, logdet = flow(z, x_mask, g=x, reverse=reverse)
logdet_tot = logdet_tot + logdet
nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
return nll + logq # [b]
else:
flows = list(reversed(self.flows))
flows = flows[:-2] + [flows[-1]] # remove a useless vflow
z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
for flow in flows:
z = flow(z, x_mask, g=x, reverse=reverse)
z0, z1 = torch.split(z, [1, 1], 1)
logw = z0
return logw
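# Illustrative usage of the stochastic duration predictor (a sketch with assumed
# shapes, not taken from the training script): forward mode returns a per-utterance
# negative log-likelihood; reverse mode samples log-durations from noise.
# >>> sdp = StochasticDurationPredictor(192, 192, 3, 0.5)
# >>> x, x_mask = torch.randn(2, 192, 50), torch.ones(2, 1, 50)
# >>> nll = sdp(x, x_mask, w=torch.rand(2, 1, 50))               # [b]
# >>> logw = sdp(x, x_mask, reverse=True, noise_scale=0.8)       # [b, 1, t]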
class DurationPredictor(nn.Module):
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.gin_channels = gin_channels
self.drop = nn.Dropout(p_dropout)
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
self.norm_1 = modules.LayerNorm(filter_channels)
self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
self.norm_2 = modules.LayerNorm(filter_channels)
self.proj = nn.Conv1d(filter_channels, 1, 1)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, in_channels, 1)
def forward(self, x, x_mask, g=None):
x = torch.detach(x)
if g is not None:
g = torch.detach(g)
x = x + self.cond(g)
x = self.conv_1(x * x_mask)
x = torch.relu(x)
x = self.norm_1(x)
x = self.drop(x)
x = self.conv_2(x * x_mask)
x = torch.relu(x)
x = self.norm_2(x)
x = self.drop(x)
x = self.proj(x * x_mask)
return x * x_mask
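# Sketch of the deterministic alternative (assumed shapes): it regresses
# log-durations directly, so callers recover frame counts with exp().
# >>> dp = DurationPredictor(192, 256, 3, 0.5)
# >>> logw = dp(torch.randn(2, 192, 50), torch.ones(2, 1, 50))   # [b, 1, t]
# >>> w = torch.exp(logw)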
def length_to_mask(lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
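# Example: the returned mask is True at PADDED positions (position + 1 > length),
# which is why the caller below inverts it before handing it to ALBERT.
# >>> length_to_mask(torch.tensor([2, 3]))
# tensor([[False, False,  True],
#         [False, False, False]])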
class TextEncoder(nn.Module):
def __init__(self,
n_vocab,
out_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout):
super().__init__()
self.out_channels = out_channels
#self.hidden_channels = hidden_channels
#self.p_dropout = p_dropout
#self.emb = nn.Embedding(n_vocab, hidden_channels)
#nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
self.encoder = attentions.Encoder(
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout)
albert_base_configuration = AlbertConfig(**plbert_config['model_params'])
bert = AlbertModel(albert_base_configuration)
# checkpoint = torch.load(log_dir + "/step_1000000" + ".t7", map_location='cpu')
# state_dict = checkpoint['net']
        state_dict = torch.load(os.path.join(log_dir, "bert_5.pt"), map_location='cpu')
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            # strip DataParallel's `module.` prefix only when it is present
            name = k[7:] if k.startswith('module.') else k
            if name.startswith('encoder.'):
                name = name[8:]  # remove `encoder.`
            new_state_dict[name] = v
        bert.load_state_dict(new_state_dict, strict=False)
        self.bert = bert  # kept on CPU for no-GPU inference
# print(self.bert.pooler.weight.requires_grad)
# print(self.bert.pooler.bias.requires_grad)
# for param in self.bert.pooler.weight.parameters():
# param.requires_grad = True # or True
# for param in self.bert.pooler.bias.parameters():
# param.requires_grad = True # or True
self.linear = nn.Linear(plbert_config['model_params']['hidden_size'], hidden_channels)
self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths):
        # padding mask for ALBERT: True marks padded positions, so invert it for attention_mask
        attention_mask = length_to_mask(x_lengths).to(x.device)
        x = self.bert(x, attention_mask=(~attention_mask).int()).last_hidden_state  # [b, t, h1]
x = self.linear(x)
#x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
x = torch.transpose(x, 1, -1) # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
x = self.encoder(x * x_mask, x_mask)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
return x, m, logs, x_mask
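# Unlike vanilla VITS, the phoneme embedding table is replaced by a pre-trained
# ALBERT (PL-BERT-style) encoder; assumed shape flow: token ids [b, t] -> BERT
# hidden states [b, t, hidden_size] -> linear [b, t, h] -> transpose [b, h, t]
# -> relative-attention encoder -> proj [b, 2*out, t], split into (m, logs).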
class ResidualCouplingBlock(nn.Module):
def __init__(self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
n_flows=4,
gin_channels=0):
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.n_flows = n_flows
self.gin_channels = gin_channels
self.flows = nn.ModuleList()
for i in range(n_flows):
self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
self.flows.append(modules.Flip())
def forward(self, x, x_mask, g=None, reverse=False):
if not reverse:
for flow in self.flows:
x, _ = flow(x, x_mask, g=g, reverse=reverse)
else:
for flow in reversed(self.flows):
x = flow(x, x_mask, g=g, reverse=reverse)
return x
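# The coupling block is invertible: forward maps the posterior z into the prior
# space during training, reverse maps prior samples back at inference. A quick
# self-consistency sketch (assumed shapes):
# >>> flow = ResidualCouplingBlock(192, 192, 5, 1, 4)
# >>> z, m = torch.randn(1, 192, 40), torch.ones(1, 1, 40)
# >>> z_rec = flow(flow(z, m), m, reverse=True)   # ~= z up to numerical error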
class PosteriorEncoder(nn.Module):
def __init__(self,
in_channels,
out_channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths, g=None):
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
x = self.pre(x) * x_mask
x = self.enc(x, x_mask, g=g)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
return z, m, logs, x_mask
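# z is drawn with the reparameterization trick, z = m + eps * exp(logs) with
# eps ~ N(0, I), so gradients flow through m and logs. Equivalent (illustrative):
# >>> # z = torch.distributions.Normal(m, torch.exp(logs)).rsample() * x_mask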
class Generator(torch.nn.Module):
def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
super(Generator, self).__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
self.resblocks.append(resblock(ch, k, d))
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
self.ups.apply(init_weights)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
def forward(self, x, g=None):
x = self.conv_pre(x)
if g is not None:
x = x + self.cond(g)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, modules.LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
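# The vocoder's total upsampling factor is prod(upsample_rates); e.g. with the
# common VITS config upsample_rates=[8, 8, 2, 2] (an assumption about the config,
# not read from this file) each latent frame becomes 8*8*2*2 = 256 waveform samples.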
class DiscriminatorP(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
self.use_spectral_norm = use_spectral_norm
        norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, modules.LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
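# The 1d -> 2d fold groups samples that are `period` apart into the same column,
# e.g. t=6, period=3: [0 1 2 3 4 5] -> [[0 1 2], [3 4 5]], so the 2d convs see
# the signal's periodic structure along one axis.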
class DiscriminatorS(torch.nn.Module):
def __init__(self, use_spectral_norm=False):
super(DiscriminatorS, self).__init__()
        norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 16, 15, 1, padding=7)),
norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, modules.LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self, use_spectral_norm=False):
super(MultiPeriodDiscriminator, self).__init__()
periods = [2,3,5,7,11]
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
self.discriminators = nn.ModuleList(discs)
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
        for d in self.discriminators:
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
y_d_gs.append(y_d_g)
fmap_rs.append(fmap_r)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
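# HiFi-GAN-style ensemble: one scale discriminator plus one period discriminator
# per prime period in [2, 3, 5, 7, 11]; the primes keep the folded views from
# overlapping each other.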
class SynthesizerTrn(nn.Module):
"""
Synthesizer for Training
"""
def __init__(self,
n_vocab,
spec_channels,
segment_size,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
n_speakers=0,
gin_channels=0,
use_sdp=True,
**kwargs):
super().__init__()
self.n_vocab = n_vocab
self.spec_channels = spec_channels
self.inter_channels = inter_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.resblock = resblock
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.upsample_rates = upsample_rates
self.upsample_initial_channel = upsample_initial_channel
self.upsample_kernel_sizes = upsample_kernel_sizes
self.segment_size = segment_size
self.n_speakers = n_speakers
self.gin_channels = gin_channels
self.use_sdp = use_sdp
self.enc_p = TextEncoder(n_vocab,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout)
self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
self.style_encoder = StyleEmbedding()
if use_sdp:
self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
else:
self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
if n_speakers > 1:
self.emb_g = nn.Embedding(n_speakers, gin_channels)
def forward(self, x, x_lengths, mel, y, y_lengths, sid=None):
        '''
        g is set to None for the posterior encoder, the duration predictor, and
        the vocoder; only the flow is conditioned, on the mel style vector.
        '''
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
if self.n_speakers > 0:
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
else:
g = None
#* g: (8,256,1)
# z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=None)
#* y_mask:(8,1,262)
# z_p = self.flow(z, y_mask, g=g)
#* Zero-shot
style_vector = self.style_encoder(mel.transpose(1,2), torch.tensor(np.full((mel.shape[0]), mel.shape[2])))
z_p = self.flow(z, y_mask, g=style_vector.unsqueeze(-1))
with torch.no_grad():
# negative cross-entropy
s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
w = attn.sum(2)
if self.use_sdp:
# l_length = self.dp(x, x_mask, w, g=g)
l_length = self.dp(x, x_mask, w, g=None)
l_length = l_length / torch.sum(x_mask)
else:
logw_ = torch.log(w + 1e-6) * x_mask
# logw = self.dp(x, x_mask, g=g)
logw = self.dp(x, x_mask, g=None)
l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
# expand prior
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
# o = self.dec(z_slice, g=g)
o = self.dec(z_slice, g=None)
return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
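    # The four neg_cent terms above expand log N(z_p; m_p, exp(logs_p)^2)
    # = -0.5*log(2*pi) - logs_p - 0.5*(z_p - m_p)^2 * exp(-2*logs_p), split so that
    # the (z_p, m_p) cross terms become batched matmuls over the two time axes
    # before monotonic alignment search picks the best path.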
# def forward(self, mel_src, mel_tgt, y, y_lengths, y_ref, y_lengths_ref, sid=None):
# style_vector_src = self.style_encoder(mel_src.transpose(1,2), torch.tensor(np.full((mel_src.shape[0]), mel_src.shape[2])))
# style_vector_ref = self.style_encoder(mel_tgt.transpose(1,2), torch.tensor(np.full((mel_tgt.shape[0]), mel_tgt.shape[2])))
# ## SRC
# z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=None)
# z_p = self.flow(z, y_mask, g=style_vector_src.unsqueeze(-1))
# z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
# ## REF
# z_ref, z_q_ref, logs_q_ref, y_mask_ref = self.enc_q(y_ref, y_lengths_ref, g=None)
# z_p_ref = self.flow(z_ref, y_mask_ref, g=style_vector_ref.unsqueeze(-1))
# z_slice_ref, ids_slice_ref = commons.rand_slice_segments(z_ref, y_lengths_ref, self.segment_size)
# o = self.dec(z_slice, g=None)
# o_ref = self.dec(z_slice_ref, g=None)
# ## Style reconstruction
# z_vc = self.flow(z_p, y_mask, g=style_vector_ref.unsqueeze(-1), reverse=True)
# o_hat_vc = self.dec(z_vc * y_mask, g=None)
# o_hat_vc_mel = mel_spectrogram_torch(
# o_hat_vc.squeeze(1),
# hps.data.filter_length,
# hps.data.n_mel_channels,
# hps.data.sampling_rate,
# hps.data.hop_length,
# hps.data.win_length,
# hps.data.mel_fmin,
# hps.data.mel_fmax
# )
# # spec_vc = spectrogram_torch(y_hat_vc, hps.data.filter_length,
# # hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
# # center=False)
# # spec_vc = torch.squeeze(spec_vc, 0)
# # mel_vc = spec_to_mel_torch(
# # spec_vc,
# # hps.data.filter_length,
# # hps.data.n_mel_channels,
# # hps.data.sampling_rate,
# # hps.data.mel_fmin,
# # hps.data.mel_fmax)
# style_vector_vc = self.style_encoder(o_hat_vc_mel.transpose(1,2), torch.tensor(np.full((o_hat_vc_mel.shape[0]), o_hat_vc_mel.shape[2])))
# return o, o_ref, ids_slice, ids_slice_ref, y_mask, y_mask_ref, (z, z_ref, z_p, z_p_ref, logs_q, logs_q_ref), style_vector_vc, style_vector_ref
    def infer(self, x, x_lengths, mel, mel_lengths=None, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
if self.n_speakers > 0:
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
else:
g = None
if self.use_sdp:
# logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
logw = self.dp(x, x_mask, g=None, reverse=True, noise_scale=noise_scale_w)
else:
# logw = self.dp(x, x_mask, g=g)
logw = self.dp(x, x_mask, g=None)
w = torch.exp(logw) * x_mask * length_scale
w_ceil = torch.ceil(w)
y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
attn = commons.generate_path(w_ceil, attn_mask)
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
#* used for mel style encoder
        if mel_lengths is not None:
            style_vector = self.style_encoder(mel.transpose(1,2), mel_lengths)
        else:
            style_vector = self.style_encoder(mel.transpose(1,2), torch.tensor(np.full((mel.shape[0]), mel.shape[2])))
# z = self.flow(z_p, y_mask, g=g, reverse=True)
z = self.flow(z_p, y_mask, g=style_vector.unsqueeze(-1), reverse=True)
# o = self.dec((z * y_mask)[:,:,:max_len], g=g)
o = self.dec((z * y_mask)[:,:,:max_len], g=None)
return o, attn, y_mask, (z, z_p, m_p, logs_p)
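    # Inference sketch (argument roles assumed from the code above): x holds token
    # ids, mel is a reference spectrogram consumed only by the style encoder;
    # length_scale stretches predicted durations, noise_scale the prior variance.
    # >>> # o, attn, y_mask, _ = model.infer(tokens, token_lens, ref_mel,
    # >>> #                                  noise_scale=0.667, length_scale=1.0)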
# def forward(self, spec_source_pattern, spec_lengths_source, mel_source, mel_ref):
# style_vector_src = self.style_encoder(mel_source.transpose(1,2), torch.tensor(np.full((mel_source.shape[0]), mel_source.shape[2])))
# style_vector_ref = self.style_encoder(mel_ref.transpose(1,2), torch.tensor(np.full((mel_ref.shape[0]), mel_ref.shape[2])))
# z, m_q, logs_q, y_mask = self.enc_q(spec_source_pattern, spec_lengths_source, g=None)
# z_p = self.flow(z, y_mask, g=style_vector_src.unsqueeze(-1))
# z_hat = self.flow(z_p, y_mask, g=style_vector_ref.unsqueeze(-1), reverse=True)
# o_hat = self.dec(z_hat * y_mask, g=None)
# z_slice, ids_slice = commons.rand_slice_segments(z, spec_lengths_source, self.segment_size)
# return o_hat, y_mask, (z, z_p, z_hat), style_vector_src, style_vector_ref, ids_slice
def voice_conversion(self, spec_source_pattern, spec_lengths_source, mel_source, mel_ref):
style_vector_src = self.style_encoder(mel_source.transpose(1,2), torch.tensor(np.full((mel_source.shape[0]), mel_source.shape[2])))
style_vector_ref = self.style_encoder(mel_ref.transpose(1,2), torch.tensor(np.full((mel_ref.shape[0]), mel_ref.shape[2])))
z, m_q, logs_q, y_mask = self.enc_q(spec_source_pattern, spec_lengths_source, g=None)
z_p = self.flow(z, y_mask, g=style_vector_src.unsqueeze(-1))
z_hat = self.flow(z_p, y_mask, g=style_vector_ref.unsqueeze(-1), reverse=True)
o_hat = self.dec(z_hat * y_mask, g=None)
return o_hat, y_mask, (z, z_p, z_hat)
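    # Voice conversion runs the flow twice: forward with the source style vector
    # into the style-independent prior space, then reverse with the reference
    # style vector, re-synthesizing the same content in the target voice.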
class MelStyleEncoder(nn.Module):
''' MelStyleEncoder '''
def __init__(self, n_mel_channels=80,
style_hidden=128,
style_vector_dim=256,
style_kernel_size=5,
style_head=2,
dropout=0.1):
super(MelStyleEncoder, self).__init__()
self.in_dim = n_mel_channels
self.hidden_dim = style_hidden
self.out_dim = style_vector_dim
self.kernel_size = style_kernel_size
self.n_head = style_head
self.dropout = dropout
self.spectral = nn.Sequential(
modules.LinearNorm(self.in_dim, self.hidden_dim),
modules.Mish(),
nn.Dropout(self.dropout),
modules.LinearNorm(self.hidden_dim, self.hidden_dim),
modules.Mish(),
nn.Dropout(self.dropout)
)
self.temporal = nn.Sequential(
modules.Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout),
modules.Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout),
)
self.slf_attn = modules.MultiHeadAttention(self.n_head, self.hidden_dim,
self.hidden_dim//self.n_head, self.hidden_dim//self.n_head, self.dropout)
self.fc = modules.LinearNorm(self.hidden_dim, self.out_dim)
def temporal_avg_pool(self, x, mask=None):
if mask is None:
out = torch.mean(x, dim=1)
else:
len_ = (~mask).sum(dim=1).unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(-1), 0)
x = x.sum(dim=1)
out = torch.div(x, len_)
return out
def forward(self, x, mask=None):
max_len = x.shape[1]
slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1) if mask is not None else None
# spectral
x = self.spectral(x)
# temporal
x = x.transpose(1,2)
x = self.temporal(x)
x = x.transpose(1,2)
# self-attention
if mask is not None:
x = x.masked_fill(mask.unsqueeze(-1), 0)
x, _ = self.slf_attn(x, mask=slf_attn_mask)
# fc
x = self.fc(x)
# temoral average pooling
w = self.temporal_avg_pool(x, mask=mask)
return w
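# temporal_avg_pool is a masked mean: padded frames are zeroed and the sum is
# divided by the true length, e.g. x = [1, 2, 3] with the last frame masked
# pools to (1 + 2) / 2 = 1.5 rather than (1 + 2 + 3) / 3.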
class StyleEmbedding(torch.nn.Module):
def __init__(self):
super().__init__()
self.gst = StyleEncoder()
def forward(self,
batch_of_spectrograms,
batch_of_spectrogram_lengths,
return_all_outs=False,
return_only_refs=False):
minimum_sequence_length = 812
specs = list()
for index, spec_length in enumerate(batch_of_spectrogram_lengths):
spec = batch_of_spectrograms[index][:spec_length]
# double the length at least once, then check
spec = spec.repeat((2, 1))
current_spec_length = len(spec)
while current_spec_length < minimum_sequence_length:
# make it longer
spec = spec.repeat((2, 1))
current_spec_length = len(spec)
            specs.append(spec[:minimum_sequence_length])
spec_batch = torch.stack(specs, dim=0)
return self.gst(speech=spec_batch,
return_all_outs=return_all_outs,
return_only_ref=return_only_refs)
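# Short references are tiled up to at least 812 frames and then cropped, so the
# GST reference encoder always sees a fixed-length input: a 100-frame mel is
# doubled to 200, then 400, 800, 1600 in the loop, and cut back to 812.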
class StyleEncoder(torch.nn.Module):
def __init__(
self,
idim: int = 80,
gst_tokens: int = 2000,
gst_token_dim: int = 256,
gst_heads: int = 8,
conv_layers: int = 8,
conv_chans_list=(32, 32, 64, 64, 128, 128, 256, 256),
conv_kernel_size: int = 3,
conv_stride: int = 2,
gst_layers: int = 2,
gst_units: int = 256,
):
"""Initialize global style encoder module."""
super(StyleEncoder, self).__init__()
self.num_tokens = gst_tokens
self.ref_enc = ReferenceEncoder(idim=idim,
conv_layers=conv_layers,
conv_chans_list=conv_chans_list,
conv_kernel_size=conv_kernel_size,
conv_stride=conv_stride,
gst_layers=gst_layers,
gst_units=gst_units, )
self.stl = StyleTokenLayer(ref_embed_dim=gst_units,
gst_tokens=gst_tokens,
gst_token_dim=gst_token_dim,
gst_heads=gst_heads, )
self.ref_mel = MelStyleEncoder(n_mel_channels = idim)
def forward(self, speech, return_all_outs=False, return_only_ref=False):
ref_mels = self.ref_mel(speech)
ref_embs = self.ref_enc(speech)
if return_only_ref and not return_all_outs:
return ref_embs
style_embs = self.stl(ref_embs)
if return_all_outs:
if return_only_ref:
return ref_embs, [ref_embs] + [style_embs]
return style_embs, [ref_embs] + [style_embs]
return style_embs + ref_mels
    def calculate_ada4_regularization_loss(self):
        losses = list()
        for emb1_index in range(self.num_tokens):
            for emb2_index in range(emb1_index + 1, self.num_tokens):
                losses.append(torch.nn.functional.cosine_similarity(self.stl.gst_embs[emb1_index],
                                                                    self.stl.gst_embs[emb2_index], dim=0))
        return sum(losses)
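# The regularizer sums pairwise cosine similarities between GST token embeddings,
# pushing the style tokens toward mutual orthogonality; with gst_tokens=2000 that
# is ~2M pairs per call, so it is costly if evaluated every step.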
class ReferenceEncoder(torch.nn.Module):
def __init__(
self,
idim=80,
conv_layers: int = 6,
conv_chans_list=(32, 32, 64, 64, 128, 128),
conv_kernel_size: int = 3,
conv_stride: int = 2,
gst_layers: int = 1,
gst_units: int = 128,
):
"""Initialize reference encoder module."""
super(ReferenceEncoder, self).__init__()
# check hyperparameters are valid
assert conv_kernel_size % 2 == 1, "kernel size must be odd."
assert (
len(conv_chans_list) == conv_layers), "the number of conv layers and length of channels list must be the same."
convs = []
padding = (conv_kernel_size - 1) // 2
for i in range(conv_layers):
conv_in_chans = 1 if i == 0 else conv_chans_list[i - 1]
conv_out_chans = conv_chans_list[i]
convs += [torch.nn.Conv2d(conv_in_chans,
conv_out_chans,
kernel_size=conv_kernel_size,
stride=conv_stride,
padding=padding,
# Do not use bias due to the following batch norm
bias=False, ),
torch.nn.BatchNorm2d(conv_out_chans),
torch.nn.ReLU(inplace=True), ]
self.convs = torch.nn.Sequential(*convs)
self.conv_layers = conv_layers
self.kernel_size = conv_kernel_size
self.stride = conv_stride
self.padding = padding
# get the number of GRU input units
gst_in_units = idim
for i in range(conv_layers):
gst_in_units = (gst_in_units - conv_kernel_size + 2 * padding) // conv_stride + 1
        gst_in_units *= conv_chans_list[-1]
self.gst = torch.nn.GRU(gst_in_units, gst_units, gst_layers, batch_first=True)
def forward(self, speech):
"""Calculate forward propagation.
Args:
speech (Tensor): Batch of padded target features (B, Lmax, idim).
Returns:
Tensor: Reference embedding (B, gst_units)
"""
batch_size = speech.size(0)
xs = speech.unsqueeze(1) # (B, 1, Lmax, idim)
hs = self.convs(xs).transpose(1, 2) # (B, Lmax', conv_out_chans, idim')
time_length = hs.size(1)
hs = hs.contiguous().view(batch_size, time_length, -1) # (B, Lmax', gst_units)
self.gst.flatten_parameters()
# pack_padded_sequence(hs, speech_lens, enforce_sorted=False, batch_first=True)
_, ref_embs = self.gst(hs) # (gst_layers, batch_size, gst_units)
ref_embs = ref_embs[-1] # (batch_size, gst_units)
return ref_embs
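# Only the final GRU hidden state (ref_embs[-1]) is kept, compressing the whole
# reference mel into one utterance-level (B, gst_units) embedding; each stride-2
# conv halves both the time and the mel-bin axes beforehand.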
class StyleTokenLayer(torch.nn.Module):
"""Style token layer module.
This module is style token layer introduced in `Style Tokens: Unsupervised Style
Modeling, Control and Transfer in End-to-End Speech Synthesis`.
.. _`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End
Speech Synthesis`: https://arxiv.org/abs/1803.09017
Args:
ref_embed_dim (int, optional): Dimension of the input reference embedding.
gst_tokens (int, optional): The number of GST embeddings.
gst_token_dim (int, optional): Dimension of each GST embedding.
gst_heads (int, optional): The number of heads in GST multihead attention.
dropout_rate (float, optional): Dropout rate in multi-head attention.
"""
def __init__(
self,
ref_embed_dim: int = 128,
gst_tokens: int = 10,
gst_token_dim: int = 128,
gst_heads: int = 4,
dropout_rate: float = 0.0,
):
"""Initialize style token layer module."""
super(StyleTokenLayer, self).__init__()
gst_embs = torch.randn(gst_tokens, gst_token_dim // gst_heads)
self.register_parameter("gst_embs", torch.nn.Parameter(gst_embs))
self.mha = MultiHeadedAttention(q_dim=ref_embed_dim,
k_dim=gst_token_dim // gst_heads,
v_dim=gst_token_dim // gst_heads,
n_head=gst_heads,
n_feat=gst_token_dim,
dropout_rate=dropout_rate, )
def forward(self, ref_embs):
"""Calculate forward propagation.
Args:
ref_embs (Tensor): Reference embeddings (B, ref_embed_dim).
Returns:
Tensor: Style token embeddings (B, gst_token_dim).
"""
batch_size = ref_embs.size(0)
# (num_tokens, token_dim) -> (batch_size, num_tokens, token_dim)
gst_embs = torch.tanh(self.gst_embs).unsqueeze(0).expand(batch_size, -1, -1)
        # NOTE(kan-bayashi): Should we apply tanh?
        ref_embs = ref_embs.unsqueeze(1)  # (batch_size, 1, ref_embed_dim)
style_embs = self.mha(ref_embs, gst_embs, gst_embs, None)
return style_embs.squeeze(1)
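# The style token layer is a soft dictionary lookup: the reference embedding acts
# as a single query, the tanh-squashed GST bank supplies keys and values, and the
# attention output is the (B, gst_token_dim) mixture of style tokens.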
class MultiHeadedAttention(BaseMultiHeadedAttention):
"""Multi head attention module with different input dimension."""
def __init__(self, q_dim, k_dim, v_dim, n_head, n_feat, dropout_rate=0.0):
"""Initialize multi head attention module."""
# NOTE(kan-bayashi): Do not use super().__init__() here since we want to
# overwrite BaseMultiHeadedAttention.__init__() method.
torch.nn.Module.__init__(self)
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = torch.nn.Linear(q_dim, n_feat)
self.linear_k = torch.nn.Linear(k_dim, n_feat)
self.linear_v = torch.nn.Linear(v_dim, n_feat)
self.linear_out = torch.nn.Linear(n_feat, n_feat)
self.attn = None
self.dropout = torch.nn.Dropout(p=dropout_rate)
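# forward() is inherited from Attention.MultiHeadedAttention; only __init__ is
# overridden so that queries, keys, and values may have different input widths
# before being projected into the shared n_feat space.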