import math

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm

import attentions
import commons
import modules
from commons import init_weights

class TextEncoder256(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(256, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # 256 coarse pitch bins
        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, phone, pitch, lengths):
        if pitch is None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask
        # project to 2 * out_channels, then split into mean and log-std of the prior
        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask
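
# Shape reference for TextEncoder256.forward, read off the layer definitions above:
#   phone:   [b, t, 256]  float phone features (consumed by emb_phone, nn.Linear(256, h))
#   pitch:   [b, t]       int64 coarse-pitch indices in [0, 255] (emb_pitch), or None
#   lengths: [b]          valid frame counts, used to build x_mask
#   returns m, logs of shape [b, out_channels, t] and x_mask of shape [b, 1, t]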

class ResidualCouplingBlock(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(
                    channels,
                    hidden_channels,
                    kernel_size,
                    dilation_rate,
                    n_layers,
                    gin_channels=gin_channels,
                    mean_only=True,
                )
            )
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x

    def remove_weight_norm(self):
        for i in range(self.n_flows):
            self.flows[i * 2].remove_weight_norm()

class PosteriorEncoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask

    def remove_weight_norm(self):
        self.enc.remove_weight_norm()

class Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
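
# Note: with the padding scheme above ((k - u) // 2), each transposed convolution
# multiplies the time axis by its upsample rate (exactly so whenever k - u is even),
# so Generator.forward maps [b, initial_channel, t] to audio of shape
# roughly [b, 1, t * prod(upsample_rates)].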

class SynthesizerTrn256(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
        use_sdp=True,
        **kwargs
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels

        # prior (phone) encoder, waveform decoder, posterior encoder, and flow
        self.enc_p = TextEncoder256(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def infer(self, phone, phone_lengths, pitch):
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        # sample from the prior with a noise scale (temperature) of 0.66
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66) * x_mask
        z = self.flow(z_p, x_mask, g=None, reverse=True)
        # [:, :, :None] keeps the full time axis (no max-length truncation)
        o = self.dec((z * x_mask)[:, :, :None], g=None)
        return o, x_mask, (z, z_p, m_p, logs_p)
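
if __name__ == "__main__":
    # Minimal smoke-test sketch for SynthesizerTrn256.infer. The hyperparameters
    # below are illustrative assumptions chosen only to be mutually consistent;
    # a real checkpoint/config supplies its own values.
    net = SynthesizerTrn256(
        spec_channels=513,
        segment_size=32,
        inter_channels=192,
        hidden_channels=192,
        filter_channels=768,
        n_heads=2,
        n_layers=6,
        kernel_size=3,
        p_dropout=0.0,
        resblock="1",
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[10, 8, 2, 2],
        upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 4, 4],
    )
    net.eval()

    b, t = 1, 50
    phone = torch.randn(b, t, 256)                            # 256-dim phone features
    pitch = torch.randint(0, 256, (b, t), dtype=torch.long)   # coarse pitch indices
    lengths = torch.LongTensor([t])

    with torch.no_grad():
        audio, mask, _ = net.infer(phone, lengths, pitch)
    # With these assumed upsample rates, each frame expands to prod([10, 8, 2, 2]) = 320 samples.
    print(audio.shape)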