repo
stringlengths
1
99
file
stringlengths
13
215
code
stringlengths
12
59.2M
file_length
int64
12
59.2M
avg_line_length
float64
3.82
1.48M
max_line_length
int64
12
2.51M
extension_type
stringclasses
1 value
DeSpaWN
DeSpaWN-main/lib/despawn.py
# -*- coding: utf-8 -*- """ Title: Fully Learnable Deep Wavelet Transform for Unsupervised Monitoring of High-Frequency Time Series ------ (DeSpaWN) Description: -------------- Function to generate a DeSpaWN TF model. Please cite the corresponding paper: Michau, G., Frusque, G., & Fink, O. (2022). Fully learnable deep wavelet transform for unsupervised monitoring of high-frequency time series. Proceedings of the National Academy of Sciences, 119(8). Version: 1.0 -------- @author: Dr. Gabriel Michau, -------- Chair of Intelligent Maintenance Systems ETH Zürich Created on 15.01.2022 Licence: ---------- MIT License Copyright (c) 2022 Dr. Gabriel Michau Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" # /!\ Designed for tensorflow 2.1.X import tensorflow as tf import tensorflow.keras as keras from lib import despawnLayers as impLay def createDeSpaWN(inputSize=None, kernelInit=8, kernTrainable=True, level=1, lossCoeff='l1', kernelsConstraint='QMF', initHT=1.0, trainHT=True): """ Function that generates a TF DeSpaWN network Parameters ---------- inputSize : INT, optional Length of the time series. Network is more efficient if set. Can be set to None to allow various input size time series. The default is None. kernelInit : numpy array or LIST or INT, optional Initialisation of the kernel. If INT, random normal initialisation of size kernelInit. If array or LIST, then kernelInit is the kernel. The default is 8. kernTrainable : BOOL, optional Whether the kernels are trainable. Set to FALSE to compare to traditional wavelet decomposition. The default is True. level : INT, optional Number of layers in the network. Ideally should be log2 of the time series length. If bigger, additional layers will be of size 1. The default is 1. lossCoeff : STRING, optional To specify which loss on the wavelet coefficient to compute. Can be None (no loss computed) or 'l1'' for the L1-norm of the coefficients. The default is 'l1'. kernelsConstraint : STRING, optional Specify which version of DeSpaWN to implement. Refers to the paper (https://arxiv.org/pdf/2105.00899.pdf) [Section 4.4 Ablation Study] for more details. The default is 'CQF'. initHT : FLOAT, optional Value to initialise the Hard-thresholding coefficient. The default is 1.0. trainHT : BOOL, optional Whether the hard-thresholding coefficient is trainable or not. Set to FALSE to compare to traditiona wavelet decomposition. The default is True. 
Returns ------- model1: a TF neural network with outputs the reconstructed signals and the loss on the wavelet coefficients model2: a TF neural network with outputs t the reconstructed signals and wavelet coefficients model1 and model2 share their architecture, weigths and parameters. Training one of the two changes both models """ input_shape = (inputSize,1,1) inputSig = keras.layers.Input(shape=input_shape, name='input_Raw') g = inputSig if kernelsConstraint=='CQF': kern = impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) kernelsG = [kern for lev in range(level)] kernelsH = kernelsG kernelsGT = kernelsG kernelsHT = kernelsG elif kernelsConstraint=='PerLayer': kernelsG = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for lev in range(level)] kernelsH = kernelsG kernelsGT = kernelsG kernelsHT = kernelsG elif kernelsConstraint=='PerFilter': kernelsG = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for lev in range(level)] kernelsH = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for lev in range(level)] kernelsGT = kernelsG kernelsHT = kernelsH elif kernelsConstraint=='Free': kernelsG = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for lev in range(level)] kernelsH = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for lev in range(level)] kernelsGT = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for lev in range(level)] kernelsHT = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for lev in range(level)] hl = [] inSizel = [] # Decomposition for lev in range(level): inSizel.append(tf.shape(g)) hl.append(impLay.HardThresholdAssym(init=initHT,trainBias=trainHT)(impLay.HighPassWave()([g,kernelsH[lev]]))) g = impLay.LowPassWave()([g,kernelsG[lev]]) g = impLay.HardThresholdAssym(init=initHT,trainBias=trainHT)(g) # save intermediate coefficients to output them gint = g # Reconstruction for lev in range(level-1,-1,-1): h = impLay.HighPassTrans()([hl[lev],kernelsHT[lev],inSizel[lev]]) g = 
impLay.LowPassTrans()([g,kernelsGT[lev],inSizel[lev]]) g = keras.layers.Add()([g,h]) # Compute specified loss on coefficients if not lossCoeff: vLossCoeff = tf.zeros((1,1,1,1)) elif lossCoeff=='l1': # L1-Sum vLossCoeff = tf.math.reduce_mean(tf.math.abs(tf.concat([gint]+hl,axis=1)),axis=1,keepdims=True) else: raise ValueError('Could not understand value in \'lossCoeff\'. It should be either \'l1\' or \'None\'') return keras.models.Model(inputSig,[g,vLossCoeff]), keras.models.Model(inputSig,[g,gint,hl[::-1]]) #### /!\ In tf > 2.2.0 each output variable is 1 output. The second model above output 3 variables and not level+2 as is tf 2.1.0
6,679
43.238411
144
py
TTS
TTS-master/TTS/speaker_encoder/losses.py
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np # adapted from https://github.com/cvqluu/GE2E-Loss class GE2ELoss(nn.Module): def __init__(self, init_w=10.0, init_b=-5.0, loss_method="softmax"): """ Implementation of the Generalized End-to-End loss defined in https://arxiv.org/abs/1710.10467 [1] Accepts an input of size (N, M, D) where N is the number of speakers in the batch, M is the number of utterances per speaker, and D is the dimensionality of the embedding vector (e.g. d-vector) Args: - init_w (float): defines the initial value of w in Equation (5) of [1] - init_b (float): definies the initial value of b in Equation (5) of [1] """ super(GE2ELoss, self).__init__() # pylint: disable=E1102 self.w = nn.Parameter(torch.tensor(init_w)) # pylint: disable=E1102 self.b = nn.Parameter(torch.tensor(init_b)) self.loss_method = loss_method print(' > Initialised Generalized End-to-End loss') assert self.loss_method in ["softmax", "contrast"] if self.loss_method == "softmax": self.embed_loss = self.embed_loss_softmax if self.loss_method == "contrast": self.embed_loss = self.embed_loss_contrast # pylint: disable=R0201 def calc_new_centroids(self, dvecs, centroids, spkr, utt): """ Calculates the new centroids excluding the reference utterance """ excl = torch.cat((dvecs[spkr, :utt], dvecs[spkr, utt + 1 :])) excl = torch.mean(excl, 0) new_centroids = [] for i, centroid in enumerate(centroids): if i == spkr: new_centroids.append(excl) else: new_centroids.append(centroid) return torch.stack(new_centroids) def calc_cosine_sim(self, dvecs, centroids): """ Make the cosine similarity matrix with dims (N,M,N) """ cos_sim_matrix = [] for spkr_idx, speaker in enumerate(dvecs): cs_row = [] for utt_idx, utterance in enumerate(speaker): new_centroids = self.calc_new_centroids( dvecs, centroids, spkr_idx, utt_idx ) # vector based cosine similarity for speed cs_row.append( torch.clamp( torch.mm( utterance.unsqueeze(1).transpose(0, 1), 
new_centroids.transpose(0, 1), ) / (torch.norm(utterance) * torch.norm(new_centroids, dim=1)), 1e-6, ) ) cs_row = torch.cat(cs_row, dim=0) cos_sim_matrix.append(cs_row) return torch.stack(cos_sim_matrix) # pylint: disable=R0201 def embed_loss_softmax(self, dvecs, cos_sim_matrix): """ Calculates the loss on each embedding $L(e_{ji})$ by taking softmax """ N, M, _ = dvecs.shape L = [] for j in range(N): L_row = [] for i in range(M): L_row.append(-F.log_softmax(cos_sim_matrix[j, i], 0)[j]) L_row = torch.stack(L_row) L.append(L_row) return torch.stack(L) # pylint: disable=R0201 def embed_loss_contrast(self, dvecs, cos_sim_matrix): """ Calculates the loss on each embedding $L(e_{ji})$ by contrast loss with closest centroid """ N, M, _ = dvecs.shape L = [] for j in range(N): L_row = [] for i in range(M): centroids_sigmoids = torch.sigmoid(cos_sim_matrix[j, i]) excl_centroids_sigmoids = torch.cat( (centroids_sigmoids[:j], centroids_sigmoids[j + 1 :]) ) L_row.append( 1.0 - torch.sigmoid(cos_sim_matrix[j, i, j]) + torch.max(excl_centroids_sigmoids) ) L_row = torch.stack(L_row) L.append(L_row) return torch.stack(L) def forward(self, dvecs): """ Calculates the GE2E loss for an input of dimensions (num_speakers, num_utts_per_speaker, dvec_feats) """ centroids = torch.mean(dvecs, 1) cos_sim_matrix = self.calc_cosine_sim(dvecs, centroids) torch.clamp(self.w, 1e-6) cos_sim_matrix = self.w * cos_sim_matrix + self.b L = self.embed_loss(dvecs, cos_sim_matrix) return L.mean() # adapted from https://github.com/clovaai/voxceleb_trainer/blob/master/loss/angleproto.py class AngleProtoLoss(nn.Module): """ Implementation of the Angular Prototypical loss defined in https://arxiv.org/abs/2003.11982 Accepts an input of size (N, M, D) where N is the number of speakers in the batch, M is the number of utterances per speaker, and D is the dimensionality of the embedding vector Args: - init_w (float): defines the initial value of w - init_b (float): definies the initial value of b """ def 
__init__(self, init_w=10.0, init_b=-5.0): super(AngleProtoLoss, self).__init__() # pylint: disable=E1102 self.w = nn.Parameter(torch.tensor(init_w)) # pylint: disable=E1102 self.b = nn.Parameter(torch.tensor(init_b)) self.criterion = torch.nn.CrossEntropyLoss() print(' > Initialised Angular Prototypical loss') def forward(self, x): """ Calculates the AngleProto loss for an input of dimensions (num_speakers, num_utts_per_speaker, dvec_feats) """ out_anchor = torch.mean(x[:, 1:, :], 1) out_positive = x[:, 0, :] num_speakers = out_anchor.size()[0] cos_sim_matrix = F.cosine_similarity(out_positive.unsqueeze(-1).expand(-1, -1, num_speakers), out_anchor.unsqueeze(-1).expand(-1, -1, num_speakers).transpose(0, 2)) torch.clamp(self.w, 1e-6) cos_sim_matrix = cos_sim_matrix * self.w + self.b label = torch.from_numpy(np.asarray(range(0, num_speakers))).to(cos_sim_matrix.device) L = self.criterion(cos_sim_matrix, label) return L
6,369
38.565217
172
py
TTS
TTS-master/TTS/speaker_encoder/model.py
import torch from torch import nn class LSTMWithProjection(nn.Module): def __init__(self, input_size, hidden_size, proj_size): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.proj_size = proj_size self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True) self.linear = nn.Linear(hidden_size, proj_size, bias=False) def forward(self, x): self.lstm.flatten_parameters() o, (_, _) = self.lstm(x) return self.linear(o) class LSTMWithoutProjection(nn.Module): def __init__(self, input_dim, lstm_dim, proj_dim, num_lstm_layers): super().__init__() self.lstm = nn.LSTM(input_size=input_dim, hidden_size=lstm_dim, num_layers=num_lstm_layers, batch_first=True) self.linear = nn.Linear(lstm_dim, proj_dim, bias=True) self.relu = nn.ReLU() def forward(self, x): _, (hidden, _) = self.lstm(x) return self.relu(self.linear(hidden[-1])) class SpeakerEncoder(nn.Module): def __init__(self, input_dim, proj_dim=256, lstm_dim=768, num_lstm_layers=3, use_lstm_with_projection=True): super().__init__() self.use_lstm_with_projection = use_lstm_with_projection layers = [] # choise LSTM layer if use_lstm_with_projection: layers.append(LSTMWithProjection(input_dim, lstm_dim, proj_dim)) for _ in range(num_lstm_layers - 1): layers.append(LSTMWithProjection(proj_dim, lstm_dim, proj_dim)) self.layers = nn.Sequential(*layers) else: self.layers = LSTMWithoutProjection(input_dim, lstm_dim, proj_dim, num_lstm_layers) self._init_layers() def _init_layers(self): for name, param in self.layers.named_parameters(): if "bias" in name: nn.init.constant_(param, 0.0) elif "weight" in name: nn.init.xavier_normal_(param) def forward(self, x): # TODO: implement state passing for lstms d = self.layers(x) if self.use_lstm_with_projection: d = torch.nn.functional.normalize(d[:, -1], p=2, dim=1) else: d = torch.nn.functional.normalize(d, p=2, dim=1) return d @torch.no_grad() def inference(self, x): d = self.layers.forward(x) if self.use_lstm_with_projection: d = 
torch.nn.functional.normalize(d[:, -1], p=2, dim=1) else: d = torch.nn.functional.normalize(d, p=2, dim=1) return d def compute_embedding(self, x, num_frames=160, overlap=0.5): """ Generate embeddings for a batch of utterances x: 1xTxD """ num_overlap = int(num_frames * overlap) max_len = x.shape[1] embed = None cur_iter = 0 for offset in range(0, max_len, num_frames - num_overlap): cur_iter += 1 end_offset = min(x.shape[1], offset + num_frames) frames = x[:, offset:end_offset] if embed is None: embed = self.inference(frames) else: embed += self.inference(frames) return embed / cur_iter def batch_compute_embedding(self, x, seq_lens, num_frames=160, overlap=0.5): """ Generate embeddings for a batch of utterances x: BxTxD """ num_overlap = num_frames * overlap max_len = x.shape[1] embed = None num_iters = seq_lens / (num_frames - num_overlap) cur_iter = 0 for offset in range(0, max_len, num_frames - num_overlap): cur_iter += 1 end_offset = min(x.shape[1], offset + num_frames) frames = x[:, offset:end_offset] if embed is None: embed = self.inference(frames) else: embed[cur_iter <= num_iters, :] += self.inference( frames[cur_iter <= num_iters, :, :] ) return embed / num_iters
4,118
35.451327
112
py
TTS
TTS-master/TTS/speaker_encoder/dataset.py
import numpy import numpy as np import queue import torch import random from torch.utils.data import Dataset from tqdm import tqdm class MyDataset(Dataset): def __init__(self, ap, meta_data, voice_len=1.6, num_speakers_in_batch=64, storage_size=1, sample_from_storage_p=0.5, additive_noise=0, num_utter_per_speaker=10, skip_speakers=False, verbose=False): """ Args: ap (TTS.tts.utils.AudioProcessor): audio processor object. meta_data (list): list of dataset instances. seq_len (int): voice segment length in seconds. verbose (bool): print diagnostic information. """ self.items = meta_data self.sample_rate = ap.sample_rate self.voice_len = voice_len self.seq_len = int(voice_len * self.sample_rate) self.num_speakers_in_batch = num_speakers_in_batch self.num_utter_per_speaker = num_utter_per_speaker self.skip_speakers = skip_speakers self.ap = ap self.verbose = verbose self.__parse_items() self.storage = queue.Queue(maxsize=storage_size*num_speakers_in_batch) self.sample_from_storage_p = float(sample_from_storage_p) self.additive_noise = float(additive_noise) if self.verbose: print("\n > DataLoader initialization") print(f" | > Speakers per Batch: {num_speakers_in_batch}") print(f" | > Storage Size: {self.storage.maxsize} speakers, each with {num_utter_per_speaker} utters") print(f" | > Sample_from_storage_p : {self.sample_from_storage_p}") print(f" | > Noise added : {self.additive_noise}") print(f" | > Number of instances : {len(self.items)}") print(f" | > Sequence length: {self.seq_len}") print(f" | > Num speakers: {len(self.speakers)}") def load_wav(self, filename): audio = self.ap.load_wav(filename, sr=self.ap.sample_rate) return audio def load_data(self, idx): text, wav_file, speaker_name = self.items[idx] wav = np.asarray(self.load_wav(wav_file), dtype=np.float32) mel = self.ap.melspectrogram(wav).astype("float32") # sample seq_len assert text.size > 0, self.items[idx][1] assert wav.size > 0, self.items[idx][1] sample = { "mel": mel, "item_idx": self.items[idx][1], 
"speaker_name": speaker_name, } return sample def __parse_items(self): self.speaker_to_utters = {} for i in self.items: path_ = i[1] speaker_ = i[2] if speaker_ in self.speaker_to_utters.keys(): self.speaker_to_utters[speaker_].append(path_) else: self.speaker_to_utters[speaker_] = [path_, ] if self.skip_speakers: self.speaker_to_utters = {k: v for (k, v) in self.speaker_to_utters.items() if len(v) >= self.num_utter_per_speaker} self.speakers = [k for (k, v) in self.speaker_to_utters.items()] # def __parse_items(self): # """ # Find unique speaker ids and create a dict mapping utterances from speaker id # """ # speakers = list({item[-1] for item in self.items}) # self.speaker_to_utters = {} # self.speakers = [] # for speaker in speakers: # speaker_utters = [item[1] for item in self.items if item[2] == speaker] # if len(speaker_utters) < self.num_utter_per_speaker and self.skip_speakers: # print( # f" [!] Skipped speaker {speaker}. Not enough utterances {self.num_utter_per_speaker} vs {len(speaker_utters)}." # ) # else: # self.speakers.append(speaker) # self.speaker_to_utters[speaker] = speaker_utters def __len__(self): return int(1e10) def __sample_speaker(self): speaker = random.sample(self.speakers, 1)[0] if self.num_utter_per_speaker > len(self.speaker_to_utters[speaker]): utters = random.choices( self.speaker_to_utters[speaker], k=self.num_utter_per_speaker ) else: utters = random.sample( self.speaker_to_utters[speaker], self.num_utter_per_speaker ) return speaker, utters def __sample_speaker_utterances(self, speaker): """ Sample all M utterances for the given speaker. 
""" wavs = [] labels = [] for _ in range(self.num_utter_per_speaker): # TODO:dummy but works while True: if len(self.speaker_to_utters[speaker]) > 0: utter = random.sample(self.speaker_to_utters[speaker], 1)[0] else: self.speakers.remove(speaker) speaker, _ = self.__sample_speaker() continue wav = self.load_wav(utter) if wav.shape[0] - self.seq_len > 0: break self.speaker_to_utters[speaker].remove(utter) wavs.append(wav) labels.append(speaker) return wavs, labels def __getitem__(self, idx): speaker, _ = self.__sample_speaker() return speaker def collate_fn(self, batch): labels = [] feats = [] for speaker in batch: if random.random() < self.sample_from_storage_p and self.storage.full(): # sample from storage (if full), ignoring the speaker wavs_, labels_ = random.choice(self.storage.queue) else: # don't sample from storage, but from HDD wavs_, labels_ = self.__sample_speaker_utterances(speaker) # if storage is full, remove an item if self.storage.full(): _ = self.storage.get_nowait() # put the newly loaded item into storage self.storage.put_nowait((wavs_, labels_)) # add random gaussian noise if self.additive_noise > 0: noises_ = [numpy.random.normal(0, self.additive_noise, size=len(w)) for w in wavs_] wavs_ = [wavs_[i] + noises_[i] for i in range(len(wavs_))] # get a random subset of each of the wavs and convert to MFCC. offsets_ = [random.randint(0, wav.shape[0] - self.seq_len) for wav in wavs_] mels_ = [self.ap.melspectrogram(wavs_[i][offsets_[i]: offsets_[i] + self.seq_len]) for i in range(len(wavs_))] feats_ = [torch.FloatTensor(mel) for mel in mels_] labels.append(labels_) feats.extend(feats_) feats = torch.stack(feats) return feats.transpose(1, 2), labels
6,882
39.488235
133
py
TTS
TTS-master/TTS/speaker_encoder/utils/generic_utils.py
import datetime import os import re import torch from TTS.speaker_encoder.model import SpeakerEncoder from TTS.utils.generic_utils import check_argument def to_camel(text): text = text.capitalize() return re.sub(r'(?!^)_([a-zA-Z])', lambda m: m.group(1).upper(), text) def setup_model(c): model = SpeakerEncoder(c.model['input_dim'], c.model['proj_dim'], c.model['lstm_dim'], c.model['num_lstm_layers']) return model def save_checkpoint(model, optimizer, model_loss, out_path, current_step, epoch): checkpoint_path = 'checkpoint_{}.pth.tar'.format(current_step) checkpoint_path = os.path.join(out_path, checkpoint_path) print(" | | > Checkpoint saving : {}".format(checkpoint_path)) new_state_dict = model.state_dict() state = { 'model': new_state_dict, 'optimizer': optimizer.state_dict() if optimizer is not None else None, 'step': current_step, 'epoch': epoch, 'loss': model_loss, 'date': datetime.date.today().strftime("%B %d, %Y"), } torch.save(state, checkpoint_path) def save_best_model(model, optimizer, model_loss, best_loss, out_path, current_step): if model_loss < best_loss: new_state_dict = model.state_dict() state = { 'model': new_state_dict, 'optimizer': optimizer.state_dict(), 'step': current_step, 'loss': model_loss, 'date': datetime.date.today().strftime("%B %d, %Y"), } best_loss = model_loss bestmodel_path = 'best_model.pth.tar' bestmodel_path = os.path.join(out_path, bestmodel_path) print("\n > BEST MODEL ({0:.5f}) : {1:}".format( model_loss, bestmodel_path)) torch.save(state, bestmodel_path) return best_loss def check_config_speaker_encoder(c): """Check the config.json file of the speaker encoder""" check_argument('run_name', c, restricted=True, val_type=str) check_argument('run_description', c, val_type=str) # audio processing parameters check_argument('audio', c, restricted=True, val_type=dict) check_argument('num_mels', c['audio'], restricted=True, val_type=int, min_val=10, max_val=2056) check_argument('fft_size', c['audio'], restricted=True, val_type=int, 
min_val=128, max_val=4058) check_argument('sample_rate', c['audio'], restricted=True, val_type=int, min_val=512, max_val=100000) check_argument('frame_length_ms', c['audio'], restricted=True, val_type=float, min_val=10, max_val=1000, alternative='win_length') check_argument('frame_shift_ms', c['audio'], restricted=True, val_type=float, min_val=1, max_val=1000, alternative='hop_length') check_argument('preemphasis', c['audio'], restricted=True, val_type=float, min_val=0, max_val=1) check_argument('min_level_db', c['audio'], restricted=True, val_type=int, min_val=-1000, max_val=10) check_argument('ref_level_db', c['audio'], restricted=True, val_type=int, min_val=0, max_val=1000) check_argument('power', c['audio'], restricted=True, val_type=float, min_val=1, max_val=5) check_argument('griffin_lim_iters', c['audio'], restricted=True, val_type=int, min_val=10, max_val=1000) # training parameters check_argument('loss', c, enum_list=['ge2e', 'angleproto'], restricted=True, val_type=str) check_argument('grad_clip', c, restricted=True, val_type=float) check_argument('epochs', c, restricted=True, val_type=int, min_val=1) check_argument('lr', c, restricted=True, val_type=float, min_val=0) check_argument('lr_decay', c, restricted=True, val_type=bool) check_argument('warmup_steps', c, restricted=True, val_type=int, min_val=0) check_argument('tb_model_param_stats', c, restricted=True, val_type=bool) check_argument('num_speakers_in_batch', c, restricted=True, val_type=int) check_argument('num_loader_workers', c, restricted=True, val_type=int) check_argument('wd', c, restricted=True, val_type=float, min_val=0.0, max_val=1.0) # checkpoint and output parameters check_argument('steps_plot_stats', c, restricted=True, val_type=int) check_argument('checkpoint', c, restricted=True, val_type=bool) check_argument('save_step', c, restricted=True, val_type=int) check_argument('print_step', c, restricted=True, val_type=int) check_argument('output_path', c, restricted=True, val_type=str) # 
model parameters check_argument('model', c, restricted=True, val_type=dict) check_argument('input_dim', c['model'], restricted=True, val_type=int) check_argument('proj_dim', c['model'], restricted=True, val_type=int) check_argument('lstm_dim', c['model'], restricted=True, val_type=int) check_argument('num_lstm_layers', c['model'], restricted=True, val_type=int) check_argument('use_lstm_with_projection', c['model'], restricted=True, val_type=bool) # in-memory storage parameters check_argument('storage', c, restricted=True, val_type=dict) check_argument('sample_from_storage_p', c['storage'], restricted=True, val_type=float, min_val=0.0, max_val=1.0) check_argument('storage_size', c['storage'], restricted=True, val_type=int, min_val=1, max_val=100) check_argument('additive_noise', c['storage'], restricted=True, val_type=float, min_val=0.0, max_val=1.0) # datasets - checking only the first entry check_argument('datasets', c, restricted=True, val_type=list) for dataset_entry in c['datasets']: check_argument('name', dataset_entry, restricted=True, val_type=str) check_argument('path', dataset_entry, restricted=True, val_type=str) check_argument('meta_file_train', dataset_entry, restricted=True, val_type=[str, list]) check_argument('meta_file_val', dataset_entry, restricted=True, val_type=str)
5,843
48.525424
134
py
TTS
TTS-master/TTS/bin/convert_tacotron2_tflite.py
# Convert Tensorflow Tacotron2 model to TF-Lite binary import argparse from TTS.utils.io import load_config from TTS.tts.utils.text.symbols import symbols, phonemes from TTS.tts.tf.utils.generic_utils import setup_model from TTS.tts.tf.utils.io import load_checkpoint from TTS.tts.tf.utils.tflite import convert_tacotron2_to_tflite parser = argparse.ArgumentParser() parser.add_argument('--tf_model', type=str, help='Path to target torch model to be converted to TF.') parser.add_argument('--config_path', type=str, help='Path to config file of torch model.') parser.add_argument('--output_path', type=str, help='path to tflite output binary.') args = parser.parse_args() # Set constants CONFIG = load_config(args.config_path) # load the model c = CONFIG num_speakers = 0 num_chars = len(phonemes) if c.use_phonemes else len(symbols) model = setup_model(num_chars, num_speakers, c, enable_tflite=True) model.build_inference() model = load_checkpoint(model, args.tf_model) model.decoder.set_max_decoder_steps(1000) # create tflite model tflite_model = convert_tacotron2_to_tflite(model, output_path=args.output_path)
1,243
31.736842
79
py
TTS
TTS-master/TTS/bin/train_tacotron.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import argparse import glob import os import sys import time import traceback from random import randrange import numpy as np import torch from torch.utils.data import DataLoader from TTS.tts.datasets.preprocess import load_meta_data from TTS.tts.datasets.TTSDataset import MyDataset from TTS.tts.layers.losses import TacotronLoss from TTS.tts.utils.generic_utils import check_config_tts, setup_model from TTS.tts.utils.io import save_best_model, save_checkpoint from TTS.tts.utils.measures import alignment_diagonal_score from TTS.tts.utils.speakers import parse_speakers from TTS.tts.utils.synthesis import synthesis from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols from TTS.tts.utils.visual import plot_alignment, plot_spectrogram from TTS.utils.audio import AudioProcessor from TTS.utils.console_logger import ConsoleLogger from TTS.utils.distribute import (DistributedSampler, apply_gradient_allreduce, init_distributed, reduce_tensor) from TTS.utils.generic_utils import (KeepAverage, count_parameters, create_experiment_folder, get_git_branch, remove_experiment_folder, set_init_dict) from TTS.utils.io import copy_model_files, load_config from TTS.utils.radam import RAdam from TTS.utils.tensorboard_logger import TensorboardLogger from TTS.utils.training import (NoamLR, adam_weight_decay, check_update, gradual_training_scheduler, set_weight_decay, setup_torch_training_env) use_cuda, num_gpus = setup_torch_training_env(True, False) def setup_loader(ap, r, is_val=False, verbose=False, dataset=None): if is_val and not c.run_eval: loader = None else: if dataset is None: dataset = MyDataset( r, c.text_cleaner, compute_linear_spec=c.model.lower() == 'tacotron', meta_data=meta_data_eval if is_val else meta_data_train, ap=ap, tp=c.characters if 'characters' in c.keys() else None, add_blank=c['add_blank'] if 'add_blank' in c.keys() else False, batch_group_size=0 if is_val else c.batch_group_size * c.batch_size, 
min_seq_len=c.min_seq_len, max_seq_len=c.max_seq_len, phoneme_cache_path=c.phoneme_cache_path, use_phonemes=c.use_phonemes, phoneme_language=c.phoneme_language, enable_eos_bos=c.enable_eos_bos_chars, verbose=verbose, speaker_mapping=speaker_mapping if c.use_speaker_embedding and c.use_external_speaker_embedding_file else None) if c.use_phonemes and c.compute_input_seq_cache: # precompute phonemes to have a better estimate of sequence lengths. dataset.compute_input_seq(c.num_loader_workers) dataset.sort_items() sampler = DistributedSampler(dataset) if num_gpus > 1 else None loader = DataLoader( dataset, batch_size=c.eval_batch_size if is_val else c.batch_size, shuffle=False, collate_fn=dataset.collate_fn, drop_last=False, sampler=sampler, num_workers=c.num_val_loader_workers if is_val else c.num_loader_workers, pin_memory=False) return loader def format_data(data): # setup input data text_input = data[0] text_lengths = data[1] speaker_names = data[2] linear_input = data[3] if c.model in ["Tacotron"] else None mel_input = data[4] mel_lengths = data[5] stop_targets = data[6] max_text_length = torch.max(text_lengths.float()) max_spec_length = torch.max(mel_lengths.float()) if c.use_speaker_embedding: if c.use_external_speaker_embedding_file: speaker_embeddings = data[8] speaker_ids = None else: speaker_ids = [ speaker_mapping[speaker_name] for speaker_name in speaker_names ] speaker_ids = torch.LongTensor(speaker_ids) speaker_embeddings = None else: speaker_embeddings = None speaker_ids = None # set stop targets view, we predict a single stop token per iteration. 
stop_targets = stop_targets.view(text_input.shape[0], stop_targets.size(1) // c.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze(2) # dispatch data to GPU if use_cuda: text_input = text_input.cuda(non_blocking=True) text_lengths = text_lengths.cuda(non_blocking=True) mel_input = mel_input.cuda(non_blocking=True) mel_lengths = mel_lengths.cuda(non_blocking=True) linear_input = linear_input.cuda(non_blocking=True) if c.model in ["Tacotron"] else None stop_targets = stop_targets.cuda(non_blocking=True) if speaker_ids is not None: speaker_ids = speaker_ids.cuda(non_blocking=True) if speaker_embeddings is not None: speaker_embeddings = speaker_embeddings.cuda(non_blocking=True) return text_input, text_lengths, mel_input, mel_lengths, linear_input, stop_targets, speaker_ids, speaker_embeddings, max_text_length, max_spec_length def train(data_loader, model, criterion, optimizer, optimizer_st, scheduler, ap, global_step, epoch, scaler, scaler_st): model.train() epoch_time = 0 keep_avg = KeepAverage() if use_cuda: batch_n_iter = int( len(data_loader.dataset) / (c.batch_size * num_gpus)) else: batch_n_iter = int(len(data_loader.dataset) / c.batch_size) end_time = time.time() c_logger.print_train_start() for num_iter, data in enumerate(data_loader): start_time = time.time() # format data text_input, text_lengths, mel_input, mel_lengths, linear_input, stop_targets, speaker_ids, speaker_embeddings, max_text_length, max_spec_length = format_data(data) loader_time = time.time() - end_time global_step += 1 # setup lr if c.noam_schedule: scheduler.step() optimizer.zero_grad() if optimizer_st: optimizer_st.zero_grad() with torch.cuda.amp.autocast(enabled=c.mixed_precision): # forward pass model if c.bidirectional_decoder or c.double_decoder_consistency: decoder_output, postnet_output, alignments, stop_tokens, decoder_backward_output, alignments_backward = model( text_input, text_lengths, mel_input, mel_lengths, speaker_ids=speaker_ids, 
speaker_embeddings=speaker_embeddings) else: decoder_output, postnet_output, alignments, stop_tokens = model( text_input, text_lengths, mel_input, mel_lengths, speaker_ids=speaker_ids, speaker_embeddings=speaker_embeddings) decoder_backward_output = None alignments_backward = None # set the [alignment] lengths wrt reduction factor for guided attention if mel_lengths.max() % model.decoder.r != 0: alignment_lengths = (mel_lengths + (model.decoder.r - (mel_lengths.max() % model.decoder.r))) // model.decoder.r else: alignment_lengths = mel_lengths // model.decoder.r # compute loss loss_dict = criterion(postnet_output, decoder_output, mel_input, linear_input, stop_tokens, stop_targets, mel_lengths, decoder_backward_output, alignments, alignment_lengths, alignments_backward, text_lengths) # check nan loss if torch.isnan(loss_dict['loss']).any(): raise RuntimeError(f'Detected NaN loss at step {global_step}.') # optimizer step if c.mixed_precision: # model optimizer step in mixed precision mode scaler.scale(loss_dict['loss']).backward() scaler.unscale_(optimizer) optimizer, current_lr = adam_weight_decay(optimizer) grad_norm, _ = check_update(model, c.grad_clip, ignore_stopnet=True) scaler.step(optimizer) scaler.update() # stopnet optimizer step if c.separate_stopnet: scaler_st.scale( loss_dict['stopnet_loss']).backward() scaler.unscale_(optimizer_st) optimizer_st, _ = adam_weight_decay(optimizer_st) grad_norm_st, _ = check_update(model.decoder.stopnet, 1.0) scaler_st.step(optimizer) scaler_st.update() else: grad_norm_st = 0 else: # main model optimizer step loss_dict['loss'].backward() optimizer, current_lr = adam_weight_decay(optimizer) grad_norm, _ = check_update(model, c.grad_clip, ignore_stopnet=True) optimizer.step() # stopnet optimizer step if c.separate_stopnet: loss_dict['stopnet_loss'].backward() optimizer_st, _ = adam_weight_decay(optimizer_st) grad_norm_st, _ = check_update(model.decoder.stopnet, 1.0) optimizer_st.step() else: grad_norm_st = 0 # compute 
alignment error (the lower the better ) align_error = 1 - alignment_diagonal_score(alignments) loss_dict['align_error'] = align_error step_time = time.time() - start_time epoch_time += step_time # aggregate losses from processes if num_gpus > 1: loss_dict['postnet_loss'] = reduce_tensor(loss_dict['postnet_loss'].data, num_gpus) loss_dict['decoder_loss'] = reduce_tensor(loss_dict['decoder_loss'].data, num_gpus) loss_dict['loss'] = reduce_tensor(loss_dict['loss'] .data, num_gpus) loss_dict['stopnet_loss'] = reduce_tensor(loss_dict['stopnet_loss'].data, num_gpus) if c.stopnet else loss_dict['stopnet_loss'] # detach loss values loss_dict_new = dict() for key, value in loss_dict.items(): if isinstance(value, (int, float)): loss_dict_new[key] = value else: loss_dict_new[key] = value.item() loss_dict = loss_dict_new # update avg stats update_train_values = dict() for key, value in loss_dict.items(): update_train_values['avg_' + key] = value update_train_values['avg_loader_time'] = loader_time update_train_values['avg_step_time'] = step_time keep_avg.update_values(update_train_values) # print training progress if global_step % c.print_step == 0: log_dict = { "max_spec_length": [max_spec_length, 1], # value, precision "max_text_length": [max_text_length, 1], "step_time": [step_time, 4], "loader_time": [loader_time, 2], "current_lr": current_lr, } c_logger.print_train_step(batch_n_iter, num_iter, global_step, log_dict, loss_dict, keep_avg.avg_values) if args.rank == 0: # Plot Training Iter Stats # reduce TB load if global_step % c.tb_plot_step == 0: iter_stats = { "lr": current_lr, "grad_norm": grad_norm, "grad_norm_st": grad_norm_st, "step_time": step_time } iter_stats.update(loss_dict) tb_logger.tb_train_iter_stats(global_step, iter_stats) if global_step % c.save_step == 0: if c.checkpoint: # save model save_checkpoint(model, optimizer, global_step, epoch, model.decoder.r, OUT_PATH, optimizer_st=optimizer_st, model_loss=loss_dict['postnet_loss'], scaler=scaler.state_dict() 
if c.mixed_precision else None) # Diagnostic visualizations const_spec = postnet_output[0].data.cpu().numpy() gt_spec = linear_input[0].data.cpu().numpy() if c.model in [ "Tacotron", "TacotronGST" ] else mel_input[0].data.cpu().numpy() align_img = alignments[0].data.cpu().numpy() figures = { "prediction": plot_spectrogram(const_spec, ap, output_fig=False), "ground_truth": plot_spectrogram(gt_spec, ap, output_fig=False), "alignment": plot_alignment(align_img, output_fig=False), } if c.bidirectional_decoder or c.double_decoder_consistency: figures["alignment_backward"] = plot_alignment(alignments_backward[0].data.cpu().numpy(), output_fig=False) tb_logger.tb_train_figures(global_step, figures) # Sample audio if c.model in ["Tacotron", "TacotronGST"]: train_audio = ap.inv_spectrogram(const_spec.T) else: train_audio = ap.inv_melspectrogram(const_spec.T) tb_logger.tb_train_audios(global_step, {'TrainAudio': train_audio}, c.audio["sample_rate"]) end_time = time.time() # print epoch stats c_logger.print_train_epoch_end(global_step, epoch, epoch_time, keep_avg) # Plot Epoch Stats if args.rank == 0: epoch_stats = {"epoch_time": epoch_time} epoch_stats.update(keep_avg.avg_values) tb_logger.tb_train_epoch_stats(global_step, epoch_stats) if c.tb_model_param_stats: tb_logger.tb_model_weights(model, global_step) return keep_avg.avg_values, global_step @torch.no_grad() def evaluate(data_loader, model, criterion, ap, global_step, epoch): model.eval() epoch_time = 0 keep_avg = KeepAverage() c_logger.print_eval_start() if data_loader is not None: for num_iter, data in enumerate(data_loader): start_time = time.time() # format data text_input, text_lengths, mel_input, mel_lengths, linear_input, stop_targets, speaker_ids, speaker_embeddings, _, _ = format_data(data) assert mel_input.shape[1] % model.decoder.r == 0 # forward pass model if c.bidirectional_decoder or c.double_decoder_consistency: decoder_output, postnet_output, alignments, stop_tokens, decoder_backward_output, 
alignments_backward = model( text_input, text_lengths, mel_input, speaker_ids=speaker_ids, speaker_embeddings=speaker_embeddings) else: decoder_output, postnet_output, alignments, stop_tokens = model( text_input, text_lengths, mel_input, speaker_ids=speaker_ids, speaker_embeddings=speaker_embeddings) decoder_backward_output = None alignments_backward = None # set the alignment lengths wrt reduction factor for guided attention if mel_lengths.max() % model.decoder.r != 0: alignment_lengths = (mel_lengths + (model.decoder.r - (mel_lengths.max() % model.decoder.r))) // model.decoder.r else: alignment_lengths = mel_lengths // model.decoder.r # compute loss loss_dict = criterion(postnet_output, decoder_output, mel_input, linear_input, stop_tokens, stop_targets, mel_lengths, decoder_backward_output, alignments, alignment_lengths, alignments_backward, text_lengths) # step time step_time = time.time() - start_time epoch_time += step_time # compute alignment score align_error = 1 - alignment_diagonal_score(alignments) loss_dict['align_error'] = align_error # aggregate losses from processes if num_gpus > 1: loss_dict['postnet_loss'] = reduce_tensor(loss_dict['postnet_loss'].data, num_gpus) loss_dict['decoder_loss'] = reduce_tensor(loss_dict['decoder_loss'].data, num_gpus) if c.stopnet: loss_dict['stopnet_loss'] = reduce_tensor(loss_dict['stopnet_loss'].data, num_gpus) # detach loss values loss_dict_new = dict() for key, value in loss_dict.items(): if isinstance(value, (int, float)): loss_dict_new[key] = value else: loss_dict_new[key] = value.item() loss_dict = loss_dict_new # update avg stats update_train_values = dict() for key, value in loss_dict.items(): update_train_values['avg_' + key] = value keep_avg.update_values(update_train_values) if c.print_eval: c_logger.print_eval_step(num_iter, loss_dict, keep_avg.avg_values) if args.rank == 0: # Diagnostic visualizations idx = np.random.randint(mel_input.shape[0]) const_spec = postnet_output[idx].data.cpu().numpy() gt_spec = 
linear_input[idx].data.cpu().numpy() if c.model in [ "Tacotron", "TacotronGST" ] else mel_input[idx].data.cpu().numpy() align_img = alignments[idx].data.cpu().numpy() eval_figures = { "prediction": plot_spectrogram(const_spec, ap, output_fig=False), "ground_truth": plot_spectrogram(gt_spec, ap, output_fig=False), "alignment": plot_alignment(align_img, output_fig=False) } # Sample audio if c.model in ["Tacotron", "TacotronGST"]: eval_audio = ap.inv_spectrogram(const_spec.T) else: eval_audio = ap.inv_melspectrogram(const_spec.T) tb_logger.tb_eval_audios(global_step, {"ValAudio": eval_audio}, c.audio["sample_rate"]) # Plot Validation Stats if c.bidirectional_decoder or c.double_decoder_consistency: align_b_img = alignments_backward[idx].data.cpu().numpy() eval_figures['alignment2'] = plot_alignment(align_b_img, output_fig=False) tb_logger.tb_eval_stats(global_step, keep_avg.avg_values) tb_logger.tb_eval_figures(global_step, eval_figures) if args.rank == 0 and epoch > c.test_delay_epochs: if c.test_sentences_file is None: test_sentences = [ "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", "Be a voice, not an echo.", "I'm sorry Dave. I'm afraid I can't do that.", "This cake is great. It's so delicious and moist.", "Prior to November 22, 1963." ] else: with open(c.test_sentences_file, "r") as f: test_sentences = [s.strip() for s in f.readlines()] # test sentences test_audios = {} test_figures = {} print(" | > Synthesizing test sentences") speaker_id = 0 if c.use_speaker_embedding else None speaker_embedding = speaker_mapping[list(speaker_mapping.keys())[randrange(len(speaker_mapping)-1)]]['embedding'] if c.use_external_speaker_embedding_file and c.use_speaker_embedding else None style_wav = c.get("gst_style_input") if style_wav is None and c.use_gst: # inicialize GST with zero dict. 
style_wav = {} print("WARNING: You don't provided a gst style wav, for this reason we use a zero tensor!") for i in range(c.gst['gst_style_tokens']): style_wav[str(i)] = 0 style_wav = c.get("gst_style_input") for idx, test_sentence in enumerate(test_sentences): try: wav, alignment, decoder_output, postnet_output, stop_tokens, _ = synthesis( model, test_sentence, c, use_cuda, ap, speaker_id=speaker_id, speaker_embedding=speaker_embedding, style_wav=style_wav, truncated=False, enable_eos_bos_chars=c.enable_eos_bos_chars, #pylint: disable=unused-argument use_griffin_lim=True, do_trim_silence=False) file_path = os.path.join(AUDIO_PATH, str(global_step)) os.makedirs(file_path, exist_ok=True) file_path = os.path.join(file_path, "TestSentence_{}.wav".format(idx)) ap.save_wav(wav, file_path) test_audios['{}-audio'.format(idx)] = wav test_figures['{}-prediction'.format(idx)] = plot_spectrogram( postnet_output, ap, output_fig=False) test_figures['{}-alignment'.format(idx)] = plot_alignment( alignment, output_fig=False) except: #pylint: disable=bare-except print(" !! Error creating Test Sentence -", idx) traceback.print_exc() tb_logger.tb_test_audios(global_step, test_audios, c.audio['sample_rate']) tb_logger.tb_test_figures(global_step, test_figures) return keep_avg.avg_values # FIXME: move args definition/parsing inside of main? 
def main(args):  # pylint: disable=redefined-outer-name
    # pylint: disable=global-variable-undefined
    """Full TTS training entry point.

    Builds the data loaders, model, optimizers and criterion, optionally
    restores a checkpoint, then runs the train/eval loop for ``c.epochs``.

    NOTE(review): relies on module-level globals (``c``, ``OUT_PATH``,
    ``c_logger``, ``use_cuda``, ``num_gpus``) initialised by the
    ``__main__`` block below — confirm before reusing elsewhere.
    """
    global meta_data_train, meta_data_eval, symbols, phonemes, speaker_mapping
    # Audio processor
    ap = AudioProcessor(**c.audio)
    # if a custom character set is defined in the config, rebuild the vocabulary
    if 'characters' in c.keys():
        symbols, phonemes = make_symbols(**c.characters)

    # DISTRIBUTED
    if num_gpus > 1:
        init_distributed(args.rank, num_gpus, args.group_id,
                         c.distributed["backend"], c.distributed["url"])
    num_chars = len(phonemes) if c.use_phonemes else len(symbols)

    # load data instances
    meta_data_train, meta_data_eval = load_meta_data(c.datasets)

    # set the portion of the data used for training
    if 'train_portion' in c.keys():
        meta_data_train = meta_data_train[:int(len(meta_data_train) * c.train_portion)]
    if 'eval_portion' in c.keys():
        meta_data_eval = meta_data_eval[:int(len(meta_data_eval) * c.eval_portion)]

    # parse speakers
    num_speakers, speaker_embedding_dim, speaker_mapping = parse_speakers(c, args, meta_data_train, OUT_PATH)

    model = setup_model(num_chars, num_speakers, c, speaker_embedding_dim)

    # scalers for mixed precision training; None when AMP is disabled
    scaler = torch.cuda.amp.GradScaler() if c.mixed_precision else None
    scaler_st = torch.cuda.amp.GradScaler() if c.mixed_precision and c.separate_stopnet else None

    params = set_weight_decay(model, c.wd)
    optimizer = RAdam(params, lr=c.lr, weight_decay=0)
    if c.stopnet and c.separate_stopnet:
        # stopnet is trained with its own optimizer when separated
        optimizer_st = RAdam(model.decoder.stopnet.parameters(),
                             lr=c.lr,
                             weight_decay=0)
    else:
        optimizer_st = None

    # setup criterion
    criterion = TacotronLoss(c, stopnet_pos_weight=10.0, ga_sigma=0.4)

    if args.restore_path:
        checkpoint = torch.load(args.restore_path, map_location='cpu')
        try:
            print(" > Restoring Model.")
            model.load_state_dict(checkpoint['model'])
            # optimizer restore
            print(" > Restoring Optimizer.")
            optimizer.load_state_dict(checkpoint['optimizer'])
            if "scaler" in checkpoint and c.mixed_precision:
                print(" > Restoring AMP Scaler...")
                scaler.load_state_dict(checkpoint["scaler"])
            if c.reinit_layers:
                # deliberately jump to the partial-initialization path below
                raise RuntimeError
        except (KeyError, RuntimeError):
            # checkpoint does not match the model (or reinit requested):
            # copy only the compatible tensors
            print(" > Partial model initialization.")
            model_dict = model.state_dict()
            model_dict = set_init_dict(model_dict, checkpoint['model'], c)
            # torch.save(model_dict, os.path.join(OUT_PATH, 'state_dict.pt'))
            # print("State Dict saved for debug in: ", os.path.join(OUT_PATH, 'state_dict.pt'))
            model.load_state_dict(model_dict)
            del model_dict

        # reset the learning rate of the restored optimizer to the configured one
        for group in optimizer.param_groups:
            group['lr'] = c.lr
        print(" > Model restored from step %d" % checkpoint['step'],
              flush=True)
        args.restore_step = checkpoint['step']
    else:
        args.restore_step = 0

    if use_cuda:
        model.cuda()
        criterion.cuda()

    # DISTRIBUTED
    if num_gpus > 1:
        model = apply_gradient_allreduce(model)

    if c.noam_schedule:
        scheduler = NoamLR(optimizer,
                           warmup_steps=c.warmup_steps,
                           last_epoch=args.restore_step - 1)
    else:
        scheduler = None

    num_params = count_parameters(model)
    print("\n > Model has {} parameters".format(num_params), flush=True)

    # NOTE(review): `best_loss` is never bound earlier in this function, so
    # this check always succeeds — presumably leftover from a script version
    if 'best_loss' not in locals():
        best_loss = float('inf')

    # define data loaders
    train_loader = setup_loader(ap, model.decoder.r, is_val=False, verbose=True)
    eval_loader = setup_loader(ap, model.decoder.r, is_val=True)

    global_step = args.restore_step
    for epoch in range(0, c.epochs):
        c_logger.print_epoch_start(epoch, c.epochs)
        # set gradual training: the reduction factor `r` may change with the step
        if c.gradual_training is not None:
            r, c.batch_size = gradual_training_scheduler(global_step, c)
            c.r = r
            model.decoder.set_r(r)
            if c.bidirectional_decoder:
                model.decoder_backward.set_r(r)
            train_loader.dataset.outputs_per_step = r
            eval_loader.dataset.outputs_per_step = r
            # rebuild the loaders so the batches match the new reduction factor
            train_loader = setup_loader(ap, model.decoder.r, is_val=False,
                                        dataset=train_loader.dataset)
            eval_loader = setup_loader(ap, model.decoder.r, is_val=True,
                                       dataset=eval_loader.dataset)
            print("\n > Number of output frames:", model.decoder.r)
        # train one epoch
        train_avg_loss_dict, global_step = train(train_loader, model,
                                                 criterion, optimizer,
                                                 optimizer_st, scheduler, ap,
                                                 global_step, epoch, scaler,
                                                 scaler_st)
        # eval one epoch
        eval_avg_loss_dict = evaluate(eval_loader, model, criterion, ap,
                                      global_step, epoch)
        c_logger.print_epoch_end(epoch, eval_avg_loss_dict)
        # track the postnet loss (eval loss when evaluation is enabled)
        target_loss = train_avg_loss_dict['avg_postnet_loss']
        if c.run_eval:
            target_loss = eval_avg_loss_dict['avg_postnet_loss']
        best_loss = save_best_model(
            target_loss,
            best_loss,
            model,
            optimizer,
            global_step,
            epoch,
            c.r,
            OUT_PATH,
            scaler=scaler.state_dict() if c.mixed_precision else None)


if __name__ == '__main__':
    # Command-line interface: either continue an existing run (--continue_path)
    # or start a new one from a config file (--config_path).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--continue_path',
        type=str,
        help='Training output folder to continue training. Use to continue a training. If it is used, "config_path" is ignored.',
        default='',
        required='--config_path' not in sys.argv)
    parser.add_argument(
        '--restore_path',
        type=str,
        help='Model file to be restored. Use to finetune a model.',
        default='')
    parser.add_argument(
        '--config_path',
        type=str,
        help='Path to config file for training.',
        required='--continue_path' not in sys.argv
    )
    parser.add_argument('--debug',
                        type=bool,
                        default=False,
                        help='Do not verify commit integrity to run training.')

    # DISTRIBUTED
    parser.add_argument(
        '--rank',
        type=int,
        default=0,
        help='DISTRIBUTED: process rank for distributed training.')
    parser.add_argument('--group_id',
                        type=str,
                        default="",
                        help='DISTRIBUTED: process group id.')
    args = parser.parse_args()

    if args.continue_path != '':
        # resume: reuse the folder's config and restore the newest checkpoint
        print(f" > Training continues for {args.continue_path}")
        args.output_path = args.continue_path
        args.config_path = os.path.join(args.continue_path, 'config.json')
        list_of_files = glob.glob(args.continue_path + "/*.pth.tar")  # * means all if need specific format then *.csv
        latest_model_file = max(list_of_files, key=os.path.getctime)
        args.restore_path = latest_model_file

    # setup output paths and read configs
    c = load_config(args.config_path)
    check_config_tts(c)
    _ = os.path.dirname(os.path.realpath(__file__))

    if c.mixed_precision:
        print(" > Mixed precision mode is ON")

    OUT_PATH = args.continue_path
    if args.continue_path == '':
        OUT_PATH = create_experiment_folder(c.output_path, c.run_name,
                                            args.debug)

    AUDIO_PATH = os.path.join(OUT_PATH, 'test_audios')

    c_logger = ConsoleLogger()

    # only rank 0 writes artifacts (audio, config copy, tensorboard)
    if args.rank == 0:
        os.makedirs(AUDIO_PATH, exist_ok=True)
        new_fields = {}
        if args.restore_path:
            new_fields["restore_path"] = args.restore_path
        new_fields["github_branch"] = get_git_branch()
        copy_model_files(c, args.config_path, OUT_PATH, new_fields)
        os.chmod(AUDIO_PATH, 0o775)
        os.chmod(OUT_PATH, 0o775)

        LOG_DIR = OUT_PATH
        tb_logger = TensorboardLogger(LOG_DIR, model_name='TTS')

        # write model desc to tensorboard
        tb_logger.tb_add_text('model-description', c['run_description'], 0)

    try:
        main(args)
    except KeyboardInterrupt:
        # clean up the experiment folder on manual interruption
        remove_experiment_folder(OUT_PATH)
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)  # pylint: disable=protected-access
    except Exception:  # pylint: disable=broad-except
        remove_experiment_folder(OUT_PATH)
        traceback.print_exc()
        sys.exit(1)
31,270
41.719945
200
py
TTS
TTS-master/TTS/bin/compute_attention_masks.py
# Extract per-utterance attention masks from a trained Tacotron/Tacotron2
# model and save them next to the input wavs as .npy files.
import argparse
import importlib
import os

import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from argparse import RawTextHelpFormatter

from TTS.tts.datasets.TTSDataset import MyDataset
from TTS.tts.utils.generic_utils import setup_model
from TTS.tts.utils.io import load_checkpoint
from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols
from TTS.utils.audio import AudioProcessor
from TTS.utils.io import load_config

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='''Extract attention masks from trained Tacotron/Tacotron2 models. These masks can be used for different purposes including training a TTS model with a Duration Predictor.\n\n'''
        '''Each attention mask is written to the same path as the input wav file with ".npy" file extension. (e.g. path/bla.wav (wav file) --> path/bla.npy (attention mask))\n'''
        '''
Example run:
    CUDA_VISIBLE_DEVICE="0" python TTS/bin/compute_attention_masks.py
        --model_path /data/rw/home/Models/ljspeech-dcattn-December-14-2020_11+10AM-9d0e8c7/checkpoint_200000.pth.tar
        --config_path /data/rw/home/Models/ljspeech-dcattn-December-14-2020_11+10AM-9d0e8c7/config.json
        --dataset_metafile /root/LJSpeech-1.1/metadata.csv
        --data_path /root/LJSpeech-1.1/
        --batch_size 32
        --dataset ljspeech
        --use_cuda True
''',
        formatter_class=RawTextHelpFormatter)
    parser.add_argument('--model_path',
                        type=str,
                        required=True,
                        help='Path to Tacotron/Tacotron2 model file ')
    parser.add_argument(
        '--config_path',
        type=str,
        required=True,
        help='Path to Tacotron/Tacotron2 config file.',
    )
    parser.add_argument('--dataset',
                        type=str,
                        default='',
                        required=True,
                        help='Target dataset processor name from TTS.tts.dataset.preprocess.')
    parser.add_argument(
        '--dataset_metafile',
        type=str,
        default='',
        required=True,
        help='Dataset metafile inclusing file paths with transcripts.')
    parser.add_argument('--data_path',
                        type=str,
                        default='',
                        help='Defines the data path. It overwrites config.json.')
    parser.add_argument('--use_cuda',
                        type=bool,
                        default=False,
                        help="enable/disable cuda.")
    parser.add_argument(
        '--batch_size',
        default=16,
        type=int,
        help='Batch size for the model. Use batch_size=1 if you have no CUDA.')
    args = parser.parse_args()

    C = load_config(args.config_path)
    ap = AudioProcessor(**C.audio)

    # if the vocabulary was passed, replace the default
    if 'characters' in C.keys():
        symbols, phonemes = make_symbols(**C.characters)

    # load the model
    num_chars = len(phonemes) if C.use_phonemes else len(symbols)
    # TODO: handle multi-speaker
    model = setup_model(num_chars, num_speakers=0, c=C)
    model, _ = load_checkpoint(model, args.model_path, None, args.use_cuda)
    model.eval()

    # data loader: resolve the dataset preprocessor by name at runtime
    preprocessor = importlib.import_module('TTS.tts.datasets.preprocess')
    preprocessor = getattr(preprocessor, args.dataset)
    meta_data = preprocessor(args.data_path, args.dataset_metafile)
    dataset = MyDataset(model.decoder.r,
                        C.text_cleaner,
                        compute_linear_spec=False,
                        ap=ap,
                        meta_data=meta_data,
                        tp=C.characters if 'characters' in C.keys() else None,
                        add_blank=C['add_blank'] if 'add_blank' in C.keys() else False,
                        use_phonemes=C.use_phonemes,
                        phoneme_cache_path=C.phoneme_cache_path,
                        phoneme_language=C.phoneme_language,
                        enable_eos_bos=C.enable_eos_bos_chars)

    dataset.sort_items()
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        num_workers=4,
                        collate_fn=dataset.collate_fn,
                        shuffle=False,
                        drop_last=False)

    # compute attentions
    file_paths = []
    with torch.no_grad():
        for data in tqdm(loader):
            # setup input data (collate_fn output positions)
            text_input = data[0]
            text_lengths = data[1]
            linear_input = data[3]
            mel_input = data[4]
            mel_lengths = data[5]
            stop_targets = data[6]
            item_idxs = data[7]

            # dispatch data to GPU
            if args.use_cuda:
                text_input = text_input.cuda()
                text_lengths = text_lengths.cuda()
                mel_input = mel_input.cuda()
                mel_lengths = mel_lengths.cuda()

            mel_outputs, postnet_outputs, alignments, stop_tokens = model.forward(
                text_input, text_lengths, mel_input)

            alignments = alignments.detach()
            for idx, alignment in enumerate(alignments):
                item_idx = item_idxs[idx]
                # interpolate if r > 1: expand alignment frames back to the
                # original mel resolution (decoder emits r frames per step)
                alignment = torch.nn.functional.interpolate(
                    alignment.transpose(0, 1).unsqueeze(0),
                    size=None,
                    scale_factor=model.decoder.r,
                    mode='nearest',
                    align_corners=None,
                    recompute_scale_factor=None).squeeze(0).transpose(0, 1)
                # remove paddings
                alignment = alignment[:mel_lengths[idx], :text_lengths[idx]].cpu().numpy()
                # set file paths: mask lives next to the wav with .npy extension
                wav_file_name = os.path.basename(item_idx)
                align_file_name = os.path.splitext(wav_file_name)[0] + '.npy'
                file_path = item_idx.replace(wav_file_name, align_file_name)
                # save output
                file_paths.append([item_idx, file_path])
                np.save(file_path, alignment)

    # output metafile mapping each wav to its attention-mask file
    metafile = os.path.join(args.data_path, "metadata_attn_mask.txt")

    with open(metafile, "w") as f:
        for p in file_paths:
            f.write(f"{p[0]}|{p[1]}\n")
    print(f" >> Metafile created: {metafile}")
6,431
37.51497
116
py
TTS
TTS-master/TTS/bin/train_vocoder_gan.py
import argparse import glob import os import sys import time import traceback from inspect import signature import torch from torch.utils.data import DataLoader from TTS.utils.audio import AudioProcessor from TTS.utils.console_logger import ConsoleLogger from TTS.utils.generic_utils import (KeepAverage, count_parameters, create_experiment_folder, get_git_branch, remove_experiment_folder, set_init_dict) from TTS.utils.io import copy_model_files, load_config from TTS.utils.radam import RAdam from TTS.utils.tensorboard_logger import TensorboardLogger from TTS.utils.training import setup_torch_training_env from TTS.vocoder.datasets.gan_dataset import GANDataset from TTS.vocoder.datasets.preprocess import load_wav_data, load_wav_feat_data from TTS.vocoder.layers.losses import DiscriminatorLoss, GeneratorLoss from TTS.vocoder.utils.generic_utils import (plot_results, setup_discriminator, setup_generator) from TTS.vocoder.utils.io import save_best_model, save_checkpoint # DISTRIBUTED from torch.nn.parallel import DistributedDataParallel as DDP_th from torch.utils.data.distributed import DistributedSampler from TTS.utils.distribute import init_distributed use_cuda, num_gpus = setup_torch_training_env(True, True) def setup_loader(ap, is_val=False, verbose=False): if is_val and not c.run_eval: loader = None else: dataset = GANDataset(ap=ap, items=eval_data if is_val else train_data, seq_len=c.seq_len, hop_len=ap.hop_length, pad_short=c.pad_short, conv_pad=c.conv_pad, is_training=not is_val, return_segments=not is_val, use_noise_augment=c.use_noise_augment, use_cache=c.use_cache, verbose=verbose) dataset.shuffle_mapping() sampler = DistributedSampler(dataset, shuffle=True) if num_gpus > 1 else None loader = DataLoader(dataset, batch_size=1 if is_val else c.batch_size, shuffle=False if num_gpus > 1 else True, drop_last=False, sampler=sampler, num_workers=c.num_val_loader_workers if is_val else c.num_loader_workers, pin_memory=False) return loader def format_data(data): if 
isinstance(data[0], list): # setup input data c_G, x_G = data[0] c_D, x_D = data[1] # dispatch data to GPU if use_cuda: c_G = c_G.cuda(non_blocking=True) x_G = x_G.cuda(non_blocking=True) c_D = c_D.cuda(non_blocking=True) x_D = x_D.cuda(non_blocking=True) return c_G, x_G, c_D, x_D # return a whole audio segment co, x = data if use_cuda: co = co.cuda(non_blocking=True) x = x.cuda(non_blocking=True) return co, x, None, None def train(model_G, criterion_G, optimizer_G, model_D, criterion_D, optimizer_D, scheduler_G, scheduler_D, ap, global_step, epoch): data_loader = setup_loader(ap, is_val=False, verbose=(epoch == 0)) model_G.train() model_D.train() epoch_time = 0 keep_avg = KeepAverage() if use_cuda: batch_n_iter = int( len(data_loader.dataset) / (c.batch_size * num_gpus)) else: batch_n_iter = int(len(data_loader.dataset) / c.batch_size) end_time = time.time() c_logger.print_train_start() for num_iter, data in enumerate(data_loader): start_time = time.time() # format data c_G, y_G, c_D, y_D = format_data(data) loader_time = time.time() - end_time global_step += 1 ############################## # GENERATOR ############################## # generator pass y_hat = model_G(c_G) y_hat_sub = None y_G_sub = None y_hat_vis = y_hat # for visualization # PQMF formatting if y_hat.shape[1] > 1: y_hat_sub = y_hat y_hat = model_G.pqmf_synthesis(y_hat) y_hat_vis = y_hat y_G_sub = model_G.pqmf_analysis(y_G) scores_fake, feats_fake, feats_real = None, None, None if global_step > c.steps_to_start_discriminator: # run D with or without cond. 
features if len(signature(model_D.forward).parameters) == 2: D_out_fake = model_D(y_hat, c_G) else: D_out_fake = model_D(y_hat) D_out_real = None if c.use_feat_match_loss: with torch.no_grad(): D_out_real = model_D(y_G) # format D outputs if isinstance(D_out_fake, tuple): scores_fake, feats_fake = D_out_fake if D_out_real is None: feats_real = None else: _, feats_real = D_out_real else: scores_fake = D_out_fake # compute losses loss_G_dict = criterion_G(y_hat, y_G, scores_fake, feats_fake, feats_real, y_hat_sub, y_G_sub) loss_G = loss_G_dict['G_loss'] # optimizer generator optimizer_G.zero_grad() loss_G.backward() if c.gen_clip_grad > 0: torch.nn.utils.clip_grad_norm_(model_G.parameters(), c.gen_clip_grad) optimizer_G.step() if scheduler_G is not None: scheduler_G.step() loss_dict = dict() for key, value in loss_G_dict.items(): if isinstance(value, int): loss_dict[key] = value else: loss_dict[key] = value.item() ############################## # DISCRIMINATOR ############################## if global_step >= c.steps_to_start_discriminator: # discriminator pass with torch.no_grad(): y_hat = model_G(c_D) # PQMF formatting if y_hat.shape[1] > 1: y_hat = model_G.pqmf_synthesis(y_hat) # run D with or without cond. 
features if len(signature(model_D.forward).parameters) == 2: D_out_fake = model_D(y_hat.detach(), c_D) D_out_real = model_D(y_D, c_D) else: D_out_fake = model_D(y_hat.detach()) D_out_real = model_D(y_D) # format D outputs if isinstance(D_out_fake, tuple): scores_fake, feats_fake = D_out_fake if D_out_real is None: scores_real, feats_real = None, None else: scores_real, feats_real = D_out_real else: scores_fake = D_out_fake scores_real = D_out_real # compute losses loss_D_dict = criterion_D(scores_fake, scores_real) loss_D = loss_D_dict['D_loss'] # optimizer discriminator optimizer_D.zero_grad() loss_D.backward() if c.disc_clip_grad > 0: torch.nn.utils.clip_grad_norm_(model_D.parameters(), c.disc_clip_grad) optimizer_D.step() if scheduler_D is not None: scheduler_D.step() for key, value in loss_D_dict.items(): if isinstance(value, (int, float)): loss_dict[key] = value else: loss_dict[key] = value.item() step_time = time.time() - start_time epoch_time += step_time # get current learning rates current_lr_G = list(optimizer_G.param_groups)[0]['lr'] current_lr_D = list(optimizer_D.param_groups)[0]['lr'] # update avg stats update_train_values = dict() for key, value in loss_dict.items(): update_train_values['avg_' + key] = value update_train_values['avg_loader_time'] = loader_time update_train_values['avg_step_time'] = step_time keep_avg.update_values(update_train_values) # print training stats if global_step % c.print_step == 0: log_dict = { 'step_time': [step_time, 2], 'loader_time': [loader_time, 4], "current_lr_G": current_lr_G, "current_lr_D": current_lr_D } c_logger.print_train_step(batch_n_iter, num_iter, global_step, log_dict, loss_dict, keep_avg.avg_values) if args.rank == 0: # plot step stats if global_step % 10 == 0: iter_stats = { "lr_G": current_lr_G, "lr_D": current_lr_D, "step_time": step_time } iter_stats.update(loss_dict) tb_logger.tb_train_iter_stats(global_step, iter_stats) # save checkpoint if global_step % c.save_step == 0: if c.checkpoint: # save 
model save_checkpoint(model_G, optimizer_G, scheduler_G, model_D, optimizer_D, scheduler_D, global_step, epoch, OUT_PATH, model_losses=loss_dict) # compute spectrograms figures = plot_results(y_hat_vis, y_G, ap, global_step, 'train') tb_logger.tb_train_figures(global_step, figures) # Sample audio sample_voice = y_hat_vis[0].squeeze(0).detach().cpu().numpy() tb_logger.tb_train_audios(global_step, {'train/audio': sample_voice}, c.audio["sample_rate"]) end_time = time.time() # print epoch stats c_logger.print_train_epoch_end(global_step, epoch, epoch_time, keep_avg) # Plot Training Epoch Stats epoch_stats = {"epoch_time": epoch_time} epoch_stats.update(keep_avg.avg_values) if args.rank == 0: tb_logger.tb_train_epoch_stats(global_step, epoch_stats) # TODO: plot model stats # if c.tb_model_param_stats: # tb_logger.tb_model_weights(model, global_step) return keep_avg.avg_values, global_step @torch.no_grad() def evaluate(model_G, criterion_G, model_D, criterion_D, ap, global_step, epoch): data_loader = setup_loader(ap, is_val=True, verbose=(epoch == 0)) model_G.eval() model_D.eval() epoch_time = 0 keep_avg = KeepAverage() end_time = time.time() c_logger.print_eval_start() for num_iter, data in enumerate(data_loader): start_time = time.time() # format data c_G, y_G, _, _ = format_data(data) loader_time = time.time() - end_time global_step += 1 ############################## # GENERATOR ############################## # generator pass y_hat = model_G(c_G) y_hat_sub = None y_G_sub = None # PQMF formatting if y_hat.shape[1] > 1: y_hat_sub = y_hat y_hat = model_G.pqmf_synthesis(y_hat) y_G_sub = model_G.pqmf_analysis(y_G) scores_fake, feats_fake, feats_real = None, None, None if global_step > c.steps_to_start_discriminator: if len(signature(model_D.forward).parameters) == 2: D_out_fake = model_D(y_hat, c_G) else: D_out_fake = model_D(y_hat) D_out_real = None if c.use_feat_match_loss: with torch.no_grad(): D_out_real = model_D(y_G) # format D outputs if isinstance(D_out_fake, 
tuple): scores_fake, feats_fake = D_out_fake if D_out_real is None: feats_real = None else: _, feats_real = D_out_real else: scores_fake = D_out_fake feats_fake, feats_real = None, None # compute losses loss_G_dict = criterion_G(y_hat, y_G, scores_fake, feats_fake, feats_real, y_hat_sub, y_G_sub) loss_dict = dict() for key, value in loss_G_dict.items(): if isinstance(value, (int, float)): loss_dict[key] = value else: loss_dict[key] = value.item() ############################## # DISCRIMINATOR ############################## if global_step >= c.steps_to_start_discriminator: # discriminator pass with torch.no_grad(): y_hat = model_G(c_G) # PQMF formatting if y_hat.shape[1] > 1: y_hat = model_G.pqmf_synthesis(y_hat) # run D with or without cond. features if len(signature(model_D.forward).parameters) == 2: D_out_fake = model_D(y_hat.detach(), c_G) D_out_real = model_D(y_G, c_G) else: D_out_fake = model_D(y_hat.detach()) D_out_real = model_D(y_G) # format D outputs if isinstance(D_out_fake, tuple): scores_fake, feats_fake = D_out_fake if D_out_real is None: scores_real, feats_real = None, None else: scores_real, feats_real = D_out_real else: scores_fake = D_out_fake scores_real = D_out_real # compute losses loss_D_dict = criterion_D(scores_fake, scores_real) for key, value in loss_D_dict.items(): if isinstance(value, (int, float)): loss_dict[key] = value else: loss_dict[key] = value.item() step_time = time.time() - start_time epoch_time += step_time # update avg stats update_eval_values = dict() for key, value in loss_dict.items(): update_eval_values['avg_' + key] = value update_eval_values['avg_loader_time'] = loader_time update_eval_values['avg_step_time'] = step_time keep_avg.update_values(update_eval_values) # print eval stats if c.print_eval: c_logger.print_eval_step(num_iter, loss_dict, keep_avg.avg_values) if args.rank == 0: # compute spectrograms figures = plot_results(y_hat, y_G, ap, global_step, 'eval') tb_logger.tb_eval_figures(global_step, figures) # Sample 
audio sample_voice = y_hat[0].squeeze(0).detach().cpu().numpy() tb_logger.tb_eval_audios(global_step, {'eval/audio': sample_voice}, c.audio["sample_rate"]) tb_logger.tb_eval_stats(global_step, keep_avg.avg_values) # synthesize a full voice data_loader.return_segments = False return keep_avg.avg_values # FIXME: move args definition/parsing inside of main? def main(args): # pylint: disable=redefined-outer-name # pylint: disable=global-variable-undefined global train_data, eval_data print(f" > Loading wavs from: {c.data_path}") if c.feature_path is not None: print(f" > Loading features from: {c.feature_path}") eval_data, train_data = load_wav_feat_data( c.data_path, c.feature_path, c.eval_split_size) else: eval_data, train_data = load_wav_data(c.data_path, c.eval_split_size) # setup audio processor ap = AudioProcessor(**c.audio) # DISTRUBUTED if num_gpus > 1: init_distributed(args.rank, num_gpus, args.group_id, c.distributed["backend"], c.distributed["url"]) # setup models model_gen = setup_generator(c) model_disc = setup_discriminator(c) # setup optimizers optimizer_gen = RAdam(model_gen.parameters(), lr=c.lr_gen, weight_decay=0) optimizer_disc = RAdam(model_disc.parameters(), lr=c.lr_disc, weight_decay=0) # schedulers scheduler_gen = None scheduler_disc = None if 'lr_scheduler_gen' in c: scheduler_gen = getattr(torch.optim.lr_scheduler, c.lr_scheduler_gen) scheduler_gen = scheduler_gen( optimizer_gen, **c.lr_scheduler_gen_params) if 'lr_scheduler_disc' in c: scheduler_disc = getattr(torch.optim.lr_scheduler, c.lr_scheduler_disc) scheduler_disc = scheduler_disc( optimizer_disc, **c.lr_scheduler_disc_params) # setup criterion criterion_gen = GeneratorLoss(c) criterion_disc = DiscriminatorLoss(c) if args.restore_path: checkpoint = torch.load(args.restore_path, map_location='cpu') try: print(" > Restoring Generator Model...") model_gen.load_state_dict(checkpoint['model']) print(" > Restoring Generator Optimizer...") optimizer_gen.load_state_dict(checkpoint['optimizer']) 
print(" > Restoring Discriminator Model...") model_disc.load_state_dict(checkpoint['model_disc']) print(" > Restoring Discriminator Optimizer...") optimizer_disc.load_state_dict(checkpoint['optimizer_disc']) if 'scheduler' in checkpoint: print(" > Restoring Generator LR Scheduler...") scheduler_gen.load_state_dict(checkpoint['scheduler']) # NOTE: Not sure if necessary scheduler_gen.optimizer = optimizer_gen if 'scheduler_disc' in checkpoint: print(" > Restoring Discriminator LR Scheduler...") scheduler_disc.load_state_dict(checkpoint['scheduler_disc']) scheduler_disc.optimizer = optimizer_disc except RuntimeError: # retore only matching layers. print(" > Partial model initialization...") model_dict = model_gen.state_dict() model_dict = set_init_dict(model_dict, checkpoint['model'], c) model_gen.load_state_dict(model_dict) model_dict = model_disc.state_dict() model_dict = set_init_dict(model_dict, checkpoint['model_disc'], c) model_disc.load_state_dict(model_dict) del model_dict # reset lr if not countinuining training. 
for group in optimizer_gen.param_groups: group['lr'] = c.lr_gen for group in optimizer_disc.param_groups: group['lr'] = c.lr_disc print(" > Model restored from step %d" % checkpoint['step'], flush=True) args.restore_step = checkpoint['step'] else: args.restore_step = 0 if use_cuda: model_gen.cuda() criterion_gen.cuda() model_disc.cuda() criterion_disc.cuda() # DISTRUBUTED if num_gpus > 1: model_gen = DDP_th(model_gen, device_ids=[args.rank]) model_disc = DDP_th(model_disc, device_ids=[args.rank]) num_params = count_parameters(model_gen) print(" > Generator has {} parameters".format(num_params), flush=True) num_params = count_parameters(model_disc) print(" > Discriminator has {} parameters".format(num_params), flush=True) if 'best_loss' not in locals(): best_loss = float('inf') global_step = args.restore_step for epoch in range(0, c.epochs): c_logger.print_epoch_start(epoch, c.epochs) _, global_step = train(model_gen, criterion_gen, optimizer_gen, model_disc, criterion_disc, optimizer_disc, scheduler_gen, scheduler_disc, ap, global_step, epoch) eval_avg_loss_dict = evaluate(model_gen, criterion_gen, model_disc, criterion_disc, ap, global_step, epoch) c_logger.print_epoch_end(epoch, eval_avg_loss_dict) target_loss = eval_avg_loss_dict[c.target_loss] best_loss = save_best_model(target_loss, best_loss, model_gen, optimizer_gen, scheduler_gen, model_disc, optimizer_disc, scheduler_disc, global_step, epoch, OUT_PATH, model_losses=eval_avg_loss_dict) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--continue_path', type=str, help='Training output folder to continue training. Use to continue a training. If it is used, "config_path" is ignored.', default='', required='--config_path' not in sys.argv) parser.add_argument( '--restore_path', type=str, help='Model file to be restored. 
Use to finetune a model.', default='') parser.add_argument('--config_path', type=str, help='Path to config file for training.', required='--continue_path' not in sys.argv) parser.add_argument('--debug', type=bool, default=False, help='Do not verify commit integrity to run training.') # DISTRUBUTED parser.add_argument( '--rank', type=int, default=0, help='DISTRIBUTED: process rank for distributed training.') parser.add_argument('--group_id', type=str, default="", help='DISTRIBUTED: process group id.') args = parser.parse_args() if args.continue_path != '': args.output_path = args.continue_path args.config_path = os.path.join(args.continue_path, 'config.json') list_of_files = glob.glob( args.continue_path + "/*.pth.tar") # * means all if need specific format then *.csv latest_model_file = max(list_of_files, key=os.path.getctime) args.restore_path = latest_model_file print(f" > Training continues for {args.restore_path}") # setup output paths and read configs c = load_config(args.config_path) # check_config(c) _ = os.path.dirname(os.path.realpath(__file__)) OUT_PATH = args.continue_path if args.continue_path == '': OUT_PATH = create_experiment_folder(c.output_path, c.run_name, args.debug) AUDIO_PATH = os.path.join(OUT_PATH, 'test_audios') c_logger = ConsoleLogger() if args.rank == 0: os.makedirs(AUDIO_PATH, exist_ok=True) new_fields = {} if args.restore_path: new_fields["restore_path"] = args.restore_path new_fields["github_branch"] = get_git_branch() copy_model_files(c, args.config_path, OUT_PATH, new_fields) os.chmod(AUDIO_PATH, 0o775) os.chmod(OUT_PATH, 0o775) LOG_DIR = OUT_PATH tb_logger = TensorboardLogger(LOG_DIR, model_name='VOCODER') # write model desc to tensorboard tb_logger.tb_add_text('model-description', c['run_description'], 0) try: main(args) except KeyboardInterrupt: remove_experiment_folder(OUT_PATH) try: sys.exit(0) except SystemExit: os._exit(0) # pylint: disable=protected-access except Exception: # pylint: disable=broad-except 
remove_experiment_folder(OUT_PATH) traceback.print_exc() sys.exit(1)
24,482
35.816541
129
py
TTS
TTS-master/TTS/bin/train_speedy_speech.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import argparse import glob import os import sys import time import traceback import numpy as np from random import randrange import torch # DISTRIBUTED from torch.nn.parallel import DistributedDataParallel as DDP_th from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from TTS.tts.datasets.preprocess import load_meta_data from TTS.tts.datasets.TTSDataset import MyDataset from TTS.tts.layers.losses import SpeedySpeechLoss from TTS.tts.utils.generic_utils import check_config_tts, setup_model from TTS.tts.utils.io import save_best_model, save_checkpoint from TTS.tts.utils.measures import alignment_diagonal_score from TTS.tts.utils.speakers import parse_speakers from TTS.tts.utils.synthesis import synthesis from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols from TTS.tts.utils.visual import plot_alignment, plot_spectrogram from TTS.utils.audio import AudioProcessor from TTS.utils.console_logger import ConsoleLogger from TTS.utils.distribute import init_distributed, reduce_tensor from TTS.utils.generic_utils import (KeepAverage, count_parameters, create_experiment_folder, get_git_branch, remove_experiment_folder, set_init_dict) from TTS.utils.io import copy_model_files, load_config from TTS.utils.radam import RAdam from TTS.utils.tensorboard_logger import TensorboardLogger from TTS.utils.training import NoamLR, setup_torch_training_env use_cuda, num_gpus = setup_torch_training_env(True, False) def setup_loader(ap, r, is_val=False, verbose=False): if is_val and not c.run_eval: loader = None else: dataset = MyDataset( r, c.text_cleaner, compute_linear_spec=False, meta_data=meta_data_eval if is_val else meta_data_train, ap=ap, tp=c.characters if 'characters' in c.keys() else None, add_blank=c['add_blank'] if 'add_blank' in c.keys() else False, batch_group_size=0 if is_val else c.batch_group_size * c.batch_size, min_seq_len=c.min_seq_len, 
max_seq_len=c.max_seq_len, phoneme_cache_path=c.phoneme_cache_path, use_phonemes=c.use_phonemes, phoneme_language=c.phoneme_language, enable_eos_bos=c.enable_eos_bos_chars, use_noise_augment=not is_val, verbose=verbose, speaker_mapping=speaker_mapping if c.use_speaker_embedding and c.use_external_speaker_embedding_file else None) if c.use_phonemes and c.compute_input_seq_cache: # precompute phonemes to have a better estimate of sequence lengths. dataset.compute_input_seq(c.num_loader_workers) dataset.sort_items() sampler = DistributedSampler(dataset) if num_gpus > 1 else None loader = DataLoader( dataset, batch_size=c.eval_batch_size if is_val else c.batch_size, shuffle=False, collate_fn=dataset.collate_fn, drop_last=False, sampler=sampler, num_workers=c.num_val_loader_workers if is_val else c.num_loader_workers, pin_memory=False) return loader def format_data(data): # setup input data text_input = data[0] text_lengths = data[1] speaker_names = data[2] mel_input = data[4].permute(0, 2, 1) # B x D x T mel_lengths = data[5] item_idx = data[7] attn_mask = data[9] avg_text_length = torch.mean(text_lengths.float()) avg_spec_length = torch.mean(mel_lengths.float()) if c.use_speaker_embedding: if c.use_external_speaker_embedding_file: # return precomputed embedding vector speaker_c = data[8] else: # return speaker_id to be used by an embedding layer speaker_c = [ speaker_mapping[speaker_name] for speaker_name in speaker_names ] speaker_c = torch.LongTensor(speaker_c) else: speaker_c = None # compute durations from attention mask durations = torch.zeros(attn_mask.shape[0], attn_mask.shape[2]) for idx, am in enumerate(attn_mask): # compute raw durations c_idxs = am[:, :text_lengths[idx], :mel_lengths[idx]].max(1)[1] # c_idxs, counts = torch.unique_consecutive(c_idxs, return_counts=True) c_idxs, counts = torch.unique(c_idxs, return_counts=True) dur = torch.ones([text_lengths[idx]]).to(counts.dtype) dur[c_idxs] = counts # smooth the durations and set any 0 duration to 1 # by 
cutting off from the largest duration indeces. extra_frames = dur.sum() - mel_lengths[idx] largest_idxs = torch.argsort(-dur)[:extra_frames] dur[largest_idxs] -= 1 assert dur.sum() == mel_lengths[idx], f" [!] total duration {dur.sum()} vs spectrogram length {mel_lengths[idx]}" durations[idx, :text_lengths[idx]] = dur # dispatch data to GPU if use_cuda: text_input = text_input.cuda(non_blocking=True) text_lengths = text_lengths.cuda(non_blocking=True) mel_input = mel_input.cuda(non_blocking=True) mel_lengths = mel_lengths.cuda(non_blocking=True) if speaker_c is not None: speaker_c = speaker_c.cuda(non_blocking=True) attn_mask = attn_mask.cuda(non_blocking=True) durations = durations.cuda(non_blocking=True) return text_input, text_lengths, mel_input, mel_lengths, speaker_c,\ avg_text_length, avg_spec_length, attn_mask, durations, item_idx def train(data_loader, model, criterion, optimizer, scheduler, ap, global_step, epoch): model.train() epoch_time = 0 keep_avg = KeepAverage() if use_cuda: batch_n_iter = int( len(data_loader.dataset) / (c.batch_size * num_gpus)) else: batch_n_iter = int(len(data_loader.dataset) / c.batch_size) end_time = time.time() c_logger.print_train_start() scaler = torch.cuda.amp.GradScaler() if c.mixed_precision else None for num_iter, data in enumerate(data_loader): start_time = time.time() # format data text_input, text_lengths, mel_targets, mel_lengths, speaker_c,\ avg_text_length, avg_spec_length, _, dur_target, _ = format_data(data) loader_time = time.time() - end_time global_step += 1 optimizer.zero_grad() # forward pass model with torch.cuda.amp.autocast(enabled=c.mixed_precision): decoder_output, dur_output, alignments = model.forward( text_input, text_lengths, mel_lengths, dur_target, g=speaker_c) # compute loss loss_dict = criterion(decoder_output, mel_targets, mel_lengths, dur_output, torch.log(1 + dur_target), text_lengths) # backward pass with loss scaling if c.mixed_precision: scaler.scale(loss_dict['loss']).backward() 
scaler.unscale_(optimizer) grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), c.grad_clip) scaler.step(optimizer) scaler.update() else: loss_dict['loss'].backward() grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), c.grad_clip) optimizer.step() # setup lr if c.noam_schedule: scheduler.step() # current_lr current_lr = optimizer.param_groups[0]['lr'] # compute alignment error (the lower the better ) align_error = 1 - alignment_diagonal_score(alignments, binary=True) loss_dict['align_error'] = align_error step_time = time.time() - start_time epoch_time += step_time # aggregate losses from processes if num_gpus > 1: loss_dict['loss_l1'] = reduce_tensor(loss_dict['loss_l1'].data, num_gpus) loss_dict['loss_ssim'] = reduce_tensor(loss_dict['loss_ssim'].data, num_gpus) loss_dict['loss_dur'] = reduce_tensor(loss_dict['loss_dur'].data, num_gpus) loss_dict['loss'] = reduce_tensor(loss_dict['loss'] .data, num_gpus) # detach loss values loss_dict_new = dict() for key, value in loss_dict.items(): if isinstance(value, (int, float)): loss_dict_new[key] = value else: loss_dict_new[key] = value.item() loss_dict = loss_dict_new # update avg stats update_train_values = dict() for key, value in loss_dict.items(): update_train_values['avg_' + key] = value update_train_values['avg_loader_time'] = loader_time update_train_values['avg_step_time'] = step_time keep_avg.update_values(update_train_values) # print training progress if global_step % c.print_step == 0: log_dict = { "avg_spec_length": [avg_spec_length, 1], # value, precision "avg_text_length": [avg_text_length, 1], "step_time": [step_time, 4], "loader_time": [loader_time, 2], "current_lr": current_lr, } c_logger.print_train_step(batch_n_iter, num_iter, global_step, log_dict, loss_dict, keep_avg.avg_values) if args.rank == 0: # Plot Training Iter Stats # reduce TB load if global_step % c.tb_plot_step == 0: iter_stats = { "lr": current_lr, "grad_norm": grad_norm, "step_time": step_time } 
iter_stats.update(loss_dict) tb_logger.tb_train_iter_stats(global_step, iter_stats) if global_step % c.save_step == 0: if c.checkpoint: # save model save_checkpoint(model, optimizer, global_step, epoch, 1, OUT_PATH, model_loss=loss_dict['loss']) # wait all kernels to be completed torch.cuda.synchronize() # Diagnostic visualizations idx = np.random.randint(mel_targets.shape[0]) pred_spec = decoder_output[idx].detach().data.cpu().numpy().T gt_spec = mel_targets[idx].data.cpu().numpy().T align_img = alignments[idx].data.cpu() figures = { "prediction": plot_spectrogram(pred_spec, ap), "ground_truth": plot_spectrogram(gt_spec, ap), "alignment": plot_alignment(align_img), } tb_logger.tb_train_figures(global_step, figures) # Sample audio train_audio = ap.inv_melspectrogram(pred_spec.T) tb_logger.tb_train_audios(global_step, {'TrainAudio': train_audio}, c.audio["sample_rate"]) end_time = time.time() # print epoch stats c_logger.print_train_epoch_end(global_step, epoch, epoch_time, keep_avg) # Plot Epoch Stats if args.rank == 0: epoch_stats = {"epoch_time": epoch_time} epoch_stats.update(keep_avg.avg_values) tb_logger.tb_train_epoch_stats(global_step, epoch_stats) if c.tb_model_param_stats: tb_logger.tb_model_weights(model, global_step) return keep_avg.avg_values, global_step @torch.no_grad() def evaluate(data_loader, model, criterion, ap, global_step, epoch): model.eval() epoch_time = 0 keep_avg = KeepAverage() c_logger.print_eval_start() if data_loader is not None: for num_iter, data in enumerate(data_loader): start_time = time.time() # format data text_input, text_lengths, mel_targets, mel_lengths, speaker_c,\ _, _, _, dur_target, _ = format_data(data) # forward pass model with torch.cuda.amp.autocast(enabled=c.mixed_precision): decoder_output, dur_output, alignments = model.forward( text_input, text_lengths, mel_lengths, dur_target, g=speaker_c) # compute loss loss_dict = criterion(decoder_output, mel_targets, mel_lengths, dur_output, torch.log(1 + dur_target), 
text_lengths) # step time step_time = time.time() - start_time epoch_time += step_time # compute alignment score align_error = 1 - alignment_diagonal_score(alignments, binary=True) loss_dict['align_error'] = align_error # aggregate losses from processes if num_gpus > 1: loss_dict['loss_l1'] = reduce_tensor(loss_dict['loss_l1'].data, num_gpus) loss_dict['loss_ssim'] = reduce_tensor(loss_dict['loss_ssim'].data, num_gpus) loss_dict['loss_dur'] = reduce_tensor(loss_dict['loss_dur'].data, num_gpus) loss_dict['loss'] = reduce_tensor(loss_dict['loss'] .data, num_gpus) # detach loss values loss_dict_new = dict() for key, value in loss_dict.items(): if isinstance(value, (int, float)): loss_dict_new[key] = value else: loss_dict_new[key] = value.item() loss_dict = loss_dict_new # update avg stats update_train_values = dict() for key, value in loss_dict.items(): update_train_values['avg_' + key] = value keep_avg.update_values(update_train_values) if c.print_eval: c_logger.print_eval_step(num_iter, loss_dict, keep_avg.avg_values) if args.rank == 0: # Diagnostic visualizations idx = np.random.randint(mel_targets.shape[0]) pred_spec = decoder_output[idx].detach().data.cpu().numpy().T gt_spec = mel_targets[idx].data.cpu().numpy().T align_img = alignments[idx].data.cpu() eval_figures = { "prediction": plot_spectrogram(pred_spec, ap, output_fig=False), "ground_truth": plot_spectrogram(gt_spec, ap, output_fig=False), "alignment": plot_alignment(align_img, output_fig=False) } # Sample audio eval_audio = ap.inv_melspectrogram(pred_spec.T) tb_logger.tb_eval_audios(global_step, {"ValAudio": eval_audio}, c.audio["sample_rate"]) # Plot Validation Stats tb_logger.tb_eval_stats(global_step, keep_avg.avg_values) tb_logger.tb_eval_figures(global_step, eval_figures) if args.rank == 0 and epoch >= c.test_delay_epochs: if c.test_sentences_file is None: test_sentences = [ "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", "Be a voice, not an 
echo.", "I'm sorry Dave. I'm afraid I can't do that.", "This cake is great. It's so delicious and moist.", "Prior to November 22, 1963." ] else: with open(c.test_sentences_file, "r") as f: test_sentences = [s.strip() for s in f.readlines()] # test sentences test_audios = {} test_figures = {} print(" | > Synthesizing test sentences") if c.use_speaker_embedding: if c.use_external_speaker_embedding_file: speaker_embedding = speaker_mapping[list(speaker_mapping.keys())[randrange(len(speaker_mapping)-1)]]['embedding'] speaker_id = None else: speaker_id = 0 speaker_embedding = None else: speaker_id = None speaker_embedding = None style_wav = c.get("style_wav_for_test") for idx, test_sentence in enumerate(test_sentences): try: wav, alignment, _, postnet_output, _, _ = synthesis( model, test_sentence, c, use_cuda, ap, speaker_id=speaker_id, speaker_embedding=speaker_embedding, style_wav=style_wav, truncated=False, enable_eos_bos_chars=c.enable_eos_bos_chars, #pylint: disable=unused-argument use_griffin_lim=True, do_trim_silence=False) file_path = os.path.join(AUDIO_PATH, str(global_step)) os.makedirs(file_path, exist_ok=True) file_path = os.path.join(file_path, "TestSentence_{}.wav".format(idx)) ap.save_wav(wav, file_path) test_audios['{}-audio'.format(idx)] = wav test_figures['{}-prediction'.format(idx)] = plot_spectrogram( postnet_output, ap) test_figures['{}-alignment'.format(idx)] = plot_alignment( alignment) except: #pylint: disable=bare-except print(" !! Error creating Test Sentence -", idx) traceback.print_exc() tb_logger.tb_test_audios(global_step, test_audios, c.audio['sample_rate']) tb_logger.tb_test_figures(global_step, test_figures) return keep_avg.avg_values # FIXME: move args definition/parsing inside of main? 
def main(args): # pylint: disable=redefined-outer-name # pylint: disable=global-variable-undefined global meta_data_train, meta_data_eval, symbols, phonemes, speaker_mapping # Audio processor ap = AudioProcessor(**c.audio) if 'characters' in c.keys(): symbols, phonemes = make_symbols(**c.characters) # DISTRUBUTED if num_gpus > 1: init_distributed(args.rank, num_gpus, args.group_id, c.distributed["backend"], c.distributed["url"]) num_chars = len(phonemes) if c.use_phonemes else len(symbols) # load data instances meta_data_train, meta_data_eval = load_meta_data(c.datasets, eval_split=True) # set the portion of the data used for training if set in config.json if 'train_portion' in c.keys(): meta_data_train = meta_data_train[:int(len(meta_data_train) * c.train_portion)] if 'eval_portion' in c.keys(): meta_data_eval = meta_data_eval[:int(len(meta_data_eval) * c.eval_portion)] # parse speakers num_speakers, speaker_embedding_dim, speaker_mapping = parse_speakers(c, args, meta_data_train, OUT_PATH) # setup model model = setup_model(num_chars, num_speakers, c, speaker_embedding_dim=speaker_embedding_dim) optimizer = RAdam(model.parameters(), lr=c.lr, weight_decay=0, betas=(0.9, 0.98), eps=1e-9) criterion = SpeedySpeechLoss(c) if args.restore_path: checkpoint = torch.load(args.restore_path, map_location='cpu') try: # TODO: fix optimizer init, model.cuda() needs to be called before # optimizer restore optimizer.load_state_dict(checkpoint['optimizer']) if c.reinit_layers: raise RuntimeError model.load_state_dict(checkpoint['model']) except: #pylint: disable=bare-except print(" > Partial model initialization.") model_dict = model.state_dict() model_dict = set_init_dict(model_dict, checkpoint['model'], c) model.load_state_dict(model_dict) del model_dict for group in optimizer.param_groups: group['initial_lr'] = c.lr print(" > Model restored from step %d" % checkpoint['step'], flush=True) args.restore_step = checkpoint['step'] else: args.restore_step = 0 if use_cuda: 
model.cuda() criterion.cuda() # DISTRUBUTED if num_gpus > 1: model = DDP_th(model, device_ids=[args.rank]) if c.noam_schedule: scheduler = NoamLR(optimizer, warmup_steps=c.warmup_steps, last_epoch=args.restore_step - 1) else: scheduler = None num_params = count_parameters(model) print("\n > Model has {} parameters".format(num_params), flush=True) if 'best_loss' not in locals(): best_loss = float('inf') # define dataloaders train_loader = setup_loader(ap, 1, is_val=False, verbose=True) eval_loader = setup_loader(ap, 1, is_val=True, verbose=True) global_step = args.restore_step for epoch in range(0, c.epochs): c_logger.print_epoch_start(epoch, c.epochs) train_avg_loss_dict, global_step = train(train_loader, model, criterion, optimizer, scheduler, ap, global_step, epoch) eval_avg_loss_dict = evaluate(eval_loader , model, criterion, ap, global_step, epoch) c_logger.print_epoch_end(epoch, eval_avg_loss_dict) target_loss = train_avg_loss_dict['avg_loss'] if c.run_eval: target_loss = eval_avg_loss_dict['avg_loss'] best_loss = save_best_model(target_loss, best_loss, model, optimizer, global_step, epoch, c.r, OUT_PATH) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--continue_path', type=str, help='Training output folder to continue training. Use to continue a training. If it is used, "config_path" is ignored.', default='', required='--config_path' not in sys.argv) parser.add_argument( '--restore_path', type=str, help='Model file to be restored. 
Use to finetune a model.', default='') parser.add_argument( '--config_path', type=str, help='Path to config file for training.', required='--continue_path' not in sys.argv ) parser.add_argument('--debug', type=bool, default=False, help='Do not verify commit integrity to run training.') # DISTRUBUTED parser.add_argument( '--rank', type=int, default=0, help='DISTRIBUTED: process rank for distributed training.') parser.add_argument('--group_id', type=str, default="", help='DISTRIBUTED: process group id.') args = parser.parse_args() if args.continue_path != '': args.output_path = args.continue_path args.config_path = os.path.join(args.continue_path, 'config.json') list_of_files = glob.glob(args.continue_path + "/*.pth.tar") # * means all if need specific format then *.csv latest_model_file = max(list_of_files, key=os.path.getctime) args.restore_path = latest_model_file print(f" > Training continues for {args.restore_path}") # setup output paths and read configs c = load_config(args.config_path) # check_config(c) check_config_tts(c) _ = os.path.dirname(os.path.realpath(__file__)) if c.mixed_precision: print(" > Mixed precision enabled.") OUT_PATH = args.continue_path if args.continue_path == '': OUT_PATH = create_experiment_folder(c.output_path, c.run_name, args.debug) AUDIO_PATH = os.path.join(OUT_PATH, 'test_audios') c_logger = ConsoleLogger() if args.rank == 0: os.makedirs(AUDIO_PATH, exist_ok=True) new_fields = {} if args.restore_path: new_fields["restore_path"] = args.restore_path new_fields["github_branch"] = get_git_branch() copy_model_files(c, args.config_path, OUT_PATH, new_fields) os.chmod(AUDIO_PATH, 0o775) os.chmod(OUT_PATH, 0o775) LOG_DIR = OUT_PATH tb_logger = TensorboardLogger(LOG_DIR, model_name='TTS') # write model desc to tensorboard tb_logger.tb_add_text('model-description', c['run_description'], 0) try: main(args) except KeyboardInterrupt: remove_experiment_folder(OUT_PATH) try: sys.exit(0) except SystemExit: os._exit(0) # pylint: 
disable=protected-access except Exception: # pylint: disable=broad-except remove_experiment_folder(OUT_PATH) traceback.print_exc() sys.exit(1)
24,811
39.084006
132
py
TTS
TTS-master/TTS/bin/tune_wavegrad.py
"""Search a good noise schedule for WaveGrad for a given number of inferece iterations""" import argparse from itertools import product as cartesian_product import numpy as np import torch from torch.utils.data import DataLoader from tqdm import tqdm from TTS.utils.audio import AudioProcessor from TTS.utils.io import load_config from TTS.vocoder.datasets.preprocess import load_wav_data from TTS.vocoder.datasets.wavegrad_dataset import WaveGradDataset from TTS.vocoder.utils.generic_utils import setup_generator parser = argparse.ArgumentParser() parser.add_argument('--model_path', type=str, help='Path to model checkpoint.') parser.add_argument('--config_path', type=str, help='Path to model config file.') parser.add_argument('--data_path', type=str, help='Path to data directory.') parser.add_argument('--output_path', type=str, help='path for output file including file name and extension.') parser.add_argument('--num_iter', type=int, help='Number of model inference iterations that you like to optimize noise schedule for.') parser.add_argument('--use_cuda', type=bool, help='enable/disable CUDA.') parser.add_argument('--num_samples', type=int, default=1, help='Number of datasamples used for inference.') parser.add_argument('--search_depth', type=int, default=3, help='Search granularity. 
Increasing this increases the run-time exponentially.') # load config args = parser.parse_args() config = load_config(args.config_path) # setup audio processor ap = AudioProcessor(**config.audio) # load dataset _, train_data = load_wav_data(args.data_path, 0) train_data = train_data[:args.num_samples] dataset = WaveGradDataset(ap=ap, items=train_data, seq_len=-1, hop_len=ap.hop_length, pad_short=config.pad_short, conv_pad=config.conv_pad, is_training=True, return_segments=False, use_noise_augment=False, use_cache=False, verbose=True) loader = DataLoader( dataset, batch_size=1, shuffle=False, collate_fn=dataset.collate_full_clips, drop_last=False, num_workers=config.num_loader_workers, pin_memory=False) # setup the model model = setup_generator(config) if args.use_cuda: model.cuda() # setup optimization parameters base_values = sorted(10 * np.random.uniform(size=args.search_depth)) print(base_values) exponents = 10 ** np.linspace(-6, -1, num=args.num_iter) best_error = float('inf') best_schedule = None total_search_iter = len(base_values)**args.num_iter for base in tqdm(cartesian_product(base_values, repeat=args.num_iter), total=total_search_iter): beta = exponents * base model.compute_noise_level(beta) for data in loader: mel, audio = data y_hat = model.inference(mel.cuda() if args.use_cuda else mel) if args.use_cuda: y_hat = y_hat.cpu() y_hat = y_hat.numpy() mel_hat = [] for i in range(y_hat.shape[0]): m = ap.melspectrogram(y_hat[i, 0])[:, :-1] mel_hat.append(torch.from_numpy(m)) mel_hat = torch.stack(mel_hat) mse = torch.sum((mel - mel_hat) ** 2).mean() if mse.item() < best_error: best_error = mse.item() best_schedule = {'beta': beta} print(f" > Found a better schedule. - MSE: {mse.item()}") np.save(args.output_path, best_schedule)
3,538
37.467391
140
py
TTS
TTS-master/TTS/bin/convert_tacotron2_torch_to_tf.py
# %% # %% import argparse from difflib import SequenceMatcher import os import sys # %% # print variable match from pprint import pprint import numpy as np import tensorflow as tf import torch from TTS.tts.tf.models.tacotron2 import Tacotron2 from TTS.tts.tf.utils.convert_torch_to_tf_utils import ( compare_torch_tf, convert_tf_name, transfer_weights_torch_to_tf) from TTS.tts.tf.utils.generic_utils import save_checkpoint from TTS.tts.utils.generic_utils import setup_model from TTS.tts.utils.text.symbols import phonemes, symbols from TTS.utils.io import load_config sys.path.append('/home/erogol/Projects') os.environ['CUDA_VISIBLE_DEVICES'] = '' parser = argparse.ArgumentParser() parser.add_argument('--torch_model_path', type=str, help='Path to target torch model to be converted to TF.') parser.add_argument('--config_path', type=str, help='Path to config file of torch model.') parser.add_argument('--output_path', type=str, help='path to output file including file name to save TF model.') args = parser.parse_args() # load model config config_path = args.config_path c = load_config(config_path) num_speakers = 0 # init torch model num_chars = len(phonemes) if c.use_phonemes else len(symbols) model = setup_model(num_chars, num_speakers, c) checkpoint = torch.load(args.torch_model_path, map_location=torch.device('cpu')) state_dict = checkpoint['model'] model.load_state_dict(state_dict) # init tf model model_tf = Tacotron2(num_chars=num_chars, num_speakers=num_speakers, r=model.decoder.r, postnet_output_dim=c.audio['num_mels'], decoder_output_dim=c.audio['num_mels'], attn_type=c.attention_type, attn_win=c.windowing, attn_norm=c.attention_norm, prenet_type=c.prenet_type, prenet_dropout=c.prenet_dropout, forward_attn=c.use_forward_attn, trans_agent=c.transition_agent, forward_attn_mask=c.forward_attn_mask, location_attn=c.location_attn, attn_K=c.attention_heads, separate_stopnet=c.separate_stopnet, bidirectional_decoder=c.bidirectional_decoder) # set initial layer mapping - 
these are not captured by the below heuristic approach # TODO: set layer names so that we can remove these manual matching common_sufix = '/.ATTRIBUTES/VARIABLE_VALUE' var_map = [ ('embedding/embeddings:0', 'embedding.weight'), ('encoder/lstm/forward_lstm/lstm_cell_1/kernel:0', 'encoder.lstm.weight_ih_l0'), ('encoder/lstm/forward_lstm/lstm_cell_1/recurrent_kernel:0', 'encoder.lstm.weight_hh_l0'), ('encoder/lstm/backward_lstm/lstm_cell_2/kernel:0', 'encoder.lstm.weight_ih_l0_reverse'), ('encoder/lstm/backward_lstm/lstm_cell_2/recurrent_kernel:0', 'encoder.lstm.weight_hh_l0_reverse'), ('encoder/lstm/forward_lstm/lstm_cell_1/bias:0', ('encoder.lstm.bias_ih_l0', 'encoder.lstm.bias_hh_l0')), ('encoder/lstm/backward_lstm/lstm_cell_2/bias:0', ('encoder.lstm.bias_ih_l0_reverse', 'encoder.lstm.bias_hh_l0_reverse')), ('attention/v/kernel:0', 'decoder.attention.v.linear_layer.weight'), ('decoder/linear_projection/kernel:0', 'decoder.linear_projection.linear_layer.weight'), ('decoder/stopnet/kernel:0', 'decoder.stopnet.1.linear_layer.weight') ] # %% # get tf_model graph model_tf.build_inference() # get tf variables tf_vars = model_tf.weights # match variable names with fuzzy logic torch_var_names = list(state_dict.keys()) tf_var_names = [we.name for we in model_tf.weights] for tf_name in tf_var_names: # skip re-mapped layer names if tf_name in [name[0] for name in var_map]: continue tf_name_edited = convert_tf_name(tf_name) ratios = [ SequenceMatcher(None, torch_name, tf_name_edited).ratio() for torch_name in torch_var_names ] max_idx = np.argmax(ratios) matching_name = torch_var_names[max_idx] del torch_var_names[max_idx] var_map.append((tf_name, matching_name)) pprint(var_map) pprint(torch_var_names) # pass weights tf_vars = transfer_weights_torch_to_tf(tf_vars, dict(var_map), state_dict) # Compare TF and TORCH models # %% # check embedding outputs model.eval() input_ids = torch.randint(0, 24, (1, 128)).long() o_t = model.embedding(input_ids) o_tf = 
model_tf.embedding(input_ids.detach().numpy()) assert abs(o_t.detach().numpy() - o_tf.numpy()).sum() < 1e-5, abs(o_t.detach().numpy() - o_tf.numpy()).sum() # compare encoder outputs oo_en = model.encoder.inference(o_t.transpose(1, 2)) ooo_en = model_tf.encoder(o_t.detach().numpy(), training=False) assert compare_torch_tf(oo_en, ooo_en) < 1e-5 #pylint: disable=redefined-builtin # compare decoder.attention_rnn inp = torch.rand([1, 768]) inp_tf = inp.numpy() model.decoder._init_states(oo_en, mask=None) #pylint: disable=protected-access output, cell_state = model.decoder.attention_rnn(inp) states = model_tf.decoder.build_decoder_initial_states(1, 512, 128) output_tf, memory_state = model_tf.decoder.attention_rnn(inp_tf, states[2], training=False) assert compare_torch_tf(output, output_tf).mean() < 1e-5 query = output inputs = torch.rand([1, 128, 512]) query_tf = query.detach().numpy() inputs_tf = inputs.numpy() # compare decoder.attention model.decoder.attention.init_states(inputs) processes_inputs = model.decoder.attention.preprocess_inputs(inputs) loc_attn, proc_query = model.decoder.attention.get_location_attention( query, processes_inputs) context = model.decoder.attention(query, inputs, processes_inputs, None) attention_states = model_tf.decoder.build_decoder_initial_states(1, 512, 128)[-1] model_tf.decoder.attention.process_values(tf.convert_to_tensor(inputs_tf)) loc_attn_tf, proc_query_tf = model_tf.decoder.attention.get_loc_attn(query_tf, attention_states) context_tf, attention, attention_states = model_tf.decoder.attention(query_tf, attention_states, training=False) assert compare_torch_tf(loc_attn, loc_attn_tf).mean() < 1e-5 assert compare_torch_tf(proc_query, proc_query_tf).mean() < 1e-5 assert compare_torch_tf(context, context_tf) < 1e-5 # compare decoder.decoder_rnn input = torch.rand([1, 1536]) input_tf = input.numpy() model.decoder._init_states(oo_en, mask=None) #pylint: disable=protected-access output, cell_state = model.decoder.decoder_rnn( input, 
[model.decoder.decoder_hidden, model.decoder.decoder_cell]) states = model_tf.decoder.build_decoder_initial_states(1, 512, 128) output_tf, memory_state = model_tf.decoder.decoder_rnn(input_tf, states[3], training=False) assert abs(input - input_tf).mean() < 1e-5 assert compare_torch_tf(output, output_tf).mean() < 1e-5 # compare decoder.linear_projection input = torch.rand([1, 1536]) input_tf = input.numpy() output = model.decoder.linear_projection(input) output_tf = model_tf.decoder.linear_projection(input_tf, training=False) assert compare_torch_tf(output, output_tf) < 1e-5 # compare decoder outputs model.decoder.max_decoder_steps = 100 model_tf.decoder.set_max_decoder_steps(100) output, align, stop = model.decoder.inference(oo_en) states = model_tf.decoder.build_decoder_initial_states(1, 512, 128) output_tf, align_tf, stop_tf = model_tf.decoder(ooo_en, states, training=False) assert compare_torch_tf(output.transpose(1, 2), output_tf) < 1e-4 # compare the whole model output outputs_torch = model.inference(input_ids) outputs_tf = model_tf(tf.convert_to_tensor(input_ids.numpy())) print(abs(outputs_torch[0].numpy()[:, 0] - outputs_tf[0].numpy()[:, 0]).mean()) assert compare_torch_tf(outputs_torch[2][:, 50, :], outputs_tf[2][:, 50, :]) < 1e-5 assert compare_torch_tf(outputs_torch[0], outputs_tf[0]) < 1e-4 # %% # save tf model save_checkpoint(model_tf, None, checkpoint['step'], checkpoint['epoch'], checkpoint['r'], args.output_path) print(' > Model conversion is successfully completed :).')
8,452
38.5
112
py
TTS
TTS-master/TTS/bin/convert_melgan_torch_to_tf.py
import argparse from difflib import SequenceMatcher import os import numpy as np import tensorflow as tf import torch from TTS.utils.io import load_config from TTS.vocoder.tf.utils.convert_torch_to_tf_utils import ( compare_torch_tf, convert_tf_name, transfer_weights_torch_to_tf) from TTS.vocoder.tf.utils.generic_utils import \ setup_generator as setup_tf_generator from TTS.vocoder.tf.utils.io import save_checkpoint from TTS.vocoder.utils.generic_utils import setup_generator # prevent GPU use os.environ['CUDA_VISIBLE_DEVICES'] = '' # define args parser = argparse.ArgumentParser() parser.add_argument('--torch_model_path', type=str, help='Path to target torch model to be converted to TF.') parser.add_argument('--config_path', type=str, help='Path to config file of torch model.') parser.add_argument( '--output_path', type=str, help='path to output file including file name to save TF model.') args = parser.parse_args() # load model config config_path = args.config_path c = load_config(config_path) num_speakers = 0 # init torch model model = setup_generator(c) checkpoint = torch.load(args.torch_model_path, map_location=torch.device('cpu')) state_dict = checkpoint['model'] model.load_state_dict(state_dict) model.remove_weight_norm() state_dict = model.state_dict() # init tf model model_tf = setup_tf_generator(c) common_sufix = '/.ATTRIBUTES/VARIABLE_VALUE' # get tf_model graph by passing an input # B x D x T dummy_input = tf.random.uniform((7, 80, 64), dtype=tf.float32) mel_pred = model_tf(dummy_input, training=False) # get tf variables tf_vars = model_tf.weights # match variable names with fuzzy logic torch_var_names = list(state_dict.keys()) tf_var_names = [we.name for we in model_tf.weights] var_map = [] for tf_name in tf_var_names: # skip re-mapped layer names if tf_name in [name[0] for name in var_map]: continue tf_name_edited = convert_tf_name(tf_name) ratios = [ SequenceMatcher(None, torch_name, tf_name_edited).ratio() for torch_name in torch_var_names ] max_idx = 
np.argmax(ratios) matching_name = torch_var_names[max_idx] del torch_var_names[max_idx] var_map.append((tf_name, matching_name)) # pass weights tf_vars = transfer_weights_torch_to_tf(tf_vars, dict(var_map), state_dict) # Compare TF and TORCH models # check embedding outputs model.eval() dummy_input_torch = torch.ones((1, 80, 10)) dummy_input_tf = tf.convert_to_tensor(dummy_input_torch.numpy()) dummy_input_tf = tf.transpose(dummy_input_tf, perm=[0, 2, 1]) dummy_input_tf = tf.expand_dims(dummy_input_tf, 2) out_torch = model.layers[0](dummy_input_torch) out_tf = model_tf.model_layers[0](dummy_input_tf) out_tf_ = tf.transpose(out_tf, perm=[0, 3, 2, 1])[:, :, 0, :] assert compare_torch_tf(out_torch, out_tf_) < 1e-5 for i in range(1, len(model.layers)): print(f"{i} -> {model.layers[i]} vs {model_tf.model_layers[i]}") out_torch = model.layers[i](out_torch) out_tf = model_tf.model_layers[i](out_tf) out_tf_ = tf.transpose(out_tf, perm=[0, 3, 2, 1])[:, :, 0, :] diff = compare_torch_tf(out_torch, out_tf_) assert diff < 1e-5, diff torch.manual_seed(0) dummy_input_torch = torch.rand((1, 80, 100)) dummy_input_tf = tf.convert_to_tensor(dummy_input_torch.numpy()) model.inference_padding = 0 model_tf.inference_padding = 0 output_torch = model.inference(dummy_input_torch) output_tf = model_tf(dummy_input_tf, training=False) assert compare_torch_tf(output_torch, output_tf) < 1e-5, compare_torch_tf( output_torch, output_tf) # save tf model save_checkpoint(model_tf, checkpoint['step'], checkpoint['epoch'], args.output_path) print(' > Model conversion is successfully completed :).')
3,833
31.769231
77
py
TTS
TTS-master/TTS/bin/train_encoder.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import argparse import os import sys import time import traceback import torch from torch.utils.data import DataLoader from TTS.speaker_encoder.dataset import MyDataset from TTS.speaker_encoder.losses import AngleProtoLoss, GE2ELoss from TTS.speaker_encoder.model import SpeakerEncoder from TTS.speaker_encoder.utils.generic_utils import \ check_config_speaker_encoder, save_best_model from TTS.speaker_encoder.utils.visual import plot_embeddings from TTS.tts.datasets.preprocess import load_meta_data from TTS.utils.audio import AudioProcessor from TTS.utils.generic_utils import (count_parameters, create_experiment_folder, get_git_branch, remove_experiment_folder, set_init_dict) from TTS.utils.io import copy_model_files, load_config from TTS.utils.radam import RAdam from TTS.utils.tensorboard_logger import TensorboardLogger from TTS.utils.training import NoamLR, check_update torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True torch.manual_seed(54321) use_cuda = torch.cuda.is_available() num_gpus = torch.cuda.device_count() print(" > Using CUDA: ", use_cuda) print(" > Number of GPUs: ", num_gpus) def setup_loader(ap: AudioProcessor, is_val: bool=False, verbose: bool=False): if is_val: loader = None else: dataset = MyDataset(ap, meta_data_eval if is_val else meta_data_train, voice_len=1.6, num_utter_per_speaker=c.num_utters_per_speaker, num_speakers_in_batch=c.num_speakers_in_batch, skip_speakers=False, storage_size=c.storage["storage_size"], sample_from_storage_p=c.storage["sample_from_storage_p"], additive_noise=c.storage["additive_noise"], verbose=verbose) # sampler = DistributedSampler(dataset) if num_gpus > 1 else None loader = DataLoader(dataset, batch_size=c.num_speakers_in_batch, shuffle=False, num_workers=c.num_loader_workers, collate_fn=dataset.collate_fn) return loader def train(model, criterion, optimizer, scheduler, ap, global_step): data_loader = setup_loader(ap, is_val=False, verbose=True) 
model.train() epoch_time = 0 best_loss = float('inf') avg_loss = 0 avg_loader_time = 0 end_time = time.time() for _, data in enumerate(data_loader): start_time = time.time() # setup input data inputs = data[0] loader_time = time.time() - end_time global_step += 1 # setup lr if c.lr_decay: scheduler.step() optimizer.zero_grad() # dispatch data to GPU if use_cuda: inputs = inputs.cuda(non_blocking=True) # labels = labels.cuda(non_blocking=True) # forward pass model outputs = model(inputs) # loss computation loss = criterion( outputs.view(c.num_speakers_in_batch, outputs.shape[0] // c.num_speakers_in_batch, -1)) loss.backward() grad_norm, _ = check_update(model, c.grad_clip) optimizer.step() step_time = time.time() - start_time epoch_time += step_time # Averaged Loss and Averaged Loader Time avg_loss = 0.01 * loss.item() \ + 0.99 * avg_loss if avg_loss != 0 else loss.item() avg_loader_time = 1/c.num_loader_workers * loader_time + \ (c.num_loader_workers-1) / c.num_loader_workers * avg_loader_time if avg_loader_time != 0 else loader_time current_lr = optimizer.param_groups[0]['lr'] if global_step % c.steps_plot_stats == 0: # Plot Training Epoch Stats train_stats = { "loss": avg_loss, "lr": current_lr, "grad_norm": grad_norm, "step_time": step_time, "avg_loader_time": avg_loader_time } tb_logger.tb_train_epoch_stats(global_step, train_stats) figures = { # FIXME: not constant "UMAP Plot": plot_embeddings(outputs.detach().cpu().numpy(), 10), } tb_logger.tb_train_figures(global_step, figures) if global_step % c.print_step == 0: print( " | > Step:{} Loss:{:.5f} AvgLoss:{:.5f} GradNorm:{:.5f} " "StepTime:{:.2f} LoaderTime:{:.2f} AvGLoaderTime:{:.2f} LR:{:.6f}".format( global_step, loss.item(), avg_loss, grad_norm, step_time, loader_time, avg_loader_time, current_lr), flush=True) # save best model best_loss = save_best_model(model, optimizer, avg_loss, best_loss, OUT_PATH, global_step) end_time = time.time() return avg_loss, global_step def main(args): # pylint: 
disable=redefined-outer-name # pylint: disable=global-variable-undefined global meta_data_train global meta_data_eval ap = AudioProcessor(**c.audio) model = SpeakerEncoder(input_dim=c.model['input_dim'], proj_dim=c.model['proj_dim'], lstm_dim=c.model['lstm_dim'], num_lstm_layers=c.model['num_lstm_layers']) optimizer = RAdam(model.parameters(), lr=c.lr) if c.loss == "ge2e": criterion = GE2ELoss(loss_method='softmax') elif c.loss == "angleproto": criterion = AngleProtoLoss() else: raise Exception("The %s not is a loss supported" % c.loss) if args.restore_path: checkpoint = torch.load(args.restore_path) try: # TODO: fix optimizer init, model.cuda() needs to be called before # optimizer restore # optimizer.load_state_dict(checkpoint['optimizer']) if c.reinit_layers: raise RuntimeError model.load_state_dict(checkpoint['model']) except KeyError: print(" > Partial model initialization.") model_dict = model.state_dict() model_dict = set_init_dict(model_dict, checkpoint, c) model.load_state_dict(model_dict) del model_dict for group in optimizer.param_groups: group['lr'] = c.lr print(" > Model restored from step %d" % checkpoint['step'], flush=True) args.restore_step = checkpoint['step'] else: args.restore_step = 0 if use_cuda: model = model.cuda() criterion.cuda() if c.lr_decay: scheduler = NoamLR(optimizer, warmup_steps=c.warmup_steps, last_epoch=args.restore_step - 1) else: scheduler = None num_params = count_parameters(model) print("\n > Model has {} parameters".format(num_params), flush=True) # pylint: disable=redefined-outer-name meta_data_train, meta_data_eval = load_meta_data(c.datasets) global_step = args.restore_step _, global_step = train(model, criterion, optimizer, scheduler, ap, global_step) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--restore_path', type=str, help='Path to model outputs (checkpoint, tensorboard etc.).', default=0) parser.add_argument( '--config_path', type=str, required=True, help='Path to config file 
for training.', ) parser.add_argument('--debug', type=bool, default=True, help='Do not verify commit integrity to run training.') parser.add_argument( '--data_path', type=str, default='', help='Defines the data path. It overwrites config.json.') parser.add_argument('--output_path', type=str, help='path for training outputs.', default='') parser.add_argument('--output_folder', type=str, default='', help='folder name for training outputs.') args = parser.parse_args() # setup output paths and read configs c = load_config(args.config_path) check_config_speaker_encoder(c) _ = os.path.dirname(os.path.realpath(__file__)) if args.data_path != '': c.data_path = args.data_path if args.output_path == '': OUT_PATH = os.path.join(_, c.output_path) else: OUT_PATH = args.output_path if args.output_folder == '': OUT_PATH = create_experiment_folder(OUT_PATH, c.run_name, args.debug) else: OUT_PATH = os.path.join(OUT_PATH, args.output_folder) new_fields = {} if args.restore_path: new_fields["restore_path"] = args.restore_path new_fields["github_branch"] = get_git_branch() copy_model_files(c, args.config_path, OUT_PATH, new_fields) LOG_DIR = OUT_PATH tb_logger = TensorboardLogger(LOG_DIR, model_name='Speaker_Encoder') try: main(args) except KeyboardInterrupt: remove_experiment_folder(OUT_PATH) try: sys.exit(0) except SystemExit: os._exit(0) # pylint: disable=protected-access except Exception: # pylint: disable=broad-except remove_experiment_folder(OUT_PATH) traceback.print_exc() sys.exit(1)
9,856
34.843636
132
py
TTS
TTS-master/TTS/bin/train_glow_tts.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import argparse import glob import os import sys import time import traceback from random import randrange import torch # DISTRIBUTED from torch.nn.parallel import DistributedDataParallel as DDP_th from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from TTS.tts.datasets.preprocess import load_meta_data from TTS.tts.datasets.TTSDataset import MyDataset from TTS.tts.layers.losses import GlowTTSLoss from TTS.tts.utils.generic_utils import check_config_tts, setup_model from TTS.tts.utils.io import save_best_model, save_checkpoint from TTS.tts.utils.measures import alignment_diagonal_score from TTS.tts.utils.speakers import parse_speakers from TTS.tts.utils.synthesis import synthesis from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols from TTS.tts.utils.visual import plot_alignment, plot_spectrogram from TTS.utils.audio import AudioProcessor from TTS.utils.console_logger import ConsoleLogger from TTS.utils.distribute import init_distributed, reduce_tensor from TTS.utils.generic_utils import (KeepAverage, count_parameters, create_experiment_folder, get_git_branch, remove_experiment_folder, set_init_dict) from TTS.utils.io import copy_model_files, load_config from TTS.utils.radam import RAdam from TTS.utils.tensorboard_logger import TensorboardLogger from TTS.utils.training import NoamLR, setup_torch_training_env use_cuda, num_gpus = setup_torch_training_env(True, False) def setup_loader(ap, r, is_val=False, verbose=False): if is_val and not c.run_eval: loader = None else: dataset = MyDataset( r, c.text_cleaner, compute_linear_spec=False, meta_data=meta_data_eval if is_val else meta_data_train, ap=ap, tp=c.characters if 'characters' in c.keys() else None, add_blank=c['add_blank'] if 'add_blank' in c.keys() else False, batch_group_size=0 if is_val else c.batch_group_size * c.batch_size, min_seq_len=c.min_seq_len, max_seq_len=c.max_seq_len, 
phoneme_cache_path=c.phoneme_cache_path, use_phonemes=c.use_phonemes, phoneme_language=c.phoneme_language, enable_eos_bos=c.enable_eos_bos_chars, use_noise_augment=c['use_noise_augment'] and not is_val, verbose=verbose, speaker_mapping=speaker_mapping if c.use_speaker_embedding and c.use_external_speaker_embedding_file else None) if c.use_phonemes and c.compute_input_seq_cache: # precompute phonemes to have a better estimate of sequence lengths. dataset.compute_input_seq(c.num_loader_workers) dataset.sort_items() sampler = DistributedSampler(dataset) if num_gpus > 1 else None loader = DataLoader( dataset, batch_size=c.eval_batch_size if is_val else c.batch_size, shuffle=False, collate_fn=dataset.collate_fn, drop_last=False, sampler=sampler, num_workers=c.num_val_loader_workers if is_val else c.num_loader_workers, pin_memory=False) return loader def format_data(data): # setup input data text_input = data[0] text_lengths = data[1] speaker_names = data[2] mel_input = data[4].permute(0, 2, 1) # B x D x T mel_lengths = data[5] item_idx = data[7] attn_mask = data[9] avg_text_length = torch.mean(text_lengths.float()) avg_spec_length = torch.mean(mel_lengths.float()) if c.use_speaker_embedding: if c.use_external_speaker_embedding_file: # return precomputed embedding vector speaker_c = data[8] else: # return speaker_id to be used by an embedding layer speaker_c = [ speaker_mapping[speaker_name] for speaker_name in speaker_names ] speaker_c = torch.LongTensor(speaker_c) else: speaker_c = None # dispatch data to GPU if use_cuda: text_input = text_input.cuda(non_blocking=True) text_lengths = text_lengths.cuda(non_blocking=True) mel_input = mel_input.cuda(non_blocking=True) mel_lengths = mel_lengths.cuda(non_blocking=True) if speaker_c is not None: speaker_c = speaker_c.cuda(non_blocking=True) if attn_mask is not None: attn_mask = attn_mask.cuda(non_blocking=True) return text_input, text_lengths, mel_input, mel_lengths, speaker_c,\ avg_text_length, avg_spec_length, attn_mask, 
item_idx def data_depended_init(data_loader, model, ap): """Data depended initialization for activation normalization.""" if hasattr(model, 'module'): for f in model.module.decoder.flows: if getattr(f, "set_ddi", False): f.set_ddi(True) else: for f in model.decoder.flows: if getattr(f, "set_ddi", False): f.set_ddi(True) model.train() print(" > Data depended initialization ... ") num_iter = 0 with torch.no_grad(): for _, data in enumerate(data_loader): # format data text_input, text_lengths, mel_input, mel_lengths, spekaer_embed,\ _, _, attn_mask, item_idx = format_data(data) # forward pass model _ = model.forward( text_input, text_lengths, mel_input, mel_lengths, attn_mask, g=spekaer_embed) if num_iter == c.data_dep_init_iter: break num_iter += 1 if hasattr(model, 'module'): for f in model.module.decoder.flows: if getattr(f, "set_ddi", False): f.set_ddi(False) else: for f in model.decoder.flows: if getattr(f, "set_ddi", False): f.set_ddi(False) return model def train(data_loader, model, criterion, optimizer, scheduler, ap, global_step, epoch): model.train() epoch_time = 0 keep_avg = KeepAverage() if use_cuda: batch_n_iter = int( len(data_loader.dataset) / (c.batch_size * num_gpus)) else: batch_n_iter = int(len(data_loader.dataset) / c.batch_size) end_time = time.time() c_logger.print_train_start() scaler = torch.cuda.amp.GradScaler() if c.mixed_precision else None for num_iter, data in enumerate(data_loader): start_time = time.time() # format data text_input, text_lengths, mel_input, mel_lengths, speaker_c,\ avg_text_length, avg_spec_length, attn_mask, item_idx = format_data(data) loader_time = time.time() - end_time global_step += 1 optimizer.zero_grad() # forward pass model with torch.cuda.amp.autocast(enabled=c.mixed_precision): z, logdet, y_mean, y_log_scale, alignments, o_dur_log, o_total_dur = model.forward( text_input, text_lengths, mel_input, mel_lengths, attn_mask, g=speaker_c) # compute loss loss_dict = criterion(z, y_mean, y_log_scale, logdet, 
mel_lengths, o_dur_log, o_total_dur, text_lengths) # backward pass with loss scaling if c.mixed_precision: scaler.scale(loss_dict['loss']).backward() scaler.unscale_(optimizer) grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), c.grad_clip) scaler.step(optimizer) scaler.update() else: loss_dict['loss'].backward() grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), c.grad_clip) optimizer.step() # setup lr if c.noam_schedule: scheduler.step() # current_lr current_lr = optimizer.param_groups[0]['lr'] # compute alignment error (the lower the better ) align_error = 1 - alignment_diagonal_score(alignments, binary=True) loss_dict['align_error'] = align_error step_time = time.time() - start_time epoch_time += step_time # aggregate losses from processes if num_gpus > 1: loss_dict['log_mle'] = reduce_tensor(loss_dict['log_mle'].data, num_gpus) loss_dict['loss_dur'] = reduce_tensor(loss_dict['loss_dur'].data, num_gpus) loss_dict['loss'] = reduce_tensor(loss_dict['loss'] .data, num_gpus) # detach loss values loss_dict_new = dict() for key, value in loss_dict.items(): if isinstance(value, (int, float)): loss_dict_new[key] = value else: loss_dict_new[key] = value.item() loss_dict = loss_dict_new # update avg stats update_train_values = dict() for key, value in loss_dict.items(): update_train_values['avg_' + key] = value update_train_values['avg_loader_time'] = loader_time update_train_values['avg_step_time'] = step_time keep_avg.update_values(update_train_values) # print training progress if global_step % c.print_step == 0: log_dict = { "avg_spec_length": [avg_spec_length, 1], # value, precision "avg_text_length": [avg_text_length, 1], "step_time": [step_time, 4], "loader_time": [loader_time, 2], "current_lr": current_lr, } c_logger.print_train_step(batch_n_iter, num_iter, global_step, log_dict, loss_dict, keep_avg.avg_values) if args.rank == 0: # Plot Training Iter Stats # reduce TB load if global_step % c.tb_plot_step == 0: iter_stats = { "lr": 
current_lr, "grad_norm": grad_norm, "step_time": step_time } iter_stats.update(loss_dict) tb_logger.tb_train_iter_stats(global_step, iter_stats) if global_step % c.save_step == 0: if c.checkpoint: # save model save_checkpoint(model, optimizer, global_step, epoch, 1, OUT_PATH, model_loss=loss_dict['loss']) # wait all kernels to be completed torch.cuda.synchronize() # Diagnostic visualizations # direct pass on model for spec predictions target_speaker = None if speaker_c is None else speaker_c[:1] if hasattr(model, 'module'): spec_pred, *_ = model.module.inference(text_input[:1], text_lengths[:1], g=target_speaker) else: spec_pred, *_ = model.inference(text_input[:1], text_lengths[:1], g=target_speaker) spec_pred = spec_pred.permute(0, 2, 1) gt_spec = mel_input.permute(0, 2, 1) const_spec = spec_pred[0].data.cpu().numpy() gt_spec = gt_spec[0].data.cpu().numpy() align_img = alignments[0].data.cpu().numpy() figures = { "prediction": plot_spectrogram(const_spec, ap), "ground_truth": plot_spectrogram(gt_spec, ap), "alignment": plot_alignment(align_img), } tb_logger.tb_train_figures(global_step, figures) # Sample audio train_audio = ap.inv_melspectrogram(const_spec.T) tb_logger.tb_train_audios(global_step, {'TrainAudio': train_audio}, c.audio["sample_rate"]) end_time = time.time() # print epoch stats c_logger.print_train_epoch_end(global_step, epoch, epoch_time, keep_avg) # Plot Epoch Stats if args.rank == 0: epoch_stats = {"epoch_time": epoch_time} epoch_stats.update(keep_avg.avg_values) tb_logger.tb_train_epoch_stats(global_step, epoch_stats) if c.tb_model_param_stats: tb_logger.tb_model_weights(model, global_step) return keep_avg.avg_values, global_step @torch.no_grad() def evaluate(data_loader, model, criterion, ap, global_step, epoch): model.eval() epoch_time = 0 keep_avg = KeepAverage() c_logger.print_eval_start() if data_loader is not None: for num_iter, data in enumerate(data_loader): start_time = time.time() # format data text_input, text_lengths, mel_input, 
mel_lengths, speaker_c,\ _, _, attn_mask, item_idx = format_data(data) # forward pass model z, logdet, y_mean, y_log_scale, alignments, o_dur_log, o_total_dur = model.forward( text_input, text_lengths, mel_input, mel_lengths, attn_mask, g=speaker_c) # compute loss loss_dict = criterion(z, y_mean, y_log_scale, logdet, mel_lengths, o_dur_log, o_total_dur, text_lengths) # step time step_time = time.time() - start_time epoch_time += step_time # compute alignment score align_error = 1 - alignment_diagonal_score(alignments) loss_dict['align_error'] = align_error # aggregate losses from processes if num_gpus > 1: loss_dict['log_mle'] = reduce_tensor(loss_dict['log_mle'].data, num_gpus) loss_dict['loss_dur'] = reduce_tensor(loss_dict['loss_dur'].data, num_gpus) loss_dict['loss'] = reduce_tensor(loss_dict['loss'] .data, num_gpus) # detach loss values loss_dict_new = dict() for key, value in loss_dict.items(): if isinstance(value, (int, float)): loss_dict_new[key] = value else: loss_dict_new[key] = value.item() loss_dict = loss_dict_new # update avg stats update_train_values = dict() for key, value in loss_dict.items(): update_train_values['avg_' + key] = value keep_avg.update_values(update_train_values) if c.print_eval: c_logger.print_eval_step(num_iter, loss_dict, keep_avg.avg_values) if args.rank == 0: # Diagnostic visualizations # direct pass on model for spec predictions target_speaker = None if speaker_c is None else speaker_c[:1] if hasattr(model, 'module'): spec_pred, *_ = model.module.inference(text_input[:1], text_lengths[:1], g=target_speaker) else: spec_pred, *_ = model.inference(text_input[:1], text_lengths[:1], g=target_speaker) spec_pred = spec_pred.permute(0, 2, 1) gt_spec = mel_input.permute(0, 2, 1) const_spec = spec_pred[0].data.cpu().numpy() gt_spec = gt_spec[0].data.cpu().numpy() align_img = alignments[0].data.cpu().numpy() eval_figures = { "prediction": plot_spectrogram(const_spec, ap), "ground_truth": plot_spectrogram(gt_spec, ap), "alignment": 
plot_alignment(align_img) } # Sample audio eval_audio = ap.inv_melspectrogram(const_spec.T) tb_logger.tb_eval_audios(global_step, {"ValAudio": eval_audio}, c.audio["sample_rate"]) # Plot Validation Stats tb_logger.tb_eval_stats(global_step, keep_avg.avg_values) tb_logger.tb_eval_figures(global_step, eval_figures) if args.rank == 0 and epoch >= c.test_delay_epochs: if c.test_sentences_file is None: test_sentences = [ "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", "Be a voice, not an echo.", "I'm sorry Dave. I'm afraid I can't do that.", "This cake is great. It's so delicious and moist.", "Prior to November 22, 1963." ] else: with open(c.test_sentences_file, "r") as f: test_sentences = [s.strip() for s in f.readlines()] # test sentences test_audios = {} test_figures = {} print(" | > Synthesizing test sentences") if c.use_speaker_embedding: if c.use_external_speaker_embedding_file: speaker_embedding = speaker_mapping[list(speaker_mapping.keys())[randrange(len(speaker_mapping)-1)]]['embedding'] speaker_id = None else: speaker_id = 0 speaker_embedding = None else: speaker_id = None speaker_embedding = None style_wav = c.get("style_wav_for_test") for idx, test_sentence in enumerate(test_sentences): try: wav, alignment, _, postnet_output, _, _ = synthesis( model, test_sentence, c, use_cuda, ap, speaker_id=speaker_id, speaker_embedding=speaker_embedding, style_wav=style_wav, truncated=False, enable_eos_bos_chars=c.enable_eos_bos_chars, #pylint: disable=unused-argument use_griffin_lim=True, do_trim_silence=False) file_path = os.path.join(AUDIO_PATH, str(global_step)) os.makedirs(file_path, exist_ok=True) file_path = os.path.join(file_path, "TestSentence_{}.wav".format(idx)) ap.save_wav(wav, file_path) test_audios['{}-audio'.format(idx)] = wav test_figures['{}-prediction'.format(idx)] = plot_spectrogram( postnet_output, ap) test_figures['{}-alignment'.format(idx)] = plot_alignment( alignment) except: #pylint: 
disable=bare-except print(" !! Error creating Test Sentence -", idx) traceback.print_exc() tb_logger.tb_test_audios(global_step, test_audios, c.audio['sample_rate']) tb_logger.tb_test_figures(global_step, test_figures) return keep_avg.avg_values # FIXME: move args definition/parsing inside of main? def main(args): # pylint: disable=redefined-outer-name # pylint: disable=global-variable-undefined global meta_data_train, meta_data_eval, symbols, phonemes, speaker_mapping # Audio processor ap = AudioProcessor(**c.audio) if 'characters' in c.keys(): symbols, phonemes = make_symbols(**c.characters) # DISTRUBUTED if num_gpus > 1: init_distributed(args.rank, num_gpus, args.group_id, c.distributed["backend"], c.distributed["url"]) num_chars = len(phonemes) if c.use_phonemes else len(symbols) # load data instances meta_data_train, meta_data_eval = load_meta_data(c.datasets) # set the portion of the data used for training if 'train_portion' in c.keys(): meta_data_train = meta_data_train[:int(len(meta_data_train) * c.train_portion)] if 'eval_portion' in c.keys(): meta_data_eval = meta_data_eval[:int(len(meta_data_eval) * c.eval_portion)] # parse speakers num_speakers, speaker_embedding_dim, speaker_mapping = parse_speakers(c, args, meta_data_train, OUT_PATH) # setup model model = setup_model(num_chars, num_speakers, c, speaker_embedding_dim=speaker_embedding_dim) optimizer = RAdam(model.parameters(), lr=c.lr, weight_decay=0, betas=(0.9, 0.98), eps=1e-9) criterion = GlowTTSLoss() if args.restore_path: checkpoint = torch.load(args.restore_path, map_location='cpu') try: # TODO: fix optimizer init, model.cuda() needs to be called before # optimizer restore optimizer.load_state_dict(checkpoint['optimizer']) if c.reinit_layers: raise RuntimeError model.load_state_dict(checkpoint['model']) except: #pylint: disable=bare-except print(" > Partial model initialization.") model_dict = model.state_dict() model_dict = set_init_dict(model_dict, checkpoint['model'], c) 
model.load_state_dict(model_dict) del model_dict for group in optimizer.param_groups: group['initial_lr'] = c.lr print(" > Model restored from step %d" % checkpoint['step'], flush=True) args.restore_step = checkpoint['step'] else: args.restore_step = 0 if use_cuda: model.cuda() criterion.cuda() # DISTRUBUTED if num_gpus > 1: model = DDP_th(model, device_ids=[args.rank]) if c.noam_schedule: scheduler = NoamLR(optimizer, warmup_steps=c.warmup_steps, last_epoch=args.restore_step - 1) else: scheduler = None num_params = count_parameters(model) print("\n > Model has {} parameters".format(num_params), flush=True) if 'best_loss' not in locals(): best_loss = float('inf') # define dataloaders train_loader = setup_loader(ap, 1, is_val=False, verbose=True) eval_loader = setup_loader(ap, 1, is_val=True, verbose=True) global_step = args.restore_step model = data_depended_init(train_loader, model, ap) for epoch in range(0, c.epochs): c_logger.print_epoch_start(epoch, c.epochs) train_avg_loss_dict, global_step = train(train_loader, model, criterion, optimizer, scheduler, ap, global_step, epoch) eval_avg_loss_dict = evaluate(eval_loader , model, criterion, ap, global_step, epoch) c_logger.print_epoch_end(epoch, eval_avg_loss_dict) target_loss = train_avg_loss_dict['avg_loss'] if c.run_eval: target_loss = eval_avg_loss_dict['avg_loss'] best_loss = save_best_model(target_loss, best_loss, model, optimizer, global_step, epoch, c.r, OUT_PATH) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--continue_path', type=str, help='Training output folder to continue training. Use to continue a training. If it is used, "config_path" is ignored.', default='', required='--config_path' not in sys.argv) parser.add_argument( '--restore_path', type=str, help='Model file to be restored. 
Use to finetune a model.', default='') parser.add_argument( '--config_path', type=str, help='Path to config file for training.', required='--continue_path' not in sys.argv ) parser.add_argument('--debug', type=bool, default=False, help='Do not verify commit integrity to run training.') # DISTRUBUTED parser.add_argument( '--rank', type=int, default=0, help='DISTRIBUTED: process rank for distributed training.') parser.add_argument('--group_id', type=str, default="", help='DISTRIBUTED: process group id.') args = parser.parse_args() if args.continue_path != '': args.output_path = args.continue_path args.config_path = os.path.join(args.continue_path, 'config.json') list_of_files = glob.glob(args.continue_path + "/*.pth.tar") # * means all if need specific format then *.csv latest_model_file = max(list_of_files, key=os.path.getctime) args.restore_path = latest_model_file print(f" > Training continues for {args.restore_path}") # setup output paths and read configs c = load_config(args.config_path) # check_config(c) check_config_tts(c) _ = os.path.dirname(os.path.realpath(__file__)) if c.mixed_precision: print(" > Mixed precision enabled.") OUT_PATH = args.continue_path if args.continue_path == '': OUT_PATH = create_experiment_folder(c.output_path, c.run_name, args.debug) AUDIO_PATH = os.path.join(OUT_PATH, 'test_audios') c_logger = ConsoleLogger() if args.rank == 0: os.makedirs(AUDIO_PATH, exist_ok=True) new_fields = {} if args.restore_path: new_fields["restore_path"] = args.restore_path new_fields["github_branch"] = get_git_branch() copy_model_files(c, args.config_path, OUT_PATH, new_fields) os.chmod(AUDIO_PATH, 0o775) os.chmod(OUT_PATH, 0o775) LOG_DIR = OUT_PATH tb_logger = TensorboardLogger(LOG_DIR, model_name='TTS') # write model desc to tensorboard tb_logger.tb_add_text('model-description', c['run_description'], 0) try: main(args) except KeyboardInterrupt: remove_experiment_folder(OUT_PATH) try: sys.exit(0) except SystemExit: os._exit(0) # pylint: 
disable=protected-access except Exception: # pylint: disable=broad-except remove_experiment_folder(OUT_PATH) traceback.print_exc() sys.exit(1)
25,835
38.264438
129
py
TTS
TTS-master/TTS/bin/distribute.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import os
import pathlib
import subprocess
import sys
import time

import torch


def main():
    """Spawn one training process per visible GPU.

    Re-invokes the target training script (``--script``) as ``python3``
    subprocesses, forwarding ``--continue_path``, ``--restore_path`` and
    ``--config_path`` unchanged, and adding a shared ``--group_id`` plus a
    per-process ``--rank`` so the children can form a distributed group.
    Only rank 0 inherits stdout; all other ranks are silenced. Blocks until
    every child process has exited.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--script',
        type=str,
        help='Target training script to distribute.')
    parser.add_argument(
        '--continue_path',
        type=str,
        help='Training output folder to continue training. Use to continue a training. If it is used, "config_path" is ignored.',
        default='',
        required='--config_path' not in sys.argv)
    parser.add_argument(
        '--restore_path',
        type=str,
        help='Model file to be restored. Use to finetune a model.',
        default='')
    parser.add_argument(
        '--config_path',
        type=str,
        help='Path to config file for training.',
        required='--continue_path' not in sys.argv)
    args = parser.parse_args()

    num_gpus = torch.cuda.device_count()
    # Timestamp shared by all ranks so children join the same process group.
    group_id = time.strftime("%Y_%m_%d-%H%M%S")

    # Build the child command line; the last slot is a per-rank placeholder.
    folder_path = pathlib.Path(__file__).parent.absolute()
    command = [os.path.join(folder_path, args.script)]
    command.append('--continue_path={}'.format(args.continue_path))
    command.append('--restore_path={}'.format(args.restore_path))
    command.append('--config_path={}'.format(args.config_path))
    command.append('--group_id=group_{}'.format(group_id))
    command.append('')

    # Launch one process per GPU.
    processes = []
    for i in range(num_gpus):
        my_env = os.environ.copy()
        # Give each rank a private egg cache to avoid extraction races.
        my_env["PYTHON_EGG_CACHE"] = "/tmp/tmp{}".format(i)
        command[-1] = '--rank={}'.format(i)
        # subprocess.DEVNULL instead of open(os.devnull): the original leaked
        # one file handle per silenced rank (opened, never closed).
        stdout = None if i == 0 else subprocess.DEVNULL
        p = subprocess.Popen(['python3'] + command, stdout=stdout, env=my_env)
        processes.append(p)
        print(command)

    # Wait for all ranks to finish before returning.
    for p in processes:
        p.wait()


if __name__ == '__main__':
    main()
2,021
27.885714
129
py
TTS
TTS-master/TTS/bin/convert_melgan_tflite.py
# Convert Tensorflow Tacotron2 model to TF-Lite binary import argparse from TTS.utils.io import load_config from TTS.vocoder.tf.utils.generic_utils import setup_generator from TTS.vocoder.tf.utils.io import load_checkpoint from TTS.vocoder.tf.utils.tflite import convert_melgan_to_tflite parser = argparse.ArgumentParser() parser.add_argument('--tf_model', type=str, help='Path to target torch model to be converted to TF.') parser.add_argument('--config_path', type=str, help='Path to config file of torch model.') parser.add_argument('--output_path', type=str, help='path to tflite output binary.') args = parser.parse_args() # Set constants CONFIG = load_config(args.config_path) # load the model model = setup_generator(CONFIG) model.build_inference() model = load_checkpoint(model, args.tf_model) # create tflite model tflite_model = convert_melgan_to_tflite(model, output_path=args.output_path)
1,028
30.181818
77
py
TTS
TTS-master/TTS/bin/train_vocoder_wavegrad.py
import argparse import glob import os import sys import time import traceback import numpy as np import torch # DISTRIBUTED from torch.nn.parallel import DistributedDataParallel as DDP_th from torch.optim import Adam from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from TTS.utils.audio import AudioProcessor from TTS.utils.console_logger import ConsoleLogger from TTS.utils.distribute import init_distributed from TTS.utils.generic_utils import (KeepAverage, count_parameters, create_experiment_folder, get_git_branch, remove_experiment_folder, set_init_dict) from TTS.utils.io import copy_model_files, load_config from TTS.utils.tensorboard_logger import TensorboardLogger from TTS.utils.training import setup_torch_training_env from TTS.vocoder.datasets.preprocess import load_wav_data, load_wav_feat_data from TTS.vocoder.datasets.wavegrad_dataset import WaveGradDataset from TTS.vocoder.utils.generic_utils import plot_results, setup_generator from TTS.vocoder.utils.io import save_best_model, save_checkpoint use_cuda, num_gpus = setup_torch_training_env(True, True) def setup_loader(ap, is_val=False, verbose=False): if is_val and not c.run_eval: loader = None else: dataset = WaveGradDataset(ap=ap, items=eval_data if is_val else train_data, seq_len=c.seq_len, hop_len=ap.hop_length, pad_short=c.pad_short, conv_pad=c.conv_pad, is_training=not is_val, return_segments=True, use_noise_augment=False, use_cache=c.use_cache, verbose=verbose) sampler = DistributedSampler(dataset) if num_gpus > 1 else None loader = DataLoader(dataset, batch_size=c.batch_size, shuffle=num_gpus <= 1, drop_last=False, sampler=sampler, num_workers=c.num_val_loader_workers if is_val else c.num_loader_workers, pin_memory=False) return loader def format_data(data): # return a whole audio segment m, x = data x = x.unsqueeze(1) if use_cuda: m = m.cuda(non_blocking=True) x = x.cuda(non_blocking=True) return m, x def format_test_data(data): # return a whole audio 
segment m, x = data m = m[None, ...] x = x[None, None, ...] if use_cuda: m = m.cuda(non_blocking=True) x = x.cuda(non_blocking=True) return m, x def train(model, criterion, optimizer, scheduler, scaler, ap, global_step, epoch): data_loader = setup_loader(ap, is_val=False, verbose=(epoch == 0)) model.train() epoch_time = 0 keep_avg = KeepAverage() if use_cuda: batch_n_iter = int( len(data_loader.dataset) / (c.batch_size * num_gpus)) else: batch_n_iter = int(len(data_loader.dataset) / c.batch_size) end_time = time.time() c_logger.print_train_start() # setup noise schedule noise_schedule = c['train_noise_schedule'] betas = np.linspace(noise_schedule['min_val'], noise_schedule['max_val'], noise_schedule['num_steps']) if hasattr(model, 'module'): model.module.compute_noise_level(betas) else: model.compute_noise_level(betas) for num_iter, data in enumerate(data_loader): start_time = time.time() # format data m, x = format_data(data) loader_time = time.time() - end_time global_step += 1 with torch.cuda.amp.autocast(enabled=c.mixed_precision): # compute noisy input if hasattr(model, 'module'): noise, x_noisy, noise_scale = model.module.compute_y_n(x) else: noise, x_noisy, noise_scale = model.compute_y_n(x) # forward pass noise_hat = model(x_noisy, m, noise_scale) # compute losses loss = criterion(noise, noise_hat) loss_wavegrad_dict = {'wavegrad_loss':loss} # check nan loss if torch.isnan(loss).any(): raise RuntimeError(f'Detected NaN loss at step {global_step}.') optimizer.zero_grad() # backward pass with loss scaling if c.mixed_precision: scaler.scale(loss).backward() scaler.unscale_(optimizer) grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), c.clip_grad) scaler.step(optimizer) scaler.update() else: loss.backward() grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), c.clip_grad) optimizer.step() # schedule update if scheduler is not None: scheduler.step() # disconnect loss values loss_dict = dict() for key, value in loss_wavegrad_dict.items(): if 
isinstance(value, int): loss_dict[key] = value else: loss_dict[key] = value.item() # epoch/step timing step_time = time.time() - start_time epoch_time += step_time # get current learning rates current_lr = list(optimizer.param_groups)[0]['lr'] # update avg stats update_train_values = dict() for key, value in loss_dict.items(): update_train_values['avg_' + key] = value update_train_values['avg_loader_time'] = loader_time update_train_values['avg_step_time'] = step_time keep_avg.update_values(update_train_values) # print training stats if global_step % c.print_step == 0: log_dict = { 'step_time': [step_time, 2], 'loader_time': [loader_time, 4], "current_lr": current_lr, "grad_norm": grad_norm.item() } c_logger.print_train_step(batch_n_iter, num_iter, global_step, log_dict, loss_dict, keep_avg.avg_values) if args.rank == 0: # plot step stats if global_step % 10 == 0: iter_stats = { "lr": current_lr, "grad_norm": grad_norm.item(), "step_time": step_time } iter_stats.update(loss_dict) tb_logger.tb_train_iter_stats(global_step, iter_stats) # save checkpoint if global_step % c.save_step == 0: if c.checkpoint: # save model save_checkpoint(model, optimizer, scheduler, None, None, None, global_step, epoch, OUT_PATH, model_losses=loss_dict, scaler=scaler.state_dict() if c.mixed_precision else None) end_time = time.time() # print epoch stats c_logger.print_train_epoch_end(global_step, epoch, epoch_time, keep_avg) # Plot Training Epoch Stats epoch_stats = {"epoch_time": epoch_time} epoch_stats.update(keep_avg.avg_values) if args.rank == 0: tb_logger.tb_train_epoch_stats(global_step, epoch_stats) # TODO: plot model stats if c.tb_model_param_stats and args.rank == 0: tb_logger.tb_model_weights(model, global_step) return keep_avg.avg_values, global_step @torch.no_grad() def evaluate(model, criterion, ap, global_step, epoch): data_loader = setup_loader(ap, is_val=True, verbose=(epoch == 0)) model.eval() epoch_time = 0 keep_avg = KeepAverage() end_time = time.time() 
c_logger.print_eval_start() for num_iter, data in enumerate(data_loader): start_time = time.time() # format data m, x = format_data(data) loader_time = time.time() - end_time global_step += 1 # compute noisy input if hasattr(model, 'module'): noise, x_noisy, noise_scale = model.module.compute_y_n(x) else: noise, x_noisy, noise_scale = model.compute_y_n(x) # forward pass noise_hat = model(x_noisy, m, noise_scale) # compute losses loss = criterion(noise, noise_hat) loss_wavegrad_dict = {'wavegrad_loss':loss} loss_dict = dict() for key, value in loss_wavegrad_dict.items(): if isinstance(value, (int, float)): loss_dict[key] = value else: loss_dict[key] = value.item() step_time = time.time() - start_time epoch_time += step_time # update avg stats update_eval_values = dict() for key, value in loss_dict.items(): update_eval_values['avg_' + key] = value update_eval_values['avg_loader_time'] = loader_time update_eval_values['avg_step_time'] = step_time keep_avg.update_values(update_eval_values) # print eval stats if c.print_eval: c_logger.print_eval_step(num_iter, loss_dict, keep_avg.avg_values) if args.rank == 0: data_loader.dataset.return_segments = False samples = data_loader.dataset.load_test_samples(1) m, x = format_test_data(samples[0]) # setup noise schedule and inference noise_schedule = c['test_noise_schedule'] betas = np.linspace(noise_schedule['min_val'], noise_schedule['max_val'], noise_schedule['num_steps']) if hasattr(model, 'module'): model.module.compute_noise_level(betas) # compute voice x_pred = model.module.inference(m) else: model.compute_noise_level(betas) # compute voice x_pred = model.inference(m) # compute spectrograms figures = plot_results(x_pred, x, ap, global_step, 'eval') tb_logger.tb_eval_figures(global_step, figures) # Sample audio sample_voice = x_pred[0].squeeze(0).detach().cpu().numpy() tb_logger.tb_eval_audios(global_step, {'eval/audio': sample_voice}, c.audio["sample_rate"]) tb_logger.tb_eval_stats(global_step, keep_avg.avg_values) 
data_loader.dataset.return_segments = True return keep_avg.avg_values def main(args): # pylint: disable=redefined-outer-name # pylint: disable=global-variable-undefined global train_data, eval_data print(f" > Loading wavs from: {c.data_path}") if c.feature_path is not None: print(f" > Loading features from: {c.feature_path}") eval_data, train_data = load_wav_feat_data(c.data_path, c.feature_path, c.eval_split_size) else: eval_data, train_data = load_wav_data(c.data_path, c.eval_split_size) # setup audio processor ap = AudioProcessor(**c.audio) # DISTRUBUTED if num_gpus > 1: init_distributed(args.rank, num_gpus, args.group_id, c.distributed["backend"], c.distributed["url"]) # setup models model = setup_generator(c) # scaler for mixed_precision scaler = torch.cuda.amp.GradScaler() if c.mixed_precision else None # setup optimizers optimizer = Adam(model.parameters(), lr=c.lr, weight_decay=0) # schedulers scheduler = None if 'lr_scheduler' in c: scheduler = getattr(torch.optim.lr_scheduler, c.lr_scheduler) scheduler = scheduler(optimizer, **c.lr_scheduler_params) # setup criterion criterion = torch.nn.L1Loss().cuda() if args.restore_path: checkpoint = torch.load(args.restore_path, map_location='cpu') try: print(" > Restoring Model...") model.load_state_dict(checkpoint['model']) print(" > Restoring Optimizer...") optimizer.load_state_dict(checkpoint['optimizer']) if 'scheduler' in checkpoint: print(" > Restoring LR Scheduler...") scheduler.load_state_dict(checkpoint['scheduler']) # NOTE: Not sure if necessary scheduler.optimizer = optimizer if "scaler" in checkpoint and c.mixed_precision: print(" > Restoring AMP Scaler...") scaler.load_state_dict(checkpoint["scaler"]) except RuntimeError: # retore only matching layers. print(" > Partial model initialization...") model_dict = model.state_dict() model_dict = set_init_dict(model_dict, checkpoint['model'], c) model.load_state_dict(model_dict) del model_dict # reset lr if not countinuining training. 
for group in optimizer.param_groups: group['lr'] = c.lr print(" > Model restored from step %d" % checkpoint['step'], flush=True) args.restore_step = checkpoint['step'] else: args.restore_step = 0 if use_cuda: model.cuda() criterion.cuda() # DISTRUBUTED if num_gpus > 1: model = DDP_th(model, device_ids=[args.rank]) num_params = count_parameters(model) print(" > WaveGrad has {} parameters".format(num_params), flush=True) if 'best_loss' not in locals(): best_loss = float('inf') global_step = args.restore_step for epoch in range(0, c.epochs): c_logger.print_epoch_start(epoch, c.epochs) _, global_step = train(model, criterion, optimizer, scheduler, scaler, ap, global_step, epoch) eval_avg_loss_dict = evaluate(model, criterion, ap, global_step, epoch) c_logger.print_epoch_end(epoch, eval_avg_loss_dict) target_loss = eval_avg_loss_dict[c.target_loss] best_loss = save_best_model(target_loss, best_loss, model, optimizer, scheduler, None, None, None, global_step, epoch, OUT_PATH, model_losses=eval_avg_loss_dict, scaler=scaler.state_dict() if c.mixed_precision else None) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--continue_path', type=str, help= 'Training output folder to continue training. Use to continue a training. If it is used, "config_path" is ignored.', default='', required='--config_path' not in sys.argv) parser.add_argument( '--restore_path', type=str, help='Model file to be restored. 
Use to finetune a model.', default='') parser.add_argument('--config_path', type=str, help='Path to config file for training.', required='--continue_path' not in sys.argv) parser.add_argument('--debug', type=bool, default=False, help='Do not verify commit integrity to run training.') # DISTRUBUTED parser.add_argument( '--rank', type=int, default=0, help='DISTRIBUTED: process rank for distributed training.') parser.add_argument('--group_id', type=str, default="", help='DISTRIBUTED: process group id.') args = parser.parse_args() if args.continue_path != '': args.output_path = args.continue_path args.config_path = os.path.join(args.continue_path, 'config.json') list_of_files = glob.glob( args.continue_path + "/*.pth.tar") # * means all if need specific format then *.csv latest_model_file = max(list_of_files, key=os.path.getctime) args.restore_path = latest_model_file print(f" > Training continues for {args.restore_path}") # setup output paths and read configs c = load_config(args.config_path) # check_config(c) _ = os.path.dirname(os.path.realpath(__file__)) # DISTRIBUTED if c.mixed_precision: print(" > Mixed precision is enabled") OUT_PATH = args.continue_path if args.continue_path == '': OUT_PATH = create_experiment_folder(c.output_path, c.run_name, args.debug) AUDIO_PATH = os.path.join(OUT_PATH, 'test_audios') c_logger = ConsoleLogger() if args.rank == 0: os.makedirs(AUDIO_PATH, exist_ok=True) new_fields = {} if args.restore_path: new_fields["restore_path"] = args.restore_path new_fields["github_branch"] = get_git_branch() copy_model_files(c, args.config_path, OUT_PATH, new_fields) os.chmod(AUDIO_PATH, 0o775) os.chmod(OUT_PATH, 0o775) LOG_DIR = OUT_PATH tb_logger = TensorboardLogger(LOG_DIR, model_name='VOCODER') # write model desc to tensorboard tb_logger.tb_add_text('model-description', c['run_description'], 0) try: main(args) except KeyboardInterrupt: remove_experiment_folder(OUT_PATH) try: sys.exit(0) except SystemExit: os._exit(0) # pylint: 
disable=protected-access except Exception: # pylint: disable=broad-except remove_experiment_folder(OUT_PATH) traceback.print_exc() sys.exit(1)
18,329
34.800781
124
py
TTS
TTS-master/TTS/bin/train_vocoder_wavernn.py
import argparse import os import sys import traceback import time import glob import random import torch from torch.utils.data import DataLoader # from torch.utils.data.distributed import DistributedSampler from TTS.tts.utils.visual import plot_spectrogram from TTS.utils.audio import AudioProcessor from TTS.utils.radam import RAdam from TTS.utils.io import copy_model_files, load_config from TTS.utils.training import setup_torch_training_env from TTS.utils.console_logger import ConsoleLogger from TTS.utils.tensorboard_logger import TensorboardLogger from TTS.utils.generic_utils import ( KeepAverage, count_parameters, create_experiment_folder, get_git_branch, remove_experiment_folder, set_init_dict, ) from TTS.vocoder.datasets.wavernn_dataset import WaveRNNDataset from TTS.vocoder.datasets.preprocess import ( load_wav_data, load_wav_feat_data ) from TTS.vocoder.utils.distribution import discretized_mix_logistic_loss, gaussian_loss from TTS.vocoder.utils.generic_utils import setup_wavernn from TTS.vocoder.utils.io import save_best_model, save_checkpoint use_cuda, num_gpus = setup_torch_training_env(True, True) def setup_loader(ap, is_val=False, verbose=False): if is_val and not c.run_eval: loader = None else: dataset = WaveRNNDataset(ap=ap, items=eval_data if is_val else train_data, seq_len=c.seq_len, hop_len=ap.hop_length, pad=c.padding, mode=c.mode, mulaw=c.mulaw, is_training=not is_val, verbose=verbose, ) # sampler = DistributedSampler(dataset) if num_gpus > 1 else None loader = DataLoader(dataset, shuffle=True, collate_fn=dataset.collate, batch_size=c.batch_size, num_workers=c.num_val_loader_workers if is_val else c.num_loader_workers, pin_memory=True, ) return loader def format_data(data): # setup input data x_input = data[0] mels = data[1] y_coarse = data[2] # dispatch data to GPU if use_cuda: x_input = x_input.cuda(non_blocking=True) mels = mels.cuda(non_blocking=True) y_coarse = y_coarse.cuda(non_blocking=True) return x_input, mels, y_coarse def train(model, 
optimizer, criterion, scheduler, scaler, ap, global_step, epoch): # create train loader data_loader = setup_loader(ap, is_val=False, verbose=(epoch == 0)) model.train() epoch_time = 0 keep_avg = KeepAverage() if use_cuda: batch_n_iter = int(len(data_loader.dataset) / (c.batch_size * num_gpus)) else: batch_n_iter = int(len(data_loader.dataset) / c.batch_size) end_time = time.time() c_logger.print_train_start() # train loop for num_iter, data in enumerate(data_loader): start_time = time.time() x_input, mels, y_coarse = format_data(data) loader_time = time.time() - end_time global_step += 1 optimizer.zero_grad() if c.mixed_precision: # mixed precision training with torch.cuda.amp.autocast(): y_hat = model(x_input, mels) if isinstance(model.mode, int): y_hat = y_hat.transpose(1, 2).unsqueeze(-1) else: y_coarse = y_coarse.float() y_coarse = y_coarse.unsqueeze(-1) # compute losses loss = criterion(y_hat, y_coarse) scaler.scale(loss).backward() scaler.unscale_(optimizer) if c.grad_clip > 0: torch.nn.utils.clip_grad_norm_( model.parameters(), c.grad_clip) scaler.step(optimizer) scaler.update() else: # full precision training y_hat = model(x_input, mels) if isinstance(model.mode, int): y_hat = y_hat.transpose(1, 2).unsqueeze(-1) else: y_coarse = y_coarse.float() y_coarse = y_coarse.unsqueeze(-1) # compute losses loss = criterion(y_hat, y_coarse) if loss.item() is None: raise RuntimeError(" [!] None loss. 
Exiting ...") loss.backward() if c.grad_clip > 0: torch.nn.utils.clip_grad_norm_( model.parameters(), c.grad_clip) optimizer.step() if scheduler is not None: scheduler.step() # get the current learning rate cur_lr = list(optimizer.param_groups)[0]["lr"] step_time = time.time() - start_time epoch_time += step_time update_train_values = dict() loss_dict = dict() loss_dict["model_loss"] = loss.item() for key, value in loss_dict.items(): update_train_values["avg_" + key] = value update_train_values["avg_loader_time"] = loader_time update_train_values["avg_step_time"] = step_time keep_avg.update_values(update_train_values) # print training stats if global_step % c.print_step == 0: log_dict = {"step_time": [step_time, 2], "loader_time": [loader_time, 4], "current_lr": cur_lr, } c_logger.print_train_step(batch_n_iter, num_iter, global_step, log_dict, loss_dict, keep_avg.avg_values, ) # plot step stats if global_step % 10 == 0: iter_stats = {"lr": cur_lr, "step_time": step_time} iter_stats.update(loss_dict) tb_logger.tb_train_iter_stats(global_step, iter_stats) # save checkpoint if global_step % c.save_step == 0: if c.checkpoint: # save model save_checkpoint(model, optimizer, scheduler, None, None, None, global_step, epoch, OUT_PATH, model_losses=loss_dict, scaler=scaler.state_dict() if c.mixed_precision else None ) # synthesize a full voice rand_idx = random.randrange(0, len(train_data)) wav_path = train_data[rand_idx] if not isinstance( train_data[rand_idx], (tuple, list)) else train_data[rand_idx][0] wav = ap.load_wav(wav_path) ground_mel = ap.melspectrogram(wav) sample_wav = model.generate(ground_mel, c.batched, c.target_samples, c.overlap_samples, use_cuda ) predict_mel = ap.melspectrogram(sample_wav) # compute spectrograms figures = {"train/ground_truth": plot_spectrogram(ground_mel.T), "train/prediction": plot_spectrogram(predict_mel.T) } tb_logger.tb_train_figures(global_step, figures) # Sample audio tb_logger.tb_train_audios( global_step, { "train/audio": 
sample_wav}, c.audio["sample_rate"] ) end_time = time.time() # print epoch stats c_logger.print_train_epoch_end(global_step, epoch, epoch_time, keep_avg) # Plot Training Epoch Stats epoch_stats = {"epoch_time": epoch_time} epoch_stats.update(keep_avg.avg_values) tb_logger.tb_train_epoch_stats(global_step, epoch_stats) # TODO: plot model stats # if c.tb_model_param_stats: # tb_logger.tb_model_weights(model, global_step) return keep_avg.avg_values, global_step @torch.no_grad() def evaluate(model, criterion, ap, global_step, epoch): # create train loader data_loader = setup_loader(ap, is_val=True, verbose=(epoch == 0)) model.eval() epoch_time = 0 keep_avg = KeepAverage() end_time = time.time() c_logger.print_eval_start() with torch.no_grad(): for num_iter, data in enumerate(data_loader): start_time = time.time() # format data x_input, mels, y_coarse = format_data(data) loader_time = time.time() - end_time global_step += 1 y_hat = model(x_input, mels) if isinstance(model.mode, int): y_hat = y_hat.transpose(1, 2).unsqueeze(-1) else: y_coarse = y_coarse.float() y_coarse = y_coarse.unsqueeze(-1) loss = criterion(y_hat, y_coarse) # Compute avg loss # if num_gpus > 1: # loss = reduce_tensor(loss.data, num_gpus) loss_dict = dict() loss_dict["model_loss"] = loss.item() step_time = time.time() - start_time epoch_time += step_time # update avg stats update_eval_values = dict() for key, value in loss_dict.items(): update_eval_values["avg_" + key] = value update_eval_values["avg_loader_time"] = loader_time update_eval_values["avg_step_time"] = step_time keep_avg.update_values(update_eval_values) # print eval stats if c.print_eval: c_logger.print_eval_step( num_iter, loss_dict, keep_avg.avg_values) if epoch % c.test_every_epochs == 0 and epoch != 0: # synthesize a full voice rand_idx = random.randrange(0, len(eval_data)) wav_path = eval_data[rand_idx] if not isinstance( eval_data[rand_idx], (tuple, list)) else eval_data[rand_idx][0] wav = ap.load_wav(wav_path) ground_mel = 
ap.melspectrogram(wav) sample_wav = model.generate(ground_mel, c.batched, c.target_samples, c.overlap_samples, use_cuda ) predict_mel = ap.melspectrogram(sample_wav) # Sample audio tb_logger.tb_eval_audios( global_step, { "eval/audio": sample_wav}, c.audio["sample_rate"] ) # compute spectrograms figures = {"eval/ground_truth": plot_spectrogram(ground_mel.T), "eval/prediction": plot_spectrogram(predict_mel.T) } tb_logger.tb_eval_figures(global_step, figures) tb_logger.tb_eval_stats(global_step, keep_avg.avg_values) return keep_avg.avg_values # FIXME: move args definition/parsing inside of main? def main(args): # pylint: disable=redefined-outer-name # pylint: disable=global-variable-undefined global train_data, eval_data # setup audio processor ap = AudioProcessor(**c.audio) # print(f" > Loading wavs from: {c.data_path}") # if c.feature_path is not None: # print(f" > Loading features from: {c.feature_path}") # eval_data, train_data = load_wav_feat_data( # c.data_path, c.feature_path, c.eval_split_size # ) # else: # mel_feat_path = os.path.join(OUT_PATH, "mel") # feat_data = find_feat_files(mel_feat_path) # if feat_data: # print(f" > Loading features from: {mel_feat_path}") # eval_data, train_data = load_wav_feat_data( # c.data_path, mel_feat_path, c.eval_split_size # ) # else: # print(" > No feature data found. 
Preprocessing...") # # preprocessing feature data from given wav files # preprocess_wav_files(OUT_PATH, CONFIG, ap) # eval_data, train_data = load_wav_feat_data( # c.data_path, mel_feat_path, c.eval_split_size # ) print(f" > Loading wavs from: {c.data_path}") if c.feature_path is not None: print(f" > Loading features from: {c.feature_path}") eval_data, train_data = load_wav_feat_data( c.data_path, c.feature_path, c.eval_split_size) else: eval_data, train_data = load_wav_data( c.data_path, c.eval_split_size) # setup model model_wavernn = setup_wavernn(c) # setup amp scaler scaler = torch.cuda.amp.GradScaler() if c.mixed_precision else None # define train functions if c.mode == "mold": criterion = discretized_mix_logistic_loss elif c.mode == "gauss": criterion = gaussian_loss elif isinstance(c.mode, int): criterion = torch.nn.CrossEntropyLoss() if use_cuda: model_wavernn.cuda() if isinstance(c.mode, int): criterion.cuda() optimizer = RAdam(model_wavernn.parameters(), lr=c.lr, weight_decay=0) scheduler = None if "lr_scheduler" in c: scheduler = getattr(torch.optim.lr_scheduler, c.lr_scheduler) scheduler = scheduler(optimizer, **c.lr_scheduler_params) # slow start for the first 5 epochs # lr_lambda = lambda epoch: min(epoch / c.warmup_steps, 1) # scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda) # restore any checkpoint if args.restore_path: checkpoint = torch.load(args.restore_path, map_location="cpu") try: print(" > Restoring Model...") model_wavernn.load_state_dict(checkpoint["model"]) print(" > Restoring Optimizer...") optimizer.load_state_dict(checkpoint["optimizer"]) if "scheduler" in checkpoint: print(" > Restoring Generator LR Scheduler...") scheduler.load_state_dict(checkpoint["scheduler"]) scheduler.optimizer = optimizer if "scaler" in checkpoint and c.mixed_precision: print(" > Restoring AMP Scaler...") scaler.load_state_dict(checkpoint["scaler"]) except RuntimeError: # retore only matching layers. 
print(" > Partial model initialization...") model_dict = model_wavernn.state_dict() model_dict = set_init_dict(model_dict, checkpoint["model"], c) model_wavernn.load_state_dict(model_dict) print(" > Model restored from step %d" % checkpoint["step"], flush=True) args.restore_step = checkpoint["step"] else: args.restore_step = 0 # DISTRIBUTED # if num_gpus > 1: # model = apply_gradient_allreduce(model) num_parameters = count_parameters(model_wavernn) print(" > Model has {} parameters".format(num_parameters), flush=True) if "best_loss" not in locals(): best_loss = float("inf") global_step = args.restore_step for epoch in range(0, c.epochs): c_logger.print_epoch_start(epoch, c.epochs) _, global_step = train(model_wavernn, optimizer, criterion, scheduler, scaler, ap, global_step, epoch) eval_avg_loss_dict = evaluate( model_wavernn, criterion, ap, global_step, epoch) c_logger.print_epoch_end(epoch, eval_avg_loss_dict) target_loss = eval_avg_loss_dict["avg_model_loss"] best_loss = save_best_model( target_loss, best_loss, model_wavernn, optimizer, scheduler, None, None, None, global_step, epoch, OUT_PATH, model_losses=eval_avg_loss_dict, scaler=scaler.state_dict() if c.mixed_precision else None ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--continue_path", type=str, help='Training output folder to continue training. Use to continue a training. If it is used, "config_path" is ignored.', default="", required="--config_path" not in sys.argv, ) parser.add_argument( "--restore_path", type=str, help="Model file to be restored. 
Use to finetune a model.", default="", ) parser.add_argument( "--config_path", type=str, help="Path to config file for training.", required="--continue_path" not in sys.argv, ) parser.add_argument( "--debug", type=bool, default=False, help="Do not verify commit integrity to run training.", ) # DISTRUBUTED parser.add_argument( "--rank", type=int, default=0, help="DISTRIBUTED: process rank for distributed training.", ) parser.add_argument( "--group_id", type=str, default="", help="DISTRIBUTED: process group id." ) args = parser.parse_args() if args.continue_path != "": args.output_path = args.continue_path args.config_path = os.path.join(args.continue_path, "config.json") list_of_files = glob.glob( args.continue_path + "/*.pth.tar" ) # * means all if need specific format then *.csv latest_model_file = max(list_of_files, key=os.path.getctime) args.restore_path = latest_model_file print(f" > Training continues for {args.restore_path}") # setup output paths and read configs c = load_config(args.config_path) # check_config(c) _ = os.path.dirname(os.path.realpath(__file__)) OUT_PATH = args.continue_path if args.continue_path == "": OUT_PATH = create_experiment_folder( c.output_path, c.run_name, args.debug ) AUDIO_PATH = os.path.join(OUT_PATH, "test_audios") c_logger = ConsoleLogger() if args.rank == 0: os.makedirs(AUDIO_PATH, exist_ok=True) new_fields = {} if args.restore_path: new_fields["restore_path"] = args.restore_path new_fields["github_branch"] = get_git_branch() copy_model_files( c, args.config_path, OUT_PATH, new_fields ) os.chmod(AUDIO_PATH, 0o775) os.chmod(OUT_PATH, 0o775) LOG_DIR = OUT_PATH tb_logger = TensorboardLogger(LOG_DIR, model_name="VOCODER") # write model desc to tensorboard tb_logger.tb_add_text("model-description", c["run_description"], 0) try: main(args) except KeyboardInterrupt: remove_experiment_folder(OUT_PATH) try: sys.exit(0) except SystemExit: os._exit(0) # pylint: disable=protected-access except Exception: # pylint: disable=broad-except 
remove_experiment_folder(OUT_PATH) traceback.print_exc() sys.exit(1)
19,468
35.053704
129
py
TTS
TTS-master/TTS/bin/compute_embeddings.py
import argparse import glob import os import numpy as np from tqdm import tqdm import torch from TTS.speaker_encoder.model import SpeakerEncoder from TTS.utils.audio import AudioProcessor from TTS.utils.io import load_config from TTS.tts.utils.speakers import save_speaker_mapping from TTS.tts.datasets.preprocess import load_meta_data parser = argparse.ArgumentParser( description='Compute embedding vectors for each wav file in a dataset. If "target_dataset" is defined, it generates "speakers.json" necessary for training a multi-speaker model.') parser.add_argument( 'model_path', type=str, help='Path to model outputs (checkpoint, tensorboard etc.).') parser.add_argument( 'config_path', type=str, help='Path to config file for training.', ) parser.add_argument( 'data_path', type=str, help='Data path for wav files - directory or CSV file') parser.add_argument( 'output_path', type=str, help='path for training outputs.') parser.add_argument( '--target_dataset', type=str, default='', help='Target dataset to pick a processor from TTS.tts.dataset.preprocess. Necessary to create a speakers.json file.' 
) parser.add_argument( '--use_cuda', type=bool, help='flag to set cuda.', default=False ) parser.add_argument( '--separator', type=str, help='Separator used in file if CSV is passed for data_path', default='|' ) args = parser.parse_args() c = load_config(args.config_path) ap = AudioProcessor(**c['audio']) data_path = args.data_path split_ext = os.path.splitext(data_path) sep = args.separator if args.target_dataset != '': # if target dataset is defined dataset_config = [ { "name": args.target_dataset, "path": args.data_path, "meta_file_train": None, "meta_file_val": None }, ] wav_files, _ = load_meta_data(dataset_config, eval_split=False) output_files = [wav_file[1].replace(data_path, args.output_path).replace( '.wav', '.npy') for wav_file in wav_files] else: # if target dataset is not defined if len(split_ext) > 0 and split_ext[1].lower() == '.csv': # Parse CSV print(f'CSV file: {data_path}') with open(data_path) as f: wav_path = os.path.join(os.path.dirname(data_path), 'wavs') wav_files = [] print(f'Separator is: {sep}') for line in f: components = line.split(sep) if len(components) != 2: print("Invalid line") continue wav_file = os.path.join(wav_path, components[0] + '.wav') #print(f'wav_file: {wav_file}') if os.path.exists(wav_file): wav_files.append(wav_file) print(f'Count of wavs imported: {len(wav_files)}') else: # Parse all wav files in data_path wav_files = glob.glob(data_path + '/**/*.wav', recursive=True) output_files = [wav_file.replace(data_path, args.output_path).replace( '.wav', '.npy') for wav_file in wav_files] for output_file in output_files: os.makedirs(os.path.dirname(output_file), exist_ok=True) # define Encoder model model = SpeakerEncoder(**c.model) model.load_state_dict(torch.load(args.model_path)['model']) model.eval() if args.use_cuda: model.cuda() # compute speaker embeddings speaker_mapping = {} for idx, wav_file in enumerate(tqdm(wav_files)): if isinstance(wav_file, list): speaker_name = wav_file[2] wav_file = wav_file[1] mel_spec = 
ap.melspectrogram(ap.load_wav(wav_file, sr=ap.sample_rate)).T mel_spec = torch.FloatTensor(mel_spec[None, :, :]) if args.use_cuda: mel_spec = mel_spec.cuda() embedd = model.compute_embedding(mel_spec) embedd = embedd.detach().cpu().numpy() np.save(output_files[idx], embedd) if args.target_dataset != '': # create speaker_mapping if target dataset is defined wav_file_name = os.path.basename(wav_file) speaker_mapping[wav_file_name] = {} speaker_mapping[wav_file_name]['name'] = speaker_name speaker_mapping[wav_file_name]['embedding'] = embedd.flatten().tolist() if args.target_dataset != '': # save speaker_mapping if target dataset is defined mapping_file_path = os.path.join(args.output_path, 'speakers.json') save_speaker_mapping(args.output_path, speaker_mapping)
4,439
32.89313
183
py
TTS
TTS-master/TTS/tts/models/tacotron_abstract.py
import copy from abc import ABC, abstractmethod import torch from torch import nn from TTS.tts.utils.generic_utils import sequence_mask class TacotronAbstract(ABC, nn.Module): def __init__(self, num_chars, num_speakers, r, postnet_output_dim=80, decoder_output_dim=80, attn_type='original', attn_win=False, attn_norm="softmax", prenet_type="original", prenet_dropout=True, forward_attn=False, trans_agent=False, forward_attn_mask=False, location_attn=True, attn_K=5, separate_stopnet=True, bidirectional_decoder=False, double_decoder_consistency=False, ddc_r=None, encoder_in_features=512, decoder_in_features=512, speaker_embedding_dim=None, gst=False, gst_embedding_dim=512, gst_num_heads=4, gst_style_tokens=10, gst_use_speaker_embedding=False): """ Abstract Tacotron class """ super().__init__() self.num_chars = num_chars self.r = r self.decoder_output_dim = decoder_output_dim self.postnet_output_dim = postnet_output_dim self.gst = gst self.gst_embedding_dim = gst_embedding_dim self.gst_num_heads = gst_num_heads self.gst_style_tokens = gst_style_tokens self.gst_use_speaker_embedding = gst_use_speaker_embedding self.num_speakers = num_speakers self.bidirectional_decoder = bidirectional_decoder self.double_decoder_consistency = double_decoder_consistency self.ddc_r = ddc_r self.attn_type = attn_type self.attn_win = attn_win self.attn_norm = attn_norm self.prenet_type = prenet_type self.prenet_dropout = prenet_dropout self.forward_attn = forward_attn self.trans_agent = trans_agent self.forward_attn_mask = forward_attn_mask self.location_attn = location_attn self.attn_K = attn_K self.separate_stopnet = separate_stopnet self.encoder_in_features = encoder_in_features self.decoder_in_features = decoder_in_features self.speaker_embedding_dim = speaker_embedding_dim # layers self.embedding = None self.encoder = None self.decoder = None self.postnet = None # multispeaker if self.speaker_embedding_dim is None: # if speaker_embedding_dim is None we need use the nn.Embedding, with 
default speaker_embedding_dim self.embeddings_per_sample = False else: # if speaker_embedding_dim is not None we need use speaker embedding per sample self.embeddings_per_sample = True # global style token if self.gst: self.decoder_in_features += gst_embedding_dim # add gst embedding dim self.gst_layer = None # model states self.speaker_embeddings = None self.speaker_embeddings_projected = None # additional layers self.decoder_backward = None self.coarse_decoder = None ############################# # INIT FUNCTIONS ############################# def _init_states(self): self.speaker_embeddings = None self.speaker_embeddings_projected = None def _init_backward_decoder(self): self.decoder_backward = copy.deepcopy(self.decoder) def _init_coarse_decoder(self): self.coarse_decoder = copy.deepcopy(self.decoder) self.coarse_decoder.r_init = self.ddc_r self.coarse_decoder.set_r(self.ddc_r) ############################# # CORE FUNCTIONS ############################# @abstractmethod def forward(self): pass @abstractmethod def inference(self): pass def load_checkpoint(self, config, checkpoint_path, eval=False): # pylint: disable=unused-argument, redefined-builtin state = torch.load(checkpoint_path, map_location=torch.device('cpu')) self.load_state_dict(state['model']) self.decoder.set_r(state['r']) if eval: self.eval() assert not self.training ############################# # COMMON COMPUTE FUNCTIONS ############################# def compute_masks(self, text_lengths, mel_lengths): """Compute masks against sequence paddings.""" # B x T_in_max (boolean) device = text_lengths.device input_mask = sequence_mask(text_lengths).to(device) output_mask = None if mel_lengths is not None: max_len = mel_lengths.max() r = self.decoder.r max_len = max_len + (r - (max_len % r)) if max_len % r > 0 else max_len output_mask = sequence_mask(mel_lengths, max_len=max_len).to(device) return input_mask, output_mask def _backward_pass(self, mel_specs, encoder_outputs, mask): """ Run backwards decoder 
""" decoder_outputs_b, alignments_b, _ = self.decoder_backward( encoder_outputs, torch.flip(mel_specs, dims=(1,)), mask, self.speaker_embeddings_projected) decoder_outputs_b = decoder_outputs_b.transpose(1, 2).contiguous() return decoder_outputs_b, alignments_b def _coarse_decoder_pass(self, mel_specs, encoder_outputs, alignments, input_mask): """ Double Decoder Consistency """ T = mel_specs.shape[1] if T % self.coarse_decoder.r > 0: padding_size = self.coarse_decoder.r - (T % self.coarse_decoder.r) mel_specs = torch.nn.functional.pad(mel_specs, (0, 0, 0, padding_size, 0, 0)) decoder_outputs_backward, alignments_backward, _ = self.coarse_decoder( encoder_outputs.detach(), mel_specs, input_mask) # scale_factor = self.decoder.r_init / self.decoder.r alignments_backward = torch.nn.functional.interpolate( alignments_backward.transpose(1, 2), size=alignments.shape[1], mode='nearest').transpose(1, 2) decoder_outputs_backward = decoder_outputs_backward.transpose(1, 2) decoder_outputs_backward = decoder_outputs_backward[:, :T, :] return decoder_outputs_backward, alignments_backward ############################# # EMBEDDING FUNCTIONS ############################# def compute_speaker_embedding(self, speaker_ids): """ Compute speaker embedding vectors """ if hasattr(self, "speaker_embedding") and speaker_ids is None: raise RuntimeError( " [!] 
Model has speaker embedding layer but speaker_id is not provided" ) if hasattr(self, "speaker_embedding") and speaker_ids is not None: self.speaker_embeddings = self.speaker_embedding(speaker_ids).unsqueeze(1) if hasattr(self, "speaker_project_mel") and speaker_ids is not None: self.speaker_embeddings_projected = self.speaker_project_mel( self.speaker_embeddings).squeeze(1) def compute_gst(self, inputs, style_input, speaker_embedding=None): """ Compute global style token """ device = inputs.device if isinstance(style_input, dict): query = torch.zeros(1, 1, self.gst_embedding_dim//2).to(device) if speaker_embedding is not None: query = torch.cat([query, speaker_embedding.reshape(1, 1, -1)], dim=-1) _GST = torch.tanh(self.gst_layer.style_token_layer.style_tokens) gst_outputs = torch.zeros(1, 1, self.gst_embedding_dim).to(device) for k_token, v_amplifier in style_input.items(): key = _GST[int(k_token)].unsqueeze(0).expand(1, -1, -1) gst_outputs_att = self.gst_layer.style_token_layer.attention(query, key) gst_outputs = gst_outputs + gst_outputs_att * v_amplifier elif style_input is None: gst_outputs = torch.zeros(1, 1, self.gst_embedding_dim).to(device) else: gst_outputs = self.gst_layer(style_input, speaker_embedding) # pylint: disable=not-callable inputs = self._concat_speaker_embedding(inputs, gst_outputs) return inputs @staticmethod def _add_speaker_embedding(outputs, speaker_embeddings): speaker_embeddings_ = speaker_embeddings.expand( outputs.size(0), outputs.size(1), -1) outputs = outputs + speaker_embeddings_ return outputs @staticmethod def _concat_speaker_embedding(outputs, speaker_embeddings): speaker_embeddings_ = speaker_embeddings.expand( outputs.size(0), outputs.size(1), -1) outputs = torch.cat([outputs, speaker_embeddings_], dim=-1) return outputs
9,066
39.119469
121
py
TTS
TTS-master/TTS/tts/models/speedy_speech.py
import torch from torch import nn from TTS.tts.layers.speedy_speech.decoder import Decoder from TTS.tts.layers.speedy_speech.duration_predictor import DurationPredictor from TTS.tts.layers.speedy_speech.encoder import Encoder, PositionalEncoding from TTS.tts.utils.generic_utils import sequence_mask from TTS.tts.layers.glow_tts.monotonic_align import generate_path class SpeedySpeech(nn.Module): """Speedy Speech model https://arxiv.org/abs/2008.03802 Encoder -> DurationPredictor -> Decoder This model is able to achieve a reasonable performance with only ~3M model parameters and convolutional layers. This model requires precomputed phoneme durations to train a duration predictor. At inference it only uses the duration predictor to compute durations and expand encoder outputs respectively. Args: num_chars (int): number of unique input to characters out_channels (int): number of output tensor channels. It is equal to the expected spectrogram size. hidden_channels (int): number of channels in all the model layers. positional_encoding (bool, optional): enable/disable Positional encoding on encoder outputs. Defaults to True. length_scale (int, optional): coefficient to set the speech speed. <1 slower, >1 faster. Defaults to 1. encoder_type (str, optional): set the encoder type. Defaults to 'residual_conv_bn'. encoder_params (dict, optional): set encoder parameters depending on 'encoder_type'. Defaults to { "kernel_size": 4, "dilations": 4 * [1, 2, 4] + [1], "num_conv_blocks": 2, "num_res_blocks": 13 }. decoder_type (str, optional): decoder type. Defaults to 'residual_conv_bn'. decoder_params (dict, optional): set decoder parameters depending on 'decoder_type'. Defaults to { "kernel_size": 4, "dilations": 4 * [1, 2, 4, 8] + [1], "num_conv_blocks": 2, "num_res_blocks": 17 }. num_speakers (int, optional): number of speakers for multi-speaker training. Defaults to 0. external_c (bool, optional): enable external speaker embeddings. Defaults to False. 
c_in_channels (int, optional): number of channels in speaker embedding vectors. Defaults to 0. """ # pylint: disable=dangerous-default-value def __init__( self, num_chars, out_channels, hidden_channels, positional_encoding=True, length_scale=1, encoder_type='residual_conv_bn', encoder_params={ "kernel_size": 4, "dilations": 4 * [1, 2, 4] + [1], "num_conv_blocks": 2, "num_res_blocks": 13 }, decoder_type='residual_conv_bn', decoder_params={ "kernel_size": 4, "dilations": 4 * [1, 2, 4, 8] + [1], "num_conv_blocks": 2, "num_res_blocks": 17 }, num_speakers=0, external_c=False, c_in_channels=0): super().__init__() self.length_scale = float(length_scale) if isinstance(length_scale, int) else length_scale self.emb = nn.Embedding(num_chars, hidden_channels) self.encoder = Encoder(hidden_channels, hidden_channels, encoder_type, encoder_params, c_in_channels) if positional_encoding: self.pos_encoder = PositionalEncoding(hidden_channels) self.decoder = Decoder(out_channels, hidden_channels, decoder_type, decoder_params) self.duration_predictor = DurationPredictor(hidden_channels + c_in_channels) if num_speakers > 1 and not external_c: # speaker embedding layer self.emb_g = nn.Embedding(num_speakers, c_in_channels) nn.init.uniform_(self.emb_g.weight, -0.1, 0.1) if c_in_channels > 0 and c_in_channels != hidden_channels: self.proj_g = nn.Conv1d(c_in_channels, hidden_channels, 1) @staticmethod def expand_encoder_outputs(en, dr, x_mask, y_mask): """Generate attention alignment map from durations and expand encoder outputs Example: encoder output: [a,b,c,d] durations: [1, 3, 2, 1] expanded: [a, b, b, b, c, c, d] attention map: [[0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0]] """ attn_mask = torch.unsqueeze(x_mask, -1) * torch.unsqueeze(y_mask, 2) attn = generate_path(dr, attn_mask.squeeze(1)).to(en.dtype) o_en_ex = torch.matmul( attn.squeeze(1).transpose(1, 2), en.transpose(1, 2)).transpose(1, 2) return o_en_ex, attn def 
format_durations(self, o_dr_log, x_mask): o_dr = (torch.exp(o_dr_log) - 1) * x_mask * self.length_scale o_dr[o_dr < 1] = 1.0 o_dr = torch.round(o_dr) return o_dr @staticmethod def _concat_speaker_embedding(o_en, g): g_exp = g.expand(-1, -1, o_en.size(-1)) # [B, C, T_en] o_en = torch.cat([o_en, g_exp], 1) return o_en def _sum_speaker_embedding(self, x, g): # project g to decoder dim. if hasattr(self, 'proj_g'): g = self.proj_g(g) return x + g def _forward_encoder(self, x, x_lengths, g=None): if hasattr(self, 'emb_g'): g = nn.functional.normalize(self.emb_g(g)) # [B, C, 1] if g is not None: g = g.unsqueeze(-1) # [B, T, C] x_emb = self.emb(x) # [B, C, T] x_emb = torch.transpose(x_emb, 1, -1) # compute sequence masks x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.shape[1]), 1).to(x.dtype) # encoder pass o_en = self.encoder(x_emb, x_mask) # speaker conditioning for duration predictor if g is not None: o_en_dp = self._concat_speaker_embedding(o_en, g) else: o_en_dp = o_en return o_en, o_en_dp, x_mask, g def _forward_decoder(self, o_en, o_en_dp, dr, x_mask, y_lengths, g): y_mask = torch.unsqueeze(sequence_mask(y_lengths, None), 1).to(o_en_dp.dtype) # expand o_en with durations o_en_ex, attn = self.expand_encoder_outputs(o_en, dr, x_mask, y_mask) # positional encoding if hasattr(self, 'pos_encoder'): o_en_ex = self.pos_encoder(o_en_ex, y_mask) # speaker embedding if g is not None: o_en_ex = self._sum_speaker_embedding(o_en_ex, g) # decoder pass o_de = self.decoder(o_en_ex, y_mask, g=g) return o_de, attn.transpose(1, 2) def forward(self, x, x_lengths, y_lengths, dr, g=None): # pylint: disable=unused-argument """ Shapes: x: [B, T_max] x_lengths: [B] y_lengths: [B] dr: [B, T_max] g: [B, C] """ o_en, o_en_dp, x_mask, g = self._forward_encoder(x, x_lengths, g) o_dr_log = self.duration_predictor(o_en_dp.detach(), x_mask) o_de, attn= self._forward_decoder(o_en, o_en_dp, dr, x_mask, y_lengths, g=g) return o_de, o_dr_log.squeeze(1), attn def inference(self, x, x_lengths, 
g=None): # pylint: disable=unused-argument """ Shapes: x: [B, T_max] x_lengths: [B] g: [B, C] """ # pad input to prevent dropping the last word x = torch.nn.functional.pad(x, pad=(0, 5), mode='constant', value=0) o_en, o_en_dp, x_mask, g = self._forward_encoder(x, x_lengths, g) # duration predictor pass o_dr_log = self.duration_predictor(o_en_dp.detach(), x_mask) o_dr = self.format_durations(o_dr_log, x_mask).squeeze(1) y_lengths = o_dr.sum(1) o_de, attn = self._forward_decoder(o_en, o_en_dp, o_dr, x_mask, y_lengths, g=g) return o_de, attn def load_checkpoint(self, config, checkpoint_path, eval=False): # pylint: disable=unused-argument, redefined-builtin state = torch.load(checkpoint_path, map_location=torch.device('cpu')) self.load_state_dict(state['model']) if eval: self.eval() assert not self.training
8,367
40.84
207
py
TTS
TTS-master/TTS/tts/models/tacotron.py
# coding: utf-8 import torch from torch import nn from TTS.tts.layers.gst_layers import GST from TTS.tts.layers.tacotron import Decoder, Encoder, PostCBHG from TTS.tts.models.tacotron_abstract import TacotronAbstract class Tacotron(TacotronAbstract): """Tacotron as in https://arxiv.org/abs/1703.10135 It's an autoregressive encoder-attention-decoder-postnet architecture. Args: num_chars (int): number of input characters to define the size of embedding layer. num_speakers (int): number of speakers in the dataset. >1 enables multi-speaker training and model learns speaker embeddings. r (int): initial model reduction rate. postnet_output_dim (int, optional): postnet output channels. Defaults to 80. decoder_output_dim (int, optional): decoder output channels. Defaults to 80. attn_type (str, optional): attention type. Check ```TTS.tts.layers.attentions.init_attn```. Defaults to 'original'. attn_win (bool, optional): enable/disable attention windowing. It especially useful at inference to keep attention alignment diagonal. Defaults to False. attn_norm (str, optional): Attention normalization method. "sigmoid" or "softmax". Defaults to "softmax". prenet_type (str, optional): prenet type for the decoder. Defaults to "original". prenet_dropout (bool, optional): prenet dropout rate. Defaults to True. forward_attn (bool, optional): enable/disable forward attention. It is only valid if ```attn_type``` is ```original```. Defaults to False. trans_agent (bool, optional): enable/disable transition agent in forward attention. Defaults to False. forward_attn_mask (bool, optional): enable/disable extra masking over forward attention. Defaults to False. location_attn (bool, optional): enable/disable location sensitive attention. It is only valid if ```attn_type``` is ```original```. Defaults to True. attn_K (int, optional): Number of attention heads for GMM attention. Defaults to 5. 
separate_stopnet (bool, optional): enable/disable separate stopnet training without only gradient flow from stopnet to the rest of the model. Defaults to True. bidirectional_decoder (bool, optional): enable/disable bidirectional decoding. Defaults to False. double_decoder_consistency (bool, optional): enable/disable double decoder consistency. Defaults to False. ddc_r (int, optional): reduction rate for the coarse decoder of double decoder consistency. Defaults to None. encoder_in_features (int, optional): input channels for the encoder. Defaults to 512. decoder_in_features (int, optional): input channels for the decoder. Defaults to 512. speaker_embedding_dim (int, optional): external speaker conditioning vector channels. Defaults to None. gst (bool, optional): enable/disable global style token learning. Defaults to False. gst_embedding_dim (int, optional): size of channels for GST vectors. Defaults to 512. gst_num_heads (int, optional): number of attention heads for GST. Defaults to 4. gst_style_tokens (int, optional): number of GST tokens. Defaults to 10. gst_use_speaker_embedding (bool, optional): enable/disable inputing speaker embedding to GST. Defaults to False. memory_size (int, optional): size of the history queue fed to the prenet. Model feeds the last ```memory_size``` output frames to the prenet. 
""" def __init__(self, num_chars, num_speakers, r=5, postnet_output_dim=1025, decoder_output_dim=80, attn_type='original', attn_win=False, attn_norm="sigmoid", prenet_type="original", prenet_dropout=True, forward_attn=False, trans_agent=False, forward_attn_mask=False, location_attn=True, attn_K=5, separate_stopnet=True, bidirectional_decoder=False, double_decoder_consistency=False, ddc_r=None, encoder_in_features=256, decoder_in_features=256, speaker_embedding_dim=None, gst=False, gst_embedding_dim=256, gst_num_heads=4, gst_style_tokens=10, memory_size=5, gst_use_speaker_embedding=False): super(Tacotron, self).__init__(num_chars, num_speakers, r, postnet_output_dim, decoder_output_dim, attn_type, attn_win, attn_norm, prenet_type, prenet_dropout, forward_attn, trans_agent, forward_attn_mask, location_attn, attn_K, separate_stopnet, bidirectional_decoder, double_decoder_consistency, ddc_r, encoder_in_features, decoder_in_features, speaker_embedding_dim, gst, gst_embedding_dim, gst_num_heads, gst_style_tokens, gst_use_speaker_embedding) # speaker embedding layers if self.num_speakers > 1: if not self.embeddings_per_sample: speaker_embedding_dim = 256 self.speaker_embedding = nn.Embedding(self.num_speakers, speaker_embedding_dim) self.speaker_embedding.weight.data.normal_(0, 0.3) # speaker and gst embeddings is concat in decoder input if self.num_speakers > 1: self.decoder_in_features += speaker_embedding_dim # add speaker embedding dim # embedding layer self.embedding = nn.Embedding(num_chars, 256, padding_idx=0) self.embedding.weight.data.normal_(0, 0.3) # base model layers self.encoder = Encoder(self.encoder_in_features) self.decoder = Decoder(self.decoder_in_features, decoder_output_dim, r, memory_size, attn_type, attn_win, attn_norm, prenet_type, prenet_dropout, forward_attn, trans_agent, forward_attn_mask, location_attn, attn_K, separate_stopnet) self.postnet = PostCBHG(decoder_output_dim) self.last_linear = nn.Linear(self.postnet.cbhg.gru_features * 2, 
postnet_output_dim) # global style token layers if self.gst: self.gst_layer = GST(num_mel=80, num_heads=gst_num_heads, num_style_tokens=gst_style_tokens, gst_embedding_dim=self.gst_embedding_dim, speaker_embedding_dim=speaker_embedding_dim if self.embeddings_per_sample and self.gst_use_speaker_embedding else None) # backward pass decoder if self.bidirectional_decoder: self._init_backward_decoder() # setup DDC if self.double_decoder_consistency: self.coarse_decoder = Decoder( self.decoder_in_features, decoder_output_dim, ddc_r, memory_size, attn_type, attn_win, attn_norm, prenet_type, prenet_dropout, forward_attn, trans_agent, forward_attn_mask, location_attn, attn_K, separate_stopnet) def forward(self, characters, text_lengths, mel_specs, mel_lengths=None, speaker_ids=None, speaker_embeddings=None): """ Shapes: characters: [B, T_in] text_lengths: [B] mel_specs: [B, T_out, C] mel_lengths: [B] speaker_ids: [B, 1] speaker_embeddings: [B, C] """ input_mask, output_mask = self.compute_masks(text_lengths, mel_lengths) # B x T_in x embed_dim inputs = self.embedding(characters) # B x T_in x encoder_in_features encoder_outputs = self.encoder(inputs) # sequence masking encoder_outputs = encoder_outputs * input_mask.unsqueeze(2).expand_as(encoder_outputs) # global style token if self.gst: # B x gst_dim encoder_outputs = self.compute_gst(encoder_outputs, mel_specs, speaker_embeddings if self.gst_use_speaker_embedding else None) # speaker embedding if self.num_speakers > 1: if not self.embeddings_per_sample: # B x 1 x speaker_embed_dim speaker_embeddings = self.speaker_embedding(speaker_ids)[:, None] else: # B x 1 x speaker_embed_dim speaker_embeddings = torch.unsqueeze(speaker_embeddings, 1) encoder_outputs = self._concat_speaker_embedding(encoder_outputs, speaker_embeddings) # decoder_outputs: B x decoder_in_features x T_out # alignments: B x T_in x encoder_in_features # stop_tokens: B x T_in decoder_outputs, alignments, stop_tokens = self.decoder( encoder_outputs, mel_specs, 
input_mask) # sequence masking if output_mask is not None: decoder_outputs = decoder_outputs * output_mask.unsqueeze(1).expand_as(decoder_outputs) # B x T_out x decoder_in_features postnet_outputs = self.postnet(decoder_outputs) # sequence masking if output_mask is not None: postnet_outputs = postnet_outputs * output_mask.unsqueeze(2).expand_as(postnet_outputs) # B x T_out x posnet_dim postnet_outputs = self.last_linear(postnet_outputs) # B x T_out x decoder_in_features decoder_outputs = decoder_outputs.transpose(1, 2).contiguous() if self.bidirectional_decoder: decoder_outputs_backward, alignments_backward = self._backward_pass(mel_specs, encoder_outputs, input_mask) return decoder_outputs, postnet_outputs, alignments, stop_tokens, decoder_outputs_backward, alignments_backward if self.double_decoder_consistency: decoder_outputs_backward, alignments_backward = self._coarse_decoder_pass(mel_specs, encoder_outputs, alignments, input_mask) return decoder_outputs, postnet_outputs, alignments, stop_tokens, decoder_outputs_backward, alignments_backward return decoder_outputs, postnet_outputs, alignments, stop_tokens @torch.no_grad() def inference(self, characters, speaker_ids=None, style_mel=None, speaker_embeddings=None): inputs = self.embedding(characters) encoder_outputs = self.encoder(inputs) if self.gst: # B x gst_dim encoder_outputs = self.compute_gst(encoder_outputs, style_mel, speaker_embeddings if self.gst_use_speaker_embedding else None) if self.num_speakers > 1: if not self.embeddings_per_sample: # B x 1 x speaker_embed_dim speaker_embeddings = self.speaker_embedding(speaker_ids)[:, None] else: # B x 1 x speaker_embed_dim speaker_embeddings = torch.unsqueeze(speaker_embeddings, 1) encoder_outputs = self._concat_speaker_embedding(encoder_outputs, speaker_embeddings) decoder_outputs, alignments, stop_tokens = self.decoder.inference( encoder_outputs) postnet_outputs = self.postnet(decoder_outputs) postnet_outputs = self.last_linear(postnet_outputs) 
decoder_outputs = decoder_outputs.transpose(1, 2) return decoder_outputs, postnet_outputs, alignments, stop_tokens
11,909
54.654206
152
py
TTS
TTS-master/TTS/tts/models/tacotron2.py
import torch from torch import nn from TTS.tts.layers.gst_layers import GST from TTS.tts.layers.tacotron2 import Decoder, Encoder, Postnet from TTS.tts.models.tacotron_abstract import TacotronAbstract # TODO: match function arguments with tacotron class Tacotron2(TacotronAbstract): """Tacotron2 as in https://arxiv.org/abs/1712.05884 It's an autoregressive encoder-attention-decoder-postnet architecture. Args: num_chars (int): number of input characters to define the size of embedding layer. num_speakers (int): number of speakers in the dataset. >1 enables multi-speaker training and model learns speaker embeddings. r (int): initial model reduction rate. postnet_output_dim (int, optional): postnet output channels. Defaults to 80. decoder_output_dim (int, optional): decoder output channels. Defaults to 80. attn_type (str, optional): attention type. Check ```TTS.tts.layers.common_layers.init_attn```. Defaults to 'original'. attn_win (bool, optional): enable/disable attention windowing. It especially useful at inference to keep attention alignment diagonal. Defaults to False. attn_norm (str, optional): Attention normalization method. "sigmoid" or "softmax". Defaults to "softmax". prenet_type (str, optional): prenet type for the decoder. Defaults to "original". prenet_dropout (bool, optional): prenet dropout rate. Defaults to True. forward_attn (bool, optional): enable/disable forward attention. It is only valid if ```attn_type``` is ```original```. Defaults to False. trans_agent (bool, optional): enable/disable transition agent in forward attention. Defaults to False. forward_attn_mask (bool, optional): enable/disable extra masking over forward attention. Defaults to False. location_attn (bool, optional): enable/disable location sensitive attention. It is only valid if ```attn_type``` is ```original```. Defaults to True. attn_K (int, optional): Number of attention heads for GMM attention. Defaults to 5. 
separate_stopnet (bool, optional): enable/disable separate stopnet training without only gradient flow from stopnet to the rest of the model. Defaults to True. bidirectional_decoder (bool, optional): enable/disable bidirectional decoding. Defaults to False. double_decoder_consistency (bool, optional): enable/disable double decoder consistency. Defaults to False. ddc_r (int, optional): reduction rate for the coarse decoder of double decoder consistency. Defaults to None. encoder_in_features (int, optional): input channels for the encoder. Defaults to 512. decoder_in_features (int, optional): input channels for the decoder. Defaults to 512. speaker_embedding_dim (int, optional): external speaker conditioning vector channels. Defaults to None. gst (bool, optional): enable/disable global style token learning. Defaults to False. gst_embedding_dim (int, optional): size of channels for GST vectors. Defaults to 512. gst_num_heads (int, optional): number of attention heads for GST. Defaults to 4. gst_style_tokens (int, optional): number of GST tokens. Defaults to 10. gst_use_speaker_embedding (bool, optional): enable/disable inputing speaker embedding to GST. Defaults to False. 
""" def __init__(self, num_chars, num_speakers, r, postnet_output_dim=80, decoder_output_dim=80, attn_type='original', attn_win=False, attn_norm="softmax", prenet_type="original", prenet_dropout=True, forward_attn=False, trans_agent=False, forward_attn_mask=False, location_attn=True, attn_K=5, separate_stopnet=True, bidirectional_decoder=False, double_decoder_consistency=False, ddc_r=None, encoder_in_features=512, decoder_in_features=512, speaker_embedding_dim=None, gst=False, gst_embedding_dim=512, gst_num_heads=4, gst_style_tokens=10, gst_use_speaker_embedding=False): super(Tacotron2, self).__init__(num_chars, num_speakers, r, postnet_output_dim, decoder_output_dim, attn_type, attn_win, attn_norm, prenet_type, prenet_dropout, forward_attn, trans_agent, forward_attn_mask, location_attn, attn_K, separate_stopnet, bidirectional_decoder, double_decoder_consistency, ddc_r, encoder_in_features, decoder_in_features, speaker_embedding_dim, gst, gst_embedding_dim, gst_num_heads, gst_style_tokens, gst_use_speaker_embedding) # speaker embedding layer if self.num_speakers > 1: if not self.embeddings_per_sample: speaker_embedding_dim = 512 self.speaker_embedding = nn.Embedding(self.num_speakers, speaker_embedding_dim) self.speaker_embedding.weight.data.normal_(0, 0.3) # speaker and gst embeddings is concat in decoder input if self.num_speakers > 1: self.decoder_in_features += speaker_embedding_dim # add speaker embedding dim # embedding layer self.embedding = nn.Embedding(num_chars, 512, padding_idx=0) # base model layers self.encoder = Encoder(self.encoder_in_features) self.decoder = Decoder(self.decoder_in_features, self.decoder_output_dim, r, attn_type, attn_win, attn_norm, prenet_type, prenet_dropout, forward_attn, trans_agent, forward_attn_mask, location_attn, attn_K, separate_stopnet) self.postnet = Postnet(self.postnet_output_dim) # global style token layers if self.gst: self.gst_layer = GST(num_mel=80, num_heads=self.gst_num_heads, 
num_style_tokens=self.gst_style_tokens, gst_embedding_dim=self.gst_embedding_dim, speaker_embedding_dim=speaker_embedding_dim if self.embeddings_per_sample and self.gst_use_speaker_embedding else None) # backward pass decoder if self.bidirectional_decoder: self._init_backward_decoder() # setup DDC if self.double_decoder_consistency: self.coarse_decoder = Decoder( self.decoder_in_features, self.decoder_output_dim, ddc_r, attn_type, attn_win, attn_norm, prenet_type, prenet_dropout, forward_attn, trans_agent, forward_attn_mask, location_attn, attn_K, separate_stopnet) @staticmethod def shape_outputs(mel_outputs, mel_outputs_postnet, alignments): mel_outputs = mel_outputs.transpose(1, 2) mel_outputs_postnet = mel_outputs_postnet.transpose(1, 2) return mel_outputs, mel_outputs_postnet, alignments def forward(self, text, text_lengths, mel_specs=None, mel_lengths=None, speaker_ids=None, speaker_embeddings=None): """ Shapes: text: [B, T_in] text_lengths: [B] mel_specs: [B, T_out, C] mel_lengths: [B] speaker_ids: [B, 1] speaker_embeddings: [B, C] """ # compute mask for padding # B x T_in_max (boolean) input_mask, output_mask = self.compute_masks(text_lengths, mel_lengths) # B x D_embed x T_in_max embedded_inputs = self.embedding(text).transpose(1, 2) # B x T_in_max x D_en encoder_outputs = self.encoder(embedded_inputs, text_lengths) if self.gst: # B x gst_dim encoder_outputs = self.compute_gst(encoder_outputs, mel_specs, speaker_embeddings if self.gst_use_speaker_embedding else None) if self.num_speakers > 1: if not self.embeddings_per_sample: # B x 1 x speaker_embed_dim speaker_embeddings = self.speaker_embedding(speaker_ids)[:, None] else: # B x 1 x speaker_embed_dim speaker_embeddings = torch.unsqueeze(speaker_embeddings, 1) encoder_outputs = self._concat_speaker_embedding(encoder_outputs, speaker_embeddings) encoder_outputs = encoder_outputs * input_mask.unsqueeze(2).expand_as(encoder_outputs) # B x mel_dim x T_out -- B x T_out//r x T_in -- B x T_out//r decoder_outputs, 
alignments, stop_tokens = self.decoder( encoder_outputs, mel_specs, input_mask) # sequence masking if mel_lengths is not None: decoder_outputs = decoder_outputs * output_mask.unsqueeze(1).expand_as(decoder_outputs) # B x mel_dim x T_out postnet_outputs = self.postnet(decoder_outputs) postnet_outputs = decoder_outputs + postnet_outputs # sequence masking if output_mask is not None: postnet_outputs = postnet_outputs * output_mask.unsqueeze(1).expand_as(postnet_outputs) # B x T_out x mel_dim -- B x T_out x mel_dim -- B x T_out//r x T_in decoder_outputs, postnet_outputs, alignments = self.shape_outputs( decoder_outputs, postnet_outputs, alignments) if self.bidirectional_decoder: decoder_outputs_backward, alignments_backward = self._backward_pass(mel_specs, encoder_outputs, input_mask) return decoder_outputs, postnet_outputs, alignments, stop_tokens, decoder_outputs_backward, alignments_backward if self.double_decoder_consistency: decoder_outputs_backward, alignments_backward = self._coarse_decoder_pass(mel_specs, encoder_outputs, alignments, input_mask) return decoder_outputs, postnet_outputs, alignments, stop_tokens, decoder_outputs_backward, alignments_backward return decoder_outputs, postnet_outputs, alignments, stop_tokens @torch.no_grad() def inference(self, text, speaker_ids=None, style_mel=None, speaker_embeddings=None): embedded_inputs = self.embedding(text).transpose(1, 2) encoder_outputs = self.encoder.inference(embedded_inputs) if self.gst: # B x gst_dim encoder_outputs = self.compute_gst(encoder_outputs, style_mel, speaker_embeddings if self.gst_use_speaker_embedding else None) if self.num_speakers > 1: if not self.embeddings_per_sample: speaker_embeddings = self.speaker_embedding(speaker_ids)[:, None] encoder_outputs = self._concat_speaker_embedding(encoder_outputs, speaker_embeddings) decoder_outputs, alignments, stop_tokens = self.decoder.inference( encoder_outputs) postnet_outputs = self.postnet(decoder_outputs) postnet_outputs = decoder_outputs + 
postnet_outputs decoder_outputs, postnet_outputs, alignments = self.shape_outputs( decoder_outputs, postnet_outputs, alignments) return decoder_outputs, postnet_outputs, alignments, stop_tokens def inference_truncated(self, text, speaker_ids=None, style_mel=None, speaker_embeddings=None): """ Preserve model states for continuous inference """ embedded_inputs = self.embedding(text).transpose(1, 2) encoder_outputs = self.encoder.inference_truncated(embedded_inputs) if self.gst: # B x gst_dim encoder_outputs = self.compute_gst(encoder_outputs, style_mel, speaker_embeddings if self.gst_use_speaker_embedding else None) if self.num_speakers > 1: if not self.embeddings_per_sample: speaker_embeddings = self.speaker_embedding(speaker_ids)[:, None] encoder_outputs = self._concat_speaker_embedding(encoder_outputs, speaker_embeddings) mel_outputs, alignments, stop_tokens = self.decoder.inference_truncated( encoder_outputs) mel_outputs_postnet = self.postnet(mel_outputs) mel_outputs_postnet = mel_outputs + mel_outputs_postnet mel_outputs, mel_outputs_postnet, alignments = self.shape_outputs( mel_outputs, mel_outputs_postnet, alignments) return mel_outputs, mel_outputs_postnet, alignments, stop_tokens
13,010
54.131356
152
py
TTS
TTS-master/TTS/tts/models/glow_tts.py
import math
import torch
from torch import nn
from torch.nn import functional as F

from TTS.tts.layers.glow_tts.encoder import Encoder
from TTS.tts.layers.glow_tts.decoder import Decoder
from TTS.tts.utils.generic_utils import sequence_mask
from TTS.tts.layers.glow_tts.monotonic_align import maximum_path, generate_path


class GlowTts(nn.Module):
    """Glow TTS models from https://arxiv.org/abs/2005.11129

    Args:
        num_chars (int): number of embedding characters.
        hidden_channels_enc (int): number of embedding and encoder channels.
        hidden_channels_dec (int): number of decoder channels.
        use_encoder_prenet (bool): enable/disable prenet for encoder. Prenet modules are hard-coded for each alternative encoder.
        hidden_channels_dp (int): number of duration predictor channels.
        out_channels (int): number of output channels. It should be equal to the number of spectrogram filters.
        num_flow_blocks_dec (int): number of decoder blocks.
        kernel_size_dec (int): decoder kernel size.
        dilation_rate (int): rate to increase dilation by each layer in a decoder block.
        num_block_layers (int): number of decoder layers in each decoder block.
        dropout_p_dec (float): dropout rate for decoder.
        num_speakers (int): number of speakers to define the size of speaker embedding layer.
        c_in_channels (int): number of speaker embedding channels. It is set to 512 if embeddings are learned.
        num_splits (int): number of split levels in invertible conv1x1 operation.
        num_squeeze (int): number of squeeze levels. When squeezing, channels increase and time steps reduce by the factor 'num_squeeze'.
        sigmoid_scale (bool): enable/disable sigmoid scaling in decoder.
        mean_only (bool): if True, encoder only computes mean value and uses constant variance for each time step.
        encoder_type (str): encoder module type.
        encoder_params (dict): encoder module parameters.
        external_speaker_embedding_dim (int): channels of external speaker embedding vectors.
    """
    def __init__(self,
                 num_chars,
                 hidden_channels_enc,
                 hidden_channels_dec,
                 use_encoder_prenet,
                 hidden_channels_dp,
                 out_channels,
                 num_flow_blocks_dec=12,
                 kernel_size_dec=5,
                 dilation_rate=5,
                 num_block_layers=4,
                 dropout_p_dp=0.1,
                 dropout_p_dec=0.05,
                 num_speakers=0,
                 c_in_channels=0,
                 num_splits=4,
                 num_squeeze=1,
                 sigmoid_scale=False,
                 mean_only=False,
                 encoder_type="transformer",
                 encoder_params=None,
                 external_speaker_embedding_dim=None):
        super().__init__()
        self.num_chars = num_chars
        self.hidden_channels_dp = hidden_channels_dp
        self.hidden_channels_enc = hidden_channels_enc
        self.hidden_channels_dec = hidden_channels_dec
        self.out_channels = out_channels
        self.num_flow_blocks_dec = num_flow_blocks_dec
        self.kernel_size_dec = kernel_size_dec
        self.dilation_rate = dilation_rate
        self.num_block_layers = num_block_layers
        self.dropout_p_dec = dropout_p_dec
        self.num_speakers = num_speakers
        self.c_in_channels = c_in_channels
        self.num_splits = num_splits
        self.num_squeeze = num_squeeze
        self.sigmoid_scale = sigmoid_scale
        self.mean_only = mean_only
        self.use_encoder_prenet = use_encoder_prenet

        # model constants.
        self.noise_scale = 0.33  # defines the noise variance applied to the random z vector at inference.
        self.length_scale = 1.  # scaler for the duration predictor. The larger it is, the slower the speech.
        self.external_speaker_embedding_dim = external_speaker_embedding_dim

        # if is a multispeaker and c_in_channels is 0, set to 512 (learned
        # embeddings) or to the external embedding size when one is provided.
        if num_speakers > 1:
            if self.c_in_channels == 0 and not self.external_speaker_embedding_dim:
                self.c_in_channels = 512
            elif self.external_speaker_embedding_dim:
                self.c_in_channels = self.external_speaker_embedding_dim

        # text encoder: emits per-token prior mean/log-scale and log-durations.
        self.encoder = Encoder(num_chars,
                               out_channels=out_channels,
                               hidden_channels=hidden_channels_enc,
                               hidden_channels_dp=hidden_channels_dp,
                               encoder_type=encoder_type,
                               encoder_params=encoder_params,
                               mean_only=mean_only,
                               use_prenet=use_encoder_prenet,
                               dropout_p_dp=dropout_p_dp,
                               c_in_channels=self.c_in_channels)

        # invertible flow decoder: spectrogram <-> latent z in both directions.
        self.decoder = Decoder(out_channels,
                               hidden_channels_dec,
                               kernel_size_dec,
                               dilation_rate,
                               num_flow_blocks_dec,
                               num_block_layers,
                               dropout_p=dropout_p_dec,
                               num_splits=num_splits,
                               num_squeeze=num_squeeze,
                               sigmoid_scale=sigmoid_scale,
                               c_in_channels=self.c_in_channels)

        if num_speakers > 1 and not external_speaker_embedding_dim:
            # speaker embedding layer
            self.emb_g = nn.Embedding(num_speakers, self.c_in_channels)
            nn.init.uniform_(self.emb_g.weight, -0.1, 0.1)

    @staticmethod
    def compute_outputs(attn, o_mean, o_log_scale, x_mask):
        """Expand token-level prior statistics to frame level using the
        alignment, and compute the per-token log-durations implied by it."""
        # compute final values with the computed alignment
        y_mean = torch.matmul(
            attn.squeeze(1).transpose(1, 2), o_mean.transpose(1, 2)).transpose(
                1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
        y_log_scale = torch.matmul(
            attn.squeeze(1).transpose(1, 2), o_log_scale.transpose(
                1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
        # compute total duration with adjustment
        o_attn_dur = torch.log(1 + torch.sum(attn, -1)) * x_mask
        return y_mean, y_log_scale, o_attn_dur

    def forward(self, x, x_lengths, y=None, y_lengths=None, attn=None, g=None):
        """Training pass: encode text, run the flow decoder forward on the
        target spectrogram, and find the monotonic alignment (MAS) between
        the latent z and the token-level prior.

        Shapes:
            x: [B, T]
            x_lenghts: B
            y: [B, C, T]
            y_lengths: B
            g: [B, C] or B
        """
        y_max_length = y.size(2)
        # norm speaker embeddings
        if g is not None:
            if self.external_speaker_embedding_dim:
                g = F.normalize(g).unsqueeze(-1)
            else:
                g = F.normalize(self.emb_g(g)).unsqueeze(-1)  # [b, h, 1]
        # embedding pass
        o_mean, o_log_scale, o_dur_log, x_mask = self.encoder(x,
                                                              x_lengths,
                                                              g=g)
        # drop residual frames wrt num_squeeze and set y_lengths.
        y, y_lengths, y_max_length, attn = self.preprocess(
            y, y_lengths, y_max_length, None)
        # create masks
        y_mask = torch.unsqueeze(sequence_mask(y_lengths, y_max_length),
                                 1).to(x_mask.dtype)
        attn_mask = torch.unsqueeze(x_mask, -1) * torch.unsqueeze(y_mask, 2)
        # decoder pass
        z, logdet = self.decoder(y, y_mask, g=g, reverse=False)
        # find the alignment path: terms below expand the Gaussian
        # log-likelihood of z under every (token, frame) pairing so MAS can
        # pick the best monotonic path. No gradients flow through alignment.
        with torch.no_grad():
            o_scale = torch.exp(-2 * o_log_scale)
            logp1 = torch.sum(-0.5 * math.log(2 * math.pi) - o_log_scale,
                              [1]).unsqueeze(-1)  # [b, t, 1]
            logp2 = torch.matmul(o_scale.transpose(1, 2), -0.5 *
                                 (z**2))  # [b, t, d] x [b, d, t'] = [b, t, t']
            logp3 = torch.matmul((o_mean * o_scale).transpose(1, 2),
                                 z)  # [b, t, d] x [b, d, t'] = [b, t, t']
            logp4 = torch.sum(-0.5 * (o_mean**2) * o_scale,
                              [1]).unsqueeze(-1)  # [b, t, 1]
            logp = logp1 + logp2 + logp3 + logp4  # [b, t, t']
            attn = maximum_path(logp,
                                attn_mask.squeeze(1)).unsqueeze(1).detach()
        y_mean, y_log_scale, o_attn_dur = self.compute_outputs(
            attn, o_mean, o_log_scale, x_mask)
        attn = attn.squeeze(1).permute(0, 2, 1)
        return z, logdet, y_mean, y_log_scale, attn, o_dur_log, o_attn_dur

    @torch.no_grad()
    def inference(self, x, x_lengths, g=None):
        """Inference pass: predict durations, build the alignment from them,
        sample z from the expanded prior and run the flow decoder in reverse
        to produce the spectrogram."""
        if g is not None:
            if self.external_speaker_embedding_dim:
                g = F.normalize(g).unsqueeze(-1)
            else:
                g = F.normalize(self.emb_g(g)).unsqueeze(-1)  # [b, h]
        # embedding pass
        o_mean, o_log_scale, o_dur_log, x_mask = self.encoder(x,
                                                              x_lengths,
                                                              g=g)
        # compute output durations
        w = (torch.exp(o_dur_log) - 1) * x_mask * self.length_scale
        w_ceil = torch.ceil(w)
        y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
        y_max_length = None
        # compute masks
        y_mask = torch.unsqueeze(sequence_mask(y_lengths, y_max_length),
                                 1).to(x_mask.dtype)
        attn_mask = torch.unsqueeze(x_mask, -1) * torch.unsqueeze(y_mask, 2)
        # compute attention mask
        attn = generate_path(w_ceil.squeeze(1),
                             attn_mask.squeeze(1)).unsqueeze(1)
        y_mean, y_log_scale, o_attn_dur = self.compute_outputs(
            attn, o_mean, o_log_scale, x_mask)

        z = (y_mean + torch.exp(y_log_scale) * torch.randn_like(y_mean) *
             self.noise_scale) * y_mask
        # decoder pass
        y, logdet = self.decoder(z, y_mask, g=g, reverse=True)
        attn = attn.squeeze(1).permute(0, 2, 1)
        return y, logdet, y_mean, y_log_scale, attn, o_dur_log, o_attn_dur

    def preprocess(self, y, y_lengths, y_max_length, attn=None):
        """Trim spectrogram (and attention) lengths to a multiple of
        ``num_squeeze`` so the decoder's squeeze operation divides evenly."""
        if y_max_length is not None:
            y_max_length = (y_max_length //
                            self.num_squeeze) * self.num_squeeze
            y = y[:, :, :y_max_length]
            if attn is not None:
                attn = attn[:, :, :, :y_max_length]
        y_lengths = (y_lengths // self.num_squeeze) * self.num_squeeze
        return y, y_lengths, y_max_length, attn

    def store_inverse(self):
        """Pre-compute decoder inverses for faster repeated inference."""
        self.decoder.store_inverse()

    def load_checkpoint(self, config, checkpoint_path, eval=False):  # pylint: disable=unused-argument, redefined-builtin
        """Load model weights from ``checkpoint_path``; when ``eval`` is True,
        switch to eval mode and cache decoder inverses."""
        state = torch.load(checkpoint_path, map_location=torch.device('cpu'))
        self.load_state_dict(state['model'])
        if eval:
            self.eval()
            self.store_inverse()
            assert not self.training
11,148
46.645299
138
py
TTS
TTS-master/TTS/tts/datasets/TTSDataset.py
import collections
import os
import random
from multiprocessing import Manager, Pool

import numpy as np
import torch
import tqdm
from torch.utils.data import Dataset

from TTS.tts.utils.data import (prepare_data, prepare_stop_target,
                                prepare_tensor)
from TTS.tts.utils.text import (pad_with_eos_bos, phoneme_to_sequence,
                                text_to_sequence)


class MyDataset(Dataset):
    # Torch Dataset yielding (text sequence, waveform, optional alignment)
    # samples; collate_fn turns a batch of them into padded training tensors.
    def __init__(self,
                 outputs_per_step,
                 text_cleaner,
                 compute_linear_spec,
                 ap,
                 meta_data,
                 tp=None,
                 add_blank=False,
                 batch_group_size=0,
                 min_seq_len=0,
                 max_seq_len=float("inf"),
                 use_phonemes=True,
                 phoneme_cache_path=None,
                 phoneme_language="en-us",
                 enable_eos_bos=False,
                 speaker_mapping=None,
                 use_noise_augment=False,
                 verbose=False):
        """
        Args:
            outputs_per_step (int): number of time frames predicted per step.
            text_cleaner (str): text cleaner used for the dataset.
            compute_linear_spec (bool): compute linear spectrogram if True.
            ap (TTS.tts.utils.AudioProcessor): audio processor object.
            meta_data (list): list of dataset instances.
            batch_group_size (int): (0) range of batch randomization after sorting
                sequences by length.
            min_seq_len (int): (0) minimum sequence length to be processed
                by the loader.
            max_seq_len (int): (float("inf")) maximum sequence length.
            use_phonemes (bool): (true) if true, text converted to phonemes.
            phoneme_cache_path (str): path to cache phoneme features.
            phoneme_language (str): one the languages from
                https://github.com/bootphon/phonemizer#languages
            enable_eos_bos (bool): enable end of sentence and beginning of sentences characters.
            use_noise_augment (bool): enable adding random noise to wav for augmentation.
            verbose (bool): print diagnostic information.
        """
        self.batch_group_size = batch_group_size
        self.items = meta_data
        self.outputs_per_step = outputs_per_step
        self.sample_rate = ap.sample_rate
        self.cleaners = text_cleaner
        self.compute_linear_spec = compute_linear_spec
        self.min_seq_len = min_seq_len
        self.max_seq_len = max_seq_len
        self.ap = ap
        self.tp = tp
        self.add_blank = add_blank
        self.use_phonemes = use_phonemes
        self.phoneme_cache_path = phoneme_cache_path
        self.phoneme_language = phoneme_language
        self.enable_eos_bos = enable_eos_bos
        self.speaker_mapping = speaker_mapping
        self.use_noise_augment = use_noise_augment
        self.verbose = verbose
        # set True once compute_input_seq() has pre-computed item sequences.
        self.input_seq_computed = False
        if use_phonemes and not os.path.isdir(phoneme_cache_path):
            os.makedirs(phoneme_cache_path, exist_ok=True)
        if self.verbose:
            print("\n > DataLoader initialization")
            print(" | > Use phonemes: {}".format(self.use_phonemes))
            if use_phonemes:
                print(" | > phoneme language: {}".format(phoneme_language))
            print(" | > Number of instances : {}".format(len(self.items)))

    def load_wav(self, filename):
        # delegate to the AudioProcessor so resampling/trimming is consistent.
        audio = self.ap.load_wav(filename)
        return audio

    @staticmethod
    def load_np(filename):
        data = np.load(filename).astype('float32')
        return data

    @staticmethod
    def _generate_and_cache_phoneme_sequence(text, cache_path, cleaners,
                                             language, tp, add_blank):
        """generate a phoneme sequence from text.
        since the usage is for subsequent caching, we never add bos and eos chars here.
        Instead we add those dynamically later; based on the config option."""
        phonemes = phoneme_to_sequence(text, [cleaners],
                                       language=language,
                                       enable_eos_bos=False,
                                       tp=tp,
                                       add_blank=add_blank)
        phonemes = np.asarray(phonemes, dtype=np.int32)
        np.save(cache_path, phonemes)
        return phonemes

    @staticmethod
    def _load_or_generate_phoneme_sequence(wav_file, text, phoneme_cache_path,
                                           enable_eos_bos, cleaners, language,
                                           tp, add_blank):
        # Look up the cached phoneme sequence for this utterance; recompute on
        # a cache miss or a corrupt cache file.
        file_name = os.path.splitext(os.path.basename(wav_file))[0]

        # different names for normal phonemes and with blank chars.
        file_name_ext = '_blanked_phoneme.npy' if add_blank else '_phoneme.npy'
        cache_path = os.path.join(phoneme_cache_path,
                                  file_name + file_name_ext)
        try:
            phonemes = np.load(cache_path)
        except FileNotFoundError:
            phonemes = MyDataset._generate_and_cache_phoneme_sequence(
                text, cache_path, cleaners, language, tp, add_blank)
        except (ValueError, IOError):
            print(" [!] failed loading phonemes for {}. "
                  "Recomputing.".format(wav_file))
            phonemes = MyDataset._generate_and_cache_phoneme_sequence(
                text, cache_path, cleaners, language, tp, add_blank)
        if enable_eos_bos:
            phonemes = pad_with_eos_bos(phonemes, tp=tp)
            phonemes = np.asarray(phonemes, dtype=np.int32)
        return phonemes

    def load_data(self, idx):
        """Load one sample: waveform, token/phoneme sequence and (optionally)
        a precomputed attention alignment."""
        item = self.items[idx]

        if len(item) == 4:
            text, wav_file, speaker_name, attn_file = item
        else:
            text, wav_file, speaker_name = item
            attn = None
        wav = np.asarray(self.load_wav(wav_file), dtype=np.float32)

        # apply noise for augmentation
        if self.use_noise_augment:
            wav = wav + (1.0 / 32768.0) * np.random.rand(*wav.shape)

        if not self.input_seq_computed:
            if self.use_phonemes:
                text = self._load_or_generate_phoneme_sequence(
                    wav_file, text, self.phoneme_cache_path,
                    self.enable_eos_bos, self.cleaners, self.phoneme_language,
                    self.tp, self.add_blank)
            else:
                text = np.asarray(text_to_sequence(text, [self.cleaners],
                                                   tp=self.tp,
                                                   add_blank=self.add_blank),
                                  dtype=np.int32)

        assert text.size > 0, self.items[idx][1]
        assert wav.size > 0, self.items[idx][1]

        # NOTE(review): `locals()` check detects whether the 4-tuple branch
        # above bound `attn_file`; fragile — an explicit flag would be safer.
        if "attn_file" in locals():
            attn = np.load(attn_file)

        if len(text) > self.max_seq_len:
            # return a different sample if the phonemized
            # text is longer than the threshold
            # TODO: find a better fix
            return self.load_data(100)

        sample = {
            'text': text,
            'wav': wav,
            'attn': attn,
            'item_idx': self.items[idx][1],
            'speaker_name': speaker_name,
            'wav_file_name': os.path.basename(wav_file)
        }
        return sample

    @staticmethod
    def _phoneme_worker(args):
        # worker for Pool.imap: args = [item, func_args].
        item = args[0]
        func_args = args[1]
        text, wav_file, *_ = item
        phonemes = MyDataset._load_or_generate_phoneme_sequence(
            wav_file, text, *func_args)
        return phonemes

    def compute_input_seq(self, num_workers=0):
        """compute input sequences separately. Call it before
        passing dataset to data loader."""
        if not self.use_phonemes:
            if self.verbose:
                print(" | > Computing input sequences ...")
            for idx, item in enumerate(tqdm.tqdm(self.items)):
                text, *_ = item
                sequence = np.asarray(text_to_sequence(
                    text, [self.cleaners], tp=self.tp,
                    add_blank=self.add_blank),
                                      dtype=np.int32)
                self.items[idx][0] = sequence
        else:
            func_args = [
                self.phoneme_cache_path, self.enable_eos_bos, self.cleaners,
                self.phoneme_language, self.tp, self.add_blank
            ]
            if self.verbose:
                print(" | > Computing phonemes ...")
            if num_workers == 0:
                for idx, item in enumerate(tqdm.tqdm(self.items)):
                    phonemes = self._phoneme_worker([item, func_args])
                    self.items[idx][0] = phonemes
            else:
                # parallel phonemization across num_workers processes.
                with Pool(num_workers) as p:
                    phonemes = list(
                        tqdm.tqdm(p.imap(
                            MyDataset._phoneme_worker,
                            [[item, func_args] for item in self.items]),
                                  total=len(self.items)))
                    for idx, p in enumerate(phonemes):
                        self.items[idx][0] = p

    def sort_items(self):
        r"""Sort instances based on text length in ascending order"""
        lengths = np.array([len(ins[0]) for ins in self.items])
        idxs = np.argsort(lengths)
        new_items = []
        ignored = []
        for i, idx in enumerate(idxs):
            length = lengths[idx]
            if length < self.min_seq_len or length > self.max_seq_len:
                ignored.append(idx)
            else:
                new_items.append(self.items[idx])
        # shuffle batch groups
        if self.batch_group_size > 0:
            for i in range(len(new_items) // self.batch_group_size):
                offset = i * self.batch_group_size
                end_offset = offset + self.batch_group_size
                temp_items = new_items[offset:end_offset]
                random.shuffle(temp_items)
                new_items[offset:end_offset] = temp_items
        self.items = new_items

        if self.verbose:
            print(" | > Max length sequence: {}".format(np.max(lengths)))
            print(" | > Min length sequence: {}".format(np.min(lengths)))
            print(" | > Avg length sequence: {}".format(np.mean(lengths)))
            print(
                " | > Num. instances discarded by max-min (max={}, min={}) seq limits: {}"
                .format(self.max_seq_len, self.min_seq_len, len(ignored)))
            print(" | > Batch group size: {}.".format(self.batch_group_size))

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        return self.load_data(idx)

    def collate_fn(self, batch):
        r"""
            Perform preprocessing and create a final data batch:
            1. Sort batch instances by text-length
            2. Convert Audio signal to Spectrograms.
            3. PAD sequences wrt r.
            4. Load to Torch.
        """

        # Puts each data field into a tensor with outer dimension batch size
        # NOTE(review): collections.Mapping was removed in Python 3.10+; this
        # should be collections.abc.Mapping on modern interpreters.
        if isinstance(batch[0], collections.Mapping):

            text_lenghts = np.array([len(d["text"]) for d in batch])

            # sort items with text input length for RNN efficiency
            text_lenghts, ids_sorted_decreasing = torch.sort(
                torch.LongTensor(text_lenghts), dim=0, descending=True)

            wav = [batch[idx]['wav'] for idx in ids_sorted_decreasing]
            item_idxs = [
                batch[idx]['item_idx'] for idx in ids_sorted_decreasing
            ]
            text = [batch[idx]['text'] for idx in ids_sorted_decreasing]

            speaker_name = [
                batch[idx]['speaker_name'] for idx in ids_sorted_decreasing
            ]
            # get speaker embeddings
            if self.speaker_mapping is not None:
                wav_files_names = [
                    batch[idx]['wav_file_name']
                    for idx in ids_sorted_decreasing
                ]
                speaker_embedding = [
                    self.speaker_mapping[w]['embedding']
                    for w in wav_files_names
                ]
            else:
                speaker_embedding = None
            # compute features
            mel = [self.ap.melspectrogram(w).astype('float32') for w in wav]

            mel_lengths = [m.shape[1] for m in mel]

            # compute 'stop token' targets
            stop_targets = [
                np.array([0.] * (mel_len - 1) + [1.])
                for mel_len in mel_lengths
            ]

            # PAD stop targets
            stop_targets = prepare_stop_target(stop_targets,
                                               self.outputs_per_step)

            # PAD sequences with longest instance in the batch
            text = prepare_data(text).astype(np.int32)

            # PAD features with longest instance
            mel = prepare_tensor(mel, self.outputs_per_step)

            # B x D x T --> B x T x D
            mel = mel.transpose(0, 2, 1)

            # convert things to pytorch
            text_lenghts = torch.LongTensor(text_lenghts)
            text = torch.LongTensor(text)
            mel = torch.FloatTensor(mel).contiguous()
            mel_lengths = torch.LongTensor(mel_lengths)
            stop_targets = torch.FloatTensor(stop_targets)

            if speaker_embedding is not None:
                speaker_embedding = torch.FloatTensor(speaker_embedding)

            # compute linear spectrogram
            if self.compute_linear_spec:
                linear = [
                    self.ap.spectrogram(w).astype('float32') for w in wav
                ]
                linear = prepare_tensor(linear, self.outputs_per_step)
                linear = linear.transpose(0, 2, 1)
                assert mel.shape[1] == linear.shape[1]
                linear = torch.FloatTensor(linear).contiguous()
            else:
                linear = None

            # collate attention alignments
            if batch[0]['attn'] is not None:
                attns = [batch[idx]['attn'].T for idx in ids_sorted_decreasing]
                for idx, attn in enumerate(attns):
                    # pad alignments up to the padded text/mel lengths.
                    pad2 = mel.shape[1] - attn.shape[1]
                    pad1 = text.shape[1] - attn.shape[0]
                    attn = np.pad(attn, [[0, pad1], [0, pad2]])
                    attns[idx] = attn
                attns = prepare_tensor(attns, self.outputs_per_step)
                attns = torch.FloatTensor(attns).unsqueeze(1)
            else:
                attns = None
            return text, text_lenghts, speaker_name, linear, mel, mel_lengths, \
                stop_targets, item_idxs, speaker_embedding, attns

        raise TypeError(("batch must contain tensors, numbers, dicts or lists;\
                         found {}".format(type(batch[0]))))
14,558
40.597143
187
py
TTS
TTS-master/TTS/tts/layers/common_layers.py
import torch
from torch import nn
from torch.nn import functional as F


class Linear(nn.Module):
    """Linear layer with a specific initialization.

    Args:
        in_features (int): number of channels in the input tensor.
        out_features (int): number of channels in the output tensor.
        bias (bool, optional): enable/disable bias in the layer. Defaults to True.
        init_gain (str, optional): method to compute the gain in the weight initializtion
            based on the nonlinear activation used afterwards. Defaults to 'linear'.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 init_gain='linear'):
        super(Linear, self).__init__()
        self.linear_layer = torch.nn.Linear(
            in_features, out_features, bias=bias)
        self._init_w(init_gain)

    def _init_w(self, init_gain):
        # Xavier-uniform with the gain matched to the downstream nonlinearity.
        torch.nn.init.xavier_uniform_(
            self.linear_layer.weight,
            gain=torch.nn.init.calculate_gain(init_gain))

    def forward(self, x):
        return self.linear_layer(x)


class LinearBN(nn.Module):
    """Linear layer with Batch Normalization.

    x -> linear -> BN -> o

    Args:
        in_features (int): number of channels in the input tensor.
        out_features (int ): number of channels in the output tensor.
        bias (bool, optional): enable/disable bias in the linear layer. Defaults to True.
        init_gain (str, optional): method to set the gain for weight initialization. Defaults to 'linear'.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 init_gain='linear'):
        super(LinearBN, self).__init__()
        self.linear_layer = torch.nn.Linear(
            in_features, out_features, bias=bias)
        self.batch_normalization = nn.BatchNorm1d(out_features,
                                                  momentum=0.1,
                                                  eps=1e-5)
        self._init_w(init_gain)

    def _init_w(self, init_gain):
        torch.nn.init.xavier_uniform_(
            self.linear_layer.weight,
            gain=torch.nn.init.calculate_gain(init_gain))

    def forward(self, x):
        """
        Shapes:
            x: [T, B, C] or [B, C]
        """
        out = self.linear_layer(x)
        if len(out.shape) == 3:
            # BatchNorm1d expects [B, C, T]; permute there and back for
            # time-major 3D inputs.
            out = out.permute(1, 2, 0)
        out = self.batch_normalization(out)
        if len(out.shape) == 3:
            out = out.permute(2, 0, 1)
        return out


class Prenet(nn.Module):
    """Tacotron specific Prenet with an optional Batch Normalization.

    Note:
        Prenet with BN improves the model performance significantly especially
    if it is enabled after learning a diagonal attention alignment with the original
    prenet. However, if the target dataset is high quality then it also works from
    the start. It is also suggested to disable dropout if BN is in use.

        prenet_type == "original"
            x -> [linear -> ReLU -> Dropout]xN -> o

        prenet_type == "bn"
            x -> [linear -> BN -> ReLU -> Dropout]xN -> o

    Args:
        in_features (int): number of channels in the input tensor and the inner layers.
        prenet_type (str, optional): prenet type "original" or "bn". Defaults to "original".
        prenet_dropout (bool, optional): dropout rate. Defaults to True.
        out_features (list, optional): List of output channels for each prenet block.
            It also defines number of the prenet blocks based on the length of argument list.
            Defaults to [256, 256]. Pass None to use the default.
        bias (bool, optional): enable/disable bias in prenet linear layers. Defaults to True.

    Raises:
        ValueError: if ``prenet_type`` is neither "original" nor "bn".
    """
    def __init__(self,
                 in_features,
                 prenet_type="original",
                 prenet_dropout=True,
                 out_features=None,
                 bias=True):
        super(Prenet, self).__init__()
        # Avoid the mutable-default-argument pitfall: the previous signature
        # used out_features=[256, 256], sharing one list across all instances.
        if out_features is None:
            out_features = [256, 256]
        self.prenet_type = prenet_type
        self.prenet_dropout = prenet_dropout
        in_features = [in_features] + out_features[:-1]
        if prenet_type == "bn":
            self.linear_layers = nn.ModuleList([
                LinearBN(in_size, out_size, bias=bias)
                for (in_size, out_size) in zip(in_features, out_features)
            ])
        elif prenet_type == "original":
            self.linear_layers = nn.ModuleList([
                Linear(in_size, out_size, bias=bias)
                for (in_size, out_size) in zip(in_features, out_features)
            ])
        else:
            # Fail fast at construction instead of an AttributeError at
            # forward time when linear_layers was never defined.
            raise ValueError(
                " [!] Unknown prenet_type: {}".format(prenet_type))

    def forward(self, x):
        for linear in self.linear_layers:
            if self.prenet_dropout:
                # dropout is tied to self.training; identity in eval mode.
                x = F.dropout(F.relu(linear(x)),
                              p=0.5,
                              training=self.training)
            else:
                x = F.relu(linear(x))
        return x
4,799
36.795276
162
py
TTS
TTS-master/TTS/tts/layers/losses.py
import math import numpy as np import torch from torch import nn from inspect import signature from torch.nn import functional from TTS.tts.utils.generic_utils import sequence_mask from TTS.tts.utils.ssim import ssim # pylint: disable=abstract-method Method # relates https://github.com/pytorch/pytorch/issues/42305 class L1LossMasked(nn.Module): def __init__(self, seq_len_norm): super().__init__() self.seq_len_norm = seq_len_norm def forward(self, x, target, length): """ Args: x: A Variable containing a FloatTensor of size (batch, max_len, dim) which contains the unnormalized probability for each class. target: A Variable containing a LongTensor of size (batch, max_len, dim) which contains the index of the true class for each corresponding step. length: A Variable containing a LongTensor of size (batch,) which contains the length of each data in a batch. Shapes: x: B x T X D target: B x T x D length: B Returns: loss: An average loss value in range [0, 1] masked by the length. """ # mask: (batch, max_len, 1) target.requires_grad = False mask = sequence_mask(sequence_length=length, max_len=target.size(1)).unsqueeze(2).float() if self.seq_len_norm: norm_w = mask / mask.sum(dim=1, keepdim=True) out_weights = norm_w.div(target.shape[0] * target.shape[2]) mask = mask.expand_as(x) loss = functional.l1_loss(x * mask, target * mask, reduction='none') loss = loss.mul(out_weights.to(loss.device)).sum() else: mask = mask.expand_as(x) loss = functional.l1_loss(x * mask, target * mask, reduction='sum') loss = loss / mask.sum() return loss class MSELossMasked(nn.Module): def __init__(self, seq_len_norm): super(MSELossMasked, self).__init__() self.seq_len_norm = seq_len_norm def forward(self, x, target, length): """ Args: x: A Variable containing a FloatTensor of size (batch, max_len, dim) which contains the unnormalized probability for each class. 
target: A Variable containing a LongTensor of size (batch, max_len, dim) which contains the index of the true class for each corresponding step. length: A Variable containing a LongTensor of size (batch,) which contains the length of each data in a batch. Shapes: x: B x T X D target: B x T x D length: B Returns: loss: An average loss value in range [0, 1] masked by the length. """ # mask: (batch, max_len, 1) target.requires_grad = False mask = sequence_mask(sequence_length=length, max_len=target.size(1)).unsqueeze(2).float() if self.seq_len_norm: norm_w = mask / mask.sum(dim=1, keepdim=True) out_weights = norm_w.div(target.shape[0] * target.shape[2]) mask = mask.expand_as(x) loss = functional.mse_loss(x * mask, target * mask, reduction='none') loss = loss.mul(out_weights.to(loss.device)).sum() else: mask = mask.expand_as(x) loss = functional.mse_loss(x * mask, target * mask, reduction='sum') loss = loss / mask.sum() return loss class SSIMLoss(torch.nn.Module): """SSIM loss as explained here https://en.wikipedia.org/wiki/Structural_similarity""" def __init__(self): super().__init__() self.loss_func = ssim def forward(self, y_hat, y, length=None): """ Args: y_hat (tensor): model prediction values. y (tensor): target values. length (tensor): length of each sample in a batch. Shapes: y_hat: B x T X D y: B x T x D length: B Returns: loss: An average loss value in range [0, 1] masked by the length. 
""" if length is not None: m = sequence_mask(sequence_length=length, max_len=y.size(1)).unsqueeze(2).float().to( y_hat.device) y_hat, y = y_hat * m, y * m return 1 - self.loss_func(y_hat.unsqueeze(1), y.unsqueeze(1)) class AttentionEntropyLoss(nn.Module): # pylint: disable=R0201 def forward(self, align): """ Forces attention to be more decisive by penalizing soft attention weights TODO: arguments TODO: unit_test """ entropy = torch.distributions.Categorical(probs=align).entropy() loss = (entropy / np.log(align.shape[1])).mean() return loss class BCELossMasked(nn.Module): def __init__(self, pos_weight): super(BCELossMasked, self).__init__() self.pos_weight = pos_weight def forward(self, x, target, length): """ Args: x: A Variable containing a FloatTensor of size (batch, max_len) which contains the unnormalized probability for each class. target: A Variable containing a LongTensor of size (batch, max_len) which contains the index of the true class for each corresponding step. length: A Variable containing a LongTensor of size (batch,) which contains the length of each data in a batch. Shapes: x: B x T target: B x T length: B Returns: loss: An average loss value in range [0, 1] masked by the length. """ # mask: (batch, max_len, 1) target.requires_grad = False if length is not None: mask = sequence_mask(sequence_length=length, max_len=target.size(1)).float() x = x * mask target = target * mask num_items = mask.sum() else: num_items = torch.numel(x) loss = functional.binary_cross_entropy_with_logits( x, target, pos_weight=self.pos_weight, reduction='sum') loss = loss / num_items return loss class DifferentailSpectralLoss(nn.Module): """Differential Spectral Loss https://arxiv.org/ftp/arxiv/papers/1909/1909.10302.pdf""" def __init__(self, loss_func): super().__init__() self.loss_func = loss_func def forward(self, x, target, length=None): """ Shapes: x: B x T target: B x T length: B Returns: loss: An average loss value in range [0, 1] masked by the length. 
""" x_diff = x[:, 1:] - x[:, :-1] target_diff = target[:, 1:] - target[:, :-1] if length is None: return self.loss_func(x_diff, target_diff) return self.loss_func(x_diff, target_diff, length-1) class GuidedAttentionLoss(torch.nn.Module): def __init__(self, sigma=0.4): super(GuidedAttentionLoss, self).__init__() self.sigma = sigma def _make_ga_masks(self, ilens, olens): B = len(ilens) max_ilen = max(ilens) max_olen = max(olens) ga_masks = torch.zeros((B, max_olen, max_ilen)) for idx, (ilen, olen) in enumerate(zip(ilens, olens)): ga_masks[idx, :olen, :ilen] = self._make_ga_mask( ilen, olen, self.sigma) return ga_masks def forward(self, att_ws, ilens, olens): ga_masks = self._make_ga_masks(ilens, olens).to(att_ws.device) seq_masks = self._make_masks(ilens, olens).to(att_ws.device) losses = ga_masks * att_ws loss = torch.mean(losses.masked_select(seq_masks)) return loss @staticmethod def _make_ga_mask(ilen, olen, sigma): grid_x, grid_y = torch.meshgrid(torch.arange(olen).to(olen), torch.arange(ilen).to(ilen)) grid_x, grid_y = grid_x.float(), grid_y.float() return 1.0 - torch.exp(-(grid_y / ilen - grid_x / olen)**2 / (2 * (sigma**2))) @staticmethod def _make_masks(ilens, olens): in_masks = sequence_mask(ilens) out_masks = sequence_mask(olens) return out_masks.unsqueeze(-1) & in_masks.unsqueeze(-2) class Huber(nn.Module): # pylint: disable=R0201 def forward(self, x, y, length=None): """ Shapes: x: B x T y: B x T length: B """ mask = sequence_mask(sequence_length=length, max_len=y.size(1)).float() return torch.nn.functional.smooth_l1_loss( x * mask, y * mask, reduction='sum') / mask.sum() ######################## # MODEL LOSS LAYERS ######################## class TacotronLoss(torch.nn.Module): """Collection of Tacotron set-up based on provided config.""" def __init__(self, c, stopnet_pos_weight=10, ga_sigma=0.4): super(TacotronLoss, self).__init__() self.stopnet_pos_weight = stopnet_pos_weight self.ga_alpha = c.ga_alpha self.decoder_diff_spec_alpha = 
c.decoder_diff_spec_alpha self.postnet_diff_spec_alpha = c.postnet_diff_spec_alpha self.decoder_alpha = c.decoder_loss_alpha self.postnet_alpha = c.postnet_loss_alpha self.decoder_ssim_alpha = c.decoder_ssim_alpha self.postnet_ssim_alpha = c.postnet_ssim_alpha self.config = c # postnet and decoder loss if c.loss_masking: self.criterion = L1LossMasked(c.seq_len_norm) if c.model in [ "Tacotron" ] else MSELossMasked(c.seq_len_norm) else: self.criterion = nn.L1Loss() if c.model in ["Tacotron" ] else nn.MSELoss() # guided attention loss if c.ga_alpha > 0: self.criterion_ga = GuidedAttentionLoss(sigma=ga_sigma) # differential spectral loss if c.postnet_diff_spec_alpha > 0 or c.decoder_diff_spec_alpha > 0: self.criterion_diff_spec = DifferentailSpectralLoss(loss_func=self.criterion) # ssim loss if c.postnet_ssim_alpha > 0 or c.decoder_ssim_alpha > 0: self.criterion_ssim = SSIMLoss() # stopnet loss # pylint: disable=not-callable self.criterion_st = BCELossMasked( pos_weight=torch.tensor(stopnet_pos_weight)) if c.stopnet else None def forward(self, postnet_output, decoder_output, mel_input, linear_input, stopnet_output, stopnet_target, output_lens, decoder_b_output, alignments, alignment_lens, alignments_backwards, input_lens): return_dict = {} # remove lengths if no masking is applied if not self.config.loss_masking: output_lens = None # decoder and postnet losses if self.config.loss_masking: if self.decoder_alpha > 0: decoder_loss = self.criterion(decoder_output, mel_input, output_lens) if self.postnet_alpha > 0: if self.config.model in ["Tacotron", "TacotronGST"]: postnet_loss = self.criterion(postnet_output, linear_input, output_lens) else: postnet_loss = self.criterion(postnet_output, mel_input, output_lens) else: if self.decoder_alpha > 0: decoder_loss = self.criterion(decoder_output, mel_input) if self.postnet_alpha > 0: if self.config.model in ["Tacotron", "TacotronGST"]: postnet_loss = self.criterion(postnet_output, linear_input) else: postnet_loss = 
self.criterion(postnet_output, mel_input) loss = self.decoder_alpha * decoder_loss + self.postnet_alpha * postnet_loss return_dict['decoder_loss'] = decoder_loss return_dict['postnet_loss'] = postnet_loss # stopnet loss stop_loss = self.criterion_st( stopnet_output, stopnet_target, output_lens) if self.config.stopnet else torch.zeros(1) if not self.config.separate_stopnet and self.config.stopnet: loss += stop_loss return_dict['stopnet_loss'] = stop_loss # backward decoder loss (if enabled) if self.config.bidirectional_decoder: if self.config.loss_masking: decoder_b_loss = self.criterion( torch.flip(decoder_b_output, dims=(1, )), mel_input, output_lens) else: decoder_b_loss = self.criterion(torch.flip(decoder_b_output, dims=(1, )), mel_input) decoder_c_loss = torch.nn.functional.l1_loss(torch.flip(decoder_b_output, dims=(1, )), decoder_output) loss += self.decoder_alpha * (decoder_b_loss + decoder_c_loss) return_dict['decoder_b_loss'] = decoder_b_loss return_dict['decoder_c_loss'] = decoder_c_loss # double decoder consistency loss (if enabled) if self.config.double_decoder_consistency: if self.config.loss_masking: decoder_b_loss = self.criterion(decoder_b_output, mel_input, output_lens) else: decoder_b_loss = self.criterion(decoder_b_output, mel_input) # decoder_c_loss = torch.nn.functional.l1_loss(decoder_b_output, decoder_output) attention_c_loss = torch.nn.functional.l1_loss(alignments, alignments_backwards) loss += self.decoder_alpha * (decoder_b_loss + attention_c_loss) return_dict['decoder_coarse_loss'] = decoder_b_loss return_dict['decoder_ddc_loss'] = attention_c_loss # guided attention loss (if enabled) if self.config.ga_alpha > 0: ga_loss = self.criterion_ga(alignments, input_lens, alignment_lens) loss += ga_loss * self.ga_alpha return_dict['ga_loss'] = ga_loss # decoder differential spectral loss if self.config.decoder_diff_spec_alpha > 0: decoder_diff_spec_loss = self.criterion_diff_spec(decoder_output, mel_input, output_lens) loss += 
decoder_diff_spec_loss * self.decoder_diff_spec_alpha return_dict['decoder_diff_spec_loss'] = decoder_diff_spec_loss # postnet differential spectral loss if self.config.postnet_diff_spec_alpha > 0: postnet_diff_spec_loss = self.criterion_diff_spec(postnet_output, mel_input, output_lens) loss += postnet_diff_spec_loss * self.postnet_diff_spec_alpha return_dict['postnet_diff_spec_loss'] = postnet_diff_spec_loss # decoder ssim loss if self.config.decoder_ssim_alpha > 0: decoder_ssim_loss = self.criterion_ssim(decoder_output, mel_input, output_lens) loss += decoder_ssim_loss * self.postnet_ssim_alpha return_dict['decoder_ssim_loss'] = decoder_ssim_loss # postnet ssim loss if self.config.postnet_ssim_alpha > 0: postnet_ssim_loss = self.criterion_ssim(postnet_output, mel_input, output_lens) loss += postnet_ssim_loss * self.postnet_ssim_alpha return_dict['postnet_ssim_loss'] = postnet_ssim_loss return_dict['loss'] = loss # check if any loss is NaN for key, loss in return_dict.items(): if torch.isnan(loss): raise RuntimeError(f" [!] NaN loss with {key}.") return return_dict class GlowTTSLoss(torch.nn.Module): def __init__(self): super().__init__() self.constant_factor = 0.5 * math.log(2 * math.pi) def forward(self, z, means, scales, log_det, y_lengths, o_dur_log, o_attn_dur, x_lengths): return_dict = {} # flow loss - neg log likelihood pz = torch.sum(scales) + 0.5 * torch.sum( torch.exp(-2 * scales) * (z - means)**2) log_mle = self.constant_factor + (pz - torch.sum(log_det)) / ( torch.sum(y_lengths) * z.shape[1]) # duration loss - MSE # loss_dur = torch.sum((o_dur_log - o_attn_dur)**2) / torch.sum(x_lengths) # duration loss - huber loss loss_dur = torch.nn.functional.smooth_l1_loss( o_dur_log, o_attn_dur, reduction='sum') / torch.sum(x_lengths) return_dict['loss'] = log_mle + loss_dur return_dict['log_mle'] = log_mle return_dict['loss_dur'] = loss_dur # check if any loss is NaN for key, loss in return_dict.items(): if torch.isnan(loss): raise RuntimeError(f" [!] 
NaN loss with {key}.") return return_dict class SpeedySpeechLoss(nn.Module): def __init__(self, c): super().__init__() self.l1 = L1LossMasked(False) self.ssim = SSIMLoss() self.huber = Huber() self.ssim_alpha = c.ssim_alpha self.huber_alpha = c.huber_alpha self.l1_alpha = c.l1_alpha def forward(self, decoder_output, decoder_target, decoder_output_lens, dur_output, dur_target, input_lens): l1_loss = self.l1(decoder_output, decoder_target, decoder_output_lens) ssim_loss = self.ssim(decoder_output, decoder_target, decoder_output_lens) huber_loss = self.huber(dur_output, dur_target, input_lens) loss = l1_loss + ssim_loss + huber_loss return {'loss': loss, 'loss_l1': l1_loss, 'loss_ssim': ssim_loss, 'loss_dur': huber_loss}
18,140
39.493304
114
py
TTS
TTS-master/TTS/tts/layers/tacotron.py
# coding: utf-8 import torch from torch import nn from .common_layers import Prenet from .attentions import init_attn class BatchNormConv1d(nn.Module): r"""A wrapper for Conv1d with BatchNorm. It sets the activation function between Conv and BatchNorm layers. BatchNorm layer is initialized with the TF default values for momentum and eps. Args: in_channels: size of each input sample out_channels: size of each output samples kernel_size: kernel size of conv filters stride: stride of conv filters padding: padding of conv filters activation: activation function set b/w Conv1d and BatchNorm Shapes: - input: (B, D) - output: (B, D) """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, activation=None): super(BatchNormConv1d, self).__init__() self.padding = padding self.padder = nn.ConstantPad1d(padding, 0) self.conv1d = nn.Conv1d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=0, bias=False) # Following tensorflow's default parameters self.bn = nn.BatchNorm1d(out_channels, momentum=0.99, eps=1e-3) self.activation = activation # self.init_layers() def init_layers(self): if isinstance(self.activation, torch.nn.ReLU): w_gain = 'relu' elif isinstance(self.activation, torch.nn.Tanh): w_gain = 'tanh' elif self.activation is None: w_gain = 'linear' else: raise RuntimeError('Unknown activation function') torch.nn.init.xavier_uniform_( self.conv1d.weight, gain=torch.nn.init.calculate_gain(w_gain)) def forward(self, x): x = self.padder(x) x = self.conv1d(x) x = self.bn(x) if self.activation is not None: x = self.activation(x) return x class Highway(nn.Module): r"""Highway layers as explained in https://arxiv.org/abs/1505.00387 Args: in_features (int): size of each input sample out_feature (int): size of each output sample Shapes: - input: (B, *, H_in) - output: (B, *, H_out) """ # TODO: Try GLU layer def __init__(self, in_features, out_feature): super(Highway, self).__init__() self.H = nn.Linear(in_features, out_feature) 
self.H.bias.data.zero_() self.T = nn.Linear(in_features, out_feature) self.T.bias.data.fill_(-1) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() # self.init_layers() def init_layers(self): torch.nn.init.xavier_uniform_( self.H.weight, gain=torch.nn.init.calculate_gain('relu')) torch.nn.init.xavier_uniform_( self.T.weight, gain=torch.nn.init.calculate_gain('sigmoid')) def forward(self, inputs): H = self.relu(self.H(inputs)) T = self.sigmoid(self.T(inputs)) return H * T + inputs * (1.0 - T) class CBHG(nn.Module): """CBHG module: a recurrent neural network composed of: - 1-d convolution banks - Highway networks + residual connections - Bidirectional gated recurrent units Args: in_features (int): sample size K (int): max filter size in conv bank projections (list): conv channel sizes for conv projections num_highways (int): number of highways layers Shapes: - input: (B, C, T_in) - output: (B, T_in, C*2) """ #pylint: disable=dangerous-default-value def __init__(self, in_features, K=16, conv_bank_features=128, conv_projections=[128, 128], highway_features=128, gru_features=128, num_highways=4): super(CBHG, self).__init__() self.in_features = in_features self.conv_bank_features = conv_bank_features self.highway_features = highway_features self.gru_features = gru_features self.conv_projections = conv_projections self.relu = nn.ReLU() # list of conv1d bank with filter size k=1...K # TODO: try dilational layers instead self.conv1d_banks = nn.ModuleList([ BatchNormConv1d(in_features, conv_bank_features, kernel_size=k, stride=1, padding=[(k - 1) // 2, k // 2], activation=self.relu) for k in range(1, K + 1) ]) # max pooling of conv bank, with padding # TODO: try average pooling OR larger kernel size out_features = [K * conv_bank_features] + conv_projections[:-1] activations = [self.relu] * (len(conv_projections) - 1) activations += [None] # setup conv1d projection layers layer_set = [] for (in_size, out_size, ac) in zip(out_features, conv_projections, activations): layer = 
BatchNormConv1d(in_size, out_size, kernel_size=3, stride=1, padding=[1, 1], activation=ac) layer_set.append(layer) self.conv1d_projections = nn.ModuleList(layer_set) # setup Highway layers if self.highway_features != conv_projections[-1]: self.pre_highway = nn.Linear(conv_projections[-1], highway_features, bias=False) self.highways = nn.ModuleList([ Highway(highway_features, highway_features) for _ in range(num_highways) ]) # bi-directional GPU layer self.gru = nn.GRU(gru_features, gru_features, 1, batch_first=True, bidirectional=True) def forward(self, inputs): # (B, in_features, T_in) x = inputs # (B, hid_features*K, T_in) # Concat conv1d bank outputs outs = [] for conv1d in self.conv1d_banks: out = conv1d(x) outs.append(out) x = torch.cat(outs, dim=1) assert x.size(1) == self.conv_bank_features * len(self.conv1d_banks) for conv1d in self.conv1d_projections: x = conv1d(x) x += inputs x = x.transpose(1, 2) if self.highway_features != self.conv_projections[-1]: x = self.pre_highway(x) # Residual connection # TODO: try residual scaling as in Deep Voice 3 # TODO: try plain residual layers for highway in self.highways: x = highway(x) # (B, T_in, hid_features*2) # TODO: replace GRU with convolution as in Deep Voice 3 self.gru.flatten_parameters() outputs, _ = self.gru(x) return outputs class EncoderCBHG(nn.Module): r"""CBHG module with Encoder specific arguments""" def __init__(self): super(EncoderCBHG, self).__init__() self.cbhg = CBHG( 128, K=16, conv_bank_features=128, conv_projections=[128, 128], highway_features=128, gru_features=128, num_highways=4) def forward(self, x): return self.cbhg(x) class Encoder(nn.Module): r"""Stack Prenet and CBHG module for encoder Args: inputs (FloatTensor): embedding features Shapes: - inputs: (B, T, D_in) - outputs: (B, T, 128 * 2) """ def __init__(self, in_features): super(Encoder, self).__init__() self.prenet = Prenet(in_features, out_features=[256, 128]) self.cbhg = EncoderCBHG() def forward(self, inputs): # B x T x prenet_dim 
outputs = self.prenet(inputs) outputs = self.cbhg(outputs.transpose(1, 2)) return outputs class PostCBHG(nn.Module): def __init__(self, mel_dim): super(PostCBHG, self).__init__() self.cbhg = CBHG( mel_dim, K=8, conv_bank_features=128, conv_projections=[256, mel_dim], highway_features=128, gru_features=128, num_highways=4) def forward(self, x): return self.cbhg(x) class Decoder(nn.Module): """Tacotron decoder. Args: in_channels (int): number of input channels. frame_channels (int): number of feature frame channels. r (int): number of outputs per time step (reduction rate). memory_size (int): size of the past window. if <= 0 memory_size = r attn_type (string): type of attention used in decoder. attn_windowing (bool): if true, define an attention window centered to maximum attention response. It provides more robust attention alignment especially at interence time. attn_norm (string): attention normalization function. 'sigmoid' or 'softmax'. prenet_type (string): 'original' or 'bn'. prenet_dropout (float): prenet dropout rate. forward_attn (bool): if true, use forward attention method. https://arxiv.org/abs/1807.06736 trans_agent (bool): if true, use transition agent. https://arxiv.org/abs/1807.06736 forward_attn_mask (bool): if true, mask attention values smaller than a threshold. location_attn (bool): if true, use location sensitive attention. attn_K (int): number of attention heads for GravesAttention. separate_stopnet (bool): if true, detach stopnet input to prevent gradient flow. speaker_embedding_dim (int): size of speaker embedding vector, for multi-speaker training. 
""" # Pylint gets confused by PyTorch conventions here # pylint: disable=attribute-defined-outside-init def __init__(self, in_channels, frame_channels, r, memory_size, attn_type, attn_windowing, attn_norm, prenet_type, prenet_dropout, forward_attn, trans_agent, forward_attn_mask, location_attn, attn_K, separate_stopnet): super(Decoder, self).__init__() self.r_init = r self.r = r self.in_channels = in_channels self.max_decoder_steps = 500 self.use_memory_queue = memory_size > 0 self.memory_size = memory_size if memory_size > 0 else r self.frame_channels = frame_channels self.separate_stopnet = separate_stopnet self.query_dim = 256 # memory -> |Prenet| -> processed_memory prenet_dim = frame_channels * self.memory_size if self.use_memory_queue else frame_channels self.prenet = Prenet( prenet_dim, prenet_type, prenet_dropout, out_features=[256, 128]) # processed_inputs, processed_memory -> |Attention| -> Attention, attention, RNN_State # attention_rnn generates queries for the attention mechanism self.attention_rnn = nn.GRUCell(in_channels + 128, self.query_dim) self.attention = init_attn(attn_type=attn_type, query_dim=self.query_dim, embedding_dim=in_channels, attention_dim=128, location_attention=location_attn, attention_location_n_filters=32, attention_location_kernel_size=31, windowing=attn_windowing, norm=attn_norm, forward_attn=forward_attn, trans_agent=trans_agent, forward_attn_mask=forward_attn_mask, attn_K=attn_K) # (processed_memory | attention context) -> |Linear| -> decoder_RNN_input self.project_to_decoder_in = nn.Linear(256 + in_channels, 256) # decoder_RNN_input -> |RNN| -> RNN_state self.decoder_rnns = nn.ModuleList( [nn.GRUCell(256, 256) for _ in range(2)]) # RNN_state -> |Linear| -> mel_spec self.proj_to_mel = nn.Linear(256, frame_channels * self.r_init) # learn init values instead of zero init. 
self.stopnet = StopNet(256 + frame_channels * self.r_init) def set_r(self, new_r): self.r = new_r def _reshape_memory(self, memory): """ Reshape the spectrograms for given 'r' """ # Grouping multiple frames if necessary if memory.size(-1) == self.frame_channels: memory = memory.view(memory.shape[0], memory.size(1) // self.r, -1) # Time first (T_decoder, B, frame_channels) memory = memory.transpose(0, 1) return memory def _init_states(self, inputs): """ Initialization of decoder states """ B = inputs.size(0) # go frame as zeros matrix if self.use_memory_queue: self.memory_input = torch.zeros(1, device=inputs.device).repeat(B, self.frame_channels * self.memory_size) else: self.memory_input = torch.zeros(1, device=inputs.device).repeat(B, self.frame_channels) # decoder states self.attention_rnn_hidden = torch.zeros(1, device=inputs.device).repeat(B, 256) self.decoder_rnn_hiddens = [ torch.zeros(1, device=inputs.device).repeat(B, 256) for idx in range(len(self.decoder_rnns)) ] self.context_vec = inputs.data.new(B, self.in_channels).zero_() # cache attention inputs self.processed_inputs = self.attention.preprocess_inputs(inputs) def _parse_outputs(self, outputs, attentions, stop_tokens): # Back to batch first attentions = torch.stack(attentions).transpose(0, 1) stop_tokens = torch.stack(stop_tokens).transpose(0, 1) outputs = torch.stack(outputs).transpose(0, 1).contiguous() outputs = outputs.view( outputs.size(0), -1, self.frame_channels) outputs = outputs.transpose(1, 2) return outputs, attentions, stop_tokens def decode(self, inputs, mask=None): # Prenet processed_memory = self.prenet(self.memory_input) # Attention RNN self.attention_rnn_hidden = self.attention_rnn( torch.cat((processed_memory, self.context_vec), -1), self.attention_rnn_hidden) self.context_vec = self.attention( self.attention_rnn_hidden, inputs, self.processed_inputs, mask) # Concat RNN output and attention context vector decoder_input = self.project_to_decoder_in( 
torch.cat((self.attention_rnn_hidden, self.context_vec), -1)) # Pass through the decoder RNNs for idx in range(len(self.decoder_rnns)): self.decoder_rnn_hiddens[idx] = self.decoder_rnns[idx]( decoder_input, self.decoder_rnn_hiddens[idx]) # Residual connection decoder_input = self.decoder_rnn_hiddens[idx] + decoder_input decoder_output = decoder_input # predict mel vectors from decoder vectors output = self.proj_to_mel(decoder_output) # output = torch.sigmoid(output) # predict stop token stopnet_input = torch.cat([decoder_output, output], -1) if self.separate_stopnet: stop_token = self.stopnet(stopnet_input.detach()) else: stop_token = self.stopnet(stopnet_input) output = output[:, : self.r * self.frame_channels] return output, stop_token, self.attention.attention_weights def _update_memory_input(self, new_memory): if self.use_memory_queue: if self.memory_size > self.r: # memory queue size is larger than number of frames per decoder iter self.memory_input = torch.cat([ new_memory, self.memory_input[:, :( self.memory_size - self.r) * self.frame_channels].clone() ], dim=-1) else: # memory queue size smaller than number of frames per decoder iter self.memory_input = new_memory[:, :self.memory_size * self.frame_channels] else: # use only the last frame prediction # assert new_memory.shape[-1] == self.r * self.frame_channels self.memory_input = new_memory[:, self.frame_channels * (self.r - 1):] def forward(self, inputs, memory, mask): """ Args: inputs: Encoder outputs. memory: Decoder memory (autoregression. If None (at eval-time), decoder outputs are used as decoder inputs. If None, it uses the last output as the input. mask: Attention mask for sequence padding. 
Shapes: - inputs: (B, T, D_out_enc) - memory: (B, T_mel, D_mel) """ # Run greedy decoding if memory is None memory = self._reshape_memory(memory) outputs = [] attentions = [] stop_tokens = [] t = 0 self._init_states(inputs) self.attention.init_states(inputs) while len(outputs) < memory.size(0): if t > 0: new_memory = memory[t - 1] self._update_memory_input(new_memory) output, stop_token, attention = self.decode(inputs, mask) outputs += [output] attentions += [attention] stop_tokens += [stop_token.squeeze(1)] t += 1 return self._parse_outputs(outputs, attentions, stop_tokens) def inference(self, inputs): """ Args: inputs: encoder outputs. Shapes: - inputs: batch x time x encoder_out_dim """ outputs = [] attentions = [] stop_tokens = [] t = 0 self._init_states(inputs) self.attention.init_win_idx() self.attention.init_states(inputs) while True: if t > 0: new_memory = outputs[-1] self._update_memory_input(new_memory) output, stop_token, attention = self.decode(inputs, None) stop_token = torch.sigmoid(stop_token.data) outputs += [output] attentions += [attention] stop_tokens += [stop_token] t += 1 if t > inputs.shape[1] / 4 and (stop_token > 0.6 or attention[:, -1].item() > 0.6): break if t > self.max_decoder_steps: print(" | > Decoder stopped with 'max_decoder_steps") break return self._parse_outputs(outputs, attentions, stop_tokens) class StopNet(nn.Module): r"""Stopnet signalling decoder to stop inference. Args: in_features (int): feature dimension of input. """ def __init__(self, in_features): super(StopNet, self).__init__() self.dropout = nn.Dropout(0.1) self.linear = nn.Linear(in_features, 1) torch.nn.init.xavier_uniform_( self.linear.weight, gain=torch.nn.init.calculate_gain('linear')) def forward(self, inputs): outputs = self.dropout(inputs) outputs = self.linear(outputs) return outputs
19,771
37.317829
118
py
TTS
TTS-master/TTS/tts/layers/gst_layers.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class GST(nn.Module):
    """Global Style Token Module for factorizing prosody in speech.

    See https://arxiv.org/pdf/1803.09017

    A reference spectrogram is compressed into a prosody embedding which then
    attends over a learned bank of style tokens; the attention output is the
    style embedding returned to the caller.
    """

    def __init__(self, num_mel, num_heads, num_style_tokens,
                 gst_embedding_dim, speaker_embedding_dim=None):
        super().__init__()
        self.encoder = ReferenceEncoder(num_mel, gst_embedding_dim)
        self.style_token_layer = StyleTokenLayer(num_heads, num_style_tokens,
                                                 gst_embedding_dim,
                                                 speaker_embedding_dim)

    def forward(self, inputs, speaker_embedding=None):
        """Return a style embedding for a batch of reference spectrograms.

        Args:
            inputs: mel spectrograms [batch_size, num_spec_frames, num_mel].
            speaker_embedding: optional per-sample speaker vector appended to
                the prosody encoding before token attention.
        """
        enc_out = self.encoder(inputs)
        # concat speaker_embedding
        if speaker_embedding is not None:
            enc_out = torch.cat([enc_out, speaker_embedding], dim=-1)
        style_embed = self.style_token_layer(enc_out)

        return style_embed


class ReferenceEncoder(nn.Module):
    """NN module creating a fixed size prosody embedding from a spectrogram.

    inputs: mel spectrograms [batch_size, num_spec_frames, num_mel]
    outputs: [batch_size, embedding_dim]
    """

    def __init__(self, num_mel, embedding_dim):
        super().__init__()
        self.num_mel = num_mel
        # 6 strided conv layers; channel progression 1 -> 32 -> ... -> 128
        filters = [1] + [32, 32, 64, 64, 128, 128]
        num_layers = len(filters) - 1
        convs = [
            nn.Conv2d(
                in_channels=filters[i],
                out_channels=filters[i + 1],
                kernel_size=(3, 3),
                stride=(2, 2),
                padding=(1, 1)) for i in range(num_layers)
        ]
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList([
            nn.BatchNorm2d(num_features=filter_size)
            for filter_size in filters[1:]
        ])

        # Frequency axis shrinks by the stride at every conv; compute the
        # final height so the GRU input size can be fixed up-front.
        post_conv_height = self.calculate_post_conv_height(
            num_mel, 3, 2, 1, num_layers)
        self.recurrence = nn.GRU(
            input_size=filters[-1] * post_conv_height,
            hidden_size=embedding_dim // 2,
            batch_first=True)

    def forward(self, inputs):
        batch_size = inputs.size(0)
        x = inputs.view(batch_size, 1, -1, self.num_mel)
        # x: 4D tensor [batch_size, num_channels==1, num_frames, num_mel]
        for conv, bn in zip(self.convs, self.bns):
            x = conv(x)
            x = bn(x)
            x = F.relu(x)

        x = x.transpose(1, 2)
        # x: 4D tensor [batch_size, post_conv_width,
        #               num_channels==128, post_conv_height]
        post_conv_width = x.size(1)

        x = x.contiguous().view(batch_size, post_conv_width, -1)
        # x: 3D tensor [batch_size, post_conv_width,
        #               num_channels*post_conv_height]
        self.recurrence.flatten_parameters()
        # Only the final GRU hidden state is used as the prosody summary.
        memory, out = self.recurrence(x)
        # out: 3D tensor [seq_len==1, batch_size, encoding_size=128]

        return out.squeeze(0)

    @staticmethod
    def calculate_post_conv_height(height, kernel_size, stride, pad,
                                   n_convs):
        """Height of spec after n convolutions with fixed kernel/stride/pad."""
        for _ in range(n_convs):
            height = (height - kernel_size + 2 * pad) // stride + 1
        return height


class StyleTokenLayer(nn.Module):
    """NN Module attending to style tokens based on prosody encodings."""

    def __init__(self, num_heads, num_style_tokens,
                 embedding_dim, speaker_embedding_dim=None):
        super().__init__()
        # Query dim matches the ReferenceEncoder GRU hidden size
        # (embedding_dim // 2), optionally widened by the speaker vector.
        self.query_dim = embedding_dim // 2

        if speaker_embedding_dim:
            # Add speaker embedding dim to query dim
            self.query_dim += speaker_embedding_dim

        self.key_dim = embedding_dim // num_heads
        # Learned token bank; tanh is applied at lookup time, not here.
        self.style_tokens = nn.Parameter(
            torch.FloatTensor(num_style_tokens, self.key_dim))
        nn.init.normal_(self.style_tokens, mean=0, std=0.5)
        self.attention = MultiHeadAttention(
            query_dim=self.query_dim,
            key_dim=self.key_dim,
            num_units=embedding_dim,
            num_heads=num_heads)

    def forward(self, inputs):
        batch_size = inputs.size(0)
        prosody_encoding = inputs.unsqueeze(1)
        # prosody_encoding: 3D tensor [batch_size, 1, encoding_size==128]
        tokens = torch.tanh(self.style_tokens) \
            .unsqueeze(0) \
            .expand(batch_size, -1, -1)
        # tokens: 3D tensor [batch_size, num tokens, token embedding size]
        style_embed = self.attention(prosody_encoding, tokens)

        return style_embed


class MultiHeadAttention(nn.Module):
    '''
    input:
        query --- [N, T_q, query_dim]
        key --- [N, T_k, key_dim]
    output:
        out --- [N, T_q, num_units]
    '''

    def __init__(self, query_dim, key_dim, num_units, num_heads):
        super().__init__()
        self.num_units = num_units
        self.num_heads = num_heads
        self.key_dim = key_dim

        self.W_query = nn.Linear(
            in_features=query_dim, out_features=num_units, bias=False)
        self.W_key = nn.Linear(
            in_features=key_dim, out_features=num_units, bias=False)
        self.W_value = nn.Linear(
            in_features=key_dim, out_features=num_units, bias=False)

    def forward(self, query, key):
        queries = self.W_query(query)  # [N, T_q, num_units]
        keys = self.W_key(key)  # [N, T_k, num_units]
        values = self.W_value(key)

        # Split channels into heads, stacking heads on a new leading dim.
        split_size = self.num_units // self.num_heads
        queries = torch.stack(
            torch.split(queries, split_size, dim=2),
            dim=0)  # [h, N, T_q, num_units/h]
        keys = torch.stack(
            torch.split(keys, split_size, dim=2),
            dim=0)  # [h, N, T_k, num_units/h]
        values = torch.stack(
            torch.split(values, split_size, dim=2),
            dim=0)  # [h, N, T_k, num_units/h]

        # score = softmax(QK^T / (d_k ** 0.5))
        scores = torch.matmul(queries, keys.transpose(2, 3))  # [h, N, T_q, T_k]
        scores = scores / (self.key_dim**0.5)
        scores = F.softmax(scores, dim=3)

        # out = score * V
        out = torch.matmul(scores, values)  # [h, N, T_q, num_units/h]
        # Merge heads back onto the channel dim.
        out = torch.cat(
            torch.split(out, 1, dim=0),
            dim=3).squeeze(0)  # [N, T_q, num_units]

        return out
6,268
34.619318
108
py
TTS
TTS-master/TTS/tts/layers/tacotron2.py
import torch
from torch import nn
from torch.nn import functional as F

from .common_layers import Prenet, Linear
from .attentions import init_attn

# NOTE: linter has a problem with the current TF release
#pylint: disable=no-value-for-parameter
#pylint: disable=unexpected-keyword-arg


class ConvBNBlock(nn.Module):
    r"""Convolutions with Batch Normalization and non-linear activation.

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        kernel_size (int): convolution kernel size.
        activation (str): 'relu', 'tanh', None (linear).

    Shapes:
        - input: (B, C_in, T)
        - output: (B, C_out, T)
    """
    def __init__(self, in_channels, out_channels, kernel_size,
                 activation=None):
        super(ConvBNBlock, self).__init__()
        # Odd kernel required so 'same' padding keeps the time length.
        assert (kernel_size - 1) % 2 == 0
        padding = (kernel_size - 1) // 2
        self.convolution1d = nn.Conv1d(in_channels,
                                       out_channels,
                                       kernel_size,
                                       padding=padding)
        self.batch_normalization = nn.BatchNorm1d(out_channels,
                                                  momentum=0.1,
                                                  eps=1e-5)
        self.dropout = nn.Dropout(p=0.5)
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        else:
            self.activation = nn.Identity()

    def forward(self, x):
        o = self.convolution1d(x)
        o = self.batch_normalization(o)
        o = self.activation(o)
        o = self.dropout(o)
        return o


class Postnet(nn.Module):
    r"""Tacotron2 Postnet refining the decoder mel output with a conv stack.

    Args:
        in_out_channels (int): number of output channels.
        num_convs (int): total number of conv blocks in the stack.

    Shapes:
        - input: (B, C_in, T)
        - output: (B, C_in, T)
    """
    def __init__(self, in_out_channels, num_convs=5):
        super(Postnet, self).__init__()
        self.convolutions = nn.ModuleList()
        self.convolutions.append(
            ConvBNBlock(in_out_channels, 512, kernel_size=5,
                        activation='tanh'))
        for _ in range(1, num_convs - 1):
            self.convolutions.append(
                ConvBNBlock(512, 512, kernel_size=5, activation='tanh'))
        # Last block is linear (no activation) since it predicts a residual.
        self.convolutions.append(
            ConvBNBlock(512, in_out_channels, kernel_size=5,
                        activation=None))

    def forward(self, x):
        o = x
        for layer in self.convolutions:
            o = layer(o)
        return o


class Encoder(nn.Module):
    r"""Tacotron2 Encoder: 3 conv blocks followed by a bidirectional LSTM.

    Args:
        in_out_channels (int): number of input and output channels.

    Shapes:
        - input: (B, C_in, T)
        - output: (B, C_in, T)
    """
    def __init__(self, in_out_channels=512):
        super(Encoder, self).__init__()
        self.convolutions = nn.ModuleList()
        for _ in range(3):
            self.convolutions.append(
                ConvBNBlock(in_out_channels, in_out_channels, 5, 'relu'))
        # Bidirectional halves per-direction hidden size so output stays C_in.
        self.lstm = nn.LSTM(in_out_channels,
                            int(in_out_channels / 2),
                            num_layers=1,
                            batch_first=True,
                            bias=True,
                            bidirectional=True)
        # NOTE(review): appears unused in this file — possibly kept for
        # streaming/truncated inference elsewhere; verify before removing.
        self.rnn_state = None

    def forward(self, x, input_lengths):
        o = x
        for layer in self.convolutions:
            o = layer(o)
        o = o.transpose(1, 2)
        # Pack so padded frames are skipped by the LSTM.
        o = nn.utils.rnn.pack_padded_sequence(o,
                                              input_lengths.cpu(),
                                              batch_first=True)
        self.lstm.flatten_parameters()
        o, _ = self.lstm(o)
        o, _ = nn.utils.rnn.pad_packed_sequence(o, batch_first=True)
        return o

    def inference(self, x):
        # Same as forward but without packing (single unpadded sequence).
        o = x
        for layer in self.convolutions:
            o = layer(o)
        o = o.transpose(1, 2)
        # self.lstm.flatten_parameters()
        o, _ = self.lstm(o)
        return o


# adapted from https://github.com/NVIDIA/tacotron2/
class Decoder(nn.Module):
    """Tacotron2 decoder. We don't use Zoneout but Dropout between RNN layers.

    Args:
        in_channels (int): number of input channels.
        frame_channels (int): number of feature frame channels.
        r (int): number of outputs per time step (reduction rate).
        memory_size (int): size of the past window. if <= 0 memory_size = r
        attn_type (string): type of attention used in decoder.
        attn_win (bool): if true, define an attention window centered to maximum
            attention response. It provides more robust attention alignment especially
            at interence time.
        attn_norm (string): attention normalization function. 'sigmoid' or 'softmax'.
        prenet_type (string): 'original' or 'bn'.
        prenet_dropout (float): prenet dropout rate.
        forward_attn (bool): if true, use forward attention method. https://arxiv.org/abs/1807.06736
        trans_agent (bool): if true, use transition agent. https://arxiv.org/abs/1807.06736
        forward_attn_mask (bool): if true, mask attention values smaller than a threshold.
        location_attn (bool): if true, use location sensitive attention.
        attn_K (int): number of attention heads for GravesAttention.
        separate_stopnet (bool): if true, detach stopnet input to prevent gradient flow.
    """
    # Pylint gets confused by PyTorch conventions here
    #pylint: disable=attribute-defined-outside-init

    def __init__(self, in_channels, frame_channels, r, attn_type, attn_win,
                 attn_norm, prenet_type, prenet_dropout, forward_attn,
                 trans_agent, forward_attn_mask, location_attn, attn_K,
                 separate_stopnet):
        super(Decoder, self).__init__()
        self.frame_channels = frame_channels
        # r_init sizes the projection; self.r may be lowered via set_r().
        self.r_init = r
        self.r = r
        self.encoder_embedding_dim = in_channels
        self.separate_stopnet = separate_stopnet
        self.max_decoder_steps = 1000
        self.stop_threshold = 0.5

        # model dimensions
        self.query_dim = 1024
        self.decoder_rnn_dim = 1024
        self.prenet_dim = 256
        self.attn_dim = 128
        self.p_attention_dropout = 0.1
        self.p_decoder_dropout = 0.1

        # memory -> |Prenet| -> processed_memory
        prenet_dim = self.frame_channels
        self.prenet = Prenet(prenet_dim,
                             prenet_type,
                             prenet_dropout,
                             out_features=[self.prenet_dim, self.prenet_dim],
                             bias=False)

        self.attention_rnn = nn.LSTMCell(self.prenet_dim + in_channels,
                                         self.query_dim,
                                         bias=True)

        self.attention = init_attn(attn_type=attn_type,
                                   query_dim=self.query_dim,
                                   embedding_dim=in_channels,
                                   attention_dim=128,
                                   location_attention=location_attn,
                                   attention_location_n_filters=32,
                                   attention_location_kernel_size=31,
                                   windowing=attn_win,
                                   norm=attn_norm,
                                   forward_attn=forward_attn,
                                   trans_agent=trans_agent,
                                   forward_attn_mask=forward_attn_mask,
                                   attn_K=attn_K)

        self.decoder_rnn = nn.LSTMCell(self.query_dim + in_channels,
                                       self.decoder_rnn_dim,
                                       bias=True)

        self.linear_projection = Linear(self.decoder_rnn_dim + in_channels,
                                        self.frame_channels * self.r_init)

        self.stopnet = nn.Sequential(
            nn.Dropout(0.1),
            Linear(self.decoder_rnn_dim + self.frame_channels * self.r_init,
                   1,
                   bias=True,
                   init_gain='sigmoid'))
        self.memory_truncated = None

    def set_r(self, new_r):
        """Change the reduction rate at runtime (gradual training)."""
        self.r = new_r

    def get_go_frame(self, inputs):
        """Return the all-zero <GO> frame starting autoregression."""
        B = inputs.size(0)
        memory = torch.zeros(1, device=inputs.device).repeat(
            B, self.frame_channels * self.r)
        return memory

    def _init_states(self, inputs, mask, keep_states=False):
        """Reset (or keep, for truncated inference) all recurrent state."""
        B = inputs.size(0)
        # T = inputs.size(1)
        if not keep_states:
            self.query = torch.zeros(1, device=inputs.device).repeat(
                B, self.query_dim)
            self.attention_rnn_cell_state = torch.zeros(
                1, device=inputs.device).repeat(B, self.query_dim)
            self.decoder_hidden = torch.zeros(1, device=inputs.device).repeat(
                B, self.decoder_rnn_dim)
            self.decoder_cell = torch.zeros(1, device=inputs.device).repeat(
                B, self.decoder_rnn_dim)
            self.context = torch.zeros(1, device=inputs.device).repeat(
                B, self.encoder_embedding_dim)
        self.inputs = inputs
        self.processed_inputs = self.attention.preprocess_inputs(inputs)
        self.mask = mask

    def _reshape_memory(self, memory):
        """
        Reshape the spectrograms for given 'r'
        """
        # Grouping multiple frames if necessary
        if memory.size(-1) == self.frame_channels:
            memory = memory.view(memory.shape[0], memory.size(1) // self.r,
                                 -1)
        # Time first (T_decoder, B, frame_channels)
        memory = memory.transpose(0, 1)
        return memory

    def _parse_outputs(self, outputs, stop_tokens, alignments):
        """Stack per-step lists back into batch-first tensors."""
        alignments = torch.stack(alignments).transpose(0, 1)
        stop_tokens = torch.stack(stop_tokens).transpose(0, 1)
        outputs = torch.stack(outputs).transpose(0, 1).contiguous()
        # Ungroup the r frames packed into each decoder step.
        outputs = outputs.view(outputs.size(0), -1, self.frame_channels)
        outputs = outputs.transpose(1, 2)
        return outputs, stop_tokens, alignments

    def _update_memory(self, memory):
        # Keep only the last frame of each r-frame group as the next input.
        if len(memory.shape) == 2:
            return memory[:, self.frame_channels * (self.r - 1):]
        return memory[:, :, self.frame_channels * (self.r - 1):]

    def decode(self, memory):
        '''
         shapes:
            - memory: B x r * self.frame_channels
        '''
        # self.context: B x D_en
        # query_input: B x D_en + (r * self.frame_channels)
        query_input = torch.cat((memory, self.context), -1)
        # self.query and self.attention_rnn_cell_state : B x D_attn_rnn
        self.query, self.attention_rnn_cell_state = self.attention_rnn(
            query_input, (self.query, self.attention_rnn_cell_state))
        self.query = F.dropout(self.query, self.p_attention_dropout,
                               self.training)
        self.attention_rnn_cell_state = F.dropout(
            self.attention_rnn_cell_state, self.p_attention_dropout,
            self.training)
        # B x D_en
        self.context = self.attention(self.query, self.inputs,
                                      self.processed_inputs, self.mask)
        # B x (D_en + D_attn_rnn)
        decoder_rnn_input = torch.cat((self.query, self.context), -1)
        # self.decoder_hidden and self.decoder_cell: B x D_decoder_rnn
        self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
            decoder_rnn_input, (self.decoder_hidden, self.decoder_cell))
        self.decoder_hidden = F.dropout(self.decoder_hidden,
                                        self.p_decoder_dropout, self.training)
        # B x (D_decoder_rnn + D_en)
        decoder_hidden_context = torch.cat((self.decoder_hidden, self.context),
                                           dim=1)
        # B x (self.r * self.frame_channels)
        decoder_output = self.linear_projection(decoder_hidden_context)
        # B x (D_decoder_rnn + (self.r * self.frame_channels))
        stopnet_input = torch.cat((self.decoder_hidden, decoder_output),
                                  dim=1)
        if self.separate_stopnet:
            stop_token = self.stopnet(stopnet_input.detach())
        else:
            stop_token = self.stopnet(stopnet_input)
        # select outputs for the reduction rate self.r
        decoder_output = decoder_output[:, :self.r * self.frame_channels]
        return decoder_output, self.attention.attention_weights, stop_token

    def forward(self, inputs, memories, mask):
        r"""Train Decoder with teacher forcing.
        Args:
            inputs: Encoder outputs.
            memories: Feature frames for teacher-forcing.
            mask: Attention mask for sequence padding.

        Shapes:
            - inputs: (B, T, D_out_enc)
            - memory: (B, T_mel, D_mel)
            - outputs: (B, T_mel, D_mel)
            - alignments: (B, T_in, T_out)
            - stop_tokens: (B, T_out)
        """
        memory = self.get_go_frame(inputs).unsqueeze(0)
        memories = self._reshape_memory(memories)
        memories = torch.cat((memory, memories), dim=0)
        memories = self._update_memory(memories)
        memories = self.prenet(memories)

        self._init_states(inputs, mask=mask)
        self.attention.init_states(inputs)

        outputs, stop_tokens, alignments = [], [], []
        # -1 because the appended <GO> frame is an input, not a target.
        while len(outputs) < memories.size(0) - 1:
            memory = memories[len(outputs)]
            decoder_output, attention_weights, stop_token = self.decode(
                memory)
            outputs += [decoder_output.squeeze(1)]
            stop_tokens += [stop_token.squeeze(1)]
            alignments += [attention_weights]

        outputs, stop_tokens, alignments = self._parse_outputs(
            outputs, stop_tokens, alignments)
        return outputs, alignments, stop_tokens

    def inference(self, inputs):
        r"""Decoder inference without teacher forcing and use
        Stopnet to stop decoder.
        Args:
            inputs: Encoder outputs.

        Shapes:
            - inputs: (B, T, D_out_enc)
            - outputs: (B, T_mel, D_mel)
            - alignments: (B, T_in, T_out)
            - stop_tokens: (B, T_out)
        """
        memory = self.get_go_frame(inputs)
        memory = self._update_memory(memory)

        self._init_states(inputs, mask=None)
        self.attention.init_states(inputs)

        outputs, stop_tokens, alignments, t = [], [], [], 0
        while True:
            memory = self.prenet(memory)
            decoder_output, alignment, stop_token = self.decode(memory)
            stop_token = torch.sigmoid(stop_token.data)
            outputs += [decoder_output.squeeze(1)]
            stop_tokens += [stop_token]
            alignments += [alignment]
            # Minimum-length guard: never stop before half the input length.
            if stop_token > self.stop_threshold and t > inputs.shape[0] // 2:
                break
            if len(outputs) == self.max_decoder_steps:
                print("   | > Decoder stopped with 'max_decoder_steps")
                break
            memory = self._update_memory(decoder_output)
            t += 1

        outputs, stop_tokens, alignments = self._parse_outputs(
            outputs, stop_tokens, alignments)

        return outputs, alignments, stop_tokens

    def inference_truncated(self, inputs):
        """
        Preserve decoder states for continuous inference
        """
        if self.memory_truncated is None:
            self.memory_truncated = self.get_go_frame(inputs)
            self._init_states(inputs, mask=None, keep_states=False)
        else:
            self._init_states(inputs, mask=None, keep_states=True)

        self.attention.init_win_idx()
        self.attention.init_states(inputs)
        outputs, stop_tokens, alignments, t = [], [], [], 0
        while True:
            memory = self.prenet(self.memory_truncated)
            decoder_output, alignment, stop_token = self.decode(memory)
            stop_token = torch.sigmoid(stop_token.data)
            outputs += [decoder_output.squeeze(1)]
            stop_tokens += [stop_token]
            alignments += [alignment]
            if stop_token > 0.7:
                break
            if len(outputs) == self.max_decoder_steps:
                print("   | > Decoder stopped with 'max_decoder_steps")
                break

            self.memory_truncated = decoder_output
            t += 1

        outputs, stop_tokens, alignments = self._parse_outputs(
            outputs, stop_tokens, alignments)

        return outputs, alignments, stop_tokens

    def inference_step(self, inputs, t, memory=None):
        """
        For debug purposes
        """
        if t == 0:
            memory = self.get_go_frame(inputs)
            self._init_states(inputs, mask=None)

        memory = self.prenet(memory)
        # NOTE(review): decode() returns (output, attention_weights,
        # stop_token); the unpack order below swaps stop_token and alignment.
        # Debug-only path — verify before relying on the returned values.
        decoder_output, stop_token, alignment = self.decode(memory)
        stop_token = torch.sigmoid(stop_token.data)
        memory = decoder_output
        return decoder_output, stop_token, alignment
16,992
38.983529
100
py
TTS
TTS-master/TTS/tts/layers/attentions.py
import torch
from torch import nn
from torch.nn import functional as F
from TTS.tts.layers.common_layers import Linear
from scipy.stats import betabinom


class LocationLayer(nn.Module):
    """Layers for Location Sensitive Attention

    Args:
        attention_dim (int): number of channels in the input tensor.
        attention_n_filters (int, optional): number of filters in convolution.
            Defaults to 32.
        attention_kernel_size (int, optional): kernel size of convolution
            filter. Defaults to 31.
    """
    def __init__(self,
                 attention_dim,
                 attention_n_filters=32,
                 attention_kernel_size=31):
        super(LocationLayer, self).__init__()
        # 2 input channels: previous and cumulative attention weights.
        self.location_conv1d = nn.Conv1d(
            in_channels=2,
            out_channels=attention_n_filters,
            kernel_size=attention_kernel_size,
            stride=1,
            padding=(attention_kernel_size - 1) // 2,
            bias=False)
        self.location_dense = Linear(
            attention_n_filters, attention_dim, bias=False, init_gain='tanh')

    def forward(self, attention_cat):
        """
        Shapes:
            attention_cat: [B, 2, C]
        """
        processed_attention = self.location_conv1d(attention_cat)
        processed_attention = self.location_dense(
            processed_attention.transpose(1, 2))
        return processed_attention


class GravesAttention(nn.Module):
    """Graves Attention as is ref1 with updates from ref2.
    ref1: https://arxiv.org/abs/1910.10288
    ref2: https://arxiv.org/pdf/1906.01083.pdf

    Args:
        query_dim (int): number of channels in query tensor.
        K (int): number of Gaussian heads to be used for computing attention.
    """
    COEF = 0.3989422917366028  # numpy.sqrt(1/(2*numpy.pi))

    def __init__(self, query_dim, K):
        super(GravesAttention, self).__init__()
        self._mask_value = 1e-8
        self.K = K
        # self.attention_alignment = 0.05
        self.eps = 1e-5
        self.J = None
        # MLP predicting the 3*K GMM parameters (g, b, k) from the query.
        self.N_a = nn.Sequential(
            nn.Linear(query_dim, query_dim, bias=True),
            nn.ReLU(),
            nn.Linear(query_dim, 3*K, bias=True))
        self.attention_weights = None
        self.mu_prev = None
        self.init_layers()

    def init_layers(self):
        torch.nn.init.constant_(self.N_a[2].bias[(2*self.K):(3*self.K)], 1.)  # bias mean
        torch.nn.init.constant_(self.N_a[2].bias[self.K:(2*self.K)], 10)  # bias std

    def init_states(self, inputs):
        # J is a shared position grid; only regrow it when inputs get longer.
        if self.J is None or inputs.shape[1]+1 > self.J.shape[-1]:
            self.J = torch.arange(0, inputs.shape[1]+2.0).to(inputs.device) + 0.5
        self.attention_weights = torch.zeros(inputs.shape[0], inputs.shape[1]).to(inputs.device)
        self.mu_prev = torch.zeros(inputs.shape[0], self.K).to(inputs.device)

    # pylint: disable=R0201
    # pylint: disable=unused-argument
    def preprocess_inputs(self, inputs):
        # Graves attention needs no precomputed keys.
        return None

    def forward(self, query, inputs, processed_inputs, mask):
        """
        Shapes:
            query: [B, C_attention_rnn]
            inputs: [B, T_in, C_encoder]
            processed_inputs: place_holder
            mask: [B, T_in]
        """
        gbk_t = self.N_a(query)
        gbk_t = gbk_t.view(gbk_t.size(0), -1, self.K)

        # attention model parameters
        # each B x K
        g_t = gbk_t[:, 0, :]
        b_t = gbk_t[:, 1, :]
        k_t = gbk_t[:, 2, :]

        # dropout to decorrelate attention heads
        g_t = torch.nn.functional.dropout(
            g_t, p=0.5, training=self.training)

        # attention GMM parameters
        sig_t = torch.nn.functional.softplus(b_t) + self.eps

        # softplus(k) keeps the means monotonically increasing over steps.
        mu_t = self.mu_prev + torch.nn.functional.softplus(k_t)
        g_t = torch.softmax(g_t, dim=-1) + self.eps

        j = self.J[:inputs.size(1)+1]

        # attention weights
        phi_t = g_t.unsqueeze(-1) * (1 / (1 + torch.sigmoid(
            (mu_t.unsqueeze(-1) - j) / sig_t.unsqueeze(-1))))

        # discritize attention weights
        alpha_t = torch.sum(phi_t, 1)
        alpha_t = alpha_t[:, 1:] - alpha_t[:, :-1]
        alpha_t[alpha_t == 0] = 1e-8

        # apply masking
        if mask is not None:
            alpha_t.data.masked_fill_(~mask, self._mask_value)

        context = torch.bmm(alpha_t.unsqueeze(1), inputs).squeeze(1)
        self.attention_weights = alpha_t
        self.mu_prev = mu_t
        return context


class OriginalAttention(nn.Module):
    """Bahdanau Attention with various optional modifications. Proposed below.
    - Location sensitive attention: https://arxiv.org/abs/1712.05884
    - Forward Attention: https://arxiv.org/abs/1807.06736 + state masking at inference
    - Using sigmoid instead of softmax normalization
    - Attention windowing at inference time

    Note:
        Location Sensitive Attention is an attention mechanism that extends
        the additive attention mechanism to use cumulative attention weights
        from previous decoder time steps as an additional feature.

        Forward attention considers only the alignment paths that satisfy
        the monotonic condition at each decoder timestep. The modified
        attention probabilities at each timestep are computed recursively
        using a forward algorithm.

        Transition agent for forward attention is further proposed, which
        helps the attention mechanism to make decisions whether to move
        forward or stay at each decoder timestep.

        Attention windowing applies a sliding windows to time steps of the
        input tensor centering at the last time step with the largest
        attention weight. It is especially useful at inference to keep the
        attention alignment diagonal.

    Args:
        query_dim (int): number of channels in the query tensor.
        embedding_dim (int): number of channels in the value tensor. In
            general, the value tensor is the output of the encoder layer.
        attention_dim (int): number of channels of the inner attention layers.
        location_attention (bool): enable/disable location sensitive attention.
        attention_location_n_filters (int): number of location attention filters.
        attention_location_kernel_size (int): filter size of location attention
            convolution layer.
        windowing (int): window size for attention windowing. if it is 5, for
            computing the attention, it only considers the time steps
            [(t-5), ..., (t+5)] of the input.
        norm (str): normalization method applied to the attention weights.
            'softmax' or 'sigmoid'
        forward_attn (bool): enable/disable forward attention.
        trans_agent (bool): enable/disable transition agent in the forward
            attention.
        forward_attn_mask (int): enable/disable an explicit masking in forward
            attention. It is useful to set at especially inference time.
    """
    # Pylint gets confused by PyTorch conventions here
    #pylint: disable=attribute-defined-outside-init
    def __init__(self, query_dim, embedding_dim, attention_dim,
                 location_attention, attention_location_n_filters,
                 attention_location_kernel_size, windowing, norm,
                 forward_attn, trans_agent, forward_attn_mask):
        super(OriginalAttention, self).__init__()
        self.query_layer = Linear(
            query_dim, attention_dim, bias=False, init_gain='tanh')
        self.inputs_layer = Linear(
            embedding_dim, attention_dim, bias=False, init_gain='tanh')
        self.v = Linear(attention_dim, 1, bias=True)
        if trans_agent:
            self.ta = nn.Linear(
                query_dim + embedding_dim, 1, bias=True)
        if location_attention:
            self.location_layer = LocationLayer(
                attention_dim,
                attention_location_n_filters,
                attention_location_kernel_size,
            )
        self._mask_value = -float("inf")
        self.windowing = windowing
        self.win_idx = None
        self.norm = norm
        self.forward_attn = forward_attn
        self.trans_agent = trans_agent
        self.forward_attn_mask = forward_attn_mask
        self.location_attention = location_attention

    def init_win_idx(self):
        # Asymmetric window: fewer steps allowed backward than forward.
        self.win_idx = -1
        self.win_back = 2
        self.win_front = 6

    def init_forward_attn(self, inputs):
        B = inputs.shape[0]
        T = inputs.shape[1]
        # Start with all mass on the first encoder step.
        self.alpha = torch.cat(
            [torch.ones([B, 1]),
             torch.zeros([B, T])[:, :-1] + 1e-7], dim=1).to(inputs.device)
        self.u = (0.5 * torch.ones([B, 1])).to(inputs.device)

    def init_location_attention(self, inputs):
        B = inputs.size(0)
        T = inputs.size(1)
        self.attention_weights_cum = torch.zeros([B, T], device=inputs.device)

    def init_states(self, inputs):
        B = inputs.size(0)
        T = inputs.size(1)
        self.attention_weights = torch.zeros([B, T], device=inputs.device)
        if self.location_attention:
            self.init_location_attention(inputs)
        if self.forward_attn:
            self.init_forward_attn(inputs)
        if self.windowing:
            self.init_win_idx()

    def preprocess_inputs(self, inputs):
        # Precompute keys once per utterance.
        return self.inputs_layer(inputs)

    def update_location_attention(self, alignments):
        self.attention_weights_cum += alignments

    def get_location_attention(self, query, processed_inputs):
        attention_cat = torch.cat((self.attention_weights.unsqueeze(1),
                                   self.attention_weights_cum.unsqueeze(1)),
                                  dim=1)
        processed_query = self.query_layer(query.unsqueeze(1))
        processed_attention_weights = self.location_layer(attention_cat)
        energies = self.v(
            torch.tanh(processed_query + processed_attention_weights +
                       processed_inputs))
        energies = energies.squeeze(-1)
        return energies, processed_query

    def get_attention(self, query, processed_inputs):
        processed_query = self.query_layer(query.unsqueeze(1))
        energies = self.v(torch.tanh(processed_query + processed_inputs))
        energies = energies.squeeze(-1)
        return energies, processed_query

    def apply_windowing(self, attention, inputs):
        # Mask energies outside [win_idx - win_back, win_idx + win_front).
        back_win = self.win_idx - self.win_back
        front_win = self.win_idx + self.win_front
        if back_win > 0:
            attention[:, :back_win] = -float("inf")
        if front_win < inputs.shape[1]:
            attention[:, front_win:] = -float("inf")
        # this is a trick to solve a special problem.
        # but it does not hurt.
        if self.win_idx == -1:
            attention[:, 0] = attention.max()
        # Update the window
        self.win_idx = torch.argmax(attention, 1).long()[0].item()
        return attention

    def apply_forward_attention(self, alignment):
        # forward attention
        fwd_shifted_alpha = F.pad(
            self.alpha[:, :-1].clone().to(alignment.device), (1, 0, 0, 0))
        # compute transition potentials
        alpha = ((1 - self.u) * self.alpha
                 + self.u * fwd_shifted_alpha
                 + 1e-8) * alignment
        # force incremental alignment
        if not self.training and self.forward_attn_mask:
            _, n = fwd_shifted_alpha.max(1)
            val, _ = alpha.max(1)
            for b in range(alignment.shape[0]):
                alpha[b, n[b] + 3:] = 0
                alpha[b, :(
                    n[b] - 1
                )] = 0  # ignore all previous states to prevent repetition.
                alpha[b, (n[b] - 2
                          )] = 0.01 * val[b]  # smoothing factor for the prev step
        # renormalize attention weights
        alpha = alpha / alpha.sum(dim=1, keepdim=True)
        return alpha

    def forward(self, query, inputs, processed_inputs, mask):
        """
        shapes:
            query: [B, C_attn_rnn]
            inputs: [B, T_en, D_en]
            processed_inputs: [B, T_en, D_attn]
            mask: [B, T_en]
        """
        if self.location_attention:
            attention, _ = self.get_location_attention(
                query, processed_inputs)
        else:
            attention, _ = self.get_attention(
                query, processed_inputs)
        # apply masking
        if mask is not None:
            attention.data.masked_fill_(~mask, self._mask_value)
        # apply windowing - only in eval mode
        if not self.training and self.windowing:
            attention = self.apply_windowing(attention, inputs)

        # normalize attention values
        if self.norm == "softmax":
            alignment = torch.softmax(attention, dim=-1)
        elif self.norm == "sigmoid":
            alignment = torch.sigmoid(attention) / torch.sigmoid(
                attention).sum(
                    dim=1, keepdim=True)
        else:
            raise ValueError("Unknown value for attention norm type")

        if self.location_attention:
            self.update_location_attention(alignment)

        # apply forward attention if enabled
        if self.forward_attn:
            alignment = self.apply_forward_attention(alignment)
            self.alpha = alignment

        context = torch.bmm(alignment.unsqueeze(1), inputs)
        context = context.squeeze(1)
        self.attention_weights = alignment

        # compute transition agent
        if self.forward_attn and self.trans_agent:
            ta_input = torch.cat([context, query.squeeze(1)], dim=-1)
            self.u = torch.sigmoid(self.ta(ta_input))
        return context


class MonotonicDynamicConvolutionAttention(nn.Module):
    """Dynamic convolution attention from
    https://arxiv.org/pdf/1910.10288.pdf

    query -> linear -> tanh -> linear ->|
                                        |                                            mask values
                                        v                                              |    |
    atten_w(t-1) -|-> conv1d_dynamic -> linear -|-> tanh -> + -> softmax -> * -> * -> context
                  |-> conv1d_static  -> linear -|           |
                  |-> conv1d_prior   -> log ----------------|

    query: attention rnn output.

    Note:
        Dynamic convolution attention is an alternation of the location
        senstive attention with dynamically computed convolution filters from
        the previous attention scores and a set of constraints to keep the
        attention alignment diagonal.

    Args:
        query_dim (int): number of channels in the query tensor.
        embedding_dim (int): number of channels in the value tensor.
        static_filter_dim (int): number of channels in the convolution layer
            computing the static filters.
        static_kernel_size (int): kernel size for the convolution layer
            computing the static filters.
        dynamic_filter_dim (int): number of channels in the convolution layer
            computing the dynamic filters.
        dynamic_kernel_size (int): kernel size for the convolution layer
            computing the dynamic filters.
        prior_filter_len (int, optional): length of the beta-binomial prior
            filter. Defaults to 11 from the paper.
        alpha (float, optional): alpha parameter of the beta-binomial prior.
            Defaults to 0.1 from the paper.
        beta (float, optional): beta parameter of the beta-binomial prior.
            Defaults to 0.9 from the paper.
    """
    def __init__(
            self,
            query_dim,
            embedding_dim,  # pylint: disable=unused-argument
            attention_dim,
            static_filter_dim,
            static_kernel_size,
            dynamic_filter_dim,
            dynamic_kernel_size,
            prior_filter_len=11,
            alpha=0.1,
            beta=0.9,
    ):
        super().__init__()
        self._mask_value = 1e-8
        self.dynamic_filter_dim = dynamic_filter_dim
        self.dynamic_kernel_size = dynamic_kernel_size
        self.prior_filter_len = prior_filter_len
        self.attention_weights = None
        # setup key and query layers
        self.query_layer = nn.Linear(query_dim, attention_dim)
        self.key_layer = nn.Linear(
            attention_dim, dynamic_filter_dim * dynamic_kernel_size,
            bias=False)
        self.static_filter_conv = nn.Conv1d(
            1,
            static_filter_dim,
            static_kernel_size,
            padding=(static_kernel_size - 1) // 2,
            bias=False,
        )
        self.static_filter_layer = nn.Linear(
            static_filter_dim, attention_dim, bias=False)
        self.dynamic_filter_layer = nn.Linear(
            dynamic_filter_dim, attention_dim)
        self.v = nn.Linear(attention_dim, 1, bias=False)

        # Beta-binomial prior biasing the alignment to advance monotonically;
        # flipped so conv1d applies it as a causal filter.
        prior = betabinom.pmf(range(prior_filter_len), prior_filter_len - 1,
                              alpha, beta)
        self.register_buffer("prior", torch.FloatTensor(prior).flip(0))

    # pylint: disable=unused-argument
    def forward(self, query, inputs, processed_inputs, mask):
        """
        query: [B, C_attn_rnn]
        inputs: [B, T_en, D_en]
        processed_inputs: place holder.
        mask: [B, T_en]
        """
        # compute prior filters
        prior_filter = F.conv1d(
            F.pad(self.attention_weights.unsqueeze(1),
                  (self.prior_filter_len - 1, 0)), self.prior.view(1, 1, -1))
        prior_filter = torch.log(prior_filter.clamp_min_(1e-6)).squeeze(1)
        G = self.key_layer(torch.tanh(self.query_layer(query)))
        # compute dynamic filters (one filter set per batch item, via groups)
        dynamic_filter = F.conv1d(
            self.attention_weights.unsqueeze(0),
            G.view(-1, 1, self.dynamic_kernel_size),
            padding=(self.dynamic_kernel_size - 1) // 2,
            groups=query.size(0),
        )
        dynamic_filter = dynamic_filter.view(
            query.size(0), self.dynamic_filter_dim, -1).transpose(1, 2)
        # compute static filters
        static_filter = self.static_filter_conv(
            self.attention_weights.unsqueeze(1)).transpose(1, 2)
        alignment = self.v(
            torch.tanh(
                self.static_filter_layer(static_filter) +
                self.dynamic_filter_layer(dynamic_filter))).squeeze(
                    -1) + prior_filter
        # compute attention weights
        attention_weights = F.softmax(alignment, dim=-1)
        # apply masking
        if mask is not None:
            attention_weights.data.masked_fill_(~mask, self._mask_value)
        self.attention_weights = attention_weights
        # compute context
        context = torch.bmm(attention_weights.unsqueeze(1),
                            inputs).squeeze(1)
        return context

    def preprocess_inputs(self, inputs):  # pylint: disable=no-self-use
        # No precomputed keys needed for dynamic convolution attention.
        return None

    def init_states(self, inputs):
        B = inputs.size(0)
        T = inputs.size(1)
        self.attention_weights = torch.zeros([B, T], device=inputs.device)
        # All attention mass starts on the first encoder step.
        self.attention_weights[:, 0] = 1.
def init_attn(attn_type, query_dim, embedding_dim, attention_dim,
              location_attention, attention_location_n_filters,
              attention_location_kernel_size, windowing, norm, forward_attn,
              trans_agent, forward_attn_mask, attn_K):
    """Factory returning an attention module instance selected by name.

    Args:
        attn_type (str): one of ``'original'``, ``'graves'`` or
            ``'dynamic_convolution'``.
        query_dim (int): number of channels in the query tensor.
        embedding_dim (int): number of channels in the value tensor.
        attention_dim (int): number of channels of the inner attention layers.
        location_attention (bool): enable location-sensitive attention
            (``'original'`` only).
        attention_location_n_filters (int): location conv filter count.
        attention_location_kernel_size (int): location conv kernel size.
        windowing (bool): enable attention windowing at inference.
        norm (str): normalization method for the alignment ('softmax' etc.).
        forward_attn (bool): enable forward attention.
        trans_agent (bool): enable the transition agent of forward attention.
        forward_attn_mask (bool): enable masking for forward attention.
        attn_K (int): number of Gaussian heads (``'graves'`` only).

    Returns:
        nn.Module: the constructed attention module.

    Raises:
        RuntimeError: if ``attn_type`` is not a known attention type.
    """
    if attn_type == "original":
        return OriginalAttention(query_dim, embedding_dim, attention_dim,
                                 location_attention,
                                 attention_location_n_filters,
                                 attention_location_kernel_size, windowing,
                                 norm, forward_attn, trans_agent,
                                 forward_attn_mask)
    if attn_type == "graves":
        return GravesAttention(query_dim, attn_K)
    if attn_type == "dynamic_convolution":
        # static/dynamic filter hyper-parameters follow the defaults of the
        # dynamic convolution attention paper.
        return MonotonicDynamicConvolutionAttention(query_dim,
                                                    embedding_dim,
                                                    attention_dim,
                                                    static_filter_dim=8,
                                                    static_kernel_size=21,
                                                    dynamic_filter_dim=8,
                                                    dynamic_kernel_size=21,
                                                    prior_filter_len=11,
                                                    alpha=0.1,
                                                    beta=0.9)
    # BUG FIX: the original string lacked the f-prefix, so the error message
    # printed the literal text "{attn_type}" instead of the requested type.
    raise RuntimeError(
        f" [!] Given Attention Type '{attn_type}' does not exist.")
20,308
41.047619
169
py
TTS
TTS-master/TTS/tts/layers/speedy_speech/encoder.py
import math

import torch
from torch import nn

from TTS.tts.layers.glow_tts.transformer import RelativePositionTransformer
from TTS.tts.layers.generic.res_conv_bn import ResidualConv1dBNBlock


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding for non-recurrent neural networks.

    Implementation based on "Attention Is All You Need".

    Args:
        channels (int): embedding size (must be even for the sin/cos split).
        dropout (float): dropout rate; a Dropout layer is only created when > 0.
        max_len (int): number of positions pre-computed into the table.
    """
    def __init__(self, channels, dropout=0.0, max_len=5000):
        super().__init__()
        if channels % 2 != 0:
            raise ValueError(
                "Cannot use sin/cos positional encoding with "
                "odd channels (got channels={:d})".format(channels))
        # Pre-compute the whole encoding table once; registered as a buffer so
        # it moves with the module but is not a learnable parameter.
        pe = torch.zeros(max_len, channels)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp((torch.arange(0, channels, 2, dtype=torch.float) *
                              -(math.log(10000.0) / channels)))
        pe[:, 0::2] = torch.sin(position.float() * div_term)
        pe[:, 1::2] = torch.cos(position.float() * div_term)
        # [max_len, C] -> [1, C, max_len] so it broadcasts over the batch.
        pe = pe.unsqueeze(0).transpose(1, 2)
        self.register_buffer('pe', pe)
        if dropout > 0:
            self.dropout = nn.Dropout(p=dropout)
        self.channels = channels

    def forward(self, x, mask=None, first_idx=None, last_idx=None):
        """Add positional encodings to ``x`` (scaled by sqrt(channels)).

        Shapes:
            x: [B, C, T]
            mask: [B, 1, T]
            first_idx: int
            last_idx: int
        """
        x = x * math.sqrt(self.channels)
        if first_idx is None:
            if self.pe.size(2) < x.size(2):
                raise RuntimeError(
                    f"Sequence is {x.size(2)} but PositionalEncoding is"
                    f" limited to {self.pe.size(2)}. See max_len argument.")
            if mask is not None:
                # zero the encodings on padded positions
                pos_enc = (self.pe[:, :, :x.size(2)] * mask)
            else:
                pos_enc = self.pe[:, :, :x.size(2)]
            x = x + pos_enc
        else:
            # slice of the table for chunked / incremental inputs
            x = x + self.pe[:, :, first_idx:last_idx]
        if hasattr(self, 'dropout'):
            x = self.dropout(x)
        return x


class RelativePositionTransformerEncoder(nn.Module):
    """Speedy speech encoder built on Transformer with Relative Position encoding.

    TODO: Integrate speaker conditioning vector.

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        hidden_channels (int): number of hidden channels.
        params (dict): keyword arguments forwarded to
            ``RelativePositionTransformer``.
    """
    def __init__(self, in_channels, out_channels, hidden_channels, params):
        super().__init__()
        # shallow residual-conv prenet before the transformer stack
        self.prenet = ResidualConv1dBNBlock(in_channels,
                                            hidden_channels,
                                            hidden_channels,
                                            kernel_size=5,
                                            num_res_blocks=3,
                                            num_conv_blocks=1,
                                            dilations=[1, 1, 1])
        self.rel_pos_transformer = RelativePositionTransformer(
            hidden_channels, out_channels, hidden_channels, **params)

    def forward(self, x, x_mask=None, g=None):  # pylint: disable=unused-argument
        # With no mask, multiply by the scalar 1 (a no-op).
        if x_mask is None:
            x_mask = 1
        o = self.prenet(x) * x_mask
        o = self.rel_pos_transformer(o, x_mask)
        return o


class ResidualConv1dBNEncoder(nn.Module):
    """Residual Convolutional Encoder as in the original Speedy Speech paper.

    TODO: Integrate speaker conditioning vector.

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        hidden_channels (int): number of hidden channels.
        params (dict): keyword arguments forwarded to
            ``ResidualConv1dBNBlock``.
    """
    def __init__(self, in_channels, out_channels, hidden_channels, params):
        super().__init__()
        self.prenet = nn.Sequential(
            nn.Conv1d(in_channels, hidden_channels, 1), nn.ReLU())
        self.res_conv_block = ResidualConv1dBNBlock(hidden_channels,
                                                    hidden_channels,
                                                    hidden_channels, **params)
        self.postnet = nn.Sequential(*[
            nn.Conv1d(hidden_channels, hidden_channels, 1),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_channels),
            nn.Conv1d(hidden_channels, out_channels, 1)
        ])

    def forward(self, x, x_mask=None, g=None):  # pylint: disable=unused-argument
        if x_mask is None:
            x_mask = 1
        o = self.prenet(x) * x_mask
        o = self.res_conv_block(o, x_mask)
        # residual connection around the conv stack before the postnet
        # (requires in_channels == hidden_channels for the addition)
        o = self.postnet(o + x) * x_mask
        return o * x_mask


class Encoder(nn.Module):
    # pylint: disable=dangerous-default-value
    """Factory class for Speedy Speech encoder enables different encoder types internally.

    Args:
        in_hidden_channels (int): input and hidden channels. Model keeps the
            input channels for the intermediate layers.
        out_channels (int): number of output channels.
        encoder_type (str): encoder layer types. 'transformer' or
            'residual_conv_bn'. Default 'residual_conv_bn'.
        encoder_params (dict): model parameters for specified encoder type.
        c_in_channels (int): number of channels for conditional input.
            NOTE(review): stored but not used by either encoder type here —
            presumably reserved for speaker conditioning; verify against
            callers.

    Note:
        Default encoder_params...

        for 'transformer'
            encoder_params={
                'hidden_channels_ffn': 128,
                'num_heads': 2,
                "kernel_size": 3,
                "dropout_p": 0.1,
                "num_layers": 6,
                "rel_attn_window_size": 4,
                "input_length": None
            },

        for 'residual_conv_bn'
            encoder_params = {
                "kernel_size": 4,
                "dilations": 4 * [1, 2, 4] + [1],
                "num_conv_blocks": 2,
                "num_res_blocks": 13
            }
    """
    def __init__(
            self,
            in_hidden_channels,
            out_channels,
            encoder_type='residual_conv_bn',
            encoder_params={
                "kernel_size": 4,
                "dilations": 4 * [1, 2, 4] + [1],
                "num_conv_blocks": 2,
                "num_res_blocks": 13
            },
            c_in_channels=0):
        super().__init__()
        self.out_channels = out_channels
        self.in_channels = in_hidden_channels
        self.hidden_channels = in_hidden_channels
        self.encoder_type = encoder_type
        self.c_in_channels = c_in_channels
        # init encoder
        if encoder_type.lower() == "transformer":
            # text encoder
            # pylint: disable=unexpected-keyword-arg
            self.encoder = RelativePositionTransformerEncoder(
                in_hidden_channels, out_channels, in_hidden_channels,
                encoder_params)
        elif encoder_type.lower() == 'residual_conv_bn':
            self.encoder = ResidualConv1dBNEncoder(in_hidden_channels,
                                                   out_channels,
                                                   in_hidden_channels,
                                                   encoder_params)
        else:
            raise NotImplementedError(' [!] unknown encoder type.')

    def forward(self, x, x_mask, g=None):  # pylint: disable=unused-argument
        """
        Shapes:
            x: [B, C, T]
            x_mask: [B, 1, T]
            g: [B, C, 1]
        """
        o = self.encoder(x, x_mask)
        return o * x_mask
7,871
36.485714
120
py
TTS
TTS-master/TTS/tts/layers/speedy_speech/duration_predictor.py
from torch import nn

from TTS.tts.layers.generic.res_conv_bn import Conv1dBN


class DurationPredictor(nn.Module):
    """Speedy Speech duration predictor.

    Predicts phoneme durations from encoder outputs.

    Note:
        Outputs are interpreted as log(durations); apply exp to obtain the
        actual durations.

        conv_BN_4x1 -> conv_BN_3x1 -> conv_BN_1x1 -> conv_1x1

    Args:
        hidden_channels (int): number of channels in the inner layers.
    """
    def __init__(self, hidden_channels):
        super().__init__()
        # three Conv1dBN stages with shrinking kernels, then a plain 1x1
        # conv projecting to a single duration channel
        stages = [
            Conv1dBN(hidden_channels, hidden_channels, kernel, 1)
            for kernel in (4, 3, 1)
        ]
        stages.append(nn.Conv1d(hidden_channels, 1, 1))
        self.layers = nn.ModuleList(stages)

    def forward(self, x, x_mask):
        """
        Shapes:
            x: [B, C, T]
            x_mask: [B, 1, T]
        """
        out = x
        # re-apply the mask after every stage so padded frames stay zero
        for stage in self.layers:
            out = stage(out) * x_mask
        return out
1,056
25.425
70
py
TTS
TTS-master/TTS/tts/layers/speedy_speech/decoder.py
import torch
from torch import nn

from TTS.tts.layers.generic.res_conv_bn import Conv1dBNBlock, ResidualConv1dBNBlock, Conv1dBN
from TTS.tts.layers.generic.wavenet import WNBlocks
from TTS.tts.layers.glow_tts.transformer import RelativePositionTransformer


class WaveNetDecoder(nn.Module):
    """WaveNet based decoder with a prenet and a postnet.

    prenet: conv1d_1x1
    postnet: 3 x [conv1d_1x1 -> relu] -> conv1d_1x1

    TODO: Integrate speaker conditioning vector.

    Note:
        default wavenet parameters;
            params = {
                "num_blocks": 12,
                "hidden_channels":192,
                "kernel_size": 5,
                "dilation_rate": 1,
                "num_layers": 4,
                "dropout_p": 0.05
            }

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        hidden_channels (int): number of hidden channels for prenet and postnet.
        c_in_channels (int): number of channels of the conditioning input.
        params (dict): dictionary for residual convolutional blocks; must
            contain 'hidden_channels' (used as the WaveNet width).
    """
    def __init__(self, in_channels, out_channels, hidden_channels,
                 c_in_channels, params):
        super().__init__()
        # prenet: project inputs onto the WaveNet width
        self.prenet = torch.nn.Conv1d(in_channels, params['hidden_channels'],
                                      1)
        # wavenet layers; first positional arg is in_channels, the rest
        # (including 'hidden_channels') come from **params
        self.wn = WNBlocks(params['hidden_channels'],
                           c_in_channels=c_in_channels,
                           **params)
        # postnet: 3 x (1x1 conv -> relu) then a 1x1 projection
        self.postnet = [
            torch.nn.Conv1d(params['hidden_channels'], hidden_channels, 1),
            torch.nn.ReLU(),
            torch.nn.Conv1d(hidden_channels, hidden_channels, 1),
            torch.nn.ReLU(),
            torch.nn.Conv1d(hidden_channels, hidden_channels, 1),
            torch.nn.ReLU(),
            torch.nn.Conv1d(hidden_channels, out_channels, 1),
        ]
        self.postnet = nn.Sequential(*self.postnet)

    def forward(self, x, x_mask=None, g=None):
        # NOTE(review): x_mask defaults to None but is multiplied below —
        # callers apparently always pass a mask; confirm before relying on
        # the default.
        x = self.prenet(x) * x_mask
        x = self.wn(x, x_mask, g)
        o = self.postnet(x) * x_mask
        return o


class RelativePositionTransformerDecoder(nn.Module):
    """Decoder with Relative Positional Transformer.

    Note:
        Default params
            params={
                'hidden_channels_ffn': 128,
                'num_heads': 2,
                "kernel_size": 3,
                "dropout_p": 0.1,
                "num_layers": 8,
                "rel_attn_window_size": 4,
                "input_length": None
            }

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        hidden_channels (int): number of hidden channels including Transformer layers.
        params (dict): keyword arguments forwarded to
            ``RelativePositionTransformer``.
    """
    def __init__(self, in_channels, out_channels, hidden_channels, params):
        super().__init__()
        self.prenet = Conv1dBN(in_channels, hidden_channels, 1, 1)
        # NOTE(review): the prenet outputs hidden_channels but the
        # transformer's first (input) argument is in_channels — this only
        # lines up when in_channels == hidden_channels; verify with callers.
        self.rel_pos_transformer = RelativePositionTransformer(
            in_channels, out_channels, hidden_channels, **params)

    def forward(self, x, x_mask=None, g=None):  # pylint: disable=unused-argument
        o = self.prenet(x) * x_mask
        o = self.rel_pos_transformer(o, x_mask)
        return o


class ResidualConv1dBNDecoder(nn.Module):
    """Residual Convolutional Decoder as in the original Speedy Speech paper.

    TODO: Integrate speaker conditioning vector.

    Note:
        Default params
            params = {
                "kernel_size": 4,
                "dilations": 4 * [1, 2, 4, 8] + [1],
                "num_conv_blocks": 2,
                "num_res_blocks": 17
            }

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        hidden_channels (int): number of hidden channels including ResidualConv1dBNBlock layers.
        params (dict): keyword arguments forwarded to
            ``ResidualConv1dBNBlock``.
    """
    def __init__(self, in_channels, out_channels, hidden_channels, params):
        super().__init__()
        self.res_conv_block = ResidualConv1dBNBlock(in_channels,
                                                    hidden_channels,
                                                    hidden_channels, **params)
        self.post_conv = nn.Conv1d(hidden_channels, hidden_channels, 1)
        self.postnet = nn.Sequential(
            Conv1dBNBlock(hidden_channels,
                          hidden_channels,
                          hidden_channels,
                          params['kernel_size'],
                          1,
                          num_conv_blocks=2),
            nn.Conv1d(hidden_channels, out_channels, 1),
        )

    def forward(self, x, x_mask=None, g=None):  # pylint: disable=unused-argument
        o = self.res_conv_block(x, x_mask)
        # global residual connection around the conv stack
        # (requires in_channels == hidden_channels for the addition)
        o = self.post_conv(o) + x
        return self.postnet(o) * x_mask


class Decoder(nn.Module):
    """Decodes the expanded phoneme encoding into spectrograms.

    Args:
        out_channels (int): number of output channels.
        in_hidden_channels (int): input and hidden channels. Model keeps the
            input channels for the intermediate layers.
        decoder_type (str): decoder layer types. 'transformer',
            'residual_conv_bn' or 'wavenet'. Default 'residual_conv_bn'.
        decoder_params (dict): model parameters for specified decoder type.
        c_in_channels (int): number of channels for conditional input
            (only used by the 'wavenet' decoder).

    Shapes:
        - input: (B, C, T)
    """
    # pylint: disable=dangerous-default-value
    def __init__(
            self,
            out_channels,
            in_hidden_channels,
            decoder_type='residual_conv_bn',
            decoder_params={
                "kernel_size": 4,
                "dilations": 4 * [1, 2, 4, 8] + [1],
                "num_conv_blocks": 2,
                "num_res_blocks": 17
            },
            c_in_channels=0):
        super().__init__()
        # dispatch on decoder type; all three share the same call interface
        if decoder_type == 'transformer':
            self.decoder = RelativePositionTransformerDecoder(
                in_channels=in_hidden_channels,
                out_channels=out_channels,
                hidden_channels=in_hidden_channels,
                params=decoder_params)
        elif decoder_type == 'residual_conv_bn':
            self.decoder = ResidualConv1dBNDecoder(
                in_channels=in_hidden_channels,
                out_channels=out_channels,
                hidden_channels=in_hidden_channels,
                params=decoder_params)
        elif decoder_type == 'wavenet':
            self.decoder = WaveNetDecoder(in_channels=in_hidden_channels,
                                          out_channels=out_channels,
                                          hidden_channels=in_hidden_channels,
                                          c_in_channels=c_in_channels,
                                          params=decoder_params)
        else:
            raise ValueError(f'[!] Unknown decoder type - {decoder_type}')

    def forward(self, x, x_mask, g=None):  # pylint: disable=unused-argument
        """
        Args:
            x: [B, C, T]
            x_mask: [B, 1, T]
            g: [B, C_g, 1]
        """
        # TODO: implement multi-speaker
        o = self.decoder(x, x_mask, g)
        return o
7,226
36.640625
120
py
TTS
TTS-master/TTS/tts/layers/generic/gated_conv.py
from torch import nn

from .normalization import LayerNorm


class GatedConvBlock(nn.Module):
    """Gated convolutional block as in https://arxiv.org/pdf/1612.08083.pdf

    Each layer computes ``glu(norm(conv(dropout(x) * mask)))`` and adds it to
    a running residual that is refreshed after every layer.

    Args:
        in_out_channels (int): number of input/output channels.
        kernel_size (int): convolution kernel size.
        dropout_p (float): dropout rate.
        num_layers (int): number of gated convolution layers.
    """
    def __init__(self, in_out_channels, kernel_size, dropout_p, num_layers):
        super().__init__()
        # class arguments
        self.dropout_p = dropout_p
        self.num_layers = num_layers
        # define layers
        # (the original also created an unused empty `self.layers`
        # ModuleList; removed — it held no parameters or state)
        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        for _ in range(num_layers):
            # 2x channels so that GLU halves them back to in_out_channels
            self.conv_layers += [
                nn.Conv1d(in_out_channels,
                          2 * in_out_channels,
                          kernel_size,
                          padding=kernel_size // 2)
            ]
            self.norm_layers += [LayerNorm(2 * in_out_channels)]

    def forward(self, x, x_mask):
        """
        Shapes:
            x: [B, C, T]
            x_mask: [B, 1, T]
        """
        o = x
        res = x
        for conv, norm in zip(self.conv_layers, self.norm_layers):
            o = nn.functional.dropout(o,
                                      p=self.dropout_p,
                                      training=self.training)
            o = conv(o * x_mask)
            o = norm(o)
            o = nn.functional.glu(o, dim=1)
            # residual is refreshed each layer (cumulative skip)
            o = res + o
            res = o
        return o
1,487
33.604651
76
py
TTS
TTS-master/TTS/tts/layers/generic/res_conv_bn.py
from torch import nn


class ZeroTemporalPad(nn.Module):
    """Pad sequences to equal length in the temporal dimension."""
    def __init__(self, kernel_size, dilation):
        super().__init__()
        total_pad = (dilation * (kernel_size - 1))
        begin = total_pad // 2
        end = total_pad - begin
        # ZeroPad2d((left, right, top, bottom)): padding goes on the
        # second-to-last dimension — assumes a [B, T, C] layout here
        # (TODO confirm against callers).
        self.pad_layer = nn.ZeroPad2d((0, 0, begin, end))

    def forward(self, x):
        return self.pad_layer(x)


class Conv1dBN(nn.Module):
    """1d convolution with batch norm: conv1d -> pad -> relu -> BN.

    Note:
        Batch normalization is applied after ReLU regarding the original
        implementation.

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        kernel_size (int): kernel size for convolutional filters.
        dilation (int): dilation for convolution layers.
    """
    def __init__(self, in_channels, out_channels, kernel_size, dilation):
        super().__init__()
        padding = (dilation * (kernel_size - 1))
        pad_s = padding // 2
        pad_e = padding - pad_s
        self.conv1d = nn.Conv1d(in_channels,
                                out_channels,
                                kernel_size,
                                dilation=dilation)
        # uneven left and right padding on the time (last) axis so the
        # output length equals the input length even for even kernels
        self.pad = nn.ZeroPad2d((pad_s, pad_e, 0, 0))
        self.norm = nn.BatchNorm1d(out_channels)

    def forward(self, x):
        """
        Shapes:
            x: (B, C_in, T) -> (B, C_out, T)
        """
        o = self.conv1d(x)
        o = self.pad(o)
        o = nn.functional.relu(o)
        o = self.norm(o)
        return o


class Conv1dBNBlock(nn.Module):
    """1d convolutional block with batch norm: a chain of Conv1dBN layers.

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        hidden_channels (int): number of inner convolution channels.
        kernel_size (int): kernel size for convolutional filters.
        dilation (int): dilation for convolution layers.
        num_conv_blocks (int, optional): number of convolutional blocks.
            Defaults to 2.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 dilation,
                 num_conv_blocks=2):
        super().__init__()
        # first layer maps in -> hidden, last maps hidden -> out
        conv_bn_blocks = []
        for idx in range(num_conv_blocks):
            layer = Conv1dBN(
                in_channels if idx == 0 else hidden_channels,
                out_channels if idx == (num_conv_blocks - 1) else
                hidden_channels, kernel_size, dilation)
            conv_bn_blocks.append(layer)
        self.conv_bn_blocks = nn.Sequential(*conv_bn_blocks)

    def forward(self, x):
        """
        Shapes:
            x: (B, D, T)
        """
        return self.conv_bn_blocks(x)


class ResidualConv1dBNBlock(nn.Module):
    """Residual Convolutional Blocks with BN.

    Each block has 'num_conv_blocks' conv layers and 'num_res_blocks' such
    blocks are connected with residual connections.

        conv_block = (conv1d -> relu -> bn) x 'num_conv_blocks'
        residual_conv_block = (x -> conv_block ->  + ->) x 'num_res_blocks'
                               ' - - - - - - - - - ^

    Note:
        The residual addition requires every block's output to match its
        input channel count, so in practice
        in_channels == hidden_channels == out_channels.

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        hidden_channels (int): number of inner convolution channels.
        kernel_size (int): kernel size for convolutional filters.
        dilations (list): dilations for each convolution layer.
        num_res_blocks (int, optional): number of residual blocks.
            Defaults to 13.
        num_conv_blocks (int, optional): number of convolutional blocks in
            each residual block. Defaults to 2.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 dilations,
                 num_res_blocks=13,
                 num_conv_blocks=2):
        super().__init__()
        assert len(dilations) == num_res_blocks
        self.res_blocks = nn.ModuleList()
        for idx, dilation in enumerate(dilations):
            block = Conv1dBNBlock(
                in_channels if idx == 0 else hidden_channels,
                out_channels if (idx + 1) == len(dilations) else
                hidden_channels, hidden_channels, kernel_size, dilation,
                num_conv_blocks)
            self.res_blocks.append(block)

    def forward(self, x, x_mask=None):
        """
        Shapes:
            x: (B, C, T)
            x_mask: (B, 1, T) or None (no masking)
        """
        if x_mask is None:
            x_mask = 1.0
        o = x * x_mask
        for block in self.res_blocks:
            res = o
            o = block(o)
            o = o + res
            # x_mask is the scalar 1.0 when no mask was given, so this is
            # always safe (the original re-checked `is not None` redundantly)
            o = o * x_mask
        return o
4,646
38.05042
129
py
TTS
TTS-master/TTS/tts/layers/generic/wavenet.py
import torch
from torch import nn


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """Fused WaveNet gate: tanh(a+b)[:n] * sigmoid(a+b)[n:].

    ``n_channels`` is a 1-element IntTensor (not a plain int) so the function
    remains TorchScript-compatible.
    """
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


class WN(torch.nn.Module):
    """Wavenet layers with weight norm and no input conditioning.

         |-----------------------------------------------------------------------------|
         |                                    |-> tanh    -|                            |
    res -|- conv1d(dilation) -> dropout -> + -|            * -> conv1d1x1 -> split -|- + -> res
    g -------------------------------------|  |-> sigmoid -|                        |
    o ----------------------------------------------------------------------------- + --------- o

    Args:
        in_channels (int): number of input channels.
        hidden_channels (int): number of hidden channels.
        kernel_size (int): filter kernel size for the first conv layer.
        dilation_rate (int): dilations rate to increase dilation per layer.
            If it is 2, dilations are 1, 2, 4, 8 for the next 4 layers.
        num_layers (int): number of wavenet layers.
        c_in_channels (int): number of channels of conditioning input.
        dropout_p (float): dropout rate.
        weight_norm (bool): enable/disable weight norm for convolution layers.
    """
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 num_layers,
                 c_in_channels=0,
                 dropout_p=0,
                 weight_norm=True):
        super().__init__()
        assert kernel_size % 2 == 1
        assert hidden_channels % 2 == 0
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.num_layers = num_layers
        self.c_in_channels = c_in_channels
        self.dropout_p = dropout_p

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.dropout = nn.Dropout(dropout_p)

        # init conditioning layer: one 1x1 conv producing 2*hidden channels
        # per wavenet layer (sliced per-layer in forward)
        if c_in_channels > 0:
            cond_layer = torch.nn.Conv1d(c_in_channels,
                                         2 * hidden_channels * num_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer,
                                                         name='weight')
        # intermediate layers
        for i in range(num_layers):
            dilation = dilation_rate**i
            # "same" padding for odd kernels at this dilation
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels,
                                       2 * hidden_channels,
                                       kernel_size,
                                       dilation=dilation,
                                       padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)

            # last layer only produces the skip half (no residual needed)
            if i < num_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels,
                                             res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer,
                                                        name='weight')
            self.res_skip_layers.append(res_skip_layer)
        # setup weight norm
        if not weight_norm:
            self.remove_weight_norm()

    def forward(self, x, x_mask=None, g=None, **kwargs):  # pylint: disable=unused-argument
        """Run the WaveNet stack.

        Shapes (per the masking ops below):
            x: [B, hidden_channels, T]
            x_mask: [B, 1, T]
            g: [B, c_in_channels, T] conditioning, optional.

        NOTE(review): x_mask defaults to None but is multiplied below, which
        would raise; callers apparently always pass a mask — confirm.
        """
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])
        if g is not None:
            g = self.cond_layer(g)
        for i in range(self.num_layers):
            x_in = self.in_layers[i](x)
            x_in = self.dropout(x_in)
            if g is not None:
                # slice this layer's 2*hidden conditioning channels
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset +
                        2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)
            acts = fused_add_tanh_sigmoid_multiply(x_in, g_l,
                                                   n_channels_tensor)
            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.num_layers - 1:
                # first half: residual added to x; second half: skip output
                x = (x + res_skip_acts[:, :self.hidden_channels, :]) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        """Strip weight norm from all conv layers (e.g. before export)."""
        if self.c_in_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)


class WNBlocks(nn.Module):
    """Wavenet blocks.

    Note:
        After each block dilation resets to 1 and it increases in each block
        along the dilation rate.

    Args:
        in_channels (int): number of input channels.
        hidden_channels (int): number of hidden channels.
        kernel_size (int): filter kernel size for the first conv layer.
        dilation_rate (int): dilations rate to increase dilation per layer.
            If it is 2, dilations are 1, 2, 4, 8 for the next 4 layers.
        num_blocks (int): number of wavenet blocks.
        num_layers (int): number of wavenet layers.
        c_in_channels (int): number of channels of conditioning input.
        dropout_p (float): dropout rate.
        weight_norm (bool): enable/disable weight norm for convolution layers.
    """
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 num_blocks,
                 num_layers,
                 c_in_channels=0,
                 dropout_p=0,
                 weight_norm=True):
        super().__init__()
        self.wn_blocks = nn.ModuleList()
        for idx in range(num_blocks):
            # only the first block consumes in_channels; the rest chain on
            # the hidden width
            layer = WN(in_channels=in_channels if idx == 0 else hidden_channels,
                       hidden_channels=hidden_channels,
                       kernel_size=kernel_size,
                       dilation_rate=dilation_rate,
                       num_layers=num_layers,
                       c_in_channels=c_in_channels,
                       dropout_p=dropout_p,
                       weight_norm=weight_norm)
            self.wn_blocks.append(layer)

    def forward(self, x, x_mask, g=None):
        o = x
        for layer in self.wn_blocks:
            o = layer(o, x_mask, g)
        return o
7,028
40.347059
95
py
TTS
TTS-master/TTS/tts/layers/generic/normalization.py
import torch
from torch import nn


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-4):
        """Layer norm for the 2nd dimension of the input.

        Args:
            channels (int): number of channels (2nd dimension) of the input.
            eps (float): to prevent 0 division

        Shapes:
            - input: (B, C, T)
            - output: (B, C, T)
        """
        super().__init__()
        self.channels = channels
        self.eps = eps
        # learnable affine parameters; gamma deliberately initialized to 0.1
        # rather than 1.0
        self.gamma = nn.Parameter(torch.ones(1, channels, 1) * 0.1)
        self.beta = nn.Parameter(torch.zeros(1, channels, 1))

    def forward(self, x):
        # normalize over the channel dimension (dim=1)
        mean = torch.mean(x, 1, keepdim=True)
        variance = torch.mean((x - mean)**2, 1, keepdim=True)
        x = (x - mean) * torch.rsqrt(variance + self.eps)
        x = x * self.gamma + self.beta
        return x


class TemporalBatchNorm1d(nn.BatchNorm1d):
    """Normalize each channel separately over time and batch.

    Accepts inputs with channels in the last dimension and transposes to the
    (B, C, T) layout BatchNorm1d expects, then transposes back.
    """
    def __init__(self,
                 channels,
                 affine=True,
                 track_running_stats=True,
                 momentum=0.1):
        super().__init__(channels,
                         affine=affine,
                         track_running_stats=track_running_stats,
                         momentum=momentum)

    def forward(self, x):
        return super().forward(x.transpose(2, 1)).transpose(2, 1)


class ActNorm(nn.Module):
    """Activation Normalization bijector as an alternative to Batch Norm. It
    computes mean and std from a sample data in advance and it uses these
    values for normalization at training.

    Args:
        channels (int): input channels.
        ddi (False): data depended initialization flag.

    Shapes:
        - inputs: (B, C, T)
        - outputs: (B, C, T)
    """
    def __init__(self, channels, ddi=False, **kwargs):  # pylint: disable=unused-argument
        super().__init__()
        self.channels = channels
        # with ddi=True, mean/scale are estimated from the first batch seen
        self.initialized = not ddi

        self.logs = nn.Parameter(torch.zeros(1, channels, 1))
        self.bias = nn.Parameter(torch.zeros(1, channels, 1))

    def forward(self, x, x_mask=None, reverse=False, **kwargs):  # pylint: disable=unused-argument
        if x_mask is None:
            x_mask = torch.ones(x.size(0), 1, x.size(2)).to(device=x.device,
                                                            dtype=x.dtype)
        # valid (unmasked) length per batch element, used for the logdet
        x_len = torch.sum(x_mask, [1, 2])
        if not self.initialized:
            self.initialize(x, x_mask)
            self.initialized = True

        if reverse:
            # inverse transform; no logdet needed on the reverse pass
            z = (x - self.bias) * torch.exp(-self.logs) * x_mask
            logdet = None
        else:
            z = (self.bias + torch.exp(self.logs) * x) * x_mask
            # log-determinant of the affine transform per sample
            logdet = torch.sum(self.logs) * x_len  # [b]
        return z, logdet

    def store_inverse(self):
        pass

    def set_ddi(self, ddi):
        self.initialized = not ddi

    def initialize(self, x, x_mask):
        """Data-dependent init: set bias/logs so the first batch maps to
        (approximately) zero mean and unit variance."""
        with torch.no_grad():
            denom = torch.sum(x_mask, [0, 2])
            m = torch.sum(x * x_mask, [0, 2]) / denom
            m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom
            v = m_sq - (m**2)
            # clamp avoids log(0) for constant channels
            logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6))

            bias_init = (-m * torch.exp(-logs)).view(*self.bias.shape).to(
                dtype=self.bias.dtype)
            logs_init = (-logs).view(*self.logs.shape).to(
                dtype=self.logs.dtype)

            self.bias.data.copy_(bias_init)
            self.logs.data.copy_(logs_init)
3,553
32.214953
98
py
TTS
TTS-master/TTS/tts/layers/generic/time_depth_sep_conv.py
import torch
from torch import nn


class TimeDepthSeparableConv(nn.Module):
    """Time-depth separable convolution, https://arxiv.org/pdf/1904.02619.pdf

    Competitive results with a smaller compute and memory footprint. The
    block is residual: pointwise expand + GLU -> depthwise conv + swish ->
    pointwise project, added back onto the input.
    """
    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 kernel_size,
                 bias=True):
        super().__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hid_channels = hid_channels
        self.kernel_size = kernel_size

        # pointwise conv doubling channels so GLU halves them back
        self.time_conv = nn.Conv1d(in_channels,
                                   2 * hid_channels,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0,
                                   bias=bias)
        self.norm1 = nn.BatchNorm1d(2 * hid_channels)
        # depthwise (grouped) temporal convolution, "same" padding
        self.depth_conv = nn.Conv1d(hid_channels,
                                    hid_channels,
                                    kernel_size,
                                    stride=1,
                                    padding=(kernel_size - 1) // 2,
                                    groups=hid_channels,
                                    bias=bias)
        self.norm2 = nn.BatchNorm1d(hid_channels)
        # pointwise projection to the output width
        self.time_conv2 = nn.Conv1d(hid_channels,
                                    out_channels,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=bias)
        self.norm3 = nn.BatchNorm1d(out_channels)

    def forward(self, x):
        residual = x
        y = nn.functional.glu(self.norm1(self.time_conv(x)), dim=1)
        y = self.norm2(self.depth_conv(y))
        y = y * torch.sigmoid(y)  # swish activation
        y = self.norm3(self.time_conv2(y))
        return residual + y


class TimeDepthSeparableConvBlock(nn.Module):
    """Stack of ``num_layers`` TimeDepthSeparableConv layers with masking
    applied before each layer."""
    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 num_layers,
                 kernel_size,
                 bias=True):
        super().__init__()
        assert (kernel_size - 1) % 2 == 0
        assert num_layers > 1

        self.layers = nn.ModuleList()
        # first layer maps in_channels onto the hidden width
        self.layers.append(
            TimeDepthSeparableConv(
                in_channels, hid_channels,
                out_channels if num_layers == 1 else hid_channels,
                kernel_size, bias))
        for idx in range(num_layers - 1):
            is_last = (idx + 1) == (num_layers - 1)
            self.layers.append(
                TimeDepthSeparableConv(
                    hid_channels, hid_channels,
                    out_channels if is_last else hid_channels, kernel_size,
                    bias))

    def forward(self, x, mask):
        for block in self.layers:
            x = block(x * mask)
        return x
2,716
28.215054
82
py
TTS
TTS-master/TTS/tts/layers/glow_tts/encoder.py
import math import torch from torch import nn from TTS.tts.layers.glow_tts.transformer import RelativePositionTransformer from TTS.tts.layers.generic.gated_conv import GatedConvBlock from TTS.tts.utils.generic_utils import sequence_mask from TTS.tts.layers.glow_tts.glow import ResidualConv1dLayerNormBlock from TTS.tts.layers.glow_tts.duration_predictor import DurationPredictor from TTS.tts.layers.generic.time_depth_sep_conv import TimeDepthSeparableConvBlock from TTS.tts.layers.generic.res_conv_bn import ResidualConv1dBNBlock class Encoder(nn.Module): """Glow-TTS encoder module. embedding -> <prenet> -> encoder_module -> <postnet> --> proj_mean | |-> proj_var | |-> concat -> duration_predictor ↑ speaker_embed Args: num_chars (int): number of characters. out_channels (int): number of output channels. hidden_channels (int): encoder's embedding size. hidden_channels_ffn (int): transformer's feed-forward channels. kernel_size (int): kernel size for conv layers and duration predictor. dropout_p (float): dropout rate for any dropout layer. mean_only (bool): if True, output only mean values and use constant std. use_prenet (bool): if True, use pre-convolutional layers before transformer layers. c_in_channels (int): number of channels in conditional input. Shapes: - input: (B, T, C) Notes: suggested encoder params... 
for encoder_type == 'rel_pos_transformer' encoder_params={ 'kernel_size':3, 'dropout_p': 0.1, 'num_layers': 6, 'num_heads': 2, 'hidden_channels_ffn': 768, # 4 times the hidden_channels 'input_length': None } for encoder_type == 'gated_conv' encoder_params={ 'kernel_size':5, 'dropout_p': 0.1, 'num_layers': 9, } for encoder_type == 'residual_conv_bn' encoder_params={ "kernel_size": 4, "dilations": [1, 2, 4, 1, 2, 4, 1, 2, 4, 1, 2, 4, 1], "num_conv_blocks": 2, "num_res_blocks": 13 } for encoder_type == 'time_depth_separable' encoder_params={ "kernel_size": 5, 'num_layers': 9, } """ def __init__(self, num_chars, out_channels, hidden_channels, hidden_channels_dp, encoder_type, encoder_params, dropout_p_dp=0.1, mean_only=False, use_prenet=True, c_in_channels=0): super().__init__() # class arguments self.num_chars = num_chars self.out_channels = out_channels self.hidden_channels = hidden_channels self.hidden_channels_dp = hidden_channels_dp self.dropout_p_dp = dropout_p_dp self.mean_only = mean_only self.use_prenet = use_prenet self.c_in_channels = c_in_channels self.encoder_type = encoder_type # embedding layer self.emb = nn.Embedding(num_chars, hidden_channels) nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) # init encoder module if encoder_type.lower() == "rel_pos_transformer": if use_prenet: self.prenet = ResidualConv1dLayerNormBlock(hidden_channels, hidden_channels, hidden_channels, kernel_size=5, num_layers=3, dropout_p=0.5) self.encoder = RelativePositionTransformer(hidden_channels, hidden_channels, hidden_channels, **encoder_params) elif encoder_type.lower() == 'gated_conv': self.encoder = GatedConvBlock(hidden_channels, **encoder_params) elif encoder_type.lower() == 'residual_conv_bn': if use_prenet: self.prenet = nn.Sequential( nn.Conv1d(hidden_channels, hidden_channels, 1), nn.ReLU() ) self.encoder = ResidualConv1dBNBlock(hidden_channels, hidden_channels, hidden_channels, **encoder_params) self.postnet = nn.Sequential( 
nn.Conv1d(self.hidden_channels, self.hidden_channels, 1), nn.BatchNorm1d(self.hidden_channels)) elif encoder_type.lower() == 'time_depth_separable': if use_prenet: self.prenet = ResidualConv1dLayerNormBlock(hidden_channels, hidden_channels, hidden_channels, kernel_size=5, num_layers=3, dropout_p=0.5) self.encoder = TimeDepthSeparableConvBlock(hidden_channels, hidden_channels, hidden_channels, **encoder_params) else: raise ValueError(" [!] Unkown encoder type.") # final projection layers self.proj_m = nn.Conv1d(hidden_channels, out_channels, 1) if not mean_only: self.proj_s = nn.Conv1d(hidden_channels, out_channels, 1) # duration predictor self.duration_predictor = DurationPredictor( hidden_channels + c_in_channels, hidden_channels_dp, 3, dropout_p_dp) def forward(self, x, x_lengths, g=None): """ Shapes: x: [B, C, T] x_lengths: [B] g (optional): [B, 1, T] """ # embedding layer # [B ,T, D] x = self.emb(x) * math.sqrt(self.hidden_channels) # [B, D, T] x = torch.transpose(x, 1, -1) # compute input sequence mask x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) # prenet if hasattr(self, 'prenet') and self.use_prenet: x = self.prenet(x, x_mask) # encoder x = self.encoder(x, x_mask) # postnet if hasattr(self, 'postnet'): x = self.postnet(x) * x_mask # set duration predictor input if g is not None: g_exp = g.expand(-1, -1, x.size(-1)) x_dp = torch.cat([torch.detach(x), g_exp], 1) else: x_dp = torch.detach(x) # final projection layer x_m = self.proj_m(x) * x_mask if not self.mean_only: x_logs = self.proj_s(x) * x_mask else: x_logs = torch.zeros_like(x_m) # duration predictor logw = self.duration_predictor(x_dp, x_mask) return x_m, x_logs, logw, x_mask
7,749
40.44385
91
py
TTS
TTS-master/TTS/tts/layers/glow_tts/glow.py
import torch from torch import nn from torch.nn import functional as F from TTS.tts.layers.generic.wavenet import WN from ..generic.normalization import LayerNorm class ResidualConv1dLayerNormBlock(nn.Module): def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, num_layers, dropout_p): """Conv1d with Layer Normalization and residual connection as in GlowTTS paper. https://arxiv.org/pdf/1811.00002.pdf x |-> conv1d -> layer_norm -> relu -> dropout -> + -> o |---------------> conv1d_1x1 -----------------------| Args: in_channels (int): number of input tensor channels. hidden_channels (int): number of inner layer channels. out_channels (int): number of output tensor channels. kernel_size (int): kernel size of conv1d filter. num_layers (int): number of blocks. dropout_p (float): dropout rate for each block. """ super().__init__() self.in_channels = in_channels self.hidden_channels = hidden_channels self.out_channels = out_channels self.kernel_size = kernel_size self.num_layers = num_layers self.dropout_p = dropout_p assert num_layers > 1, " [!] number of layers should be > 0." assert kernel_size % 2 == 1, " [!] kernel size should be odd number." self.conv_layers = nn.ModuleList() self.norm_layers = nn.ModuleList() for idx in range(num_layers): self.conv_layers.append( nn.Conv1d(in_channels if idx == 0 else hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) self.norm_layers.append(LayerNorm(hidden_channels)) self.proj = nn.Conv1d(hidden_channels, out_channels, 1) self.proj.weight.data.zero_() self.proj.bias.data.zero_() def forward(self, x, x_mask): x_res = x for i in range(self.num_layers): x = self.conv_layers[i](x * x_mask) x = self.norm_layers[i](x * x_mask) x = F.dropout(F.relu(x), self.dropout_p, training=self.training) x = x_res + self.proj(x) return x * x_mask class InvConvNear(nn.Module): """Invertible Convolution with input splitting as in GlowTTS paper. 
https://arxiv.org/pdf/1811.00002.pdf Args: channels (int): input and output channels. num_splits (int): number of splits, also H and W of conv layer. no_jacobian (bool): enable/disable jacobian computations. Note: Split the input into groups of size self.num_splits and perform 1x1 convolution separately. Cast 1x1 conv operation to 2d by reshaping the input for efficiency. """ def __init__(self, channels, num_splits=4, no_jacobian=False, **kwargs): # pylint: disable=unused-argument super().__init__() assert num_splits % 2 == 0 self.channels = channels self.num_splits = num_splits self.no_jacobian = no_jacobian self.weight_inv = None w_init = torch.qr( torch.FloatTensor(self.num_splits, self.num_splits).normal_())[0] if torch.det(w_init) < 0: w_init[:, 0] = -1 * w_init[:, 0] self.weight = nn.Parameter(w_init) def forward(self, x, x_mask=None, reverse=False, **kwargs): # pylint: disable=unused-argument """ Shapes: x: B x C x T x_mask: B x 1 x T """ b, c, t = x.size() assert c % self.num_splits == 0 if x_mask is None: x_mask = 1 x_len = torch.ones((b, ), dtype=x.dtype, device=x.device) * t else: x_len = torch.sum(x_mask, [1, 2]) x = x.view(b, 2, c // self.num_splits, self.num_splits // 2, t) x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.num_splits, c // self.num_splits, t) if reverse: if self.weight_inv is not None: weight = self.weight_inv else: weight = torch.inverse( self.weight.float()).to(dtype=self.weight.dtype) logdet = None else: weight = self.weight if self.no_jacobian: logdet = 0 else: logdet = torch.logdet( self.weight) * (c / self.num_splits) * x_len # [b] weight = weight.view(self.num_splits, self.num_splits, 1, 1) z = F.conv2d(x, weight) z = z.view(b, 2, self.num_splits // 2, c // self.num_splits, t) z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask return z, logdet def store_inverse(self): weight_inv = torch.inverse( self.weight.float()).to(dtype=self.weight.dtype) self.weight_inv = nn.Parameter(weight_inv, requires_grad=False) 
class CouplingBlock(nn.Module): """Glow Affine Coupling block as in GlowTTS paper. https://arxiv.org/pdf/1811.00002.pdf x --> x0 -> conv1d -> wavenet -> conv1d --> t, s -> concat(s*x1 + t, x0) -> o '-> x1 - - - - - - - - - - - - - - - - - - - - - - - - - ^ Args: in_channels (int): number of input tensor channels. hidden_channels (int): number of hidden channels. kernel_size (int): WaveNet filter kernel size. dilation_rate (int): rate to increase dilation by each layer in a decoder block. num_layers (int): number of WaveNet layers. c_in_channels (int): number of conditioning input channels. dropout_p (int): wavenet dropout rate. sigmoid_scale (bool): enable/disable sigmoid scaling for output scale. Note: It does not use conditional inputs differently from WaveGlow. """ def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, num_layers, c_in_channels=0, dropout_p=0, sigmoid_scale=False): super().__init__() self.in_channels = in_channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.num_layers = num_layers self.c_in_channels = c_in_channels self.dropout_p = dropout_p self.sigmoid_scale = sigmoid_scale # input layer start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1) start = torch.nn.utils.weight_norm(start) self.start = start # output layer # Initializing last layer to 0 makes the affine coupling layers # do nothing at first. 
This helps with training stability end = torch.nn.Conv1d(hidden_channels, in_channels, 1) end.weight.data.zero_() end.bias.data.zero_() self.end = end # coupling layers self.wn = WN(in_channels, hidden_channels, kernel_size, dilation_rate, num_layers, c_in_channels, dropout_p) def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs): # pylint: disable=unused-argument """ Shapes: x: B x C x T x_mask: B x 1 x T g: B x C x 1 """ if x_mask is None: x_mask = 1 x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:] x = self.start(x_0) * x_mask x = self.wn(x, x_mask, g) out = self.end(x) z_0 = x_0 t = out[:, :self.in_channels // 2, :] s = out[:, self.in_channels // 2:, :] if self.sigmoid_scale: s = torch.log(1e-6 + torch.sigmoid(s + 2)) if reverse: z_1 = (x_1 - t) * torch.exp(-s) * x_mask logdet = None else: z_1 = (t + torch.exp(s) * x_1) * x_mask logdet = torch.sum(s * x_mask, [1, 2]) z = torch.cat([z_0, z_1], 1) return z, logdet def store_inverse(self): self.wn.remove_weight_norm()
8,196
35.923423
111
py
TTS
TTS-master/TTS/tts/layers/glow_tts/duration_predictor.py
import torch from torch import nn from ..generic.normalization import LayerNorm class DurationPredictor(nn.Module): """Glow-TTS duration prediction model. [2 x (conv1d_kxk -> relu -> layer_norm -> dropout)] -> conv1d_1x1 -> durs Args: in_channels ([type]): [description] hidden_channels ([type]): [description] kernel_size ([type]): [description] dropout_p ([type]): [description] """ def __init__(self, in_channels, hidden_channels, kernel_size, dropout_p): super().__init__() # class arguments self.in_channels = in_channels self.filter_channels = hidden_channels self.kernel_size = kernel_size self.dropout_p = dropout_p # layers self.drop = nn.Dropout(dropout_p) self.conv_1 = nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2) self.norm_1 = LayerNorm(hidden_channels) self.conv_2 = nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2) self.norm_2 = LayerNorm(hidden_channels) # output layer self.proj = nn.Conv1d(hidden_channels, 1, 1) def forward(self, x, x_mask): """ Shapes: x: [B, C, T] x_mask: [B, 1, T] Returns: [type]: [description] """ x = self.conv_1(x * x_mask) x = torch.relu(x) x = self.norm_1(x) x = self.drop(x) x = self.conv_2(x * x_mask) x = torch.relu(x) x = self.norm_2(x) x = self.drop(x) x = self.proj(x * x_mask) return x * x_mask
1,849
30.896552
77
py
TTS
TTS-master/TTS/tts/layers/glow_tts/transformer.py
import math import torch from torch import nn from torch.nn import functional as F from TTS.tts.layers.glow_tts.glow import LayerNorm class RelativePositionMultiHeadAttention(nn.Module): """Multi-head attention with Relative Positional embedding. https://arxiv.org/pdf/1809.04281.pdf It learns positional embeddings for a window of neighbours. For keys and values, it learns different set of embeddings. Key embeddings are agregated with the attention scores and value embeddings are aggregated with the output. Note: Example with relative attention window size 2 input = [a, b, c, d, e] rel_attn_embeddings = [e(t-2), e(t-1), e(t+1), e(t+2)] So it learns 4 embedding vectors (in total 8) separately for key and value vectors. Considering the input c e(t-2) corresponds to c -> a e(t-2) corresponds to c -> b e(t-2) corresponds to c -> d e(t-2) corresponds to c -> e These embeddings are shared among different time steps. So input a, b, d and e also uses the same embeddings. Embeddings are ignored when the relative window is out of limit for the first and the last n items. Args: channels (int): input and inner layer channels. out_channels (int): output channels. num_heads (int): number of attention heads. rel_attn_window_size (int, optional): relation attention window size. If 4, for each time step next and previous 4 time steps are attended. If default, relative encoding is disabled and it is a regular transformer. Defaults to None. heads_share (bool, optional): [description]. Defaults to True. dropout_p (float, optional): dropout rate. Defaults to 0.. input_length (int, optional): intput length for positional encoding. Defaults to None. proximal_bias (bool, optional): enable/disable proximal bias as in the paper. Defaults to False. proximal_init (bool, optional): enable/disable poximal init as in the paper. Init key and query layer weights the same. Defaults to False. 
""" def __init__(self, channels, out_channels, num_heads, rel_attn_window_size=None, heads_share=True, dropout_p=0., input_length=None, proximal_bias=False, proximal_init=False): super().__init__() assert channels % num_heads == 0, " [!] channels should be divisible by num_heads." # class attributes self.channels = channels self.out_channels = out_channels self.num_heads = num_heads self.rel_attn_window_size = rel_attn_window_size self.heads_share = heads_share self.input_length = input_length self.proximal_bias = proximal_bias self.dropout_p = dropout_p self.attn = None # query, key, value layers self.k_channels = channels // num_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) # output layers self.conv_o = nn.Conv1d(channels, out_channels, 1) self.dropout = nn.Dropout(dropout_p) # relative positional encoding layers if rel_attn_window_size is not None: n_heads_rel = 1 if heads_share else num_heads rel_stddev = self.k_channels**-0.5 emb_rel_k = nn.Parameter( torch.randn(n_heads_rel, rel_attn_window_size * 2 + 1, self.k_channels) * rel_stddev) emb_rel_v = nn.Parameter( torch.randn(n_heads_rel, rel_attn_window_size * 2 + 1, self.k_channels) * rel_stddev) self.register_parameter('emb_rel_k', emb_rel_k) self.register_parameter('emb_rel_v', emb_rel_v) # init layers nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) # proximal bias if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) self.conv_k.bias.data.copy_(self.conv_q.bias.data) nn.init.xavier_uniform_(self.conv_v.weight) def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) x, self.attn = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x def attention(self, query, key, value, mask=None): # reshape [b, d, t] -> [b, n_h, t, d_k] b, d, t_s, t_t = (*key.size(), query.size(2)) query = query.view(b, 
self.num_heads, self.k_channels, t_t).transpose(2, 3) key = key.view(b, self.num_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.num_heads, self.k_channels, t_s).transpose(2, 3) # compute raw attention scores scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt( self.k_channels) # relative positional encoding for scores if self.rel_attn_window_size is not None: assert t_s == t_t, "Relative attention is only available for self-attention." # get relative key embeddings key_relative_embeddings = self._get_relative_embeddings( self.emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys( query, key_relative_embeddings) rel_logits = self._relative_position_to_absolute_position( rel_logits) scores_local = rel_logits / math.sqrt(self.k_channels) scores = scores + scores_local # proximan bias if self.proximal_bias: assert t_s == t_t, "Proximal bias is only available for self-attention." scores = scores + self._attn_proximity_bias(t_s).to( device=scores.device, dtype=scores.dtype) # attention score masking if mask is not None: # add small value to prevent oor error. 
scores = scores.masked_fill(mask == 0, -1e4) if self.input_length is not None: block_mask = torch.ones_like(scores).triu( -1 * self.input_length).tril(self.input_length) scores = scores * block_mask + -1e4 * (1 - block_mask) # attention score normalization p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] # apply dropout to attention weights p_attn = self.dropout(p_attn) # compute output output = torch.matmul(p_attn, value) # relative positional encoding for values if self.rel_attn_window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings( self.emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view( b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] return output, p_attn @staticmethod def _matmul_with_relative_values(p_attn, re): """ Args: p_attn (Tensor): attention weights. re (Tensor): relative value embedding vector. (a_(i,j)^V) Shapes: p_attn: [B, H, T, V] re: [H or 1, V, D] logits: [B, H, T, D] """ logits = torch.matmul(p_attn, re.unsqueeze(0)) return logits @staticmethod def _matmul_with_relative_keys(query, re): """ Args: query (Tensor): batch of query vectors. (x*W^Q) re (Tensor): relative key embedding vector. (a_(i,j)^K) Shapes: query: [B, H, T, D] re: [H or 1, V, D] logits: [B, H, T, V] """ # logits = torch.einsum('bhld, kmd -> bhlm', [query, re.to(query.dtype)]) logits = torch.matmul(query, re.unsqueeze(0).transpose(-2, -1)) return logits def _get_relative_embeddings(self, relative_embeddings, length): """Convert embedding vestors to a tensor of embeddings """ # Pad first before slice to avoid using cond ops. 
pad_length = max(length - (self.rel_attn_window_size + 1), 0) slice_start_position = max((self.rel_attn_window_size + 1) - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad( relative_embeddings, [0, 0, pad_length, pad_length, 0, 0]) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position: slice_end_position] return used_relative_embeddings @staticmethod def _relative_position_to_absolute_position(x): """Converts tensor from relative to absolute indexing for local attention. Args: x: [B, D, length, 2 * length - 1] Returns: A Tensor of shape [B, D, length, length] """ batch, heads, length, _ = x.size() # Pad to shift from relative to absolute indexing. x = F.pad(x, [0, 1, 0, 0, 0, 0, 0, 0]) # Pad extra elements so to add up to shape (len+1, 2*len-1). x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad(x_flat, [0, length - 1, 0, 0, 0, 0]) # Reshape and slice out the padded elements. x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final @staticmethod def _absolute_position_to_relative_position(x): """ x: [B, H, T, T] ret: [B, H, T, 2*T-1] """ batch, heads, length, _ = x.size() # padd along column x = F.pad(x, [0, length - 1, 0, 0, 0, 0, 0, 0]) x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) # add 0's in the beginning that will skew the elements after reshape x_flat = F.pad(x_flat, [length, 0, 0, 0, 0, 0]) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final @staticmethod def _attn_proximity_bias(length): """Produce an attention mask that discourages distant attention values. Args: length (int): an integer scalar. 
Returns: a Tensor with shape [1, 1, length, length] """ # L r = torch.arange(length, dtype=torch.float32) # L x L diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) # scale mask values diff = -torch.log1p(torch.abs(diff)) # 1 x 1 x L x L return diff.unsqueeze(0).unsqueeze(0) class FeedForwardNetwork(nn.Module): """Feed Forward Inner layers for Transformer. Args: in_channels (int): input tensor channels. out_channels (int): output tensor channels. hidden_channels (int): inner layers hidden channels. kernel_size (int): conv1d filter kernel size. dropout_p (float, optional): dropout rate. Defaults to 0. """ def __init__(self, in_channels, out_channels, hidden_channels, kernel_size, dropout_p=0.): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dropout_p = dropout_p self.conv_1 = nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2) self.conv_2 = nn.Conv1d(hidden_channels, out_channels, kernel_size, padding=kernel_size // 2) self.dropout = nn.Dropout(dropout_p) def forward(self, x, x_mask): x = self.conv_1(x * x_mask) x = torch.relu(x) x = self.dropout(x) x = self.conv_2(x * x_mask) return x * x_mask class RelativePositionTransformer(nn.Module): """Transformer with Relative Potional Encoding. https://arxiv.org/abs/1803.02155 Args: in_channels (int): number of channels of the input tensor. out_chanels (int): number of channels of the output tensor. hidden_channels (int): model hidden channels. hidden_channels_ffn (int): hidden channels of FeedForwardNetwork. num_heads (int): number of attention heads. num_layers (int): number of transformer layers. kernel_size (int, optional): kernel size of feed-forward inner layers. Defaults to 1. dropout_p (float, optional): dropout rate for self-attention and feed-forward inner layers_per_stack. Defaults to 0. rel_attn_window_size (int, optional): relation attention window size. 
If 4, for each time step next and previous 4 time steps are attended. If default, relative encoding is disabled and it is a regular transformer. Defaults to None. input_length (int, optional): input lenght to limit position encoding. Defaults to None. """ def __init__(self, in_channels, out_channels, hidden_channels, hidden_channels_ffn, num_heads, num_layers, kernel_size=1, dropout_p=0., rel_attn_window_size=None, input_length=None): super().__init__() self.hidden_channels = hidden_channels self.hidden_channels_ffn = hidden_channels_ffn self.num_heads = num_heads self.num_layers = num_layers self.kernel_size = kernel_size self.dropout_p = dropout_p self.rel_attn_window_size = rel_attn_window_size self.dropout = nn.Dropout(dropout_p) self.attn_layers = nn.ModuleList() self.norm_layers_1 = nn.ModuleList() self.ffn_layers = nn.ModuleList() self.norm_layers_2 = nn.ModuleList() for idx in range(self.num_layers): self.attn_layers.append( RelativePositionMultiHeadAttention( hidden_channels if idx != 0 else in_channels, hidden_channels, num_heads, rel_attn_window_size=rel_attn_window_size, dropout_p=dropout_p, input_length=input_length)) self.norm_layers_1.append(LayerNorm(hidden_channels)) if hidden_channels != out_channels and (idx + 1) == self.num_layers: self.proj = nn.Conv1d(hidden_channels, out_channels, 1) self.ffn_layers.append( FeedForwardNetwork(hidden_channels, hidden_channels if (idx + 1) != self.num_layers else out_channels, hidden_channels_ffn, kernel_size, dropout_p=dropout_p)) self.norm_layers_2.append( LayerNorm(hidden_channels if ( idx + 1) != self.num_layers else out_channels)) def forward(self, x, x_mask): """ Shapes: x: [B, C, T] x_mask: [B, 1, T] """ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) for i in range(self.num_layers): x = x * x_mask y = self.attn_layers[i](x, x, attn_mask) y = self.dropout(y) x = self.norm_layers_1[i](x + y) y = self.ffn_layers[i](x, x_mask) y = self.dropout(y) if (i + 1) == self.num_layers and hasattr(self, 
'proj'): x = self.proj(x) x = self.norm_layers_2[i](x + y) x = x * x_mask return x
16,544
40.259352
128
py
TTS
TTS-master/TTS/tts/layers/glow_tts/decoder.py
import torch from torch import nn from TTS.tts.layers.glow_tts.glow import InvConvNear, CouplingBlock from TTS.tts.layers.generic.normalization import ActNorm def squeeze(x, x_mask=None, num_sqz=2): """GlowTTS squeeze operation Increase number of channels and reduce number of time steps by the same factor. Note: each 's' is a n-dimensional vector. [s1,s2,s3,s4,s5,s6] --> [[s1, s3, s5], [s2, s4, s6]]""" b, c, t = x.size() t = (t // num_sqz) * num_sqz x = x[:, :, :t] x_sqz = x.view(b, c, t // num_sqz, num_sqz) x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * num_sqz, t // num_sqz) if x_mask is not None: x_mask = x_mask[:, :, num_sqz - 1::num_sqz] else: x_mask = torch.ones(b, 1, t // num_sqz).to(device=x.device, dtype=x.dtype) return x_sqz * x_mask, x_mask def unsqueeze(x, x_mask=None, num_sqz=2): """GlowTTS unsqueeze operation Note: each 's' is a n-dimensional vector. [[s1, s3, s5], [s2, s4, s6]] --> [[s1, s3, s5], [s2, s4, s6]] """ b, c, t = x.size() x_unsqz = x.view(b, num_sqz, c // num_sqz, t) x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // num_sqz, t * num_sqz) if x_mask is not None: x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, num_sqz).view(b, 1, t * num_sqz) else: x_mask = torch.ones(b, 1, t * num_sqz).to(device=x.device, dtype=x.dtype) return x_unsqz * x_mask, x_mask class Decoder(nn.Module): """Stack of Glow Decoder Modules. Squeeze -> ActNorm -> InvertibleConv1x1 -> AffineCoupling -> Unsqueeze Args: in_channels (int): channels of input tensor. hidden_channels (int): hidden decoder channels. kernel_size (int): Coupling block kernel size. (Wavenet filter kernel size.) dilation_rate (int): rate to increase dilation by each layer in a decoder block. num_flow_blocks (int): number of decoder blocks. num_coupling_layers (int): number coupling layers. (number of wavenet layers.) dropout_p (float): wavenet dropout rate. sigmoid_scale (bool): enable/disable sigmoid scaling in coupling layer. 
""" def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, num_flow_blocks, num_coupling_layers, dropout_p=0., num_splits=4, num_squeeze=2, sigmoid_scale=False, c_in_channels=0): super().__init__() self.in_channels = in_channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.dilation_rate = dilation_rate self.num_flow_blocks = num_flow_blocks self.num_coupling_layers = num_coupling_layers self.dropout_p = dropout_p self.num_splits = num_splits self.num_squeeze = num_squeeze self.sigmoid_scale = sigmoid_scale self.c_in_channels = c_in_channels self.flows = nn.ModuleList() for _ in range(num_flow_blocks): self.flows.append(ActNorm(channels=in_channels * num_squeeze)) self.flows.append( InvConvNear(channels=in_channels * num_squeeze, num_splits=num_splits)) self.flows.append( CouplingBlock(in_channels * num_squeeze, hidden_channels, kernel_size=kernel_size, dilation_rate=dilation_rate, num_layers=num_coupling_layers, c_in_channels=c_in_channels, dropout_p=dropout_p, sigmoid_scale=sigmoid_scale)) def forward(self, x, x_mask, g=None, reverse=False): if not reverse: flows = self.flows logdet_tot = 0 else: flows = reversed(self.flows) logdet_tot = None if self.num_squeeze > 1: x, x_mask = squeeze(x, x_mask, self.num_squeeze) for f in flows: if not reverse: x, logdet = f(x, x_mask, g=g, reverse=reverse) logdet_tot += logdet else: x, logdet = f(x, x_mask, g=g, reverse=reverse) if self.num_squeeze > 1: x, x_mask = unsqueeze(x, x_mask, self.num_squeeze) return x, logdet_tot def store_inverse(self): for f in self.flows: f.store_inverse()
4,876
35.669173
88
py
TTS
TTS-master/TTS/tts/layers/glow_tts/monotonic_align/__init__.py
import numpy as np
import torch
from torch.nn import functional as F

from TTS.tts.utils.generic_utils import sequence_mask

try:
    # TODO: fix pypi cython installation problem.
    from TTS.tts.layers.glow_tts.monotonic_align.core import maximum_path_c
    CYTHON = True
except ModuleNotFoundError:
    CYTHON = False


def convert_pad_shape(pad_shape):
    """Flatten a reversed list of [before, after] pad pairs into the flat
    format expected by ``torch.nn.functional.pad``."""
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def generate_path(duration, mask):
    """Expand per-token durations into a binary alignment path.

    Args:
        duration: [b, t_x] integer durations per input token.
        mask: [b, t_x, t_y] attention mask.

    Returns:
        [b, t_x, t_y] binary path tensor.
    """
    device = duration.device
    b, t_x, t_y = mask.shape
    cum_duration = torch.cumsum(duration, 1)
    path = torch.zeros(b, t_x, t_y, dtype=mask.dtype).to(device=device)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    # Subtract the shifted cumulative mask so each row keeps only its own span.
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path * mask
    return path


def maximum_path(value, mask):
    """Dispatch monotonic alignment search to the Cython kernel when it is
    available, otherwise fall back to the NumPy implementation."""
    if CYTHON:
        return maximum_path_cython(value, mask)
    return maximum_path_numpy(value, mask)


def maximum_path_cython(value, mask):
    """Cython optimised version.
    value: [b, t_x, t_y]
    mask: [b, t_x, t_y]
    """
    value = value * mask
    device = value.device
    dtype = value.dtype
    value = value.data.cpu().numpy().astype(np.float32)
    path = np.zeros_like(value).astype(np.int32)
    mask = mask.data.cpu().numpy()

    t_x_max = mask.sum(1)[:, 0].astype(np.int32)
    t_y_max = mask.sum(2)[:, 0].astype(np.int32)
    maximum_path_c(path, value, t_x_max, t_y_max)
    return torch.from_numpy(path).to(device=device, dtype=dtype)


def maximum_path_numpy(value, mask, max_neg_val=None):
    """
    Monotonic alignment search algorithm
    Numpy-friendly version. It's about 4 times faster than torch version.
    value: [b, t_x, t_y]
    mask: [b, t_x, t_y]
    """
    if max_neg_val is None:
        max_neg_val = -np.inf  # Patch for Sphinx complaint
    value = value * mask

    device = value.device
    dtype = value.dtype
    value = value.cpu().detach().numpy()
    # NOTE: `np.bool` alias was removed in NumPy 1.24; use builtin `bool`.
    mask = mask.cpu().detach().numpy().astype(bool)

    b, t_x, t_y = value.shape
    direction = np.zeros(value.shape, dtype=np.int64)
    v = np.zeros((b, t_x), dtype=np.float32)
    x_range = np.arange(t_x, dtype=np.float32).reshape(1, -1)
    # Forward pass: dynamic-programming over target steps.
    for j in range(t_y):
        v0 = np.pad(v, [[0, 0], [1, 0]],
                    mode="constant",
                    constant_values=max_neg_val)[:, :-1]
        v1 = v
        max_mask = v1 >= v0
        v_max = np.where(max_mask, v1, v0)
        direction[:, :, j] = max_mask

        index_mask = x_range <= j
        v = np.where(index_mask, v_max + value[:, :, j], max_neg_val)
    direction = np.where(mask, direction, 1)

    # Backward pass: trace the best path from the last valid position.
    path = np.zeros(value.shape, dtype=np.float32)
    index = mask[:, :, 0].sum(1).astype(np.int64) - 1
    index_range = np.arange(b)
    for j in reversed(range(t_y)):
        path[index_range, index, j] = 1
        index = index + direction[index_range, index, j] - 1
    path = path * mask.astype(np.float32)
    path = torch.from_numpy(path).to(device=device, dtype=dtype)
    return path
3,237
30.134615
94
py
TTS
TTS-master/TTS/tts/utils/ssim.py
# taken from https://github.com/Po-Hsun-Su/pytorch-ssim
from math import exp

import torch
import torch.nn.functional as F


def gaussian(window_size, sigma):
    """Return a 1D Gaussian kernel of length ``window_size`` normalised to sum to 1."""
    gauss = torch.Tensor([
        exp(-(x - window_size // 2)**2 / float(2 * sigma**2))
        for x in range(window_size)
    ])
    return gauss / gauss.sum()


def create_window(window_size, channel):
    """Build a [channel, 1, window_size, window_size] 2D Gaussian window
    usable as a depthwise (grouped) convolution kernel."""
    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
    # NOTE: torch.autograd.Variable is a no-op since PyTorch 0.4 and
    # deprecated; a plain tensor behaves identically here.
    window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
    return window


def _ssim(img1, img2, window, window_size, channel, size_average=True):
    """Compute SSIM between two image batches using a precomputed window.

    Local means/variances are estimated with a depthwise Gaussian filter
    (``groups=channel``), following Wang et al. (2004).
    """
    mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
    mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2

    # Stabilising constants from the SSIM paper (L = 1 assumed).
    C1 = 0.01**2
    C2 = 0.03**2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))

    if size_average:
        return ssim_map.mean()
    return ssim_map.mean(1).mean(1).mean(1)


class SSIM(torch.nn.Module):
    """SSIM as a module that caches the Gaussian window between calls."""

    def __init__(self, window_size=11, size_average=True):
        super().__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()

        # Rebuild the window only when the channel count or dtype changes.
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            window = create_window(self.window_size, channel)

            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)

            self.window = window
            self.channel = channel

        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)


def ssim(img1, img2, window_size=11, size_average=True):
    """Functional SSIM between two [B, C, H, W] batches."""
    (_, channel, _, _) = img1.size()
    window = create_window(window_size, channel)

    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)

    return _ssim(img1, img2, window, window_size, channel, size_average)
2,619
33.473684
104
py
TTS
TTS-master/TTS/tts/utils/generic_utils.py
import re
import importlib
from collections import Counter

import numpy as np
import torch

from TTS.utils.generic_utils import check_argument


def split_dataset(items):
    """Split dataset items into eval and train subsets.

    For multi-speaker data, eval samples are drawn so that no speaker is
    fully removed from the training split. NOTE: shuffles and mutates
    ``items`` in place.
    """
    speakers = [item[-1] for item in items]
    is_multi_speaker = len(set(speakers)) > 1
    eval_split_size = min(500, int(len(items) * 0.01))
    assert eval_split_size > 0, " [!] You do not have enough samples to train. You need at least 100 samples."
    np.random.seed(0)
    np.random.shuffle(items)
    if is_multi_speaker:
        items_eval = []
        # recompute after shuffling
        speakers = [item[-1] for item in items]
        speaker_counter = Counter(speakers)
        while len(items_eval) < eval_split_size:
            item_idx = np.random.randint(0, len(items))
            speaker_to_be_removed = items[item_idx][-1]
            # only take the sample if its speaker keeps at least one train item
            if speaker_counter[speaker_to_be_removed] > 1:
                items_eval.append(items[item_idx])
                speaker_counter[speaker_to_be_removed] -= 1
                del items[item_idx]
        return items_eval, items
    return items[:eval_split_size], items[eval_split_size:]


# from https://gist.github.com/jihunchoi/f1434a77df9db1bb337417854b398df1
def sequence_mask(sequence_length, max_len=None):
    """Return a [B, T_max] boolean mask that is True inside each sequence."""
    if max_len is None:
        max_len = sequence_length.data.max()
    seq_range = torch.arange(max_len,
                             dtype=sequence_length.dtype,
                             device=sequence_length.device)
    # B x T_max
    return seq_range.unsqueeze(0) < sequence_length.unsqueeze(1)


def to_camel(text):
    """Convert a snake_case model name to CamelCase (e.g. glow_tts -> GlowTts)."""
    text = text.capitalize()
    return re.sub(r'(?!^)_([a-zA-Z])', lambda m: m.group(1).upper(), text)


def setup_model(num_chars, num_speakers, c, speaker_embedding_dim=None):
    """Instantiate the TTS model class named by ``c.model`` with config values.

    Args:
        num_chars (int): size of the input symbol vocabulary.
        num_speakers (int): number of speakers for multi-speaker models.
        c (dict-like): training configuration.
        speaker_embedding_dim (int, optional): external speaker embedding size.

    Returns:
        The constructed model instance.
    """
    print(" > Using model: {}".format(c.model))
    MyModel = importlib.import_module('TTS.tts.models.' + c.model.lower())
    MyModel = getattr(MyModel, to_camel(c.model))
    # BUGFIX: was `c.model.lower() in "tacotron"`, a substring test that also
    # matched "" or "taco"; exact comparison is intended.
    if c.model.lower() == "tacotron":
        model = MyModel(num_chars=num_chars + getattr(c, "add_blank", False),
                        num_speakers=num_speakers,
                        r=c.r,
                        postnet_output_dim=int(c.audio['fft_size'] / 2 + 1),
                        decoder_output_dim=c.audio['num_mels'],
                        gst=c.use_gst,
                        gst_embedding_dim=c.gst['gst_embedding_dim'],
                        gst_num_heads=c.gst['gst_num_heads'],
                        gst_style_tokens=c.gst['gst_style_tokens'],
                        gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding'],
                        memory_size=c.memory_size,
                        attn_type=c.attention_type,
                        attn_win=c.windowing,
                        attn_norm=c.attention_norm,
                        prenet_type=c.prenet_type,
                        prenet_dropout=c.prenet_dropout,
                        forward_attn=c.use_forward_attn,
                        trans_agent=c.transition_agent,
                        forward_attn_mask=c.forward_attn_mask,
                        location_attn=c.location_attn,
                        attn_K=c.attention_heads,
                        separate_stopnet=c.separate_stopnet,
                        bidirectional_decoder=c.bidirectional_decoder,
                        double_decoder_consistency=c.double_decoder_consistency,
                        ddc_r=c.ddc_r,
                        speaker_embedding_dim=speaker_embedding_dim)
    elif c.model.lower() == "tacotron2":
        model = MyModel(num_chars=num_chars + getattr(c, "add_blank", False),
                        num_speakers=num_speakers,
                        r=c.r,
                        postnet_output_dim=c.audio['num_mels'],
                        decoder_output_dim=c.audio['num_mels'],
                        gst=c.use_gst,
                        gst_embedding_dim=c.gst['gst_embedding_dim'],
                        gst_num_heads=c.gst['gst_num_heads'],
                        gst_style_tokens=c.gst['gst_style_tokens'],
                        gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding'],
                        attn_type=c.attention_type,
                        attn_win=c.windowing,
                        attn_norm=c.attention_norm,
                        prenet_type=c.prenet_type,
                        prenet_dropout=c.prenet_dropout,
                        forward_attn=c.use_forward_attn,
                        trans_agent=c.transition_agent,
                        forward_attn_mask=c.forward_attn_mask,
                        location_attn=c.location_attn,
                        attn_K=c.attention_heads,
                        separate_stopnet=c.separate_stopnet,
                        bidirectional_decoder=c.bidirectional_decoder,
                        double_decoder_consistency=c.double_decoder_consistency,
                        ddc_r=c.ddc_r,
                        speaker_embedding_dim=speaker_embedding_dim)
    elif c.model.lower() == "glow_tts":
        model = MyModel(num_chars=num_chars + getattr(c, "add_blank", False),
                        hidden_channels_enc=c['hidden_channels_encoder'],
                        hidden_channels_dec=c['hidden_channels_decoder'],
                        hidden_channels_dp=c['hidden_channels_duration_predictor'],
                        out_channels=c.audio['num_mels'],
                        encoder_type=c.encoder_type,
                        encoder_params=c.encoder_params,
                        use_encoder_prenet=c["use_encoder_prenet"],
                        num_flow_blocks_dec=12,
                        kernel_size_dec=5,
                        dilation_rate=1,
                        num_block_layers=4,
                        dropout_p_dec=0.05,
                        num_speakers=num_speakers,
                        c_in_channels=0,
                        num_splits=4,
                        num_squeeze=2,
                        sigmoid_scale=False,
                        mean_only=True,
                        external_speaker_embedding_dim=speaker_embedding_dim)
    elif c.model.lower() == "speedy_speech":
        model = MyModel(num_chars=num_chars + getattr(c, "add_blank", False),
                        out_channels=c.audio['num_mels'],
                        hidden_channels=c['hidden_channels'],
                        positional_encoding=c['positional_encoding'],
                        encoder_type=c['encoder_type'],
                        encoder_params=c['encoder_params'],
                        decoder_type=c['decoder_type'],
                        decoder_params=c['decoder_params'],
                        c_in_channels=0)
    return model


def is_tacotron(c):
    """True for Tacotron-family models (i.e. not speedy_speech / glow_tts)."""
    return c['model'] not in ['speedy_speech', 'glow_tts']


def check_config_tts(c):
    """Validate a TTS training configuration, raising on missing or
    out-of-range values."""
    check_argument('model', c, enum_list=['tacotron', 'tacotron2', 'glow_tts', 'speedy_speech'], restricted=True, val_type=str)
    check_argument('run_name', c, restricted=True, val_type=str)
    check_argument('run_description', c, val_type=str)

    # AUDIO
    check_argument('audio', c, restricted=True, val_type=dict)

    # audio processing parameters
    check_argument('num_mels', c['audio'], restricted=True, val_type=int, min_val=10, max_val=2056)
    check_argument('fft_size', c['audio'], restricted=True, val_type=int, min_val=128, max_val=4058)
    check_argument('sample_rate', c['audio'], restricted=True, val_type=int, min_val=512, max_val=100000)
    check_argument('frame_length_ms', c['audio'], restricted=True, val_type=float, min_val=10, max_val=1000, alternative='win_length')
    check_argument('frame_shift_ms', c['audio'], restricted=True, val_type=float, min_val=1, max_val=1000, alternative='hop_length')
    check_argument('preemphasis', c['audio'], restricted=True, val_type=float, min_val=0, max_val=1)
    check_argument('min_level_db', c['audio'], restricted=True, val_type=int, min_val=-1000, max_val=10)
    check_argument('ref_level_db', c['audio'], restricted=True, val_type=int, min_val=0, max_val=1000)
    check_argument('power', c['audio'], restricted=True, val_type=float, min_val=1, max_val=5)
    check_argument('griffin_lim_iters', c['audio'], restricted=True, val_type=int, min_val=10, max_val=1000)

    # vocabulary parameters
    check_argument('characters', c, restricted=False, val_type=dict)
    check_argument('pad', c['characters'] if 'characters' in c.keys() else {}, restricted='characters' in c.keys(), val_type=str)
    check_argument('eos', c['characters'] if 'characters' in c.keys() else {}, restricted='characters' in c.keys(), val_type=str)
    check_argument('bos', c['characters'] if 'characters' in c.keys() else {}, restricted='characters' in c.keys(), val_type=str)
    check_argument('characters', c['characters'] if 'characters' in c.keys() else {}, restricted='characters' in c.keys(), val_type=str)
    check_argument('phonemes', c['characters'] if 'characters' in c.keys() else {}, restricted='characters' in c.keys(), val_type=str)
    check_argument('punctuations', c['characters'] if 'characters' in c.keys() else {}, restricted='characters' in c.keys(), val_type=str)

    # normalization parameters
    check_argument('signal_norm', c['audio'], restricted=True, val_type=bool)
    check_argument('symmetric_norm', c['audio'], restricted=True, val_type=bool)
    check_argument('max_norm', c['audio'], restricted=True, val_type=float, min_val=0.1, max_val=1000)
    check_argument('clip_norm', c['audio'], restricted=True, val_type=bool)
    check_argument('mel_fmin', c['audio'], restricted=True, val_type=float, min_val=0.0, max_val=1000)
    check_argument('mel_fmax', c['audio'], restricted=True, val_type=float, min_val=500.0)
    check_argument('spec_gain', c['audio'], restricted=True, val_type=[int, float], min_val=1, max_val=100)
    check_argument('do_trim_silence', c['audio'], restricted=True, val_type=bool)
    check_argument('trim_db', c['audio'], restricted=True, val_type=int)

    # training parameters
    check_argument('batch_size', c, restricted=True, val_type=int, min_val=1)
    check_argument('eval_batch_size', c, restricted=True, val_type=int, min_val=1)
    check_argument('r', c, restricted=True, val_type=int, min_val=1)
    check_argument('gradual_training', c, restricted=False, val_type=list)
    check_argument('mixed_precision', c, restricted=False, val_type=bool)
    # check_argument('grad_accum', c, restricted=True, val_type=int, min_val=1, max_val=100)

    # loss parameters
    check_argument('loss_masking', c, restricted=True, val_type=bool)
    if c['model'].lower() in ['tacotron', 'tacotron2']:
        check_argument('decoder_loss_alpha', c, restricted=True, val_type=float, min_val=0)
        check_argument('postnet_loss_alpha', c, restricted=True, val_type=float, min_val=0)
        check_argument('postnet_diff_spec_alpha', c, restricted=True, val_type=float, min_val=0)
        check_argument('decoder_diff_spec_alpha', c, restricted=True, val_type=float, min_val=0)
        check_argument('decoder_ssim_alpha', c, restricted=True, val_type=float, min_val=0)
        check_argument('postnet_ssim_alpha', c, restricted=True, val_type=float, min_val=0)
        check_argument('ga_alpha', c, restricted=True, val_type=float, min_val=0)
    # BUGFIX: was `c['model'].lower == "speedy_speech"` (missing call
    # parentheses), which compared a bound method to a string and was
    # therefore always False, silently skipping these checks.
    if c['model'].lower() == "speedy_speech":
        check_argument('ssim_alpha', c, restricted=True, val_type=float, min_val=0)
        check_argument('l1_alpha', c, restricted=True, val_type=float, min_val=0)
        check_argument('huber_alpha', c, restricted=True, val_type=float, min_val=0)

    # validation parameters
    check_argument('run_eval', c, restricted=True, val_type=bool)
    check_argument('test_delay_epochs', c, restricted=True, val_type=int, min_val=0)
    check_argument('test_sentences_file', c, restricted=False, val_type=str)

    # optimizer
    check_argument('noam_schedule', c, restricted=False, val_type=bool)
    check_argument('grad_clip', c, restricted=True, val_type=float, min_val=0.0)
    check_argument('epochs', c, restricted=True, val_type=int, min_val=1)
    check_argument('lr', c, restricted=True, val_type=float, min_val=0)
    check_argument('wd', c, restricted=is_tacotron(c), val_type=float, min_val=0)
    check_argument('warmup_steps', c, restricted=True, val_type=int, min_val=0)
    check_argument('seq_len_norm', c, restricted=is_tacotron(c), val_type=bool)

    # tacotron prenet
    check_argument('memory_size', c, restricted=is_tacotron(c), val_type=int, min_val=-1)
    check_argument('prenet_type', c, restricted=is_tacotron(c), val_type=str, enum_list=['original', 'bn'])
    check_argument('prenet_dropout', c, restricted=is_tacotron(c), val_type=bool)

    # attention
    check_argument('attention_type', c, restricted=is_tacotron(c), val_type=str, enum_list=['graves', 'original', 'dynamic_convolution'])
    check_argument('attention_heads', c, restricted=is_tacotron(c), val_type=int)
    check_argument('attention_norm', c, restricted=is_tacotron(c), val_type=str, enum_list=['sigmoid', 'softmax'])
    check_argument('windowing', c, restricted=is_tacotron(c), val_type=bool)
    check_argument('use_forward_attn', c, restricted=is_tacotron(c), val_type=bool)
    check_argument('forward_attn_mask', c, restricted=is_tacotron(c), val_type=bool)
    # NOTE: the original checked 'transition_agent' twice; the duplicate is dropped.
    check_argument('transition_agent', c, restricted=is_tacotron(c), val_type=bool)
    check_argument('location_attn', c, restricted=is_tacotron(c), val_type=bool)
    check_argument('bidirectional_decoder', c, restricted=is_tacotron(c), val_type=bool)
    check_argument('double_decoder_consistency', c, restricted=is_tacotron(c), val_type=bool)
    check_argument('ddc_r', c, restricted='double_decoder_consistency' in c.keys(), min_val=1, max_val=7, val_type=int)

    if c['model'].lower() in ['tacotron', 'tacotron2']:
        # stopnet
        check_argument('stopnet', c, restricted=is_tacotron(c), val_type=bool)
        check_argument('separate_stopnet', c, restricted=is_tacotron(c), val_type=bool)

    # Model Parameters for non-tacotron models
    # BUGFIX: same missing-call bug as above (`.lower` vs `.lower()`).
    if c['model'].lower() == "speedy_speech":
        check_argument('positional_encoding', c, restricted=True, val_type=type)
        check_argument('encoder_type', c, restricted=True, val_type=str)
        check_argument('encoder_params', c, restricted=True, val_type=dict)
        check_argument('decoder_residual_conv_bn_params', c, restricted=True, val_type=dict)

    # GlowTTS parameters
    check_argument('encoder_type', c, restricted=not is_tacotron(c), val_type=str)

    # tensorboard
    check_argument('print_step', c, restricted=True, val_type=int, min_val=1)
    check_argument('tb_plot_step', c, restricted=True, val_type=int, min_val=1)
    check_argument('save_step', c, restricted=True, val_type=int, min_val=1)
    check_argument('checkpoint', c, restricted=True, val_type=bool)
    check_argument('tb_model_param_stats', c, restricted=True, val_type=bool)

    # dataloading
    # pylint: disable=import-outside-toplevel
    from TTS.tts.utils.text import cleaners
    check_argument('text_cleaner', c, restricted=True, val_type=str, enum_list=dir(cleaners))
    check_argument('enable_eos_bos_chars', c, restricted=True, val_type=bool)
    check_argument('num_loader_workers', c, restricted=True, val_type=int, min_val=0)
    check_argument('num_val_loader_workers', c, restricted=True, val_type=int, min_val=0)
    check_argument('batch_group_size', c, restricted=True, val_type=int, min_val=0)
    check_argument('min_seq_len', c, restricted=True, val_type=int, min_val=0)
    check_argument('max_seq_len', c, restricted=True, val_type=int, min_val=10)
    check_argument('compute_input_seq_cache', c, restricted=True, val_type=bool)

    # paths
    check_argument('output_path', c, restricted=True, val_type=str)

    # multi-speaker and gst
    check_argument('use_speaker_embedding', c, restricted=True, val_type=bool)
    check_argument('use_external_speaker_embedding_file', c, restricted=c['use_speaker_embedding'], val_type=bool)
    check_argument('external_speaker_embedding_file', c, restricted=c['use_external_speaker_embedding_file'], val_type=str)
    if c['model'].lower() in ['tacotron', 'tacotron2'] and c['use_gst']:
        check_argument('use_gst', c, restricted=is_tacotron(c), val_type=bool)
        check_argument('gst', c, restricted=is_tacotron(c), val_type=dict)
        check_argument('gst_style_input', c['gst'], restricted=is_tacotron(c), val_type=[str, dict])
        check_argument('gst_embedding_dim', c['gst'], restricted=is_tacotron(c), val_type=int, min_val=0, max_val=1000)
        check_argument('gst_use_speaker_embedding', c['gst'], restricted=is_tacotron(c), val_type=bool)
        check_argument('gst_num_heads', c['gst'], restricted=is_tacotron(c), val_type=int, min_val=2, max_val=10)
        check_argument('gst_style_tokens', c['gst'], restricted=is_tacotron(c), val_type=int, min_val=1, max_val=1000)

    # datasets - checking only the first entry
    check_argument('datasets', c, restricted=True, val_type=list)
    for dataset_entry in c['datasets']:
        check_argument('name', dataset_entry, restricted=True, val_type=str)
        check_argument('path', dataset_entry, restricted=True, val_type=str)
        check_argument('meta_file_train', dataset_entry, restricted=True, val_type=[str, list])
        check_argument('meta_file_val', dataset_entry, restricted=True, val_type=str)
17,513
58.774744
138
py
TTS
TTS-master/TTS/tts/utils/synthesis.py
import pkg_resources
installed = {pkg.key for pkg in pkg_resources.working_set}  #pylint: disable=not-an-iterable
if 'tensorflow' in installed or 'tensorflow-gpu' in installed:
    import tensorflow as tf
import torch
import numpy as np

from .text import text_to_sequence, phoneme_to_sequence


def text_to_seqvec(text, CONFIG):
    """Convert raw text (or its phonemes) to an int32 symbol-id vector."""
    text_cleaner = [CONFIG.text_cleaner]
    characters = CONFIG.characters if 'characters' in CONFIG.keys() else None
    add_blank = CONFIG['add_blank'] if 'add_blank' in CONFIG.keys() else False
    # text or phonemes to sequence vector
    if CONFIG.use_phonemes:
        seq = np.asarray(phoneme_to_sequence(text,
                                             text_cleaner,
                                             CONFIG.phoneme_language,
                                             CONFIG.enable_eos_bos_chars,
                                             tp=characters,
                                             add_blank=add_blank),
                         dtype=np.int32)
    else:
        seq = np.asarray(text_to_sequence(text,
                                          text_cleaner,
                                          tp=characters,
                                          add_blank=add_blank),
                         dtype=np.int32)
    return seq


def numpy_to_torch(np_array, dtype, cuda=False):
    """Wrap a numpy array as a torch tensor, optionally moving it to GPU."""
    if np_array is None:
        return None
    tensor = torch.as_tensor(np_array, dtype=dtype)
    return tensor.cuda() if cuda else tensor


def numpy_to_tf(np_array, dtype):
    """Wrap a numpy array as a TF tensor; passes None through."""
    if np_array is None:
        return None
    return tf.convert_to_tensor(np_array, dtype=dtype)


def compute_style_mel(style_wav, ap, cuda=False):
    """Load a reference wav and compute its [1, C, T] mel spectrogram for GST."""
    wav = ap.load_wav(style_wav, sr=ap.sample_rate)
    style_mel = torch.FloatTensor(ap.melspectrogram(wav)).unsqueeze(0)
    return style_mel.cuda() if cuda else style_mel


def run_model_torch(model, inputs, CONFIG, truncated, speaker_id=None,
                    style_mel=None, speaker_embeddings=None):
    """Dispatch inference to the right model family (torch backend).

    Returns (decoder_output, postnet_output, alignments, stop_tokens);
    decoder_output and stop_tokens are None for non-tacotron models.
    """
    model_name = CONFIG.model.lower()
    if 'tacotron' in model_name:
        if CONFIG.use_gst:
            decoder_output, postnet_output, alignments, stop_tokens = model.inference(
                inputs,
                style_mel=style_mel,
                speaker_ids=speaker_id,
                speaker_embeddings=speaker_embeddings)
        elif truncated:
            # keep decoder states across calls for long-form synthesis
            decoder_output, postnet_output, alignments, stop_tokens = model.inference_truncated(
                inputs,
                speaker_ids=speaker_id,
                speaker_embeddings=speaker_embeddings)
        else:
            decoder_output, postnet_output, alignments, stop_tokens = model.inference(
                inputs,
                speaker_ids=speaker_id,
                speaker_embeddings=speaker_embeddings)
    elif 'glow' in model_name:
        inputs_lengths = torch.tensor(inputs.shape[1:2]).to(inputs.device)  # pylint: disable=not-callable
        g = speaker_id if speaker_id is not None else speaker_embeddings
        if hasattr(model, 'module'):
            # distributed model
            postnet_output, _, _, _, alignments, _, _ = model.module.inference(
                inputs, inputs_lengths, g=g)
        else:
            postnet_output, _, _, _, alignments, _, _ = model.inference(
                inputs, inputs_lengths, g=g)
        postnet_output = postnet_output.permute(0, 2, 1)
        # these only belong to tacotron models.
        decoder_output = None
        stop_tokens = None
    elif 'speedy_speech' in model_name:
        inputs_lengths = torch.tensor(inputs.shape[1:2]).to(inputs.device)  # pylint: disable=not-callable
        g = speaker_id if speaker_id is not None else speaker_embeddings
        if hasattr(model, 'module'):
            # distributed model
            postnet_output, alignments = model.module.inference(
                inputs, inputs_lengths, g=g)
        else:
            postnet_output, alignments = model.inference(
                inputs, inputs_lengths, g=g)
        postnet_output = postnet_output.permute(0, 2, 1)
        # these only belong to tacotron models.
        decoder_output = None
        stop_tokens = None
    return decoder_output, postnet_output, alignments, stop_tokens


def run_model_tf(model, inputs, CONFIG, truncated, speaker_id=None, style_mel=None):
    """Run inference with the TensorFlow port of the model (tacotron only)."""
    if CONFIG.use_gst and style_mel is not None:
        raise NotImplementedError(' [!] GST inference not implemented for TF')
    if truncated:
        raise NotImplementedError(' [!] Truncated inference not implemented for TF')
    if speaker_id is not None:
        raise NotImplementedError(' [!] Multi-Speaker not implemented for TF')
    # TODO: handle multispeaker case
    decoder_output, postnet_output, alignments, stop_tokens = model(inputs, training=False)
    return decoder_output, postnet_output, alignments, stop_tokens


def run_model_tflite(model, inputs, CONFIG, truncated, speaker_id=None, style_mel=None):
    """Run inference with a TFLite interpreter; returns only feature frames."""
    if CONFIG.use_gst and style_mel is not None:
        raise NotImplementedError(' [!] GST inference not implemented for TfLite')
    if truncated:
        raise NotImplementedError(' [!] Truncated inference not implemented for TfLite')
    if speaker_id is not None:
        raise NotImplementedError(' [!] Multi-Speaker not implemented for TfLite')
    # get input and output details
    input_details = model.get_input_details()
    output_details = model.get_output_details()
    # reshape input tensor for the new input shape
    model.resize_tensor_input(input_details[0]['index'], inputs.shape)
    model.allocate_tensors()
    detail = input_details[0]
    # input_shape = detail['shape']
    model.set_tensor(detail['index'], inputs)
    # run the model
    model.invoke()
    # collect outputs
    decoder_output = model.get_tensor(output_details[0]['index'])
    postnet_output = model.get_tensor(output_details[1]['index'])
    # tflite model only returns feature frames
    return decoder_output, postnet_output, None, None


def parse_outputs_torch(postnet_output, decoder_output, alignments, stop_tokens):
    """Detach torch outputs to numpy; None fields pass through unchanged."""
    postnet_output = postnet_output[0].data.cpu().numpy()
    decoder_output = None if decoder_output is None else decoder_output[0].data.cpu().numpy()
    alignment = alignments[0].cpu().data.numpy()
    stop_tokens = None if stop_tokens is None else stop_tokens[0].cpu().numpy()
    return postnet_output, decoder_output, alignment, stop_tokens


def parse_outputs_tf(postnet_output, decoder_output, alignments, stop_tokens):
    """Convert TF outputs of the first batch item to numpy."""
    postnet_output = postnet_output[0].numpy()
    decoder_output = decoder_output[0].numpy()
    alignment = alignments[0].numpy()
    stop_tokens = stop_tokens[0].numpy()
    return postnet_output, decoder_output, alignment, stop_tokens


def parse_outputs_tflite(postnet_output, decoder_output):
    """Strip the batch dimension from TFLite outputs."""
    return postnet_output[0], decoder_output[0]


def trim_silence(wav, ap):
    """Cut trailing silence as detected by the audio processor."""
    return wav[:ap.find_endpoint(wav)]


def inv_spectrogram(postnet_output, ap, CONFIG):
    """Invert a (linear or mel) spectrogram to a waveform with Griffin-Lim."""
    if CONFIG.model.lower() in ["tacotron"]:
        wav = ap.inv_spectrogram(postnet_output.T)
    else:
        wav = ap.inv_melspectrogram(postnet_output.T)
    return wav


def id_to_torch(speaker_id, cuda=False):
    """Convert an integer speaker id to a torch tensor."""
    if speaker_id is not None:
        speaker_id = np.asarray(speaker_id)
        # TODO: test this for tacotron models
        speaker_id = torch.from_numpy(speaker_id)
    if cuda:
        return speaker_id.cuda()
    return speaker_id


def embedding_to_torch(speaker_embedding, cuda=False):
    """Convert an external speaker embedding to a [1, D] float tensor."""
    if speaker_embedding is not None:
        speaker_embedding = np.asarray(speaker_embedding)
        speaker_embedding = torch.from_numpy(speaker_embedding).unsqueeze(0).type(torch.FloatTensor)
    if cuda:
        return speaker_embedding.cuda()
    return speaker_embedding


# TODO: perform GL with pytorch for batching
def apply_griffin_lim(inputs, input_lens, CONFIG, ap):
    '''Apply griffin-lim to each sample iterating throught the first dimension.
    Args:
        inputs (Tensor or np.Array): Features to be converted by GL. First dimension is the batch size.
        input_lens (Tensor or np.Array): 1D array of sample lengths.
        CONFIG (Dict): TTS config.
        ap (AudioProcessor): TTS audio processor.
    '''
    wavs = []
    for idx, spec in enumerate(inputs):
        wav_len = (input_lens[idx] * ap.hop_length) - ap.hop_length  # inverse librosa padding
        wav = inv_spectrogram(spec, ap, CONFIG)
        # assert len(wav) == wav_len, f" [!] wav lenght: {len(wav)} vs expected: {wav_len}"
        wavs.append(wav[:wav_len])
    return wavs


def synthesis(model,
              text,
              CONFIG,
              use_cuda,
              ap,
              speaker_id=None,
              style_wav=None,
              truncated=False,
              enable_eos_bos_chars=False,  #pylint: disable=unused-argument
              use_griffin_lim=False,
              do_trim_silence=False,
              speaker_embedding=None,
              backend='torch'):
    """Synthesize voice for the given text.

        Args:
            model (TTS.tts.models): model to synthesize.
            text (str): target text
            CONFIG (dict): config dictionary to be loaded from config.json.
            use_cuda (bool): enable cuda.
            ap (TTS.tts.utils.audio.AudioProcessor): audio processor to process
                model outputs.
            speaker_id (int): id of speaker
            style_wav (str): Uses for style embedding of GST.
            truncated (bool): keep model states after inference. It can be used
                for continuous inference at long texts.
            enable_eos_bos_chars (bool): enable special chars for end of sentence and start of sentence.
            do_trim_silence (bool): trim silence after synthesis.
            backend (str): tf or torch
    """
    # GST processing
    style_mel = None
    if 'use_gst' in CONFIG.keys() and CONFIG.use_gst and style_wav is not None:
        if isinstance(style_wav, dict):
            style_mel = style_wav
        else:
            style_mel = compute_style_mel(style_wav, ap, cuda=use_cuda)
    # preprocess the given text
    inputs = text_to_seqvec(text, CONFIG)
    # pass tensors to backend
    if backend == 'torch':
        if speaker_id is not None:
            speaker_id = id_to_torch(speaker_id, cuda=use_cuda)
        if speaker_embedding is not None:
            speaker_embedding = embedding_to_torch(speaker_embedding, cuda=use_cuda)
        if not isinstance(style_mel, dict):
            # GST dict inputs (token weights) are passed through untouched
            style_mel = numpy_to_torch(style_mel, torch.float, cuda=use_cuda)
        inputs = numpy_to_torch(inputs, torch.long, cuda=use_cuda)
        inputs = inputs.unsqueeze(0)
    elif backend == 'tf':
        # TODO: handle speaker id for tf model
        style_mel = numpy_to_tf(style_mel, tf.float32)
        inputs = numpy_to_tf(inputs, tf.int32)
        inputs = tf.expand_dims(inputs, 0)
    elif backend == 'tflite':
        style_mel = numpy_to_tf(style_mel, tf.float32)
        inputs = numpy_to_tf(inputs, tf.int32)
        inputs = tf.expand_dims(inputs, 0)
    # synthesize voice
    if backend == 'torch':
        decoder_output, postnet_output, alignments, stop_tokens = run_model_torch(
            model, inputs, CONFIG, truncated, speaker_id, style_mel,
            speaker_embeddings=speaker_embedding)
        postnet_output, decoder_output, alignment, stop_tokens = parse_outputs_torch(
            postnet_output, decoder_output, alignments, stop_tokens)
    elif backend == 'tf':
        decoder_output, postnet_output, alignments, stop_tokens = run_model_tf(
            model, inputs, CONFIG, truncated, speaker_id, style_mel)
        postnet_output, decoder_output, alignment, stop_tokens = parse_outputs_tf(
            postnet_output, decoder_output, alignments, stop_tokens)
    elif backend == 'tflite':
        decoder_output, postnet_output, alignment, stop_tokens = run_model_tflite(
            model, inputs, CONFIG, truncated, speaker_id, style_mel)
        postnet_output, decoder_output = parse_outputs_tflite(
            postnet_output, decoder_output)
    # convert outputs to numpy
    # plot results
    wav = None
    if use_griffin_lim:
        wav = inv_spectrogram(postnet_output, ap, CONFIG)
        # trim silence
        if do_trim_silence:
            wav = trim_silence(wav, ap)
    return wav, alignment, decoder_output, postnet_output, stop_tokens, inputs
12,251
42.446809
166
py
TTS
TTS-master/TTS/tts/utils/measures.py
def alignment_diagonal_score(alignments, binary=False):
    """
    Compute how diagonal alignment predictions are. It is useful
    to measure the alignment consistency of a model
    Args:
        alignments (torch.Tensor): batch of alignments.
        binary (bool): if True, ignore scores and consider attention
        as a binary mask.
    Shape:
        alignments : batch x decoder_steps x encoder_steps
    """
    # Strongest attention weight for every decoder step.
    peak_weights = alignments.max(dim=1)[0]
    if binary:
        # Treat any attended position as fully attended.
        peak_weights[peak_weights > 0] = 1
    # Average over time, then over the batch.
    per_sample = peak_weights.mean(dim=1)
    return per_sample.mean(dim=0).item()
545
33.125
68
py
TTS
TTS-master/TTS/tts/utils/io.py
import os
import torch
import datetime
import pickle as pickle_tts

from TTS.utils.io import RenamingUnpickler


def load_checkpoint(model, checkpoint_path, amp=None, use_cuda=False, eval=False):
    """Load a ``TTS.tts.models`` checkpoint into ``model``.

    Args:
        model (TTS.tts.models): model object to load the weights for.
        checkpoint_path (string): checkpoint file path.
        amp (apex.amp, optional): Apex amp object to restore amp-related
            state vars. Defaults to None.
        use_cuda (bool, optional): move the model to GPU if True. Defaults to False.
        eval (bool, optional): switch the model to eval mode if True. Defaults to False.

    Returns:
        tuple: ``(model, state)`` where ``state`` is the raw checkpoint dict.
    """
    try:
        checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    except ModuleNotFoundError:
        # Older checkpoints pickle classes under renamed module paths;
        # retry with the renaming unpickler so they still resolve.
        pickle_tts.Unpickler = RenamingUnpickler
        checkpoint = torch.load(checkpoint_path,
                                map_location=torch.device('cpu'),
                                pickle_module=pickle_tts)
    model.load_state_dict(checkpoint['model'])
    if amp and 'amp' in checkpoint:
        amp.load_state_dict(checkpoint['amp'])
    if use_cuda:
        model.cuda()
    # Restore the decoder reduction factor for Tacotron-style models.
    if hasattr(model.decoder, 'r'):
        model.decoder.set_r(checkpoint['r'])
        print(" > Model r: ", checkpoint['r'])
    if eval:
        model.eval()
    return model, checkpoint


def save_model(model, optimizer, current_step, epoch, r, output_path, amp_state_dict=None, **kwargs):
    """Serialize a ``TTS.tts.models`` model with extra training metadata.

    Args:
        model (TTS.tts.models.Model): model object to be saved.
        optimizer (torch.optim.optimizers.Optimizer): model optimizer used for training.
        current_step (int): current number of training steps.
        epoch (int): current number of training epochs.
        r (int): model reduction rate for Tacotron models.
        output_path (str): output path to save the model file.
        amp_state_dict (state_dict, optional): Apex.amp state dict if Apex is
            enabled. Defaults to None.
    """
    # DataParallel keeps the real model under ``.module``.
    weights = (model.module.state_dict()
               if hasattr(model, 'module') else model.state_dict())
    checkpoint = {
        'model': weights,
        'optimizer': optimizer.state_dict() if optimizer is not None else None,
        'step': current_step,
        'epoch': epoch,
        'date': datetime.date.today().strftime("%B %d, %Y"),
        'r': r,
    }
    if amp_state_dict:
        checkpoint['amp'] = amp_state_dict
    # Arbitrary extra fields (e.g. model_loss) ride along in the same dict.
    checkpoint.update(kwargs)
    torch.save(checkpoint, output_path)


def save_checkpoint(model, optimizer, current_step, epoch, r, output_folder, **kwargs):
    """Save a step-stamped checkpoint file during training.

    Args:
        model (TTS.tts.models.Model): model object to be saved.
        optimizer (torch.optim.optimizers.Optimizer): model optimizer used for training.
        current_step (int): current number of training steps.
        epoch (int): current number of training epochs.
        r (int): model reduction rate for Tacotron models.
        output_folder (str): folder the checkpoint file is written into.
    """
    checkpoint_path = os.path.join(
        output_folder, 'checkpoint_{}.pth.tar'.format(current_step))
    print(" > CHECKPOINT : {}".format(checkpoint_path))
    save_model(model, optimizer, current_step, epoch, r, checkpoint_path, **kwargs)


def save_best_model(target_loss, best_loss, model, optimizer, current_step, epoch, r, output_folder, **kwargs):
    """Save ``best_model.pth.tar`` when the current loss improves on the best so far.

    Args:
        target_loss (float): current model loss.
        best_loss (float): best loss so far.
        model (TTS.tts.models.Model): model object to be saved.
        optimizer (torch.optim.optimizers.Optimizer): model optimizer used for training.
        current_step (int): current number of training steps.
        epoch (int): current number of training epochs.
        r (int): model reduction rate for Tacotron models.
        output_folder (str): folder the best-model file is written into.

    Returns:
        float: the updated best loss.
    """
    if target_loss < best_loss:
        checkpoint_path = os.path.join(output_folder, 'best_model.pth.tar')
        print(" >> BEST MODEL : {}".format(checkpoint_path))
        save_model(model, optimizer, current_step, epoch, r,
                   checkpoint_path, model_loss=target_loss, **kwargs)
        return target_loss
    return best_loss
4,518
38.99115
111
py
TTS
TTS-master/TTS/tts/utils/visual.py
import librosa
import matplotlib
import numpy as np
import torch

# Select the non-interactive Agg backend BEFORE pyplot is imported, so the
# module works on headless training machines.
matplotlib.use('Agg')

import matplotlib.pyplot as plt

from TTS.tts.utils.text import phoneme_to_sequence, sequence_to_phoneme

# NOTE(review): `librosa.display` is used below but only `librosa` is
# imported here; this relies on `librosa.display` being imported elsewhere —
# verify against the package's __init__.


def plot_alignment(alignment, info=None, fig_size=(16, 10), title=None, output_fig=False):
    """Render an attention alignment matrix as a heatmap.

    Args:
        alignment (torch.Tensor or np.ndarray): alignment matrix
            (decoder steps x encoder steps; squeezed if a tensor).
        info (str, optional): extra text appended under the x-axis label.
        fig_size (tuple): matplotlib figure size.
        title (str, optional): figure title.
        output_fig (bool): keep the figure open if True; otherwise it is
            closed (the Figure object is returned either way).

    Returns:
        matplotlib.figure.Figure: the created figure.
    """
    if isinstance(alignment, torch.Tensor):
        alignment_ = alignment.detach().cpu().numpy().squeeze()
    else:
        alignment_ = alignment
    # imshow chokes on float16 data; upcast when needed.
    alignment_ = alignment_.astype(
        np.float32) if alignment_.dtype == np.float16 else alignment_
    fig, ax = plt.subplots(figsize=fig_size)
    im = ax.imshow(alignment_.T,
                   aspect='auto',
                   origin='lower',
                   interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if info is not None:
        xlabel += '\n\n' + info
    plt.xlabel(xlabel)
    plt.ylabel('Encoder timestep')
    # plt.yticks(range(len(text)), list(text))
    plt.tight_layout()
    if title is not None:
        plt.title(title)
    if not output_fig:
        plt.close()
    return fig


def plot_spectrogram(spectrogram, ap=None, fig_size=(16, 10), output_fig=False):
    """Render a (mel-)spectrogram, optionally denormalized by an AudioProcessor.

    Args:
        spectrogram (torch.Tensor or np.ndarray): spectrogram (time x bins).
        ap (TTS.tts.utils.audio.AudioProcessor, optional): if given, used to
            denormalize the values before plotting.
        fig_size (tuple): matplotlib figure size.
        output_fig (bool): keep the figure open if True.

    Returns:
        matplotlib.figure.Figure: the created figure.
    """
    if isinstance(spectrogram, torch.Tensor):
        spectrogram_ = spectrogram.detach().cpu().numpy().squeeze().T
    else:
        spectrogram_ = spectrogram.T
    # imshow chokes on float16 data; upcast when needed.
    spectrogram_ = spectrogram_.astype(
        np.float32) if spectrogram_.dtype == np.float16 else spectrogram_
    if ap is not None:
        spectrogram_ = ap.denormalize(spectrogram_)  # pylint: disable=protected-access
    fig = plt.figure(figsize=fig_size)
    plt.imshow(spectrogram_, aspect="auto", origin="lower")
    plt.colorbar()
    plt.tight_layout()
    if not output_fig:
        plt.close()
    return fig


def visualize(alignment, postnet_output, text, hop_length, CONFIG, stop_tokens=None, decoder_output=None, output_path=None, figsize=(8, 24), output_fig=False):
    """Plot alignment, stopnet output and spectrograms in one stacked figure.

    Args:
        alignment (np.ndarray): attention map (decoder x encoder steps).
        postnet_output (np.ndarray): postnet spectrogram (time x bins).
        text (str): input text, used to label the encoder axis.
        hop_length (int): STFT hop length for the spectrogram time axis.
        CONFIG (dict): model config (phoneme settings, audio params).
        stop_tokens (optional): stopnet predictions; plotted when given.
        decoder_output (np.ndarray, optional): pre-postnet spectrogram;
            adds a fourth subplot when given.
        output_path (str, optional): if set, the figure is saved there.
        figsize (tuple): matplotlib figure size.
        output_fig (bool): keep the figure open if True.
    """
    if decoder_output is not None:
        num_plot = 4
    else:
        num_plot = 3
    label_fontsize = 16
    fig = plt.figure(figsize=figsize)

    plt.subplot(num_plot, 1, 1)
    plt.imshow(alignment.T, aspect="auto", origin="lower", interpolation=None)
    plt.xlabel("Decoder timestamp", fontsize=label_fontsize)
    plt.ylabel("Encoder timestamp", fontsize=label_fontsize)
    # compute phoneme representation and back
    if CONFIG.use_phonemes:
        seq = phoneme_to_sequence(
            text, [CONFIG.text_cleaner],
            CONFIG.phoneme_language,
            CONFIG.enable_eos_bos_chars,
            tp=CONFIG.characters if 'characters' in CONFIG.keys() else None)
        text = sequence_to_phoneme(
            seq,
            tp=CONFIG.characters if 'characters' in CONFIG.keys() else None)
        print(text)
    # Label every encoder step with its input character/phoneme.
    plt.yticks(range(len(text)), list(text))
    plt.colorbar()

    if stop_tokens is not None:
        # plot stopnet predictions
        plt.subplot(num_plot, 1, 2)
        plt.plot(range(len(stop_tokens)), list(stop_tokens))

    # plot postnet spectrogram
    plt.subplot(num_plot, 1, 3)
    librosa.display.specshow(postnet_output.T,
                             sr=CONFIG.audio['sample_rate'],
                             hop_length=hop_length,
                             x_axis="time",
                             y_axis="linear",
                             fmin=CONFIG.audio['mel_fmin'],
                             fmax=CONFIG.audio['mel_fmax'])
    plt.xlabel("Time", fontsize=label_fontsize)
    plt.ylabel("Hz", fontsize=label_fontsize)
    plt.tight_layout()
    plt.colorbar()

    if decoder_output is not None:
        plt.subplot(num_plot, 1, 4)
        librosa.display.specshow(decoder_output.T,
                                 sr=CONFIG.audio['sample_rate'],
                                 hop_length=hop_length,
                                 x_axis="time",
                                 y_axis="linear",
                                 fmin=CONFIG.audio['mel_fmin'],
                                 fmax=CONFIG.audio['mel_fmax'])
        plt.xlabel("Time", fontsize=label_fontsize)
        plt.ylabel("Hz", fontsize=label_fontsize)
        plt.tight_layout()
        plt.colorbar()

    if output_path:
        print(output_path)
        fig.savefig(output_path)
        plt.close()
    # NOTE(review): when both output_path is set and output_fig is False the
    # figure is closed twice — harmless with pyplot but confirm the intent.
    if not output_fig:
        plt.close()
4,586
31.531915
87
py
TTS
TTS-master/TTS/tts/tf/models/tacotron2.py
import tensorflow as tf
from tensorflow import keras

from TTS.tts.tf.layers.tacotron2 import Encoder, Decoder, Postnet
from TTS.tts.tf.utils.tf_utils import shape_list


#pylint: disable=too-many-ancestors, abstract-method
class Tacotron2(keras.models.Model):
    """TensorFlow implementation of the Tacotron2 TTS model.

    Pipeline: character embedding -> Encoder -> attention Decoder ->
    Postnet residual correction. ``r`` is the decoder reduction factor
    (frames predicted per decoder step).
    """
    def __init__(self,
                 num_chars,
                 num_speakers,
                 r,
                 postnet_output_dim=80,
                 decoder_output_dim=80,
                 attn_type='original',
                 attn_win=False,
                 attn_norm="softmax",
                 attn_K=4,
                 prenet_type="original",
                 prenet_dropout=True,
                 forward_attn=False,
                 trans_agent=False,
                 forward_attn_mask=False,
                 location_attn=True,
                 separate_stopnet=True,
                 bidirectional_decoder=False,
                 enable_tflite=False):
        super(Tacotron2, self).__init__()
        self.r = r
        self.decoder_output_dim = decoder_output_dim
        self.postnet_output_dim = postnet_output_dim
        self.bidirectional_decoder = bidirectional_decoder
        self.num_speakers = num_speakers
        self.speaker_embed_dim = 256
        self.enable_tflite = enable_tflite

        # 512 is the fixed embedding/encoder width used throughout this port.
        self.embedding = keras.layers.Embedding(num_chars, 512, name='embedding')
        self.encoder = Encoder(512, name='encoder')
        # TODO: most of the decoder args have no use at the momment
        self.decoder = Decoder(decoder_output_dim,
                               r,
                               attn_type=attn_type,
                               use_attn_win=attn_win,
                               attn_norm=attn_norm,
                               prenet_type=prenet_type,
                               prenet_dropout=prenet_dropout,
                               use_forward_attn=forward_attn,
                               use_trans_agent=trans_agent,
                               use_forward_attn_mask=forward_attn_mask,
                               use_location_attn=location_attn,
                               attn_K=attn_K,
                               separate_stopnet=separate_stopnet,
                               speaker_emb_dim=self.speaker_embed_dim,
                               name='decoder',
                               enable_tflite=enable_tflite)
        self.postnet = Postnet(postnet_output_dim, 5, name='postnet')

    @tf.function(experimental_relax_shapes=True)
    def call(self, characters, text_lengths=None, frames=None, training=None):
        # Dispatch on the `training` flag: the training path consumes the
        # ground-truth frames, inference only needs the characters.
        if training:
            return self.training(characters, text_lengths, frames)
        if not training:
            return self.inference(characters)
        # NOTE(review): unreachable — every `training` value satisfies one of
        # the two branches above.
        raise RuntimeError(' [!] Set model training mode True or False')

    def training(self, characters, text_lengths, frames):
        """Training forward pass; ground-truth frames are fed to the decoder.

        Returns (decoder_frames, postnet-corrected frames, attentions,
        stop_tokens).
        """
        B, T = shape_list(characters)
        embedding_vectors = self.embedding(characters, training=True)
        encoder_output = self.encoder(embedding_vectors, training=True)
        decoder_states = self.decoder.build_decoder_initial_states(B, 512, T)
        decoder_frames, stop_tokens, attentions = self.decoder(encoder_output, decoder_states, frames, text_lengths, training=True)
        postnet_frames = self.postnet(decoder_frames, training=True)
        # The postnet predicts a residual over the decoder output.
        output_frames = decoder_frames + postnet_frames
        return decoder_frames, output_frames, attentions, stop_tokens

    def inference(self, characters):
        """Autoregressive inference from character ids only."""
        B, T = shape_list(characters)
        embedding_vectors = self.embedding(characters, training=False)
        encoder_output = self.encoder(embedding_vectors, training=False)
        decoder_states = self.decoder.build_decoder_initial_states(B, 512, T)
        decoder_frames, stop_tokens, attentions = self.decoder(encoder_output, decoder_states, training=False)
        postnet_frames = self.postnet(decoder_frames, training=False)
        output_frames = decoder_frames + postnet_frames
        # NOTE(review): debug print left in the inference path — consider
        # removing.
        print(output_frames.shape)
        return decoder_frames, output_frames, attentions, stop_tokens

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec([1, None], dtype=tf.int32),
        ],)
    def inference_tflite(self, characters):
        """Inference entry point with a fixed signature for TFLite conversion
        (batch size 1, variable text length)."""
        B, T = shape_list(characters)
        embedding_vectors = self.embedding(characters, training=False)
        encoder_output = self.encoder(embedding_vectors, training=False)
        decoder_states = self.decoder.build_decoder_initial_states(B, 512, T)
        decoder_frames, stop_tokens, attentions = self.decoder(encoder_output, decoder_states, training=False)
        postnet_frames = self.postnet(decoder_frames, training=False)
        output_frames = decoder_frames + postnet_frames
        # NOTE(review): debug print left in the TFLite path — consider
        # removing.
        print(output_frames.shape)
        return decoder_frames, output_frames, attentions, stop_tokens

    def build_inference(self, ):
        """Run one dummy forward pass so all layer weights get built."""
        # TODO: issue https://github.com/PyCQA/pylint/issues/3613
        input_ids = tf.random.uniform(shape=[1, 4], maxval=10, dtype=tf.int32)  #pylint: disable=unexpected-keyword-arg
        self(input_ids)
5,151
46.703704
131
py
TTS
TTS-master/TTS/tts/tf/layers/common_layers.py
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.ops import math_ops
# from tensorflow_addons.seq2seq import BahdanauAttention

# NOTE: linter has a problem with the current TF release
#pylint: disable=no-value-for-parameter
#pylint: disable=unexpected-keyword-arg


class Linear(keras.layers.Layer):
    """Dense layer followed by a ReLU activation."""
    def __init__(self, units, use_bias, **kwargs):
        super(Linear, self).__init__(**kwargs)
        self.linear_layer = keras.layers.Dense(units, use_bias=use_bias, name='linear_layer')
        self.activation = keras.layers.ReLU()

    def call(self, x):
        """
        shapes:
            x: B x T x C
        """
        return self.activation(self.linear_layer(x))


class LinearBN(keras.layers.Layer):
    """Dense layer followed by batch normalization and a ReLU activation."""
    def __init__(self, units, use_bias, **kwargs):
        super(LinearBN, self).__init__(**kwargs)
        self.linear_layer = keras.layers.Dense(units, use_bias=use_bias, name='linear_layer')
        self.batch_normalization = keras.layers.BatchNormalization(axis=-1, momentum=0.90, epsilon=1e-5, name='batch_normalization')
        self.activation = keras.layers.ReLU()

    def call(self, x, training=None):
        """
        shapes:
            x: B x T x C
        """
        out = self.linear_layer(x)
        out = self.batch_normalization(out, training=training)
        return self.activation(out)


class Prenet(keras.layers.Layer):
    """Stack of Linear or LinearBN layers applied before the decoder RNN.

    ``prenet_type`` selects 'original' (plain Linear) or 'bn' (LinearBN);
    ``units`` is one width per stacked layer. When ``prenet_dropout`` is set,
    dropout (rate 0.5) follows every layer.
    """
    def __init__(self, prenet_type, prenet_dropout, units, bias, **kwargs):
        super(Prenet, self).__init__(**kwargs)
        self.prenet_type = prenet_type
        self.prenet_dropout = prenet_dropout
        self.linear_layers = []
        if prenet_type == "bn":
            self.linear_layers += [LinearBN(unit, use_bias=bias, name=f'linear_layer_{idx}') for idx, unit in enumerate(units)]
        elif prenet_type == "original":
            self.linear_layers += [Linear(unit, use_bias=bias, name=f'linear_layer_{idx}') for idx, unit in enumerate(units)]
        else:
            raise RuntimeError(' [!] Unknown prenet type.')
        if prenet_dropout:
            self.dropout = keras.layers.Dropout(rate=0.5)

    def call(self, x, training=None):
        """
        shapes:
            x: B x T x C
        """
        for linear in self.linear_layers:
            if self.prenet_dropout:
                x = self.dropout(linear(x), training=training)
            else:
                x = linear(x)
        return x


def _sigmoid_norm(score):
    # Sigmoid scores normalized to sum to 1 along the value-length axis —
    # an alternative to softmax for the attention distribution.
    attn_weights = tf.nn.sigmoid(score)
    attn_weights = attn_weights / tf.reduce_sum(attn_weights, axis=1, keepdims=True)
    return attn_weights


class Attention(keras.layers.Layer):
    """TODO: implement forward_attention
    TODO: location sensitive attention
    TODO: implement attention windowing """
    def __init__(self, attn_dim, use_loc_attn, loc_attn_n_filters,
                 loc_attn_kernel_size, use_windowing, norm, use_forward_attn,
                 use_trans_agent, use_forward_attn_mask, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.use_loc_attn = use_loc_attn
        self.loc_attn_n_filters = loc_attn_n_filters
        self.loc_attn_kernel_size = loc_attn_kernel_size
        self.use_windowing = use_windowing
        self.norm = norm
        self.use_forward_attn = use_forward_attn
        self.use_trans_agent = use_trans_agent
        self.use_forward_attn_mask = use_forward_attn_mask
        # Projections for query, encoder values and the scalar score.
        self.query_layer = tf.keras.layers.Dense(attn_dim, use_bias=False, name='query_layer/linear_layer')
        self.inputs_layer = tf.keras.layers.Dense(attn_dim, use_bias=False, name=f'{self.name}/inputs_layer/linear_layer')
        self.v = tf.keras.layers.Dense(1, use_bias=True, name='v/linear_layer')
        if use_loc_attn:
            self.location_conv1d = keras.layers.Conv1D(
                filters=loc_attn_n_filters,
                kernel_size=loc_attn_kernel_size,
                padding='same',
                use_bias=False,
                name='location_layer/location_conv1d')
            self.location_dense = keras.layers.Dense(attn_dim, use_bias=False, name='location_layer/location_dense')
        if norm == 'softmax':
            self.norm_func = tf.nn.softmax
        elif norm == 'sigmoid':
            self.norm_func = _sigmoid_norm
        else:
            raise ValueError("Unknown value for attention norm type")

    def init_states(self, batch_size, value_length):
        """Build the per-utterance attention state tuple.

        Contains (cumulative weights, previous weights) when location
        attention is on, plus the forward-attention alpha when enabled.
        """
        states = []
        if self.use_loc_attn:
            attention_cum = tf.zeros([batch_size, value_length])
            attention_old = tf.zeros([batch_size, value_length])
            states = [attention_cum, attention_old]
        if self.use_forward_attn:
            # Alpha starts with all mass on the first encoder step.
            alpha = tf.concat([
                tf.ones([batch_size, 1]),
                tf.zeros([batch_size, value_length])[:, :-1] + 1e-7
            ], 1)
            states.append(alpha)
        return tuple(states)

    def process_values(self, values):
        """ cache values for decoder iterations """
        #pylint: disable=attribute-defined-outside-init
        self.processed_values = self.inputs_layer(values)
        self.values = values

    def get_loc_attn(self, query, states):
        """ compute location attention, query layer and
        unnorm. attention weights"""
        attention_cum, attention_old = states[:2]
        attn_cat = tf.stack([attention_old, attention_cum], axis=2)

        processed_query = self.query_layer(tf.expand_dims(query, 1))
        processed_attn = self.location_dense(self.location_conv1d(attn_cat))
        # Additive (Bahdanau-style) score over values + query + location.
        score = self.v(
            tf.nn.tanh(self.processed_values + processed_query +
                       processed_attn))
        score = tf.squeeze(score, axis=2)
        return score, processed_query

    def get_attn(self, query):
        """ compute query layer and unnormalized attention weights """
        processed_query = self.query_layer(tf.expand_dims(query, 1))
        score = self.v(tf.nn.tanh(self.processed_values + processed_query))
        score = tf.squeeze(score, axis=2)
        return score, processed_query

    def apply_score_masking(self, score, mask):  #pylint: disable=no-self-use
        """ ignore sequence paddings """
        padding_mask = tf.expand_dims(math_ops.logical_not(mask), 2)
        # Bias so padding positions do not contribute to attention distribution.
        score -= 1.e9 * math_ops.cast(padding_mask, dtype=tf.float32)
        return score

    def apply_forward_attention(self, alignment, alpha):  #pylint: disable=no-self-use
        # forward attention
        fwd_shifted_alpha = tf.pad(alpha[:, :-1], ((0, 0), (1, 0)), constant_values=0.0)
        # compute transition potentials
        new_alpha = ((1 - 0.5) * alpha + 0.5 * fwd_shifted_alpha + 1e-8) * alignment
        # renormalize attention weights
        new_alpha = new_alpha / tf.reduce_sum(new_alpha, axis=1, keepdims=True)
        return new_alpha

    def update_states(self, old_states, scores_norm, attn_weights, new_alpha=None):
        # Keep (cumulative += normalized scores, latest weights[, alpha]).
        states = []
        if self.use_loc_attn:
            states = [old_states[0] + scores_norm, attn_weights]
        if self.use_forward_attn:
            states.append(new_alpha)
        return tuple(states)

    def call(self, query, states):
        """
        shapes:
            query: B x D
        """
        if self.use_loc_attn:
            score, _ = self.get_loc_attn(query, states)
        else:
            score, _ = self.get_attn(query)

        # TODO: masking
        # if mask is not None:
        #     self.apply_score_masking(score, mask)
        # attn_weights shape == (batch_size, max_length, 1)

        # normalize attention scores
        scores_norm = self.norm_func(score)
        attn_weights = scores_norm

        # apply forward attention
        new_alpha = None
        if self.use_forward_attn:
            new_alpha = self.apply_forward_attention(attn_weights, states[-1])
            attn_weights = new_alpha

        # update states tuple
        # states = (cum_attn_weights, attn_weights, new_alpha)
        states = self.update_states(states, scores_norm, attn_weights, new_alpha)

        # context_vector shape after sum == (batch_size, hidden_size)
        context_vector = tf.matmul(tf.expand_dims(attn_weights, axis=2), self.values, transpose_a=True, transpose_b=False)
        context_vector = tf.squeeze(context_vector, axis=1)
        return context_vector, attn_weights, states


# def _location_sensitive_score(processed_query, keys, processed_loc, attention_v, attention_b):
#     dtype = processed_query.dtype
#     num_units = keys.shape[-1].value or array_ops.shape(keys)[-1]
#     return tf.reduce_sum(attention_v * tf.tanh(keys + processed_query + processed_loc + attention_b), [2])


# class LocationSensitiveAttention(BahdanauAttention):
#     def __init__(self,
#                  units,
#                  memory=None,
#                  memory_sequence_length=None,
#                  normalize=False,
#                  probability_fn="softmax",
#                  kernel_initializer="glorot_uniform",
#                  dtype=None,
#                  name="LocationSensitiveAttention",
#                  location_attention_filters=32,
#                  location_attention_kernel_size=31):

#         super(LocationSensitiveAttention,
#               self).__init__(units=units,
#                              memory=memory,
#                              memory_sequence_length=memory_sequence_length,
#                              normalize=normalize,
#                              probability_fn='softmax',  ## parent module default
#                              kernel_initializer=kernel_initializer,
#                              dtype=dtype,
#                              name=name)
#         if probability_fn == 'sigmoid':
#             self.probability_fn = lambda score, _: self._sigmoid_normalization(score)
#         self.location_conv = keras.layers.Conv1D(filters=location_attention_filters, kernel_size=location_attention_kernel_size, padding='same', use_bias=False)
#         self.location_dense = keras.layers.Dense(units, use_bias=False)
#         # self.v = keras.layers.Dense(1, use_bias=True)

#     def _location_sensitive_score(self, processed_query, keys, processed_loc):
#         processed_query = tf.expand_dims(processed_query, 1)
#         return tf.reduce_sum(self.attention_v * tf.tanh(keys + processed_query + processed_loc), [2])

#     def _location_sensitive(self, alignment_cum, alignment_old):
#         alignment_cat = tf.stack([alignment_cum, alignment_old], axis=2)
#         return self.location_dense(self.location_conv(alignment_cat))

#     def _sigmoid_normalization(self, score):
#         return tf.nn.sigmoid(score) / tf.reduce_sum(tf.nn.sigmoid(score), axis=-1, keepdims=True)

#     # def _apply_masking(self, score, mask):
#     #     padding_mask = tf.expand_dims(math_ops.logical_not(mask), 2)
#     #     # Bias so padding positions do not contribute to attention distribution.
#     #     score -= 1.e9 * math_ops.cast(padding_mask, dtype=tf.float32)
#     #     return score

#     def _calculate_attention(self, query, state):
#         alignment_cum, alignment_old = state[:2]
#         processed_query = self.query_layer(
#             query) if self.query_layer else query
#         processed_loc = self._location_sensitive(alignment_cum, alignment_old)

#         score = self._location_sensitive_score(
#             processed_query,
#             self.keys,
#             processed_loc)
#         alignment = self.probability_fn(score, state)
#         alignment_cum = alignment_cum + alignment
#         state[0] = alignment_cum
#         state[1] = alignment
#         return alignment, state

#     def compute_context(self, alignments):
#         expanded_alignments = tf.expand_dims(alignments, 1)
#         context = tf.matmul(expanded_alignments, self.values)
#         context = tf.squeeze(context, [1])
#         return context

#     # def call(self, query, state):
#     #     alignment, next_state = self._calculate_attention(query, state)
#     #     return alignment, next_state
12,264
41.439446
162
py
TTS
TTS-master/TTS/tts/tf/layers/tacotron2.py
"""TensorFlow layers for the Tacotron 2 TTS model: conv/BN blocks, postnet,
encoder, and the autoregressive decoder with train / inference / TF-Lite paths."""
import tensorflow as tf
from tensorflow import keras
from TTS.tts.tf.utils.tf_utils import shape_list
from TTS.tts.tf.layers.common_layers import Prenet, Attention
# NOTE: linter has a problem with the current TF release
#pylint: disable=no-value-for-parameter
#pylint: disable=unexpected-keyword-arg


class ConvBNBlock(keras.layers.Layer):
    """Conv1D -> BatchNorm -> activation -> dropout block (dropout after activation)."""
    def __init__(self, filters, kernel_size, activation, **kwargs):
        super(ConvBNBlock, self).__init__(**kwargs)
        self.convolution1d = keras.layers.Conv1D(filters, kernel_size, padding='same', name='convolution1d')
        self.batch_normalization = keras.layers.BatchNormalization(axis=2, momentum=0.90, epsilon=1e-5, name='batch_normalization')
        self.dropout = keras.layers.Dropout(rate=0.5, name='dropout')
        self.activation = keras.layers.Activation(activation, name='activation')

    def call(self, x, training=None):
        o = self.convolution1d(x)
        o = self.batch_normalization(o, training=training)
        o = self.activation(o)
        o = self.dropout(o, training=training)
        return o


class Postnet(keras.layers.Layer):
    """Stack of ConvBNBlocks refining the decoder output spectrogram."""
    def __init__(self, output_filters, num_convs, **kwargs):
        super(Postnet, self).__init__(**kwargs)
        self.convolutions = []
        self.convolutions.append(ConvBNBlock(512, 5, 'tanh', name='convolutions_0'))
        for idx in range(1, num_convs - 1):
            self.convolutions.append(ConvBNBlock(512, 5, 'tanh', name=f'convolutions_{idx}'))
        # NOTE(review): `idx` leaks from the loop above; if num_convs <= 2 the
        # loop never runs and this line raises NameError — confirm num_convs >= 3.
        self.convolutions.append(ConvBNBlock(output_filters, 5, 'linear', name=f'convolutions_{idx+1}'))

    def call(self, x, training=None):
        o = x
        for layer in self.convolutions:
            o = layer(o, training=training)
        return o


class Encoder(keras.layers.Layer):
    """Three ConvBNBlocks followed by a bidirectional LSTM over the character sequence."""
    def __init__(self, output_input_dim, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.convolutions = []
        for idx in range(3):
            self.convolutions.append(ConvBNBlock(output_input_dim, 5, 'relu', name=f'convolutions_{idx}'))
        self.lstm = keras.layers.Bidirectional(keras.layers.LSTM(output_input_dim // 2, return_sequences=True, use_bias=True), name='lstm')

    def call(self, x, training=None):
        o = x
        for layer in self.convolutions:
            o = layer(o, training=training)
        o = self.lstm(o)
        return o


class Decoder(keras.layers.Layer):
    """Autoregressive Tacotron 2 decoder.

    Predicts `r` spectrogram frames per step with location-sensitive attention
    and a stopnet. `call` dispatches to teacher-forced training, free-running
    inference, or a TF-Lite compatible inference loop.
    """
    #pylint: disable=unused-argument
    def __init__(self, frame_dim, r, attn_type, use_attn_win, attn_norm, prenet_type, prenet_dropout, use_forward_attn, use_trans_agent, use_forward_attn_mask, use_location_attn, attn_K, separate_stopnet, speaker_emb_dim, enable_tflite, **kwargs):
        super(Decoder, self).__init__(**kwargs)
        self.frame_dim = frame_dim
        self.r_init = tf.constant(r, dtype=tf.int32)
        self.r = tf.constant(r, dtype=tf.int32)
        self.output_dim = r * self.frame_dim
        self.separate_stopnet = separate_stopnet
        self.enable_tflite = enable_tflite
        # layer constants
        self.max_decoder_steps = tf.constant(1000, dtype=tf.int32)
        self.stop_thresh = tf.constant(0.5, dtype=tf.float32)
        # model dimensions
        self.query_dim = 1024
        self.decoder_rnn_dim = 1024
        self.prenet_dim = 256
        self.attn_dim = 128
        self.p_attention_dropout = 0.1
        self.p_decoder_dropout = 0.1
        self.prenet = Prenet(prenet_type, prenet_dropout, [self.prenet_dim, self.prenet_dim], bias=False, name='prenet')
        self.attention_rnn = keras.layers.LSTMCell(self.query_dim, use_bias=True, name='attention_rnn', )
        self.attention_rnn_dropout = keras.layers.Dropout(0.5)
        # TODO: implement other attn options
        self.attention = Attention(attn_dim=self.attn_dim, use_loc_attn=True, loc_attn_n_filters=32, loc_attn_kernel_size=31, use_windowing=False, norm=attn_norm, use_forward_attn=use_forward_attn, use_trans_agent=use_trans_agent, use_forward_attn_mask=use_forward_attn_mask, name='attention')
        self.decoder_rnn = keras.layers.LSTMCell(self.decoder_rnn_dim, use_bias=True, name='decoder_rnn')
        self.decoder_rnn_dropout = keras.layers.Dropout(0.5)
        self.linear_projection = keras.layers.Dense(self.frame_dim * r, name='linear_projection/linear_layer')
        self.stopnet = keras.layers.Dense(1, name='stopnet/linear_layer')

    def set_max_decoder_steps(self, new_max_steps):
        # Upper bound for the free-running inference loops.
        self.max_decoder_steps = tf.constant(new_max_steps, dtype=tf.int32)

    def set_r(self, new_r):
        # Change the reduction factor (frames emitted per decoder step).
        self.r = tf.constant(new_r, dtype=tf.int32)
        self.output_dim = self.frame_dim * new_r

    def build_decoder_initial_states(self, batch_size, memory_dim, memory_length):
        """Return zeroed (frame, context, attention-RNN, decoder-RNN, attention) states."""
        zero_frame = tf.zeros([batch_size, self.frame_dim])
        zero_context = tf.zeros([batch_size, memory_dim])
        attention_rnn_state = self.attention_rnn.get_initial_state(batch_size=batch_size, dtype=tf.float32)
        decoder_rnn_state = self.decoder_rnn.get_initial_state(batch_size=batch_size, dtype=tf.float32)
        attention_states = self.attention.init_states(batch_size, memory_length)
        return zero_frame, zero_context, attention_rnn_state, decoder_rnn_state, attention_states

    def step(self, prenet_next, states, memory_seq_length=None, training=None):
        """One decoder step: attention RNN -> attention -> decoder RNN -> frame + stop token."""
        _, context_next, attention_rnn_state, decoder_rnn_state, attention_states = states
        attention_rnn_input = tf.concat([prenet_next, context_next], -1)
        attention_rnn_output, attention_rnn_state = \
            self.attention_rnn(attention_rnn_input, attention_rnn_state, training=training)
        attention_rnn_output = self.attention_rnn_dropout(attention_rnn_output, training=training)
        context, attention, attention_states = self.attention(attention_rnn_output, attention_states, training=training)
        decoder_rnn_input = tf.concat([attention_rnn_output, context], -1)
        decoder_rnn_output, decoder_rnn_state = \
            self.decoder_rnn(decoder_rnn_input, decoder_rnn_state, training=training)
        decoder_rnn_output = self.decoder_rnn_dropout(decoder_rnn_output, training=training)
        linear_projection_input = tf.concat([decoder_rnn_output, context], -1)
        output_frame = self.linear_projection(linear_projection_input, training=training)
        stopnet_input = tf.concat([decoder_rnn_output, output_frame], -1)
        stopnet_output = self.stopnet(stopnet_input, training=training)
        # Keep only the first r frames (projection is sized for the initial r).
        output_frame = output_frame[:, :self.r * self.frame_dim]
        # The last predicted frame feeds the next step (autoregression).
        states = (output_frame[:, self.frame_dim * (self.r - 1):], context, attention_rnn_state, decoder_rnn_state, attention_states)
        return output_frame, stopnet_output, states, attention

    def decode(self, memory, states, frames, memory_seq_length=None):
        """Teacher-forced decoding over ground-truth `frames` (training path)."""
        B, _, _ = shape_list(memory)
        num_iter = shape_list(frames)[1] // self.r
        # init states
        frame_zero = tf.expand_dims(states[0], 1)
        frames = tf.concat([frame_zero, frames], axis=1)
        outputs = tf.TensorArray(dtype=tf.float32, size=num_iter)
        attentions = tf.TensorArray(dtype=tf.float32, size=num_iter)
        stop_tokens = tf.TensorArray(dtype=tf.float32, size=num_iter)
        # pre-computes
        self.attention.process_values(memory)
        prenet_output = self.prenet(frames, training=True)
        step_count = tf.constant(0, dtype=tf.int32)

        def _body(step, memory, prenet_output, states, outputs, stop_tokens, attentions):
            prenet_next = prenet_output[:, step]
            output, stop_token, states, attention = self.step(prenet_next, states, memory_seq_length)
            outputs = outputs.write(step, output)
            attentions = attentions.write(step, attention)
            stop_tokens = stop_tokens.write(step, stop_token)
            return step + 1, memory, prenet_output, states, outputs, stop_tokens, attentions

        # cond is always True; termination is driven by maximum_iterations.
        _, memory, _, states, outputs, stop_tokens, attentions = \
            tf.while_loop(lambda *arg: True, _body, loop_vars=(step_count, memory, prenet_output, states, outputs, stop_tokens, attentions), parallel_iterations=32, swap_memory=True, maximum_iterations=num_iter)
        outputs = outputs.stack()
        attentions = attentions.stack()
        stop_tokens = stop_tokens.stack()
        outputs = tf.transpose(outputs, [1, 0, 2])
        attentions = tf.transpose(attentions, [1, 0, 2])
        stop_tokens = tf.transpose(stop_tokens, [1, 0, 2])
        stop_tokens = tf.squeeze(stop_tokens, axis=2)
        outputs = tf.reshape(outputs, [B, -1, self.frame_dim])
        return outputs, stop_tokens, attentions

    def decode_inference(self, memory, states):
        """Free-running decoding; stops when every stop token exceeds stop_thresh."""
        B, _, _ = shape_list(memory)
        # init states
        outputs = tf.TensorArray(dtype=tf.float32, size=0, clear_after_read=False, dynamic_size=True)
        attentions = tf.TensorArray(dtype=tf.float32, size=0, clear_after_read=False, dynamic_size=True)
        stop_tokens = tf.TensorArray(dtype=tf.float32, size=0, clear_after_read=False, dynamic_size=True)
        # pre-computes
        self.attention.process_values(memory)
        # iter vars
        stop_flag = tf.constant(False, dtype=tf.bool)
        step_count = tf.constant(0, dtype=tf.int32)

        def _body(step, memory, states, outputs, stop_tokens, attentions, stop_flag):
            frame_next = states[0]
            prenet_next = self.prenet(frame_next, training=False)
            output, stop_token, states, attention = self.step(prenet_next, states, None, training=False)
            stop_token = tf.math.sigmoid(stop_token)
            outputs = outputs.write(step, output)
            attentions = attentions.write(step, attention)
            stop_tokens = stop_tokens.write(step, stop_token)
            stop_flag = tf.greater(stop_token, self.stop_thresh)
            stop_flag = tf.reduce_all(stop_flag)
            return step + 1, memory, states, outputs, stop_tokens, attentions, stop_flag

        cond = lambda step, m, s, o, st, a, stop_flag: tf.equal(stop_flag, tf.constant(False, dtype=tf.bool))
        _, memory, states, outputs, stop_tokens, attentions, stop_flag = \
            tf.while_loop(cond, _body, loop_vars=(step_count, memory, states, outputs, stop_tokens, attentions, stop_flag), parallel_iterations=32, swap_memory=True, maximum_iterations=self.max_decoder_steps)
        outputs = outputs.stack()
        attentions = attentions.stack()
        stop_tokens = stop_tokens.stack()
        outputs = tf.transpose(outputs, [1, 0, 2])
        attentions = tf.transpose(attentions, [1, 0, 2])
        stop_tokens = tf.transpose(stop_tokens, [1, 0, 2])
        stop_tokens = tf.squeeze(stop_tokens, axis=2)
        outputs = tf.reshape(outputs, [B, -1, self.frame_dim])
        return outputs, stop_tokens, attentions

    def decode_inference_tflite(self, memory, states):
        """Inference with TF-Lite compatibility. It assumes batch_size is 1"""
        # init states
        # dynamic_shape is not supported in TFLite
        outputs = tf.TensorArray(dtype=tf.float32,
                                 size=self.max_decoder_steps,
                                 element_shape=tf.TensorShape([self.output_dim]),
                                 clear_after_read=False,
                                 dynamic_size=False)
        # stop_flags = tf.TensorArray(dtype=tf.bool,
        #                             size=self.max_decoder_steps,
        #                             element_shape=tf.TensorShape([]),
        #                             clear_after_read=False,
        #                             dynamic_size=False)
        attentions = ()
        stop_tokens = ()
        # pre-computes
        self.attention.process_values(memory)
        # iter vars
        stop_flag = tf.constant(False, dtype=tf.bool)
        step_count = tf.constant(0, dtype=tf.int32)

        def _body(step, memory, states, outputs, stop_flag):
            frame_next = states[0]
            prenet_next = self.prenet(frame_next, training=False)
            output, stop_token, states, _ = self.step(prenet_next, states, None, training=False)
            stop_token = tf.math.sigmoid(stop_token)
            stop_flag = tf.greater(stop_token, self.stop_thresh)
            stop_flag = tf.reduce_all(stop_flag)
            # stop_flags = stop_flags.write(step, tf.logical_not(stop_flag))
            outputs = outputs.write(step, tf.reshape(output, [-1]))
            return step + 1, memory, states, outputs, stop_flag

        cond = lambda step, m, s, o, stop_flag: tf.equal(stop_flag, tf.constant(False, dtype=tf.bool))
        step_count, memory, states, outputs, stop_flag = \
            tf.while_loop(cond, _body, loop_vars=(step_count, memory, states, outputs, stop_flag), parallel_iterations=32, swap_memory=True, maximum_iterations=self.max_decoder_steps)
        outputs = outputs.stack()
        outputs = tf.gather(outputs, tf.range(step_count))  # pylint: disable=no-value-for-parameter
        outputs = tf.expand_dims(outputs, axis=[0])
        outputs = tf.transpose(outputs, [1, 0, 2])
        outputs = tf.reshape(outputs, [1, -1, self.frame_dim])
        return outputs, stop_tokens, attentions

    def call(self, memory, states, frames=None, memory_seq_length=None, training=False):
        # Dispatch: teacher forcing in training, otherwise free-running inference.
        if training:
            return self.decode(memory, states, frames, memory_seq_length)
        if self.enable_tflite:
            return self.decode_inference_tflite(memory, states)
        return self.decode_inference(memory, states)
15,199
49.165017
139
py
TTS
TTS-master/TTS/tts/tf/utils/generic_utils.py
import datetime
import importlib
import pickle

import numpy as np
import tensorflow as tf


def save_checkpoint(model, optimizer, current_step, epoch, r, output_path, **kwargs):
    """Pickle the model weights and training metadata to ``output_path``.

    Extra keyword arguments are stored verbatim in the state dict.
    """
    state = {
        'model': model.weights,
        'optimizer': optimizer,
        'step': current_step,
        'epoch': epoch,
        'date': datetime.date.today().strftime("%B %d, %Y"),
        'r': r
    }
    state.update(kwargs)
    # FIX: use a context manager so the file handle is always closed
    # (the original leaked the handle from pickle.dump(state, open(...))).
    with open(output_path, 'wb') as f:
        pickle.dump(state, f)


def load_checkpoint(model, checkpoint_path):
    """Restore weights pickled by ``save_checkpoint`` into ``model`` and return it."""
    with open(checkpoint_path, 'rb') as f:
        checkpoint = pickle.load(f)
    chkp_var_dict = {var.name: var.numpy() for var in checkpoint['model']}
    tf_vars = model.weights
    for tf_var in tf_vars:
        layer_name = tf_var.name
        try:
            chkp_var_value = chkp_var_dict[layer_name]
        except KeyError:
            # Older checkpoints prefix variable names with the model class name.
            class_name = list(chkp_var_dict.keys())[0].split("/")[0]
            layer_name = f"{class_name}/{layer_name}"
            chkp_var_value = chkp_var_dict[layer_name]
        tf.keras.backend.set_value(tf_var, chkp_var_value)
    if 'r' in checkpoint.keys():
        model.decoder.set_r(checkpoint['r'])
    return model


def sequence_mask(sequence_length, max_len=None):
    """Return a boolean mask of shape [B, max_len]; True where position < length.

    FIX: the original called torch tensor methods (``unsqueeze``, ``size(0)``,
    ``is_cuda``) on numpy data and crashed on every call. Re-implemented with
    plain numpy broadcasting; the returned mask shape/semantics are unchanged.
    """
    sequence_length = np.asarray(sequence_length)
    if max_len is None:
        max_len = int(sequence_length.max())
    seq_range = np.arange(max_len)
    # [1, T_max] < [B, 1] broadcasts to the [B, T_max] mask.
    return seq_range[None, :] < sequence_length[:, None]


# @tf.custom_gradient
def check_gradient(x, grad_clip):
    """Clip ``x`` by norm and return it with a norm value."""
    x_normed = tf.clip_by_norm(x, grad_clip)
    # NOTE(review): this norms the clip constant, not the gradient of x —
    # looks like a leftover from the custom-gradient version; confirm intent.
    grad_norm = tf.norm(grad_clip)
    return x_normed, grad_norm


def count_parameters(model, c):
    """Return the parameter count, building the model with dummy inputs if needed."""
    try:
        return model.count_params()
    except RuntimeError:
        # Model not built yet: run one forward pass with random inputs.
        input_dummy = tf.convert_to_tensor(np.random.rand(8, 128).astype('int32'))
        input_lengths = np.random.randint(100, 129, (8, ))
        input_lengths[-1] = 128
        input_lengths = tf.convert_to_tensor(input_lengths.astype('int32'))
        mel_spec = np.random.rand(8, 2 * c.r, c.audio['num_mels']).astype('float32')
        mel_spec = tf.convert_to_tensor(mel_spec)
        speaker_ids = np.random.randint(0, 5, (8, )) if c.use_speaker_embedding else None
        _ = model(input_dummy, input_lengths, mel_spec, speaker_ids=speaker_ids)
        return model.count_params()


def setup_model(num_chars, num_speakers, c, enable_tflite=False):
    """Instantiate the TF TTS model class named by ``c.model``."""
    print(" > Using model: {}".format(c.model))
    MyModel = importlib.import_module('TTS.tts.tf.models.' + c.model.lower())
    MyModel = getattr(MyModel, c.model)
    # FIX: the original used `c.model.lower() in "tacotron"` (substring test),
    # which also matched names like "taco"; only plain Tacotron is unsupported.
    if c.model.lower() == "tacotron":
        raise NotImplementedError(' [!] Tacotron model is not ready.')
    # tacotron2
    model = MyModel(num_chars=num_chars,
                    num_speakers=num_speakers,
                    r=c.r,
                    postnet_output_dim=c.audio['num_mels'],
                    decoder_output_dim=c.audio['num_mels'],
                    attn_type=c.attention_type,
                    attn_win=c.windowing,
                    attn_norm=c.attention_norm,
                    prenet_type=c.prenet_type,
                    prenet_dropout=c.prenet_dropout,
                    forward_attn=c.use_forward_attn,
                    trans_agent=c.transition_agent,
                    forward_attn_mask=c.forward_attn_mask,
                    location_attn=c.location_attn,
                    attn_K=c.attention_heads,
                    separate_stopnet=c.separate_stopnet,
                    bidirectional_decoder=c.bidirectional_decoder,
                    enable_tflite=enable_tflite)
    return model
3,878
36.298077
85
py
TTS
TTS-master/TTS/tts/tf/utils/convert_torch_to_tf_utils.py
import numpy as np
import tensorflow as tf

# NOTE: linter has a problem with the current TF release
#pylint: disable=no-value-for-parameter
#pylint: disable=unexpected-keyword-arg


def tf_create_dummy_inputs():
    """ Create dummy inputs for TF Tacotron2 model """
    batch_size = 4
    max_input_length = 32
    max_mel_length = 128
    pad = 1
    n_chars = 24
    input_ids = tf.random.uniform([batch_size, max_input_length + pad], maxval=n_chars, dtype=tf.int32)
    input_lengths = np.random.randint(0, high=max_input_length+1 + pad, size=[batch_size])
    input_lengths[-1] = max_input_length
    input_lengths = tf.convert_to_tensor(input_lengths, dtype=tf.int32)
    mel_outputs = tf.random.uniform(shape=[batch_size, max_mel_length + pad, 80])
    mel_lengths = np.random.randint(0, high=max_mel_length+1 + pad, size=[batch_size])
    mel_lengths[-1] = max_mel_length
    mel_lengths = tf.convert_to_tensor(mel_lengths, dtype=tf.int32)
    return input_ids, input_lengths, mel_outputs, mel_lengths


def compare_torch_tf(torch_tensor, tf_tensor):
    """ Compute the average absolute difference b/w torch and tf tensors """
    return abs(torch_tensor.detach().numpy() - tf_tensor.numpy()).mean()


def convert_tf_name(tf_name):
    """ Convert certain patterns in TF layer names to Torch patterns """
    tf_name_tmp = tf_name
    # Ordering matters: more specific LSTM patterns are rewritten before the
    # generic '/kernel' and '/recurrent_kernel' fallbacks.
    tf_name_tmp = tf_name_tmp.replace(':0', '')
    tf_name_tmp = tf_name_tmp.replace('/forward_lstm/lstm_cell_1/recurrent_kernel', '/weight_hh_l0')
    tf_name_tmp = tf_name_tmp.replace('/forward_lstm/lstm_cell_2/kernel', '/weight_ih_l1')
    tf_name_tmp = tf_name_tmp.replace('/recurrent_kernel', '/weight_hh')
    tf_name_tmp = tf_name_tmp.replace('/kernel', '/weight')
    tf_name_tmp = tf_name_tmp.replace('/gamma', '/weight')
    tf_name_tmp = tf_name_tmp.replace('/beta', '/bias')
    tf_name_tmp = tf_name_tmp.replace('/', '.')
    return tf_name_tmp


def transfer_weights_torch_to_tf(tf_vars, var_map_dict, state_dict):
    """ Transfer weigths from torch state_dict to TF variables """
    print(" > Passing weights from Torch to TF ...")
    for tf_var in tf_vars:
        torch_var_name = var_map_dict[tf_var.name]
        print(f' | > {tf_var.name} <-- {torch_var_name}')
        # if tuple, it is a bias variable
        if not isinstance(torch_var_name, tuple):
            torch_layer_name = '.'.join(torch_var_name.split('.')[-2:])
            torch_weight = state_dict[torch_var_name]
        # NOTE(review): when torch_var_name IS a tuple, torch_weight /
        # torch_layer_name keep their values from the previous iteration;
        # only the bias branches below are valid in that case — confirm.
        if 'convolution1d/kernel' in tf_var.name or 'conv1d/kernel' in tf_var.name:
            # out_dim, in_dim, filter -> filter, in_dim, out_dim
            numpy_weight = torch_weight.permute([2, 1, 0]).detach().cpu().numpy()
        elif 'lstm_cell' in tf_var.name and 'kernel' in tf_var.name:
            numpy_weight = torch_weight.transpose(0, 1).detach().cpu().numpy()
        # if variable is for bidirectional lstm and it is a bias vector there
        # needs to be pre-defined two matching torch bias vectors
        elif '_lstm/lstm_cell_' in tf_var.name and 'bias' in tf_var.name:
            bias_vectors = [value for key, value in state_dict.items() if key in torch_var_name]
            assert len(bias_vectors) == 2
            numpy_weight = bias_vectors[0] + bias_vectors[1]
        elif 'rnn' in tf_var.name and 'kernel' in tf_var.name:
            numpy_weight = torch_weight.transpose(0, 1).detach().cpu().numpy()
        elif 'rnn' in tf_var.name and 'bias' in tf_var.name:
            bias_vectors = [value for key, value in state_dict.items() if torch_var_name[:-2] in key]
            assert len(bias_vectors) == 2
            numpy_weight = bias_vectors[0] + bias_vectors[1]
        elif 'linear_layer' in torch_layer_name and 'weight' in torch_var_name:
            numpy_weight = torch_weight.transpose(0, 1).detach().cpu().numpy()
        else:
            numpy_weight = torch_weight.detach().cpu().numpy()
        assert np.all(tf_var.shape == numpy_weight.shape), f" [!] weight shapes does not match: {tf_var.name} vs {torch_var_name} --> {tf_var.shape} vs {numpy_weight.shape}"
        tf.keras.backend.set_value(tf_var, numpy_weight)
    return tf_vars


def load_tf_vars(model_tf, tf_vars):
    """Assign each converted variable to its layer in the TF model."""
    for tf_var in tf_vars:
        model_tf.get_layer(tf_var.name).set_weights(tf_var)
    return model_tf
4,281
49.376471
173
py
TTS
TTS-master/TTS/tts/tf/utils/io.py
import pickle import datetime import tensorflow as tf def save_checkpoint(model, optimizer, current_step, epoch, r, output_path, **kwargs): state = { 'model': model.weights, 'optimizer': optimizer, 'step': current_step, 'epoch': epoch, 'date': datetime.date.today().strftime("%B %d, %Y"), 'r': r } state.update(kwargs) pickle.dump(state, open(output_path, 'wb')) def load_checkpoint(model, checkpoint_path): checkpoint = pickle.load(open(checkpoint_path, 'rb')) chkp_var_dict = {var.name: var.numpy() for var in checkpoint['model']} tf_vars = model.weights for tf_var in tf_vars: layer_name = tf_var.name try: chkp_var_value = chkp_var_dict[layer_name] except KeyError: class_name = list(chkp_var_dict.keys())[0].split("/")[0] layer_name = f"{class_name}/{layer_name}" chkp_var_value = chkp_var_dict[layer_name] tf.keras.backend.set_value(tf_var, chkp_var_value) if 'r' in checkpoint.keys(): model.decoder.set_r(checkpoint['r']) return model def load_tflite_model(tflite_path): tflite_model = tf.lite.Interpreter(model_path=tflite_path) tflite_model.allocate_tensors() return tflite_model
1,284
29.595238
85
py
TTS
TTS-master/TTS/utils/generic_utils.py
import datetime
import glob
import os
import shutil
import subprocess
import sys
from pathlib import Path

import torch


def get_git_branch():
    """Return the name of the currently checked-out git branch.

    Falls back to "inside_docker" when git is unavailable or fails.
    """
    try:
        out = subprocess.check_output(["git", "branch"]).decode("utf8")
        current = next(line for line in out.split("\n") if line.startswith("*"))
        # FIX: str.replace returns a new string; the original discarded the
        # result and returned the branch name still prefixed with "* ".
        current = current.replace("* ", "")
    except (subprocess.CalledProcessError, FileNotFoundError, StopIteration):
        # CalledProcessError: not a git repo; FileNotFoundError: no git binary;
        # StopIteration: no branch marked current (e.g. detached state output).
        current = "inside_docker"
    return current


def get_commit_hash():
    """https://stackoverflow.com/questions/14989858/get-the-current-git-hash-in-a-python-script"""
    # try:
    #     subprocess.check_output(['git', 'diff-index', '--quiet',
    #                              'HEAD'])  # Verify client is clean
    # except:
    #     raise RuntimeError(
    #         " !! Commit before training to get the commit hash.")
    try:
        commit = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD']).decode().strip()
    # Not copying .git folder into docker container
    except (subprocess.CalledProcessError, FileNotFoundError):
        commit = "0000000"
    print(' > Git Hash: {}'.format(commit))
    return commit


def create_experiment_folder(root_path, model_name, debug):
    """ Create a folder with the current date and time """
    date_str = datetime.datetime.now().strftime("%B-%d-%Y_%I+%M%p")
    commit_hash = 'debug' if debug else get_commit_hash()
    output_folder = os.path.join(
        root_path, model_name + '-' + date_str + '-' + commit_hash)
    os.makedirs(output_folder, exist_ok=True)
    print(" > Experiment folder: {}".format(output_folder))
    return output_folder


def remove_experiment_folder(experiment_path):
    """Check folder if there is a checkpoint, otherwise remove the folder"""
    checkpoint_files = glob.glob(experiment_path + "/*.pth.tar")
    if not checkpoint_files:
        if os.path.exists(experiment_path):
            shutil.rmtree(experiment_path, ignore_errors=True)
            print(" ! Run is removed from {}".format(experiment_path))
    else:
        print(" ! Run is kept in {}".format(experiment_path))


def count_parameters(model):
    r"""Count number of trainable parameters in a network"""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def get_user_data_dir(appname):
    """Return the platform-specific per-user data directory joined with ``appname``."""
    if sys.platform == "win32":
        import winreg  # pylint: disable=import-outside-toplevel
        key = winreg.OpenKey(
            winreg.HKEY_CURRENT_USER,
            r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
        )
        dir_, _ = winreg.QueryValueEx(key, "Local AppData")
        ans = Path(dir_).resolve(strict=False)
    elif sys.platform == 'darwin':
        ans = Path('~/Library/Application Support/').expanduser()
    else:
        ans = Path.home().joinpath('.local/share')
    return ans.joinpath(appname)


def set_init_dict(model_dict, checkpoint_state, c):
    """Merge ``checkpoint_state`` into ``model_dict`` for partial initialization.

    Layers missing from the model, layers with a different element count, and
    layers named in ``c.reinit_layers`` are skipped.
    """
    # Partial initialization: if there is a mismatch with new and old layer, it is skipped.
    for k, v in checkpoint_state.items():
        if k not in model_dict:
            print(" | > Layer missing in the model definition: {}".format(k))
    # 1. filter out unnecessary keys
    pretrained_dict = {
        k: v
        for k, v in checkpoint_state.items() if k in model_dict
    }
    # 2. filter out different size layers
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items()
        if v.numel() == model_dict[k].numel()
    }
    # 3. skip reinit layers
    if c.reinit_layers is not None:
        for reinit_layer_name in c.reinit_layers:
            pretrained_dict = {
                k: v
                for k, v in pretrained_dict.items()
                if reinit_layer_name not in k
            }
    # 4. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    print(" | > {} / {} layers are restored.".format(len(pretrained_dict),
                                                     len(model_dict)))
    return model_dict


class KeepAverage():
    """Track running averages (or exponential moving averages) of named values."""

    def __init__(self):
        self.avg_values = {}  # name -> current (running) average
        self.iters = {}       # name -> number of accumulated updates

    def __getitem__(self, key):
        return self.avg_values[key]

    def items(self):
        return self.avg_values.items()

    def add_value(self, name, init_val=0, init_iter=0):
        self.avg_values[name] = init_val
        self.iters[name] = init_iter

    def update_value(self, name, value, weighted_avg=False):
        """Fold ``value`` into the average for ``name`` (EMA if ``weighted_avg``)."""
        if name not in self.avg_values:
            # add value if not exist before
            self.add_value(name, init_val=value)
        else:
            # else update existing value
            if weighted_avg:
                self.avg_values[name] = 0.99 * self.avg_values[name] + 0.01 * value
                self.iters[name] += 1
            else:
                self.avg_values[name] = self.avg_values[name] * \
                    self.iters[name] + value
                self.iters[name] += 1
                self.avg_values[name] /= self.iters[name]

    def add_values(self, name_dict):
        for key, value in name_dict.items():
            self.add_value(key, init_val=value)

    def update_values(self, value_dict):
        for key, value in value_dict.items():
            self.update_value(key, value)


def check_argument(name, c, enum_list=None, max_val=None, min_val=None, restricted=False, val_type=None, alternative=None):
    """Validate config entry ``c[name]`` against range, enum, and type constraints.

    Skips validation entirely when ``alternative`` is present in ``c``.
    Raises AssertionError on any violation.
    """
    if alternative in c.keys() and c[alternative] is not None:
        return
    if restricted:
        assert name in c.keys(), f' [!] {name} not defined in config.json'
    if name in c.keys():
        if max_val:
            assert c[name] <= max_val, f' [!] {name} is larger than max value {max_val}'
        if min_val:
            assert c[name] >= min_val, f' [!] {name} is smaller than min value {min_val}'
        if enum_list:
            assert c[name].lower() in enum_list, f' [!] {name} is not a valid value'
        if isinstance(val_type, list):
            is_valid = False
            for typ in val_type:
                if isinstance(c[name], typ):
                    is_valid = True
            assert is_valid or c[name] is None, f' [!] {name} has wrong type - {type(c[name])} vs {val_type}'
        elif val_type:
            assert isinstance(c[name], val_type) or c[name] is None, f' [!] {name} has wrong type - {type(c[name])} vs {val_type}'
6,393
34.921348
130
py
TTS
TTS-master/TTS/utils/synthesizer.py
import time

import numpy as np
import torch

import pysbd
from TTS.utils.audio import AudioProcessor
from TTS.utils.io import load_config
from TTS.tts.utils.generic_utils import setup_model
from TTS.tts.utils.speakers import load_speaker_mapping
from TTS.vocoder.utils.generic_utils import setup_generator, interpolate_vocoder_input
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
from TTS.tts.utils.synthesis import *
from TTS.tts.utils.text import make_symbols, phonemes, symbols


class Synthesizer(object):
    """End-to-end text-to-speech pipeline: TTS model + optional neural vocoder."""

    def __init__(self, tts_checkpoint, tts_config, vocoder_checkpoint=None, vocoder_config=None, use_cuda=False):
        """Encapsulation of tts and vocoder models for inference.

        TODO: handle multi-speaker and GST inference.

        Args:
            tts_checkpoint (str): path to the tts model file.
            tts_config (str): path to the tts config file.
            vocoder_checkpoint (str, optional): path to the vocoder model file. Defaults to None.
            vocoder_config (str, optional): path to the vocoder config file. Defaults to None.
            use_cuda (bool, optional): enable/disable cuda. Defaults to False.
        """
        self.tts_checkpoint = tts_checkpoint
        self.tts_config = tts_config
        self.vocoder_checkpoint = vocoder_checkpoint
        self.vocoder_config = vocoder_config
        self.use_cuda = use_cuda
        self.wavernn = None
        self.vocoder_model = None
        self.num_speakers = 0
        self.tts_speakers = None
        self.speaker_embedding_dim = None
        self.seg = self.get_segmenter("en")
        self.use_cuda = use_cuda
        if self.use_cuda:
            assert torch.cuda.is_available(), "CUDA is not availabe on this machine."
        self.load_tts(tts_checkpoint, tts_config, use_cuda)
        if vocoder_checkpoint:
            self.load_vocoder(vocoder_checkpoint, vocoder_config, use_cuda)

    @staticmethod
    def get_segmenter(lang):
        # Sentence boundary detector used to split input text before synthesis.
        return pysbd.Segmenter(language=lang, clean=True)

    def load_speakers(self):
        # NOTE(review): self.model_config is never assigned in this class, so
        # this method raises AttributeError as written — confirm intended source.
        # load speakers
        if self.model_config.use_speaker_embedding is not None:
            self.tts_speakers = load_speaker_mapping(self.tts_config.tts_speakers_json)
            self.num_speakers = len(self.tts_speakers)
        else:
            self.num_speakers = 0
        # set external speaker embedding
        if self.tts_config.use_external_speaker_embedding_file:
            speaker_embedding = self.tts_speakers[list(self.tts_speakers.keys())[0]]['embedding']
            self.speaker_embedding_dim = len(speaker_embedding)

    def init_speaker(self, speaker_idx):
        """Return the external speaker embedding for ``speaker_idx``, or None."""
        # load speakers
        speaker_embedding = None
        if hasattr(self, 'tts_speakers') and speaker_idx is not None:
            assert speaker_idx < len(self.tts_speakers), f" [!] speaker_idx is out of the range. {speaker_idx} vs {len(self.tts_speakers)}"
            if self.tts_config.use_external_speaker_embedding_file:
                speaker_embedding = self.tts_speakers[speaker_idx]['embedding']
        return speaker_embedding

    def load_tts(self, tts_checkpoint, tts_config, use_cuda):
        """Load config, audio processor, symbol set, and TTS model weights."""
        # pylint: disable=global-statement
        global symbols, phonemes
        self.tts_config = load_config(tts_config)
        self.use_phonemes = self.tts_config.use_phonemes
        self.ap = AudioProcessor(**self.tts_config.audio)
        if 'characters' in self.tts_config.keys():
            symbols, phonemes = make_symbols(**self.tts_config.characters)
        if self.use_phonemes:
            self.input_size = len(phonemes)
        else:
            self.input_size = len(symbols)
        self.tts_model = setup_model(self.input_size, num_speakers=self.num_speakers, c=self.tts_config)
        self.tts_model.load_checkpoint(tts_config, tts_checkpoint, eval=True)
        if use_cuda:
            self.tts_model.cuda()

    def load_vocoder(self, model_file, model_config, use_cuda):
        """Load vocoder config, its audio processor, and generator weights."""
        self.vocoder_config = load_config(model_config)
        self.vocoder_ap = AudioProcessor(**self.vocoder_config['audio'])
        self.vocoder_model = setup_generator(self.vocoder_config)
        self.vocoder_model.load_checkpoint(self.vocoder_config, model_file, eval=True)
        if use_cuda:
            self.vocoder_model.cuda()

    def save_wav(self, wav, path):
        wav = np.array(wav)
        self.ap.save_wav(wav, path)

    def split_into_sentences(self, text):
        return self.seg.segment(text)

    def tts(self, text, speaker_idx=None):
        """Synthesize ``text`` sentence by sentence; return the waveform as a list of samples."""
        start_time = time.time()
        wavs = []
        sens = self.split_into_sentences(text)
        print(" > Text splitted to sentences.")
        print(sens)
        speaker_embedding = self.init_speaker(speaker_idx)
        # Griffin-Lim fallback when no neural vocoder is loaded.
        use_gl = self.vocoder_model is None
        for sen in sens:
            # synthesize voice
            waveform, _, _, mel_postnet_spec, _, _ = synthesis(
                self.tts_model,
                sen,
                self.tts_config,
                self.use_cuda,
                self.ap,
                speaker_idx,
                None,
                False,
                self.tts_config.enable_eos_bos_chars,
                use_gl,
                speaker_embedding=speaker_embedding)
            if not use_gl:
                # denormalize tts output based on tts audio config
                mel_postnet_spec = self.ap.denormalize(mel_postnet_spec.T).T
                device_type = "cuda" if self.use_cuda else "cpu"
                # renormalize spectrogram based on vocoder config
                vocoder_input = self.vocoder_ap.normalize(mel_postnet_spec.T)
                # compute scale factor for possible sample rate mismatch
                scale_factor = [1, self.vocoder_config['audio']['sample_rate'] / self.ap.sample_rate]
                if scale_factor[1] != 1:
                    print(" > interpolating tts model output.")
                    vocoder_input = interpolate_vocoder_input(scale_factor, vocoder_input)
                else:
                    vocoder_input = torch.tensor(vocoder_input).unsqueeze(0)  # pylint: disable=not-callable
                # run vocoder model
                # [1, T, C]
                waveform = self.vocoder_model.inference(vocoder_input.to(device_type))
            if self.use_cuda and not use_gl:
                waveform = waveform.cpu()
            if not use_gl:
                waveform = waveform.numpy()
            waveform = waveform.squeeze()
            # trim silence
            waveform = trim_silence(waveform, self.ap)
            wavs += list(waveform)
            # Short silence gap between sentences.
            wavs += [0] * 10000
        # compute stats
        process_time = time.time() - start_time
        audio_time = len(wavs) / self.tts_config.audio['sample_rate']
        print(f" > Processing time: {process_time}")
        print(f" > Real-time factor: {process_time / audio_time}")
        return wavs
6,964
39.970588
139
py
TTS
TTS-master/TTS/utils/radam.py
# from https://github.com/LiyuanLucasLiu/RAdam
import math
import torch
from torch.optim.optimizer import Optimizer


class RAdam(Optimizer):
    """Rectified Adam optimizer (Liu et al., "On the Variance of the Adaptive
    Learning Rate and Beyond"). Falls back to SGD-style updates while the
    variance rectification term is undefined, if ``degenerated_to_sgd``.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if eps < 0.0:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        self.degenerated_to_sgd = degenerated_to_sgd
        # Per-group step-size buffers: groups overriding betas get their own.
        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
            for param in params:
                if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
                    param['buffer'] = [[None, None, None] for _ in range(10)]
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)])
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):  # pylint: disable=useless-super-delegation
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step; returns the closure's loss if given."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Standard Adam first/second moment updates.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                # The rectification term depends only on the step count, so it
                # is cached (cyclically, 10 slots) and reused across parameters.
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    else:
                        step_size = -1
                    buffered[2] = step_size
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                    p.data.copy_(p_data_fp32)
                elif step_size > 0:
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
                    p.data.copy_(p_data_fp32)
        return loss
4,346
43.357143
175
py
TTS
TTS-master/TTS/utils/training.py
import torch
import numpy as np


def setup_torch_training_env(cudnn_enable, cudnn_benchmark):
    """Configure cuDNN flags, seed torch and report the CUDA setup.

    Args:
        cudnn_enable (bool): enable/disable cuDNN.
        cudnn_benchmark (bool): enable/disable the cuDNN autotuner.

    Returns:
        tuple: (use_cuda, num_gpus) — CUDA availability and GPU count.
    """
    torch.backends.cudnn.enabled = cudnn_enable
    torch.backends.cudnn.benchmark = cudnn_benchmark
    torch.manual_seed(54321)
    use_cuda = torch.cuda.is_available()
    num_gpus = torch.cuda.device_count()
    print(" > Using CUDA: ", use_cuda)
    print(" > Number of GPUs: ", num_gpus)
    return use_cuda, num_gpus


def check_update(model, grad_clip, ignore_stopnet=False, amp_opt_params=None):
    r'''Check model gradient against unexpected jumps and failures.

    Clips gradients to ``grad_clip`` and flags the step for skipping when
    the gradient norm is infinite.

    Args:
        model: model whose parameters are clipped.
        grad_clip (float): max gradient norm.
        ignore_stopnet (bool): exclude ``stopnet`` parameters from clipping.
        amp_opt_params: apex/AMP master params; clipped instead of the
            model parameters when given.

    Returns:
        tuple: (grad_norm, skip_flag)
    '''
    skip_flag = False
    if ignore_stopnet:
        if not amp_opt_params:
            grad_norm = torch.nn.utils.clip_grad_norm_(
                [param for name, param in model.named_parameters() if 'stopnet' not in name], grad_clip)
        else:
            grad_norm = torch.nn.utils.clip_grad_norm_(amp_opt_params, grad_clip)
    else:
        if not amp_opt_params:
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
        else:
            grad_norm = torch.nn.utils.clip_grad_norm_(amp_opt_params, grad_clip)
    # compatibility with different torch versions:
    # clip_grad_norm_ returns a float in old versions, a 0-dim tensor in new ones.
    if isinstance(grad_norm, float):
        if np.isinf(grad_norm):
            print(" | > Gradient is INF !!")
            skip_flag = True
    else:
        if torch.isinf(grad_norm):
            print(" | > Gradient is INF !!")
            skip_flag = True
    return grad_norm, skip_flag


def lr_decay(init_lr, global_step, warmup_steps):
    r'''Noam-style learning-rate schedule.

    from https://github.com/r9y9/tacotron_pytorch/blob/master/train.py
    '''
    warmup_steps = float(warmup_steps)
    step = global_step + 1.
    lr = init_lr * warmup_steps**0.5 * np.minimum(step * warmup_steps**-1.5,
                                                  step**-0.5)
    return lr


def adam_weight_decay(optimizer):
    """Custom weight decay operation, not effecting grad values.

    Applies decay directly to the parameter values (decoupled from the
    Adam gradient statistics).

    Returns:
        tuple: (optimizer, current_lr) where ``current_lr`` is the lr of
        the last visited param group (None when there are no parameters).
    """
    # BUGFIX: `current_lr` was previously unbound (NameError) when the
    # optimizer had no param groups / parameters.
    current_lr = None
    for group in optimizer.param_groups:
        for param in group['params']:
            current_lr = group['lr']
            weight_decay = group['weight_decay']
            factor = -weight_decay * group['lr']
            param.data = param.data.add(param.data, alpha=factor)
    return optimizer, current_lr


# pylint: disable=dangerous-default-value
def set_weight_decay(model, weight_decay, skip_list={"decoder.attention.v", "rnn", "lstm", "gru", "embedding"}):
    """Split model parameters into decayed / non-decayed groups.

    Skip biases, BatchNorm parameters, rnns.
    and attention projection layer v

    Returns:
        list: two optimizer param-group dicts (no_decay first).
    """
    decay = []
    no_decay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue

        # 1-D tensors (biases, norm scales) and blacklisted modules are
        # excluded from weight decay.
        if len(param.shape) == 1 or any(skip_name in name for skip_name in skip_list):
            no_decay.append(param)
        else:
            decay.append(param)
    return [{
        'params': no_decay,
        'weight_decay': 0.
    }, {
        'params': decay,
        'weight_decay': weight_decay
    }]


# pylint: disable=protected-access
class NoamLR(torch.optim.lr_scheduler._LRScheduler):
    """Noam learning-rate scheduler (linear warmup, inverse-sqrt decay)."""

    def __init__(self, optimizer, warmup_steps=0.1, last_epoch=-1):
        self.warmup_steps = float(warmup_steps)
        super(NoamLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        step = max(self.last_epoch, 1)
        return [
            base_lr * self.warmup_steps**0.5 *
            min(step * self.warmup_steps**-1.5, step**-0.5)
            for base_lr in self.base_lrs
        ]


def gradual_training_scheduler(global_step, config):
    """Setup the gradual training schedule wrt number of active GPUs.

    Picks the last entry of ``config.gradual_training`` whose step
    threshold has been reached (steps are scaled by the GPU count).

    Returns:
        tuple: (value_1, value_2) of the matched schedule entry.

    Raises:
        ValueError: when no schedule entry matches ``global_step``.
    """
    num_gpus = torch.cuda.device_count()
    if num_gpus == 0:
        num_gpus = 1
    new_values = None
    # we set the scheduling wrt num_gpus
    for values in config.gradual_training:
        if global_step * num_gpus >= values[0]:
            new_values = values
    # BUGFIX: raise a clear error instead of "'NoneType' object is not
    # subscriptable" when the first threshold has not been reached yet.
    if new_values is None:
        raise ValueError(
            " [!] No gradual training schedule entry matches step {}".format(global_step))
    return new_values[1], new_values[2]
3,943
32.709402
112
py
TTS
TTS-master/TTS/utils/distribute.py
# edited from https://github.com/fastai/imagenet-fast/blob/master/imagenet_nv/distributed.py import math import torch import torch.distributed as dist from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from torch.autograd import Variable from torch.utils.data.sampler import Sampler class DistributedSampler(Sampler): """ Non shuffling Distributed Sampler """ def __init__(self, dataset, num_replicas=None, rank=None): super(DistributedSampler, self).__init__(dataset) if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas def __iter__(self): indices = torch.arange(len(self.dataset)).tolist() # add extra samples to make it evenly divisible indices += indices[:(self.total_size - len(indices))] assert len(indices) == self.total_size # subsample indices = indices[self.rank:self.total_size:self.num_replicas] assert len(indices) == self.num_samples return iter(indices) def __len__(self): return self.num_samples def set_epoch(self, epoch): self.epoch = epoch def reduce_tensor(tensor, num_gpus): rt = tensor.clone() dist.all_reduce(rt, op=dist.reduce_op.SUM) rt /= num_gpus return rt def init_distributed(rank, num_gpus, group_name, dist_backend, dist_url): assert torch.cuda.is_available(), "Distributed mode requires CUDA." # Set cuda device so everything is done on the right GPU. 
torch.cuda.set_device(rank % torch.cuda.device_count()) # Initialize distributed communication dist.init_process_group( dist_backend, init_method=dist_url, world_size=num_gpus, rank=rank, group_name=group_name) def apply_gradient_allreduce(module): # sync model parameters for p in module.state_dict().values(): if not torch.is_tensor(p): continue dist.broadcast(p, 0) def allreduce_params(): if module.needs_reduction: module.needs_reduction = False # bucketing params based on value types buckets = {} for param in module.parameters(): if param.requires_grad and param.grad is not None: tp = type(param.data) if tp not in buckets: buckets[tp] = [] buckets[tp].append(param) for tp in buckets: bucket = buckets[tp] grads = [param.grad.data for param in bucket] coalesced = _flatten_dense_tensors(grads) dist.all_reduce(coalesced, op=dist.reduce_op.SUM) coalesced /= dist.get_world_size() for buf, synced in zip( grads, _unflatten_dense_tensors(coalesced, grads)): buf.copy_(synced) for param in list(module.parameters()): def allreduce_hook(*_): Variable._execution_engine.queue_callback(allreduce_params) #pylint: disable=protected-access if param.requires_grad: param.register_hook(allreduce_hook) def set_needs_reduction(self, *_): self.needs_reduction = True module.register_forward_hook(set_needs_reduction) return module
3,871
32.094017
106
py
TTS
TTS-master/TTS/vocoder/models/parallel_wavegan_generator.py
import math

import numpy as np
import torch

from TTS.vocoder.layers.parallel_wavegan import ResidualBlock
from TTS.vocoder.layers.upsample import ConvUpsample


class ParallelWaveganGenerator(torch.nn.Module):
    """PWGAN generator as in https://arxiv.org/pdf/1910.11480.pdf.
    It is similar to WaveNet with no causal convolution.
    It is conditioned on an aux feature (spectrogram) to generate
    an output waveform from an input noise.
    """
    # pylint: disable=dangerous-default-value
    def __init__(self,
                 in_channels=1,
                 out_channels=1,
                 kernel_size=3,
                 num_res_blocks=30,
                 stacks=3,
                 res_channels=64,
                 gate_channels=128,
                 skip_channels=64,
                 aux_channels=80,
                 dropout=0.0,
                 bias=True,
                 use_weight_norm=True,
                 upsample_factors=[4, 4, 4, 4],
                 inference_padding=2):
        super(ParallelWaveganGenerator, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aux_channels = aux_channels
        self.num_res_blocks = num_res_blocks
        self.stacks = stacks
        self.kernel_size = kernel_size
        self.upsample_factors = upsample_factors
        self.upsample_scale = np.prod(upsample_factors)
        self.inference_padding = inference_padding
        self.use_weight_norm = use_weight_norm

        # check the number of layers and stacks
        assert num_res_blocks % stacks == 0
        layers_per_stack = num_res_blocks // stacks

        # define first convolution
        self.first_conv = torch.nn.Conv1d(in_channels,
                                          res_channels,
                                          kernel_size=1,
                                          bias=True)

        # define conv + upsampling network
        self.upsample_net = ConvUpsample(upsample_factors=upsample_factors)

        # define residual blocks; dilation cycles within each stack.
        self.conv_layers = torch.nn.ModuleList()
        for layer in range(num_res_blocks):
            dilation = 2**(layer % layers_per_stack)
            conv = ResidualBlock(
                kernel_size=kernel_size,
                res_channels=res_channels,
                gate_channels=gate_channels,
                skip_channels=skip_channels,
                aux_channels=aux_channels,
                dilation=dilation,
                dropout=dropout,
                bias=bias,
            )
            self.conv_layers += [conv]

        # define output layers
        self.last_conv_layers = torch.nn.ModuleList([
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv1d(skip_channels,
                            skip_channels,
                            kernel_size=1,
                            bias=True),
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv1d(skip_channels,
                            out_channels,
                            kernel_size=1,
                            bias=True),
        ])

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, c):
        """
        c: (B, C ,T').
        o: Output tensor (B, out_channels, T)
        """
        # random noise as WaveNet input, one sample per output timestep
        x = torch.randn([c.shape[0], 1, c.shape[2] * self.upsample_scale])
        x = x.to(self.first_conv.bias.device)

        # perform upsampling of the conditioning features
        if c is not None and self.upsample_net is not None:
            c = self.upsample_net(c)
            assert c.shape[-1] == x.shape[
                -1], f" [!] Upsampling scale does not match the expected output. {c.shape} vs {x.shape}"

        # encode to hidden representation
        x = self.first_conv(x)
        skips = 0
        for f in self.conv_layers:
            x, h = f(x, c)
            skips += h
        skips *= math.sqrt(1.0 / len(self.conv_layers))

        # apply final layers
        x = skips
        for f in self.last_conv_layers:
            x = f(x)

        return x

    @torch.no_grad()
    def inference(self, c):
        """Inference entry point; pads the conditioning features first."""
        c = c.to(self.first_conv.weight.device)
        c = torch.nn.functional.pad(
            c, (self.inference_padding, self.inference_padding), 'replicate')
        return self.forward(c)

    def remove_weight_norm(self):
        def _remove_weight_norm(m):
            try:
                # print(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        def _apply_weight_norm(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)):
                torch.nn.utils.weight_norm(m)
                # print(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    @staticmethod
    def _get_receptive_field_size(layers,
                                  stacks,
                                  kernel_size,
                                  dilation=lambda x: 2**x):
        assert layers % stacks == 0
        layers_per_cycle = layers // stacks
        dilations = [dilation(i % layers_per_cycle) for i in range(layers)]
        return (kernel_size - 1) * sum(dilations) + 1

    @property
    def receptive_field_size(self):
        """Receptive field of the residual stack, in samples."""
        # BUGFIX: this previously read `self.layers`, an attribute that is
        # never set in __init__ (the block count is stored as
        # `self.num_res_blocks`), so the property always raised
        # AttributeError.
        return self._get_receptive_field_size(self.num_res_blocks,
                                              self.stacks, self.kernel_size)

    def load_checkpoint(self, config, checkpoint_path, eval=False):  # pylint: disable=unused-argument, redefined-builtin
        """Load weights from ``checkpoint_path``; optionally switch to eval
        mode and strip weight norm for inference."""
        state = torch.load(checkpoint_path, map_location=torch.device('cpu'))
        self.load_state_dict(state['model'])
        if eval:
            self.eval()
            assert not self.training
            if self.use_weight_norm:
                self.remove_weight_norm()
5,992
34.461538
121
py
TTS
TTS-master/TTS/vocoder/models/parallel_wavegan_discriminator.py
import math

import torch
from torch import nn

from TTS.vocoder.layers.parallel_wavegan import ResidualBlock


class ParallelWaveganDiscriminator(nn.Module):
    """PWGAN discriminator as in https://arxiv.org/abs/1910.11480.
    It classifies each audio window real/fake and returns a sequence
    of predictions.
        It is a stack of convolutional blocks with dilation.
    """
    # pylint: disable=dangerous-default-value
    def __init__(self,
                 in_channels=1,
                 out_channels=1,
                 kernel_size=3,
                 num_layers=10,
                 conv_channels=64,
                 dilation_factor=1,
                 nonlinear_activation="LeakyReLU",
                 nonlinear_activation_params={"negative_slope": 0.2},
                 bias=True,
                 ):
        super(ParallelWaveganDiscriminator, self).__init__()
        assert (kernel_size - 1) % 2 == 0, " [!] does not support even number kernel size."
        assert dilation_factor > 0, " [!] dilation factor must be > 0."
        self.conv_layers = nn.ModuleList()
        conv_in_channels = in_channels
        for i in range(num_layers - 1):
            if i == 0:
                dilation = 1
            else:
                # dilation grows linearly (factor 1) or exponentially.
                dilation = i if dilation_factor == 1 else dilation_factor ** i
                conv_in_channels = conv_channels
            # "same" padding for the dilated convolution.
            padding = (kernel_size - 1) // 2 * dilation
            conv_layer = [
                nn.Conv1d(conv_in_channels,
                          conv_channels,
                          kernel_size=kernel_size,
                          padding=padding,
                          dilation=dilation,
                          bias=bias),
                getattr(nn, nonlinear_activation)(inplace=True,
                                                  **nonlinear_activation_params)
            ]
            self.conv_layers += conv_layer
        padding = (kernel_size - 1) // 2
        # final 1-channel projection producing per-sample predictions.
        last_conv_layer = nn.Conv1d(
            conv_in_channels, out_channels,
            kernel_size=kernel_size, padding=padding, bias=bias)
        self.conv_layers += [last_conv_layer]
        self.apply_weight_norm()

    def forward(self, x):
        """
        x : (B, 1, T).
        Returns:
            Tensor: (B, 1, T)
        """
        for f in self.conv_layers:
            x = f(x)
        return x

    def apply_weight_norm(self):
        def _apply_weight_norm(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)):
                torch.nn.utils.weight_norm(m)

        self.apply(_apply_weight_norm)

    def remove_weight_norm(self):
        def _remove_weight_norm(m):
            try:
                # print(f"Weight norm is removed from {m}.")
                nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return
        self.apply(_remove_weight_norm)


class ResidualParallelWaveganDiscriminator(nn.Module):
    """WaveNet-like discriminator: a stack of gated residual blocks whose
    skip connections are summed and projected to per-sample predictions."""
    # pylint: disable=dangerous-default-value
    def __init__(self,
                 in_channels=1,
                 out_channels=1,
                 kernel_size=3,
                 num_layers=30,
                 stacks=3,
                 res_channels=64,
                 gate_channels=128,
                 skip_channels=64,
                 dropout=0.0,
                 bias=True,
                 nonlinear_activation="LeakyReLU",
                 nonlinear_activation_params={"negative_slope": 0.2},
                 ):
        super(ResidualParallelWaveganDiscriminator, self).__init__()
        assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_layers = num_layers
        self.stacks = stacks
        self.kernel_size = kernel_size
        # normalizes the summed skip connections by 1/sqrt(num_layers).
        self.res_factor = math.sqrt(1.0 / num_layers)

        # check the number of num_layers and stacks
        assert num_layers % stacks == 0
        layers_per_stack = num_layers // stacks

        # define first convolution
        self.first_conv = nn.Sequential(
            nn.Conv1d(in_channels,
                      res_channels,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=True),
            getattr(nn, nonlinear_activation)(inplace=True,
                                              **nonlinear_activation_params),
        )

        # define residual blocks; dilation cycles within each stack.
        # aux_channels=-1 disables the conditioning path.
        self.conv_layers = nn.ModuleList()
        for layer in range(num_layers):
            dilation = 2 ** (layer % layers_per_stack)
            conv = ResidualBlock(
                kernel_size=kernel_size,
                res_channels=res_channels,
                gate_channels=gate_channels,
                skip_channels=skip_channels,
                aux_channels=-1,
                dilation=dilation,
                dropout=dropout,
                bias=bias,
                use_causal_conv=False,
            )
            self.conv_layers += [conv]

        # define output layers
        self.last_conv_layers = nn.ModuleList([
            getattr(nn,
                    nonlinear_activation)(inplace=True,
                                          **nonlinear_activation_params),
            nn.Conv1d(skip_channels,
                      skip_channels,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=True),
            getattr(nn, nonlinear_activation)(inplace=True,
                                              **nonlinear_activation_params),
            nn.Conv1d(skip_channels,
                      out_channels,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=True),
        ])

        # apply weight norm
        self.apply_weight_norm()

    def forward(self, x):
        """
        x: (B, 1, T).
        """
        x = self.first_conv(x)

        skips = 0
        for f in self.conv_layers:
            x, h = f(x, None)
            skips += h
        skips *= self.res_factor

        # apply final layers
        x = skips
        for f in self.last_conv_layers:
            x = f(x)
        return x

    def apply_weight_norm(self):
        def _apply_weight_norm(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)):
                torch.nn.utils.weight_norm(m)

        self.apply(_apply_weight_norm)

    def remove_weight_norm(self):
        def _remove_weight_norm(m):
            try:
                print(f"Weight norm is removed from {m}.")
                nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)
33.691919
91
py
TTS
TTS-master/TTS/vocoder/models/wavernn.py
import sys
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import time

# fix this
from TTS.utils.audio import AudioProcessor as ap
from TTS.vocoder.utils.distribution import (
    sample_from_gaussian,
    sample_from_discretized_mix_logistic,
)


def stream(string, variables):
    # In-place progress line on stdout (carriage return, no newline).
    sys.stdout.write(f"\r{string}" % variables)


# pylint: disable=abstract-method
# relates https://github.com/pytorch/pytorch/issues/42305
class ResBlock(nn.Module):
    """1x1-conv residual block with batch norm, used by MelResNet."""

    def __init__(self, dims):
        super().__init__()
        self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
        self.batch_norm1 = nn.BatchNorm1d(dims)
        self.batch_norm2 = nn.BatchNorm1d(dims)

    def forward(self, x):
        residual = x
        x = self.conv1(x)
        x = self.batch_norm1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = self.batch_norm2(x)
        return x + residual


class MelResNet(nn.Module):
    """Residual conv network producing auxiliary features from mels.

    The input conv uses kernel size 2*pad+1 (no padding), trimming ``pad``
    frames from each side of the mel sequence.
    """

    def __init__(self, num_res_blocks, in_dims, compute_dims, res_out_dims, pad):
        super().__init__()
        k_size = pad * 2 + 1
        self.conv_in = nn.Conv1d(
            in_dims, compute_dims, kernel_size=k_size, bias=False)
        self.batch_norm = nn.BatchNorm1d(compute_dims)
        self.layers = nn.ModuleList()
        for _ in range(num_res_blocks):
            self.layers.append(ResBlock(compute_dims))
        self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1)

    def forward(self, x):
        x = self.conv_in(x)
        x = self.batch_norm(x)
        x = F.relu(x)
        for f in self.layers:
            x = f(x)
        x = self.conv_out(x)
        return x


class Stretch2d(nn.Module):
    """Nearest-neighbour upsampling of a (B, C, H, W) tensor by integer
    factors (y_scale on H, x_scale on W) via repeat + view."""

    def __init__(self, x_scale, y_scale):
        super().__init__()
        self.x_scale = x_scale
        self.y_scale = y_scale

    def forward(self, x):
        b, c, h, w = x.size()
        x = x.unsqueeze(-1).unsqueeze(3)
        x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale)
        return x.view(b, c, h * self.y_scale, w * self.x_scale)


class UpsampleNetwork(nn.Module):
    """Upsample mels to sample rate with stretch + learned smoothing convs;
    optionally computes auxiliary features with a MelResNet branch."""

    def __init__(
        self,
        feat_dims,
        upsample_scales,
        compute_dims,
        num_res_blocks,
        res_out_dims,
        pad,
        use_aux_net,
    ):
        super().__init__()
        # NOTE(review): np.cumproduct is deprecated (removed in NumPy 2.0);
        # np.cumprod is the drop-in replacement — confirm the pinned numpy.
        self.total_scale = np.cumproduct(upsample_scales)[-1]
        # samples trimmed at each end to undo the mel padding.
        self.indent = pad * self.total_scale
        self.use_aux_net = use_aux_net
        if use_aux_net:
            self.resnet = MelResNet(
                num_res_blocks, feat_dims, compute_dims, res_out_dims, pad
            )
            self.resnet_stretch = Stretch2d(self.total_scale, 1)
        self.up_layers = nn.ModuleList()
        for scale in upsample_scales:
            k_size = (1, scale * 2 + 1)
            padding = (0, scale)
            stretch = Stretch2d(scale, 1)
            conv = nn.Conv2d(1, 1, kernel_size=k_size,
                             padding=padding, bias=False)
            # initialise as an averaging filter so upsampling starts smooth.
            conv.weight.data.fill_(1.0 / k_size[1])
            self.up_layers.append(stretch)
            self.up_layers.append(conv)

    def forward(self, m):
        if self.use_aux_net:
            aux = self.resnet(m).unsqueeze(1)
            aux = self.resnet_stretch(aux)
            aux = aux.squeeze(1)
            aux = aux.transpose(1, 2)
        else:
            aux = None
        m = m.unsqueeze(1)
        for f in self.up_layers:
            m = f(m)
        m = m.squeeze(1)[:, :, self.indent: -self.indent]
        return m.transpose(1, 2), aux


class Upsample(nn.Module):
    """Simpler mel upsampler using linear interpolation instead of the
    learned stretch/conv stack of UpsampleNetwork."""

    def __init__(
        self, scale, pad, num_res_blocks, feat_dims, compute_dims, res_out_dims, use_aux_net
    ):
        super().__init__()
        self.scale = scale
        self.pad = pad
        self.indent = pad * scale
        self.use_aux_net = use_aux_net
        self.resnet = MelResNet(num_res_blocks, feat_dims,
                                compute_dims, res_out_dims, pad)

    def forward(self, m):
        if self.use_aux_net:
            aux = self.resnet(m)
            aux = torch.nn.functional.interpolate(
                aux, scale_factor=self.scale, mode="linear", align_corners=True
            )
            aux = aux.transpose(1, 2)
        else:
            aux = None
        m = torch.nn.functional.interpolate(
            m, scale_factor=self.scale, mode="linear", align_corners=True
        )
        m = m[:, :, self.indent: -self.indent]
        m = m * 0.045  # empirically found

        return m.transpose(1, 2), aux


class WaveRNN(nn.Module):
    """WaveRNN vocoder (https://arxiv.org/abs/1802.08435) with optional
    mixture-of-logistics ('mold'), Gaussian ('gauss') or categorical
    (integer bit depth) output distributions."""

    def __init__(self,
                 rnn_dims,
                 fc_dims,
                 mode,
                 mulaw,
                 pad,
                 use_aux_net,
                 use_upsample_net,
                 upsample_factors,
                 feat_dims,
                 compute_dims,
                 res_out_dims,
                 num_res_blocks,
                 hop_length,
                 sample_rate,
                 ):
        super().__init__()
        self.mode = mode
        self.mulaw = mulaw
        self.pad = pad
        self.use_upsample_net = use_upsample_net
        self.use_aux_net = use_aux_net
        # output distribution size depends on the mode.
        if isinstance(self.mode, int):
            self.n_classes = 2 ** self.mode
        elif self.mode == "mold":
            self.n_classes = 3 * 10
        elif self.mode == "gauss":
            self.n_classes = 2
        else:
            raise RuntimeError("Unknown model mode value - ", self.mode)

        self.rnn_dims = rnn_dims
        # aux features are split into 4 equal slices, one per network stage.
        self.aux_dims = res_out_dims // 4
        self.hop_length = hop_length
        self.sample_rate = sample_rate

        if self.use_upsample_net:
            assert (
                np.cumproduct(upsample_factors)[-1] == self.hop_length
            ), " [!] upsample scales needs to be equal to hop_length"
            self.upsample = UpsampleNetwork(
                feat_dims,
                upsample_factors,
                compute_dims,
                num_res_blocks,
                res_out_dims,
                pad,
                use_aux_net,
            )
        else:
            self.upsample = Upsample(
                hop_length,
                pad,
                num_res_blocks,
                feat_dims,
                compute_dims,
                res_out_dims,
                use_aux_net,
            )
        if self.use_aux_net:
            self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims)
            self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
            self.rnn2 = nn.GRU(rnn_dims + self.aux_dims,
                               rnn_dims, batch_first=True)
            self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims)
            self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims)
            self.fc3 = nn.Linear(fc_dims, self.n_classes)
        else:
            self.I = nn.Linear(feat_dims + 1, rnn_dims)
            self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
            self.rnn2 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
            self.fc1 = nn.Linear(rnn_dims, fc_dims)
            self.fc2 = nn.Linear(fc_dims, fc_dims)
            self.fc3 = nn.Linear(fc_dims, self.n_classes)

    def forward(self, x, mels):
        """Teacher-forced training pass.

        Args:
            x: previous audio samples.
            mels: mel spectrogram conditioning.
        """
        bsize = x.size(0)
        h1 = torch.zeros(1, bsize, self.rnn_dims).to(x.device)
        h2 = torch.zeros(1, bsize, self.rnn_dims).to(x.device)
        mels, aux = self.upsample(mels)

        if self.use_aux_net:
            # split aux features into the 4 per-stage slices.
            aux_idx = [self.aux_dims * i for i in range(5)]
            a1 = aux[:, :, aux_idx[0]: aux_idx[1]]
            a2 = aux[:, :, aux_idx[1]: aux_idx[2]]
            a3 = aux[:, :, aux_idx[2]: aux_idx[3]]
            a4 = aux[:, :, aux_idx[3]: aux_idx[4]]

        x = (
            torch.cat([x.unsqueeze(-1), mels, a1], dim=2)
            if self.use_aux_net
            else torch.cat([x.unsqueeze(-1), mels], dim=2)
        )
        x = self.I(x)
        res = x
        self.rnn1.flatten_parameters()
        x, _ = self.rnn1(x, h1)

        x = x + res
        res = x
        x = torch.cat([x, a2], dim=2) if self.use_aux_net else x
        self.rnn2.flatten_parameters()
        x, _ = self.rnn2(x, h2)

        x = x + res
        x = torch.cat([x, a3], dim=2) if self.use_aux_net else x
        x = F.relu(self.fc1(x))

        x = torch.cat([x, a4], dim=2) if self.use_aux_net else x
        x = F.relu(self.fc2(x))
        return self.fc3(x)

    def inference(self, mels, batched, target, overlap):
        """Autoregressive sample-by-sample generation.

        Args:
            mels: mel spectrogram (tensor or ndarray), (C, T) or (B, C, T).
            batched (bool): fold the sequence into overlapping chunks for
                parallel decoding (see fold_with_overlap).
            target (int): chunk length when batched.
            overlap (int): crossfade/warmup overlap when batched.
        """
        self.eval()
        device = mels.device
        output = []
        start = time.time()
        # GRU -> GRUCell so we can step one sample at a time.
        rnn1 = self.get_gru_cell(self.rnn1)
        rnn2 = self.get_gru_cell(self.rnn2)

        with torch.no_grad():
            if isinstance(mels, np.ndarray):
                mels = torch.FloatTensor(mels).to(device)

            if mels.ndim == 2:
                mels = mels.unsqueeze(0)
            wave_len = (mels.size(-1) - 1) * self.hop_length

            mels = self.pad_tensor(mels.transpose(
                1, 2), pad=self.pad, side="both")
            mels, aux = self.upsample(mels.transpose(1, 2))

            if batched:
                mels = self.fold_with_overlap(mels, target, overlap)
                if aux is not None:
                    aux = self.fold_with_overlap(aux, target, overlap)

            b_size, seq_len, _ = mels.size()

            h1 = torch.zeros(b_size, self.rnn_dims).to(device)
            h2 = torch.zeros(b_size, self.rnn_dims).to(device)
            x = torch.zeros(b_size, 1).to(device)

            if self.use_aux_net:
                d = self.aux_dims
                aux_split = [aux[:, :, d * i: d * (i + 1)] for i in range(4)]

            for i in range(seq_len):
                m_t = mels[:, i, :]

                if self.use_aux_net:
                    a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split)

                x = (
                    torch.cat([x, m_t, a1_t], dim=1)
                    if self.use_aux_net
                    else torch.cat([x, m_t], dim=1)
                )
                x = self.I(x)
                h1 = rnn1(x, h1)

                x = x + h1
                inp = torch.cat([x, a2_t], dim=1) if self.use_aux_net else x
                h2 = rnn2(inp, h2)

                x = x + h2
                x = torch.cat([x, a3_t], dim=1) if self.use_aux_net else x
                x = F.relu(self.fc1(x))

                x = torch.cat([x, a4_t], dim=1) if self.use_aux_net else x
                x = F.relu(self.fc2(x))

                logits = self.fc3(x)

                # sample the next audio value from the predicted distribution.
                if self.mode == "mold":
                    sample = sample_from_discretized_mix_logistic(
                        logits.unsqueeze(0).transpose(1, 2)
                    )
                    output.append(sample.view(-1))
                    x = sample.transpose(0, 1).to(device)
                elif self.mode == "gauss":
                    sample = sample_from_gaussian(
                        logits.unsqueeze(0).transpose(1, 2))
                    output.append(sample.view(-1))
                    x = sample.transpose(0, 1).to(device)
                elif isinstance(self.mode, int):
                    posterior = F.softmax(logits, dim=1)
                    distrib = torch.distributions.Categorical(posterior)

                    # map class index back to [-1, 1] audio range.
                    sample = 2 * distrib.sample().float() / (self.n_classes - 1.0) - 1.0
                    output.append(sample)
                    x = sample.unsqueeze(-1)
                else:
                    raise RuntimeError(
                        "Unknown model mode value - ", self.mode)

                if i % 100 == 0:
                    self.gen_display(i, seq_len, b_size, start)

        output = torch.stack(output).transpose(0, 1)
        output = output.cpu().numpy()
        output = output.astype(np.float64)

        if batched:
            output = self.xfade_and_unfold(output, target, overlap)
        else:
            output = output[0]

        if self.mulaw and isinstance(self.mode, int):
            output = ap.mulaw_decode(output, self.mode)

        # Fade-out at the end to avoid signal cutting out suddenly
        fade_out = np.linspace(1, 0, 20 * self.hop_length)
        output = output[:wave_len]
        if wave_len > len(fade_out):
            output[-20 * self.hop_length:] *= fade_out

        self.train()
        return output

    def gen_display(self, i, seq_len, b_size, start):
        # progress line with generation speed vs. realtime.
        gen_rate = (i + 1) / (time.time() - start) * b_size / 1000
        realtime_ratio = gen_rate * 1000 / self.sample_rate
        stream(
            "%i/%i -- batch_size: %i -- gen_rate: %.1f kHz -- x_realtime: %.1f  ",
            (i * b_size, seq_len * b_size, b_size, gen_rate, realtime_ratio),
        )

    def fold_with_overlap(self, x, target, overlap):
        """Fold the tensor with overlap for quick batched inference.
            Overlap will be used for crossfading in xfade_and_unfold()

        Args:
            x (tensor)    : Upsampled conditioning features.
                            shape=(1, timesteps, features)
            target (int)  : Target timesteps for each index of batch
            overlap (int) : Timesteps for both xfade and rnn warmup

        Return:
            (tensor) : shape=(num_folds, target + 2 * overlap, features)

        Details:
            x = [[h1, h2, ... hn]]

            Where each h is a vector of conditioning features

            Eg: target=2, overlap=1 with x.size(1)=10

            folded = [[h1, h2, h3, h4],
                      [h4, h5, h6, h7],
                      [h7, h8, h9, h10]]
        """

        _, total_len, features = x.size()

        # Calculate variables needed
        num_folds = (total_len - overlap) // (target + overlap)
        extended_len = num_folds * (overlap + target) + overlap
        remaining = total_len - extended_len

        # Pad if some time steps poking out
        if remaining != 0:
            num_folds += 1
            padding = target + 2 * overlap - remaining
            x = self.pad_tensor(x, padding, side="after")

        folded = torch.zeros(num_folds, target + 2 *
                             overlap, features).to(x.device)

        # Get the values for the folded tensor
        for i in range(num_folds):
            start = i * (target + overlap)
            end = start + target + 2 * overlap
            folded[i] = x[:, start:end, :]

        return folded

    @staticmethod
    def get_gru_cell(gru):
        # copy a (1-layer) GRU's weights into a GRUCell for stepwise use.
        gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size)
        gru_cell.weight_hh.data = gru.weight_hh_l0.data
        gru_cell.weight_ih.data = gru.weight_ih_l0.data
        gru_cell.bias_hh.data = gru.bias_hh_l0.data
        gru_cell.bias_ih.data = gru.bias_ih_l0.data
        return gru_cell

    @staticmethod
    def pad_tensor(x, pad, side="both"):
        # NB - this is just a quick method i need right now
        # i.e., it won't generalise to other shapes/dims
        b, t, c = x.size()
        total = t + 2 * pad if side == "both" else t + pad
        padded = torch.zeros(b, total, c).to(x.device)
        if side in ("before", "both"):
            padded[:, pad: pad + t, :] = x
        elif side == "after":
            padded[:, :t, :] = x
        return padded

    @staticmethod
    def xfade_and_unfold(y, target, overlap):
        """Applies a crossfade and unfolds into a 1d array.

        Args:
            y (ndarry)    : Batched sequences of audio samples
                            shape=(num_folds, target + 2 * overlap)
                            dtype=np.float64
            overlap (int) : Timesteps for both xfade and rnn warmup

        Return:
            (ndarry) : audio samples in a 1d array
                       shape=(total_len)
                       dtype=np.float64

        Details:
            y = [[seq1],
                 [seq2],
                 [seq3]]

            Apply a gain envelope at both ends of the sequences

            y = [[seq1_in, seq1_target, seq1_out],
                 [seq2_in, seq2_target, seq2_out],
                 [seq3_in, seq3_target, seq3_out]]

            Stagger and add up the groups of samples:

            [seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...]
        """

        num_folds, length = y.shape
        target = length - 2 * overlap
        total_len = num_folds * (target + overlap) + overlap

        # Need some silence for the rnn warmup
        silence_len = overlap // 2
        fade_len = overlap - silence_len
        silence = np.zeros((silence_len), dtype=np.float64)

        # Equal power crossfade
        t = np.linspace(-1, 1, fade_len, dtype=np.float64)
        fade_in = np.sqrt(0.5 * (1 + t))
        fade_out = np.sqrt(0.5 * (1 - t))

        # Concat the silence to the fades
        fade_in = np.concatenate([silence, fade_in])
        fade_out = np.concatenate([fade_out, silence])

        # Apply the gain to the overlap samples
        y[:, :overlap] *= fade_in
        y[:, -overlap:] *= fade_out

        unfolded = np.zeros((total_len), dtype=np.float64)

        # Loop to add up all the samples
        for i in range(num_folds):
            start = i * (target + overlap)
            end = start + target + 2 * overlap
            unfolded[start:end] += y[i]

        return unfolded

    def load_checkpoint(self, config, checkpoint_path, eval=False):  # pylint: disable=unused-argument, redefined-builtin
        """Load weights from ``checkpoint_path``; optionally switch to
        eval mode."""
        state = torch.load(checkpoint_path, map_location=torch.device('cpu'))
        self.load_state_dict(state['model'])
        if eval:
            self.eval()
            assert not self.training
17,680
33.736739
121
py
TTS
TTS-master/TTS/vocoder/models/melgan_discriminator.py
import numpy as np from torch import nn from torch.nn.utils import weight_norm class MelganDiscriminator(nn.Module): def __init__(self, in_channels=1, out_channels=1, kernel_sizes=(5, 3), base_channels=16, max_channels=1024, downsample_factors=(4, 4, 4, 4)): super(MelganDiscriminator, self).__init__() self.layers = nn.ModuleList() layer_kernel_size = np.prod(kernel_sizes) layer_padding = (layer_kernel_size - 1) // 2 # initial layer self.layers += [ nn.Sequential( nn.ReflectionPad1d(layer_padding), weight_norm( nn.Conv1d(in_channels, base_channels, layer_kernel_size, stride=1)), nn.LeakyReLU(0.2, inplace=True)) ] # downsampling layers layer_in_channels = base_channels for downsample_factor in downsample_factors: layer_out_channels = min(layer_in_channels * downsample_factor, max_channels) layer_kernel_size = downsample_factor * 10 + 1 layer_padding = (layer_kernel_size - 1) // 2 layer_groups = layer_in_channels // 4 self.layers += [ nn.Sequential( weight_norm( nn.Conv1d(layer_in_channels, layer_out_channels, kernel_size=layer_kernel_size, stride=downsample_factor, padding=layer_padding, groups=layer_groups)), nn.LeakyReLU(0.2, inplace=True)) ] layer_in_channels = layer_out_channels # last 2 layers layer_padding1 = (kernel_sizes[0] - 1) // 2 layer_padding2 = (kernel_sizes[1] - 1) // 2 self.layers += [ nn.Sequential( weight_norm( nn.Conv1d(layer_out_channels, layer_out_channels, kernel_size=kernel_sizes[0], stride=1, padding=layer_padding1)), nn.LeakyReLU(0.2, inplace=True), ), weight_norm( nn.Conv1d(layer_out_channels, out_channels, kernel_size=kernel_sizes[1], stride=1, padding=layer_padding2)), ] def forward(self, x): feats = [] for layer in self.layers: x = layer(x) feats.append(x) return x, feats
2,866
35.291139
75
py
TTS
TTS-master/TTS/vocoder/models/random_window_discriminator.py
import numpy as np from torch import nn class GBlock(nn.Module): def __init__(self, in_channels, cond_channels, downsample_factor): super(GBlock, self).__init__() self.in_channels = in_channels self.cond_channels = cond_channels self.downsample_factor = downsample_factor self.start = nn.Sequential( nn.AvgPool1d(downsample_factor, stride=downsample_factor), nn.ReLU(), nn.Conv1d(in_channels, in_channels * 2, kernel_size=3, padding=1)) self.lc_conv1d = nn.Conv1d(cond_channels, in_channels * 2, kernel_size=1) self.end = nn.Sequential( nn.ReLU(), nn.Conv1d(in_channels * 2, in_channels * 2, kernel_size=3, dilation=2, padding=2)) self.residual = nn.Sequential( nn.Conv1d(in_channels, in_channels * 2, kernel_size=1), nn.AvgPool1d(downsample_factor, stride=downsample_factor)) def forward(self, inputs, conditions): outputs = self.start(inputs) + self.lc_conv1d(conditions) outputs = self.end(outputs) residual_outputs = self.residual(inputs) outputs = outputs + residual_outputs return outputs class DBlock(nn.Module): def __init__(self, in_channels, out_channels, downsample_factor): super(DBlock, self).__init__() self.in_channels = in_channels self.downsample_factor = downsample_factor self.out_channels = out_channels self.donwsample_layer = nn.AvgPool1d(downsample_factor, stride=downsample_factor) self.layers = nn.Sequential( nn.ReLU(), nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1), nn.ReLU(), nn.Conv1d(out_channels, out_channels, kernel_size=3, dilation=2, padding=2)) self.residual = nn.Sequential( nn.Conv1d(in_channels, out_channels, kernel_size=1), ) def forward(self, inputs): if self.downsample_factor > 1: outputs = self.layers(self.donwsample_layer(inputs))\ + self.donwsample_layer(self.residual(inputs)) else: outputs = self.layers(inputs) + self.residual(inputs) return outputs class ConditionalDiscriminator(nn.Module): def __init__(self, in_channels, cond_channels, downsample_factors=(2, 2, 2), out_channels=(128, 256)): super(ConditionalDiscriminator, 
self).__init__() assert len(downsample_factors) == len(out_channels) + 1 self.in_channels = in_channels self.cond_channels = cond_channels self.downsample_factors = downsample_factors self.out_channels = out_channels self.pre_cond_layers = nn.ModuleList() self.post_cond_layers = nn.ModuleList() # layers before condition features self.pre_cond_layers += [DBlock(in_channels, 64, 1)] in_channels = 64 for (i, channel) in enumerate(out_channels): self.pre_cond_layers.append( DBlock(in_channels, channel, downsample_factors[i])) in_channels = channel # condition block self.cond_block = GBlock(in_channels, cond_channels, downsample_factors[-1]) # layers after condition block self.post_cond_layers += [ DBlock(in_channels * 2, in_channels * 2, 1), DBlock(in_channels * 2, in_channels * 2, 1), nn.AdaptiveAvgPool1d(1), nn.Conv1d(in_channels * 2, 1, kernel_size=1), ] def forward(self, inputs, conditions): batch_size = inputs.size()[0] outputs = inputs.view(batch_size, self.in_channels, -1) for layer in self.pre_cond_layers: outputs = layer(outputs) outputs = self.cond_block(outputs, conditions) for layer in self.post_cond_layers: outputs = layer(outputs) return outputs class UnconditionalDiscriminator(nn.Module): def __init__(self, in_channels, base_channels=64, downsample_factors=(8, 4), out_channels=(128, 256)): super(UnconditionalDiscriminator, self).__init__() self.downsample_factors = downsample_factors self.in_channels = in_channels self.downsample_factors = downsample_factors self.out_channels = out_channels self.layers = nn.ModuleList() self.layers += [DBlock(self.in_channels, base_channels, 1)] in_channels = base_channels for (i, factor) in enumerate(downsample_factors): self.layers.append(DBlock(in_channels, out_channels[i], factor)) in_channels *= 2 self.layers += [ DBlock(in_channels, in_channels, 1), DBlock(in_channels, in_channels, 1), nn.AdaptiveAvgPool1d(1), nn.Conv1d(in_channels, 1, kernel_size=1), ] def forward(self, inputs): batch_size = inputs.size()[0] 
outputs = inputs.view(batch_size, self.in_channels, -1) for layer in self.layers: outputs = layer(outputs) return outputs class RandomWindowDiscriminator(nn.Module): """Random Window Discriminator as described in http://arxiv.org/abs/1909.11646""" def __init__(self, cond_channels, hop_length, uncond_disc_donwsample_factors=(8, 4), cond_disc_downsample_factors=((8, 4, 2, 2, 2), (8, 4, 2, 2), (8, 4, 2), (8, 4), (4, 2, 2)), cond_disc_out_channels=((128, 128, 256, 256), (128, 256, 256), (128, 256), (256, ), (128, 256)), window_sizes=(512, 1024, 2048, 4096, 8192)): super(RandomWindowDiscriminator, self).__init__() self.cond_channels = cond_channels self.window_sizes = window_sizes self.hop_length = hop_length self.base_window_size = self.hop_length * 2 self.ks = [ws // self.base_window_size for ws in window_sizes] # check arguments assert len(cond_disc_downsample_factors) == len( cond_disc_out_channels) == len(window_sizes) for ws in window_sizes: assert ws % hop_length == 0 for idx, cf in enumerate(cond_disc_downsample_factors): assert np.prod(cf) == hop_length // self.ks[idx] # define layers self.unconditional_discriminators = nn.ModuleList([]) for k in self.ks: layer = UnconditionalDiscriminator( in_channels=k, base_channels=64, downsample_factors=uncond_disc_donwsample_factors) self.unconditional_discriminators.append(layer) self.conditional_discriminators = nn.ModuleList([]) for idx, k in enumerate(self.ks): layer = ConditionalDiscriminator( in_channels=k, cond_channels=cond_channels, downsample_factors=cond_disc_downsample_factors[idx], out_channels=cond_disc_out_channels[idx]) self.conditional_discriminators.append(layer) def forward(self, x, c): scores = [] feats = [] # unconditional pass for (window_size, layer) in zip(self.window_sizes, self.unconditional_discriminators): index = np.random.randint(x.shape[-1] - window_size) score = layer(x[:, :, index:index + window_size]) scores.append(score) # conditional pass for (window_size, layer) in 
zip(self.window_sizes, self.conditional_discriminators): frame_size = window_size // self.hop_length lc_index = np.random.randint(c.shape[-1] - frame_size) sample_index = lc_index * self.hop_length x_sub = x[:, :, sample_index:(lc_index + frame_size) * self.hop_length] c_sub = c[:, :, lc_index:lc_index + frame_size] score = layer(x_sub, c_sub) scores.append(score) return scores, feats
8,554
36.853982
79
py
TTS
TTS-master/TTS/vocoder/models/melgan_multiscale_discriminator.py
from torch import nn from TTS.vocoder.models.melgan_discriminator import MelganDiscriminator class MelganMultiscaleDiscriminator(nn.Module): def __init__(self, in_channels=1, out_channels=1, num_scales=3, kernel_sizes=(5, 3), base_channels=16, max_channels=1024, downsample_factors=(4, 4, 4), pooling_kernel_size=4, pooling_stride=2, pooling_padding=1): super(MelganMultiscaleDiscriminator, self).__init__() self.discriminators = nn.ModuleList([ MelganDiscriminator(in_channels=in_channels, out_channels=out_channels, kernel_sizes=kernel_sizes, base_channels=base_channels, max_channels=max_channels, downsample_factors=downsample_factors) for _ in range(num_scales) ]) self.pooling = nn.AvgPool1d(kernel_size=pooling_kernel_size, stride=pooling_stride, padding=pooling_padding, count_include_pad=False) def forward(self, x): scores = list() feats = list() for disc in self.discriminators: score, feat = disc(x) scores.append(score) feats.append(feat) x = self.pooling(x) return scores, feats
1,474
34.119048
141
py
TTS
TTS-master/TTS/vocoder/models/multiband_melgan_generator.py
import torch from TTS.vocoder.models.melgan_generator import MelganGenerator from TTS.vocoder.layers.pqmf import PQMF class MultibandMelganGenerator(MelganGenerator): def __init__(self, in_channels=80, out_channels=4, proj_kernel=7, base_channels=384, upsample_factors=(2, 8, 2, 2), res_kernel=3, num_res_blocks=3): super(MultibandMelganGenerator, self).__init__(in_channels=in_channels, out_channels=out_channels, proj_kernel=proj_kernel, base_channels=base_channels, upsample_factors=upsample_factors, res_kernel=res_kernel, num_res_blocks=num_res_blocks) self.pqmf_layer = PQMF(N=4, taps=62, cutoff=0.15, beta=9.0) def pqmf_analysis(self, x): return self.pqmf_layer.analysis(x) def pqmf_synthesis(self, x): return self.pqmf_layer.synthesis(x) @torch.no_grad() def inference(self, cond_features): cond_features = cond_features.to(self.layers[1].weight.device) cond_features = torch.nn.functional.pad( cond_features, (self.inference_padding, self.inference_padding), 'replicate') return self.pqmf_synthesis(self.layers(cond_features))
1,461
35.55
70
py
TTS
TTS-master/TTS/vocoder/models/wavegrad.py
import numpy as np import torch from torch import nn from torch.nn.utils import weight_norm from ..layers.wavegrad import DBlock, FiLM, UBlock, Conv1d class Wavegrad(nn.Module): # pylint: disable=dangerous-default-value def __init__(self, in_channels=80, out_channels=1, use_weight_norm=False, y_conv_channels=32, x_conv_channels=768, dblock_out_channels=[128, 128, 256, 512], ublock_out_channels=[512, 512, 256, 128, 128], upsample_factors=[5, 5, 3, 2, 2], upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]]): super().__init__() self.use_weight_norm = use_weight_norm self.hop_len = np.prod(upsample_factors) self.noise_level = None self.num_steps = None self.beta = None self.alpha = None self.alpha_hat = None self.noise_level = None self.c1 = None self.c2 = None self.sigma = None # dblocks self.y_conv = Conv1d(1, y_conv_channels, 5, padding=2) self.dblocks = nn.ModuleList([]) ic = y_conv_channels for oc, df in zip(dblock_out_channels, reversed(upsample_factors)): self.dblocks.append(DBlock(ic, oc, df)) ic = oc # film self.film = nn.ModuleList([]) ic = y_conv_channels for oc in reversed(ublock_out_channels): self.film.append(FiLM(ic, oc)) ic = oc # ublocks self.ublocks = nn.ModuleList([]) ic = x_conv_channels for oc, uf, ud in zip(ublock_out_channels, upsample_factors, upsample_dilations): self.ublocks.append(UBlock(ic, oc, uf, ud)) ic = oc self.x_conv = Conv1d(in_channels, x_conv_channels, 3, padding=1) self.out_conv = Conv1d(oc, out_channels, 3, padding=1) if use_weight_norm: self.apply_weight_norm() def forward(self, x, spectrogram, noise_scale): shift_and_scale = [] x = self.y_conv(x) shift_and_scale.append(self.film[0](x, noise_scale)) for film, layer in zip(self.film[1:], self.dblocks): x = layer(x) shift_and_scale.append(film(x, noise_scale)) x = self.x_conv(spectrogram) for layer, (film_shift, film_scale) in zip(self.ublocks, reversed(shift_and_scale)): x = layer(x, film_shift, film_scale) x = self.out_conv(x) return x def 
load_noise_schedule(self, path): beta = np.load(path, allow_pickle=True).item()['beta'] self.compute_noise_level(beta) @torch.no_grad() def inference(self, x, y_n=None): """ x: B x D X T """ if y_n is None: y_n = torch.randn(x.shape[0], 1, self.hop_len * x.shape[-1], dtype=torch.float32).to(x) else: y_n = torch.FloatTensor(y_n).unsqueeze(0).unsqueeze(0).to(x) sqrt_alpha_hat = self.noise_level.to(x) for n in range(len(self.alpha) - 1, -1, -1): y_n = self.c1[n] * (y_n - self.c2[n] * self.forward(y_n, x, sqrt_alpha_hat[n].repeat(x.shape[0]))) if n > 0: z = torch.randn_like(y_n) y_n += self.sigma[n - 1] * z y_n.clamp_(-1.0, 1.0) return y_n def compute_y_n(self, y_0): """Compute noisy audio based on noise schedule""" self.noise_level = self.noise_level.to(y_0) if len(y_0.shape) == 3: y_0 = y_0.squeeze(1) s = torch.randint(0, self.num_steps - 1, [y_0.shape[0]]) l_a, l_b = self.noise_level[s], self.noise_level[s+1] noise_scale = l_a + torch.rand(y_0.shape[0]).to(y_0) * (l_b - l_a) noise_scale = noise_scale.unsqueeze(1) noise = torch.randn_like(y_0) noisy_audio = noise_scale * y_0 + (1.0 - noise_scale**2)**0.5 * noise return noise.unsqueeze(1), noisy_audio.unsqueeze(1), noise_scale[:, 0] def compute_noise_level(self, beta): """Compute noise schedule parameters""" self.num_steps = len(beta) alpha = 1 - beta alpha_hat = np.cumprod(alpha) noise_level = np.concatenate([[1.0], alpha_hat ** 0.5], axis=0) noise_level = alpha_hat ** 0.5 # pylint: disable=not-callable self.beta = torch.tensor(beta.astype(np.float32)) self.alpha = torch.tensor(alpha.astype(np.float32)) self.alpha_hat = torch.tensor(alpha_hat.astype(np.float32)) self.noise_level = torch.tensor(noise_level.astype(np.float32)) self.c1 = 1 / self.alpha**0.5 self.c2 = (1 - self.alpha) / (1 - self.alpha_hat)**0.5 self.sigma = ((1.0 - self.alpha_hat[:-1]) / (1.0 - self.alpha_hat[1:]) * self.beta[1:])**0.5 def remove_weight_norm(self): for _, layer in enumerate(self.dblocks): if len(layer.state_dict()) != 0: try: 
nn.utils.remove_weight_norm(layer) except ValueError: layer.remove_weight_norm() for _, layer in enumerate(self.film): if len(layer.state_dict()) != 0: try: nn.utils.remove_weight_norm(layer) except ValueError: layer.remove_weight_norm() for _, layer in enumerate(self.ublocks): if len(layer.state_dict()) != 0: try: nn.utils.remove_weight_norm(layer) except ValueError: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.x_conv) nn.utils.remove_weight_norm(self.out_conv) nn.utils.remove_weight_norm(self.y_conv) def apply_weight_norm(self): for _, layer in enumerate(self.dblocks): if len(layer.state_dict()) != 0: layer.apply_weight_norm() for _, layer in enumerate(self.film): if len(layer.state_dict()) != 0: layer.apply_weight_norm() for _, layer in enumerate(self.ublocks): if len(layer.state_dict()) != 0: layer.apply_weight_norm() self.x_conv = weight_norm(self.x_conv) self.out_conv = weight_norm(self.out_conv) self.y_conv = weight_norm(self.y_conv) def load_checkpoint(self, config, checkpoint_path, eval=False): # pylint: disable=unused-argument, redefined-builtin state = torch.load(checkpoint_path, map_location=torch.device('cpu')) self.load_state_dict(state['model']) if eval: self.eval() assert not self.training if self.use_weight_norm: self.remove_weight_norm() betas = np.linspace(config['test_noise_schedule']['min_val'], config['test_noise_schedule']['max_val'], config['test_noise_schedule']['num_steps']) self.compute_noise_level(betas) else: betas = np.linspace(config['train_noise_schedule']['min_val'], config['train_noise_schedule']['max_val'], config['train_noise_schedule']['num_steps']) self.compute_noise_level(betas)
7,448
36.812183
121
py
TTS
TTS-master/TTS/vocoder/models/fullband_melgan_generator.py
import torch from TTS.vocoder.models.melgan_generator import MelganGenerator class FullbandMelganGenerator(MelganGenerator): def __init__(self, in_channels=80, out_channels=1, proj_kernel=7, base_channels=512, upsample_factors=(2, 8, 2, 2), res_kernel=3, num_res_blocks=4): super().__init__(in_channels=in_channels, out_channels=out_channels, proj_kernel=proj_kernel, base_channels=base_channels, upsample_factors=upsample_factors, res_kernel=res_kernel, num_res_blocks=num_res_blocks) @torch.no_grad() def inference(self, cond_features): cond_features = cond_features.to(self.layers[1].weight.device) cond_features = torch.nn.functional.pad( cond_features, (self.inference_padding, self.inference_padding), 'replicate') return self.layers(cond_features)
1,108
34.774194
70
py
TTS
TTS-master/TTS/vocoder/models/melgan_generator.py
import torch from torch import nn from torch.nn.utils import weight_norm from TTS.vocoder.layers.melgan import ResidualStack class MelganGenerator(nn.Module): def __init__(self, in_channels=80, out_channels=1, proj_kernel=7, base_channels=512, upsample_factors=(8, 8, 2, 2), res_kernel=3, num_res_blocks=3): super(MelganGenerator, self).__init__() # assert model parameters assert (proj_kernel - 1) % 2 == 0, " [!] proj_kernel should be an odd number." # setup additional model parameters base_padding = (proj_kernel - 1) // 2 act_slope = 0.2 self.inference_padding = 2 # initial layer layers = [] layers += [ nn.ReflectionPad1d(base_padding), weight_norm( nn.Conv1d(in_channels, base_channels, kernel_size=proj_kernel, stride=1, bias=True)) ] # upsampling layers and residual stacks for idx, upsample_factor in enumerate(upsample_factors): layer_in_channels = base_channels // (2**idx) layer_out_channels = base_channels // (2**(idx + 1)) layer_filter_size = upsample_factor * 2 layer_stride = upsample_factor layer_output_padding = upsample_factor % 2 layer_padding = upsample_factor // 2 + layer_output_padding layers += [ nn.LeakyReLU(act_slope), weight_norm( nn.ConvTranspose1d(layer_in_channels, layer_out_channels, layer_filter_size, stride=layer_stride, padding=layer_padding, output_padding=layer_output_padding, bias=True)), ResidualStack( channels=layer_out_channels, num_res_blocks=num_res_blocks, kernel_size=res_kernel ) ] layers += [nn.LeakyReLU(act_slope)] # final layer layers += [ nn.ReflectionPad1d(base_padding), weight_norm( nn.Conv1d(layer_out_channels, out_channels, proj_kernel, stride=1, bias=True)), nn.Tanh() ] self.layers = nn.Sequential(*layers) def forward(self, c): return self.layers(c) def inference(self, c): c = c.to(self.layers[1].weight.device) c = torch.nn.functional.pad( c, (self.inference_padding, self.inference_padding), 'replicate') return self.layers(c) def remove_weight_norm(self): for _, layer in enumerate(self.layers): if len(layer.state_dict()) != 
0: try: nn.utils.remove_weight_norm(layer) except ValueError: layer.remove_weight_norm() def load_checkpoint(self, config, checkpoint_path, eval=False): # pylint: disable=unused-argument, redefined-builtin state = torch.load(checkpoint_path, map_location=torch.device('cpu')) self.load_state_dict(state['model']) if eval: self.eval() assert not self.training self.remove_weight_norm()
3,691
33.830189
121
py
TTS
TTS-master/TTS/vocoder/datasets/wavegrad_dataset.py
import os import glob import torch import random import numpy as np from torch.utils.data import Dataset from multiprocessing import Manager class WaveGradDataset(Dataset): """ WaveGrad Dataset searchs for all the wav files under root path and converts them to acoustic features on the fly and returns random segments of (audio, feature) couples. """ def __init__(self, ap, items, seq_len, hop_len, pad_short, conv_pad=2, is_training=True, return_segments=True, use_noise_augment=False, use_cache=False, verbose=False): self.ap = ap self.item_list = items self.seq_len = seq_len if return_segments else None self.hop_len = hop_len self.pad_short = pad_short self.conv_pad = conv_pad self.is_training = is_training self.return_segments = return_segments self.use_cache = use_cache self.use_noise_augment = use_noise_augment self.verbose = verbose if return_segments: assert seq_len % hop_len == 0, " [!] seq_len has to be a multiple of hop_len." self.feat_frame_len = seq_len // hop_len + (2 * conv_pad) # cache acoustic features if use_cache: self.create_feature_cache() def create_feature_cache(self): self.manager = Manager() self.cache = self.manager.list() self.cache += [None for _ in range(len(self.item_list))] @staticmethod def find_wav_files(path): return glob.glob(os.path.join(path, '**', '*.wav'), recursive=True) def __len__(self): return len(self.item_list) def __getitem__(self, idx): item = self.load_item(idx) return item def load_test_samples(self, num_samples): samples = [] return_segments = self.return_segments self.return_segments = False for idx in range(num_samples): mel, audio = self.load_item(idx) samples.append([mel, audio]) self.return_segments = return_segments return samples def load_item(self, idx): """ load (audio, feat) couple """ # compute features from wav wavpath = self.item_list[idx] if self.use_cache and self.cache[idx] is not None: audio = self.cache[idx] else: audio = self.ap.load_wav(wavpath) if self.return_segments: # correct audio length wrt 
segment length if audio.shape[-1] < self.seq_len + self.pad_short: audio = np.pad(audio, (0, self.seq_len + self.pad_short - len(audio)), \ mode='constant', constant_values=0.0) assert audio.shape[-1] >= self.seq_len + self.pad_short, f"{audio.shape[-1]} vs {self.seq_len + self.pad_short}" # correct the audio length wrt hop length p = (audio.shape[-1] // self.hop_len + 1) * self.hop_len - audio.shape[-1] audio = np.pad(audio, (0, p), mode='constant', constant_values=0.0) if self.use_cache: self.cache[idx] = audio if self.return_segments: max_start = len(audio) - self.seq_len start = random.randint(0, max_start) end = start + self.seq_len audio = audio[start:end] if self.use_noise_augment and self.is_training and self.return_segments: audio = audio + (1 / 32768) * torch.randn_like(audio) mel = self.ap.melspectrogram(audio) mel = mel[..., :-1] # ignore the padding audio = torch.from_numpy(audio).float() mel = torch.from_numpy(mel).float().squeeze(0) return (mel, audio) @staticmethod def collate_full_clips(batch): """This is used in tune_wavegrad.py. It pads sequences to the max length.""" max_mel_length = max([b[0].shape[1] for b in batch]) if len(batch) > 1 else batch[0][0].shape[1] max_audio_length = max([b[1].shape[0] for b in batch]) if len(batch) > 1 else batch[0][1].shape[0] mels = torch.zeros([len(batch), batch[0][0].shape[0], max_mel_length]) audios = torch.zeros([len(batch), max_audio_length]) for idx, b in enumerate(batch): mel = b[0] audio = b[1] mels[idx, :, :mel.shape[1]] = mel audios[idx, :audio.shape[0]] = audio return mels, audios
4,572
33.643939
128
py
TTS
TTS-master/TTS/vocoder/datasets/gan_dataset.py
import os import glob import torch import random import numpy as np from torch.utils.data import Dataset from multiprocessing import Manager class GANDataset(Dataset): """ GAN Dataset searchs for all the wav files under root path and converts them to acoustic features on the fly and returns random segments of (audio, feature) couples. """ def __init__(self, ap, items, seq_len, hop_len, pad_short, conv_pad=2, is_training=True, return_segments=True, use_noise_augment=False, use_cache=False, verbose=False): self.ap = ap self.item_list = items self.compute_feat = not isinstance(items[0], (tuple, list)) self.seq_len = seq_len self.hop_len = hop_len self.pad_short = pad_short self.conv_pad = conv_pad self.is_training = is_training self.return_segments = return_segments self.use_cache = use_cache self.use_noise_augment = use_noise_augment self.verbose = verbose assert seq_len % hop_len == 0, " [!] seq_len has to be a multiple of hop_len." self.feat_frame_len = seq_len // hop_len + (2 * conv_pad) # map G and D instances self.G_to_D_mappings = list(range(len(self.item_list))) self.shuffle_mapping() # cache acoustic features if use_cache: self.create_feature_cache() def create_feature_cache(self): self.manager = Manager() self.cache = self.manager.list() self.cache += [None for _ in range(len(self.item_list))] @staticmethod def find_wav_files(path): return glob.glob(os.path.join(path, '**', '*.wav'), recursive=True) def __len__(self): return len(self.item_list) def __getitem__(self, idx): """ Return different items for Generator and Discriminator and cache acoustic features """ if self.return_segments: idx2 = self.G_to_D_mappings[idx] item1 = self.load_item(idx) item2 = self.load_item(idx2) return item1, item2 item1 = self.load_item(idx) return item1 def shuffle_mapping(self): random.shuffle(self.G_to_D_mappings) def load_item(self, idx): """ load (audio, feat) couple """ if self.compute_feat: # compute features from wav wavpath = self.item_list[idx] # print(wavpath) if 
self.use_cache and self.cache[idx] is not None: audio, mel = self.cache[idx] else: audio = self.ap.load_wav(wavpath) if len(audio) < self.seq_len + self.pad_short: audio = np.pad(audio, (0, self.seq_len + self.pad_short - len(audio)), \ mode='constant', constant_values=0.0) mel = self.ap.melspectrogram(audio) else: # load precomputed features wavpath, feat_path = self.item_list[idx] if self.use_cache and self.cache[idx] is not None: audio, mel = self.cache[idx] else: audio = self.ap.load_wav(wavpath) mel = np.load(feat_path) # correct the audio length wrt padding applied in stft audio = np.pad(audio, (0, self.hop_len), mode="edge") audio = audio[:mel.shape[-1] * self.hop_len] assert mel.shape[-1] * self.hop_len == audio.shape[-1], f' [!] {mel.shape[-1] * self.hop_len} vs {audio.shape[-1]}' audio = torch.from_numpy(audio).float().unsqueeze(0) mel = torch.from_numpy(mel).float().squeeze(0) if self.return_segments: max_mel_start = mel.shape[1] - self.feat_frame_len mel_start = random.randint(0, max_mel_start) mel_end = mel_start + self.feat_frame_len mel = mel[:, mel_start:mel_end] audio_start = mel_start * self.hop_len audio = audio[:, audio_start:audio_start + self.seq_len] if self.use_noise_augment and self.is_training and self.return_segments: audio = audio + (1 / 32768) * torch.randn_like(audio) return (mel, audio)
4,369
33.140625
123
py
TTS
TTS-master/TTS/vocoder/datasets/wavernn_dataset.py
import torch import numpy as np from torch.utils.data import Dataset class WaveRNNDataset(Dataset): """ WaveRNN Dataset searchs for all the wav files under root path and converts them to acoustic features on the fly. """ def __init__(self, ap, items, seq_len, hop_len, pad, mode, mulaw, is_training=True, verbose=False, ): self.ap = ap self.compute_feat = not isinstance(items[0], (tuple, list)) self.item_list = items self.seq_len = seq_len self.hop_len = hop_len self.mel_len = seq_len // hop_len self.pad = pad self.mode = mode self.mulaw = mulaw self.is_training = is_training self.verbose = verbose assert self.seq_len % self.hop_len == 0 def __len__(self): return len(self.item_list) def __getitem__(self, index): item = self.load_item(index) return item def load_item(self, index): """ load (audio, feat) couple if feature_path is set else compute it on the fly """ if self.compute_feat: wavpath = self.item_list[index] audio = self.ap.load_wav(wavpath) min_audio_len = 2 * self.seq_len + (2 * self.pad * self.hop_len) if audio.shape[0] < min_audio_len: print(" [!] Instance is too short! : {}".format(wavpath)) audio = np.pad(audio, [0, min_audio_len - audio.shape[0] + self.hop_len]) mel = self.ap.melspectrogram(audio) if self.mode in ["gauss", "mold"]: x_input = audio elif isinstance(self.mode, int): x_input = (self.ap.mulaw_encode(audio, qc=self.mode) if self.mulaw else self.ap.quantize(audio, bits=self.mode)) else: raise RuntimeError("Unknown dataset mode - ", self.mode) else: wavpath, feat_path = self.item_list[index] mel = np.load(feat_path.replace("/quant/", "/mel/")) if mel.shape[-1] < self.mel_len + 2 * self.pad: print(" [!] Instance is too short! 
: {}".format(wavpath)) self.item_list[index] = self.item_list[index + 1] feat_path = self.item_list[index] mel = np.load(feat_path.replace("/quant/", "/mel/")) if self.mode in ["gauss", "mold"]: x_input = self.ap.load_wav(wavpath) elif isinstance(self.mode, int): x_input = np.load(feat_path.replace("/mel/", "/quant/")) else: raise RuntimeError("Unknown dataset mode - ", self.mode) return mel, x_input, wavpath def collate(self, batch): mel_win = self.seq_len // self.hop_len + 2 * self.pad max_offsets = [x[0].shape[-1] - (mel_win + 2 * self.pad) for x in batch] mel_offsets = [np.random.randint(0, offset) for offset in max_offsets] sig_offsets = [(offset + self.pad) * self.hop_len for offset in mel_offsets] mels = [ x[0][:, mel_offsets[i]: mel_offsets[i] + mel_win] for i, x in enumerate(batch) ] coarse = [ x[1][sig_offsets[i]: sig_offsets[i] + self.seq_len + 1] for i, x in enumerate(batch) ] mels = np.stack(mels).astype(np.float32) if self.mode in ["gauss", "mold"]: coarse = np.stack(coarse).astype(np.float32) coarse = torch.FloatTensor(coarse) x_input = coarse[:, : self.seq_len] elif isinstance(self.mode, int): coarse = np.stack(coarse).astype(np.int64) coarse = torch.LongTensor(coarse) x_input = (2 * coarse[:, : self.seq_len].float() / (2 ** self.mode - 1.0) - 1.0) y_coarse = coarse[:, 1:] mels = torch.FloatTensor(mels) return x_input, mels, y_coarse
4,136
33.764706
89
py
TTS
TTS-master/TTS/vocoder/layers/losses.py
import torch from torch import nn from torch.nn import functional as F class TorchSTFT(nn.Module): def __init__(self, n_fft, hop_length, win_length, window='hann_window'): """ Torch based STFT operation """ super(TorchSTFT, self).__init__() self.n_fft = n_fft self.hop_length = hop_length self.win_length = win_length self.window = nn.Parameter(getattr(torch, window)(win_length), requires_grad=False) def __call__(self, x): # B x D x T x 2 o = torch.stft(x, self.n_fft, self.hop_length, self.win_length, self.window, center=True, pad_mode="reflect", # compatible with audio.py normalized=False, onesided=True, return_complex=False) M = o[:, :, :, 0] P = o[:, :, :, 1] return torch.sqrt(torch.clamp(M ** 2 + P ** 2, min=1e-8)) ################################# # GENERATOR LOSSES ################################# class STFTLoss(nn.Module): """ Single scale STFT Loss """ def __init__(self, n_fft, hop_length, win_length): super(STFTLoss, self).__init__() self.n_fft = n_fft self.hop_length = hop_length self.win_length = win_length self.stft = TorchSTFT(n_fft, hop_length, win_length) def forward(self, y_hat, y): y_hat_M = self.stft(y_hat) y_M = self.stft(y) # magnitude loss loss_mag = F.l1_loss(torch.log(y_M), torch.log(y_hat_M)) # spectral convergence loss loss_sc = torch.norm(y_M - y_hat_M, p="fro") / torch.norm(y_M, p="fro") return loss_mag, loss_sc class MultiScaleSTFTLoss(torch.nn.Module): """ Multi scale STFT loss """ def __init__(self, n_ffts=(1024, 2048, 512), hop_lengths=(120, 240, 50), win_lengths=(600, 1200, 240)): super(MultiScaleSTFTLoss, self).__init__() self.loss_funcs = torch.nn.ModuleList() for n_fft, hop_length, win_length in zip(n_ffts, hop_lengths, win_lengths): self.loss_funcs.append(STFTLoss(n_fft, hop_length, win_length)) def forward(self, y_hat, y): N = len(self.loss_funcs) loss_sc = 0 loss_mag = 0 for f in self.loss_funcs: lm, lsc = f(y_hat, y) loss_mag += lm loss_sc += lsc loss_sc /= N loss_mag /= N return loss_mag, loss_sc class 
MultiScaleSubbandSTFTLoss(MultiScaleSTFTLoss): """ Multiscale STFT loss for multi band model outputs """ # pylint: disable=no-self-use def forward(self, y_hat, y): y_hat = y_hat.view(-1, 1, y_hat.shape[2]) y = y.view(-1, 1, y.shape[2]) return super().forward(y_hat.squeeze(1), y.squeeze(1)) class MSEGLoss(nn.Module): """ Mean Squared Generator Loss """ # pylint: disable=no-self-use def forward(self, score_real): loss_fake = F.mse_loss(score_real, score_real.new_ones(score_real.shape)) return loss_fake class HingeGLoss(nn.Module): """ Hinge Discriminator Loss """ # pylint: disable=no-self-use def forward(self, score_real): # TODO: this might be wrong loss_fake = torch.mean(F.relu(1. - score_real)) return loss_fake ################################## # DISCRIMINATOR LOSSES ################################## class MSEDLoss(nn.Module): """ Mean Squared Discriminator Loss """ def __init__(self,): super(MSEDLoss, self).__init__() self.loss_func = nn.MSELoss() # pylint: disable=no-self-use def forward(self, score_fake, score_real): loss_real = self.loss_func(score_real, score_real.new_ones(score_real.shape)) loss_fake = self.loss_func(score_fake, score_fake.new_zeros(score_fake.shape)) loss_d = loss_real + loss_fake return loss_d, loss_real, loss_fake class HingeDLoss(nn.Module): """ Hinge Discriminator Loss """ # pylint: disable=no-self-use def forward(self, score_fake, score_real): loss_real = torch.mean(F.relu(1. - score_real)) loss_fake = torch.mean(F.relu(1. 
+ score_fake)) loss_d = loss_real + loss_fake return loss_d, loss_real, loss_fake class MelganFeatureLoss(nn.Module): def __init__(self,): super(MelganFeatureLoss, self).__init__() self.loss_func = nn.L1Loss() # pylint: disable=no-self-use def forward(self, fake_feats, real_feats): loss_feats = 0 for fake_feat, real_feat in zip(fake_feats, real_feats): loss_feats += self.loss_func(fake_feat, real_feat) loss_feats /= len(fake_feats) + len(real_feats) return loss_feats ##################################### # LOSS WRAPPERS ##################################### def _apply_G_adv_loss(scores_fake, loss_func): """ Compute G adversarial loss function and normalize values """ adv_loss = 0 if isinstance(scores_fake, list): for score_fake in scores_fake: fake_loss = loss_func(score_fake) adv_loss += fake_loss adv_loss /= len(scores_fake) else: fake_loss = loss_func(scores_fake) adv_loss = fake_loss return adv_loss def _apply_D_loss(scores_fake, scores_real, loss_func): """ Compute D loss func and normalize loss values """ loss = 0 real_loss = 0 fake_loss = 0 if isinstance(scores_fake, list): # multi-scale loss for score_fake, score_real in zip(scores_fake, scores_real): total_loss, real_loss, fake_loss = loss_func(score_fake=score_fake, score_real=score_real) loss += total_loss real_loss += real_loss fake_loss += fake_loss # normalize loss values with number of scales loss /= len(scores_fake) real_loss /= len(scores_real) fake_loss /= len(scores_fake) else: # single scale loss total_loss, real_loss, fake_loss = loss_func(scores_fake, scores_real) loss = total_loss return loss, real_loss, fake_loss ################################## # MODEL LOSSES ################################## class GeneratorLoss(nn.Module): def __init__(self, C): """ Compute Generator Loss values depending on training configuration """ super(GeneratorLoss, self).__init__() assert not(C.use_mse_gan_loss and C.use_hinge_gan_loss),\ " [!] Cannot use HingeGANLoss and MSEGANLoss together." 
self.use_stft_loss = C.use_stft_loss self.use_subband_stft_loss = C.use_subband_stft_loss self.use_mse_gan_loss = C.use_mse_gan_loss self.use_hinge_gan_loss = C.use_hinge_gan_loss self.use_feat_match_loss = C.use_feat_match_loss self.stft_loss_weight = C.stft_loss_weight self.subband_stft_loss_weight = C.subband_stft_loss_weight self.mse_gan_loss_weight = C.mse_G_loss_weight self.hinge_gan_loss_weight = C.hinge_G_loss_weight self.feat_match_loss_weight = C.feat_match_loss_weight if C.use_stft_loss: self.stft_loss = MultiScaleSTFTLoss(**C.stft_loss_params) if C.use_subband_stft_loss: self.subband_stft_loss = MultiScaleSubbandSTFTLoss(**C.subband_stft_loss_params) if C.use_mse_gan_loss: self.mse_loss = MSEGLoss() if C.use_hinge_gan_loss: self.hinge_loss = HingeGLoss() if C.use_feat_match_loss: self.feat_match_loss = MelganFeatureLoss() def forward(self, y_hat=None, y=None, scores_fake=None, feats_fake=None, feats_real=None, y_hat_sub=None, y_sub=None): gen_loss = 0 adv_loss = 0 return_dict = {} # STFT Loss if self.use_stft_loss: stft_loss_mg, stft_loss_sc = self.stft_loss(y_hat.squeeze(1), y.squeeze(1)) return_dict['G_stft_loss_mg'] = stft_loss_mg return_dict['G_stft_loss_sc'] = stft_loss_sc gen_loss += self.stft_loss_weight * (stft_loss_mg + stft_loss_sc) # subband STFT Loss if self.use_subband_stft_loss: subband_stft_loss_mg, subband_stft_loss_sc = self.subband_stft_loss(y_hat_sub, y_sub) return_dict['G_subband_stft_loss_mg'] = subband_stft_loss_mg return_dict['G_subband_stft_loss_sc'] = subband_stft_loss_sc gen_loss += self.subband_stft_loss_weight * (subband_stft_loss_mg + subband_stft_loss_sc) # multiscale MSE adversarial loss if self.use_mse_gan_loss and scores_fake is not None: mse_fake_loss = _apply_G_adv_loss(scores_fake, self.mse_loss) return_dict['G_mse_fake_loss'] = mse_fake_loss adv_loss += self.mse_gan_loss_weight * mse_fake_loss # multiscale Hinge adversarial loss if self.use_hinge_gan_loss and not scores_fake is not None: hinge_fake_loss = 
_apply_G_adv_loss(scores_fake, self.hinge_loss) return_dict['G_hinge_fake_loss'] = hinge_fake_loss adv_loss += self.hinge_gan_loss_weight * hinge_fake_loss # Feature Matching Loss if self.use_feat_match_loss and not feats_fake: feat_match_loss = self.feat_match_loss(feats_fake, feats_real) return_dict['G_feat_match_loss'] = feat_match_loss adv_loss += self.feat_match_loss_weight * feat_match_loss return_dict['G_loss'] = gen_loss + adv_loss return_dict['G_gen_loss'] = gen_loss return_dict['G_adv_loss'] = adv_loss return return_dict class DiscriminatorLoss(nn.Module): """ Compute Discriminator Loss values depending on training configuration """ def __init__(self, C): super(DiscriminatorLoss, self).__init__() assert not(C.use_mse_gan_loss and C.use_hinge_gan_loss),\ " [!] Cannot use HingeGANLoss and MSEGANLoss together." self.use_mse_gan_loss = C.use_mse_gan_loss self.use_hinge_gan_loss = C.use_hinge_gan_loss if C.use_mse_gan_loss: self.mse_loss = MSEDLoss() if C.use_hinge_gan_loss: self.hinge_loss = HingeDLoss() def forward(self, scores_fake, scores_real): loss = 0 return_dict = {} if self.use_mse_gan_loss: mse_D_loss, mse_D_real_loss, mse_D_fake_loss = _apply_D_loss( scores_fake=scores_fake, scores_real=scores_real, loss_func=self.mse_loss) return_dict['D_mse_gan_loss'] = mse_D_loss return_dict['D_mse_gan_real_loss'] = mse_D_real_loss return_dict['D_mse_gan_fake_loss'] = mse_D_fake_loss loss += mse_D_loss if self.use_hinge_gan_loss: hinge_D_loss, hinge_D_real_loss, hinge_D_fake_loss = _apply_D_loss( scores_fake=scores_fake, scores_real=scores_real, loss_func=self.hinge_loss) return_dict['D_hinge_gan_loss'] = hinge_D_loss return_dict['D_hinge_gan_real_loss'] = hinge_D_real_loss return_dict['D_hinge_gan_fake_loss'] = hinge_D_fake_loss loss += hinge_D_loss return_dict['D_loss'] = loss return return_dict
11,390
35.392971
122
py
TTS
TTS-master/TTS/vocoder/layers/parallel_wavegan.py
import torch from torch.nn import functional as F class ResidualBlock(torch.nn.Module): """Residual block module in WaveNet.""" def __init__(self, kernel_size=3, res_channels=64, gate_channels=128, skip_channels=64, aux_channels=80, dropout=0.0, dilation=1, bias=True, use_causal_conv=False): super(ResidualBlock, self).__init__() self.dropout = dropout # no future time stamps available if use_causal_conv: padding = (kernel_size - 1) * dilation else: assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." padding = (kernel_size - 1) // 2 * dilation self.use_causal_conv = use_causal_conv # dilation conv self.conv = torch.nn.Conv1d(res_channels, gate_channels, kernel_size, padding=padding, dilation=dilation, bias=bias) # local conditioning if aux_channels > 0: self.conv1x1_aux = torch.nn.Conv1d(aux_channels, gate_channels, 1, bias=False) else: self.conv1x1_aux = None # conv output is split into two groups gate_out_channels = gate_channels // 2 self.conv1x1_out = torch.nn.Conv1d(gate_out_channels, res_channels, 1, bias=bias) self.conv1x1_skip = torch.nn.Conv1d(gate_out_channels, skip_channels, 1, bias=bias) def forward(self, x, c): """ x: B x D_res x T c: B x D_aux x T """ residual = x x = F.dropout(x, p=self.dropout, training=self.training) x = self.conv(x) # remove future time steps if use_causal_conv conv x = x[:, :, :residual.size(-1)] if self.use_causal_conv else x # split into two part for gated activation splitdim = 1 xa, xb = x.split(x.size(splitdim) // 2, dim=splitdim) # local conditioning if c is not None: assert self.conv1x1_aux is not None c = self.conv1x1_aux(c) ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim) xa, xb = xa + ca, xb + cb x = torch.tanh(xa) * torch.sigmoid(xb) # for skip connection s = self.conv1x1_skip(x) # for residual connection x = (self.conv1x1_out(x) + residual) * (0.5**2) return x, s
3,035
33.5
71
py
TTS
TTS-master/TTS/vocoder/layers/pqmf.py
import numpy as np import torch import torch.nn.functional as F from scipy import signal as sig # adapted from # https://github.com/kan-bayashi/ParallelWaveGAN/tree/master/parallel_wavegan class PQMF(torch.nn.Module): def __init__(self, N=4, taps=62, cutoff=0.15, beta=9.0): super(PQMF, self).__init__() self.N = N self.taps = taps self.cutoff = cutoff self.beta = beta QMF = sig.firwin(taps + 1, cutoff, window=('kaiser', beta)) H = np.zeros((N, len(QMF))) G = np.zeros((N, len(QMF))) for k in range(N): constant_factor = (2 * k + 1) * (np.pi / (2 * N)) * (np.arange(taps + 1) - ((taps - 1) / 2)) # TODO: (taps - 1) -> taps phase = (-1)**k * np.pi / 4 H[k] = 2 * QMF * np.cos(constant_factor + phase) G[k] = 2 * QMF * np.cos(constant_factor - phase) H = torch.from_numpy(H[:, None, :]).float() G = torch.from_numpy(G[None, :, :]).float() self.register_buffer("H", H) self.register_buffer("G", G) updown_filter = torch.zeros((N, N, N)).float() for k in range(N): updown_filter[k, k, 0] = 1.0 self.register_buffer("updown_filter", updown_filter) self.N = N self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0) def forward(self, x): return self.analysis(x) def analysis(self, x): return F.conv1d(x, self.H, padding=self.taps // 2, stride=self.N) def synthesis(self, x): x = F.conv_transpose1d(x, self.updown_filter * self.N, stride=self.N) x = F.conv1d(x, self.G, padding=self.taps // 2) return x
1,833
31.175439
102
py
TTS
TTS-master/TTS/vocoder/layers/melgan.py
from torch import nn from torch.nn.utils import weight_norm class ResidualStack(nn.Module): def __init__(self, channels, num_res_blocks, kernel_size): super(ResidualStack, self).__init__() assert (kernel_size - 1) % 2 == 0, " [!] kernel_size has to be odd." base_padding = (kernel_size - 1) // 2 self.blocks = nn.ModuleList() for idx in range(num_res_blocks): layer_kernel_size = kernel_size layer_dilation = layer_kernel_size**idx layer_padding = base_padding * layer_dilation self.blocks += [nn.Sequential( nn.LeakyReLU(0.2), nn.ReflectionPad1d(layer_padding), weight_norm( nn.Conv1d(channels, channels, kernel_size=kernel_size, dilation=layer_dilation, bias=True)), nn.LeakyReLU(0.2), weight_norm( nn.Conv1d(channels, channels, kernel_size=1, bias=True)), )] self.shortcuts = nn.ModuleList([ weight_norm(nn.Conv1d(channels, channels, kernel_size=1, bias=True)) for i in range(num_res_blocks) ]) def forward(self, x): for block, shortcut in zip(self.blocks, self.shortcuts): x = shortcut(x) + block(x) return x def remove_weight_norm(self): for block, shortcut in zip(self.blocks, self.shortcuts): nn.utils.remove_weight_norm(block[2]) nn.utils.remove_weight_norm(block[4]) nn.utils.remove_weight_norm(shortcut)
1,707
36.130435
77
py
TTS
TTS-master/TTS/vocoder/layers/wavegrad.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm


class Conv1d(nn.Conv1d):
    """Conv1d with orthogonal weight init and zeroed bias."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        nn.init.orthogonal_(self.weight)
        nn.init.zeros_(self.bias)


class PositionalEncoding(nn.Module):
    """Positional encoding with noise level conditioning"""
    def __init__(self, n_channels, max_len=10000):
        super().__init__()
        self.n_channels = n_channels
        self.max_len = max_len
        self.C = 5000  # scaling divisor applied to the positional table
        # lazily (re)built in forward when the input is longer than the cache
        self.pe = torch.zeros(0, 0)

    def forward(self, x, noise_level):
        # x: assumed (B, C, T); noise_level broadcast over (C, T) — TODO confirm
        if x.shape[2] > self.pe.shape[1]:
            self.init_pe_matrix(x.shape[1] ,x.shape[2], x)
        return x + noise_level[..., None, None] + self.pe[:, :x.size(2)].repeat(x.shape[0], 1, 1) / self.C

    def init_pe_matrix(self, n_channels, max_len, x):
        # standard sin/cos table; `x` only supplies the target device/dtype
        pe = torch.zeros(max_len, n_channels)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.pow(10000, torch.arange(0, n_channels, 2).float() / n_channels)
        pe[:, 0::2] = torch.sin(position / div_term)
        pe[:, 1::2] = torch.cos(position / div_term)
        self.pe = pe.transpose(0, 1).to(x)


class FiLM(nn.Module):
    """Feature-wise linear modulation: maps (x, noise_scale) -> (shift, scale)."""
    def __init__(self, input_size, output_size):
        super().__init__()
        self.encoding = PositionalEncoding(input_size)
        self.input_conv = nn.Conv1d(input_size, input_size, 3, padding=1)
        self.output_conv = nn.Conv1d(input_size, output_size * 2, 3, padding=1)
        nn.init.xavier_uniform_(self.input_conv.weight)
        nn.init.xavier_uniform_(self.output_conv.weight)
        nn.init.zeros_(self.input_conv.bias)
        nn.init.zeros_(self.output_conv.bias)

    def forward(self, x, noise_scale):
        o = self.input_conv(x)
        o = F.leaky_relu(o, 0.2)
        o = self.encoding(o, noise_scale)
        # output channels are split into the (shift, scale) pair
        shift, scale = torch.chunk(self.output_conv(o), 2, dim=1)
        return shift, scale

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.input_conv)
        nn.utils.remove_weight_norm(self.output_conv)

    def apply_weight_norm(self):
        self.input_conv = weight_norm(self.input_conv)
        self.output_conv = weight_norm(self.output_conv)


# NOTE(review): "shif" looks like a typo for "shift", but the name is part of
# the module interface (and jit-scripted), so it is kept as-is.
@torch.jit.script
def shif_and_scale(x, scale, shift):
    o = shift + scale * x
    return o


class UBlock(nn.Module):
    """WaveGrad upsampling block: interpolates by `factor`, applies FiLM-
    modulated dilated convs with a 1x1-projected residual path."""
    def __init__(self, input_size, hidden_size, factor, dilation):
        super().__init__()
        assert isinstance(dilation, (list, tuple))
        assert len(dilation) == 4
        self.factor = factor
        self.res_block = Conv1d(input_size, hidden_size, 1)
        self.main_block = nn.ModuleList([
            Conv1d(input_size,
                   hidden_size,
                   3,
                   dilation=dilation[0],
                   padding=dilation[0]),
            Conv1d(hidden_size,
                   hidden_size,
                   3,
                   dilation=dilation[1],
                   padding=dilation[1])
        ])
        self.out_block = nn.ModuleList([
            Conv1d(hidden_size,
                   hidden_size,
                   3,
                   dilation=dilation[2],
                   padding=dilation[2]),
            Conv1d(hidden_size,
                   hidden_size,
                   3,
                   dilation=dilation[3],
                   padding=dilation[3])
        ])

    def forward(self, x, shift, scale):
        # upsample once for the residual path, once for the main path
        x_inter = F.interpolate(x, size=x.shape[-1] * self.factor)
        res = self.res_block(x_inter)
        o = F.leaky_relu(x_inter, 0.2)
        o = F.interpolate(o, size=x.shape[-1] * self.factor)
        o = self.main_block[0](o)
        o = shif_and_scale(o, scale, shift)
        o = F.leaky_relu(o, 0.2)
        o = self.main_block[1](o)
        res2 = res + o
        # second FiLM-modulated sub-stack on top of the first residual sum
        o = shif_and_scale(res2, scale, shift)
        o = F.leaky_relu(o, 0.2)
        o = self.out_block[0](o)
        o = shif_and_scale(o, scale, shift)
        o = F.leaky_relu(o, 0.2)
        o = self.out_block[1](o)
        o = o + res2
        return o

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.res_block)
        # empty state_dict => layer has no weights to un-norm
        for _, layer in enumerate(self.main_block):
            if len(layer.state_dict()) != 0:
                nn.utils.remove_weight_norm(layer)
        for _, layer in enumerate(self.out_block):
            if len(layer.state_dict()) != 0:
                nn.utils.remove_weight_norm(layer)

    def apply_weight_norm(self):
        self.res_block = weight_norm(self.res_block)
        for idx, layer in enumerate(self.main_block):
            if len(layer.state_dict()) != 0:
                self.main_block[idx] = weight_norm(layer)
        for idx, layer in enumerate(self.out_block):
            if len(layer.state_dict()) != 0:
                self.out_block[idx] = weight_norm(layer)


class DBlock(nn.Module):
    """WaveGrad downsampling block: shrinks time length by `factor` with a
    stack of increasingly dilated convs plus a 1x1 residual path."""
    def __init__(self, input_size, hidden_size, factor):
        super().__init__()
        self.factor = factor
        self.res_block = Conv1d(input_size, hidden_size, 1)
        self.main_block = nn.ModuleList([
            Conv1d(input_size, hidden_size, 3, dilation=1, padding=1),
            Conv1d(hidden_size, hidden_size, 3, dilation=2, padding=2),
            Conv1d(hidden_size, hidden_size, 3, dilation=4, padding=4),
        ])

    def forward(self, x):
        size = x.shape[-1] // self.factor
        res = self.res_block(x)
        res = F.interpolate(res, size=size)
        o = F.interpolate(x, size=size)
        for layer in self.main_block:
            o = F.leaky_relu(o, 0.2)
            o = layer(o)
        return o + res

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.res_block)
        for _, layer in enumerate(self.main_block):
            if len(layer.state_dict()) != 0:
                nn.utils.remove_weight_norm(layer)

    def apply_weight_norm(self):
        self.res_block = weight_norm(self.res_block)
        for idx, layer in enumerate(self.main_block):
            if len(layer.state_dict()) != 0:
                self.main_block[idx] = weight_norm(layer)
6,178
34.107955
106
py
TTS
TTS-master/TTS/vocoder/layers/upsample.py
import torch
from torch.nn import functional as F


class Stretch2d(torch.nn.Module):
    """Nearest/other-mode 2D stretch by integer factors along (freq, time)."""
    def __init__(self, x_scale, y_scale, mode="nearest"):
        super(Stretch2d, self).__init__()
        self.x_scale = x_scale  # time-axis scale
        self.y_scale = y_scale  # frequency-axis scale
        self.mode = mode

    def forward(self, x):
        """
        x (Tensor): Input tensor (B, C, F, T).
        Tensor: Interpolated tensor (B, C, F * y_scale, T * x_scale),
        """
        return F.interpolate(
            x, scale_factor=(self.y_scale, self.x_scale), mode=self.mode)


class UpsampleNetwork(torch.nn.Module):
    """Upsamples conditioning features to sample rate via stretch + conv pairs."""
    # pylint: disable=dangerous-default-value
    def __init__(self,
                 upsample_factors,
                 nonlinear_activation=None,
                 nonlinear_activation_params={},
                 interpolate_mode="nearest",
                 freq_axis_kernel_size=1,
                 use_causal_conv=False,
                 ):
        super(UpsampleNetwork, self).__init__()
        self.use_causal_conv = use_causal_conv
        self.up_layers = torch.nn.ModuleList()
        for scale in upsample_factors:
            # interpolation layer
            stretch = Stretch2d(scale, 1, interpolate_mode)
            self.up_layers += [stretch]

            # conv layer smoothing each stretched step
            assert (freq_axis_kernel_size - 1) % 2 == 0, "Not support even number freq axis kernel size."
            freq_axis_padding = (freq_axis_kernel_size - 1) // 2
            kernel_size = (freq_axis_kernel_size, scale * 2 + 1)
            if use_causal_conv:
                # pad the whole window on the left so no future frames are used
                padding = (freq_axis_padding, scale * 2)
            else:
                padding = (freq_axis_padding, scale)
            conv = torch.nn.Conv2d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
            self.up_layers += [conv]

            # nonlinear
            if nonlinear_activation is not None:
                nonlinear = getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)
                self.up_layers += [nonlinear]

    def forward(self, c):
        """
        c : (B, C, T_in).
        Tensor: (B, C, T_upsample)
        """
        c = c.unsqueeze(1)  # (B, 1, C, T)
        for f in self.up_layers:
            c = f(c)
        return c.squeeze(1)  # (B, C, T')


class ConvUpsample(torch.nn.Module):
    """UpsampleNetwork preceded by a context-aggregating 1D conv on the aux
    features."""
    # pylint: disable=dangerous-default-value
    def __init__(self,
                 upsample_factors,
                 nonlinear_activation=None,
                 nonlinear_activation_params={},
                 interpolate_mode="nearest",
                 freq_axis_kernel_size=1,
                 aux_channels=80,
                 aux_context_window=0,
                 use_causal_conv=False
                 ):
        super(ConvUpsample, self).__init__()
        self.aux_context_window = aux_context_window
        # causal mode only meaningful when there is a context window to trim
        self.use_causal_conv = use_causal_conv and aux_context_window > 0
        # To capture wide-context information in conditional features
        kernel_size = aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1
        # NOTE(kan-bayashi): Here do not use padding because the input is already padded
        self.conv_in = torch.nn.Conv1d(aux_channels,
                                       aux_channels,
                                       kernel_size=kernel_size,
                                       bias=False)
        self.upsample = UpsampleNetwork(
            upsample_factors=upsample_factors,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            use_causal_conv=use_causal_conv,
        )

    def forward(self, c):
        """
        c : (B, C, T_in).
        Tensor: (B, C, T_upsampled),
        """
        c_ = self.conv_in(c)
        # in causal mode, drop the trailing context frames before upsampling
        c = c_[:, :, :-self.aux_context_window] if self.use_causal_conv else c_
        return self.upsample(c)
3,899
37.235294
105
py
TTS
TTS-master/TTS/vocoder/utils/generic_utils.py
import re
import torch
import importlib
import numpy as np
from matplotlib import pyplot as plt

from TTS.tts.utils.visual import plot_spectrogram


def interpolate_vocoder_input(scale_factor, spec):
    """Interpolate spectrogram by the scale factor.
    It is mainly used to match the sampling rates of
    the tts and vocoder models.

    Args:
        scale_factor (float): scale factor to interpolate the spectrogram
        spec (np.array): spectrogram to be interpolated

    Returns:
        torch.tensor: interpolated spectrogram.
    """
    print(" > before interpolation :", spec.shape)
    spec = torch.tensor(spec).unsqueeze(0).unsqueeze(0)  # pylint: disable=not-callable
    spec = torch.nn.functional.interpolate(spec,
                                           scale_factor=scale_factor,
                                           recompute_scale_factor=True,
                                           mode='bilinear',
                                           align_corners=False).squeeze(0)
    print(" > after interpolation :", spec.shape)
    return spec


def plot_results(y_hat, y, ap, global_step, name_prefix):
    """ Plot vocoder model results.

    Returns a dict of matplotlib figures keyed by `name_prefix` + panel name,
    suitable for a tensorboard-style logger.
    """
    # select an instance from batch
    y_hat = y_hat[0].squeeze(0).detach().cpu().numpy()
    y = y[0].squeeze(0).detach().cpu().numpy()

    spec_fake = ap.melspectrogram(y_hat).T
    spec_real = ap.melspectrogram(y).T
    spec_diff = np.abs(spec_fake - spec_real)

    # plot figure and save it
    fig_wave = plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(y)
    plt.title("groundtruth speech")
    plt.subplot(2, 1, 2)
    plt.plot(y_hat)
    plt.title(f"generated speech @ {global_step} steps")
    plt.tight_layout()
    plt.close()

    figures = {
        name_prefix + "spectrogram/fake": plot_spectrogram(spec_fake),
        name_prefix + "spectrogram/real": plot_spectrogram(spec_real),
        name_prefix + "spectrogram/diff": plot_spectrogram(spec_diff),
        name_prefix + "speech_comparison": fig_wave,
    }
    return figures


def to_camel(text):
    """Convert a snake_case name to CamelCase (e.g. melgan_generator -> MelganGenerator)."""
    text = text.capitalize()
    return re.sub(r'(?!^)_([a-zA-Z])', lambda m: m.group(1).upper(), text)


def setup_wavernn(c):
    """Build a WaveRNN model from config `c` via dynamic import."""
    print(" > Model: WaveRNN")
    MyModel = importlib.import_module("TTS.vocoder.models.wavernn")
    MyModel = getattr(MyModel, "WaveRNN")
    model = MyModel(
        rnn_dims=c.wavernn_model_params['rnn_dims'],
        fc_dims=c.wavernn_model_params['fc_dims'],
        mode=c.mode,
        mulaw=c.mulaw,
        pad=c.padding,
        use_aux_net=c.wavernn_model_params['use_aux_net'],
        use_upsample_net=c.wavernn_model_params['use_upsample_net'],
        upsample_factors=c.wavernn_model_params['upsample_factors'],
        feat_dims=c.audio['num_mels'],
        compute_dims=c.wavernn_model_params['compute_dims'],
        res_out_dims=c.wavernn_model_params['res_out_dims'],
        num_res_blocks=c.wavernn_model_params['num_res_blocks'],
        hop_length=c.audio["hop_length"],
        sample_rate=c.audio["sample_rate"],
    )
    return model


def setup_generator(c):
    """Build the generator model named by `c.generator_model` via dynamic import.

    NOTE(review): the `in` checks below test whether the configured name is a
    SUBSTRING of the literal (so e.g. 'melgan' matches 'melgan_generator').
    Replacing them with `==` would reject shorthand names — confirm intended
    matching semantics before tightening.
    """
    print(" > Generator Model: {}".format(c.generator_model))
    MyModel = importlib.import_module('TTS.vocoder.models.' +
                                      c.generator_model.lower())
    MyModel = getattr(MyModel, to_camel(c.generator_model))
    if c.generator_model.lower() in 'melgan_generator':
        model = MyModel(
            in_channels=c.audio['num_mels'],
            out_channels=1,
            proj_kernel=7,
            base_channels=512,
            upsample_factors=c.generator_model_params['upsample_factors'],
            res_kernel=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'])
    if c.generator_model in 'melgan_fb_generator':
        pass
    if c.generator_model.lower() in 'multiband_melgan_generator':
        model = MyModel(
            in_channels=c.audio['num_mels'],
            out_channels=4,
            proj_kernel=7,
            base_channels=384,
            upsample_factors=c.generator_model_params['upsample_factors'],
            res_kernel=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'])
    if c.generator_model.lower() in 'fullband_melgan_generator':
        model = MyModel(
            in_channels=c.audio['num_mels'],
            out_channels=1,
            proj_kernel=7,
            base_channels=512,
            upsample_factors=c.generator_model_params['upsample_factors'],
            res_kernel=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'])
    if c.generator_model.lower() in 'parallel_wavegan_generator':
        model = MyModel(
            in_channels=1,
            out_channels=1,
            kernel_size=3,
            num_res_blocks=c.generator_model_params['num_res_blocks'],
            stacks=c.generator_model_params['stacks'],
            res_channels=64,
            gate_channels=128,
            skip_channels=64,
            aux_channels=c.audio['num_mels'],
            dropout=0.0,
            bias=True,
            use_weight_norm=True,
            upsample_factors=c.generator_model_params['upsample_factors'])
    if c.generator_model.lower() in 'wavegrad':
        model = MyModel(
            in_channels=c['audio']['num_mels'],
            out_channels=1,
            use_weight_norm=c['model_params']['use_weight_norm'],
            x_conv_channels=c['model_params']['x_conv_channels'],
            y_conv_channels=c['model_params']['y_conv_channels'],
            dblock_out_channels=c['model_params']['dblock_out_channels'],
            ublock_out_channels=c['model_params']['ublock_out_channels'],
            upsample_factors=c['model_params']['upsample_factors'],
            upsample_dilations=c['model_params']['upsample_dilations'])
    return model


def setup_discriminator(c):
    """Build the discriminator model named by `c.discriminator_model`.

    Same substring-matching caveat as `setup_generator` for the `in` checks.
    """
    print(" > Discriminator Model: {}".format(c.discriminator_model))
    if 'parallel_wavegan' in c.discriminator_model:
        # both parallel_wavegan discriminators live in the same module
        MyModel = importlib.import_module(
            'TTS.vocoder.models.parallel_wavegan_discriminator')
    else:
        MyModel = importlib.import_module('TTS.vocoder.models.' +
                                          c.discriminator_model.lower())
    MyModel = getattr(MyModel, to_camel(c.discriminator_model.lower()))
    if c.discriminator_model in 'random_window_discriminator':
        model = MyModel(
            cond_channels=c.audio['num_mels'],
            hop_length=c.audio['hop_length'],
            uncond_disc_donwsample_factors=c.
            discriminator_model_params['uncond_disc_donwsample_factors'],
            cond_disc_downsample_factors=c.
            discriminator_model_params['cond_disc_downsample_factors'],
            cond_disc_out_channels=c.
            discriminator_model_params['cond_disc_out_channels'],
            window_sizes=c.discriminator_model_params['window_sizes'])
    if c.discriminator_model in 'melgan_multiscale_discriminator':
        model = MyModel(
            in_channels=1,
            out_channels=1,
            kernel_sizes=(5, 3),
            base_channels=c.discriminator_model_params['base_channels'],
            max_channels=c.discriminator_model_params['max_channels'],
            downsample_factors=c.
            discriminator_model_params['downsample_factors'])
    if c.discriminator_model == 'residual_parallel_wavegan_discriminator':
        model = MyModel(
            in_channels=1,
            out_channels=1,
            kernel_size=3,
            num_layers=c.discriminator_model_params['num_layers'],
            stacks=c.discriminator_model_params['stacks'],
            res_channels=64,
            gate_channels=128,
            skip_channels=64,
            dropout=0.0,
            bias=True,
            nonlinear_activation="LeakyReLU",
            nonlinear_activation_params={"negative_slope": 0.2},
        )
    if c.discriminator_model == 'parallel_wavegan_discriminator':
        model = MyModel(
            in_channels=1,
            out_channels=1,
            kernel_size=3,
            num_layers=c.discriminator_model_params['num_layers'],
            conv_channels=64,
            dilation_factor=1,
            nonlinear_activation="LeakyReLU",
            nonlinear_activation_params={"negative_slope": 0.2},
            bias=True
        )
    return model


# def check_config(c):
#     c = None
#     pass
37.585253
87
py
TTS
TTS-master/TTS/vocoder/utils/distribution.py
import numpy as np import math import torch from torch.distributions.normal import Normal import torch.nn.functional as F def gaussian_loss(y_hat, y, log_std_min=-7.0): assert y_hat.dim() == 3 assert y_hat.size(2) == 2 mean = y_hat[:, :, :1] log_std = torch.clamp(y_hat[:, :, 1:], min=log_std_min) # TODO: replace with pytorch dist log_probs = -0.5 * ( -math.log(2.0 * math.pi) - 2.0 * log_std - torch.pow(y - mean, 2) * torch.exp((-2.0 * log_std)) ) return log_probs.squeeze().mean() def sample_from_gaussian(y_hat, log_std_min=-7.0, scale_factor=1.0): assert y_hat.size(2) == 2 mean = y_hat[:, :, :1] log_std = torch.clamp(y_hat[:, :, 1:], min=log_std_min) dist = Normal( mean, torch.exp(log_std), ) sample = dist.sample() sample = torch.clamp(torch.clamp( sample, min=-scale_factor), max=scale_factor) del dist return sample def log_sum_exp(x): """ numerically stable log_sum_exp implementation that prevents overflow """ # TF ordering axis = len(x.size()) - 1 m, _ = torch.max(x, dim=axis) m2, _ = torch.max(x, dim=axis, keepdim=True) return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis)) # It is adapted from https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py def discretized_mix_logistic_loss( y_hat, y, num_classes=65536, log_scale_min=None, reduce=True ): if log_scale_min is None: log_scale_min = float(np.log(1e-14)) y_hat = y_hat.permute(0, 2, 1) assert y_hat.dim() == 3 assert y_hat.size(1) % 3 == 0 nr_mix = y_hat.size(1) // 3 # (B x T x C) y_hat = y_hat.transpose(1, 2) # unpack parameters. 
(B, T, num_mixtures) x 3 logit_probs = y_hat[:, :, :nr_mix] means = y_hat[:, :, nr_mix: 2 * nr_mix] log_scales = torch.clamp( y_hat[:, :, 2 * nr_mix: 3 * nr_mix], min=log_scale_min) # B x T x 1 -> B x T x num_mixtures y = y.expand_as(means) centered_y = y - means inv_stdv = torch.exp(-log_scales) plus_in = inv_stdv * (centered_y + 1.0 / (num_classes - 1)) cdf_plus = torch.sigmoid(plus_in) min_in = inv_stdv * (centered_y - 1.0 / (num_classes - 1)) cdf_min = torch.sigmoid(min_in) # log probability for edge case of 0 (before scaling) # equivalent: torch.log(F.sigmoid(plus_in)) log_cdf_plus = plus_in - F.softplus(plus_in) # log probability for edge case of 255 (before scaling) # equivalent: (1 - F.sigmoid(min_in)).log() log_one_minus_cdf_min = -F.softplus(min_in) # probability for all other cases cdf_delta = cdf_plus - cdf_min mid_in = inv_stdv * centered_y # log probability in the center of the bin, to be used in extreme cases # (not actually used in our code) log_pdf_mid = mid_in - log_scales - 2.0 * F.softplus(mid_in) # tf equivalent # log_probs = tf.where(x < -0.999, log_cdf_plus, # tf.where(x > 0.999, log_one_minus_cdf_min, # tf.where(cdf_delta > 1e-5, # tf.log(tf.maximum(cdf_delta, 1e-12)), # log_pdf_mid - np.log(127.5)))) # TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value # for num_classes=65536 case? 1e-7? not sure.. 
inner_inner_cond = (cdf_delta > 1e-5).float() inner_inner_out = inner_inner_cond * torch.log( torch.clamp(cdf_delta, min=1e-12) ) + (1.0 - inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2)) inner_cond = (y > 0.999).float() inner_out = ( inner_cond * log_one_minus_cdf_min + (1.0 - inner_cond) * inner_inner_out ) cond = (y < -0.999).float() log_probs = cond * log_cdf_plus + (1.0 - cond) * inner_out log_probs = log_probs + F.log_softmax(logit_probs, -1) if reduce: return -torch.mean(log_sum_exp(log_probs)) return -log_sum_exp(log_probs).unsqueeze(-1) def sample_from_discretized_mix_logistic(y, log_scale_min=None): """ Sample from discretized mixture of logistic distributions Args: y (Tensor): B x C x T log_scale_min (float): Log scale minimum value Returns: Tensor: sample in range of [-1, 1]. """ if log_scale_min is None: log_scale_min = float(np.log(1e-14)) assert y.size(1) % 3 == 0 nr_mix = y.size(1) // 3 # B x T x C y = y.transpose(1, 2) logit_probs = y[:, :, :nr_mix] # sample mixture indicator from softmax temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5) temp = logit_probs.data - torch.log(-torch.log(temp)) _, argmax = temp.max(dim=-1) # (B, T) -> (B, T, nr_mix) one_hot = to_one_hot(argmax, nr_mix) # select logistic parameters means = torch.sum(y[:, :, nr_mix: 2 * nr_mix] * one_hot, dim=-1) log_scales = torch.clamp( torch.sum(y[:, :, 2 * nr_mix: 3 * nr_mix] * one_hot, dim=-1), min=log_scale_min ) # sample from logistic & clip to interval # we don't actually round to the nearest 8bit value when sampling u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5) x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1.0 - u)) x = torch.clamp(torch.clamp(x, min=-1.0), max=1.0) return x def to_one_hot(tensor, n, fill_with=1.0): # we perform one hot encore with respect to the last axis one_hot = torch.FloatTensor(tensor.size() + (n,)).zero_() if tensor.is_cuda: one_hot = one_hot.cuda() 
one_hot.scatter_(len(tensor.size()), tensor.unsqueeze(-1), fill_with) return one_hot
5,684
32.639053
99
py
TTS
TTS-master/TTS/vocoder/utils/io.py
import os import torch import datetime import pickle as pickle_tts from TTS.utils.io import RenamingUnpickler def load_checkpoint(model, checkpoint_path, use_cuda=False, eval=False): try: state = torch.load(checkpoint_path, map_location=torch.device('cpu')) except ModuleNotFoundError: pickle_tts.Unpickler = RenamingUnpickler state = torch.load(checkpoint_path, map_location=torch.device('cpu'), pickle_module=pickle_tts) model.load_state_dict(state['model']) if use_cuda: model.cuda() if eval: model.eval() return model, state def save_model(model, optimizer, scheduler, model_disc, optimizer_disc, scheduler_disc, current_step, epoch, output_path, **kwargs): if hasattr(model, 'module'): model_state = model.module.state_dict() else: model_state = model.state_dict() model_disc_state = model_disc.state_dict()\ if model_disc is not None else None optimizer_state = optimizer.state_dict()\ if optimizer is not None else None optimizer_disc_state = optimizer_disc.state_dict()\ if optimizer_disc is not None else None scheduler_state = scheduler.state_dict()\ if scheduler is not None else None scheduler_disc_state = scheduler_disc.state_dict()\ if scheduler_disc is not None else None state = { 'model': model_state, 'optimizer': optimizer_state, 'scheduler': scheduler_state, 'model_disc': model_disc_state, 'optimizer_disc': optimizer_disc_state, 'scheduler_disc': scheduler_disc_state, 'step': current_step, 'epoch': epoch, 'date': datetime.date.today().strftime("%B %d, %Y"), } state.update(kwargs) torch.save(state, output_path) def save_checkpoint(model, optimizer, scheduler, model_disc, optimizer_disc, scheduler_disc, current_step, epoch, output_folder, **kwargs): file_name = 'checkpoint_{}.pth.tar'.format(current_step) checkpoint_path = os.path.join(output_folder, file_name) print(" > CHECKPOINT : {}".format(checkpoint_path)) save_model(model, optimizer, scheduler, model_disc, optimizer_disc, scheduler_disc, current_step, epoch, checkpoint_path, **kwargs) def 
save_best_model(target_loss, best_loss, model, optimizer, scheduler, model_disc, optimizer_disc, scheduler_disc, current_step, epoch, output_folder, **kwargs): if target_loss < best_loss: file_name = 'best_model.pth.tar' checkpoint_path = os.path.join(output_folder, file_name) print(" > BEST MODEL : {}".format(checkpoint_path)) save_model(model, optimizer, scheduler, model_disc, optimizer_disc, scheduler_disc, current_step, epoch, checkpoint_path, model_loss=target_loss, **kwargs) best_loss = target_loss return best_loss
3,117
36.119048
103
py
TTS
TTS-master/TTS/vocoder/tf/models/melgan_generator.py
import logging import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL logging.getLogger('tensorflow').setLevel(logging.FATAL) import tensorflow as tf from TTS.vocoder.tf.layers.melgan import ResidualStack, ReflectionPad1d #pylint: disable=too-many-ancestors #pylint: disable=abstract-method class MelganGenerator(tf.keras.models.Model): """ Melgan Generator TF implementation dedicated for inference with no weight norm """ def __init__(self, in_channels=80, out_channels=1, proj_kernel=7, base_channels=512, upsample_factors=(8, 8, 2, 2), res_kernel=3, num_res_blocks=3): super(MelganGenerator, self).__init__() self.in_channels = in_channels # assert model parameters assert (proj_kernel - 1) % 2 == 0, " [!] proj_kernel should be an odd number." # setup additional model parameters base_padding = (proj_kernel - 1) // 2 act_slope = 0.2 self.inference_padding = 2 # initial layer self.initial_layer = [ ReflectionPad1d(base_padding), tf.keras.layers.Conv2D(filters=base_channels, kernel_size=(proj_kernel, 1), strides=1, padding='valid', use_bias=True, name="1") ] num_layers = 3 # count number of layers for layer naming # upsampling layers and residual stacks self.upsample_layers = [] for idx, upsample_factor in enumerate(upsample_factors): layer_out_channels = base_channels // (2**(idx + 1)) layer_filter_size = upsample_factor * 2 layer_stride = upsample_factor # layer_output_padding = upsample_factor % 2 self.upsample_layers += [ tf.keras.layers.LeakyReLU(act_slope), tf.keras.layers.Conv2DTranspose( filters=layer_out_channels, kernel_size=(layer_filter_size, 1), strides=(layer_stride, 1), padding='same', # output_padding=layer_output_padding, use_bias=True, name=f'{num_layers}'), ResidualStack(channels=layer_out_channels, num_res_blocks=num_res_blocks, kernel_size=res_kernel, name=f'layers.{num_layers + 1}') ] num_layers += num_res_blocks - 1 self.upsample_layers += [tf.keras.layers.LeakyReLU(act_slope)] # final layer self.final_layers = [ ReflectionPad1d(base_padding), 
tf.keras.layers.Conv2D(filters=out_channels, kernel_size=(proj_kernel, 1), use_bias=True, name=f'layers.{num_layers + 1}'), tf.keras.layers.Activation("tanh") ] # self.model_layers = tf.keras.models.Sequential(self.initial_layer + self.upsample_layers + self.final_layers, name="layers") self.model_layers = self.initial_layer + self.upsample_layers + self.final_layers @tf.function(experimental_relax_shapes=True) def call(self, c, training=False): """ c : B x C x T """ if training: raise NotImplementedError() return self.inference(c) def inference(self, c): c = tf.transpose(c, perm=[0, 2, 1]) c = tf.expand_dims(c, 2) # FIXME: TF had no replicate padding as in Torch # c = tf.pad(c, [[0, 0], [self.inference_padding, self.inference_padding], [0, 0], [0, 0]], "REFLECT") o = c for layer in self.model_layers: o = layer(o) # o = self.model_layers(c) o = tf.transpose(o, perm=[0, 3, 2, 1]) return o[:, :, 0, :] def build_inference(self): x = tf.random.uniform((1, self.in_channels, 4), dtype=tf.float32) self(x, training=False) @tf.function( experimental_relax_shapes=True, input_signature=[ tf.TensorSpec([1, None, None], dtype=tf.float32), ],) def inference_tflite(self, c): c = tf.transpose(c, perm=[0, 2, 1]) c = tf.expand_dims(c, 2) # FIXME: TF had no replicate padding as in Torch # c = tf.pad(c, [[0, 0], [self.inference_padding, self.inference_padding], [0, 0], [0, 0]], "REFLECT") o = c for layer in self.model_layers: o = layer(o) # o = self.model_layers(c) o = tf.transpose(o, perm=[0, 3, 2, 1]) return o[:, :, 0, :]
4,858
36.666667
134
py
TTS
TTS-master/TTS/vocoder/tf/layers/pqmf.py
import numpy as np import tensorflow as tf from scipy import signal as sig class PQMF(tf.keras.layers.Layer): def __init__(self, N=4, taps=62, cutoff=0.15, beta=9.0): super(PQMF, self).__init__() # define filter coefficient self.N = N self.taps = taps self.cutoff = cutoff self.beta = beta QMF = sig.firwin(taps + 1, cutoff, window=('kaiser', beta)) H = np.zeros((N, len(QMF))) G = np.zeros((N, len(QMF))) for k in range(N): constant_factor = (2 * k + 1) * (np.pi / (2 * N)) * (np.arange(taps + 1) - ((taps - 1) / 2)) phase = (-1)**k * np.pi / 4 H[k] = 2 * QMF * np.cos(constant_factor + phase) G[k] = 2 * QMF * np.cos(constant_factor - phase) # [N, 1, taps + 1] == [filter_width, in_channels, out_channels] self.H = np.transpose(H[:, None, :], (2, 1, 0)).astype('float32') self.G = np.transpose(G[None, :, :], (2, 1, 0)).astype('float32') # filter for downsampling & upsampling updown_filter = np.zeros((N, N, N), dtype=np.float32) for k in range(N): updown_filter[0, k, k] = 1.0 self.updown_filter = updown_filter.astype(np.float32) def analysis(self, x): """ x : B x 1 x T """ x = tf.transpose(x, perm=[0, 2, 1]) x = tf.pad(x, [[0, 0], [self.taps // 2, self.taps // 2], [0, 0]], constant_values=0.0) x = tf.nn.conv1d(x, self.H, stride=1, padding='VALID') x = tf.nn.conv1d(x, self.updown_filter, stride=self.N, padding='VALID') x = tf.transpose(x, perm=[0, 2, 1]) return x def synthesis(self, x): """ x : B x D x T """ x = tf.transpose(x, perm=[0, 2, 1]) x = tf.nn.conv1d_transpose( x, self.updown_filter * self.N, strides=self.N, output_shape=(tf.shape(x)[0], tf.shape(x)[1] * self.N, self.N)) x = tf.pad(x, [[0, 0], [self.taps // 2, self.taps // 2], [0, 0]], constant_values=0.0) x = tf.nn.conv1d(x, self.G, stride=1, padding="VALID") x = tf.transpose(x, perm=[0, 2, 1]) return x
2,396
34.776119
94
py
TTS
TTS-master/TTS/vocoder/tf/layers/melgan.py
import tensorflow as tf class ReflectionPad1d(tf.keras.layers.Layer): def __init__(self, padding): super(ReflectionPad1d, self).__init__() self.padding = padding def call(self, x): return tf.pad(x, [[0, 0], [self.padding, self.padding], [0, 0], [0, 0]], "REFLECT") class ResidualStack(tf.keras.layers.Layer): def __init__(self, channels, num_res_blocks, kernel_size, name): super(ResidualStack, self).__init__(name=name) assert (kernel_size - 1) % 2 == 0, " [!] kernel_size has to be odd." base_padding = (kernel_size - 1) // 2 self.blocks = [] num_layers = 2 for idx in range(num_res_blocks): layer_kernel_size = kernel_size layer_dilation = layer_kernel_size**idx layer_padding = base_padding * layer_dilation block = [ tf.keras.layers.LeakyReLU(0.2), ReflectionPad1d(layer_padding), tf.keras.layers.Conv2D(filters=channels, kernel_size=(kernel_size, 1), dilation_rate=(layer_dilation, 1), use_bias=True, padding='valid', name=f'blocks.{idx}.{num_layers}'), tf.keras.layers.LeakyReLU(0.2), tf.keras.layers.Conv2D(filters=channels, kernel_size=(1, 1), use_bias=True, name=f'blocks.{idx}.{num_layers + 2}') ] self.blocks.append(block) self.shortcuts = [ tf.keras.layers.Conv2D(channels, kernel_size=1, use_bias=True, name=f'shortcuts.{i}') for i in range(num_res_blocks) ] def call(self, x): for block, shortcut in zip(self.blocks, self.shortcuts): res = shortcut(x) for layer in block: x = layer(x) x += res return x
2,191
37.45614
91
py
TTS
TTS-master/TTS/vocoder/tf/utils/convert_torch_to_tf_utils.py
import numpy as np import tensorflow as tf def compare_torch_tf(torch_tensor, tf_tensor): """ Compute the average absolute difference b/w torch and tf tensors """ return abs(torch_tensor.detach().numpy() - tf_tensor.numpy()).mean() def convert_tf_name(tf_name): """ Convert certain patterns in TF layer names to Torch patterns """ tf_name_tmp = tf_name tf_name_tmp = tf_name_tmp.replace(':0', '') tf_name_tmp = tf_name_tmp.replace('/forward_lstm/lstm_cell_1/recurrent_kernel', '/weight_hh_l0') tf_name_tmp = tf_name_tmp.replace('/forward_lstm/lstm_cell_2/kernel', '/weight_ih_l1') tf_name_tmp = tf_name_tmp.replace('/recurrent_kernel', '/weight_hh') tf_name_tmp = tf_name_tmp.replace('/kernel', '/weight') tf_name_tmp = tf_name_tmp.replace('/gamma', '/weight') tf_name_tmp = tf_name_tmp.replace('/beta', '/bias') tf_name_tmp = tf_name_tmp.replace('/', '.') return tf_name_tmp def transfer_weights_torch_to_tf(tf_vars, var_map_dict, state_dict): """ Transfer weigths from torch state_dict to TF variables """ print(" > Passing weights from Torch to TF ...") for tf_var in tf_vars: torch_var_name = var_map_dict[tf_var.name] print(f' | > {tf_var.name} <-- {torch_var_name}') # if tuple, it is a bias variable if 'kernel' in tf_var.name: torch_weight = state_dict[torch_var_name] numpy_weight = torch_weight.permute([2, 1, 0]).numpy()[:, None, :, :] if 'bias' in tf_var.name: torch_weight = state_dict[torch_var_name] numpy_weight = torch_weight assert np.all(tf_var.shape == numpy_weight.shape), f" [!] weight shapes does not match: {tf_var.name} vs {torch_var_name} --> {tf_var.shape} vs {numpy_weight.shape}" tf.keras.backend.set_value(tf_var, numpy_weight) return tf_vars def load_tf_vars(model_tf, tf_vars): for tf_var in tf_vars: model_tf.get_layer(tf_var.name).set_weights(tf_var) return model_tf
1,997
42.434783
173
py
TTS
TTS-master/TTS/vocoder/tf/utils/io.py
import datetime import pickle import tensorflow as tf def save_checkpoint(model, current_step, epoch, output_path, **kwargs): """ Save TF Vocoder model """ state = { 'model': model.weights, 'step': current_step, 'epoch': epoch, 'date': datetime.date.today().strftime("%B %d, %Y"), } state.update(kwargs) pickle.dump(state, open(output_path, 'wb')) def load_checkpoint(model, checkpoint_path): """ Load TF Vocoder model """ checkpoint = pickle.load(open(checkpoint_path, 'rb')) chkp_var_dict = {var.name: var.numpy() for var in checkpoint['model']} tf_vars = model.weights for tf_var in tf_vars: layer_name = tf_var.name chkp_var_value = chkp_var_dict[layer_name] tf.keras.backend.set_value(tf_var, chkp_var_value) return model
831
28.714286
74
py
TTS
TTS-master/tests/test_layers.py
import unittest import torch as T from TTS.tts.layers.tacotron import Prenet, CBHG, Decoder, Encoder from TTS.tts.layers.losses import L1LossMasked, SSIMLoss from TTS.tts.utils.generic_utils import sequence_mask # pylint: disable=unused-variable class PrenetTests(unittest.TestCase): def test_in_out(self): #pylint: disable=no-self-use layer = Prenet(128, out_features=[256, 128]) dummy_input = T.rand(4, 128) print(layer) output = layer(dummy_input) assert output.shape[0] == 4 assert output.shape[1] == 128 class CBHGTests(unittest.TestCase): def test_in_out(self): #pylint: disable=attribute-defined-outside-init layer = self.cbhg = CBHG( 128, K=8, conv_bank_features=80, conv_projections=[160, 128], highway_features=80, gru_features=80, num_highways=4) # B x D x T dummy_input = T.rand(4, 128, 8) print(layer) output = layer(dummy_input) assert output.shape[0] == 4 assert output.shape[1] == 8 assert output.shape[2] == 160 class DecoderTests(unittest.TestCase): @staticmethod def test_in_out(): layer = Decoder( in_channels=256, frame_channels=80, r=2, memory_size=4, attn_windowing=False, attn_norm="sigmoid", attn_K=5, attn_type="original", prenet_type='original', prenet_dropout=True, forward_attn=True, trans_agent=True, forward_attn_mask=True, location_attn=True, separate_stopnet=True) dummy_input = T.rand(4, 8, 256) dummy_memory = T.rand(4, 2, 80) output, alignment, stop_tokens = layer( dummy_input, dummy_memory, mask=None) assert output.shape[0] == 4 assert output.shape[1] == 80, "size not {}".format(output.shape[1]) assert output.shape[2] == 2, "size not {}".format(output.shape[2]) assert stop_tokens.shape[0] == 4 class EncoderTests(unittest.TestCase): def test_in_out(self): #pylint: disable=no-self-use layer = Encoder(128) dummy_input = T.rand(4, 8, 128) print(layer) output = layer(dummy_input) print(output.shape) assert output.shape[0] == 4 assert output.shape[1] == 8 assert output.shape[2] == 256 # 128 * 2 BiRNN class L1LossMaskedTests(unittest.TestCase): def 
test_in_out(self): #pylint: disable=no-self-use # test input == target layer = L1LossMasked(seq_len_norm=False) dummy_input = T.ones(4, 8, 128).float() dummy_target = T.ones(4, 8, 128).float() dummy_length = (T.ones(4) * 8).long() output = layer(dummy_input, dummy_target, dummy_length) assert output.item() == 0.0 # test input != target dummy_input = T.ones(4, 8, 128).float() dummy_target = T.zeros(4, 8, 128).float() dummy_length = (T.ones(4) * 8).long() output = layer(dummy_input, dummy_target, dummy_length) assert output.item() == 1.0, "1.0 vs {}".format(output.item()) # test if padded values of input makes any difference dummy_input = T.ones(4, 8, 128).float() dummy_target = T.zeros(4, 8, 128).float() dummy_length = (T.arange(5, 9)).long() mask = ( (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) output = layer(dummy_input + mask, dummy_target, dummy_length) assert output.item() == 1.0, "1.0 vs {}".format(output.item()) dummy_input = T.rand(4, 8, 128).float() dummy_target = dummy_input.detach() dummy_length = (T.arange(5, 9)).long() mask = ( (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) output = layer(dummy_input + mask, dummy_target, dummy_length) assert output.item() == 0, "0 vs {}".format(output.item()) # seq_len_norm = True # test input == target layer = L1LossMasked(seq_len_norm=True) dummy_input = T.ones(4, 8, 128).float() dummy_target = T.ones(4, 8, 128).float() dummy_length = (T.ones(4) * 8).long() output = layer(dummy_input, dummy_target, dummy_length) assert output.item() == 0.0 # test input != target dummy_input = T.ones(4, 8, 128).float() dummy_target = T.zeros(4, 8, 128).float() dummy_length = (T.ones(4) * 8).long() output = layer(dummy_input, dummy_target, dummy_length) assert output.item() == 1.0, "1.0 vs {}".format(output.item()) # test if padded values of input makes any difference dummy_input = T.ones(4, 8, 128).float() dummy_target = T.zeros(4, 8, 128).float() dummy_length = (T.arange(5, 9)).long() mask = ( 
(sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) output = layer(dummy_input + mask, dummy_target, dummy_length) assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item()) dummy_input = T.rand(4, 8, 128).float() dummy_target = dummy_input.detach() dummy_length = (T.arange(5, 9)).long() mask = ( (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) output = layer(dummy_input + mask, dummy_target, dummy_length) assert output.item() == 0, "0 vs {}".format(output.item()) class SSIMLossTests(unittest.TestCase): def test_in_out(self): #pylint: disable=no-self-use # test input == target layer = SSIMLoss() dummy_input = T.ones(4, 8, 128).float() dummy_target = T.ones(4, 8, 128).float() dummy_length = (T.ones(4) * 8).long() output = layer(dummy_input, dummy_target, dummy_length) assert output.item() == 0.0 # test input != target dummy_input = T.ones(4, 8, 128).float() dummy_target = T.zeros(4, 8, 128).float() dummy_length = (T.ones(4) * 8).long() output = layer(dummy_input, dummy_target, dummy_length) assert abs(output.item() - 1.0) < 1e-4 , "1.0 vs {}".format(output.item()) # test if padded values of input makes any difference dummy_input = T.ones(4, 8, 128).float() dummy_target = T.zeros(4, 8, 128).float() dummy_length = (T.arange(5, 9)).long() mask = ( (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) output = layer(dummy_input + mask, dummy_target, dummy_length) assert abs(output.item() - 1.0) < 1e-4, "1.0 vs {}".format(output.item()) dummy_input = T.rand(4, 8, 128).float() dummy_target = dummy_input.detach() dummy_length = (T.arange(5, 9)).long() mask = ( (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) output = layer(dummy_input + mask, dummy_target, dummy_length) assert output.item() == 0, "0 vs {}".format(output.item()) # seq_len_norm = True # test input == target layer = L1LossMasked(seq_len_norm=True) dummy_input = T.ones(4, 8, 128).float() dummy_target = T.ones(4, 8, 128).float() 
dummy_length = (T.ones(4) * 8).long() output = layer(dummy_input, dummy_target, dummy_length) assert output.item() == 0.0 # test input != target dummy_input = T.ones(4, 8, 128).float() dummy_target = T.zeros(4, 8, 128).float() dummy_length = (T.ones(4) * 8).long() output = layer(dummy_input, dummy_target, dummy_length) assert output.item() == 1.0, "1.0 vs {}".format(output.item()) # test if padded values of input makes any difference dummy_input = T.ones(4, 8, 128).float() dummy_target = T.zeros(4, 8, 128).float() dummy_length = (T.arange(5, 9)).long() mask = ( (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) output = layer(dummy_input + mask, dummy_target, dummy_length) assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item()) dummy_input = T.rand(4, 8, 128).float() dummy_target = dummy_input.detach() dummy_length = (T.arange(5, 9)).long() mask = ( (sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) output = layer(dummy_input + mask, dummy_target, dummy_length) assert output.item() == 0, "0 vs {}".format(output.item())
8,598
37.909502
82
py
TTS
TTS-master/tests/test_vocoder_melgan_generator.py
import numpy as np import torch from TTS.vocoder.models.melgan_generator import MelganGenerator def test_melgan_generator(): model = MelganGenerator() print(model) dummy_input = torch.rand((4, 80, 64)) output = model(dummy_input) assert np.all(output.shape == (4, 1, 64 * 256)) output = model.inference(dummy_input) assert np.all(output.shape == (4, 1, (64 + 4) * 256))
400
27.642857
63
py
TTS
TTS-master/tests/test_glow_tts.py
import copy import os import unittest import torch from tests import get_tests_input_path from torch import optim from TTS.tts.layers.losses import GlowTTSLoss from TTS.tts.models.glow_tts import GlowTts from TTS.utils.io import load_config from TTS.utils.audio import AudioProcessor #pylint: disable=unused-variable torch.manual_seed(1) use_cuda = torch.cuda.is_available() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") c = load_config(os.path.join(get_tests_input_path(), 'test_config.json')) ap = AudioProcessor(**c.audio) WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav") def count_parameters(model): r"""Count number of trainable parameters in a network""" return sum(p.numel() for p in model.parameters() if p.requires_grad) class GlowTTSTrainTest(unittest.TestCase): @staticmethod def test_train_step(): input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8, )).long().to(device) input_lengths[-1] = 128 mel_spec = torch.rand(8, c.audio['num_mels'], 30).to(device) linear_spec = torch.rand(8, 30, c.audio['fft_size']).to(device) mel_lengths = torch.randint(20, 30, (8, )).long().to(device) speaker_ids = torch.randint(0, 5, (8, )).long().to(device) criterion = criterion = GlowTTSLoss() # model to train model = GlowTts( num_chars=32, hidden_channels_enc=128, hidden_channels_dec=128, hidden_channels_dp=32, out_channels=80, encoder_type='rel_pos_transformer', encoder_params={ 'kernel_size': 3, 'dropout_p': 0.1, 'num_layers': 6, 'num_heads': 2, 'hidden_channels_ffn': 768, # 4 times the hidden_channels 'input_length': None }, use_encoder_prenet=True, num_flow_blocks_dec=12, kernel_size_dec=5, dilation_rate=5, num_block_layers=4, dropout_p_dec=0., num_speakers=0, c_in_channels=0, num_splits=4, num_squeeze=1, sigmoid_scale=False, mean_only=False).to(device) # reference model to compare model weights model_ref = GlowTts( num_chars=32, hidden_channels_enc=128, hidden_channels_dec=128, 
hidden_channels_dp=32, out_channels=80, encoder_type='rel_pos_transformer', encoder_params={ 'kernel_size': 3, 'dropout_p': 0.1, 'num_layers': 6, 'num_heads': 2, 'hidden_channels_ffn': 768, # 4 times the hidden_channels 'input_length': None }, use_encoder_prenet=True, num_flow_blocks_dec=12, kernel_size_dec=5, dilation_rate=5, num_block_layers=4, dropout_p_dec=0., num_speakers=0, c_in_channels=0, num_splits=4, num_squeeze=1, sigmoid_scale=False, mean_only=False).to(device) model.train() print(" > Num parameters for GlowTTS model:%s" % (count_parameters(model))) # pass the state to ref model model_ref.load_state_dict(copy.deepcopy(model.state_dict())) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=c.lr) for _ in range(5): z, logdet, y_mean, y_log_scale, alignments, o_dur_log, o_total_dur = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, None) optimizer.zero_grad() loss_dict = criterion(z, y_mean, y_log_scale, logdet, mel_lengths, o_dur_log, o_total_dur, input_lengths) loss = loss_dict['loss'] loss.backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param != param_ref).any( ), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref) count += 1
4,612
33.684211
95
py
TTS
TTS-master/tests/test_vocoder_rwd.py
import torch import numpy as np from TTS.vocoder.models.random_window_discriminator import RandomWindowDiscriminator def test_rwd(): layer = RandomWindowDiscriminator(cond_channels=80, window_sizes=(512, 1024, 2048, 4096, 8192), cond_disc_downsample_factors=[ (8, 4, 2, 2, 2), (8, 4, 2, 2), (8, 4, 2), (8, 4), (4, 2, 2) ], hop_length=256) x = torch.rand([4, 1, 22050]) c = torch.rand([4, 80, 22050 // 256]) scores, _ = layer(x, c) assert len(scores) == 10 assert np.all(scores[0].shape == (4, 1, 1))
816
36.136364
84
py
TTS
TTS-master/tests/test_tacotron_model.py
import copy import os import unittest import torch from tests import get_tests_input_path from torch import nn, optim from TTS.tts.layers.losses import L1LossMasked from TTS.tts.models.tacotron import Tacotron from TTS.utils.io import load_config from TTS.utils.audio import AudioProcessor #pylint: disable=unused-variable torch.manual_seed(1) use_cuda = torch.cuda.is_available() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") c = load_config(os.path.join(get_tests_input_path(), 'test_config.json')) ap = AudioProcessor(**c.audio) WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav") def count_parameters(model): r"""Count number of trainable parameters in a network""" return sum(p.numel() for p in model.parameters() if p.requires_grad) class TacotronTrainTest(unittest.TestCase): @staticmethod def test_train_step(): input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8, )).long().to(device) input_lengths[-1] = 128 mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device) linear_spec = torch.rand(8, 30, c.audio['fft_size']).to(device) mel_lengths = torch.randint(20, 30, (8, )).long().to(device) stop_targets = torch.zeros(8, 30, 1).float().to(device) speaker_ids = torch.randint(0, 5, (8, )).long().to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()):, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // c.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) model = Tacotron( num_chars=32, num_speakers=5, postnet_output_dim=c.audio['fft_size'], decoder_output_dim=c.audio['num_mels'], r=c.r, memory_size=c.memory_size ).to(device) #FIXME: missing num_speakers parameter to Tacotron ctor model.train() print(" > Num parameters for Tacotron model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) 
count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=c.lr) for _ in range(5): mel_out, linear_out, align, stop_tokens = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids) optimizer.zero_grad() loss = criterion(mel_out, mel_spec, mel_lengths) stop_loss = criterion_st(stop_tokens, stop_targets) loss = loss + criterion(linear_out, linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): # ignore pre-higway layer since it works conditional # if count not in [145, 59]: assert (param != param_ref).any( ), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref) count += 1 class MultiSpeakeTacotronTrainTest(unittest.TestCase): @staticmethod def test_train_step(): input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8, )).long().to(device) input_lengths[-1] = 128 mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device) linear_spec = torch.rand(8, 30, c.audio['fft_size']).to(device) mel_lengths = torch.randint(20, 30, (8, )).long().to(device) stop_targets = torch.zeros(8, 30, 1).float().to(device) speaker_embeddings = torch.rand(8, 55).to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()):, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // c.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) model = Tacotron( num_chars=32, num_speakers=5, postnet_output_dim=c.audio['fft_size'], decoder_output_dim=c.audio['num_mels'], r=c.r, memory_size=c.memory_size, speaker_embedding_dim=55, ).to(device) #FIXME: missing num_speakers 
parameter to Tacotron ctor model.train() print(" > Num parameters for Tacotron model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=c.lr) for _ in range(5): mel_out, linear_out, align, stop_tokens = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, speaker_embeddings=speaker_embeddings) optimizer.zero_grad() loss = criterion(mel_out, mel_spec, mel_lengths) stop_loss = criterion_st(stop_tokens, stop_targets) loss = loss + criterion(linear_out, linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): # ignore pre-higway layer since it works conditional # if count not in [145, 59]: assert (param != param_ref).any( ), "param {} with shape {} not updated!! 
\n{}\n{}".format( count, param.shape, param, param_ref) count += 1 class TacotronGSTTrainTest(unittest.TestCase): @staticmethod def test_train_step(): # with random gst mel style input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8, )).long().to(device) input_lengths[-1] = 128 mel_spec = torch.rand(8, 120, c.audio['num_mels']).to(device) linear_spec = torch.rand(8, 120, c.audio['fft_size']).to(device) mel_lengths = torch.randint(20, 120, (8, )).long().to(device) mel_lengths[-1] = 120 stop_targets = torch.zeros(8, 120, 1).float().to(device) speaker_ids = torch.randint(0, 5, (8, )).long().to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()):, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // c.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) model = Tacotron( num_chars=32, num_speakers=5, gst=True, gst_embedding_dim=c.gst['gst_embedding_dim'], gst_num_heads=c.gst['gst_num_heads'], gst_style_tokens=c.gst['gst_style_tokens'], postnet_output_dim=c.audio['fft_size'], decoder_output_dim=c.audio['num_mels'], r=c.r, memory_size=c.memory_size ).to(device) #FIXME: missing num_speakers parameter to Tacotron ctor model.train() # print(model) print(" > Num parameters for Tacotron GST model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=c.lr) for _ in range(10): mel_out, linear_out, align, stop_tokens = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids) optimizer.zero_grad() loss = criterion(mel_out, mel_spec, mel_lengths) stop_loss = criterion_st(stop_tokens, stop_targets) loss = loss + criterion(linear_out, 
linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): # ignore pre-higway layer since it works conditional assert (param != param_ref).any( ), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref) count += 1 # with file gst style mel_spec = torch.FloatTensor(ap.melspectrogram(ap.load_wav(WAV_FILE)))[:, :120].unsqueeze(0).transpose(1, 2).to(device) mel_spec = mel_spec.repeat(8, 1, 1) input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8, )).long().to(device) input_lengths[-1] = 128 linear_spec = torch.rand(8, mel_spec.size(1), c.audio['fft_size']).to(device) mel_lengths = torch.randint(20, mel_spec.size(1), (8, )).long().to(device) mel_lengths[-1] = mel_spec.size(1) stop_targets = torch.zeros(8, mel_spec.size(1), 1).float().to(device) speaker_ids = torch.randint(0, 5, (8, )).long().to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()):, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // c.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) model = Tacotron( num_chars=32, num_speakers=5, gst=True, gst_embedding_dim=c.gst['gst_embedding_dim'], gst_num_heads=c.gst['gst_num_heads'], gst_style_tokens=c.gst['gst_style_tokens'], postnet_output_dim=c.audio['fft_size'], decoder_output_dim=c.audio['num_mels'], r=c.r, memory_size=c.memory_size ).to(device) #FIXME: missing num_speakers parameter to Tacotron ctor model.train() # print(model) print(" > Num parameters for Tacotron GST model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count 
+= 1 optimizer = optim.Adam(model.parameters(), lr=c.lr) for _ in range(10): mel_out, linear_out, align, stop_tokens = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids) optimizer.zero_grad() loss = criterion(mel_out, mel_spec, mel_lengths) stop_loss = criterion_st(stop_tokens, stop_targets) loss = loss + criterion(linear_out, linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): # ignore pre-higway layer since it works conditional assert (param != param_ref).any( ), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref) count += 1 class SCGSTMultiSpeakeTacotronTrainTest(unittest.TestCase): @staticmethod def test_train_step(): input_dummy = torch.randint(0, 24, (8, 128)).long().to(device) input_lengths = torch.randint(100, 129, (8, )).long().to(device) input_lengths[-1] = 128 mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device) linear_spec = torch.rand(8, 30, c.audio['fft_size']).to(device) mel_lengths = torch.randint(20, 30, (8, )).long().to(device) mel_lengths[-1] = mel_spec.size(1) stop_targets = torch.zeros(8, 30, 1).float().to(device) speaker_embeddings = torch.rand(8, 55).to(device) for idx in mel_lengths: stop_targets[:, int(idx.item()):, 0] = 1.0 stop_targets = stop_targets.view(input_dummy.shape[0], stop_targets.size(1) // c.r, -1) stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze() criterion = L1LossMasked(seq_len_norm=False).to(device) criterion_st = nn.BCEWithLogitsLoss().to(device) model = Tacotron( num_chars=32, num_speakers=5, postnet_output_dim=c.audio['fft_size'], decoder_output_dim=c.audio['num_mels'], gst=True, gst_embedding_dim=c.gst['gst_embedding_dim'], gst_num_heads=c.gst['gst_num_heads'], gst_style_tokens=c.gst['gst_style_tokens'], gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding'], r=c.r, 
memory_size=c.memory_size, speaker_embedding_dim=55, ).to(device) #FIXME: missing num_speakers parameter to Tacotron ctor model.train() print(" > Num parameters for Tacotron model:%s" % (count_parameters(model))) model_ref = copy.deepcopy(model) count = 0 for param, param_ref in zip(model.parameters(), model_ref.parameters()): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=c.lr) for _ in range(5): mel_out, linear_out, align, stop_tokens = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, speaker_embeddings=speaker_embeddings) optimizer.zero_grad() loss = criterion(mel_out, mel_spec, mel_lengths) stop_loss = criterion_st(stop_tokens, stop_targets) loss = loss + criterion(linear_out, linear_spec, mel_lengths) + stop_loss loss.backward() optimizer.step() # check parameter changes count = 0 for name_param, param_ref in zip(model.named_parameters(), model_ref.parameters()): # ignore pre-higway layer since it works conditional # if count not in [145, 59]: name, param = name_param if name == 'gst_layer.encoder.recurrence.weight_hh_l0': continue assert (param != param_ref).any( ), "param {} with shape {} not updated!! \n{}\n{}".format( count, param.shape, param, param_ref) count += 1
15,960
43.336111
127
py
TTS
TTS-master/tests/test_vocoder_losses.py
import os import torch from tests import get_tests_input_path, get_tests_output_path, get_tests_path from TTS.utils.audio import AudioProcessor from TTS.utils.io import load_config from TTS.vocoder.layers.losses import MultiScaleSTFTLoss, STFTLoss, TorchSTFT TESTS_PATH = get_tests_path() OUT_PATH = os.path.join(get_tests_output_path(), "audio_tests") os.makedirs(OUT_PATH, exist_ok=True) WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav") C = load_config(os.path.join(get_tests_input_path(), 'test_config.json')) ap = AudioProcessor(**C.audio) def test_torch_stft(): torch_stft = TorchSTFT(ap.fft_size, ap.hop_length, ap.win_length) # librosa stft wav = ap.load_wav(WAV_FILE) M_librosa = abs(ap._stft(wav)) # pylint: disable=protected-access # torch stft wav = torch.from_numpy(wav[None, :]).float() M_torch = torch_stft(wav) # check the difference b/w librosa and torch outputs assert (M_librosa - M_torch[0].data.numpy()).max() < 1e-5 def test_stft_loss(): stft_loss = STFTLoss(ap.fft_size, ap.hop_length, ap.win_length) wav = ap.load_wav(WAV_FILE) wav = torch.from_numpy(wav[None, :]).float() loss_m, loss_sc = stft_loss(wav, wav) assert loss_m + loss_sc == 0 loss_m, loss_sc = stft_loss(wav, torch.rand_like(wav)) assert loss_sc < 1.0 assert loss_m + loss_sc > 0 def test_multiscale_stft_loss(): stft_loss = MultiScaleSTFTLoss([ap.fft_size//2, ap.fft_size, ap.fft_size*2], [ap.hop_length // 2, ap.hop_length, ap.hop_length * 2], [ap.win_length // 2, ap.win_length, ap.win_length * 2]) wav = ap.load_wav(WAV_FILE) wav = torch.from_numpy(wav[None, :]).float() loss_m, loss_sc = stft_loss(wav, wav) assert loss_m + loss_sc == 0 loss_m, loss_sc = stft_loss(wav, torch.rand_like(wav)) assert loss_sc < 1.0 assert loss_m + loss_sc > 0
1,931
34.127273
90
py
TTS
TTS-master/tests/test_loader.py
import os import shutil import unittest import numpy as np import torch from tests import get_tests_input_path, get_tests_output_path from torch.utils.data import DataLoader from TTS.tts.datasets import TTSDataset from TTS.tts.datasets.preprocess import ljspeech from TTS.utils.audio import AudioProcessor from TTS.utils.io import load_config #pylint: disable=unused-variable OUTPATH = os.path.join(get_tests_output_path(), "loader_tests/") os.makedirs(OUTPATH, exist_ok=True) c = load_config(os.path.join(get_tests_input_path(), 'test_config.json')) ok_ljspeech = os.path.exists(c.data_path) DATA_EXIST = True if not os.path.exists(c.data_path): DATA_EXIST = False print(" > Dynamic data loader test: {}".format(DATA_EXIST)) class TestTTSDataset(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestTTSDataset, self).__init__(*args, **kwargs) self.max_loader_iter = 4 self.ap = AudioProcessor(**c.audio) def _create_dataloader(self, batch_size, r, bgs): items = ljspeech(c.data_path, 'metadata.csv') dataset = TTSDataset.MyDataset( r, c.text_cleaner, compute_linear_spec=True, ap=self.ap, meta_data=items, tp=c.characters if 'characters' in c.keys() else None, batch_group_size=bgs, min_seq_len=c.min_seq_len, max_seq_len=float("inf"), use_phonemes=False) dataloader = DataLoader( dataset, batch_size=batch_size, shuffle=False, collate_fn=dataset.collate_fn, drop_last=True, num_workers=c.num_loader_workers) return dataloader, dataset def test_loader(self): if ok_ljspeech: dataloader, dataset = self._create_dataloader(2, c.r, 0) for i, data in enumerate(dataloader): if i == self.max_loader_iter: break text_input = data[0] text_lengths = data[1] speaker_name = data[2] linear_input = data[3] mel_input = data[4] mel_lengths = data[5] stop_target = data[6] item_idx = data[7] neg_values = text_input[text_input < 0] check_count = len(neg_values) assert check_count == 0, \ " !! 
Negative values in text_input: {}".format(check_count) # TODO: more assertion here assert isinstance(speaker_name[0], str) assert linear_input.shape[0] == c.batch_size assert linear_input.shape[2] == self.ap.fft_size // 2 + 1 assert mel_input.shape[0] == c.batch_size assert mel_input.shape[2] == c.audio['num_mels'] # check normalization ranges if self.ap.symmetric_norm: assert mel_input.max() <= self.ap.max_norm assert mel_input.min() >= -self.ap.max_norm #pylint: disable=invalid-unary-operand-type assert mel_input.min() < 0 else: assert mel_input.max() <= self.ap.max_norm assert mel_input.min() >= 0 def test_batch_group_shuffle(self): if ok_ljspeech: dataloader, dataset = self._create_dataloader(2, c.r, 16) last_length = 0 frames = dataset.items for i, data in enumerate(dataloader): if i == self.max_loader_iter: break text_input = data[0] text_lengths = data[1] speaker_name = data[2] linear_input = data[3] mel_input = data[4] mel_lengths = data[5] stop_target = data[6] item_idx = data[7] avg_length = mel_lengths.numpy().mean() assert avg_length >= last_length dataloader.dataset.sort_items() is_items_reordered = False for idx, item in enumerate(dataloader.dataset.items): if item != frames[idx]: is_items_reordered = True break assert is_items_reordered def test_padding_and_spec(self): if ok_ljspeech: dataloader, dataset = self._create_dataloader(1, 1, 0) for i, data in enumerate(dataloader): if i == self.max_loader_iter: break text_input = data[0] text_lengths = data[1] speaker_name = data[2] linear_input = data[3] mel_input = data[4] mel_lengths = data[5] stop_target = data[6] item_idx = data[7] # check mel_spec consistency wav = np.asarray(self.ap.load_wav(item_idx[0]), dtype=np.float32) mel = self.ap.melspectrogram(wav).astype('float32') mel = torch.FloatTensor(mel).contiguous() mel_dl = mel_input[0] # NOTE: Below needs to check == 0 but due to an unknown reason # there is a slight difference between two matrices. # TODO: Check this assert cond more in detail. 
assert abs(mel.T - mel_dl).max() < 1e-5, abs(mel.T - mel_dl).max() # check mel-spec correctness mel_spec = mel_input[0].cpu().numpy() wav = self.ap.inv_melspectrogram(mel_spec.T) self.ap.save_wav(wav, OUTPATH + '/mel_inv_dataloader.wav') shutil.copy(item_idx[0], OUTPATH + '/mel_target_dataloader.wav') # check linear-spec linear_spec = linear_input[0].cpu().numpy() wav = self.ap.inv_spectrogram(linear_spec.T) self.ap.save_wav(wav, OUTPATH + '/linear_inv_dataloader.wav') shutil.copy(item_idx[0], OUTPATH + '/linear_target_dataloader.wav') # check the last time step to be zero padded assert linear_input[0, -1].sum() != 0 assert linear_input[0, -2].sum() != 0 assert mel_input[0, -1].sum() != 0 assert mel_input[0, -2].sum() != 0 assert stop_target[0, -1] == 1 assert stop_target[0, -2] == 0 assert stop_target.sum() == 1 assert len(mel_lengths.shape) == 1 assert mel_lengths[0] == linear_input[0].shape[0] assert mel_lengths[0] == mel_input[0].shape[0] # Test for batch size 2 dataloader, dataset = self._create_dataloader(2, 1, 0) for i, data in enumerate(dataloader): if i == self.max_loader_iter: break text_input = data[0] text_lengths = data[1] speaker_name = data[2] linear_input = data[3] mel_input = data[4] mel_lengths = data[5] stop_target = data[6] item_idx = data[7] if mel_lengths[0] > mel_lengths[1]: idx = 0 else: idx = 1 # check the first item in the batch assert linear_input[idx, -1].sum() != 0 assert linear_input[idx, -2].sum() != 0, linear_input assert mel_input[idx, -1].sum() != 0 assert mel_input[idx, -2].sum() != 0, mel_input assert stop_target[idx, -1] == 1 assert stop_target[idx, -2] == 0 assert stop_target[idx].sum() == 1 assert len(mel_lengths.shape) == 1 assert mel_lengths[idx] == mel_input[idx].shape[0] assert mel_lengths[idx] == linear_input[idx].shape[0] # check the second itme in the batch assert linear_input[1 - idx, -1].sum() == 0 assert mel_input[1 - idx, -1].sum() == 0 assert stop_target[1, mel_lengths[1]-1] == 1 assert stop_target[1, 
mel_lengths[1]:].sum() == 0 assert len(mel_lengths.shape) == 1 # check batch zero-frame conditions (zero-frame disabled) # assert (linear_input * stop_target.unsqueeze(2)).sum() == 0 # assert (mel_input * stop_target.unsqueeze(2)).sum() == 0
8,580
39.476415
108
py
TTS
TTS-master/tests/test_encoder.py
import os import unittest import torch as T from tests import get_tests_input_path from TTS.speaker_encoder.losses import GE2ELoss, AngleProtoLoss from TTS.speaker_encoder.model import SpeakerEncoder from TTS.utils.io import load_config file_path = get_tests_input_path() c = load_config(os.path.join(file_path, "test_config.json")) class SpeakerEncoderTests(unittest.TestCase): # pylint: disable=R0201 def test_in_out(self): dummy_input = T.rand(4, 20, 80) # B x T x D dummy_hidden = [T.rand(2, 4, 128), T.rand(2, 4, 128)] model = SpeakerEncoder( input_dim=80, proj_dim=256, lstm_dim=768, num_lstm_layers=3 ) # computing d vectors output = model.forward(dummy_input) assert output.shape[0] == 4 assert output.shape[1] == 256 output = model.inference(dummy_input) assert output.shape[0] == 4 assert output.shape[1] == 256 # compute d vectors by passing LSTM hidden # output = model.forward(dummy_input, dummy_hidden) # assert output.shape[0] == 4 # assert output.shape[1] == 20 # assert output.shape[2] == 256 # check normalization output_norm = T.nn.functional.normalize(output, dim=1, p=2) assert_diff = (output_norm - output).sum().item() assert output.type() == "torch.FloatTensor" assert ( abs(assert_diff) < 1e-4 ), f" [!] 
output_norm has wrong values - {assert_diff}" # compute d for a given batch dummy_input = T.rand(1, 240, 80) # B x T x D output = model.compute_embedding(dummy_input, num_frames=160, overlap=0.5) assert output.shape[0] == 1 assert output.shape[1] == 256 assert len(output.shape) == 2 class GE2ELossTests(unittest.TestCase): # pylint: disable=R0201 def test_in_out(self): # check random input dummy_input = T.rand(4, 5, 64) # num_speaker x num_utterance x dim loss = GE2ELoss(loss_method="softmax") output = loss.forward(dummy_input) assert output.item() >= 0.0 # check all zeros dummy_input = T.ones(4, 5, 64) # num_speaker x num_utterance x dim loss = GE2ELoss(loss_method="softmax") output = loss.forward(dummy_input) assert output.item() >= 0.0 # check speaker loss with orthogonal d-vectors dummy_input = T.empty(3, 64) dummy_input = T.nn.init.orthogonal_(dummy_input) dummy_input = T.cat( [ dummy_input[0].repeat(5, 1, 1).transpose(0, 1), dummy_input[1].repeat(5, 1, 1).transpose(0, 1), dummy_input[2].repeat(5, 1, 1).transpose(0, 1), ] ) # num_speaker x num_utterance x dim loss = GE2ELoss(loss_method="softmax") output = loss.forward(dummy_input) assert output.item() < 0.005 class AngleProtoLossTests(unittest.TestCase): # pylint: disable=R0201 def test_in_out(self): # check random input dummy_input = T.rand(4, 5, 64) # num_speaker x num_utterance x dim loss = AngleProtoLoss() output = loss.forward(dummy_input) assert output.item() >= 0.0 # check all zeros dummy_input = T.ones(4, 5, 64) # num_speaker x num_utterance x dim loss = AngleProtoLoss() output = loss.forward(dummy_input) assert output.item() >= 0.0 # check speaker loss with orthogonal d-vectors dummy_input = T.empty(3, 64) dummy_input = T.nn.init.orthogonal_(dummy_input) dummy_input = T.cat( [ dummy_input[0].repeat(5, 1, 1).transpose(0, 1), dummy_input[1].repeat(5, 1, 1).transpose(0, 1), dummy_input[2].repeat(5, 1, 1).transpose(0, 1), ] ) # num_speaker x num_utterance x dim loss = AngleProtoLoss() output = 
loss.forward(dummy_input) assert output.item() < 0.005 # class LoaderTest(unittest.TestCase): # def test_output(self): # items = libri_tts("/home/erogol/Data/Libri-TTS/train-clean-360/") # ap = AudioProcessor(**c['audio']) # dataset = MyDataset(ap, items, 1.6, 64, 10) # loader = DataLoader(dataset, batch_size=32, shuffle=False, num_workers=0, collate_fn=dataset.collate_fn) # count = 0 # for mel, spk in loader: # print(mel.shape) # if count == 4: # break # count += 1
4,515
37.271186
114
py
TTS
TTS-master/tests/test_wavegrad_layers.py
import torch from TTS.vocoder.layers.wavegrad import PositionalEncoding, FiLM, UBlock, DBlock from TTS.vocoder.models.wavegrad import Wavegrad def test_positional_encoding(): layer = PositionalEncoding(50) inp = torch.rand(32, 50, 100) nl = torch.rand(32) o = layer(inp, nl) assert o.shape[0] == 32 assert o.shape[1] == 50 assert o.shape[2] == 100 assert isinstance(o, torch.FloatTensor) def test_film(): layer = FiLM(50, 76) inp = torch.rand(32, 50, 100) nl = torch.rand(32) shift, scale = layer(inp, nl) assert shift.shape[0] == 32 assert shift.shape[1] == 76 assert shift.shape[2] == 100 assert isinstance(shift, torch.FloatTensor) assert scale.shape[0] == 32 assert scale.shape[1] == 76 assert scale.shape[2] == 100 assert isinstance(scale, torch.FloatTensor) layer.apply_weight_norm() layer.remove_weight_norm() def test_ublock(): inp1 = torch.rand(32, 50, 100) inp2 = torch.rand(32, 50, 50) nl = torch.rand(32) layer_film = FiLM(50, 100) layer = UBlock(50, 100, 2, [1, 2, 4, 8]) scale, shift = layer_film(inp1, nl) o = layer(inp2, shift, scale) assert o.shape[0] == 32 assert o.shape[1] == 100 assert o.shape[2] == 100 assert isinstance(o, torch.FloatTensor) layer.apply_weight_norm() layer.remove_weight_norm() def test_dblock(): inp = torch.rand(32, 50, 130) layer = DBlock(50, 100, 2) o = layer(inp) assert o.shape[0] == 32 assert o.shape[1] == 100 assert o.shape[2] == 65 assert isinstance(o, torch.FloatTensor) layer.apply_weight_norm() layer.remove_weight_norm() def test_wavegrad_forward(): x = torch.rand(32, 1, 20 * 300) c = torch.rand(32, 80, 20) noise_scale = torch.rand(32) model = Wavegrad(in_channels=80, out_channels=1, upsample_factors=[5, 5, 3, 2, 2], upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]]) o = model.forward(x, c, noise_scale) assert o.shape[0] == 32 assert o.shape[1] == 1 assert o.shape[2] == 20 * 300 assert isinstance(o, torch.FloatTensor) model.apply_weight_norm() model.remove_weight_norm()
2,362
24.408602
80
py
TTS
TTS-master/tests/test_vocoder_parallel_wavegan_discriminator.py
import numpy as np import torch from TTS.vocoder.models.parallel_wavegan_discriminator import ParallelWaveganDiscriminator, ResidualParallelWaveganDiscriminator def test_pwgan_disciminator(): model = ParallelWaveganDiscriminator( in_channels=1, out_channels=1, kernel_size=3, num_layers=10, conv_channels=64, dilation_factor=1, nonlinear_activation="LeakyReLU", nonlinear_activation_params={"negative_slope": 0.2}, bias=True) dummy_x = torch.rand((4, 1, 64 * 256)) output = model(dummy_x) assert np.all(output.shape == (4, 1, 64 * 256)) model.remove_weight_norm() def test_redisual_pwgan_disciminator(): model = ResidualParallelWaveganDiscriminator( in_channels=1, out_channels=1, kernel_size=3, num_layers=30, stacks=3, res_channels=64, gate_channels=128, skip_channels=64, dropout=0.0, bias=True, nonlinear_activation="LeakyReLU", nonlinear_activation_params={"negative_slope": 0.2}) dummy_x = torch.rand((4, 1, 64 * 256)) output = model(dummy_x) assert np.all(output.shape == (4, 1, 64 * 256)) model.remove_weight_norm()
1,237
28.47619
128
py