import logging
import random
from typing import Dict, Optional

import torch
import torch.nn as nn
from torch.nn import functional as F

from .utils.mask import make_pad_mask
from .configs import CFM_PARAMS

logger = logging.getLogger(__name__)


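# Conditional flow-matching (CFM) decoders that map discrete speech tokens
# plus an x-vector speaker embedding to 80-bin mel spectrograms.
# MaskedDiffWithXvec is the offline variant; CausalMaskedDiffWithXvec adds
# chunked, streaming-friendly inference.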
class MaskedDiffWithXvec(torch.nn.Module):
    def __init__(
        self,
        input_size: int = 512,
        output_size: int = 80,
        spk_embed_dim: int = 192,
        output_type: str = "mel",
        vocab_size: int = 4096,
        input_frame_rate: int = 50,
        only_mask_loss: bool = True,
        encoder: Optional[torch.nn.Module] = None,
        length_regulator: Optional[torch.nn.Module] = None,
        decoder: Optional[torch.nn.Module] = None,
        decoder_conf: Dict = {
            'in_channels': 240,
            'out_channel': 80,
            'spk_emb_dim': 80,
            'n_spks': 1,
            'cfm_params': CFM_PARAMS,
            'decoder_params': {
                'channels': [256, 256],
                'dropout': 0.0,
                'attention_head_dim': 64,
                'n_blocks': 4,
                'num_mid_blocks': 12,
                'num_heads': 8,
                'act_fn': 'gelu',
            }
        },
        mel_feat_conf: Dict = {
            'n_fft': 1024,
            'num_mels': 80,
            'sampling_rate': 22050,
            'hop_size': 256,
            'win_size': 1024,
            'fmin': 0,
            'fmax': 8000
        }
    ):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.decoder_conf = decoder_conf
        self.mel_feat_conf = mel_feat_conf
        self.vocab_size = vocab_size
        self.output_type = output_type
        self.input_frame_rate = input_frame_rate
        logger.info(f"input frame rate={self.input_frame_rate}")
        self.input_embedding = nn.Embedding(vocab_size, input_size)
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
        self.encoder = encoder
        self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
        self.decoder = decoder
        self.length_regulator = length_regulator
        self.only_mask_loss = only_mask_loss
        # `inference` checks this flag, but it was never initialized in this
        # class, which raised AttributeError at runtime. Default to fp32.
        self.fp16 = False

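    # Training objective: conditional flow matching toward the target mels.
    # With probability 0.5 per utterance a random mel prefix (up to 30% of
    # the utterance) is exposed through `conds`, teaching prompt continuation.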
    def forward(
        self,
        batch: dict,
        device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        token = batch['speech_token'].to(device)
        token_len = batch['speech_token_len'].to(device)
        feat = batch['speech_feat'].to(device)
        feat_len = batch['speech_feat_len'].to(device)
        embedding = batch['embedding'].to(device)

        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # embed speech tokens, zeroing padded positions
        mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device)
        token = self.input_embedding(torch.clamp(token, min=0, max=self.input_embedding.num_embeddings - 1)) * mask

        # text encode, then regulate to mel-frame length
        h, h_lengths = self.encoder(token, token_len)
        h = self.encoder_proj(h)
        h, h_lengths = self.length_regulator(h, feat_len)

        # get conditions: with prob 0.5 expose a random mel prefix (up to 30%)
        conds = torch.zeros(feat.shape, device=token.device)
        for i, j in enumerate(feat_len):
            if random.random() < 0.5:
                continue
            index = random.randint(0, int(0.3 * j))
            conds[i, :index] = feat[i, :index]
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(feat_len)).to(h)
        # align target mels to the regulated length before computing the loss
        feat = F.interpolate(feat.unsqueeze(dim=1), size=h.shape[1:], mode="nearest").squeeze(dim=1)
        loss, _ = self.decoder.compute_loss(
            feat.transpose(1, 2).contiguous(),
            mask.unsqueeze(1),
            h.transpose(1, 2).contiguous(),
            embedding,
            cond=conds
        )
        return {'loss': loss}

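    # Inference: prompt tokens/mels are prepended, the CFM decoder runs for
    # 10 timesteps over the whole sequence, and the prompt region is sliced
    # off so only newly generated mel frames are returned.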
    @torch.inference_mode()
    def inference(self,
                  token,
                  token_len,
                  prompt_token,
                  prompt_token_len,
                  prompt_feat,
                  prompt_feat_len,
                  embedding,
                  flow_cache):
        if self.fp16 is True:
            prompt_feat = prompt_feat.half()
            embedding = embedding.half()

        assert token.shape[0] == 1  # batch size 1 only

        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat prompt and target tokens
        token_len1, token_len2 = prompt_token.shape[1], token.shape[1]
        token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
        mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)

        # clamp token ids into the embedding range, warning on out-of-bounds
        vocab_size = self.input_embedding.num_embeddings
        if token.max() >= vocab_size or token.min() < 0:
            logger.warning(f"S3Gen: token ids out of bounds: min={token.min().item()}, max={token.max().item()}, vocab_size={vocab_size}")
        token = self.input_embedding(torch.clamp(token, min=0, max=vocab_size - 1)) * mask

        # text encode, then expand tokens to mel frames; the hard-coded
        # 22050 Hz sample rate and 256 hop match the mel_feat_conf defaults
        h, h_lengths = self.encoder(token, token_len)
        h = self.encoder_proj(h)
        mel_len1, mel_len2 = prompt_feat.shape[1], int(token_len2 / self.input_frame_rate * 22050 / 256)
        h, h_lengths = self.length_regulator.inference(h[:, :token_len1], h[:, token_len1:], mel_len1, mel_len2, self.input_frame_rate)

        # condition on the prompt mels; the generated region starts from zeros
        conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
        conds[:, :mel_len1] = prompt_feat
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
        feat, flow_cache = self.decoder(
            mu=h.transpose(1, 2).contiguous(),
            mask=mask.unsqueeze(1),
            spks=embedding,
            cond=conds,
            n_timesteps=10,
            prompt_len=mel_len1,
            flow_cache=flow_cache
        )
        # drop the prompt region; return only newly generated frames
        feat = feat[:, :, mel_len1:]
        assert feat.shape[2] == mel_len2
        return feat.float(), flow_cache
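
    # A minimal usage sketch (hypothetical values; building the encoder,
    # length regulator, and decoder modules is omitted, and the empty
    # flow-cache layout is assumed):
    #
    #   flow = MaskedDiffWithXvec(encoder=enc, length_regulator=lr, decoder=dec)
    #   token        = torch.randint(0, 4096, (1, 100))   # 2 s of tokens at 50 Hz
    #   prompt_token = torch.randint(0, 4096, (1, 50))    # 1 s reference tokens
    #   prompt_feat  = torch.zeros(1, 86, 80)             # reference mel frames
    #   embedding    = torch.randn(1, 192)                # raw x-vector
    #   cache        = torch.zeros(1, 80, 0, 2)           # empty flow cache (assumed)
    #   mel, cache = flow.inference(token, torch.tensor([100]), prompt_token,
    #                               torch.tensor([50]), prompt_feat,
    #                               torch.tensor([86]), embedding, cache)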


class CausalMaskedDiffWithXvec(torch.nn.Module):
    def __init__(
        self,
        input_size: int = 512,
        output_size: int = 80,
        spk_embed_dim: int = 192,
        output_type: str = "mel",
        vocab_size: int = 6561,
        input_frame_rate: int = 25,
        only_mask_loss: bool = True,
        token_mel_ratio: int = 2,
        pre_lookahead_len: int = 3,
        encoder: Optional[torch.nn.Module] = None,
        decoder: Optional[torch.nn.Module] = None,
        decoder_conf: Dict = {
            'in_channels': 240,
            'out_channel': 80,
            'spk_emb_dim': 80,
            'n_spks': 1,
            'cfm_params': CFM_PARAMS,
            'decoder_params': {
                'channels': [256, 256],
                'dropout': 0.0,
                'attention_head_dim': 64,
                'n_blocks': 4,
                'num_mid_blocks': 12,
                'num_heads': 8,
                'act_fn': 'gelu',
            }
        },
        mel_feat_conf: Dict = {
            'n_fft': 1024,
            'num_mels': 80,
            'sampling_rate': 22050,
            'hop_size': 256,
            'win_size': 1024,
            'fmin': 0,
            'fmax': 8000
        }
    ):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.decoder_conf = decoder_conf
        self.mel_feat_conf = mel_feat_conf
        self.vocab_size = vocab_size
        self.output_type = output_type
        self.input_frame_rate = input_frame_rate
        logger.info(f"input frame rate={self.input_frame_rate}")
        self.input_embedding = nn.Embedding(vocab_size, input_size)
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
        self.encoder = encoder
        self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
        self.decoder = decoder
        self.only_mask_loss = only_mask_loss
        self.token_mel_ratio = token_mel_ratio
        self.pre_lookahead_len = pre_lookahead_len

        # run in fp32 by default; callers may flip this to enable half precision
        self.fp16 = False

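    # Streaming inference: the encoder upsamples to token_mel_ratio mel frames
    # per token; on non-final chunks the trailing
    # pre_lookahead_len * token_mel_ratio frames are trimmed, since they will
    # be regenerated with more right-context on a later call.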
    @torch.inference_mode()
    def inference(self,
                  token,
                  token_len,
                  prompt_token,
                  prompt_token_len,
                  prompt_feat,
                  prompt_feat_len,
                  embedding,
                  finalize):
        if self.fp16 is True:
            prompt_feat = prompt_feat.half()
            embedding = embedding.half()

        assert token.shape[0] == 1  # batch size 1 only

        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat prompt and target tokens, embed with padding mask
        token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
        mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
        token = self.input_embedding(torch.clamp(token, min=0, max=self.input_embedding.num_embeddings - 1)) * mask

        # text encode; on non-final chunks, trim the lookahead frames
        h, h_lengths = self.encoder(token, token_len)
        if finalize is False:
            h = h[:, :-self.pre_lookahead_len * self.token_mel_ratio]
        mel_len1, mel_len2 = prompt_feat.shape[1], h.shape[1] - prompt_feat.shape[1]
        h = self.encoder_proj(h)

        # condition on the prompt mels; the generated region starts from zeros
        conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
        conds[:, :mel_len1] = prompt_feat
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
        feat, _ = self.decoder(
            mu=h.transpose(1, 2).contiguous(),
            mask=mask.unsqueeze(1),
            spks=embedding,
            cond=conds,
            n_timesteps=10
        )
        # drop the prompt region; return only newly generated frames
        feat = feat[:, :, mel_len1:]
        assert feat.shape[2] == mel_len2
        return feat.float(), None
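
    # Chunked-decoding sketch (hypothetical chunk loop; token buffering across
    # calls is up to the caller, as this variant returns no cache):
    #
    #   flow = CausalMaskedDiffWithXvec(encoder=enc, decoder=dec)
    #   for i, chunk in enumerate(token_chunks):
    #       mel, _ = flow.inference(chunk, torch.tensor([chunk.shape[1]]),
    #                               prompt_token, prompt_token_len,
    #                               prompt_feat, prompt_feat_len, embedding,
    #                               finalize=(i == len(token_chunks) - 1))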