from typing import Any, Optional, Union

import numpy as np
import torch
from torch import nn
from torch.nn.utils.parametrizations import weight_norm

from transformers import VitsConfig, VitsPreTrainedModel
from transformers.models.vits.modeling_vits import (
    VitsDurationPredictor,
    VitsHifiGan,
    VitsModelOutput,
    VitsPosteriorEncoder,
    VitsResidualCouplingBlock,
    VitsStochasticDurationPredictor,
    VitsTextEncoder,
)
from transformers.utils import auto_docstring

|
@auto_docstring(
    custom_intro="""
    The complete VITS model, for text-to-speech synthesis.
    """
)
class VitsModel(VitsPreTrainedModel):
    def __init__(self, config: VitsConfig):
        super().__init__(config)
        self.config = config
        self.text_encoder = VitsTextEncoder(config)
        self.flow = VitsResidualCouplingBlock(config)
        self.decoder = VitsHifiGan(config)
|
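        # The duration predictor is either flow-based (stochastic, sampled in reverse at
        # inference time) or a plain convolutional regressor, depending on the config.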
        if config.use_stochastic_duration_prediction:
            self.duration_predictor = VitsStochasticDurationPredictor(config)
        else:
            self.duration_predictor = VitsDurationPredictor(config)
|
        if config.num_speakers > 1:
            self.embed_speaker = nn.Embedding(config.num_speakers, config.speaker_embedding_size)
|
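        # Emotion conditioning, added alongside the standard VITS speaker embedding.
        # Both embeddings are summed into a single conditioning vector in `forward`.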
        if config.num_emotions > 1:
            self.embed_emotion = nn.Embedding(config.num_emotions, config.emotion_embedding_size)
|
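        # The posterior encoder consumes target spectrograms during training; it is not
        # used in the inference-only `forward` below.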
        self.posterior_encoder = VitsPosteriorEncoder(config)
|
        self.speaking_rate = config.speaking_rate
        self.noise_scale = config.noise_scale
        self.noise_scale_duration = config.noise_scale_duration
|
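        # Wrap the HiFi-GAN decoder's convolutions (residual blocks and upsamplers) in
        # weight normalization, which is typically kept while the vocoder is trained or
        # fine-tuned and removed for pure inference.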
        for block in self.decoder.resblocks:
            block.convs1 = nn.ModuleList([weight_norm(layer) for layer in block.convs1])
            block.convs2 = nn.ModuleList([weight_norm(layer) for layer in block.convs2])
        self.decoder.upsampler = nn.ModuleList([weight_norm(layer) for layer in self.decoder.upsampler])
|
        self.post_init()
|
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        speaker_id: Optional[int] = None,
        emotion_id: Optional[int] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.FloatTensor] = None,
        **kwargs,
    ) -> Union[tuple[Any], VitsModelOutput]:
r""" |
|
|
speaker_id (`int`, *optional*): |
|
|
Which speaker embedding to use. Only used for multispeaker models. |
|
|
emotion_id (`int`, *optional*): |
|
|
Which emotion embedding to use. Only used for multiemotion models. |
|
|
|
|
|
labels (`torch.FloatTensor` of shape `(batch_size, config.spectrogram_bins, sequence_length)`, *optional*): |
|
|
Float values of target spectrogram. Timesteps set to `-100.0` are ignored (masked) for the loss |
|
|
computation. |
|
|
|
|
|
Example: |
|
|
|
|
|
```python |
|
|
>>> from transformers import VitsTokenizer, VitsModel, set_seed |
|
|
>>> import torch |
|
|
|
|
|
>>> tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") |
|
|
>>> model = VitsModel.from_pretrained("facebook/mms-tts-eng") |
|
|
|
|
|
>>> inputs = tokenizer(text="Hello - my dog is cute", return_tensors="pt") |
|
|
|
|
|
>>> set_seed(555) # make deterministic |
|
|
|
|
|
>>> with torch.no_grad(): |
|
|
... outputs = model(inputs["input_ids"]) |
|
|
>>> outputs.waveform.shape |
|
|
torch.Size([1, 45824]) |
|
|
``` |
|
|
""" |
|
|
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
        if labels is not None:
            raise NotImplementedError("Training of VITS is not supported yet.")
|
        mask_dtype = self.text_encoder.embed_tokens.weight.dtype
        if attention_mask is not None:
            input_padding_mask = attention_mask.unsqueeze(-1).to(mask_dtype)
        else:
            input_padding_mask = torch.ones_like(input_ids).unsqueeze(-1).to(mask_dtype)
|
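        # Combined speaker/emotion conditioning: the two embeddings are summed into a single
        # conditioning vector, e.g. `model(**inputs, speaker_id=3, emotion_id=1)` (hypothetical
        # ids). Note that both ids must be provided (and the config must enable both) for any
        # conditioning to be applied; otherwise the model runs unconditioned.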
        if (
            self.config.num_speakers > 1
            and speaker_id is not None
            and self.config.num_emotions > 1
            and emotion_id is not None
        ):
            if not 0 <= speaker_id < self.config.num_speakers:
                raise ValueError(f"Set `speaker_id` in the range 0-{self.config.num_speakers - 1}.")
            if not 0 <= emotion_id < self.config.num_emotions:
                raise ValueError(f"Set `emotion_id` in the range 0-{self.config.num_emotions - 1}.")
            if isinstance(speaker_id, int) and isinstance(emotion_id, int):
                speaker_id = torch.full(size=(1,), fill_value=speaker_id, device=self.device)
                emotion_id = torch.full(size=(1,), fill_value=emotion_id, device=self.device)
            speaker_embeddings = (
                self.embed_speaker(speaker_id).unsqueeze(-1) + self.embed_emotion(emotion_id).unsqueeze(-1)
            )
        else:
            speaker_embeddings = None
|
        text_encoder_output = self.text_encoder(
            input_ids=input_ids,
            padding_mask=input_padding_mask,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = text_encoder_output[0] if not return_dict else text_encoder_output.last_hidden_state
        hidden_states = hidden_states.transpose(1, 2)
        input_padding_mask = input_padding_mask.transpose(1, 2)
        prior_means = text_encoder_output[1] if not return_dict else text_encoder_output.prior_means
        prior_log_variances = text_encoder_output[2] if not return_dict else text_encoder_output.prior_log_variances
|
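        # Predict log-durations for each input token. The stochastic predictor is run in
        # reverse to sample durations from noise; the deterministic one regresses them directly.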
        if self.config.use_stochastic_duration_prediction:
            log_duration = self.duration_predictor(
                hidden_states,
                input_padding_mask,
                speaker_embeddings,
                reverse=True,
                noise_scale=self.noise_scale_duration,
            )
        else:
            log_duration = self.duration_predictor(hidden_states, input_padding_mask, speaker_embeddings)
|
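        # Convert log-durations to integer frame counts; a higher `speaking_rate`
        # shrinks the durations and thus speeds up the generated speech.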
        length_scale = 1.0 / self.speaking_rate
        duration = torch.ceil(torch.exp(log_duration) * input_padding_mask * length_scale)
        predicted_lengths = torch.clamp_min(torch.sum(duration, [1, 2]), 1).long()
|
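        # Create a padding mask for the output lengths, of shape (batch, 1, max_output_length)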
        indices = torch.arange(predicted_lengths.max(), dtype=predicted_lengths.dtype, device=predicted_lengths.device)
        output_padding_mask = indices.unsqueeze(0) < predicted_lengths.unsqueeze(1)
        output_padding_mask = output_padding_mask.unsqueeze(1).to(input_padding_mask.dtype)
|
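        # Reconstruct a hard, monotonic attention tensor of shape (batch, 1, out_length, in_length):
        # each output frame attends to the input token whose cumulative duration covers it.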
        attn_mask = torch.unsqueeze(input_padding_mask, 2) * torch.unsqueeze(output_padding_mask, -1)
        batch_size, _, output_length, input_length = attn_mask.shape
        cum_duration = torch.cumsum(duration, -1).view(batch_size * input_length, 1)
        indices = torch.arange(output_length, dtype=duration.dtype, device=duration.device)
        valid_indices = indices.unsqueeze(0) < cum_duration
        valid_indices = valid_indices.to(attn_mask.dtype).view(batch_size, input_length, output_length)
        padded_indices = valid_indices - nn.functional.pad(valid_indices, [0, 0, 1, 0, 0, 0])[:, :-1]
        attn = padded_indices.unsqueeze(1).transpose(2, 3) * attn_mask
|
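        # Expand the token-level prior distribution to frame level using the alignment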
        prior_means = torch.matmul(attn.squeeze(1), prior_means).transpose(1, 2)
        prior_log_variances = torch.matmul(attn.squeeze(1), prior_log_variances).transpose(1, 2)
|
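        # Sample latents from the prior and run them through the inverse of the flow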
        prior_latents = prior_means + torch.randn_like(prior_means) * torch.exp(prior_log_variances) * self.noise_scale
        latents = self.flow(prior_latents, output_padding_mask, speaker_embeddings, reverse=True)
|
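        # Decode the masked latents to a waveform with the HiFi-GAN decoder; each frame is
        # upsampled by the product of `config.upsample_rates` samples.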
        spectrogram = latents * output_padding_mask
        waveform = self.decoder(spectrogram, speaker_embeddings)
        waveform = waveform.squeeze(1)
        sequence_lengths = predicted_lengths * np.prod(self.config.upsample_rates)
|
        if not return_dict:
            outputs = (waveform, sequence_lengths, spectrogram) + text_encoder_output[3:]
            return outputs
|
        return VitsModelOutput(
            waveform=waveform,
            sequence_lengths=sequence_lengths,
            spectrogram=spectrogram,
            hidden_states=text_encoder_output.hidden_states,
            attentions=text_encoder_output.attentions,
        )
|
|
__all__ = ["VitsModel"] |