repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
espnet | espnet-master/espnet/optimizer/chainer.py | """Chainer optimizer builders."""
import argparse
import chainer
from chainer.optimizer_hooks import WeightDecay
from espnet.optimizer.factory import OptimizerFactoryInterface
from espnet.optimizer.parser import adadelta, adam, sgd
class AdamFactory(OptimizerFactoryInterface):
    """Factory that builds chainer Adam optimizers from CLI arguments."""

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Add Adam-specific options to ``parser`` and return it."""
        return adam(parser)

    @staticmethod
    def from_args(target, args: argparse.Namespace):
        """Build an Adam optimizer, attach it to ``target`` and add weight decay.

        Args:
            target: for pytorch ``model.parameters()``,
                for chainer ``model``
            args (argparse.Namespace): parsed command-line args

        Returns:
            chainer.optimizers.Adam: optimizer already set up on ``target``.
        """
        optimizer = chainer.optimizers.Adam(
            alpha=args.lr, beta1=args.beta1, beta2=args.beta2
        )
        optimizer.setup(target)
        optimizer.add_hook(WeightDecay(args.weight_decay))
        return optimizer
class SGDFactory(OptimizerFactoryInterface):
    """Factory that builds chainer SGD optimizers from CLI arguments."""

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Add SGD-specific options to ``parser`` and return it."""
        return sgd(parser)

    @staticmethod
    def from_args(target, args: argparse.Namespace):
        """Build an SGD optimizer, attach it to ``target`` and add weight decay.

        Args:
            target: for pytorch ``model.parameters()``,
                for chainer ``model``
            args (argparse.Namespace): parsed command-line args

        Returns:
            chainer.optimizers.SGD: optimizer already set up on ``target``.
        """
        optimizer = chainer.optimizers.SGD(lr=args.lr)
        optimizer.setup(target)
        optimizer.add_hook(WeightDecay(args.weight_decay))
        return optimizer
class AdadeltaFactory(OptimizerFactoryInterface):
    """Factory that builds chainer AdaDelta optimizers from CLI arguments."""

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Add AdaDelta-specific options to ``parser`` and return it."""
        return adadelta(parser)

    @staticmethod
    def from_args(target, args: argparse.Namespace):
        """Build an AdaDelta optimizer, attach it to ``target``, add weight decay.

        Args:
            target: for pytorch ``model.parameters()``,
                for chainer ``model``
            args (argparse.Namespace): parsed command-line args

        Returns:
            chainer.optimizers.AdaDelta: optimizer already set up on ``target``.
        """
        optimizer = chainer.optimizers.AdaDelta(rho=args.rho, eps=args.eps)
        optimizer.setup(target)
        optimizer.add_hook(WeightDecay(args.weight_decay))
        return optimizer
# Map from optimizer name (as selected on the command line) to its factory.
OPTIMIZER_FACTORY_DICT = {
    "adam": AdamFactory,
    "sgd": SGDFactory,
    "adadelta": AdadeltaFactory,
}
| 2,588 | 25.690722 | 82 | py |
espnet | espnet-master/espnet/transform/spec_augment.py | """Spec Augment module for preprocessing i.e., data augmentation"""
import random
import numpy
from espnet.transform.functional import FuncTrans
def time_warp(x, max_time_warp=80, inplace=False, mode="PIL"):
    """time warp for spec augment

    move random center frame by the random width ~ uniform(-window, window)

    :param numpy.ndarray x: spectrogram (time, freq)
    :param int max_time_warp: maximum time frames to warp
    :param bool inplace: overwrite x with the result
    :param str mode: "PIL" (default, fast, not differentiable) or "sparse_image_warp"
        (slow, differentiable)
    :returns numpy.ndarray: time warped spectrogram (time, freq)
    """
    # Imported lazily so PIL is only required when this transform is used.
    from PIL import Image
    from PIL.Image import BICUBIC

    window = max_time_warp
    if mode == "PIL":
        t = x.shape[0]
        # Too short to warp: the randrange below would get an empty range.
        if t - window <= window:
            return x
        # NOTE: randrange(a, b) emits a, a + 1, ..., b - 1
        center = random.randrange(window, t - window)
        warped = random.randrange(center - window, center + window) + 1  # 1 ... t - 1
        # Resize the two halves so the warped split point moves from
        # `center` to `warped` while the total length stays `t`.
        left = Image.fromarray(x[:center]).resize((x.shape[1], warped), BICUBIC)
        right = Image.fromarray(x[center:]).resize((x.shape[1], t - warped), BICUBIC)
        if inplace:
            x[:warped] = left
            x[warped:] = right
            return x
        return numpy.concatenate((left, right), 0)
    elif mode == "sparse_image_warp":
        import torch

        from espnet.utils import spec_augment

        # TODO(karita): make this differentiable again
        return spec_augment.time_warp(torch.from_numpy(x), window).numpy()
    else:
        raise NotImplementedError(
            "unknown resize mode: "
            + mode
            + ", choose one from (PIL, sparse_image_warp)."
        )
class TimeWarp(FuncTrans):
    _func = time_warp
    __doc__ = time_warp.__doc__

    def __call__(self, x, train):
        # Warping is a training-only augmentation; pass through at eval time.
        if train:
            return super().__call__(x)
        return x
def freq_mask(x, F=30, n_mask=2, replace_with_zero=True, inplace=False):
    """freq mask for spec augment

    :param numpy.ndarray x: (time, freq)
    :param int F: maximum frequency-mask parameter
    :param int n_mask: the number of masks
    :param bool inplace: overwrite
    :param bool replace_with_zero: pad zero on mask if true else use mean
    """
    if inplace:
        cloned = x
    else:
        cloned = x.copy()

    num_mel_channels = cloned.shape[1]
    # Two draws per mask: `f` bounds the start position, `mask_end` becomes
    # the mask width once shifted by the start below.
    fs = numpy.random.randint(0, F, size=(n_mask, 2))

    # NOTE(review): the effective mask width is the second random draw
    # (`mask_end`), independent of `f`; `f` only shrinks the start range and
    # gates the `continue` below — confirm this is the intended behavior.
    for f, mask_end in fs:
        f_zero = random.randrange(0, num_mel_channels - f)
        mask_end += f_zero

        # avoids randrange error if values are equal and range is empty
        if f_zero == f_zero + f:
            continue

        if replace_with_zero:
            cloned[:, f_zero:mask_end] = 0
        else:
            cloned[:, f_zero:mask_end] = cloned.mean()
    return cloned
class FreqMask(FuncTrans):
    _func = freq_mask
    __doc__ = freq_mask.__doc__

    def __call__(self, x, train):
        # Masking is a training-only augmentation; pass through at eval time.
        if train:
            return super().__call__(x)
        return x
def time_mask(spec, T=40, n_mask=2, replace_with_zero=True, inplace=False):
    """time mask for spec augment

    :param numpy.ndarray spec: (time, freq)
    :param int T: maximum time-mask parameter
    :param int n_mask: the number of masks
    :param bool inplace: overwrite
    :param bool replace_with_zero: pad zero on mask if true else use mean
    """
    if inplace:
        cloned = spec
    else:
        cloned = spec.copy()
    len_spectro = cloned.shape[0]
    # Two draws per mask: `t` bounds the start position, `mask_end` becomes
    # the mask width once shifted by the start below.
    ts = numpy.random.randint(0, T, size=(n_mask, 2))
    for t, mask_end in ts:
        # avoid randint range error
        if len_spectro - t <= 0:
            continue
        t_zero = random.randrange(0, len_spectro - t)

        # avoids randrange error if values are equal and range is empty
        if t_zero == t_zero + t:
            continue

        mask_end += t_zero
        if replace_with_zero:
            cloned[t_zero:mask_end] = 0
        else:
            cloned[t_zero:mask_end] = cloned.mean()
    return cloned
class TimeMask(FuncTrans):
    _func = time_mask
    __doc__ = time_mask.__doc__

    def __call__(self, x, train):
        # Masking is a training-only augmentation; pass through at eval time.
        if train:
            return super().__call__(x)
        return x
def spec_augment(
    x,
    resize_mode="PIL",
    max_time_warp=80,
    max_freq_width=27,
    n_freq_mask=2,
    max_time_width=100,
    n_time_mask=2,
    inplace=True,
    replace_with_zero=True,
):
    """spec augment

    apply random time warping and time/freq masking
    default setting is based on LD (Librispeech double) in Table 2
    https://arxiv.org/pdf/1904.08779.pdf

    :param numpy.ndarray x: (time, freq)
    :param str resize_mode: "PIL" (fast, nondifferentiable) or "sparse_image_warp"
        (slow, differentiable)
    :param int max_time_warp: maximum frames to warp the center frame in spectrogram (W)
    :param int max_freq_width: maximum width of the random freq mask (F)
    :param int n_freq_mask: the number of the random freq mask (m_F)
    :param int max_time_width: maximum width of the random time mask (T)
    :param int n_time_mask: the number of the random time mask (m_T)
    :param bool inplace: overwrite intermediate array
    :param bool replace_with_zero: pad zero on mask if true else use mean
    """
    assert isinstance(x, numpy.ndarray)
    assert x.ndim == 2
    # Apply the three SpecAugment stages in order: warp, freq mask, time mask.
    x = time_warp(x, max_time_warp, inplace=inplace, mode=resize_mode)
    x = freq_mask(
        x,
        max_freq_width,
        n_freq_mask,
        inplace=inplace,
        replace_with_zero=replace_with_zero,
    )
    x = time_mask(
        x,
        max_time_width,
        n_time_mask,
        inplace=inplace,
        replace_with_zero=replace_with_zero,
    )
    return x
class SpecAugment(FuncTrans):
    _func = spec_augment
    __doc__ = spec_augment.__doc__

    def __call__(self, x, train):
        # SpecAugment is a training-only transform; pass through at eval time.
        if train:
            return super().__call__(x)
        return x
| 5,933 | 28.088235 | 88 | py |
espnet | espnet-master/espnet2/svs/espnet_model.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Copyright 2021 Carnegie Mellon University (Jiatong Shi)
# Copyright 2022 Renmin University of China (Yuning Wu)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Singing-voice-synthesis ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Dict, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.svs.abs_svs import AbsSVS
from espnet2.svs.feats_extract.score_feats_extract import (
FrameScoreFeats,
SyllableScoreFeats,
expand_to_frame,
)
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch<1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetSVSModel(AbsESPnetModel):
    """ESPnet model for singing voice synthesis task.

    Wires together optional feature extractors / normalizers and an SVS
    network: raw singing is converted to acoustic features, score-level
    inputs (label / midi / duration) are expanded as required by the
    configured score-feature extractor, and the assembled batch is fed
    to ``self.svs``.
    """

    def __init__(
        self,
        text_extract: Optional[AbsFeatsExtract],
        feats_extract: Optional[AbsFeatsExtract],
        score_feats_extract: Optional[AbsFeatsExtract],
        label_extract: Optional[AbsFeatsExtract],
        pitch_extract: Optional[AbsFeatsExtract],
        ying_extract: Optional[AbsFeatsExtract],
        duration_extract: Optional[AbsFeatsExtract],
        energy_extract: Optional[AbsFeatsExtract],
        normalize: Optional[AbsNormalize and InversibleInterface],
        pitch_normalize: Optional[AbsNormalize and InversibleInterface],
        energy_normalize: Optional[AbsNormalize and InversibleInterface],
        svs: AbsSVS,
    ):
        """Initialize ESPnetSVSModel module.

        Each extractor/normalizer may be ``None`` when the corresponding
        feature is precomputed or not used by the chosen SVS network.
        """
        assert check_argument_types()
        super().__init__()
        self.text_extract = text_extract
        self.feats_extract = feats_extract
        self.score_feats_extract = score_feats_extract
        self.label_extract = label_extract
        self.pitch_extract = pitch_extract
        self.ying_extract = ying_extract
        self.duration_extract = duration_extract
        self.energy_extract = energy_extract
        self.normalize = normalize
        self.pitch_normalize = pitch_normalize
        self.energy_normalize = energy_normalize
        self.svs = svs

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        singing: torch.Tensor,
        singing_lengths: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        feats_lengths: Optional[torch.Tensor] = None,
        label: Optional[torch.Tensor] = None,
        label_lengths: Optional[torch.Tensor] = None,
        phn_cnt: Optional[torch.Tensor] = None,
        midi: Optional[torch.Tensor] = None,
        midi_lengths: Optional[torch.Tensor] = None,
        duration_phn: Optional[torch.Tensor] = None,
        duration_phn_lengths: Optional[torch.Tensor] = None,
        duration_ruled_phn: Optional[torch.Tensor] = None,
        duration_ruled_phn_lengths: Optional[torch.Tensor] = None,
        duration_syb: Optional[torch.Tensor] = None,
        duration_syb_lengths: Optional[torch.Tensor] = None,
        slur: Optional[torch.Tensor] = None,
        slur_lengths: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        energy_lengths: Optional[torch.Tensor] = None,
        ying: Optional[torch.Tensor] = None,
        ying_lengths: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        flag_IsValid=False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Calculate outputs and return the loss tensor.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            singing (Tensor): Singing waveform tensor (B, T_wav).
            singing_lengths (Tensor): Singing length tensor (B,).
            label (Optional[Tensor]): Label tensor (B, T_label).
            label_lengths (Optional[Tensor]): Label length tensor (B,).
            phn_cnt (Optional[Tensor]): Number of phones in each syllable (B, T_syb)
            midi (Optional[Tensor]): Midi tensor (B, T_label).
            midi_lengths (Optional[Tensor]): Midi length tensor (B,).
            duration_phn (Optional[Tensor]): duration tensor (B, T_label).
            duration_phn_lengths (Optional[Tensor]): duration length tensor (B,).
            duration_ruled_phn (Optional[Tensor]): duration tensor (B, T_phone).
            duration_ruled_phn_lengths (Optional[Tensor]): duration length tensor (B,).
            duration_syb (Optional[Tensor]): duration tensor (B, T_syllable).
            duration_syb_lengths (Optional[Tensor]): duration length tensor (B,).
            slur (Optional[Tensor]): slur tensor (B, T_slur).
            slur_lengths (Optional[Tensor]): slur length tensor (B,).
            pitch (Optional[Tensor]): Pitch tensor (B, T_wav). - f0 sequence
            pitch_lengths (Optional[Tensor]): Pitch length tensor (B,).
            energy (Optional[Tensor]): Energy tensor.
            energy_lengths (Optional[Tensor]): Energy length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker ID tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).
            kwargs: "utt_id" is among the input.

        Returns:
            Tensor: Loss scalar tensor.
            Dict[str, float]: Statistics to be monitored.
            Tensor: Weight tensor to summarize losses.
        """
        # Feature extraction runs in fp32 even under mixed precision.
        with autocast(False):
            # Extract features
            if self.feats_extract is not None and feats is None:
                feats, feats_lengths = self.feats_extract(
                    singing, singing_lengths
                )  # singing to spec feature (frame level)

            # Extract auxiliary features
            # melody : 128 note pitch
            # duration :
            #   input-> phone-id seqence
            #   output -> frame level(take mode from window) or syllable level

            # cut length: make each utterance's feature length agree with the
            # total phone duration; if the durations are longer, shrink the
            # trailing durations (in-place on duration_phn) instead.
            for i in range(feats.size(0)):
                dur_len = sum(duration_phn[i])
                if feats_lengths[i] > dur_len:
                    feats_lengths[i] = dur_len
                else:  # decrease duration at the end of sequence
                    delta = dur_len - feats_lengths[i]
                    end = duration_phn_lengths[i] - 1
                    while delta > 0 and end >= 0:
                        new = duration_phn[i][end] - delta
                        if new < 0:  # keep on decreasing the previous one
                            delta -= duration_phn[i][end]
                            duration_phn[i][end] = 0
                            end -= 1
                        else:  # stop
                            delta -= duration_phn[i][end] - new
                            duration_phn[i][end] = new
            feats = feats[:, : feats_lengths.max()]

            if isinstance(self.score_feats_extract, FrameScoreFeats):
                # Expand phone-level label/midi/duration to frame level.
                (
                    label_lab,
                    label_lab_lengths,
                    midi_lab,
                    midi_lab_lengths,
                    duration_lab,
                    duration_lab_lengths,
                ) = expand_to_frame(
                    duration_phn, duration_phn_lengths, label, midi, duration_phn
                )

                # for data-parallel
                label_lab = label_lab[:, : label_lab_lengths.max()]
                midi_lab = midi_lab[:, : midi_lab_lengths.max()]
                duration_lab = duration_lab[:, : duration_lab_lengths.max()]

                # Same expansion but driven by the rule-based durations.
                (
                    label_score,
                    label_score_lengths,
                    midi_score,
                    midi_score_lengths,
                    duration_score,
                    duration_score_phn_lengths,
                ) = expand_to_frame(
                    duration_ruled_phn,
                    duration_ruled_phn_lengths,
                    label,
                    midi,
                    duration_ruled_phn,
                )

                # for data-parallel
                label_score = label_score[:, : label_score_lengths.max()]
                midi_score = midi_score[:, : midi_score_lengths.max()]
                duration_score = duration_score[:, : duration_score_phn_lengths.max()]
                duration_score_syb = None
            elif isinstance(self.score_feats_extract, SyllableScoreFeats):
                # No frame expansion: keep phone/syllable-level inputs,
                # just trim the padding for data-parallel.
                label_lab_lengths = label_lengths
                midi_lab_lengths = midi_lengths
                duration_lab_lengths = duration_phn_lengths

                label_lab = label[:, : label_lab_lengths.max()]
                midi_lab = midi[:, : midi_lab_lengths.max()]
                duration_lab = duration_phn[:, : duration_lab_lengths.max()]

                label_score_lengths = label_lengths
                midi_score_lengths = midi_lengths
                duration_score_phn_lengths = duration_ruled_phn_lengths
                duration_score_syb_lengths = duration_syb_lengths

                label_score = label[:, : label_score_lengths.max()]
                midi_score = midi[:, : midi_score_lengths.max()]
                duration_score = duration_ruled_phn[
                    :, : duration_score_phn_lengths.max()
                ]
                duration_score_syb = duration_syb[:, : duration_score_syb_lengths.max()]
                slur = slur[:, : slur_lengths.max()]
            else:
                raise RuntimeError("Cannot understand score_feats extract type")

            if self.pitch_extract is not None and pitch is None:
                pitch, pitch_lengths = self.pitch_extract(
                    input=singing,
                    input_lengths=singing_lengths,
                    feats_lengths=feats_lengths,
                )
            if self.energy_extract is not None and energy is None:
                energy, energy_lengths = self.energy_extract(
                    singing,
                    singing_lengths,
                    feats_lengths=feats_lengths,
                )
            if self.ying_extract is not None and ying is None:
                ying, ying_lengths = self.ying_extract(
                    singing,
                    singing_lengths,
                    feats_lengths=feats_lengths,
                )

            # Normalize
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)
            if self.pitch_normalize is not None:
                pitch, pitch_lengths = self.pitch_normalize(pitch, pitch_lengths)
            if self.energy_normalize is not None:
                energy, energy_lengths = self.energy_normalize(energy, energy_lengths)

        # Make batch for svs inputs
        batch = dict(
            text=text,
            text_lengths=text_lengths,
            feats=feats,
            feats_lengths=feats_lengths,
            flag_IsValid=flag_IsValid,
        )

        # label
        # NOTE(Yuning): Label can be word, syllable or phoneme,
        # which is determined by annotation file.
        # (The `label` parameter is rebound here to a dict of variants.)
        label = dict()
        label_lengths = dict()
        if label_lab is not None:
            label_lab = label_lab.to(dtype=torch.long)
            label.update(lab=label_lab)
            label_lengths.update(lab=label_lab_lengths)
        if label_score is not None:
            label_score = label_score.to(dtype=torch.long)
            label.update(score=label_score)
            label_lengths.update(score=label_score_lengths)
        batch.update(label=label, label_lengths=label_lengths)

        # melody
        melody = dict()
        melody_lengths = dict()
        if midi_lab is not None:
            midi_lab = midi_lab.to(dtype=torch.long)
            melody.update(lab=midi_lab)
            melody_lengths.update(lab=midi_lab_lengths)
        if midi_score is not None:
            midi_score = midi_score.to(dtype=torch.long)
            melody.update(score=midi_score)
            melody_lengths.update(score=midi_score_lengths)
        batch.update(melody=melody, melody_lengths=melody_lengths)

        # duration
        # NOTE(Yuning): duration = duration_time / time_shift (same as Xiaoice paper)
        duration = dict()
        duration_lengths = dict()
        if duration_lab is not None:
            duration_lab = duration_lab.to(dtype=torch.long)
            duration.update(lab=duration_lab)
            duration_lengths.update(lab=duration_lab_lengths)
        if duration_score is not None:
            duration_phn_score = duration_score.to(dtype=torch.long)
            duration.update(score_phn=duration_phn_score)
            duration_lengths.update(score_phn=duration_score_phn_lengths)
        if duration_score_syb is not None:
            duration_syb_score = duration_score_syb.to(dtype=torch.long)
            duration.update(score_syb=duration_syb_score)
            duration_lengths.update(score_syb=duration_score_syb_lengths)
        batch.update(duration=duration, duration_lengths=duration_lengths)

        # Optional extras are only included when available.
        if slur is not None:
            batch.update(slur=slur, slur_lengths=slur_lengths)
        if spembs is not None:
            batch.update(spembs=spembs)
        if sids is not None:
            batch.update(sids=sids)
        if lids is not None:
            batch.update(lids=lids)
        if self.pitch_extract is not None and pitch is not None:
            batch.update(pitch=pitch, pitch_lengths=pitch_lengths)
        if self.energy_extract is not None and energy is not None:
            batch.update(energy=energy, energy_lengths=energy_lengths)
        if self.ying_extract is not None and ying is not None:
            batch.update(ying=ying)
        if self.svs.require_raw_singing:
            batch.update(singing=singing, singing_lengths=singing_lengths)
        return self.svs(**batch)

    def collect_feats(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        singing: torch.Tensor,
        singing_lengths: torch.Tensor,
        label: Optional[torch.Tensor] = None,
        label_lengths: Optional[torch.Tensor] = None,
        phn_cnt: Optional[torch.Tensor] = None,
        midi: Optional[torch.Tensor] = None,
        midi_lengths: Optional[torch.Tensor] = None,
        duration_phn: Optional[torch.Tensor] = None,
        duration_phn_lengths: Optional[torch.Tensor] = None,
        duration_ruled_phn: Optional[torch.Tensor] = None,
        duration_ruled_phn_lengths: Optional[torch.Tensor] = None,
        duration_syb: Optional[torch.Tensor] = None,
        duration_syb_lengths: Optional[torch.Tensor] = None,
        slur: Optional[torch.Tensor] = None,
        slur_lengths: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        energy_lengths: Optional[torch.Tensor] = None,
        ying: Optional[torch.Tensor] = None,
        ying_lengths: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Calculate features and return them as a dict.

        Used e.g. for feature statistics collection; mirrors the extraction
        part of :meth:`forward` without running the SVS network.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            singing (Tensor): Singing waveform tensor (B, T_wav).
            singing_lengths (Tensor): Singing length tensor (B,).
            label (Optional[Tensor]): Label tensor (B, T_label).
            label_lengths (Optional[Tensor]): Label length tensor (B,).
            phn_cnt (Optional[Tensor]): Number of phones in each syllable (B, T_syb)
            midi (Optional[Tensor]): Midi tensor (B, T_label).
            midi_lengths (Optional[Tensor]): Midi length tensor (B,).
            ---- duration* is duration in time_shift ----
            duration_phn (Optional[Tensor]): duration tensor (B, T_label).
            duration_phn_lengths (Optional[Tensor]): duration length tensor (B,).
            duration_ruled_phn (Optional[Tensor]): duration tensor (B, T_phone).
            duration_ruled_phn_lengths (Optional[Tensor]): duration length tensor (B,).
            duration_syb (Optional[Tensor]): duration tensor (B, T_syb).
            duration_syb_lengths (Optional[Tensor]): duration length tensor (B,).
            slur (Optional[Tensor]): slur tensor (B, T_slur).
            slur_lengths (Optional[Tensor]): slur length tensor (B,).
            pitch (Optional[Tensor]): Pitch tensor (B, T_wav). - f0 sequence
            pitch_lengths (Optional[Tensor]): Pitch length tensor (B,).
            energy (Optional[Tensor]): Energy tensor.
            energy_lengths (Optional[Tensor]): Energy length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker ID tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).

        Returns:
            Dict[str, Tensor]: Dict of features.
        """
        feats = None
        if self.feats_extract is not None:
            feats, feats_lengths = self.feats_extract(singing, singing_lengths)
        else:
            # Use precalculated feats (feats_type != raw case)
            feats, feats_lengths = singing, singing_lengths

        # cut length: same length-reconciliation loop as in forward()
        # (mutates duration_phn in place when durations exceed feats).
        for i in range(feats.size(0)):
            dur_len = sum(duration_phn[i])
            if feats_lengths[i] > dur_len:
                feats_lengths[i] = dur_len
            else:  # decrease duration at the end of sequence
                delta = dur_len - feats_lengths[i]
                end = duration_phn_lengths[i] - 1
                while delta > 0 and end >= 0:
                    new = duration_phn[i][end] - delta
                    if new < 0:  # keep on decreasing the previous one
                        delta -= duration_phn[i][end]
                        duration_phn[i][end] = 0
                        end -= 1
                    else:  # stop
                        delta -= duration_phn[i][end] - new
                        duration_phn[i][end] = new
        feats = feats[:, : feats_lengths.max()]

        if self.pitch_extract is not None:
            pitch, pitch_lengths = self.pitch_extract(
                input=singing,
                input_lengths=singing_lengths,
                feats_lengths=feats_lengths,
            )
        if self.energy_extract is not None:
            energy, energy_lengths = self.energy_extract(
                singing,
                singing_lengths,
                feats_lengths=feats_lengths,
            )
        if self.ying_extract is not None and ying is None:
            ying, ying_lengths = self.ying_extract(
                singing,
                singing_lengths,
                feats_lengths=feats_lengths,
            )

        # store in dict
        feats_dict = {}
        if feats is not None:
            feats_dict.update(feats=feats, feats_lengths=feats_lengths)
        if pitch is not None:
            feats_dict.update(pitch=pitch, pitch_lengths=pitch_lengths)
        if energy is not None:
            feats_dict.update(energy=energy, energy_lengths=energy_lengths)
        if ying is not None:
            feats_dict.update(ying=ying, ying_lengths=ying_lengths)

        return feats_dict

    def inference(
        self,
        text: torch.Tensor,
        singing: Optional[torch.Tensor] = None,
        label: Optional[torch.Tensor] = None,
        phn_cnt: Optional[torch.Tensor] = None,
        midi: Optional[torch.Tensor] = None,
        duration_phn: Optional[torch.Tensor] = None,
        duration_ruled_phn: Optional[torch.Tensor] = None,
        duration_syb: Optional[torch.Tensor] = None,
        slur: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        **decode_config,
    ) -> Dict[str, torch.Tensor]:
        """Run single-utterance inference and return outputs as a dict.

        Args:
            text (Tensor): Text index tensor (T_text).
            singing (Tensor): Singing waveform tensor (T_wav).
            label (Optional[Tensor]): Label tensor (T_label).
            phn_cnt (Optional[Tensor]): Number of phones in each syllable (T_syb)
            midi (Optional[Tensor]): Midi tensor (T_label).
            duration_phn (Optional[Tensor]): duration tensor (T_label).
            duration_ruled_phn (Optional[Tensor]): duration tensor (T_phone).
            duration_syb (Optional[Tensor]): duration tensor (T_phone).
            slur (Optional[Tensor]): slur tensor (T_phone).
            spembs (Optional[Tensor]): Speaker embedding tensor (D,).
            sids (Optional[Tensor]): Speaker ID tensor (1,).
            lids (Optional[Tensor]): Language ID tensor (1,).
            pitch (Optional[Tensor]): Pitch tensor (T_wav).
            energy (Optional[Tensor]): Energy tensor.

        Returns:
            Dict[str, Tensor]: Dict of outputs.
        """
        label_lengths = torch.tensor([len(label)])
        midi_lengths = torch.tensor([len(midi)])
        duration_phn_lengths = torch.tensor([len(duration_phn)])
        duration_ruled_phn_lengths = torch.tensor([len(duration_ruled_phn)])
        duration_syb_lengths = torch.tensor([len(duration_syb)])
        slur_lengths = torch.tensor([len(slur)])

        # unsqueeze of singing needed otherwise causing error in STFT dimension
        # for data-parallel
        text = text.unsqueeze(0)

        label = label.unsqueeze(0)
        midi = midi.unsqueeze(0)
        duration_phn = duration_phn.unsqueeze(0)
        duration_ruled_phn = duration_ruled_phn.unsqueeze(0)
        duration_syb = duration_syb.unsqueeze(0)
        phn_cnt = phn_cnt.unsqueeze(0)
        slur = slur.unsqueeze(0)

        # Extract auxiliary features
        # melody : 128 midi pitch
        # duration :
        #   input-> phone-id seqence
        #   output -> frame level or syllable level
        batch_size = text.size(0)
        assert batch_size == 1
        if isinstance(self.score_feats_extract, FrameScoreFeats):
            (
                label_lab,
                label_lab_lengths,
                midi_lab,
                midi_lab_lengths,
                duration_lab,
                duration_lab_lengths,
            ) = expand_to_frame(
                duration_phn, duration_phn_lengths, label, midi, duration_phn
            )

            # for data-parallel
            label_lab = label_lab[:, : label_lab_lengths.max()]
            midi_lab = midi_lab[:, : midi_lab_lengths.max()]
            duration_lab = duration_lab[:, : duration_lab_lengths.max()]

            (
                label_score,
                label_score_lengths,
                midi_score,
                midi_score_lengths,
                duration_score,
                duration_score_phn_lengths,
            ) = expand_to_frame(
                duration_ruled_phn,
                duration_ruled_phn_lengths,
                label,
                midi,
                duration_ruled_phn,
            )

            # for data-parallel
            label_score = label_score[:, : label_score_lengths.max()]
            midi_score = midi_score[:, : midi_score_lengths.max()]
            duration_score = duration_score[:, : duration_score_phn_lengths.max()]
            duration_score_syb = None
        elif isinstance(self.score_feats_extract, SyllableScoreFeats):
            # Remove unused paddings at end
            label_lab = label[:, : label_lengths.max()]
            midi_lab = midi[:, : midi_lengths.max()]
            duration_lab = duration_phn[:, : duration_phn_lengths.max()]

            label_score = label[:, : label_lengths.max()]
            midi_score = midi[:, : midi_lengths.max()]
            duration_score = duration_ruled_phn[:, : duration_ruled_phn_lengths.max()]
            duration_score_syb = duration_syb[:, : duration_syb_lengths.max()]
            slur = slur[:, : slur_lengths.max()]
        # NOTE(review): unlike forward(), there is no `else` branch raising for
        # an unknown score_feats_extract type; an unexpected extractor would
        # lead to a NameError on `label_lab` below — consider mirroring
        # forward()'s RuntimeError here.

        input_dict = dict(text=text)
        if decode_config["use_teacher_forcing"] or getattr(self.svs, "use_gst", False):
            if singing is None:
                raise RuntimeError("missing required argument: 'singing'")
            if self.feats_extract is not None:
                feats = self.feats_extract(singing[None])[0][0]
            else:
                # Use precalculated feats (feats_type != raw case)
                feats = singing
            if self.normalize is not None:
                feats = self.normalize(feats[None])[0][0]
            input_dict.update(feats=feats)
            # if self.svs.require_raw_singing:
            #     input_dict.update(singing=singing)

        if decode_config["use_teacher_forcing"]:
            if self.pitch_extract is not None:
                pitch = self.pitch_extract(
                    singing[None],
                    feats_lengths=torch.LongTensor([len(feats)]),
                )[0][0]
            if self.pitch_normalize is not None:
                pitch = self.pitch_normalize(pitch[None])[0][0]
            if pitch is not None:
                input_dict.update(pitch=pitch)

            if self.energy_extract is not None:
                energy = self.energy_extract(
                    singing[None],
                    feats_lengths=torch.LongTensor([len(feats)]),
                )[0][0]
            if self.energy_normalize is not None:
                energy = self.energy_normalize(energy[None])[0][0]
            if energy is not None:
                input_dict.update(energy=energy)

        # label (parameter rebound to a dict of lab/score variants)
        label = dict()
        if label_lab is not None:
            label_lab = label_lab.to(dtype=torch.long)
            label.update(lab=label_lab)
        if label_score is not None:
            label_score = label_score.to(dtype=torch.long)
            label.update(score=label_score)
        input_dict.update(label=label)

        # melody
        melody = dict()
        if midi_lab is not None:
            midi_lab = midi_lab.to(dtype=torch.long)
            melody.update(lab=midi_lab)
        if midi_score is not None:
            midi_score = midi_score.to(dtype=torch.long)
            melody.update(score=midi_score)
        input_dict.update(melody=melody)

        # duration
        duration = dict()
        if duration_lab is not None:
            duration_lab = duration_lab.to(dtype=torch.long)
            duration.update(lab=duration_lab)
        if duration_score is not None:
            duration_phn_score = duration_score.to(dtype=torch.long)
            duration.update(score_phn=duration_phn_score)
        if duration_score_syb is not None:
            duration_syb_score = duration_score_syb.to(dtype=torch.long)
            duration.update(score_syb=duration_syb_score)
        input_dict.update(duration=duration)

        if slur is not None:
            input_dict.update(slur=slur)
        if spembs is not None:
            input_dict.update(spembs=spembs)
        if sids is not None:
            input_dict.update(sids=sids)
        if lids is not None:
            input_dict.update(lids=lids)

        output_dict = self.svs.inference(**input_dict, **decode_config)

        if self.normalize is not None and output_dict.get("feat_gen") is not None:
            # NOTE: normalize.inverse is in-place operation
            feat_gen_denorm = self.normalize.inverse(
                output_dict["feat_gen"].clone()[None]
            )[0][0]
            output_dict.update(feat_gen_denorm=feat_gen_denorm)
        return output_dict
| 28,411 | 42.443425 | 88 | py |
espnet | espnet-master/espnet2/svs/abs_svs.py | # Copyright 2021 Tomoki Hayashi
# Copyright 2021 Carnegie Mellon University (Jiatong Shi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Singing-voice-synthesis abstrast class."""
from abc import ABC, abstractmethod
from typing import Dict, Tuple
import torch
class AbsSVS(torch.nn.Module, ABC):
    """SVS abstract class.

    Concrete singing-voice-synthesis networks implement :meth:`forward`
    (training) and :meth:`inference` (generation); the two properties let
    the wrapper model know what additional inputs/outputs are needed.
    """

    @abstractmethod
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Calculate outputs and return the loss tensor."""
        raise NotImplementedError

    @abstractmethod
    def inference(
        self,
        text: torch.Tensor,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Return predicted output as a dict."""
        raise NotImplementedError

    @property
    def require_raw_singing(self):
        """Return whether or not raw_singing is required."""
        # Default: the model does not need the raw waveform at training time.
        return False

    @property
    def require_vocoder(self):
        """Return whether or not vocoder is required."""
        # Default: the model outputs acoustic features, so a vocoder is needed.
        return True
| 1,182 | 24.717391 | 68 | py |
espnet | espnet-master/espnet2/svs/singing_tacotron/singing_tacotron.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Copyright 2023 Renmin University of China (Yuning Wu)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Singing Tacotron related modules for ESPnet2."""
import logging
from typing import Dict, Optional, Sequence, Tuple
import six
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from typeguard import check_argument_types
from espnet2.svs.abs_svs import AbsSVS
from espnet2.svs.singing_tacotron.decoder import Decoder
from espnet2.svs.singing_tacotron.encoder import Duration_Encoder, Encoder
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.tts.gst.style_encoder import StyleEncoder
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import (
GuidedAttentionLoss,
Tacotron2Loss,
)
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.rnn.attentions import (
AttForward,
AttForwardTA,
AttLoc,
GDCAttLoc,
)
class singing_tacotron(AbsSVS):
    """singing_Tacotron module for end-to-end singing-voice-synthesis.
    This is a module of Spectrogram prediction network in Singing Tacotron
    described in `Singing-Tacotron: Global Duration Control Attention and
    Dynamic Filter for End-to-end Singing Voice Synthesis`_,
    which learn accurate alignment information automatically.
    .. _`Singing-Tacotron: Global Duration Control Attention and Dynamic
    Filter for End-to-end Singing Voice Synthesis`:
    https://arxiv.org/pdf/2202.07907v1.pdf
    """
    def __init__(
        self,
        # network structure related
        idim: int,
        odim: int,
        midi_dim: int = 129,
        duration_dim: int = 500,
        embed_dim: int = 512,
        elayers: int = 1,
        eunits: int = 512,
        econv_layers: int = 3,
        econv_chans: int = 512,
        econv_filts: int = 5,
        atype: str = "GDCA",
        adim: int = 512,
        aconv_chans: int = 32,
        aconv_filts: int = 15,
        cumulate_att_w: bool = True,
        dlayers: int = 2,
        dunits: int = 1024,
        prenet_layers: int = 2,
        prenet_units: int = 256,
        postnet_layers: int = 5,
        postnet_chans: int = 512,
        postnet_filts: int = 5,
        output_activation: str = None,
        use_batch_norm: bool = True,
        use_concate: bool = True,
        use_residual: bool = False,
        reduction_factor: int = 1,
        # extra embedding related
        spks: Optional[int] = None,
        langs: Optional[int] = None,
        spk_embed_dim: Optional[int] = None,
        spk_embed_integration_type: str = "concat",
        use_gst: bool = False,
        gst_tokens: int = 10,
        gst_heads: int = 4,
        gst_conv_layers: int = 6,
        gst_conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),
        gst_conv_kernel_size: int = 3,
        gst_conv_stride: int = 2,
        gst_gru_layers: int = 1,
        gst_gru_units: int = 128,
        # training related
        dropout_rate: float = 0.5,
        zoneout_rate: float = 0.1,
        use_masking: bool = True,
        use_weighted_masking: bool = False,
        bce_pos_weight: float = 5.0,
        loss_type: str = "L1",
        use_guided_attn_loss: bool = True,
        guided_attn_loss_sigma: float = 0.4,
        guided_attn_loss_lambda: float = 1.0,
    ):
        """Initialize Singing Tacotron module.
        Args:
            idim (int): Dimension of the inputs.
            odim: (int) Dimension of the outputs.
            embed_dim (int): Dimension of the token embedding.
            elayers (int): Number of encoder blstm layers.
            eunits (int): Number of encoder blstm units.
            econv_layers (int): Number of encoder conv layers.
            econv_filts (int): Number of encoder conv filter size.
            econv_chans (int): Number of encoder conv filter channels.
            dlayers (int): Number of decoder lstm layers.
            dunits (int): Number of decoder lstm units.
            prenet_layers (int): Number of prenet layers.
            prenet_units (int): Number of prenet units.
            postnet_layers (int): Number of postnet layers.
            postnet_filts (int): Number of postnet filter size.
            postnet_chans (int): Number of postnet filter channels.
            output_activation (str): Name of activation function for outputs.
            adim (int): Number of dimension of mlp in attention.
            aconv_chans (int): Number of attention conv filter channels.
            aconv_filts (int): Number of attention conv filter size.
            cumulate_att_w (bool): Whether to cumulate previous attention weight.
            use_batch_norm (bool): Whether to use batch normalization.
            use_concate (bool): Whether to concat enc outputs w/ dec lstm outputs.
            reduction_factor (int): Reduction factor.
            spks (Optional[int]): Number of speakers. If set to > 1, assume that the
                sids will be provided as the input and use sid embedding layer.
            langs (Optional[int]): Number of languages. If set to > 1, assume that the
                lids will be provided as the input and use sid embedding layer.
            spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
                assume that spembs will be provided as the input.
            spk_embed_integration_type (str): How to integrate speaker embedding.
            use_gst (str): Whether to use global style token.
            gst_tokens (int): Number of GST embeddings.
            gst_heads (int): Number of heads in GST multihead attention.
            gst_conv_layers (int): Number of conv layers in GST.
            gst_conv_chans_list: (Sequence[int]): List of the number of channels of conv
                layers in GST.
            gst_conv_kernel_size (int): Kernel size of conv layers in GST.
            gst_conv_stride (int): Stride size of conv layers in GST.
            gst_gru_layers (int): Number of GRU layers in GST.
            gst_gru_units (int): Number of GRU units in GST.
            dropout_rate (float): Dropout rate.
            zoneout_rate (float): Zoneout rate.
            use_masking (bool): Whether to mask padded part in loss calculation.
            use_weighted_masking (bool): Whether to apply weighted masking in
                loss calculation.
            bce_pos_weight (float): Weight of positive sample of stop token
                (only for use_masking=True).
            loss_type (str): Loss function type ("L1", "L2", or "L1+L2").
            use_guided_attn_loss (bool): Whether to use guided attention loss.
            guided_attn_loss_sigma (float): Sigma in guided attention loss.
            guided_attn_loss_lambda (float): Lambda in guided attention loss.
        """
        assert check_argument_types()
        super().__init__()
        # store hyperparameters
        self.idim = idim
        self.odim = odim
        # the last embedding index of each vocabulary doubles as its eos id
        self.eos = idim - 1
        self.midi_eos = midi_dim - 1
        # duration reuses index 0 (the padding index) as its eos value
        self.duration_eos = 0
        self.cumulate_att_w = cumulate_att_w
        self.reduction_factor = reduction_factor
        self.use_gst = use_gst
        self.use_guided_attn_loss = use_guided_attn_loss
        self.loss_type = loss_type
        self.atype = atype
        # define activation function for the final output
        if output_activation is None:
            self.output_activation_fn = None
        elif hasattr(F, output_activation):
            self.output_activation_fn = getattr(F, output_activation)
        else:
            raise ValueError(
                f"there is no such an activation function. " f"({output_activation})"
            )
        # set padding idx
        padding_idx = 0
        self.padding_idx = padding_idx
        # define encoder
        self.phone_encode_layer = torch.nn.Embedding(
            num_embeddings=idim, embedding_dim=embed_dim, padding_idx=self.padding_idx
        )
        self.midi_encode_layer = torch.nn.Embedding(
            num_embeddings=midi_dim,
            embedding_dim=embed_dim,
            padding_idx=self.padding_idx,
        )
        self.duration_encode_layer = torch.nn.Embedding(
            num_embeddings=duration_dim,
            embedding_dim=embed_dim,
            padding_idx=self.padding_idx,
        )
        # define network modules
        self.enc = Encoder(
            idim=embed_dim,
            embed_dim=embed_dim,
            elayers=elayers,
            eunits=eunits,
            econv_layers=econv_layers,
            econv_chans=econv_chans,
            econv_filts=econv_filts,
            use_batch_norm=use_batch_norm,
            use_residual=use_residual,
            dropout_rate=dropout_rate,
            padding_idx=padding_idx,
        )
        # duration encoder for LA, FA
        self.dur_enc = Encoder(
            idim=embed_dim,
            embed_dim=embed_dim,
            elayers=elayers,
            eunits=eunits,
            econv_layers=econv_layers,
            econv_chans=econv_chans,
            econv_filts=econv_filts,
            use_batch_norm=use_batch_norm,
            use_residual=use_residual,
            dropout_rate=dropout_rate,
            padding_idx=padding_idx,
        )
        # duration encoder for GDCA
        self.enc_duration = Duration_Encoder(
            idim=embed_dim,
            embed_dim=embed_dim,
            dropout_rate=dropout_rate,
            padding_idx=self.padding_idx,
        )
        if self.use_gst:
            self.gst = StyleEncoder(
                idim=odim,  # the input is mel-spectrogram
                gst_tokens=gst_tokens,
                gst_token_dim=eunits,
                gst_heads=gst_heads,
                conv_layers=gst_conv_layers,
                conv_chans_list=gst_conv_chans_list,
                conv_kernel_size=gst_conv_kernel_size,
                conv_stride=gst_conv_stride,
                gru_layers=gst_gru_layers,
                gru_units=gst_gru_units,
            )
        self.spks = None
        if spks is not None and spks > 1:
            self.spks = spks
            self.sid_emb = torch.nn.Embedding(spks, eunits)
        self.langs = None
        if langs is not None and langs > 1:
            self.langs = langs
            self.lid_emb = torch.nn.Embedding(langs, eunits)
        self.spk_embed_dim = None
        if spk_embed_dim is not None and spk_embed_dim > 0:
            self.spk_embed_dim = spk_embed_dim
            self.spk_embed_integration_type = spk_embed_integration_type
        if self.spk_embed_dim is None:
            dec_idim = eunits
        elif self.spk_embed_integration_type == "concat":
            dec_idim = eunits + spk_embed_dim
        elif self.spk_embed_integration_type == "add":
            dec_idim = eunits
            self.projection = torch.nn.Linear(self.spk_embed_dim, eunits)
        else:
            raise ValueError(f"{spk_embed_integration_type} is not supported.")
        if self.atype == "location":
            att = AttLoc(dec_idim, dunits, adim, aconv_chans, aconv_filts)
        elif self.atype == "forward":
            att = AttForward(dec_idim, dunits, adim, aconv_chans, aconv_filts)
            if self.cumulate_att_w:
                logging.warning(
                    "cumulation of attention weights is disabled "
                    "in forward attention."
                )
                self.cumulate_att_w = False
        elif self.atype == "forward_ta":
            att = AttForwardTA(dec_idim, dunits, adim, aconv_chans, aconv_filts, odim)
            if self.cumulate_att_w:
                logging.warning(
                    "cumulation of attention weights is disabled "
                    "in forward attention."
                )
                self.cumulate_att_w = False
        elif self.atype == "GDCA":
            att = GDCAttLoc(dec_idim, dunits, adim, aconv_chans, aconv_filts)
        else:
            raise NotImplementedError(
                "Support only location, forward, forward_ta or GDCA"
            )
        self.dec = Decoder(
            idim=dec_idim,
            odim=odim,
            att=att,
            dlayers=dlayers,
            dunits=dunits,
            prenet_layers=prenet_layers,
            prenet_units=prenet_units,
            postnet_layers=postnet_layers,
            postnet_chans=postnet_chans,
            postnet_filts=postnet_filts,
            output_activation_fn=self.output_activation_fn,
            cumulate_att_w=self.cumulate_att_w,
            use_batch_norm=use_batch_norm,
            use_concate=use_concate,
            dropout_rate=dropout_rate,
            zoneout_rate=zoneout_rate,
            reduction_factor=reduction_factor,
        )
        self.taco2_loss = Tacotron2Loss(
            use_masking=use_masking,
            use_weighted_masking=use_weighted_masking,
            bce_pos_weight=bce_pos_weight,
        )
        if self.use_guided_attn_loss:
            self.attn_loss = GuidedAttentionLoss(
                sigma=guided_attn_loss_sigma,
                alpha=guided_attn_loss_lambda,
            )
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        label: Optional[Dict[str, torch.Tensor]] = None,
        label_lengths: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        melody_lengths: Optional[Dict[str, torch.Tensor]] = None,
        duration: Optional[Dict[str, torch.Tensor]] = None,
        duration_lengths: Optional[Dict[str, torch.Tensor]] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        slur: torch.LongTensor = None,
        slur_lengths: torch.Tensor = None,
        ying: torch.Tensor = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        joint_training: bool = False,
        flag_IsValid: bool = False,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Calculate forward propagation.
        Args:
            text (LongTensor): Batch of padded character ids (B, T_text).
            text_lengths (LongTensor): Batch of lengths of each input batch (B,).
            feats (Tensor): Batch of padded target features (B, T_feats, odim).
            feats_lengths (LongTensor): Batch of the lengths of each target (B,).
            label (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded label ids (B, Tmax).
            label_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded label ids (B, ).
            melody (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded melody (B, Tmax).
            melody_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded melody (B, ).
            pitch (FloatTensor): Batch of padded f0 (B, Tmax).
            pitch_lengths (LongTensor): Batch of the lengths of padded f0 (B, ).
            duration (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
                value (LongTensor): Batch of padded duration (B, Tmax).
            duration_lengths (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
                value (LongTensor): Batch of the lengths of padded duration (B, ).
            slur (LongTensor): Batch of padded slur (B, Tmax).
            slur_lengths (LongTensor): Batch of the lengths of padded slur (B, ).
            spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
            lids (Optional[Tensor]): Batch of language IDs (B, 1).
            joint_training (bool): Whether to perform joint training with vocoder.
        Returns:
            Tensor: Loss scalar value.
            Dict: Statistics to be monitored.
            Tensor: Weight value if not joint training else model outputs.
        """
        # training uses the "score" (music-score) streams of each dict input
        label = label["score"]
        midi = melody["score"]
        duration = duration["score_phn"]
        label_lengths = label_lengths["score"]
        midi_lengths = melody_lengths["score"]
        duration_lengths = duration_lengths["score_phn"]
        feats = feats[:, : feats_lengths.max()]  # for data-parallel
        midi = midi[:, : midi_lengths.max()]  # for data-parallel
        label = label[:, : label_lengths.max()]  # for data-parallel
        duration = duration[:, : duration_lengths.max()]  # for data-parallel
        batch_size = text.size(0)
        # Add eos at the last of sequence
        label = F.pad(label, [0, 1], "constant", self.padding_idx)
        midi = F.pad(midi, [0, 1], "constant", self.padding_idx)
        duration = F.pad(duration, [0, 1], "constant", self.padding_idx)
        for i, l in enumerate(label_lengths):
            label[i, l] = self.eos
            midi[i, l] = self.midi_eos
            duration[i, l] = self.duration_eos
        # Add sos at the beginning of the sequence (the eos ids double as sos)
        label = F.pad(label, [1, 0], "constant", self.eos)
        midi = F.pad(midi, [1, 0], "constant", self.midi_eos)
        duration = F.pad(duration, [1, 0], "constant", self.duration_eos)
        # +2 accounts for the prepended sos and the appended eos
        ilens = label_lengths + 2
        label_emb = self.phone_encode_layer(label)
        midi_emb = self.midi_encode_layer(midi)
        duration_emb = self.duration_encode_layer(duration)
        input_emb = label_emb + midi_emb + duration_emb
        # "content" stream (phoneme + midi) and "duration" stream are encoded
        # separately; see _forward for how each attention type consumes them
        con = label_emb + midi_emb
        dur = duration_emb
        ys = feats
        olens = feats_lengths
        # make labels for stop prediction
        labels = make_pad_mask(olens - 1).to(ys.device, ys.dtype)
        labels = F.pad(labels, [0, 1], "constant", 1.0)
        # calculate tacotron2 outputs
        after_outs, before_outs, logits, att_ws = self._forward(
            xs=input_emb,
            con=con,
            dur=dur,
            ilens=ilens,
            ys=ys,
            olens=olens,
            spembs=spembs,
            sids=sids,
            lids=lids,
        )
        # modify mod part of groundtruth
        if self.reduction_factor > 1:
            assert olens.ge(
                self.reduction_factor
            ).all(), "Output length must be greater than or equal to reduction factor."
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
            max_out = max(olens)
            ys = ys[:, :max_out]
            labels = labels[:, :max_out]
            labels = torch.scatter(
                labels, 1, (olens - 1).unsqueeze(1), 1.0
            )  # see #3388
        # calculate taco2 loss
        l1_loss, mse_loss, bce_loss = self.taco2_loss(
            after_outs, before_outs, logits, ys, labels, olens
        )
        if self.loss_type == "L1+L2":
            loss = l1_loss + mse_loss + bce_loss
        elif self.loss_type == "L1":
            loss = l1_loss + bce_loss
        elif self.loss_type == "L2":
            loss = mse_loss + bce_loss
        else:
            raise ValueError(f"unknown --loss-type {self.loss_type}")
        stats = dict(
            l1_loss=l1_loss.item(),
            mse_loss=mse_loss.item(),
            bce_loss=bce_loss.item(),
        )
        # calculate attention loss
        if self.use_guided_attn_loss:
            # NOTE(kan-bayashi): length of output for auto-regressive
            # input will be changed when r > 1
            if self.reduction_factor > 1:
                olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            else:
                olens_in = olens
            attn_loss = self.attn_loss(att_ws, ilens, olens_in)
            loss = loss + attn_loss
            stats.update(attn_loss=attn_loss.item())
        if not joint_training:
            stats.update(loss=loss.item())
            loss, stats, weight = force_gatherable(
                (loss, stats, batch_size), loss.device
            )
            return loss, stats, weight
        else:
            return loss, stats, after_outs
    def _forward(
        self,
        xs: torch.Tensor,
        con: torch.Tensor,
        dur: torch.Tensor,
        ilens: torch.Tensor,
        ys: torch.Tensor,
        olens: torch.Tensor,
        spembs: torch.Tensor,
        sids: torch.Tensor,
        lids: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Encode inputs and run the decoder (teacher forcing).

        GDCA attention consumes a per-step transition token computed from the
        duration stream; other attention types instead add the encoded
        duration stream onto the content encoding and pass no token.
        """
        if self.atype == "GDCA":
            hs, hlens = self.enc(con, ilens)  # hs: (B, seq_len, emb_dim)
            trans_token = self.enc_duration(dur)  # (B, seq_len, 1)
        else:
            hs, hlens = self.enc(con, ilens)  # hs: (B, seq_len, emb_dim)
            hs_dur, hlens_dur = self.dur_enc(dur, ilens)
            hs += hs_dur
            trans_token = None
        if self.use_gst:
            style_embs = self.gst(ys)
            hs = hs + style_embs.unsqueeze(1)
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)
        return self.dec(hs, hlens, trans_token, ys)
    def inference(
        self,
        text: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        label: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        duration: Optional[Dict[str, torch.Tensor]] = None,
        slur: Optional[Dict[str, torch.Tensor]] = None,
        pitch: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        threshold: float = 0.5,
        minlenratio: float = 0.0,
        maxlenratio: float = 30.0,
        use_att_constraint: bool = False,
        use_dynamic_filter: bool = False,
        backward_window: int = 1,
        forward_window: int = 3,
        use_teacher_forcing: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Generate the sequence of features given the sequences of characters.
        Args:
            text (LongTensor): Input sequence of characters (T_text,).
            feats (Optional[Tensor]): Feature sequence to extract style (N, idim).
            label (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded label ids (Tmax).
            melody (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded melody (Tmax).
            pitch (FloatTensor): Batch of padded f0 (Tmax).
            duration (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
                value (LongTensor): Batch of padded duration (Tmax).
            slur (LongTensor): Batch of padded slur (B, Tmax).
            spembs (Optional[Tensor]): Speaker embedding (spk_embed_dim,).
            sids (Optional[Tensor]): Speaker ID (1,).
            lids (Optional[Tensor]): Language ID (1,).
            threshold (float): Threshold in inference.
            minlenratio (float): Minimum length ratio in inference.
            maxlenratio (float): Maximum length ratio in inference.
            use_att_constraint (bool): Whether to apply attention constraint.
            use_dynamic_filter (bool): Whether to apply dynamic filter.
            backward_window (int): Backward window in attention constraint
                or dynamic filter.
            forward_window (int): Forward window in attention constraint
                or dynamic filter.
            use_teacher_forcing (bool): Whether to use teacher forcing.
        Returns:
            Dict[str, Tensor]: Output dict including the following items:
                * feat_gen (Tensor): Output sequence of features (T_feats, odim).
                * prob (Tensor): Output sequence of stop probabilities (T_feats,).
                * att_w (Tensor): Attention weights (T_feats, T).
        """
        # NOTE(review): unlike forward(), durations come from the "lab" stream
        # here while label/melody still come from "score" — confirm intended
        label = label["score"]
        midi = melody["score"]
        duration = duration["lab"]
        y = feats
        spemb = spembs
        # add eos at the last of sequence
        label = F.pad(label, [0, 1], "constant", self.eos)
        midi = F.pad(midi, [0, 1], "constant", self.midi_eos)
        duration = F.pad(duration, [0, 1], "constant", self.duration_eos)
        # add sos at the beginning of the sequence (the eos ids double as sos)
        label = F.pad(label, [1, 0], "constant", self.eos)
        midi = F.pad(midi, [1, 0], "constant", self.midi_eos)
        duration = F.pad(duration, [1, 0], "constant", self.duration_eos)
        ilens = torch.tensor([label.size(1)])
        label_emb = self.phone_encode_layer(label)
        midi_emb = self.midi_encode_layer(midi)
        duration_emb = self.duration_encode_layer(duration)
        input_emb = label_emb + midi_emb + duration_emb
        con = label_emb + midi_emb
        dur = duration_emb
        # inference with teacher forcing
        if use_teacher_forcing:
            assert feats is not None, "feats must be provided with teacher forcing."
            spembs = None if spemb is None else spemb.unsqueeze(0)
            ys = y.unsqueeze(0)
            olens = torch.tensor([ys.size(1)])
            outs, _, _, att_ws = self._forward(
                xs=input_emb,
                con=con,
                dur=dur,
                ilens=ilens,
                ys=ys,
                olens=olens,
                spembs=spembs,
                sids=sids,
                lids=lids,
            )
            return dict(feat_gen=outs[0], att_w=att_ws[0])
        # inference
        if self.atype == "GDCA":
            h = self.enc.inference(con, ilens)  # h: (B, seq_len, emb_dim)
            trans_token = self.enc_duration.inference(dur)  # (B, seq_len, 1)
        else:
            h = self.enc.inference(con, ilens)  # hs: (B, seq_len, emb_dim)
            h_dur = self.dur_enc.inference(dur, ilens)
            h += h_dur
            trans_token = None
        if self.use_gst:
            style_emb = self.gst(y.unsqueeze(0))
            h = h + style_emb
        if self.spks is not None:
            sid_emb = self.sid_emb(sids.view(-1))
            h = h + sid_emb
        if self.langs is not None:
            lid_emb = self.lid_emb(lids.view(-1))
            h = h + lid_emb
        if self.spk_embed_dim is not None:
            hs, spembs = h.unsqueeze(0), spemb.unsqueeze(0)
            h = self._integrate_with_spk_embed(hs, spembs)[0]
        out, prob, att_w = self.dec.inference(
            h,
            trans_token,
            threshold=threshold,
            minlenratio=minlenratio,
            maxlenratio=maxlenratio,
            use_att_constraint=use_att_constraint,
            use_dynamic_filter=use_dynamic_filter,
            backward_window=backward_window,
            forward_window=forward_window,
        )
        return dict(feat_gen=out, prob=prob, att_w=att_w)
    def _integrate_with_spk_embed(
        self, hs: torch.Tensor, spembs: torch.Tensor
    ) -> torch.Tensor:
        """Integrate speaker embedding with hidden states.
        Args:
            hs (Tensor): Batch of hidden state sequences (B, Tmax, eunits).
            spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
        Returns:
            Tensor: Batch of integrated hidden state sequences (B, Tmax, eunits) if
                integration_type is "add" else (B, Tmax, eunits + spk_embed_dim).
        """
        if self.spk_embed_integration_type == "add":
            # apply projection and then add to hidden states
            spembs = self.projection(F.normalize(spembs))
            hs = hs + spembs.unsqueeze(1)
        elif self.spk_embed_integration_type == "concat":
            # concat hidden states with spk embeds
            spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
            hs = torch.cat([hs, spembs], dim=-1)
        else:
            raise NotImplementedError("support only add or concat.")
        return hs
| 28,517 | 39.915352 | 88 | py |
espnet | espnet-master/espnet2/svs/singing_tacotron/encoder.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Copyright 2023 Renmin University of China (Yuning Wu)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Singing Tacotron encoder related modules."""
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
def encoder_init(m):
    """Apply Xavier-uniform initialization (ReLU gain) to Conv1d weights.

    Intended to be passed to ``torch.nn.Module.apply``; any module that is
    not a ``torch.nn.Conv1d`` is left untouched.
    """
    if not isinstance(m, torch.nn.Conv1d):
        return
    relu_gain = torch.nn.init.calculate_gain("relu")
    torch.nn.init.xavier_uniform_(m.weight, gain=relu_gain)
class Encoder(torch.nn.Module):
    """Encoder module of Spectrogram prediction network.
    This is a module of encoder of Spectrogram prediction network in Singing Tacotron,
    which described in `Singing-Tacotron: Global Duration Control Attention and Dynamic
    Filter for End-to-end Singing Voice Synthesis`_. This is the encoder which converts
    either a sequence of characters or acoustic features into the sequence of
    hidden states.
    .. _`Singing-Tacotron: Global Duration Control Attention and Dynamic
    Filter for End-to-end Singing Voice Synthesis`:
    https://arxiv.org/abs/2202.07907
    """
    def __init__(
        self,
        idim,
        input_layer="embed",
        embed_dim=512,
        elayers=1,
        eunits=512,
        econv_layers=3,
        econv_chans=512,
        econv_filts=5,
        use_batch_norm=True,
        use_residual=False,
        dropout_rate=0.5,
        padding_idx=0,
    ):
        """Initialize Singing Tacotron encoder module.
        Args:
            idim (int) Dimension of the inputs.
            input_layer (str): Input layer type ("linear" or "embed").
            embed_dim (int, optional) Dimension of character embedding.
            elayers (int, optional) The number of encoder blstm layers.
            eunits (int, optional) The number of encoder blstm units.
            econv_layers (int, optional) The number of encoder conv layers.
            econv_filts (int, optional) The number of encoder conv filter size.
            econv_chans (int, optional) The number of encoder conv filter channels.
            use_batch_norm (bool, optional) Whether to use batch normalization.
            use_residual (bool, optional) Whether to use residual connection.
            dropout_rate (float, optional) Dropout rate.
        """
        super(Encoder, self).__init__()
        # store the hyperparameters
        self.idim = idim
        self.use_residual = use_residual
        # define network layer modules
        if input_layer == "linear":
            self.embed = torch.nn.Linear(idim, econv_chans)
        elif input_layer == "embed":
            self.embed = torch.nn.Embedding(idim, embed_dim, padding_idx=padding_idx)
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        if econv_layers > 0:
            self.convs = torch.nn.ModuleList()
            for layer in range(econv_layers):
                # first conv takes the embedding width when an Embedding input
                # layer is used; all later convs take econv_chans
                ichans = (
                    embed_dim if layer == 0 and input_layer == "embed" else econv_chans
                )
                if use_batch_norm:
                    self.convs += [
                        torch.nn.Sequential(
                            torch.nn.Conv1d(
                                ichans,
                                econv_chans,
                                econv_filts,
                                stride=1,
                                padding=(econv_filts - 1) // 2,
                                bias=False,
                            ),
                            torch.nn.BatchNorm1d(econv_chans),
                            torch.nn.ReLU(),
                            torch.nn.Dropout(dropout_rate),
                        )
                    ]
                else:
                    self.convs += [
                        torch.nn.Sequential(
                            torch.nn.Conv1d(
                                ichans,
                                econv_chans,
                                econv_filts,
                                stride=1,
                                padding=(econv_filts - 1) // 2,
                                bias=False,
                            ),
                            torch.nn.ReLU(),
                            torch.nn.Dropout(dropout_rate),
                        )
                    ]
        else:
            self.convs = None
        if elayers > 0:
            iunits = econv_chans if econv_layers != 0 else embed_dim
            # eunits // 2 per direction so the bidirectional output is eunits
            self.blstm = torch.nn.LSTM(
                iunits, eunits // 2, elayers, batch_first=True, bidirectional=True
            )
        else:
            self.blstm = None
        # initialize
        self.apply(encoder_init)
    def forward(self, xs, ilens=None):
        """Calculate forward propagation.
        Args:
            xs (Tensor): Batch of the padded sequence. Either character ids (B, Tmax)
                or acoustic feature (B, Tmax, idim * encoder_reduction_factor). Padded
                value should be 0.
            ilens (LongTensor): Batch of lengths of each input batch (B,).
        Returns:
            Tensor: Batch of the sequences of encoder states(B, Tmax, eunits).
            LongTensor: Batch of lengths of each sequence (B,)
        """
        xs = xs.transpose(1, 2)
        if self.convs is not None:
            for i in range(len(self.convs)):
                if self.use_residual:
                    xs = xs + self.convs[i](xs)
                else:
                    xs = self.convs[i](xs)
        # NOTE(review): with elayers == 0 a single tensor is returned instead
        # of the (xs, hlens) tuple documented above — callers must handle this
        if self.blstm is None:
            return xs.transpose(1, 2)
        if not isinstance(ilens, torch.Tensor):
            ilens = torch.tensor(ilens)
        xs = pack_padded_sequence(
            xs.transpose(1, 2), ilens.cpu(), batch_first=True, enforce_sorted=False
        )
        self.blstm.flatten_parameters()
        xs, _ = self.blstm(xs)  # (B, Tmax, C)
        xs, hlens = pad_packed_sequence(xs, batch_first=True)
        return xs, hlens
    def inference(self, x, ilens):
        """Inference.
        Args:
            x (Tensor): The sequence of character ids (T,)
                or acoustic feature (T, idim * encoder_reduction_factor).
            ilens (LongTensor): Length of the (length-1 batched) input.
        Returns:
            Tensor: The sequences of encoder states(T, eunits).
        """
        xs = x
        # forward() returns (xs, hlens); take xs, then the first batch element
        return self.forward(xs, ilens)[0][0]
class Duration_Encoder(torch.nn.Module):
    """Duration_Encoder module of Spectrogram prediction network.

    Encoder component of the Singing-Tacotron spectrogram prediction network
    that converts a sequence of duration and tempo features into a per-step
    transition token in ``[0, 1]``.

    .. _`SINGING-TACOTRON: GLOBAL DURATION CONTROL ATTENTION AND DYNAMIC FILTER FOR
    END-TO-END SINGING VOICE SYNTHESIS`:
    https://arxiv.org/abs/2202.07907
    """
    def __init__(
        self,
        idim,
        embed_dim=512,
        dropout_rate=0.5,
        padding_idx=0,
    ):
        """Initialize Singing-Tacotron duration encoder module.

        Args:
            idim (int) Dimension of the inputs.
            embed_dim (int, optional) Dimension of character embedding
                (accepted for interface compatibility; not used directly).
            dropout_rate (float, optional) Dropout rate
                (accepted for interface compatibility; not used directly).
        """
        super(Duration_Encoder, self).__init__()
        self.idim = idim
        # project the input features down to 24 channels, then refine them
        # with two length-preserving ReLU/Conv1d stages
        self.dense24 = torch.nn.Linear(idim, 24)
        conv_stack = [
            torch.nn.ReLU(),
            torch.nn.Conv1d(24, 32, 3, stride=1, bias=False, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv1d(32, 32, 3, stride=1, bias=False, padding=1),
            torch.nn.ReLU(),
        ]
        self.convs = torch.nn.Sequential(*conv_stack)
        self.dense1 = torch.nn.Linear(32, 1)
        self.nntanh = torch.nn.Tanh()
        # Xavier-init all Conv1d weights
        self.apply(encoder_init)
    def forward(self, xs):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of the duration sequence.(B, Tmax, feature_len)

        Returns:
            Tensor: Batch of the sequences of transition token (B, Tmax, 1),
                squashed into ``[0, 1]``.
        """
        hidden = self.dense24(xs)
        hidden = self.convs(hidden.transpose(1, 2)).transpose(1, 2)
        token = self.nntanh(self.dense1(hidden))
        # remap tanh output from [-1, 1] to [0, 1]
        return (token + 1) / 2
    def inference(self, x):
        """Inference (same computation as ``forward``)."""
        return self.forward(x)
| 8,752 | 32.030189 | 87 | py |
espnet | espnet-master/espnet2/svs/singing_tacotron/decoder.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Copyright 2023 Renmin University of China (Yuning Wu)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Singing Tacotron decoder related modules."""
import six
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.rnn.attentions import AttForwardTA
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet, Prenet, ZoneOutCell
def decoder_init(m):
    """Apply Xavier-uniform initialization (tanh gain) to Conv1d weights.

    Intended to be passed to ``torch.nn.Module.apply``; any module that is
    not a ``torch.nn.Conv1d`` is left untouched.
    """
    if not isinstance(m, torch.nn.Conv1d):
        return
    tanh_gain = torch.nn.init.calculate_gain("tanh")
    torch.nn.init.xavier_uniform_(m.weight, gain=tanh_gain)
class Decoder(torch.nn.Module):
"""Decoder module of Spectrogram prediction network.
This is a module of decoder of Spectrogram prediction network in Singing Tacotron,
which described in `https://arxiv.org/pdf/2202.07907v1.pdf`_.
The decoder generates the sequence of
features from the sequence of the hidden states.
.. _`Singing-Tacotron: Global Duration Control Attention and Dynamic
Filter for End-to-end Singing Voice Synthesis`:
https://arxiv.org/pdf/2202.07907v1.pdf
"""
    def __init__(
        self,
        idim,
        odim,
        att,
        dlayers=2,
        dunits=1024,
        prenet_layers=2,
        prenet_units=256,
        postnet_layers=5,
        postnet_chans=512,
        postnet_filts=5,
        output_activation_fn=None,
        cumulate_att_w=True,
        use_batch_norm=True,
        use_concate=True,
        dropout_rate=0.5,
        zoneout_rate=0.1,
        reduction_factor=1,
    ):
        """Initialize Singing Tacotron decoder module.
        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            att (torch.nn.Module): Instance of attention class.
            dlayers (int, optional): The number of decoder lstm layers.
            dunits (int, optional): The number of decoder lstm units.
            prenet_layers (int, optional): The number of prenet layers.
            prenet_units (int, optional): The number of prenet units.
            postnet_layers (int, optional): The number of postnet layers.
            postnet_filts (int, optional): The number of postnet filter size.
            postnet_chans (int, optional): The number of postnet filter channels.
            output_activation_fn (torch.nn.Module, optional):
                Activation function for outputs.
            cumulate_att_w (bool, optional):
                Whether to cumulate previous attention weight.
            use_batch_norm (bool, optional): Whether to use batch normalization.
            use_concate (bool, optional): Whether to concatenate encoder embedding
                with decoder lstm outputs.
            dropout_rate (float, optional): Dropout rate.
            zoneout_rate (float, optional): Zoneout rate.
            reduction_factor (int, optional): Reduction factor.
        """
        super(Decoder, self).__init__()
        # store the hyperparameters
        self.idim = idim
        self.odim = odim
        self.att = att
        self.output_activation_fn = output_activation_fn
        self.cumulate_att_w = cumulate_att_w
        self.use_concate = use_concate
        self.reduction_factor = reduction_factor
        # check attention type
        if isinstance(self.att, AttForwardTA):
            self.use_att_extra_inputs = True
        else:
            self.use_att_extra_inputs = False
        # define lstm network
        # with no prenet, the raw previous frame (odim) feeds the first LSTM
        prenet_units = prenet_units if prenet_layers != 0 else odim
        self.lstm = torch.nn.ModuleList()
        for layer in six.moves.range(dlayers):
            iunits = idim + prenet_units if layer == 0 else dunits
            lstm = torch.nn.LSTMCell(iunits, dunits)
            if zoneout_rate > 0.0:
                lstm = ZoneOutCell(lstm, zoneout_rate)
            self.lstm += [lstm]
        # define prenet
        if prenet_layers > 0:
            self.prenet = Prenet(
                idim=odim,
                n_layers=prenet_layers,
                n_units=prenet_units,
                dropout_rate=dropout_rate,
            )
        else:
            self.prenet = None
        # define postnet
        if postnet_layers > 0:
            self.postnet = Postnet(
                idim=idim,
                odim=odim,
                n_layers=postnet_layers,
                n_chans=postnet_chans,
                n_filts=postnet_filts,
                use_batch_norm=use_batch_norm,
                dropout_rate=dropout_rate,
            )
        else:
            self.postnet = None
        # define projection layers
        iunits = idim + dunits if use_concate else dunits
        # each step emits reduction_factor frames and stop logits at once
        self.feat_out = torch.nn.Linear(iunits, odim * reduction_factor, bias=False)
        self.prob_out = torch.nn.Linear(iunits, reduction_factor)
        # initialize
        self.apply(decoder_init)
def _zero_state(self, hs):
init_hs = hs.new_zeros(hs.size(0), self.lstm[0].hidden_size)
return init_hs
def forward(self, hs, hlens, trans_token, ys):
"""Calculate forward propagation.
Args:
hs (Tensor): Batch of the sequences of padded hidden states (B, Tmax, idim).
hlens (LongTensor): Batch of lengths of each input batch (B,).
trans_token (Tensor): Global transition token for duration (B x Tmax x 1)
ys (Tensor):
Batch of the sequences of padded target features (B, Lmax, odim).
Returns:
Tensor: Batch of output tensors after postnet (B, Lmax, odim).
Tensor: Batch of output tensors before postnet (B, Lmax, odim).
Tensor: Batch of logits of stop prediction (B, Lmax).
Tensor: Batch of attention weights (B, Lmax, Tmax).
Note:
This computation is performed in teacher-forcing manner.
"""
# thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
if self.reduction_factor > 1:
ys = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
# length list should be list of int
hlens = list(map(int, hlens))
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = hs.new_zeros(hs.size(0), self.odim)
# initialize attention
prev_att_w = None
self.att.reset()
# loop for an output sequence
outs, logits, att_ws = [], [], []
for y in ys.transpose(0, 1):
if trans_token is None:
if self.use_att_extra_inputs:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w, prev_out)
else:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
else: # GDCA
att_c, att_w = self.att(hs, hlens, trans_token, z_list[0], prev_att_w)
prenet_out = self.prenet(prev_out) if self.prenet is not None else prev_out
xs = torch.cat([att_c, prenet_out], dim=1)
z_list[0], c_list[0] = self.lstm[0](xs, (z_list[0], c_list[0]))
for i in range(1, len(self.lstm)):
z_list[i], c_list[i] = self.lstm[i](
z_list[i - 1], (z_list[i], c_list[i])
)
zcs = (
torch.cat([z_list[-1], att_c], dim=1)
if self.use_concate
else z_list[-1]
)
outs += [self.feat_out(zcs).view(hs.size(0), self.odim, -1)]
logits += [self.prob_out(zcs)]
att_ws += [att_w]
prev_out = y # teacher forcing
if self.cumulate_att_w and prev_att_w is not None:
prev_att_w = prev_att_w + att_w # Note: error when use +=
else:
prev_att_w = att_w
logits = torch.cat(logits, dim=1) # (B, Lmax)
before_outs = torch.cat(outs, dim=2) # (B, odim, Lmax)
att_ws = torch.stack(att_ws, dim=1) # (B, Lmax, Tmax)
if self.reduction_factor > 1:
before_outs = before_outs.view(
before_outs.size(0), self.odim, -1
) # (B, odim, Lmax)
if self.postnet is not None:
after_outs = before_outs + self.postnet(before_outs) # (B, odim, Lmax)
else:
after_outs = before_outs
before_outs = before_outs.transpose(2, 1) # (B, Lmax, odim)
after_outs = after_outs.transpose(2, 1) # (B, Lmax, odim)
logits = logits
# apply activation function for scaling
if self.output_activation_fn is not None:
before_outs = self.output_activation_fn(before_outs)
after_outs = self.output_activation_fn(after_outs)
return after_outs, before_outs, logits, att_ws
    def inference(
        self,
        h,
        trans_token,
        threshold=0.5,
        minlenratio=0.0,
        maxlenratio=30.0,
        use_att_constraint=False,
        use_dynamic_filter=True,
        backward_window=1,
        forward_window=3,
    ):
        """Generate the sequence of features given the sequences of characters.

        Args:
            h (Tensor): Input sequence of encoder hidden states (T, C).
            trans_token (Tensor): Global transition token for duration.
                If ``None``, the plain attention call signature is used.
            threshold (float, optional): Threshold to stop generation.
            minlenratio (float, optional): Minimum length ratio.
                If set to 1.0 and the length of input is 10,
                the minimum length of outputs will be 10 * 1 = 10.
            maxlenratio (float, optional): Maximum length ratio.
                If set to 10 and the length of input is 10,
                the maximum length of outputs will be 10 * 10 = 100.
            use_att_constraint (bool):
                Whether to apply attention constraint introduced in `Deep Voice 3`_.
            use_dynamic_filter (bool):
                Whether to apply dynamic filter introduced in `Singing Tacotron`_.
            backward_window (int): Backward window size in attention constraint.
            forward_window (int): Forward window size in attention constraint.

        Returns:
            Tensor: Output sequence of features (L, odim).
            Tensor: Output sequence of stop probabilities (L,).
            Tensor: Attention weights (L, T).

        Note:
            This computation is performed in auto-regressive manner.

        .. _`Deep Voice 3`: https://arxiv.org/abs/1710.07654
        .. _`Singing Tacotron`: https://arxiv.org/abs/2202.07907
        """
        # setup
        assert len(h.size()) == 2
        hs = h.unsqueeze(0)
        ilens = [h.size(0)]
        maxlen = int(h.size(0) * maxlenratio)
        minlen = int(h.size(0) * minlenratio)
        # initialize hidden states of decoder
        c_list = [self._zero_state(hs)]
        z_list = [self._zero_state(hs)]
        for _ in range(1, len(self.lstm)):
            c_list += [self._zero_state(hs)]
            z_list += [self._zero_state(hs)]
        prev_out = hs.new_zeros(1, self.odim)
        # initialize attention
        prev_att_w = None
        self.att.reset()
        # setup for attention constraint / dynamic filter: both track the
        # last attended encoder index to window the next attention step
        if use_att_constraint or use_dynamic_filter:
            last_attended_idx = 0
        else:
            last_attended_idx = None
        # loop for an output sequence (auto-regressive; one step emits r frames)
        idx = 0
        outs, att_ws, probs = [], [], []
        while True:
            # updated index
            idx += self.reduction_factor
            # decoder calculation
            if self.use_att_extra_inputs:
                att_c, att_w = self.att(
                    hs,
                    ilens,
                    z_list[0],
                    prev_att_w,
                    prev_out,
                    last_attended_idx=last_attended_idx,
                    backward_window=backward_window,
                    forward_window=forward_window,
                )
            else:
                if trans_token is None:
                    att_c, att_w = self.att(
                        hs,
                        ilens,
                        z_list[0],
                        prev_att_w,
                        last_attended_idx=last_attended_idx,
                        backward_window=backward_window,
                        forward_window=forward_window,
                    )
                else:  # GDCA: attention conditioned on the transition token
                    att_c, att_w = self.att(
                        hs,
                        ilens,
                        trans_token,
                        z_list[0],
                        prev_att_w,
                        last_attended_idx=last_attended_idx,
                        backward_window=backward_window,
                        forward_window=forward_window,
                    )
            att_ws += [att_w]
            prenet_out = self.prenet(prev_out) if self.prenet is not None else prev_out
            xs = torch.cat([att_c, prenet_out], dim=1)
            z_list[0], c_list[0] = self.lstm[0](xs, (z_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                z_list[i], c_list[i] = self.lstm[i](
                    z_list[i - 1], (z_list[i], c_list[i])
                )
            zcs = (
                torch.cat([z_list[-1], att_c], dim=1)
                if self.use_concate
                else z_list[-1]
            )
            outs += [self.feat_out(zcs).view(1, self.odim, -1)]  # [(1, odim, r), ...]
            probs += [torch.sigmoid(self.prob_out(zcs))[0]]  # [(r), ...]
            if self.output_activation_fn is not None:
                prev_out = self.output_activation_fn(outs[-1][:, :, -1])  # (1, odim)
            else:
                prev_out = outs[-1][:, :, -1]  # (1, odim)
            if self.cumulate_att_w and prev_att_w is not None:
                prev_att_w = prev_att_w + att_w  # Note: error when use +=
            else:
                prev_att_w = att_w
            if use_att_constraint or use_dynamic_filter:
                last_attended_idx = int(att_w.argmax())
            # check whether to finish generation: stop token fired or max length
            if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
                # check minimum length
                if idx < minlen:
                    continue
                outs = torch.cat(outs, dim=2)  # (1, odim, L)
                if self.postnet is not None:
                    outs = outs + self.postnet(outs)  # (1, odim, L)
                outs = outs.transpose(2, 1).squeeze(0)  # (L, odim)
                probs = torch.cat(probs, dim=0)
                att_ws = torch.cat(att_ws, dim=0)
                break
        if self.output_activation_fn is not None:
            outs = self.output_activation_fn(outs)
        return outs, probs, att_ws
def calculate_all_attentions(self, hs, hlens, ys):
"""Calculate all of the attention weights.
Args:
hs (Tensor): Batch of the sequences of padded hidden states (B, Tmax, idim).
hlens (LongTensor): Batch of lengths of each input batch (B,).
trans_token (Tensor): Global transition token for duration (B x Tmax x 1)
ys (Tensor):
Batch of the sequences of padded target features (B, Lmax, odim).
Returns:
numpy.ndarray: Batch of attention weights (B, Lmax, Tmax).
Note:
This computation is performed in teacher-forcing manner.
"""
# thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
if self.reduction_factor > 1:
ys = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
# length list should be list of int
hlens = list(map(int, hlens))
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = hs.new_zeros(hs.size(0), self.odim)
# initialize attention
prev_att_w = None
self.att.reset()
# loop for an output sequence
att_ws = []
for y in ys.transpose(0, 1):
if self.use_att_extra_inputs:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w, prev_out)
else:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
att_ws += [att_w]
prenet_out = self.prenet(prev_out) if self.prenet is not None else prev_out
xs = torch.cat([att_c, prenet_out], dim=1)
z_list[0], c_list[0] = self.lstm[0](xs, (z_list[0], c_list[0]))
for i in range(1, len(self.lstm)):
z_list[i], c_list[i] = self.lstm[i](
z_list[i - 1], (z_list[i], c_list[i])
)
prev_out = y # teacher forcing
if self.cumulate_att_w and prev_att_w is not None:
prev_att_w = prev_att_w + att_w # Note: error when use +=
else:
prev_att_w = att_w
att_ws = torch.stack(att_ws, dim=1) # (B, Lmax, Tmax)
return att_ws
| 17,363 | 37.932735 | 88 | py |
espnet | espnet-master/espnet2/svs/xiaoice/loss.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Copyright 2023 Renmin University of China (Yuning Wu)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""XiaoiceSing2 related loss module for ESPnet2."""
from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import ( # noqa: H301
DurationPredictorLoss,
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class XiaoiceSing2Loss(torch.nn.Module):
    """Loss function module for XiaoiceSing2.

    Combines mel-spectrogram, duration, log-F0 and voiced/unvoiced (VUV)
    losses, with optional (weighted) masking of padded positions.
    """

    def __init__(self, use_masking: bool = True, use_weighted_masking: bool = False):
        """Initialize XiaoiceSing2 loss module.

        Args:
            use_masking (bool): Whether to apply masking for padded part in loss
                calculation.
            use_weighted_masking (bool): Whether to weighted masking in loss
                calculation.
        """
        assert check_argument_types()
        super().__init__()
        # the two masking modes are mutually exclusive
        assert (use_masking != use_weighted_masking) or not use_masking
        self.use_masking = use_masking
        self.use_weighted_masking = use_weighted_masking
        # define criterions; "none" keeps per-element losses so the weighted
        # masking branch can re-normalize them before summing
        reduction = "none" if self.use_weighted_masking else "mean"
        self.l1_criterion = torch.nn.L1Loss(reduction=reduction)
        self.mse_criterion = torch.nn.MSELoss(reduction=reduction)
        self.bce_criterion = torch.nn.BCEWithLogitsLoss(reduction=reduction)
        self.duration_criterion = DurationPredictorLoss(reduction=reduction)

    def forward(
        self,
        after_outs: torch.Tensor,
        before_outs: torch.Tensor,
        d_outs: torch.Tensor,
        p_outs: torch.Tensor,
        v_outs: torch.Tensor,
        ys: torch.Tensor,
        ds: torch.Tensor,
        ps: torch.Tensor,
        vs: torch.Tensor,
        ilens: torch.Tensor,
        olens: torch.Tensor,
        loss_type: str = "L2",
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Calculate forward propagation.

        Args:
            after_outs (Tensor): Batch of outputs after postnets (B, T_feats, odim).
            before_outs (Tensor): Batch of outputs before postnets (B, T_feats, odim).
            d_outs (LongTensor): Batch of outputs of duration predictor (B, T_text).
            p_outs (Tensor): Batch of outputs of log_f0 (B, T_feats, 1).
            v_outs (Tensor): Batch of outputs of VUV (B, T_feats, 1).
            ys (Tensor): Batch of target features (B, T_feats, odim).
            ds (LongTensor): Batch of durations (B, T_text).
            ps (Tensor): Batch of target log_f0 (B, T_feats, 1).
            vs (Tensor): Batch of target VUV (B, T_feats, 1).
            ilens (LongTensor): Batch of the lengths of each input (B,).
            olens (LongTensor): Batch of the lengths of each target (B,).
            loss_type (str): Mel criterion, "L1" (MAE) or anything else for L2 (MSE).

        Returns:
            Tensor: Mel loss value.
            Tensor: Duration predictor loss value.
            Tensor: Pitch predictor loss value.
            Tensor: VUV predictor loss value.
        """
        # apply mask to remove padded part
        if self.use_masking:
            out_masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            before_outs = before_outs.masked_select(out_masks)
            if after_outs is not None:
                after_outs = after_outs.masked_select(out_masks)
            ys = ys.masked_select(out_masks)
            duration_masks = make_non_pad_mask(ilens).to(ys.device)
            d_outs = d_outs.masked_select(duration_masks)
            ds = ds.masked_select(duration_masks)
            # pitch/VUV are frame-level, so they share the mel frame mask
            p_outs = p_outs.masked_select(out_masks)
            v_outs = v_outs.masked_select(out_masks)
            ps = ps.masked_select(out_masks)
            vs = vs.masked_select(out_masks)
        # calculate loss
        if loss_type == "L1":
            mel_loss = self.l1_criterion(before_outs, ys)
            if after_outs is not None:
                mel_loss += self.l1_criterion(after_outs, ys)
        else:
            mel_loss = self.mse_criterion(before_outs, ys)
            if after_outs is not None:
                mel_loss += self.mse_criterion(after_outs, ys)
        duration_loss = self.duration_criterion(d_outs, ds)
        pitch_loss = self.mse_criterion(p_outs, ps)
        vuv_loss = self.bce_criterion(v_outs, vs.float())
        # make weighted mask and apply it
        if self.use_weighted_masking:
            out_masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            out_weights = out_masks.float() / out_masks.sum(dim=1, keepdim=True).float()
            out_weights /= ys.size(0) * ys.size(2)
            duration_masks = make_non_pad_mask(ilens).to(ys.device)
            duration_weights = (
                duration_masks.float() / duration_masks.sum(dim=1, keepdim=True).float()
            )
            duration_weights /= ds.size(0)
            # FIX: pitch_masks/pitch_weights were previously undefined here,
            # raising NameError whenever use_weighted_masking was enabled.
            # Pitch/VUV are frame-level single-channel targets: reuse the frame
            # mask and normalize per-batch over the single channel.
            pitch_masks = out_masks
            pitch_weights = (
                pitch_masks.float() / pitch_masks.sum(dim=1, keepdim=True).float()
            )
            pitch_weights /= ps.size(0)
            # apply weight
            mel_loss = mel_loss.mul(out_weights).masked_select(out_masks).sum()
            duration_loss = (
                duration_loss.mul(duration_weights).masked_select(duration_masks).sum()
            )
            pitch_loss = pitch_loss.mul(pitch_weights).masked_select(pitch_masks).sum()
            vuv_loss = vuv_loss.mul(pitch_weights).masked_select(pitch_masks).sum()
        return mel_loss, duration_loss, pitch_loss, vuv_loss
| 5,424 | 40.730769 | 88 | py |
espnet | espnet-master/espnet2/svs/xiaoice/XiaoiceSing.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Copyright 2021 Renmin University of China (Shuai Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""XiaoiceSing related modules."""
import logging
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.svs.abs_svs import AbsSVS
from espnet2.svs.xiaoice.loss import XiaoiceSing2Loss
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.torch_utils.initialize import initialize
from espnet.nets.pytorch_backend.conformer.encoder import ( # noqa: H301
Encoder as ConformerEncoder,
)
from espnet.nets.pytorch_backend.e2e_tts_fastspeech import (
FeedForwardTransformerLoss as XiaoiceSingLoss, # NOQA
)
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import DurationPredictor
from espnet.nets.pytorch_backend.fastspeech.length_regulator import LengthRegulator
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask, make_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import ( # noqa: H301
Encoder as TransformerEncoder,
)
class XiaoiceSing(AbsSVS):
"""XiaoiceSing module for Singing Voice Synthesis.
This is a module of XiaoiceSing. A high-quality singing voice synthesis system which
employs an integrated network for spectrum, F0 and duration modeling. It follows the
main architecture of FastSpeech while proposing some singing-specific design:
1) Add features from musical score (e.g.note pitch and length)
2) Add a residual connection in F0 prediction to attenuate off-key issues
3) The duration of all the phonemes in a musical note is accumulated to
calculate the syllable duration loss for rhythm enhancement (syllable loss)
.. _`XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System`:
https://arxiv.org/pdf/2006.06261.pdf
"""
    def __init__(
        self,
        # network structure related
        idim: int,
        odim: int,
        midi_dim: int = 129,
        duration_dim: int = 500,
        embed_dim: int = 512,
        adim: int = 384,
        aheads: int = 4,
        elayers: int = 6,
        eunits: int = 1536,
        dlayers: int = 6,
        dunits: int = 1536,
        postnet_layers: int = 5,
        postnet_chans: int = 512,
        postnet_filts: int = 5,
        postnet_dropout_rate: float = 0.5,
        positionwise_layer_type: str = "conv1d",
        positionwise_conv_kernel_size: int = 1,
        use_scaled_pos_enc: bool = True,
        use_batch_norm: bool = True,
        encoder_normalize_before: bool = True,
        decoder_normalize_before: bool = True,
        encoder_concat_after: bool = False,
        decoder_concat_after: bool = False,
        duration_predictor_layers: int = 2,
        duration_predictor_chans: int = 384,
        duration_predictor_kernel_size: int = 3,
        duration_predictor_dropout_rate: float = 0.1,
        reduction_factor: int = 1,
        encoder_type: str = "transformer",
        decoder_type: str = "transformer",
        transformer_enc_dropout_rate: float = 0.1,
        transformer_enc_positional_dropout_rate: float = 0.1,
        transformer_enc_attn_dropout_rate: float = 0.1,
        transformer_dec_dropout_rate: float = 0.1,
        transformer_dec_positional_dropout_rate: float = 0.1,
        transformer_dec_attn_dropout_rate: float = 0.1,
        # only for conformer
        conformer_rel_pos_type: str = "legacy",
        conformer_pos_enc_layer_type: str = "rel_pos",
        conformer_self_attn_layer_type: str = "rel_selfattn",
        conformer_activation_type: str = "swish",
        use_macaron_style_in_conformer: bool = True,
        use_cnn_in_conformer: bool = True,
        zero_triu: bool = False,
        conformer_enc_kernel_size: int = 7,
        conformer_dec_kernel_size: int = 31,
        # extra embedding related
        spks: Optional[int] = None,
        langs: Optional[int] = None,
        spk_embed_dim: Optional[int] = None,
        spk_embed_integration_type: str = "add",
        # training related
        init_type: str = "xavier_uniform",
        init_enc_alpha: float = 1.0,
        init_dec_alpha: float = 1.0,
        use_masking: bool = False,
        use_weighted_masking: bool = False,
        loss_function: str = "XiaoiceSing2",  # FastSpeech1, XiaoiceSing2
        loss_type: str = "L1",
        lambda_mel: float = 1,
        lambda_dur: float = 0.1,
        lambda_pitch: float = 0.01,
        lambda_vuv: float = 0.01,
    ):
        """Initialize XiaoiceSing module.

        Args:
            idim (int): Dimension of the inputs (phone-label vocabulary size).
            odim (int): Dimension of the outputs.
            midi_dim (int): MIDI-pitch vocabulary size used by the midi
                embedding layer.
            duration_dim (int): Duration vocabulary size used by the duration
                embedding layer.
            embed_dim (int): Dimension of phone/midi/duration embeddings.
            adim (int): Attention dimension of encoder/decoder.
            aheads (int): Number of attention heads.
            elayers (int): Number of encoder layers.
            eunits (int): Number of encoder hidden units.
            dlayers (int): Number of decoder layers.
            dunits (int): Number of decoder hidden units.
            postnet_layers (int): Number of postnet layers.
            postnet_chans (int): Number of postnet channels.
            postnet_filts (int): Kernel size of postnet.
            postnet_dropout_rate (float): Dropout rate in postnet.
            use_scaled_pos_enc (bool): Whether to use trainable scaled pos encoding.
            use_batch_norm (bool): Whether to use batch normalization in encoder prenet.
            encoder_normalize_before (bool): Whether to apply layernorm layer before
                encoder block.
            decoder_normalize_before (bool): Whether to apply layernorm layer before
                decoder block.
            encoder_concat_after (bool): Whether to concatenate attention layer's input
                and output in encoder.
            decoder_concat_after (bool): Whether to concatenate attention layer's input
                and output in decoder.
            duration_predictor_layers (int): Number of duration predictor layers.
            duration_predictor_chans (int): Number of duration predictor channels.
            duration_predictor_kernel_size (int): Kernel size of duration predictor.
            duration_predictor_dropout_rate (float): Dropout rate in duration predictor.
            reduction_factor (int): Reduction factor.
            encoder_type (str): Encoder type ("transformer" or "conformer").
            decoder_type (str): Decoder type ("transformer" or "conformer").
            transformer_enc_dropout_rate (float): Dropout rate in encoder except
                attention and positional encoding.
            transformer_enc_positional_dropout_rate (float): Dropout rate after encoder
                positional encoding.
            transformer_enc_attn_dropout_rate (float): Dropout rate in encoder
                self-attention module.
            transformer_dec_dropout_rate (float): Dropout rate in decoder except
                attention & positional encoding.
            transformer_dec_positional_dropout_rate (float): Dropout rate after decoder
                positional encoding.
            transformer_dec_attn_dropout_rate (float): Dropout rate in decoder
                self-attention module.
            spks (Optional[int]): Number of speakers. If set to > 1, assume that the
                sids will be provided as the input and use sid embedding layer.
            langs (Optional[int]): Number of languages. If set to > 1, assume that the
                lids will be provided as the input and use sid embedding layer.
            spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
                assume that spembs will be provided as the input.
            spk_embed_integration_type: How to integrate speaker embedding.
            init_type (str): How to initialize transformer parameters.
            init_enc_alpha (float): Initial value of alpha in scaled pos encoding of the
                encoder.
            init_dec_alpha (float): Initial value of alpha in scaled pos encoding of the
                decoder.
            use_masking (bool): Whether to apply masking for padded part in loss
                calculation.
            use_weighted_masking (bool): Whether to apply weighted masking in loss
                calculation.
            loss_function (str): Loss functions ("FastSpeech1" or "XiaoiceSing2")
            loss_type (str): Loss type ("L1" (MAE) or "L2" (MSE))
            lambda_mel (float): Loss scaling coefficient for Mel loss.
            lambda_dur (float): Loss scaling coefficient for duration loss.
            lambda_pitch (float): Loss scaling coefficient for pitch loss.
            lambda_vuv (float): Loss scaling coefficient for VUV loss.
        """
        assert check_argument_types()
        super().__init__()
        # store hyperparameters
        self.idim = idim
        self.midi_dim = midi_dim
        self.duration_dim = duration_dim
        self.odim = odim
        self.embed_dim = embed_dim
        # the last phone-label id is reserved as <eos>
        self.eos = idim - 1
        self.reduction_factor = reduction_factor
        self.encoder_type = encoder_type
        self.decoder_type = decoder_type
        self.use_scaled_pos_enc = use_scaled_pos_enc
        self.loss_function = loss_function
        self.loss_type = loss_type
        self.lambda_mel = lambda_mel
        self.lambda_dur = lambda_dur
        self.lambda_pitch = lambda_pitch
        self.lambda_vuv = lambda_vuv
        # use idx 0 as padding idx
        self.padding_idx = 0
        # get positional encoding class
        pos_enc_class = (
            ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
        )
        # check relative positional encoding compatibility
        if "conformer" in [encoder_type, decoder_type]:
            if conformer_rel_pos_type == "legacy":
                if conformer_pos_enc_layer_type == "rel_pos":
                    conformer_pos_enc_layer_type = "legacy_rel_pos"
                    logging.warning(
                        "Fallback to conformer_pos_enc_layer_type = 'legacy_rel_pos' "
                        "due to the compatibility. If you want to use the new one, "
                        "please use conformer_pos_enc_layer_type = 'latest'."
                    )
                if conformer_self_attn_layer_type == "rel_selfattn":
                    conformer_self_attn_layer_type = "legacy_rel_selfattn"
                    logging.warning(
                        "Fallback to "
                        "conformer_self_attn_layer_type = 'legacy_rel_selfattn' "
                        "due to the compatibility. If you want to use the new one, "
                        "please use conformer_pos_enc_layer_type = 'latest'."
                    )
            elif conformer_rel_pos_type == "latest":
                assert conformer_pos_enc_layer_type != "legacy_rel_pos"
                assert conformer_self_attn_layer_type != "legacy_rel_selfattn"
            else:
                raise ValueError(f"Unknown rel_pos_type: {conformer_rel_pos_type}")
        # define encoder input embeddings: phone, midi and duration embeddings
        # are summed elementwise before entering the encoder (see forward())
        self.phone_encode_layer = torch.nn.Embedding(
            num_embeddings=idim, embedding_dim=embed_dim, padding_idx=self.padding_idx
        )
        self.midi_encode_layer = torch.nn.Embedding(
            num_embeddings=midi_dim,
            embedding_dim=embed_dim,
            padding_idx=self.padding_idx,
        )
        self.duration_encode_layer = torch.nn.Embedding(
            num_embeddings=duration_dim,
            embedding_dim=embed_dim,
            padding_idx=self.padding_idx,
        )
        if encoder_type == "transformer":
            self.encoder = TransformerEncoder(
                idim=0,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=eunits,
                num_blocks=elayers,
                input_layer=None,
                dropout_rate=transformer_enc_dropout_rate,
                positional_dropout_rate=transformer_enc_positional_dropout_rate,
                attention_dropout_rate=transformer_enc_attn_dropout_rate,
                pos_enc_class=pos_enc_class,
                normalize_before=encoder_normalize_before,
                concat_after=encoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            )
        elif encoder_type == "conformer":
            # NOTE(review): this branch passes idim=idim while the transformer
            # branch and both decoder branches pass idim=0; with
            # input_layer=None it looks unused -- confirm against the
            # ConformerEncoder implementation.
            self.encoder = ConformerEncoder(
                idim=idim,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=eunits,
                num_blocks=elayers,
                input_layer=None,
                dropout_rate=transformer_enc_dropout_rate,
                positional_dropout_rate=transformer_enc_positional_dropout_rate,
                attention_dropout_rate=transformer_enc_attn_dropout_rate,
                normalize_before=encoder_normalize_before,
                concat_after=encoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
                macaron_style=use_macaron_style_in_conformer,
                pos_enc_layer_type=conformer_pos_enc_layer_type,
                selfattention_layer_type=conformer_self_attn_layer_type,
                activation_type=conformer_activation_type,
                use_cnn_module=use_cnn_in_conformer,
                cnn_module_kernel=conformer_enc_kernel_size,
                zero_triu=zero_triu,
            )
        else:
            raise ValueError(f"{encoder_type} is not supported.")
        # define spk and lang embedding
        self.spks = None
        if spks is not None and spks > 1:
            self.spks = spks
            self.sid_emb = torch.nn.Embedding(spks, adim)
        self.langs = None
        if langs is not None and langs > 1:
            self.langs = langs
            self.lid_emb = torch.nn.Embedding(langs, adim)
        # define additional projection for speaker embedding
        self.spk_embed_dim = None
        if spk_embed_dim is not None and spk_embed_dim > 0:
            self.spk_embed_dim = spk_embed_dim
            self.spk_embed_integration_type = spk_embed_integration_type
        if self.spk_embed_dim is not None:
            if self.spk_embed_integration_type == "add":
                self.projection = torch.nn.Linear(self.spk_embed_dim, adim)
            else:
                self.projection = torch.nn.Linear(adim + self.spk_embed_dim, adim)
        # define duration predictor
        self.duration_predictor = DurationPredictor(
            idim=adim,
            n_layers=duration_predictor_layers,
            n_chans=duration_predictor_chans,
            kernel_size=duration_predictor_kernel_size,
            dropout_rate=duration_predictor_dropout_rate,
        )
        # define length regulator
        self.length_regulator = LengthRegulator()
        # define decoder
        # NOTE: we use encoder as decoder
        # because fastspeech's decoder is the same as encoder
        if decoder_type == "transformer":
            self.decoder = TransformerEncoder(
                idim=0,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=dunits,
                num_blocks=dlayers,
                input_layer=None,
                dropout_rate=transformer_dec_dropout_rate,
                positional_dropout_rate=transformer_dec_positional_dropout_rate,
                attention_dropout_rate=transformer_dec_attn_dropout_rate,
                pos_enc_class=pos_enc_class,
                normalize_before=decoder_normalize_before,
                concat_after=decoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            )
        elif decoder_type == "conformer":
            self.decoder = ConformerEncoder(
                idim=0,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=dunits,
                num_blocks=dlayers,
                input_layer=None,
                dropout_rate=transformer_dec_dropout_rate,
                positional_dropout_rate=transformer_dec_positional_dropout_rate,
                attention_dropout_rate=transformer_dec_attn_dropout_rate,
                normalize_before=decoder_normalize_before,
                concat_after=decoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
                macaron_style=use_macaron_style_in_conformer,
                pos_enc_layer_type=conformer_pos_enc_layer_type,
                selfattention_layer_type=conformer_self_attn_layer_type,
                activation_type=conformer_activation_type,
                use_cnn_module=use_cnn_in_conformer,
                cnn_module_kernel=conformer_dec_kernel_size,
            )
        else:
            raise ValueError(f"{decoder_type} is not supported.")
        # define final projection: odim * r mel channels plus one log-F0 and
        # one VUV channel (split in forward() via split_with_sizes)
        self.linear_projection = torch.nn.Linear(adim, odim * reduction_factor + 2)
        # define postnet
        self.postnet = (
            None
            if postnet_layers == 0
            else Postnet(
                idim=idim,
                odim=odim,
                n_layers=postnet_layers,
                n_chans=postnet_chans,
                n_filts=postnet_filts,
                use_batch_norm=use_batch_norm,
                dropout_rate=postnet_dropout_rate,
            )
        )
        # initialize parameters
        self._reset_parameters(
            init_type=init_type,
            init_enc_alpha=init_enc_alpha,
            init_dec_alpha=init_dec_alpha,
        )
        # define criterions
        if self.loss_function == "FastSpeech1":
            self.criterion = XiaoiceSingLoss(
                use_masking=use_masking, use_weighted_masking=use_weighted_masking
            )
        elif self.loss_function == "XiaoiceSing2":
            self.criterion = XiaoiceSing2Loss(
                use_masking=use_masking, use_weighted_masking=use_weighted_masking
            )
        else:
            raise ValueError(f"{self.loss_function} is not supported.")
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
feats: torch.Tensor,
feats_lengths: torch.Tensor,
label: Optional[Dict[str, torch.Tensor]] = None,
label_lengths: Optional[Dict[str, torch.Tensor]] = None,
melody: Optional[Dict[str, torch.Tensor]] = None,
melody_lengths: Optional[Dict[str, torch.Tensor]] = None,
pitch: Optional[torch.Tensor] = None,
pitch_lengths: Optional[torch.Tensor] = None,
duration: Optional[Dict[str, torch.Tensor]] = None,
duration_lengths: Optional[Dict[str, torch.Tensor]] = None,
slur: torch.LongTensor = None,
slur_lengths: torch.Tensor = None,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
joint_training: bool = False,
flag_IsValid=False,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
"""Calculate forward propagation.
Args:
text (LongTensor): Batch of padded character ids (B, T_text).
text_lengths (LongTensor): Batch of lengths of each input (B,).
feats (Tensor): Batch of padded target features (B, T_feats, odim).
feats_lengths (LongTensor): Batch of the lengths of each target (B,).
label (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of padded label ids (B, Tmax).
label_lengths (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of the lengths of padded label ids (B, ).
melody (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of padded melody (B, Tmax).
melody_lengths (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of the lengths of padded melody (B, ).
pitch (FloatTensor): Batch of padded f0 (B, Tmax).
pitch_lengths (LongTensor): Batch of the lengths of padded f0 (B, ).
duration (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
value (LongTensor): Batch of padded duration (B, Tmax).
duration_length (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
value (LongTensor): Batch of the lengths of padded duration (B, ).
slur (LongTensor): Batch of padded slur (B, Tmax).
slur_lengths (LongTensor): Batch of the lengths of padded slur (B, ).
spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
lids (Optional[Tensor]): Batch of language IDs (B, 1).
joint_training (bool): Whether to perform joint training with vocoder.
Returns:
Tensor: Loss scalar value.
Dict: Statistics to be monitored.
Tensor: Weight value if not joint training else model outputs.
"""
if joint_training:
label = label
midi = melody
label_lengths = label_lengths
midi_lengths = melody_lengths
duration_lengths = duration_lengths
ds = duration
else:
label = label["score"]
midi = melody["score"]
duration_ = duration["score_phn"]
label_lengths = label_lengths["score"]
midi_lengths = melody_lengths["score"]
duration_lengths = duration_lengths["score_phn"]
ds = duration["lab"]
feats = feats[:, : feats_lengths.max()] # for data-parallel
midi = midi[:, : midi_lengths.max()] # for data-parallel
label = label[:, : label_lengths.max()] # for data-parallel
duration_ = duration_[:, : duration_lengths.max()] # for data-parallel
if self.loss_function == "XiaoiceSing2":
pitch = pitch[:, : pitch_lengths.max()]
log_f0 = torch.clamp(pitch, min=0)
vuv = log_f0 != 0
batch_size = text.size(0)
label_emb = self.phone_encode_layer(label)
midi_emb = self.midi_encode_layer(midi)
duration_emb = self.duration_encode_layer(duration_)
input_emb = label_emb + midi_emb + duration_emb
x_masks = self._source_mask(label_lengths)
hs, _ = self.encoder(input_emb, x_masks) # (B, T_text, adim)
# integrate with SID and LID embeddings
if self.spks is not None:
sid_embs = self.sid_emb(sids.view(-1))
hs = hs + sid_embs.unsqueeze(1)
if self.langs is not None:
lid_embs = self.lid_emb(lids.view(-1))
hs = hs + lid_embs.unsqueeze(1)
# integrate speaker embedding
if self.spk_embed_dim is not None:
hs = self._integrate_with_spk_embed(hs, spembs)
# forward duration predictor and length regulator
d_masks = make_pad_mask(label_lengths).to(input_emb.device)
d_outs = self.duration_predictor(hs, d_masks) # (B, T_text)
hs = self.length_regulator(hs, ds) # (B, T_feats, adim)
# forward decoder
h_masks = self._source_mask(feats_lengths)
zs, _ = self.decoder(hs, h_masks) # (B, T_feats, adim)
before_outs, log_f0_outs, vuv_outs = self.linear_projection(
zs
).split_with_sizes([self.odim * self.reduction_factor, 1, 1], dim=2)
before_outs = before_outs.view(zs.size(0), -1, self.odim) # (B. T_feats, odim)
# postnet -> (B, Lmax//r * r, odim)
if self.postnet is None:
after_outs = before_outs
else:
after_outs = before_outs + self.postnet(
before_outs.transpose(1, 2)
).transpose(1, 2)
# modifiy mod part of groundtruth
if self.reduction_factor > 1:
assert feats_lengths.ge(
self.reduction_factor
).all(), "Output length must be greater than or equal to reduction factor."
olens = feats_lengths.new(
[olen - olen % self.reduction_factor for olen in feats_lengths]
)
max_olen = max(olens)
ys = feats[:, :max_olen]
if self.loss_function == "XiaoiceSing2":
log_f0 = log_f0[:, :max_olen]
vuv = vuv[:, :max_olen]
else:
ys = feats
olens = feats_lengths
ilens = label_lengths
if self.loss_function == "FastSpeech1":
mel_loss, duration_loss = self.criterion(
after_outs, before_outs, d_outs, ys, ds, ilens, olens
)
elif self.loss_function == "XiaoiceSing2":
mel_loss, duration_loss, pitch_loss, vuv_loss = self.criterion(
after_outs=after_outs,
before_outs=before_outs,
d_outs=d_outs,
p_outs=log_f0_outs,
v_outs=vuv_outs,
ys=ys,
ds=ds,
ps=log_f0,
vs=vuv,
ilens=ilens,
olens=olens,
loss_type=self.loss_type,
)
mel_loss = mel_loss * self.lambda_mel
duration_loss = duration_loss * self.lambda_dur
loss = mel_loss + duration_loss
stats = dict(mel_loss=mel_loss.item(), duration_loss=duration_loss.item())
if self.loss_function == "XiaoiceSing2":
pitch_loss = pitch_loss * self.lambda_pitch
vuv_loss = vuv_loss * self.lambda_vuv
loss += pitch_loss + vuv_loss
stats["pitch_loss"] = pitch_loss.item()
stats["vuv_loss"] = vuv_loss.item()
stats["loss"] = loss.item()
# report extra information
if self.encoder_type == "transformer" and self.use_scaled_pos_enc:
stats.update(
encoder_alpha=self.encoder.embed[-1].alpha.data.item(),
)
if self.decoder_type == "transformer" and self.use_scaled_pos_enc:
stats.update(
decoder_alpha=self.decoder.embed[-1].alpha.data.item(),
)
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
if joint_training:
return loss, stats, after_outs if after_outs is not None else before_outs
else:
if flag_IsValid is False:
return loss, stats, weight
else:
return loss, stats, weight, after_outs[:, : olens.max()], ys, olens
    def inference(
        self,
        text: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        label: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        pitch: Optional[torch.Tensor] = None,
        duration: Optional[Dict[str, torch.Tensor]] = None,
        slur: Optional[Dict[str, torch.Tensor]] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        use_teacher_forcing: bool = False,
        joint_training: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Generate the sequence of features given the sequences of characters.
        Args:
            text (LongTensor): Input sequence of characters (T_text,).
            feats (Optional[Tensor]): Feature sequence to extract style (N, idim).
            label (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded label ids (Tmax).
            melody (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded melody (Tmax).
            pitch (FloatTensor): Batch of padded f0 (B, Tmax).
            duration (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
                value (LongTensor): Batch of padded duration (Tmax).
            slur (LongTensor): Batch of padded slur (B, Tmax).
            spembs (Optional[Tensor]): Speaker embedding (spk_embed_dim,).
            sids (Optional[Tensor]): Speaker ID (1,).
            lids (Optional[Tensor]): Language ID (1,).
            use_teacher_forcing (bool): Unused in this implementation; kept for
                API compatibility with other SVS models.
            joint_training (bool): If True, use "lab" durations instead of
                the score-level "score_phn" durations.
        Returns:
            Dict[str, Tensor]: Output dict including the following items:
                * feat_gen (Tensor): Output sequence of features (T_feats, odim).
        """
        # inference always conditions on the score-derived label/midi streams
        label = label["score"]
        midi = melody["score"]
        if joint_training:
            duration_ = duration["lab"]
        else:
            duration_ = duration["score_phn"]
        # ground-truth (label-derived) durations; only logged below for debugging
        ds = duration["lab"]
        # embed the three score streams and fuse them additively
        label_emb = self.phone_encode_layer(label)
        midi_emb = self.midi_encode_layer(midi)
        duration_emb = self.duration_encode_layer(duration_)
        input_emb = label_emb + midi_emb + duration_emb
        # no padding mask needed at inference (single utterance)
        x_masks = None  # self._source_mask(label_lengths)
        hs, _ = self.encoder(input_emb, x_masks)  # (B, T_text, adim)
        # integrate with SID and LID embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)
        # forward duration predictor and length regulator
        d_masks = None  # make_pad_mask(label_lengths).to(input_emb.device)
        d_outs = self.duration_predictor.inference(hs, d_masks)  # (B, T_text)
        # round predicted float durations to integer frame counts
        d_outs_int = torch.floor(d_outs + 0.5).to(dtype=torch.long)  # (B, T_text)
        logging.info(f"ds: {ds}")
        logging.info(f"ds.shape: {ds.shape}")
        logging.info(f"d_outs: {d_outs}")
        logging.info(f"d_outs.shape: {d_outs.shape}")
        # use duration model output
        hs = self.length_regulator(hs, d_outs_int)  # (B, T_feats, adim)
        # forward decoder
        h_masks = None  # self._source_mask(feats_lengths)
        zs, _ = self.decoder(hs, h_masks)  # (B, T_feats, adim)
        # projection emits mel + log-f0 + vuv channels; only mel is used here
        before_outs, _, _ = self.linear_projection(zs).split_with_sizes(
            [self.odim * self.reduction_factor, 1, 1], dim=2
        )
        before_outs = before_outs.view(zs.size(0), -1, self.odim)  # (B. T_feats, odim)
        # (B, T_feats, odim)
        # postnet -> (B, Lmax//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            # postnet operates channel-first, so transpose around the call
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)
        return dict(
            feat_gen=after_outs[0], prob=None, att_w=None
        )  # outs, probs, att_ws
def _integrate_with_spk_embed(
self, hs: torch.Tensor, spembs: torch.Tensor
) -> torch.Tensor:
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, T_text, adim).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, T_text, adim).
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds and then apply projection
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
def _source_mask(self, ilens: torch.Tensor) -> torch.Tensor:
"""Make masks for self-attention.
Args:
ilens (LongTensor): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for self-attention.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
>>> ilens = [5, 3]
>>> self._source_mask(ilens)
tensor([[[1, 1, 1, 1, 1],
[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
return x_masks.unsqueeze(-2)
    def _reset_parameters(
        self, init_type: str, init_enc_alpha: float, init_dec_alpha: float
    ):
        """Initialize network parameters and scaled-positional-encoding alphas.

        Args:
            init_type: Initialization scheme name; "pytorch" keeps the
                framework defaults, anything else is delegated to the
                ``initialize`` helper.
            init_enc_alpha: Initial alpha of the encoder's scaled positional
                encoding (applied only for a transformer encoder).
            init_dec_alpha: Initial alpha of the decoder's scaled positional
                encoding (applied only for a transformer decoder).
        """
        # initialize parameters
        if init_type != "pytorch":
            initialize(self, init_type)
        # initialize alpha in scaled positional encoding
        # (the scaled positional encoding is the last module of `embed`)
        if self.encoder_type == "transformer" and self.use_scaled_pos_enc:
            self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
        if self.decoder_type == "transformer" and self.use_scaled_pos_enc:
            self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
| 33,849 | 43.422572 | 88 | py |
espnet | espnet-master/espnet2/svs/feats_extract/score_feats_extract.py | from typing import Any, Dict, Optional, Tuple, Union
import torch
from typeguard import check_argument_types
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
def ListsToTensor(xs):
    """Right-pad a list of sequences with zeros to a common length.

    Args:
        xs: Sequence of sequences (possibly of different lengths).

    Returns:
        List of lists, each padded with 0 up to the length of the longest
        input sequence. An empty input yields an empty list (the original
        implementation raised ValueError via ``max()`` on an empty input).
    """
    max_len = max((len(x) for x in xs), default=0)
    return [list(x) + [0] * (max_len - len(x)) for x in xs]
class FrameScoreFeats(AbsFeatsExtract):
    """Frame-level score feature extractor.

    Aggregates sample-level label/midi/duration sequences into frame-level
    sequences, using the same framing scheme (win_length / hop_length /
    center padding) as ``torch.stft`` so that the score features stay
    aligned with spectrogram frames.
    """

    def __init__(
        self,
        fs: Union[int, str] = 22050,
        n_fft: int = 1024,
        win_length: int = 512,
        hop_length: int = 128,
        window: str = "hann",
        center: bool = True,
    ):
        if win_length is None:
            win_length = n_fft
        assert check_argument_types()
        super().__init__()
        self.fs = fs
        self.n_fft = n_fft
        self.win_length = win_length
        self.hop_length = hop_length
        self.window = window
        self.center = center

    def extra_repr(self):
        return (
            f"win_length={self.win_length}, "
            f"hop_length={self.hop_length}, "
            f"center={self.center}, "
        )

    def output_size(self) -> int:
        return 1

    def get_parameters(self) -> Dict[str, Any]:
        """Return the extractor configuration as a dict."""
        # NOTE: fixed `center=self.stft.center` -> `center=self.center`;
        # this class has no `stft` attribute (only `self.center` is set in
        # __init__), so the old code raised AttributeError when called.
        return dict(
            fs=self.fs,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            window=self.window,
            win_length=self.win_length,
            center=self.center,
        )

    def label_aggregate(
        self, input: torch.Tensor, input_lengths: torch.Tensor = None
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """label_aggregate function.

        Aggregate sample-level labels into one value per frame (the value at
        the center of each analysis window).

        Args:
            input: (Batch, Nsamples, Label_dim)
            input_lengths: (Batch)

        Returns:
            output: (Batch, Frames)
            olens: (Batch) frame lengths, or None if input_lengths is None.
        """
        bs = input.size(0)
        max_length = input.size(1)
        label_dim = input.size(2)

        # NOTE(jiatong):
        # The default behaviour of label aggregation is compatible with
        # torch.stft about framing and padding.

        # Step1: center padding (reflect the edge windows like torch.stft)
        if self.center:
            pad = self.win_length // 2
            max_length = max_length + 2 * pad
            input = torch.nn.functional.pad(input, (0, 0, pad, pad), "constant", 0)
            input[:, :pad, :] = input[:, pad : (2 * pad), :]
            input[:, (max_length - pad) : max_length, :] = input[
                :, (max_length - 2 * pad) : (max_length - pad), :
            ]
        nframe = (max_length - self.win_length) // self.hop_length + 1

        # Step2: framing via a zero-copy strided view (B, Frames, win, dim)
        output = input.as_strided(
            (bs, nframe, self.win_length, label_dim),
            (max_length * label_dim, self.hop_length * label_dim, label_dim, 1),
        )

        # Step3: aggregate label — take the center sample of each window
        _tmp = output.sum(dim=-1, keepdim=False).float()
        output = _tmp[:, :, self.win_length // 2]

        # Step4: process lengths and zero out padded frames
        if input_lengths is not None:
            if self.center:
                pad = self.win_length // 2
                input_lengths = input_lengths + 2 * pad
            olens = (input_lengths - self.win_length) // self.hop_length + 1
            output.masked_fill_(make_pad_mask(olens, output, 1), 0.0)
        else:
            olens = None

        return output, olens

    def forward(
        self,
        label: Optional[torch.Tensor] = None,
        label_lengths: Optional[torch.Tensor] = None,
        midi: Optional[torch.Tensor] = None,
        midi_lengths: Optional[torch.Tensor] = None,
        duration: Optional[torch.Tensor] = None,
        duration_lengths: Optional[torch.Tensor] = None,
    ) -> Tuple[
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
    ]:
        """FrameScoreFeats forward function.

        Args:
            label: (Batch, Nsamples)
            label_lengths: (Batch)
            midi: (Batch, Nsamples)
            midi_lengths: (Batch)
            duration: (Batch, Nsamples)
            duration_lengths: (Batch)

        Returns:
            output: (Batch, Frames)
        """
        label, label_lengths = self.label_aggregate(label, label_lengths)
        midi, midi_lengths = self.label_aggregate(midi, midi_lengths)
        duration, duration_lengths = self.label_aggregate(duration, duration_lengths)
        return (
            label,
            label_lengths,
            midi,
            midi_lengths,
            duration,
            duration_lengths,
        )
class SyllableScoreFeats(AbsFeatsExtract):
    """Segment-level score feature extractor.

    Collapses frame-level label/midi/duration sequences into one value per
    segment, where a new segment starts wherever the label or the midi
    sequence changes.
    """

    def __init__(
        self,
        fs: Union[int, str] = 22050,
        n_fft: int = 1024,
        win_length: int = 512,
        hop_length: int = 128,
        window: str = "hann",
        center: bool = True,
    ):
        if win_length is None:
            win_length = n_fft
        assert check_argument_types()
        super().__init__()
        self.fs = fs
        self.n_fft = n_fft
        self.win_length = win_length
        self.hop_length = hop_length
        self.window = window
        self.center = center

    def extra_repr(self):
        return (
            f"win_length={self.win_length}, "
            f"hop_length={self.hop_length}, "
            f"center={self.center}, "
        )

    def output_size(self) -> int:
        return 1

    def get_parameters(self) -> Dict[str, Any]:
        """Return the extractor configuration as a dict."""
        # NOTE: fixed `center=self.stft.center` -> `center=self.center`;
        # this class has no `stft` attribute (only `self.center` is set in
        # __init__), so the old code raised AttributeError when called.
        return dict(
            fs=self.fs,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            window=self.window,
            win_length=self.win_length,
            center=self.center,
        )

    def get_segments(
        self,
        label: Optional[torch.Tensor] = None,
        label_lengths: Optional[torch.Tensor] = None,
        midi: Optional[torch.Tensor] = None,
        midi_lengths: Optional[torch.Tensor] = None,
        duration: Optional[torch.Tensor] = None,
        duration_lengths: Optional[torch.Tensor] = None,
    ):
        """Segment one utterance at label/midi change points.

        Returns a 6-tuple:
            (seg_label, length, seg_midi, length, seg_duration, length)
        where each seg_* is a Python list with one (center) value per segment.
        """
        # collect boundary indices where the label changes
        seq = [0]
        for i in range(label_lengths):
            if label[seq[-1]] != label[i]:
                seq.append(i)
        seq.append(label_lengths.item())

        # also collect boundaries where the midi changes
        seq.append(0)
        for i in range(midi_lengths):
            if midi[seq[-1]] != midi[i]:
                seq.append(i)
        seq.append(midi_lengths.item())

        # merge, deduplicate, and sort the boundaries
        seq = list(set(seq))
        seq.sort()

        lengths = len(seq) - 1
        seg_label = []
        seg_midi = []
        seg_duration = []
        for i in range(lengths):
            # represent each segment by the value at its center position
            l, r = seq[i], seq[i + 1]
            tmp_label = label[l:r][(r - l) // 2]
            tmp_midi = midi[l:r][(r - l) // 2]
            tmp_duration = duration[l:r][(r - l) // 2]
            seg_label.append(tmp_label.item())
            seg_midi.append(tmp_midi.item())
            seg_duration.append(tmp_duration.item())

        return (
            seg_label,
            lengths,
            seg_midi,
            lengths,
            seg_duration,
            lengths,
        )

    def forward(
        self,
        label: Optional[torch.Tensor] = None,
        label_lengths: Optional[torch.Tensor] = None,
        midi: Optional[torch.Tensor] = None,
        midi_lengths: Optional[torch.Tensor] = None,
        duration: Optional[torch.Tensor] = None,
        duration_lengths: Optional[torch.Tensor] = None,
    ) -> Tuple[
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
    ]:
        """SyllableScoreFeats forward function.

        Args:
            label: (Batch, Nsamples)
            label_lengths: (Batch)
            midi: (Batch, Nsamples)
            midi_lengths: (Batch)
            duration: (Batch, Nsamples)
            duration_lengths: (Batch)

        Returns:
            output: (Batch, Frames)
        """
        assert label.shape == midi.shape and midi.shape == duration.shape
        assert (
            label_lengths.shape == midi_lengths.shape
            and midi_lengths.shape == duration_lengths.shape
        )

        bs = label.size(0)
        seg_label, seg_label_lengths = [], []
        seg_midi, seg_midi_lengths = [], []
        seg_duration, seg_duration_lengths = [], []

        for i in range(bs):
            seg = self.get_segments(
                label=label[i],
                label_lengths=label_lengths[i],
                midi=midi[i],
                midi_lengths=midi_lengths[i],
                duration=duration[i],
                duration_lengths=duration_lengths[i],
            )
            seg_label.append(seg[0])
            seg_label_lengths.append(seg[1])
            seg_midi.append(seg[2])
            seg_midi_lengths.append(seg[3])
            # NOTE: fixed `seg[6]` / `seg[7]` -> `seg[4]` / `seg[5]`;
            # get_segments returns a 6-tuple, so the old indices raised
            # IndexError on every call.
            seg_duration.append(seg[4])
            seg_duration_lengths.append(seg[5])

        seg_label = torch.LongTensor(ListsToTensor(seg_label)).to(label.device)
        seg_label_lengths = torch.LongTensor(seg_label_lengths).to(label.device)
        seg_midi = torch.LongTensor(ListsToTensor(seg_midi)).to(label.device)
        seg_midi_lengths = torch.LongTensor(seg_midi_lengths).to(label.device)
        seg_duration = torch.LongTensor(ListsToTensor(seg_duration)).to(label.device)
        seg_duration_lengths = torch.LongTensor(seg_duration_lengths).to(label.device)

        return (
            seg_label,
            seg_label_lengths,
            seg_midi,
            seg_midi_lengths,
            seg_duration,
            seg_duration_lengths,
        )
def expand_to_frame(expand_len, len_size, label, midi, duration):
    """Expand phone-level score features to frame level.

    Phone ``j`` of batch item ``i`` is repeated ``expand_len[i][j]`` times so
    that the label/midi/duration sequences become frame-aligned.

    Args:
        expand_len: Per-item sequences of repeat counts (B, T_phone).
        len_size: Number of valid phones per item (B,).
        label (LongTensor): Batch of padded label ids (B, T_phone).
        midi (LongTensor): Batch of padded midi ids (B, T_phone).
        duration (LongTensor): Batch of padded durations (B, T_phone).

    Returns:
        Tuple: (label, label_lengths, midi, midi_lengths, duration,
        duration_lengths) at frame level; sequences are zero-padded to the
        longest expanded length in the batch.
    """

    def _expand_one(src, i):
        # repeat each phone value by its expand length
        frames = []
        for j in range(len_size[i]):
            frames.extend([src[i][j]] * int(expand_len[i][j]))
        return frames

    def _pad_to_tensor(seqs):
        # right-pad with zeros to a common length; `default=0` keeps an
        # empty batch from crashing (the old ListsToTensor call raised
        # ValueError via max() on empty input)
        longest = max((len(s) for s in seqs), default=0)
        padded = [list(s) + [0] * (longest - len(s)) for s in seqs]
        return torch.LongTensor(padded).to(label.device)

    bs = label.size(0)
    lengths = [sum(expand_len[i]) for i in range(bs)]

    seq_label = _pad_to_tensor([_expand_one(label, i) for i in range(bs)])
    seq_midi = _pad_to_tensor([_expand_one(midi, i) for i in range(bs)])
    seq_duration = _pad_to_tensor([_expand_one(duration, i) for i in range(bs)])
    seq_label_lengths = torch.LongTensor(lengths).to(label.device)
    seq_midi_lengths = torch.LongTensor(lengths).to(label.device)
    seq_duration_lengths = torch.LongTensor(lengths).to(label.device)

    return (
        seq_label,
        seq_label_lengths,
        seq_midi,
        seq_midi_lengths,
        seq_duration,
        seq_duration_lengths,
    )
| 11,227 | 29.51087 | 86 | py |
espnet | espnet-master/espnet2/svs/naive_rnn/naive_rnn.py | # Copyright 2021 Carnegie Mellon University (Jiatong Shi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Naive-SVS related modules."""
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.svs.abs_svs import AbsSVS
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.torch_utils.initialize import initialize
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet
class NaiveRNNLoss(torch.nn.Module):
    """Loss module for NaiveRNN-SVS: L1 + MSE on pre- and post-net outputs."""

    def __init__(self, use_masking=True, use_weighted_masking=False):
        """Initialize the loss module.

        Args:
            use_masking (bool): Whether to drop padded frames from the loss.
            use_weighted_masking (bool): Whether to weight the loss by the
                per-utterance valid-frame ratio instead of plain masking.
                Mutually exclusive with ``use_masking``.
        """
        super(NaiveRNNLoss, self).__init__()
        assert (use_masking != use_weighted_masking) or not use_masking
        self.use_masking = use_masking
        self.use_weighted_masking = use_weighted_masking

        # with weighted masking the reduction is done manually in forward()
        reduction = "none" if self.use_weighted_masking else "mean"
        self.l1_criterion = torch.nn.L1Loss(reduction=reduction)
        self.mse_criterion = torch.nn.MSELoss(reduction=reduction)

    def forward(self, after_outs, before_outs, ys, olens):
        """Compute the L1 and MSE losses.

        Args:
            after_outs (Tensor): Outputs after postnet (B, Lmax, odim).
            before_outs (Tensor): Outputs before postnet (B, Lmax, odim).
            ys (Tensor): Padded target features (B, Lmax, odim).
            olens (LongTensor): Lengths of each target (B,).

        Returns:
            Tensor: L1 loss value.
            Tensor: Mean square error loss value.
        """
        # drop padded positions before computing the criteria
        if self.use_masking:
            masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            ys = ys.masked_select(masks)
            after_outs = after_outs.masked_select(masks)
            before_outs = before_outs.masked_select(masks)

        # both the pre-net and post-net outputs are penalized against ys
        l1_after = self.l1_criterion(after_outs, ys)
        l1_before = self.l1_criterion(before_outs, ys)
        l1_loss = l1_after + l1_before
        mse_after = self.mse_criterion(after_outs, ys)
        mse_before = self.mse_criterion(before_outs, ys)
        mse_loss = mse_after + mse_before

        # weight each frame by its utterance's valid-frame ratio, then reduce
        if self.use_weighted_masking:
            masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            weights = masks.float() / masks.sum(dim=1, keepdim=True).float()
            out_weights = weights.div(ys.size(0) * ys.size(2))
            l1_loss = l1_loss.mul(out_weights).masked_select(masks).sum()
            mse_loss = mse_loss.mul(out_weights).masked_select(masks).sum()

        return l1_loss, mse_loss
class NaiveRNN(AbsSVS):
    """NaiveRNN-SVS module.

    This is an implementation of naive RNN for singing voice synthesis.
    The features are processed directly over time-domain from music score
    and predict the singing voice features.
    """

    def __init__(
        self,
        # network structure related
        idim: int,
        odim: int,
        midi_dim: int = 129,
        embed_dim: int = 512,
        eprenet_conv_layers: int = 3,
        eprenet_conv_chans: int = 256,
        eprenet_conv_filts: int = 5,
        elayers: int = 3,
        eunits: int = 1024,
        ebidirectional: bool = True,
        midi_embed_integration_type: str = "add",
        dlayers: int = 3,
        dunits: int = 1024,
        dbidirectional: bool = True,
        postnet_layers: int = 5,
        postnet_chans: int = 256,
        postnet_filts: int = 5,
        use_batch_norm: bool = True,
        reduction_factor: int = 1,
        # extra embedding related
        spks: Optional[int] = None,
        langs: Optional[int] = None,
        spk_embed_dim: Optional[int] = None,
        spk_embed_integration_type: str = "add",
        eprenet_dropout_rate: float = 0.5,
        edropout_rate: float = 0.1,
        ddropout_rate: float = 0.1,
        postnet_dropout_rate: float = 0.5,
        init_type: str = "xavier_uniform",
        use_masking: bool = False,
        use_weighted_masking: bool = False,
        loss_type: str = "L1",
    ):
        """Initialize NaiveRNN module.

        Args: TODO(Yuning)
        """
        assert check_argument_types()
        super().__init__()

        # store hyperparameters
        self.idim = idim
        self.midi_dim = midi_dim
        self.eunits = eunits
        self.odim = odim
        self.eos = idim - 1
        self.reduction_factor = reduction_factor
        self.loss_type = loss_type
        self.midi_embed_integration_type = midi_embed_integration_type

        # use idx 0 as padding idx
        self.padding_idx = 0

        # define input layers (conv prenet or plain embedding)
        if eprenet_conv_layers != 0:
            # encoder prenet
            self.encoder_input_layer = torch.nn.Sequential(
                EncoderPrenet(
                    idim=idim,
                    embed_dim=embed_dim,
                    elayers=0,
                    econv_layers=eprenet_conv_layers,
                    econv_chans=eprenet_conv_chans,
                    econv_filts=eprenet_conv_filts,
                    use_batch_norm=use_batch_norm,
                    dropout_rate=eprenet_dropout_rate,
                    padding_idx=self.padding_idx,
                ),
                torch.nn.Linear(eprenet_conv_chans, eunits),
            )
            self.midi_encoder_input_layer = torch.nn.Sequential(
                EncoderPrenet(
                    idim=midi_dim,
                    embed_dim=embed_dim,
                    elayers=0,
                    econv_layers=eprenet_conv_layers,
                    econv_chans=eprenet_conv_chans,
                    econv_filts=eprenet_conv_filts,
                    use_batch_norm=use_batch_norm,
                    dropout_rate=eprenet_dropout_rate,
                    padding_idx=self.padding_idx,
                ),
                torch.nn.Linear(eprenet_conv_chans, eunits),
            )
        else:
            self.encoder_input_layer = torch.nn.Embedding(
                num_embeddings=idim, embedding_dim=eunits, padding_idx=self.padding_idx
            )
            self.midi_encoder_input_layer = torch.nn.Embedding(
                num_embeddings=midi_dim,
                embedding_dim=eunits,
                padding_idx=self.padding_idx,
            )

        self.encoder = torch.nn.LSTM(
            input_size=eunits,
            hidden_size=eunits,
            num_layers=elayers,
            batch_first=True,
            dropout=edropout_rate,
            bidirectional=ebidirectional,
        )

        self.midi_encoder = torch.nn.LSTM(
            input_size=eunits,
            hidden_size=eunits,
            num_layers=elayers,
            batch_first=True,
            dropout=edropout_rate,
            bidirectional=ebidirectional,
        )

        dim_direction = 2 if ebidirectional is True else 1
        if self.midi_embed_integration_type == "add":
            self.midi_projection = torch.nn.Linear(
                eunits * dim_direction, eunits * dim_direction
            )
        else:
            # NOTE: fixed `torch.nn.linear` (lowercase) -> `torch.nn.Linear`;
            # the old code raised AttributeError whenever
            # midi_embed_integration_type was "concat".
            self.midi_projection = torch.nn.Linear(
                2 * eunits * dim_direction, eunits * dim_direction
            )

        # NOTE(review): self.decoder is defined but never called in forward()
        # or inference() below — confirm whether this is intentional.
        self.decoder = torch.nn.LSTM(
            input_size=eunits,
            hidden_size=eunits,
            num_layers=dlayers,
            batch_first=True,
            dropout=ddropout_rate,
            bidirectional=dbidirectional,
        )

        # define spk and lang embedding
        self.spks = None
        if spks is not None and spks > 1:
            self.spks = spks
            self.sid_emb = torch.nn.Embedding(spks, eunits * dim_direction)
        self.langs = None
        if langs is not None and langs > 1:
            # TODO(Yuning): not encode yet
            self.langs = langs
            self.lid_emb = torch.nn.Embedding(langs, eunits * dim_direction)

        # define projection layer
        self.spk_embed_dim = None
        if spk_embed_dim is not None and spk_embed_dim > 0:
            self.spk_embed_dim = spk_embed_dim
            self.spk_embed_integration_type = spk_embed_integration_type
        if self.spk_embed_dim is not None:
            if self.spk_embed_integration_type == "add":
                self.projection = torch.nn.Linear(
                    self.spk_embed_dim, eunits * dim_direction
                )
            else:
                self.projection = torch.nn.Linear(
                    eunits * dim_direction + self.spk_embed_dim, eunits * dim_direction
                )

        # define final projection
        self.feat_out = torch.nn.Linear(eunits * dim_direction, odim * reduction_factor)

        # define postnet
        self.postnet = (
            None
            if postnet_layers == 0
            else Postnet(
                idim=idim,
                odim=odim,
                n_layers=postnet_layers,
                n_chans=postnet_chans,
                n_filts=postnet_filts,
                use_batch_norm=use_batch_norm,
                dropout_rate=postnet_dropout_rate,
            )
        )

        # define loss function
        self.criterion = NaiveRNNLoss(
            use_masking=use_masking,
            use_weighted_masking=use_weighted_masking,
        )

        # initialize parameters
        self._reset_parameters(
            init_type=init_type,
        )

    def _reset_parameters(self, init_type):
        # initialize parameters
        if init_type != "pytorch":
            initialize(self, init_type)

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        label: Optional[Dict[str, torch.Tensor]] = None,
        label_lengths: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        melody_lengths: Optional[Dict[str, torch.Tensor]] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        duration: Optional[Dict[str, torch.Tensor]] = None,
        duration_lengths: Optional[Dict[str, torch.Tensor]] = None,
        slur: torch.LongTensor = None,
        slur_lengths: torch.Tensor = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        flag_IsValid=False,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Calculate forward propagation.

        Args:
            text (LongTensor): Batch of padded character ids (B, Tmax).
            text_lengths (LongTensor): Batch of lengths of each input batch (B,).
            feats (Tensor): Batch of padded target features (B, Lmax, odim).
            feats_lengths (LongTensor): Batch of the lengths of each target (B,).
            label (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded label ids (B, Tmax).
            label_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded label ids (B, ).
            melody (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded melody (B, Tmax).
            melody_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded melody (B, ).
            pitch (FloatTensor): Batch of padded f0 (B, Tmax).
            pitch_lengths (LongTensor): Batch of the lengths of padded f0 (B, ).
            duration (Optional[Dict]): key is "lab", "score";
                value (LongTensor): Batch of padded duration (B, Tmax).
            duration_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded duration (B, ).
            slur (LongTensor): Batch of padded slur (B, Tmax).
            slur_lengths (LongTensor): Batch of the lengths of padded slur (B, ).
            spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
            lids (Optional[Tensor]): Batch of language IDs (B, 1).
            flag_IsValid (bool): True during validation (adds outputs to the
                returned tuple for visualization).

        Returns:
            Tensor: Loss scalar value.
            Dict: Statistics to be monitored.
            Tensor: Weight value if not joint training else model outputs.
        """
        label = label["lab"]
        midi = melody["lab"]
        label_lengths = label_lengths["lab"]
        midi_lengths = melody_lengths["lab"]

        text = text[:, : text_lengths.max()]  # for data-parallel
        feats = feats[:, : feats_lengths.max()]  # for data-parallel
        midi = midi[:, : midi_lengths.max()]  # for data-parallel
        label = label[:, : label_lengths.max()]  # for data-parallel

        batch_size = feats.size(0)

        label_emb = self.encoder_input_layer(label)  # FIX ME: label Float to Int
        midi_emb = self.midi_encoder_input_layer(midi)

        # pack so the LSTMs skip padded frames
        label_emb = torch.nn.utils.rnn.pack_padded_sequence(
            label_emb, label_lengths.to("cpu"), batch_first=True, enforce_sorted=False
        )
        midi_emb = torch.nn.utils.rnn.pack_padded_sequence(
            midi_emb, midi_lengths.to("cpu"), batch_first=True, enforce_sorted=False
        )

        hs_label, (_, _) = self.encoder(label_emb)
        hs_midi, (_, _) = self.midi_encoder(midi_emb)

        hs_label, _ = torch.nn.utils.rnn.pad_packed_sequence(hs_label, batch_first=True)
        hs_midi, _ = torch.nn.utils.rnn.pad_packed_sequence(hs_midi, batch_first=True)

        if self.midi_embed_integration_type == "add":
            hs = hs_label + hs_midi
            hs = F.leaky_relu(self.midi_projection(hs))
        else:
            hs = torch.cat((hs_label, hs_midi), dim=-1)
            hs = F.leaky_relu(self.midi_projection(hs))

        # integrate spk & lang embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)

        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)

        # (B, T_feats//r, odim * r) -> (B, T_feats//r * r, odim)
        before_outs = F.leaky_relu(self.feat_out(hs).view(hs.size(0), -1, self.odim))

        # postnet -> (B, T_feats//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)

        # modifiy mod part of groundtruth (trim to a multiple of r)
        if self.reduction_factor > 1:
            assert feats_lengths.ge(
                self.reduction_factor
            ).all(), "Output length must be greater than or equal to reduction factor."
            olens = feats_lengths.new(
                [olen - olen % self.reduction_factor for olen in feats_lengths]
            )
            max_olen = max(olens)
            ys = feats[:, :max_olen]
        else:
            ys = feats
            olens = feats_lengths

        # calculate loss values
        l1_loss, l2_loss = self.criterion(
            after_outs[:, : olens.max()], before_outs[:, : olens.max()], ys, olens
        )

        if self.loss_type == "L1":
            loss = l1_loss
        elif self.loss_type == "L2":
            loss = l2_loss
        elif self.loss_type == "L1+L2":
            loss = l1_loss + l2_loss
        else:
            raise ValueError("unknown --loss-type " + self.loss_type)

        stats = dict(
            loss=loss.item(),
            l1_loss=l1_loss.item(),
            l2_loss=l2_loss.item(),
        )

        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        if flag_IsValid is False:
            # training stage
            return loss, stats, weight
        else:
            # validation stage
            return loss, stats, weight, after_outs[:, : olens.max()], ys, olens

    def inference(
        self,
        text: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        label: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        pitch: Optional[torch.Tensor] = None,
        duration: Optional[Dict[str, torch.Tensor]] = None,
        slur: Optional[Dict[str, torch.Tensor]] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        use_teacher_forcing: bool = False,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Generate features from a single utterance's score.

        Args:
            text (LongTensor): Batch of padded character ids (Tmax).
            feats (Tensor): Batch of padded target features (Lmax, odim).
            label (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded label ids (Tmax).
            melody (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded melody (Tmax).
            pitch (FloatTensor): Batch of padded f0 (Tmax).
            slur (LongTensor): Batch of padded slur (B, Tmax).
            duration (Optional[Dict]): key is "lab", "score";
                value (LongTensor): Batch of padded duration (Tmax).
            spembs (Optional[Tensor]): Batch of speaker embeddings (spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (1).
            lids (Optional[Tensor]): Batch of language IDs (1).
            use_teacher_forcing (bool): Unused in this implementation; kept
                for API compatibility.

        Returns:
            Dict[str, Tensor]: Output dict including the following items:
                * feat_gen (Tensor): Output sequence of features (T_feats, odim).
        """
        label = label["lab"]
        midi = melody["lab"]

        label_emb = self.encoder_input_layer(label)
        midi_emb = self.midi_encoder_input_layer(midi)

        hs_label, (_, _) = self.encoder(label_emb)
        hs_midi, (_, _) = self.midi_encoder(midi_emb)

        if self.midi_embed_integration_type == "add":
            hs = hs_label + hs_midi
            hs = F.leaky_relu(self.midi_projection(hs))
        else:
            hs = torch.cat((hs_label, hs_midi), dim=-1)
            hs = F.leaky_relu(self.midi_projection(hs))

        # integrate spk & lang embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)

        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)

        # (B, T_feats//r, odim * r) -> (B, T_feats//r * r, odim)
        before_outs = F.leaky_relu(self.feat_out(hs).view(hs.size(0), -1, self.odim))

        # postnet -> (B, T_feats//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)

        return dict(
            feat_gen=after_outs[0], prob=None, att_w=None
        )  # outs, probs, att_ws

    def _integrate_with_spk_embed(
        self, hs: torch.Tensor, spembs: torch.Tensor
    ) -> torch.Tensor:
        """Integrate speaker embedding with hidden states.

        Args:
            hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).
            spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).

        Returns:
            Tensor: Batch of integrated hidden state sequences (B, Tmax, adim).

        Raises:
            NotImplementedError: If the integration type is neither
                "add" nor "concat".
        """
        if self.spk_embed_integration_type == "add":
            # apply projection and then add to hidden states
            spembs = self.projection(F.normalize(spembs))
            hs = hs + spembs.unsqueeze(1)
        elif self.spk_embed_integration_type == "concat":
            # concat hidden states with spk embeds and then apply projection
            spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
            hs = self.projection(torch.cat([hs, spembs], dim=-1))
        else:
            raise NotImplementedError("support only add or concat.")

        return hs
| 21,296 | 38.005495 | 88 | py |
espnet | espnet-master/espnet2/svs/naive_rnn/naive_rnn_dp.py | # Copyright 2021 Carnegie Mellon University (Jiatong Shi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""NaiveRNN-DP-SVS related modules."""
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.svs.abs_svs import AbsSVS
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.torch_utils.initialize import initialize
from espnet.nets.pytorch_backend.e2e_tts_fastspeech import (
FeedForwardTransformerLoss as FastSpeechLoss,
)
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import DurationPredictor
from espnet.nets.pytorch_backend.fastspeech.length_regulator import LengthRegulator
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet
class NaiveRNNDP(AbsSVS):
    """NaiveRNNDP-SVS module.

    This is an implementation of a naive RNN with duration prediction
    for singing voice synthesis.

    Phoneme labels, MIDI pitch, and note durations from the music score
    are embedded and encoded by three parallel (bi-)LSTM encoders, fused
    (by addition or concatenation), expanded to frame level by a length
    regulator driven by a duration predictor, and decoded by another
    (bi-)LSTM (plus optional postnet) into acoustic features.
    """

    def __init__(
        self,
        # network structure related
        idim: int,
        odim: int,
        midi_dim: int = 129,
        embed_dim: int = 512,
        duration_dim: int = 500,
        eprenet_conv_layers: int = 3,
        eprenet_conv_chans: int = 256,
        eprenet_conv_filts: int = 5,
        elayers: int = 3,
        eunits: int = 1024,
        ebidirectional: bool = True,
        midi_embed_integration_type: str = "add",
        dlayers: int = 3,
        dunits: int = 1024,
        dbidirectional: bool = True,
        postnet_layers: int = 5,
        postnet_chans: int = 256,
        postnet_filts: int = 5,
        use_batch_norm: bool = True,
        duration_predictor_layers: int = 2,
        duration_predictor_chans: int = 384,
        duration_predictor_kernel_size: int = 3,
        duration_predictor_dropout_rate: float = 0.1,
        reduction_factor: int = 1,
        # extra embedding related
        spks: Optional[int] = None,
        langs: Optional[int] = None,
        spk_embed_dim: Optional[int] = None,
        spk_embed_integration_type: str = "add",
        eprenet_dropout_rate: float = 0.5,
        edropout_rate: float = 0.1,
        ddropout_rate: float = 0.1,
        postnet_dropout_rate: float = 0.5,
        init_type: str = "xavier_uniform",
        use_masking: bool = False,
        use_weighted_masking: bool = False,
    ):
        """Initialize NaiveRNN module.

        Args:
            idim (int): Size of the label (phoneme) vocabulary; also used as
                the embedding table size of the label input layer.
            odim (int): Dimension of the output acoustic features.
            midi_dim (int): Size of the MIDI pitch vocabulary.
            embed_dim (int): Embedding dimension used inside the prenets.
            duration_dim (int): Size of the duration-token vocabulary.
            eprenet_conv_layers (int): Number of prenet conv layers;
                0 switches the input layers to plain embeddings.
            elayers/eunits/ebidirectional: Encoder LSTM configuration
                (shared by the label, midi, and duration encoders).
            midi_embed_integration_type (str): "add" or "concat"; how the
                three encoder outputs are fused.
            dlayers/dunits/dbidirectional: Decoder LSTM configuration.
            postnet_*: Postnet configuration; postnet_layers == 0 disables it.
            duration_predictor_*: Duration predictor configuration.
            reduction_factor (int): Output frame reduction factor.
            spks/langs/spk_embed_dim/spk_embed_integration_type: Optional
                speaker / language conditioning configuration.
            init_type (str): Parameter initialization method
                ("pytorch" keeps the default initialization).
            use_masking / use_weighted_masking (bool): Loss masking options
                forwarded to FastSpeechLoss.
        """
        assert check_argument_types()
        super().__init__()

        # store hyperparameters
        self.idim = idim
        self.midi_dim = midi_dim
        self.duration_dim = duration_dim
        self.eunits = eunits
        self.odim = odim
        self.eos = idim - 1
        self.reduction_factor = reduction_factor
        self.midi_embed_integration_type = midi_embed_integration_type

        # use idx 0 as padding idx
        self.padding_idx = 0

        # define transformer encoder
        # Input layers: conv prenets (+ linear projection to eunits) when
        # eprenet_conv_layers != 0, otherwise plain embedding tables.
        if eprenet_conv_layers != 0:
            # encoder prenet
            self.encoder_input_layer = torch.nn.Sequential(
                EncoderPrenet(
                    idim=idim,
                    embed_dim=embed_dim,
                    elayers=0,
                    econv_layers=eprenet_conv_layers,
                    econv_chans=eprenet_conv_chans,
                    econv_filts=eprenet_conv_filts,
                    use_batch_norm=use_batch_norm,
                    dropout_rate=eprenet_dropout_rate,
                    padding_idx=self.padding_idx,
                ),
                torch.nn.Linear(eprenet_conv_chans, eunits),
            )
            self.midi_encoder_input_layer = torch.nn.Sequential(
                EncoderPrenet(
                    idim=midi_dim,
                    embed_dim=embed_dim,
                    elayers=0,
                    econv_layers=eprenet_conv_layers,
                    econv_chans=eprenet_conv_chans,
                    econv_filts=eprenet_conv_filts,
                    use_batch_norm=use_batch_norm,
                    dropout_rate=eprenet_dropout_rate,
                    padding_idx=self.padding_idx,
                ),
                torch.nn.Linear(eprenet_conv_chans, eunits),
            )
            # NOTE(review): this prenet is sized with midi_dim rather than
            # duration_dim, unlike the Embedding fallback below — confirm
            # whether duration tokens can exceed midi_dim here.
            self.duration_encoder_input_layer = torch.nn.Sequential(
                EncoderPrenet(
                    idim=midi_dim,
                    embed_dim=embed_dim,
                    elayers=0,
                    econv_layers=eprenet_conv_layers,
                    econv_chans=eprenet_conv_chans,
                    econv_filts=eprenet_conv_filts,
                    use_batch_norm=use_batch_norm,
                    dropout_rate=eprenet_dropout_rate,
                    padding_idx=self.padding_idx,
                ),
                torch.nn.Linear(eprenet_conv_chans, eunits),
            )
        else:
            self.encoder_input_layer = torch.nn.Embedding(
                num_embeddings=idim, embedding_dim=eunits, padding_idx=self.padding_idx
            )
            self.midi_encoder_input_layer = torch.nn.Embedding(
                num_embeddings=midi_dim,
                embedding_dim=eunits,
                padding_idx=self.padding_idx,
            )
            self.duration_encoder_input_layer = torch.nn.Embedding(
                num_embeddings=duration_dim,
                embedding_dim=eunits,
                padding_idx=self.padding_idx,
            )

        # Three parallel encoders with identical configuration.
        self.encoder = torch.nn.LSTM(
            input_size=eunits,
            hidden_size=eunits,
            num_layers=elayers,
            batch_first=True,
            dropout=edropout_rate,
            bidirectional=ebidirectional,
            # proj_size=eunits,
        )

        self.midi_encoder = torch.nn.LSTM(
            input_size=eunits,
            hidden_size=eunits,
            num_layers=elayers,
            batch_first=True,
            dropout=edropout_rate,
            bidirectional=ebidirectional,
            # proj_size=eunits,
        )

        self.duration_encoder = torch.nn.LSTM(
            input_size=eunits,
            hidden_size=eunits,
            num_layers=elayers,
            batch_first=True,
            dropout=edropout_rate,
            bidirectional=ebidirectional,
            # proj_size=eunits,
        )

        # Bidirectional LSTMs double the feature dimension.
        dim_direction = 2 if ebidirectional is True else 1

        # Fusion of the three encoder streams ("add" keeps the dimension,
        # "concat" triples it before projecting back down).
        if self.midi_embed_integration_type == "add":
            self.midi_projection = torch.nn.Linear(
                eunits * dim_direction, eunits * dim_direction
            )
        else:
            self.midi_projection = torch.nn.Linear(
                3 * eunits * dim_direction, eunits * dim_direction
            )

        # define duration predictor
        self.duration_predictor = DurationPredictor(
            idim=eunits * dim_direction,
            n_layers=duration_predictor_layers,
            n_chans=duration_predictor_chans,
            kernel_size=duration_predictor_kernel_size,
            dropout_rate=duration_predictor_dropout_rate,
        )

        # define length regulator
        self.length_regulator = LengthRegulator()

        self.decoder = torch.nn.LSTM(
            input_size=eunits * dim_direction,
            hidden_size=dunits,
            num_layers=dlayers,
            batch_first=True,
            dropout=ddropout_rate,
            bidirectional=dbidirectional,
            # proj_size=dunits,
        )

        # define spk and lang embedding
        self.spks = None
        if spks is not None and spks > 1:
            self.spks = spks
            self.sid_emb = torch.nn.Embedding(spks, dunits * dim_direction)
        self.langs = None
        if langs is not None and langs > 1:
            # TODO(Yuning): not encode yet
            self.langs = langs
            self.lid_emb = torch.nn.Embedding(langs, dunits * dim_direction)

        # define projection layer
        self.spk_embed_dim = None
        if spk_embed_dim is not None and spk_embed_dim > 0:
            self.spk_embed_dim = spk_embed_dim
            self.spk_embed_integration_type = spk_embed_integration_type
        if self.spk_embed_dim is not None:
            if self.spk_embed_integration_type == "add":
                self.projection = torch.nn.Linear(
                    self.spk_embed_dim, dunits * dim_direction
                )
            else:
                self.projection = torch.nn.Linear(
                    dunits * dim_direction + self.spk_embed_dim, dunits * dim_direction
                )

        # define final projection
        # Predicts `reduction_factor` frames per decoder step.
        self.feat_out = torch.nn.Linear(dunits * dim_direction, odim * reduction_factor)

        # define postnet
        self.postnet = (
            None
            if postnet_layers == 0
            else Postnet(
                idim=idim,
                odim=odim,
                n_layers=postnet_layers,
                n_chans=postnet_chans,
                n_filts=postnet_filts,
                use_batch_norm=use_batch_norm,
                dropout_rate=postnet_dropout_rate,
            )
        )

        # define loss function
        self.criterion = FastSpeechLoss(
            use_masking=use_masking, use_weighted_masking=use_weighted_masking
        )

        # initialize parameters
        self._reset_parameters(
            init_type=init_type,
        )

    def _reset_parameters(self, init_type):
        # initialize parameters
        # "pytorch" keeps the framework's default initialization untouched.
        if init_type != "pytorch":
            initialize(self, init_type)

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        label: Optional[Dict[str, torch.Tensor]] = None,
        label_lengths: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        melody_lengths: Optional[Dict[str, torch.Tensor]] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        duration: Optional[Dict[str, torch.Tensor]] = None,
        duration_lengths: Optional[Dict[str, torch.Tensor]] = None,
        slur: torch.LongTensor = None,
        slur_lengths: torch.Tensor = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        joint_training: bool = False,
        flag_IsValid: bool = False,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Calculate forward propagation.

        Args:
            text (LongTensor): Batch of padded character ids (B, Tmax).
            text_lengths (LongTensor): Batch of lengths of each input batch (B,).
            feats (Tensor): Batch of padded target features (B, Lmax, odim).
            feats_lengths (LongTensor): Batch of the lengths of each target (B,).
            label (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded label ids (B, Tmax).
            label_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded label ids (B, ).
            melody (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded melody (B, Tmax).
            melody_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded melody (B, ).
            pitch (FloatTensor): Batch of padded f0 (B, Tmax).
            pitch_lengths (LongTensor): Batch of the lengths of padded f0 (B, ).
            duration (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
                value (LongTensor): Batch of padded duration (B, Tmax).
            duration_lengths (Optional[Dict]): key is "lab", "score_phn" or
                "score_syb";
                value (LongTensor): Batch of the lengths of padded duration (B, ).
            slur (LongTensor): Batch of padded slur (B, Tmax).
            slur_lengths (LongTensor): Batch of the lengths of padded slur (B, ).
            spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
            lids (Optional[Tensor]): Batch of language IDs (B, 1).
            joint_training (bool): Whether to perform joint training with vocoder.
            flag_IsValid (bool): Validation-stage flag; when True the method
                additionally returns generated features for monitoring.

        Returns:
            Tensor: Loss scalar value.
            Dict: Statistics to be monitored.
            Tensor: Weight value if not joint training else model outputs.
        """
        # Select score-level vs. joint-training inputs.
        # NOTE(review): on the joint_training path `duration_` and
        # `duration_lengths` are never bound before being used below —
        # confirm that the joint-training caller actually reaches this code.
        if joint_training:
            label = label
            midi = melody
            label_lengths = label_lengths
            midi_lengths = melody_lengths
            ds = duration
        else:
            label = label["score"]
            midi = melody["score"]
            duration_ = duration["score_phn"]
            label_lengths = label_lengths["score"]
            midi_lengths = melody_lengths["score"]
            duration_lengths = duration_lengths["score_phn"]
            ds = duration["lab"]

        feats = feats[:, : feats_lengths.max()]  # for data-parallel
        midi = midi[:, : midi_lengths.max()]  # for data-parallel
        label = label[:, : label_lengths.max()]  # for data-parallel
        duration_ = duration_[:, : duration_lengths.max()]  # for data-parallel

        batch_size = feats.size(0)

        # Embed each score stream and pack for the LSTM encoders.
        label_emb = self.encoder_input_layer(label)  # FIX ME: label Float to Int
        midi_emb = self.midi_encoder_input_layer(midi)
        duration_emb = self.duration_encoder_input_layer(duration_)

        label_emb = torch.nn.utils.rnn.pack_padded_sequence(
            label_emb, label_lengths.to("cpu"), batch_first=True, enforce_sorted=False
        )
        midi_emb = torch.nn.utils.rnn.pack_padded_sequence(
            midi_emb, midi_lengths.to("cpu"), batch_first=True, enforce_sorted=False
        )
        # NOTE(review): packed with midi_lengths, not duration_lengths —
        # assumes the duration and midi score sequences are equally long;
        # confirm against the data preparation.
        duration_emb = torch.nn.utils.rnn.pack_padded_sequence(
            duration_emb, midi_lengths.to("cpu"), batch_first=True, enforce_sorted=False
        )

        hs_label, (_, _) = self.encoder(label_emb)
        hs_midi, (_, _) = self.midi_encoder(midi_emb)
        hs_duration, (_, _) = self.duration_encoder(duration_emb)

        hs_label, _ = torch.nn.utils.rnn.pad_packed_sequence(hs_label, batch_first=True)
        hs_midi, _ = torch.nn.utils.rnn.pad_packed_sequence(hs_midi, batch_first=True)
        hs_duration, _ = torch.nn.utils.rnn.pad_packed_sequence(
            hs_duration, batch_first=True
        )

        # Fuse the three encoder streams.
        if self.midi_embed_integration_type == "add":
            hs = hs_label + hs_midi + hs_duration
            hs = F.leaky_relu(self.midi_projection(hs))
        else:
            hs = torch.cat((hs_label, hs_midi, hs_duration), dim=-1)
            hs = F.leaky_relu(self.midi_projection(hs))

        # integrate spk & lang embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)

        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)

        # forward duration predictor and length regulator
        # Training uses ground-truth durations (ds) for expansion; the
        # predictor output d_outs is only consumed by the loss.
        d_masks = make_pad_mask(label_lengths).to(hs.device)

        d_outs = self.duration_predictor(hs, d_masks)  # (B, T_text)
        hs = self.length_regulator(hs, ds)  # (B, seq_len, eunits)

        hs_emb = torch.nn.utils.rnn.pack_padded_sequence(
            hs, feats_lengths.to("cpu"), batch_first=True, enforce_sorted=False
        )

        zs, (_, _) = self.decoder(hs_emb)
        zs, _ = torch.nn.utils.rnn.pad_packed_sequence(zs, batch_first=True)

        # Keep every r-th decoder frame; feat_out expands each back to r frames.
        zs = zs[:, self.reduction_factor - 1 :: self.reduction_factor]
        # (B, T_feats//r, odim * r) -> (B, T_feats//r * r, odim)
        before_outs = F.leaky_relu(self.feat_out(zs).view(zs.size(0), -1, self.odim))

        # postnet -> (B, T_feats//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)

        # modifiy mod part of groundtruth
        # Truncate targets so their lengths are divisible by reduction_factor.
        if self.reduction_factor > 1:
            assert feats_lengths.ge(
                self.reduction_factor
            ).all(), "Output length must be greater than or equal to reduction factor."
            olens = feats_lengths.new(
                [olen - olen % self.reduction_factor for olen in feats_lengths]
            )
            max_olen = max(olens)
            ys = feats[:, :max_olen]
        else:
            ys = feats
            olens = feats_lengths

        # calculate loss values
        ilens = label_lengths

        l1_loss, duration_loss = self.criterion(
            after_outs, before_outs, d_outs, ys, ds, ilens, olens
        )
        loss = l1_loss + duration_loss

        stats = dict(
            loss=loss.item(), l1_loss=l1_loss.item(), duration_loss=duration_loss.item()
        )

        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        if joint_training:
            return loss, stats, after_outs if after_outs is not None else before_outs
        else:
            if flag_IsValid is False:
                # training stage
                return loss, stats, weight
            else:
                # validation stage
                return loss, stats, weight, after_outs[:, : olens.max()], ys, olens

    def inference(
        self,
        text: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        label: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        pitch: Optional[torch.Tensor] = None,
        duration: Optional[Dict[str, torch.Tensor]] = None,
        slur: Optional[Dict[str, torch.Tensor]] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        joint_training: bool = False,
        use_teacher_forcing: torch.Tensor = False,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Generate acoustic features from a music score.

        Unlike :meth:`forward`, durations come from the duration predictor
        (rounded to integers) rather than from ground truth.

        Args:
            text (LongTensor): Batch of padded character ids (Tmax).
            feats (Tensor): Batch of padded target features (Lmax, odim).
            label (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded label ids (Tmax).
            melody (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded melody (Tmax).
            pitch (FloatTensor): Batch of padded f0 (Tmax).
            duration (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
                value (LongTensor): Batch of padded duration (Tmax).
            slur (LongTensor): Batch of padded slur (B, Tmax).
            spembs (Optional[Tensor]): Batch of speaker embeddings (spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (1).
            lids (Optional[Tensor]): Batch of language IDs (1).
            joint_training (bool): Whether this is called within joint training
                (selects "lab" instead of "score_phn" durations).
            use_teacher_forcing: Unused here.

        Returns:
            Dict[str, Tensor]: Output dict including the following items:
                * feat_gen (Tensor): Output sequence of features (T_feats, odim).
        """
        label = label["score"]
        midi = melody["score"]
        if joint_training:
            duration_ = duration["lab"]
        else:
            duration_ = duration["score_phn"]

        label_emb = self.encoder_input_layer(label)  # FIX ME: label Float to Int
        midi_emb = self.midi_encoder_input_layer(midi)
        duration_emb = self.duration_encoder_input_layer(duration_)

        # No packing at inference time: batch size is 1 and no padding exists.
        hs_label, (_, _) = self.encoder(label_emb)
        hs_midi, (_, _) = self.midi_encoder(midi_emb)
        hs_duration, (_, _) = self.duration_encoder(duration_emb)

        if self.midi_embed_integration_type == "add":
            hs = hs_label + hs_midi + hs_duration
            hs = F.leaky_relu(self.midi_projection(hs))
        else:
            hs = torch.cat((hs_label, hs_midi, hs_duration), dim=-1)
            hs = F.leaky_relu(self.midi_projection(hs))

        # integrate spk & lang embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)

        if spembs is not None:
            spembs = spembs.unsqueeze(0)

        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)

        # forward duration predictor and length regulator
        d_masks = None  # make_pad_mask(label_lengths).to(input_emb.device)

        d_outs = self.duration_predictor.inference(hs, d_masks)  # (B, T_text)
        # Round predicted (log-domain-decoded) durations to integer frames.
        d_outs_int = torch.floor(d_outs + 0.5).to(dtype=torch.long)  # (B, T_text)

        hs = self.length_regulator(hs, d_outs_int)  # (B, T_feats, adim)

        zs, (_, _) = self.decoder(hs)
        # NOTE(review): zs.size(0) is the batch dimension; a check against the
        # time dimension (zs.size(1)) may have been intended — confirm.
        if self.reduction_factor > zs.size(0):
            zs = zs[:, :1]  # if too short, use the first frame
        else:
            zs = zs[:, self.reduction_factor - 1 :: self.reduction_factor]
        # (B, T_feats//r, odim * r) -> (B, T_feats//r * r, odim)
        before_outs = F.leaky_relu(self.feat_out(zs).view(zs.size(0), -1, self.odim))

        # postnet -> (B, T_feats//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)

        return dict(
            feat_gen=after_outs[0], prob=None, att_w=None
        )  # outs, probs, att_ws

    def _integrate_with_spk_embed(
        self, hs: torch.Tensor, spembs: torch.Tensor
    ) -> torch.Tensor:
        """Integrate speaker embedding with hidden states.

        Args:
            hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).
            spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).

        Returns:
            Tensor: Batch of integrated hidden state sequences (B, Tmax, adim).
        """
        if self.spk_embed_integration_type == "add":
            # apply projection and then add to hidden states
            spembs = self.projection(F.normalize(spembs))
            hs = hs + spembs.unsqueeze(1)
        elif self.spk_embed_integration_type == "concat":
            # concat hidden states with spk embeds and then apply projection
            spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
            hs = self.projection(torch.cat([hs, spembs], dim=-1))
        else:
            raise NotImplementedError("support only add or concat.")
        return hs
| 23,391 | 38.986325 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/espnet_model.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based text-to-speech ESPnet model."""
from contextlib import contextmanager
from typing import Any, Dict, Optional
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if V(torch.__version__) >= V("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch < 1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
    """ESPnet model for GAN-based text-to-speech task.

    Wraps optional feature/pitch/energy extractors and normalizers around a
    GAN-based TTS module (which must expose ``generator`` and
    ``discriminator`` submodules).
    """

    def __init__(
        self,
        feats_extract: Optional[AbsFeatsExtract],
        normalize: Optional[AbsNormalize and InversibleInterface],
        pitch_extract: Optional[AbsFeatsExtract],
        pitch_normalize: Optional[AbsNormalize and InversibleInterface],
        energy_extract: Optional[AbsFeatsExtract],
        energy_normalize: Optional[AbsNormalize and InversibleInterface],
        tts: AbsGANTTS,
    ):
        """Initialize ESPnetGANTTSModel module.

        Args:
            feats_extract: Acoustic feature extractor applied to raw speech,
                or None when the TTS module consumes raw waveforms only.
            normalize: Invertible normalizer for the extracted features.
            pitch_extract: Pitch (F0) extractor, or None.
            pitch_normalize: Invertible normalizer for pitch.
            energy_extract: Energy extractor, or None.
            energy_normalize: Invertible normalizer for energy.
            tts: GAN-based TTS module with ``generator`` and
                ``discriminator`` attributes.
        """
        assert check_argument_types()
        super().__init__()
        self.feats_extract = feats_extract
        self.normalize = normalize
        self.pitch_extract = pitch_extract
        self.pitch_normalize = pitch_normalize
        self.energy_extract = energy_extract
        self.energy_normalize = energy_normalize
        self.tts = tts
        assert hasattr(
            tts, "generator"
        ), "generator module must be registered as tts.generator"
        assert hasattr(
            tts, "discriminator"
        ), "discriminator module must be registered as tts.discriminator"

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        durations: Optional[torch.Tensor] = None,
        durations_lengths: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        energy_lengths: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        forward_generator: bool = True,
        **kwargs,
    ) -> Dict[str, Any]:
        """Return generator or discriminator loss with dict format.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
            durations (Optional[Tensor]): Duration tensor.
            durations_lengths (Optional[Tensor]): Duration length tensor (B,).
            pitch (Optional[Tensor]): Pitch tensor.
            pitch_lengths (Optional[Tensor]): Pitch length tensor (B,).
            energy (Optional[Tensor]): Energy tensor.
            energy_lengths (Optional[Tensor]): Energy length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker ID tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).
            forward_generator (bool): Whether to forward generator.
            kwargs: "utt_id" is among the input.

        Returns:
            Dict[str, Any]:
                - loss (Tensor): Loss scalar tensor.
                - stats (Dict[str, float]): Statistics to be monitored.
                - weight (Tensor): Weight tensor to summarize losses.
                - optim_idx (int): Optimizer index (0 for G and 1 for D).
        """
        # Feature extraction runs in fp32 even under AMP.
        with autocast(False):
            # Extract features
            feats = None
            # BUGFIX: feats_lengths was previously only bound inside the
            # feats_extract branch, raising NameError below when
            # feats_extract is None but pitch/energy extraction runs.
            feats_lengths = None
            if self.feats_extract is not None:
                feats, feats_lengths = self.feats_extract(
                    speech,
                    speech_lengths,
                )
            # Extract auxiliary targets only when not already provided.
            if self.pitch_extract is not None and pitch is None:
                pitch, pitch_lengths = self.pitch_extract(
                    speech,
                    speech_lengths,
                    feats_lengths=feats_lengths,
                    durations=durations,
                    durations_lengths=durations_lengths,
                )
            if self.energy_extract is not None and energy is None:
                energy, energy_lengths = self.energy_extract(
                    speech,
                    speech_lengths,
                    feats_lengths=feats_lengths,
                    durations=durations,
                    durations_lengths=durations_lengths,
                )

            # Normalize
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)
            if self.pitch_normalize is not None:
                pitch, pitch_lengths = self.pitch_normalize(pitch, pitch_lengths)
            if self.energy_normalize is not None:
                energy, energy_lengths = self.energy_normalize(energy, energy_lengths)

        # Make batch for tts inputs
        batch = dict(
            text=text,
            text_lengths=text_lengths,
            forward_generator=forward_generator,
        )

        # Update batch for additional auxiliary inputs
        if feats is not None:
            batch.update(feats=feats, feats_lengths=feats_lengths)
        if self.tts.require_raw_speech:
            batch.update(speech=speech, speech_lengths=speech_lengths)
        if durations is not None:
            batch.update(durations=durations, durations_lengths=durations_lengths)
        if self.pitch_extract is not None and pitch is not None:
            batch.update(pitch=pitch, pitch_lengths=pitch_lengths)
        if self.energy_extract is not None and energy is not None:
            batch.update(energy=energy, energy_lengths=energy_lengths)
        if spembs is not None:
            batch.update(spembs=spembs)
        if sids is not None:
            batch.update(sids=sids)
        if lids is not None:
            batch.update(lids=lids)

        return self.tts(**batch)

    def collect_feats(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        durations: Optional[torch.Tensor] = None,
        durations_lengths: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        energy_lengths: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Calculate features and return them as a dict.

        Used for statistics collection; always re-extracts pitch/energy
        when the corresponding extractor is configured.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B, 1).
            durations (Optional[Tensor]): Duration tensor.
            durations_lengths (Optional[Tensor]): Duration length tensor (B,).
            pitch (Optional[Tensor]): Pitch tensor.
            pitch_lengths (Optional[Tensor]): Pitch length tensor (B,).
            energy (Optional[Tensor]): Energy tensor.
            energy_lengths (Optional[Tensor]): Energy length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker index tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).

        Returns:
            Dict[str, Tensor]: Dict of features.
        """
        feats = None
        # BUGFIX: initialize feats_lengths so pitch/energy extraction below
        # does not hit a NameError when feats_extract is None.
        feats_lengths = None
        if self.feats_extract is not None:
            feats, feats_lengths = self.feats_extract(
                speech,
                speech_lengths,
            )
        if self.pitch_extract is not None:
            pitch, pitch_lengths = self.pitch_extract(
                speech,
                speech_lengths,
                feats_lengths=feats_lengths,
                durations=durations,
                durations_lengths=durations_lengths,
            )
        if self.energy_extract is not None:
            energy, energy_lengths = self.energy_extract(
                speech,
                speech_lengths,
                feats_lengths=feats_lengths,
                durations=durations,
                durations_lengths=durations_lengths,
            )

        # store in dict
        feats_dict = {}
        if feats is not None:
            feats_dict.update(feats=feats, feats_lengths=feats_lengths)
        if pitch is not None:
            feats_dict.update(pitch=pitch, pitch_lengths=pitch_lengths)
        if energy is not None:
            feats_dict.update(energy=energy, energy_lengths=energy_lengths)

        return feats_dict
| 9,364 | 39.021368 | 86 | py |
espnet | espnet-master/espnet2/gan_tts/abs_gan_tts.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS abstrast class."""
from abc import ABC, abstractmethod
from typing import Dict, Union
import torch
from espnet2.tts.abs_tts import AbsTTS
class AbsGANTTS(AbsTTS, ABC):
    """GAN-based TTS model abstract class.

    Concrete subclasses implement :meth:`forward`, which must return either
    the generator or the discriminator loss depending on
    ``forward_generator``.
    """

    @abstractmethod
    def forward(
        self,
        forward_generator,
        *args,
        **kwargs,
    ) -> Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor], int]]:
        """Return generator or discriminator loss.

        Args:
            forward_generator: Whether to compute the generator loss
                (otherwise the discriminator loss).

        Returns:
            Dict with at least loss, per-loss statistics, and the
            optimizer index to update.
        """
        raise NotImplementedError
| 600 | 22.115385 | 70 | py |
espnet | espnet-master/espnet2/gan_tts/jets/alignments.py | # Copyright 2022 Dan Lim
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numba import jit
class AlignmentModule(nn.Module):
    """Text-to-feature alignment network.

    Learns a soft alignment between encoder text states and acoustic
    features, following the Alignment Learning Framework proposed for
    parallel TTS models in: https://arxiv.org/abs/2108.10447
    """

    def __init__(self, adim, odim):
        """Initialize the alignment module.

        Args:
            adim (int): Dimension of the text embeddings.
            odim (int): Dimension of the acoustic features.
        """
        super().__init__()
        # Text branch: two 1-D convs over the token axis.
        self.t_conv1 = nn.Conv1d(adim, adim, kernel_size=3, padding=1)
        self.t_conv2 = nn.Conv1d(adim, adim, kernel_size=1, padding=0)
        # Feature branch: three 1-D convs projecting odim -> adim.
        self.f_conv1 = nn.Conv1d(odim, adim, kernel_size=3, padding=1)
        self.f_conv2 = nn.Conv1d(adim, adim, kernel_size=3, padding=1)
        self.f_conv3 = nn.Conv1d(adim, adim, kernel_size=1, padding=0)

    def forward(self, text, feats, x_masks=None):
        """Compute log attention probabilities.

        Args:
            text (Tensor): Batched text embedding (B, T_text, adim).
            feats (Tensor): Batched acoustic feature (B, T_feats, odim).
            x_masks (Tensor): Mask tensor (B, T_text).

        Returns:
            Tensor: Log probability of attention matrix (B, T_feats, T_text).
        """
        # Convs operate channel-first, so transpose around each branch.
        keys = self.t_conv2(F.relu(self.t_conv1(text.transpose(1, 2))))
        keys = keys.transpose(1, 2)

        queries = F.relu(self.f_conv1(feats.transpose(1, 2)))
        queries = F.relu(self.f_conv2(queries))
        queries = self.f_conv3(queries).transpose(1, 2)

        # Score each (frame, token) pair by negative Euclidean distance.
        pairwise = queries.unsqueeze(2) - keys.unsqueeze(1)
        score = -torch.norm(pairwise, p=2, dim=3)

        if x_masks is not None:
            # Masked tokens get zero probability after the softmax.
            score = score.masked_fill(x_masks.unsqueeze(-2), -np.inf)

        return F.log_softmax(score, dim=-1)
@jit(nopython=True)
def _monotonic_alignment_search(log_p_attn):
    # Monotonic alignment search (MAS) from Glow-TTS:
    # https://arxiv.org/abs/2005.11129
    # Given a (T_mel, T_inp) log-probability matrix, find the monotonic,
    # surjective path of maximum total log probability via dynamic
    # programming, then backtrack to recover the token index per frame.
    # NOTE: kept in the numba nopython subset — only plain loops, numpy
    # arrays, and scalar ops are used.
    T_mel = log_p_attn.shape[0]
    T_inp = log_p_attn.shape[1]
    # Q[i, j] = best cumulative score aligning tokens 0..i to frames 0..j.
    Q = np.full((T_inp, T_mel), fill_value=-np.inf)

    log_prob = log_p_attn.transpose(1, 0)  # -> (T_inp,T_mel)
    # 1. Q <- init first row for all j
    # Token 0 must cover all frames up to j.
    for j in range(T_mel):
        Q[0, j] = log_prob[0, : j + 1].sum()

    # 2.
    # Transition: at frame j either stay on token i or advance from i-1.
    for j in range(1, T_mel):
        for i in range(1, min(j + 1, T_inp)):
            Q[i, j] = max(Q[i - 1, j - 1], Q[i, j - 1]) + log_prob[i, j]

    # 3.
    # Backtrack: A[j] is the token aligned to frame j; the last frame must
    # align to the last token.
    A = np.full((T_mel,), fill_value=T_inp - 1)
    for j in range(T_mel - 2, -1, -1):  # T_mel-2, ..., 0
        # 'i' in {A[j+1]-1, A[j+1]}
        i_a = A[j + 1] - 1
        i_b = A[j + 1]
        if i_b == 0:
            argmax_i = 0
        elif Q[i_a, j] >= Q[i_b, j]:
            argmax_i = i_a
        else:
            argmax_i = i_b
        A[j] = argmax_i

    return A
def viterbi_decode(log_p_attn, text_lengths, feats_lengths):
    """Extract token durations from an attention probability matrix.

    Runs monotonic alignment search per utterance and converts the
    resulting frame-to-token path into per-token frame counts.

    Args:
        log_p_attn (Tensor): Batched log probability of attention
            matrix (B, T_feats, T_text).
        text_lengths (Tensor): Text length tensor (B,).
        feats_lengths (Tensor): Feature length tensor (B,).

    Returns:
        Tensor: Batched token duration extracted from `log_p_attn` (B, T_text).
        Tensor: Binarization loss tensor ().
    """
    batch_size, _, max_text = log_p_attn.size()
    device = log_p_attn.device

    bin_loss = 0
    durations = torch.zeros((batch_size, max_text), device=device)
    for idx in range(batch_size):
        # Strip padding before searching for the best monotonic path.
        attn = log_p_attn[idx, : feats_lengths[idx], : text_lengths[idx]]
        path = _monotonic_alignment_search(attn.detach().cpu().numpy())
        # Frames per token along the chosen path.
        counts = np.bincount(path)
        durations[idx, : len(counts)] = torch.from_numpy(counts).to(device)

        # Encourage the soft attention to match the hard path.
        frame_idx = torch.arange(feats_lengths[idx])
        bin_loss = bin_loss - attn[frame_idx, path].mean()

    return durations, bin_loss / batch_size
@jit(nopython=True)
def _average_by_duration(ds, xs, text_lengths, feats_lengths):
    # numba-compiled core of average_by_duration: for each utterance,
    # average the frame-level values xs over each token's duration span.
    # Kept inside the numba nopython subset (plain loops / numpy only).
    B = ds.shape[0]
    xs_avg = np.zeros_like(ds)
    ds = ds.astype(np.int32)
    for b in range(B):
        t_text = text_lengths[b]
        t_feats = feats_lengths[b]
        d = ds[b, :t_text]
        # Cumulative durations give [start, end) frame spans per token.
        d_cumsum = d.cumsum()
        d_cumsum = [0] + list(d_cumsum)
        x = xs[b, :t_feats]
        for n, (start, end) in enumerate(zip(d_cumsum[:-1], d_cumsum[1:])):
            if len(x[start:end]) != 0:
                xs_avg[b, n] = x[start:end].mean()
            else:
                # Zero-duration token: emit 0 rather than NaN from mean().
                xs_avg[b, n] = 0
    return xs_avg
def average_by_duration(ds, xs, text_lengths, feats_lengths):
    """Average frame-level features into token-level values by duration.

    Thin torch wrapper around the numba kernel ``_average_by_duration``:
    moves the inputs to CPU numpy, averages, and restores the device.

    Args:
        ds (Tensor): Batched token duration (B, T_text).
        xs (Tensor): Batched feature sequences to be averaged (B, T_feats).
        text_lengths (Tensor): Text length tensor (B,).
        feats_lengths (Tensor): Feature length tensor (B,).

    Returns:
        Tensor: Batched feature averaged according to the token duration
            (B, T_text).
    """
    target_device = ds.device
    averaged = _average_by_duration(
        ds.detach().cpu().numpy(),
        xs.detach().cpu().numpy(),
        text_lengths.detach().cpu().numpy(),
        feats_lengths.detach().cpu().numpy(),
    )
    return torch.from_numpy(averaged).to(target_device)
| 5,279 | 30.807229 | 85 | py |
espnet | espnet-master/espnet2/gan_tts/jets/loss.py | # Copyright 2022 Dan Lim
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""JETS related loss module for ESPnet2."""
from typing import Tuple
import numpy as np
import torch
import torch.nn.functional as F
from scipy.stats import betabinom
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import ( # noqa: H301
DurationPredictorLoss,
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class VarianceLoss(torch.nn.Module):
    """Loss module for the JETS variance adaptor.

    Computes duration, pitch, and energy predictor losses with either plain
    masking or length-weighted masking of padded positions.
    """

    def __init__(self, use_masking: bool = True, use_weighted_masking: bool = False):
        """Initialize JETS variance loss module.

        Args:
            use_masking (bool): Whether to apply masking for padded part in loss
                calculation.
            use_weighted_masking (bool): Whether to weighted masking in loss
                calculation.
        """
        assert check_argument_types()
        super().__init__()

        # The two masking modes are mutually exclusive (both False is allowed).
        assert (use_masking != use_weighted_masking) or not use_masking
        self.use_masking = use_masking
        self.use_weighted_masking = use_weighted_masking

        # define criterions
        # Weighted masking needs per-element losses; otherwise reduce to mean.
        reduction = "none" if self.use_weighted_masking else "mean"
        self.mse_criterion = torch.nn.MSELoss(reduction=reduction)
        self.duration_criterion = DurationPredictorLoss(reduction=reduction)

    def forward(
        self,
        d_outs: torch.Tensor,
        ds: torch.Tensor,
        p_outs: torch.Tensor,
        ps: torch.Tensor,
        e_outs: torch.Tensor,
        es: torch.Tensor,
        ilens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Calculate forward propagation.

        Args:
            d_outs (LongTensor): Batch of outputs of duration predictor (B, T_text).
            ds (LongTensor): Batch of durations (B, T_text).
            p_outs (Tensor): Batch of outputs of pitch predictor (B, T_text, 1).
            ps (Tensor): Batch of target token-averaged pitch (B, T_text, 1).
            e_outs (Tensor): Batch of outputs of energy predictor (B, T_text, 1).
            es (Tensor): Batch of target token-averaged energy (B, T_text, 1).
            ilens (LongTensor): Batch of the lengths of each input (B,).

        Returns:
            Tensor: Duration predictor loss value.
            Tensor: Pitch predictor loss value.
            Tensor: Energy predictor loss value.
        """
        # apply mask to remove padded part
        if self.use_masking:
            duration_masks = make_non_pad_mask(ilens).to(ds.device)
            d_outs = d_outs.masked_select(duration_masks)
            ds = ds.masked_select(duration_masks)
            # Pitch and energy share the same (B, T_text, 1) mask.
            pitch_masks = make_non_pad_mask(ilens).unsqueeze(-1).to(ds.device)
            p_outs = p_outs.masked_select(pitch_masks)
            e_outs = e_outs.masked_select(pitch_masks)
            ps = ps.masked_select(pitch_masks)
            es = es.masked_select(pitch_masks)

        # calculate loss
        duration_loss = self.duration_criterion(d_outs, ds)
        pitch_loss = self.mse_criterion(p_outs, ps)
        energy_loss = self.mse_criterion(e_outs, es)

        # make weighted mask and apply it
        if self.use_weighted_masking:
            duration_masks = make_non_pad_mask(ilens).to(ds.device)
            # Weight each position by 1 / utterance length so every
            # utterance contributes equally regardless of its length.
            duration_weights = (
                duration_masks.float() / duration_masks.sum(dim=1, keepdim=True).float()
            )
            duration_weights /= ds.size(0)

            # apply weight
            duration_loss = (
                duration_loss.mul(duration_weights).masked_select(duration_masks).sum()
            )
            pitch_masks = duration_masks.unsqueeze(-1)
            pitch_weights = duration_weights.unsqueeze(-1)
            pitch_loss = pitch_loss.mul(pitch_weights).masked_select(pitch_masks).sum()
            energy_loss = (
                energy_loss.mul(pitch_weights).masked_select(pitch_masks).sum()
            )

        return duration_loss, pitch_loss, energy_loss
class ForwardSumLoss(torch.nn.Module):
"""Forwardsum loss described at https://openreview.net/forum?id=0NQwnnwAORi"""
def __init__(self, cache_prior: bool = True):
"""Initialize forwardsum loss module.
Args:
cache_prior (bool): Whether to cache beta-binomial prior
"""
super().__init__()
self.cache_prior = cache_prior
self._cache = {}
def forward(
self,
log_p_attn: torch.Tensor,
ilens: torch.Tensor,
olens: torch.Tensor,
blank_prob: float = np.e**-1,
) -> torch.Tensor:
"""Calculate forward propagation.
Args:
log_p_attn (Tensor): Batch of log probability of attention matrix
(B, T_feats, T_text).
ilens (Tensor): Batch of the lengths of each input (B,).
olens (Tensor): Batch of the lengths of each target (B,).
blank_prob (float): Blank symbol probability.
Returns:
Tensor: forwardsum loss value.
"""
B = log_p_attn.size(0)
# add beta-binomial prior
bb_prior = self._generate_prior(ilens, olens)
bb_prior = bb_prior.to(dtype=log_p_attn.dtype, device=log_p_attn.device)
log_p_attn = log_p_attn + bb_prior
# a row must be added to the attention matrix to account for
# blank token of CTC loss
# (B,T_feats,T_text+1)
log_p_attn_pd = F.pad(log_p_attn, (1, 0, 0, 0, 0, 0), value=np.log(blank_prob))
loss = 0
for bidx in range(B):
# construct target sequnece.
# Every text token is mapped to a unique sequnece number.
target_seq = torch.arange(1, ilens[bidx] + 1).unsqueeze(0)
cur_log_p_attn_pd = log_p_attn_pd[
bidx, : olens[bidx], : ilens[bidx] + 1
].unsqueeze(
1
) # (T_feats,1,T_text+1)
loss += F.ctc_loss(
log_probs=cur_log_p_attn_pd,
targets=target_seq,
input_lengths=olens[bidx : bidx + 1],
target_lengths=ilens[bidx : bidx + 1],
zero_infinity=True,
)
loss = loss / B
return loss
def _generate_prior(self, text_lengths, feats_lengths, w=1) -> torch.Tensor:
"""Generate alignment prior formulated as beta-binomial distribution
Args:
text_lengths (Tensor): Batch of the lengths of each input (B,).
feats_lengths (Tensor): Batch of the lengths of each target (B,).
w (float): Scaling factor; lower -> wider the width.
Returns:
Tensor: Batched 2d static prior matrix (B, T_feats, T_text).
"""
B = len(text_lengths)
T_text = text_lengths.max()
T_feats = feats_lengths.max()
bb_prior = torch.full((B, T_feats, T_text), fill_value=-np.inf)
for bidx in range(B):
T = feats_lengths[bidx].item()
N = text_lengths[bidx].item()
key = str(T) + "," + str(N)
if self.cache_prior and key in self._cache:
prob = self._cache[key]
else:
alpha = w * np.arange(1, T + 1, dtype=float) # (T,)
beta = w * np.array([T - t + 1 for t in alpha])
k = np.arange(N)
batched_k = k[..., None] # (N,1)
prob = betabinom.logpmf(batched_k, N, alpha, beta) # (N,T)
# store cache
if self.cache_prior and key not in self._cache:
self._cache[key] = prob
prob = torch.from_numpy(prob).transpose(0, 1) # -> (T,N)
bb_prior[bidx, :T, :N] = prob
return bb_prior
| 7,779 | 35.525822 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/jets/jets.py | # Copyright 2022 Dan Lim
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""JETS module for GAN-TTS task."""
from typing import Any, Dict, Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.gan_tts.hifigan import (
HiFiGANMultiPeriodDiscriminator,
HiFiGANMultiScaleDiscriminator,
HiFiGANMultiScaleMultiPeriodDiscriminator,
HiFiGANPeriodDiscriminator,
HiFiGANScaleDiscriminator,
)
from espnet2.gan_tts.hifigan.loss import (
DiscriminatorAdversarialLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
MelSpectrogramLoss,
)
from espnet2.gan_tts.jets.generator import JETSGenerator
from espnet2.gan_tts.jets.loss import ForwardSumLoss, VarianceLoss
from espnet2.gan_tts.utils import get_segments
from espnet2.torch_utils.device_funcs import force_gatherable
AVAILABLE_GENERATERS = {
"jets_generator": JETSGenerator,
}
AVAILABLE_DISCRIMINATORS = {
"hifigan_period_discriminator": HiFiGANPeriodDiscriminator,
"hifigan_scale_discriminator": HiFiGANScaleDiscriminator,
"hifigan_multi_period_discriminator": HiFiGANMultiPeriodDiscriminator,
"hifigan_multi_scale_discriminator": HiFiGANMultiScaleDiscriminator,
"hifigan_multi_scale_multi_period_discriminator": HiFiGANMultiScaleMultiPeriodDiscriminator, # NOQA
}
class JETS(AbsGANTTS):
"""JETS module (generator + discriminator).
This is a module of JETS described in `JETS: Jointly Training FastSpeech2
and HiFi-GAN for End to End Text to Speech'_.
.. _`JETS: Jointly Training FastSpeech2 and HiFi-GAN for End to End Text to Speech`
: https://arxiv.org/abs/2203.16852
"""
def __init__(
self,
# generator related
idim: int,
odim: int,
sampling_rate: int = 22050,
generator_type: str = "jets_generator",
generator_params: Dict[str, Any] = {
"adim": 256,
"aheads": 2,
"elayers": 4,
"eunits": 1024,
"dlayers": 4,
"dunits": 1024,
"positionwise_layer_type": "conv1d",
"positionwise_conv_kernel_size": 1,
"use_scaled_pos_enc": True,
"use_batch_norm": True,
"encoder_normalize_before": True,
"decoder_normalize_before": True,
"encoder_concat_after": False,
"decoder_concat_after": False,
"reduction_factor": 1,
"encoder_type": "transformer",
"decoder_type": "transformer",
"transformer_enc_dropout_rate": 0.1,
"transformer_enc_positional_dropout_rate": 0.1,
"transformer_enc_attn_dropout_rate": 0.1,
"transformer_dec_dropout_rate": 0.1,
"transformer_dec_positional_dropout_rate": 0.1,
"transformer_dec_attn_dropout_rate": 0.1,
"conformer_rel_pos_type": "latest",
"conformer_pos_enc_layer_type": "rel_pos",
"conformer_self_attn_layer_type": "rel_selfattn",
"conformer_activation_type": "swish",
"use_macaron_style_in_conformer": True,
"use_cnn_in_conformer": True,
"zero_triu": False,
"conformer_enc_kernel_size": 7,
"conformer_dec_kernel_size": 31,
"duration_predictor_layers": 2,
"duration_predictor_chans": 384,
"duration_predictor_kernel_size": 3,
"duration_predictor_dropout_rate": 0.1,
"energy_predictor_layers": 2,
"energy_predictor_chans": 384,
"energy_predictor_kernel_size": 3,
"energy_predictor_dropout": 0.5,
"energy_embed_kernel_size": 1,
"energy_embed_dropout": 0.5,
"stop_gradient_from_energy_predictor": False,
"pitch_predictor_layers": 5,
"pitch_predictor_chans": 384,
"pitch_predictor_kernel_size": 5,
"pitch_predictor_dropout": 0.5,
"pitch_embed_kernel_size": 1,
"pitch_embed_dropout": 0.5,
"stop_gradient_from_pitch_predictor": True,
"generator_out_channels": 1,
"generator_channels": 512,
"generator_global_channels": -1,
"generator_kernel_size": 7,
"generator_upsample_scales": [8, 8, 2, 2],
"generator_upsample_kernel_sizes": [16, 16, 4, 4],
"generator_resblock_kernel_sizes": [3, 7, 11],
"generator_resblock_dilations": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
"generator_use_additional_convs": True,
"generator_bias": True,
"generator_nonlinear_activation": "LeakyReLU",
"generator_nonlinear_activation_params": {"negative_slope": 0.1},
"generator_use_weight_norm": True,
"segment_size": 64,
"spks": -1,
"langs": -1,
"spk_embed_dim": None,
"spk_embed_integration_type": "add",
"use_gst": False,
"gst_tokens": 10,
"gst_heads": 4,
"gst_conv_layers": 6,
"gst_conv_chans_list": [32, 32, 64, 64, 128, 128],
"gst_conv_kernel_size": 3,
"gst_conv_stride": 2,
"gst_gru_layers": 1,
"gst_gru_units": 128,
"init_type": "xavier_uniform",
"init_enc_alpha": 1.0,
"init_dec_alpha": 1.0,
"use_masking": False,
"use_weighted_masking": False,
},
# discriminator related
discriminator_type: str = "hifigan_multi_scale_multi_period_discriminator",
discriminator_params: Dict[str, Any] = {
"scales": 1,
"scale_downsample_pooling": "AvgPool1d",
"scale_downsample_pooling_params": {
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
"scale_discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 128,
"max_downsample_channels": 1024,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 4, 4, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
"follow_official_norm": False,
"periods": [2, 3, 5, 7, 11],
"period_discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 32,
"downsample_scales": [3, 3, 3, 3, 1],
"max_downsample_channels": 1024,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
},
# loss related
generator_adv_loss_params: Dict[str, Any] = {
"average_by_discriminators": False,
"loss_type": "mse",
},
discriminator_adv_loss_params: Dict[str, Any] = {
"average_by_discriminators": False,
"loss_type": "mse",
},
feat_match_loss_params: Dict[str, Any] = {
"average_by_discriminators": False,
"average_by_layers": False,
"include_final_outputs": True,
},
mel_loss_params: Dict[str, Any] = {
"fs": 22050,
"n_fft": 1024,
"hop_length": 256,
"win_length": None,
"window": "hann",
"n_mels": 80,
"fmin": 0,
"fmax": None,
"log_base": None,
},
lambda_adv: float = 1.0,
lambda_mel: float = 45.0,
lambda_feat_match: float = 2.0,
lambda_var: float = 1.0,
lambda_align: float = 2.0,
cache_generator_outputs: bool = True,
):
"""Initialize JETS module.
Args:
idim (int): Input vocabrary size.
odim (int): Acoustic feature dimension. The actual output channels will
be 1 since JETS is the end-to-end text-to-wave model but for the
compatibility odim is used to indicate the acoustic feature dimension.
sampling_rate (int): Sampling rate, not used for the training but it will
be referred in saving waveform during the inference.
generator_type (str): Generator type.
generator_params (Dict[str, Any]): Parameter dict for generator.
discriminator_type (str): Discriminator type.
discriminator_params (Dict[str, Any]): Parameter dict for discriminator.
generator_adv_loss_params (Dict[str, Any]): Parameter dict for generator
adversarial loss.
discriminator_adv_loss_params (Dict[str, Any]): Parameter dict for
discriminator adversarial loss.
feat_match_loss_params (Dict[str, Any]): Parameter dict for feat match loss.
mel_loss_params (Dict[str, Any]): Parameter dict for mel loss.
lambda_adv (float): Loss scaling coefficient for adversarial loss.
lambda_mel (float): Loss scaling coefficient for mel spectrogram loss.
lambda_feat_match (float): Loss scaling coefficient for feat match loss.
lambda_var (float): Loss scaling coefficient for variance loss.
lambda_align (float): Loss scaling coefficient for alignment loss.
cache_generator_outputs (bool): Whether to cache generator outputs.
"""
assert check_argument_types()
super().__init__()
# define modules
generator_class = AVAILABLE_GENERATERS[generator_type]
generator_params.update(idim=idim, odim=odim)
self.generator = generator_class(
**generator_params,
)
discriminator_class = AVAILABLE_DISCRIMINATORS[discriminator_type]
self.discriminator = discriminator_class(
**discriminator_params,
)
self.generator_adv_loss = GeneratorAdversarialLoss(
**generator_adv_loss_params,
)
self.discriminator_adv_loss = DiscriminatorAdversarialLoss(
**discriminator_adv_loss_params,
)
self.feat_match_loss = FeatureMatchLoss(
**feat_match_loss_params,
)
self.mel_loss = MelSpectrogramLoss(
**mel_loss_params,
)
self.var_loss = VarianceLoss()
self.forwardsum_loss = ForwardSumLoss()
# coefficients
self.lambda_adv = lambda_adv
self.lambda_mel = lambda_mel
self.lambda_feat_match = lambda_feat_match
self.lambda_var = lambda_var
self.lambda_align = lambda_align
# cache
self.cache_generator_outputs = cache_generator_outputs
self._cache = None
# store sampling rate for saving wav file
# (not used for the training)
self.fs = sampling_rate
# store parameters for test compatibility
self.spks = self.generator.spks
self.langs = self.generator.langs
self.spk_embed_dim = self.generator.spk_embed_dim
self.use_gst = getattr(self.generator, "use_gst", False)
@property
def require_raw_speech(self):
"""Return whether or not speech is required."""
return True
@property
def require_vocoder(self):
"""Return whether or not vocoder is required."""
return False
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
feats: torch.Tensor,
feats_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
sids: Optional[torch.Tensor] = None,
spembs: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
**kwargs,
) -> Dict[str, Any]:
"""Perform generator forward.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
feats (Tensor): Feature tensor (B, T_feats, aux_channels).
feats_lengths (Tensor): Feature length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
if forward_generator:
return self._forward_generator(
text=text,
text_lengths=text_lengths,
feats=feats,
feats_lengths=feats_lengths,
speech=speech,
speech_lengths=speech_lengths,
sids=sids,
spembs=spembs,
lids=lids,
**kwargs,
)
else:
return self._forward_discrminator(
text=text,
text_lengths=text_lengths,
feats=feats,
feats_lengths=feats_lengths,
speech=speech,
speech_lengths=speech_lengths,
sids=sids,
spembs=spembs,
lids=lids,
**kwargs,
)
def _forward_generator(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
feats: torch.Tensor,
feats_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
sids: Optional[torch.Tensor] = None,
spembs: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
**kwargs,
) -> Dict[str, Any]:
"""Perform generator forward.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
feats (Tensor): Feature tensor (B, T_feats, aux_channels).
feats_lengths (Tensor): Feature length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
Returns:
Dict[str, Any]:
* loss (Tensor): Loss scalar tensor.
* stats (Dict[str, float]): Statistics to be monitored.
* weight (Tensor): Weight tensor to summarize losses.
* optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
# setup
batch_size = text.size(0)
speech = speech.unsqueeze(1)
# calculate generator outputs
reuse_cache = True
if not self.cache_generator_outputs or self._cache is None:
reuse_cache = False
outs = self.generator(
text=text,
text_lengths=text_lengths,
feats=feats,
feats_lengths=feats_lengths,
sids=sids,
spembs=spembs,
lids=lids,
**kwargs,
)
else:
outs = self._cache
# store cache
if self.training and self.cache_generator_outputs and not reuse_cache:
self._cache = outs
# parse outputs
(
speech_hat_,
bin_loss,
log_p_attn,
start_idxs,
d_outs,
ds,
p_outs,
ps,
e_outs,
es,
) = outs
speech_ = get_segments(
x=speech,
start_idxs=start_idxs * self.generator.upsample_factor,
segment_size=self.generator.segment_size * self.generator.upsample_factor,
)
# calculate discriminator outputs
p_hat = self.discriminator(speech_hat_)
with torch.no_grad():
# do not store discriminator gradient in generator turn
p = self.discriminator(speech_)
# calculate losses
mel_loss = self.mel_loss(speech_hat_, speech_)
adv_loss = self.generator_adv_loss(p_hat)
feat_match_loss = self.feat_match_loss(p_hat, p)
dur_loss, pitch_loss, energy_loss = self.var_loss(
d_outs, ds, p_outs, ps, e_outs, es, text_lengths
)
forwardsum_loss = self.forwardsum_loss(log_p_attn, text_lengths, feats_lengths)
mel_loss = mel_loss * self.lambda_mel
adv_loss = adv_loss * self.lambda_adv
feat_match_loss = feat_match_loss * self.lambda_feat_match
g_loss = mel_loss + adv_loss + feat_match_loss
var_loss = (dur_loss + pitch_loss + energy_loss) * self.lambda_var
align_loss = (forwardsum_loss + bin_loss) * self.lambda_align
loss = g_loss + var_loss + align_loss
stats = dict(
generator_loss=loss.item(),
generator_g_loss=g_loss.item(),
generator_var_loss=var_loss.item(),
generator_align_loss=align_loss.item(),
generator_g_mel_loss=mel_loss.item(),
generator_g_adv_loss=adv_loss.item(),
generator_g_feat_match_loss=feat_match_loss.item(),
generator_var_dur_loss=dur_loss.item(),
generator_var_pitch_loss=pitch_loss.item(),
generator_var_energy_loss=energy_loss.item(),
generator_align_forwardsum_loss=forwardsum_loss.item(),
generator_align_bin_loss=bin_loss.item(),
)
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
# reset cache
if reuse_cache or not self.training:
self._cache = None
return {
"loss": loss,
"stats": stats,
"weight": weight,
"optim_idx": 0, # needed for trainer
}
def _forward_discrminator(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
feats: torch.Tensor,
feats_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
sids: Optional[torch.Tensor] = None,
spembs: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
**kwargs,
) -> Dict[str, Any]:
"""Perform discriminator forward.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
feats (Tensor): Feature tensor (B, T_feats, aux_channels).
feats_lengths (Tensor): Feature length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
Returns:
Dict[str, Any]:
* loss (Tensor): Loss scalar tensor.
* stats (Dict[str, float]): Statistics to be monitored.
* weight (Tensor): Weight tensor to summarize losses.
* optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
# setup
batch_size = text.size(0)
speech = speech.unsqueeze(1)
# calculate generator outputs
reuse_cache = True
if not self.cache_generator_outputs or self._cache is None:
reuse_cache = False
outs = self.generator(
text=text,
text_lengths=text_lengths,
feats=feats,
feats_lengths=feats_lengths,
sids=sids,
spembs=spembs,
lids=lids,
**kwargs,
)
else:
outs = self._cache
# store cache
if self.cache_generator_outputs and not reuse_cache:
self._cache = outs
# parse outputs
speech_hat_, _, _, start_idxs, *_ = outs
speech_ = get_segments(
x=speech,
start_idxs=start_idxs * self.generator.upsample_factor,
segment_size=self.generator.segment_size * self.generator.upsample_factor,
)
# calculate discriminator outputs
p_hat = self.discriminator(speech_hat_.detach())
p = self.discriminator(speech_)
# calculate losses
real_loss, fake_loss = self.discriminator_adv_loss(p_hat, p)
loss = real_loss + fake_loss
stats = dict(
discriminator_loss=loss.item(),
discriminator_real_loss=real_loss.item(),
discriminator_fake_loss=fake_loss.item(),
)
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
# reset cache
if reuse_cache or not self.training:
self._cache = None
return {
"loss": loss,
"stats": stats,
"weight": weight,
"optim_idx": 1, # needed for trainer
}
def inference(
self,
text: torch.Tensor,
feats: Optional[torch.Tensor] = None,
pitch: Optional[torch.Tensor] = None,
energy: Optional[torch.Tensor] = None,
use_teacher_forcing: bool = False,
**kwargs,
) -> Dict[str, torch.Tensor]:
"""Run inference.
Args:
text (Tensor): Input text index tensor (T_text,).
feats (Tensor): Feature tensor (T_feats, aux_channels).
pitch (Tensor): Pitch tensor (T_feats, 1).
energy (Tensor): Energy tensor (T_feats, 1).
use_teacher_forcing (bool): Whether to use teacher forcing.
Returns:
Dict[str, Tensor]:
* wav (Tensor): Generated waveform tensor (T_wav,).
* duration (Tensor): Predicted duration tensor (T_text,).
"""
# setup
text = text[None]
text_lengths = torch.tensor(
[text.size(1)],
dtype=torch.long,
device=text.device,
)
if "spembs" in kwargs:
kwargs["spembs"] = kwargs["spembs"][None]
if self.use_gst and "speech" in kwargs:
# NOTE(kan-bayashi): Workaround for the use of GST
kwargs.pop("speech")
# inference
if use_teacher_forcing:
assert feats is not None
feats = feats[None]
feats_lengths = torch.tensor(
[feats.size(1)],
dtype=torch.long,
device=feats.device,
)
assert pitch is not None
pitch = pitch[None]
assert energy is not None
energy = energy[None]
wav, dur = self.generator.inference(
text=text,
text_lengths=text_lengths,
feats=feats,
feats_lengths=feats_lengths,
pitch=pitch,
energy=energy,
use_teacher_forcing=use_teacher_forcing,
**kwargs,
)
else:
wav, dur = self.generator.inference(
text=text,
text_lengths=text_lengths,
feats=feats[None] if self.use_gst else None,
**kwargs,
)
return dict(wav=wav.view(-1), duration=dur[0])
| 24,388 | 36.121766 | 104 | py |
espnet | espnet-master/espnet2/gan_tts/jets/generator.py | # Copyright 2022 Dan Lim
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Generator module in JETS."""
import logging
from typing import Any, Dict, List, Optional, Sequence, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from espnet2.gan_tts.hifigan import HiFiGANGenerator
from espnet2.gan_tts.jets.alignments import (
AlignmentModule,
average_by_duration,
viterbi_decode,
)
from espnet2.gan_tts.jets.length_regulator import GaussianUpsampling
from espnet2.gan_tts.utils import get_random_segments
from espnet2.torch_utils.initialize import initialize
from espnet2.tts.fastspeech2.variance_predictor import VariancePredictor
from espnet2.tts.gst.style_encoder import StyleEncoder
from espnet.nets.pytorch_backend.conformer.encoder import Encoder as ConformerEncoder
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import DurationPredictor
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask, make_pad_mask
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import (
Encoder as TransformerEncoder,
)
class JETSGenerator(torch.nn.Module):
"""Generator module in JETS."""
def __init__(
self,
idim: int,
odim: int,
adim: int = 256,
aheads: int = 2,
elayers: int = 4,
eunits: int = 1024,
dlayers: int = 4,
dunits: int = 1024,
positionwise_layer_type: str = "conv1d",
positionwise_conv_kernel_size: int = 1,
use_scaled_pos_enc: bool = True,
use_batch_norm: bool = True,
encoder_normalize_before: bool = True,
decoder_normalize_before: bool = True,
encoder_concat_after: bool = False,
decoder_concat_after: bool = False,
reduction_factor: int = 1,
encoder_type: str = "transformer",
decoder_type: str = "transformer",
transformer_enc_dropout_rate: float = 0.1,
transformer_enc_positional_dropout_rate: float = 0.1,
transformer_enc_attn_dropout_rate: float = 0.1,
transformer_dec_dropout_rate: float = 0.1,
transformer_dec_positional_dropout_rate: float = 0.1,
transformer_dec_attn_dropout_rate: float = 0.1,
# only for conformer
conformer_rel_pos_type: str = "legacy",
conformer_pos_enc_layer_type: str = "rel_pos",
conformer_self_attn_layer_type: str = "rel_selfattn",
conformer_activation_type: str = "swish",
use_macaron_style_in_conformer: bool = True,
use_cnn_in_conformer: bool = True,
zero_triu: bool = False,
conformer_enc_kernel_size: int = 7,
conformer_dec_kernel_size: int = 31,
# duration predictor
duration_predictor_layers: int = 2,
duration_predictor_chans: int = 384,
duration_predictor_kernel_size: int = 3,
duration_predictor_dropout_rate: float = 0.1,
# energy predictor
energy_predictor_layers: int = 2,
energy_predictor_chans: int = 384,
energy_predictor_kernel_size: int = 3,
energy_predictor_dropout: float = 0.5,
energy_embed_kernel_size: int = 9,
energy_embed_dropout: float = 0.5,
stop_gradient_from_energy_predictor: bool = False,
# pitch predictor
pitch_predictor_layers: int = 2,
pitch_predictor_chans: int = 384,
pitch_predictor_kernel_size: int = 3,
pitch_predictor_dropout: float = 0.5,
pitch_embed_kernel_size: int = 9,
pitch_embed_dropout: float = 0.5,
stop_gradient_from_pitch_predictor: bool = False,
# extra embedding related
spks: Optional[int] = None,
langs: Optional[int] = None,
spk_embed_dim: Optional[int] = None,
spk_embed_integration_type: str = "add",
use_gst: bool = False,
gst_tokens: int = 10,
gst_heads: int = 4,
gst_conv_layers: int = 6,
gst_conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),
gst_conv_kernel_size: int = 3,
gst_conv_stride: int = 2,
gst_gru_layers: int = 1,
gst_gru_units: int = 128,
# training related
init_type: str = "xavier_uniform",
init_enc_alpha: float = 1.0,
init_dec_alpha: float = 1.0,
use_masking: bool = False,
use_weighted_masking: bool = False,
segment_size: int = 64,
# hifigan generator
generator_out_channels: int = 1,
generator_channels: int = 512,
generator_global_channels: int = -1,
generator_kernel_size: int = 7,
generator_upsample_scales: List[int] = [8, 8, 2, 2],
generator_upsample_kernel_sizes: List[int] = [16, 16, 4, 4],
generator_resblock_kernel_sizes: List[int] = [3, 7, 11],
generator_resblock_dilations: List[List[int]] = [
[1, 3, 5],
[1, 3, 5],
[1, 3, 5],
],
generator_use_additional_convs: bool = True,
generator_bias: bool = True,
generator_nonlinear_activation: str = "LeakyReLU",
generator_nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.1},
generator_use_weight_norm: bool = True,
):
"""Initialize JETS generator module.
Args:
idim (int): Dimension of the inputs.
odim (int): Dimension of the outputs.
elayers (int): Number of encoder layers.
eunits (int): Number of encoder hidden units.
dlayers (int): Number of decoder layers.
dunits (int): Number of decoder hidden units.
use_scaled_pos_enc (bool): Whether to use trainable scaled pos encoding.
use_batch_norm (bool): Whether to use batch normalization in encoder prenet.
encoder_normalize_before (bool): Whether to apply layernorm layer before
encoder block.
decoder_normalize_before (bool): Whether to apply layernorm layer before
decoder block.
encoder_concat_after (bool): Whether to concatenate attention layer's input
and output in encoder.
decoder_concat_after (bool): Whether to concatenate attention layer's input
and output in decoder.
reduction_factor (int): Reduction factor.
encoder_type (str): Encoder type ("transformer" or "conformer").
decoder_type (str): Decoder type ("transformer" or "conformer").
transformer_enc_dropout_rate (float): Dropout rate in encoder except
attention and positional encoding.
transformer_enc_positional_dropout_rate (float): Dropout rate after encoder
positional encoding.
transformer_enc_attn_dropout_rate (float): Dropout rate in encoder
self-attention module.
transformer_dec_dropout_rate (float): Dropout rate in decoder except
attention & positional encoding.
transformer_dec_positional_dropout_rate (float): Dropout rate after decoder
positional encoding.
transformer_dec_attn_dropout_rate (float): Dropout rate in decoder
self-attention module.
conformer_rel_pos_type (str): Relative pos encoding type in conformer.
conformer_pos_enc_layer_type (str): Pos encoding layer type in conformer.
conformer_self_attn_layer_type (str): Self-attention layer type in conformer
conformer_activation_type (str): Activation function type in conformer.
use_macaron_style_in_conformer: Whether to use macaron style FFN.
use_cnn_in_conformer: Whether to use CNN in conformer.
zero_triu: Whether to use zero triu in relative self-attention module.
conformer_enc_kernel_size: Kernel size of encoder conformer.
conformer_dec_kernel_size: Kernel size of decoder conformer.
duration_predictor_layers (int): Number of duration predictor layers.
duration_predictor_chans (int): Number of duration predictor channels.
duration_predictor_kernel_size (int): Kernel size of duration predictor.
duration_predictor_dropout_rate (float): Dropout rate in duration predictor.
pitch_predictor_layers (int): Number of pitch predictor layers.
pitch_predictor_chans (int): Number of pitch predictor channels.
pitch_predictor_kernel_size (int): Kernel size of pitch predictor.
pitch_predictor_dropout_rate (float): Dropout rate in pitch predictor.
pitch_embed_kernel_size (float): Kernel size of pitch embedding.
pitch_embed_dropout_rate (float): Dropout rate for pitch embedding.
stop_gradient_from_pitch_predictor: Whether to stop gradient from pitch
predictor to encoder.
energy_predictor_layers (int): Number of energy predictor layers.
energy_predictor_chans (int): Number of energy predictor channels.
energy_predictor_kernel_size (int): Kernel size of energy predictor.
energy_predictor_dropout_rate (float): Dropout rate in energy predictor.
energy_embed_kernel_size (float): Kernel size of energy embedding.
energy_embed_dropout_rate (float): Dropout rate for energy embedding.
stop_gradient_from_energy_predictor: Whether to stop gradient from energy
predictor to encoder.
spks (Optional[int]): Number of speakers. If set to > 1, assume that the
sids will be provided as the input and use sid embedding layer.
langs (Optional[int]): Number of languages. If set to > 1, assume that the
lids will be provided as the input and use sid embedding layer.
spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
assume that spembs will be provided as the input.
spk_embed_integration_type: How to integrate speaker embedding.
use_gst (str): Whether to use global style token.
gst_tokens (int): The number of GST embeddings.
gst_heads (int): The number of heads in GST multihead attention.
gst_conv_layers (int): The number of conv layers in GST.
gst_conv_chans_list: (Sequence[int]):
List of the number of channels of conv layers in GST.
gst_conv_kernel_size (int): Kernel size of conv layers in GST.
gst_conv_stride (int): Stride size of conv layers in GST.
gst_gru_layers (int): The number of GRU layers in GST.
gst_gru_units (int): The number of GRU units in GST.
init_type (str): How to initialize transformer parameters.
init_enc_alpha (float): Initial value of alpha in scaled pos encoding of the
encoder.
init_dec_alpha (float): Initial value of alpha in scaled pos encoding of the
decoder.
use_masking (bool): Whether to apply masking for padded part in loss
calculation.
use_weighted_masking (bool): Whether to apply weighted masking in loss
calculation.
segment_size (int): Segment size for random windowed discriminator
generator_out_channels (int): Number of output channels.
generator_channels (int): Number of hidden representation channels.
generator_global_channels (int): Number of global conditioning channels.
generator_kernel_size (int): Kernel size of initial and final conv layer.
generator_upsample_scales (List[int]): List of upsampling scales.
generator_upsample_kernel_sizes (List[int]): List of kernel sizes for
upsample layers.
generator_resblock_kernel_sizes (List[int]): List of kernel sizes for
residual blocks.
generator_resblock_dilations (List[List[int]]): List of list of dilations
for residual blocks.
generator_use_additional_convs (bool): Whether to use additional conv layers
in residual blocks.
generator_bias (bool): Whether to add bias parameter in convolution layers.
generator_nonlinear_activation (str): Activation function module name.
generator_nonlinear_activation_params (Dict[str, Any]): Hyperparameters for
activation function.
generator_use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
"""
super().__init__()
self.segment_size = segment_size
self.upsample_factor = int(np.prod(generator_upsample_scales))
self.idim = idim
self.odim = odim
self.reduction_factor = reduction_factor
self.encoder_type = encoder_type
self.decoder_type = decoder_type
self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor
self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor
self.use_scaled_pos_enc = use_scaled_pos_enc
self.use_gst = use_gst
# use idx 0 as padding idx
self.padding_idx = 0
# get positional encoding class
pos_enc_class = (
ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
)
# check relative positional encoding compatibility
if "conformer" in [encoder_type, decoder_type]:
if conformer_rel_pos_type == "legacy":
if conformer_pos_enc_layer_type == "rel_pos":
conformer_pos_enc_layer_type = "legacy_rel_pos"
logging.warning(
"Fallback to conformer_pos_enc_layer_type = 'legacy_rel_pos' "
"due to the compatibility. If you want to use the new one, "
"please use conformer_pos_enc_layer_type = 'latest'."
)
if conformer_self_attn_layer_type == "rel_selfattn":
conformer_self_attn_layer_type = "legacy_rel_selfattn"
logging.warning(
"Fallback to "
"conformer_self_attn_layer_type = 'legacy_rel_selfattn' "
"due to the compatibility. If you want to use the new one, "
"please use conformer_pos_enc_layer_type = 'latest'."
)
elif conformer_rel_pos_type == "latest":
assert conformer_pos_enc_layer_type != "legacy_rel_pos"
assert conformer_self_attn_layer_type != "legacy_rel_selfattn"
else:
raise ValueError(f"Unknown rel_pos_type: {conformer_rel_pos_type}")
# define encoder
encoder_input_layer = torch.nn.Embedding(
num_embeddings=idim, embedding_dim=adim, padding_idx=self.padding_idx
)
if encoder_type == "transformer":
self.encoder = TransformerEncoder(
idim=idim,
attention_dim=adim,
attention_heads=aheads,
linear_units=eunits,
num_blocks=elayers,
input_layer=encoder_input_layer,
dropout_rate=transformer_enc_dropout_rate,
positional_dropout_rate=transformer_enc_positional_dropout_rate,
attention_dropout_rate=transformer_enc_attn_dropout_rate,
pos_enc_class=pos_enc_class,
normalize_before=encoder_normalize_before,
concat_after=encoder_concat_after,
positionwise_layer_type=positionwise_layer_type,
positionwise_conv_kernel_size=positionwise_conv_kernel_size,
)
elif encoder_type == "conformer":
self.encoder = ConformerEncoder(
idim=idim,
attention_dim=adim,
attention_heads=aheads,
linear_units=eunits,
num_blocks=elayers,
input_layer=encoder_input_layer,
dropout_rate=transformer_enc_dropout_rate,
positional_dropout_rate=transformer_enc_positional_dropout_rate,
attention_dropout_rate=transformer_enc_attn_dropout_rate,
normalize_before=encoder_normalize_before,
concat_after=encoder_concat_after,
positionwise_layer_type=positionwise_layer_type,
positionwise_conv_kernel_size=positionwise_conv_kernel_size,
macaron_style=use_macaron_style_in_conformer,
pos_enc_layer_type=conformer_pos_enc_layer_type,
selfattention_layer_type=conformer_self_attn_layer_type,
activation_type=conformer_activation_type,
use_cnn_module=use_cnn_in_conformer,
cnn_module_kernel=conformer_enc_kernel_size,
zero_triu=zero_triu,
)
else:
raise ValueError(f"{encoder_type} is not supported.")
# define GST
if self.use_gst:
self.gst = StyleEncoder(
idim=odim, # the input is mel-spectrogram
gst_tokens=gst_tokens,
gst_token_dim=adim,
gst_heads=gst_heads,
conv_layers=gst_conv_layers,
conv_chans_list=gst_conv_chans_list,
conv_kernel_size=gst_conv_kernel_size,
conv_stride=gst_conv_stride,
gru_layers=gst_gru_layers,
gru_units=gst_gru_units,
)
# define spk and lang embedding
self.spks = None
if spks is not None and spks > 1:
self.spks = spks
self.sid_emb = torch.nn.Embedding(spks, adim)
self.langs = None
if langs is not None and langs > 1:
self.langs = langs
self.lid_emb = torch.nn.Embedding(langs, adim)
# define additional projection for speaker embedding
self.spk_embed_dim = None
if spk_embed_dim is not None and spk_embed_dim > 0:
self.spk_embed_dim = spk_embed_dim
self.spk_embed_integration_type = spk_embed_integration_type
if self.spk_embed_dim is not None:
if self.spk_embed_integration_type == "add":
self.projection = torch.nn.Linear(self.spk_embed_dim, adim)
else:
self.projection = torch.nn.Linear(adim + self.spk_embed_dim, adim)
# define duration predictor
self.duration_predictor = DurationPredictor(
idim=adim,
n_layers=duration_predictor_layers,
n_chans=duration_predictor_chans,
kernel_size=duration_predictor_kernel_size,
dropout_rate=duration_predictor_dropout_rate,
)
# define pitch predictor
self.pitch_predictor = VariancePredictor(
idim=adim,
n_layers=pitch_predictor_layers,
n_chans=pitch_predictor_chans,
kernel_size=pitch_predictor_kernel_size,
dropout_rate=pitch_predictor_dropout,
)
# NOTE(kan-bayashi): We use continuous pitch + FastPitch style avg
self.pitch_embed = torch.nn.Sequential(
torch.nn.Conv1d(
in_channels=1,
out_channels=adim,
kernel_size=pitch_embed_kernel_size,
padding=(pitch_embed_kernel_size - 1) // 2,
),
torch.nn.Dropout(pitch_embed_dropout),
)
# define energy predictor
self.energy_predictor = VariancePredictor(
idim=adim,
n_layers=energy_predictor_layers,
n_chans=energy_predictor_chans,
kernel_size=energy_predictor_kernel_size,
dropout_rate=energy_predictor_dropout,
)
# NOTE(kan-bayashi): We use continuous enegy + FastPitch style avg
self.energy_embed = torch.nn.Sequential(
torch.nn.Conv1d(
in_channels=1,
out_channels=adim,
kernel_size=energy_embed_kernel_size,
padding=(energy_embed_kernel_size - 1) // 2,
),
torch.nn.Dropout(energy_embed_dropout),
)
# define AlignmentModule
self.alignment_module = AlignmentModule(adim, odim)
# define length regulator
self.length_regulator = GaussianUpsampling()
# define decoder
# NOTE: we use encoder as decoder
# because fastspeech's decoder is the same as encoder
if decoder_type == "transformer":
self.decoder = TransformerEncoder(
idim=0,
attention_dim=adim,
attention_heads=aheads,
linear_units=dunits,
num_blocks=dlayers,
input_layer=None,
dropout_rate=transformer_dec_dropout_rate,
positional_dropout_rate=transformer_dec_positional_dropout_rate,
attention_dropout_rate=transformer_dec_attn_dropout_rate,
pos_enc_class=pos_enc_class,
normalize_before=decoder_normalize_before,
concat_after=decoder_concat_after,
positionwise_layer_type=positionwise_layer_type,
positionwise_conv_kernel_size=positionwise_conv_kernel_size,
)
elif decoder_type == "conformer":
self.decoder = ConformerEncoder(
idim=0,
attention_dim=adim,
attention_heads=aheads,
linear_units=dunits,
num_blocks=dlayers,
input_layer=None,
dropout_rate=transformer_dec_dropout_rate,
positional_dropout_rate=transformer_dec_positional_dropout_rate,
attention_dropout_rate=transformer_dec_attn_dropout_rate,
normalize_before=decoder_normalize_before,
concat_after=decoder_concat_after,
positionwise_layer_type=positionwise_layer_type,
positionwise_conv_kernel_size=positionwise_conv_kernel_size,
macaron_style=use_macaron_style_in_conformer,
pos_enc_layer_type=conformer_pos_enc_layer_type,
selfattention_layer_type=conformer_self_attn_layer_type,
activation_type=conformer_activation_type,
use_cnn_module=use_cnn_in_conformer,
cnn_module_kernel=conformer_dec_kernel_size,
)
else:
raise ValueError(f"{decoder_type} is not supported.")
# define hifigan generator
self.generator = HiFiGANGenerator(
in_channels=adim,
out_channels=generator_out_channels,
channels=generator_channels,
global_channels=generator_global_channels,
kernel_size=generator_kernel_size,
upsample_scales=generator_upsample_scales,
upsample_kernel_sizes=generator_upsample_kernel_sizes,
resblock_kernel_sizes=generator_resblock_kernel_sizes,
resblock_dilations=generator_resblock_dilations,
use_additional_convs=generator_use_additional_convs,
bias=generator_bias,
nonlinear_activation=generator_nonlinear_activation,
nonlinear_activation_params=generator_nonlinear_activation_params,
use_weight_norm=generator_use_weight_norm,
)
# initialize parameters
self._reset_parameters(
init_type=init_type,
init_enc_alpha=init_enc_alpha,
init_dec_alpha=init_dec_alpha,
)
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        pitch: torch.Tensor,
        pitch_lengths: torch.Tensor,
        energy: torch.Tensor,
        energy_lengths: torch.Tensor,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
    ) -> Tuple[
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
    ]:
        """Calculate forward propagation (training path).

        Pipeline: encode text -> optionally add style (GST), speaker-ID,
        language-ID and speaker-embedding conditioning -> align text with the
        reference features to obtain ground-truth durations and token-averaged
        pitch/energy -> embed the ground-truth pitch/energy (teacher forcing)
        -> upsample to frame rate -> decode -> vocode a random segment with
        the HiFiGAN generator.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            pitch (Tensor): Batch of padded token-averaged pitch (B, T_text, 1).
            pitch_lengths (LongTensor): Batch of pitch lengths (B, T_text).
            energy (Tensor): Batch of padded token-averaged energy (B, T_text, 1).
            energy_lengths (LongTensor): Batch of energy lengths (B, T_text).
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).

        Returns:
            Tensor: Waveform tensor (B, 1, segment_size * upsample_factor).
            Tensor: Binarization loss ().
            Tensor: Log probability attention matrix (B, T_feats, T_text).
            Tensor: Segments start index tensor (B,).
            Tensor: predicted duration (B, T_text).
            Tensor: ground-truth duration obtained from an alignment module (B, T_text).
            Tensor: predicted pitch (B, T_text,1).
            Tensor: ground-truth averaged pitch (B, T_text, 1).
            Tensor: predicted energy (B, T_text, 1).
            Tensor: ground-truth averaged energy (B, T_text, 1).

        """
        # strip padding beyond the longest item in the batch
        text = text[:, : text_lengths.max()]  # for data-parallel
        feats = feats[:, : feats_lengths.max()]  # for data-parallel
        pitch = pitch[:, : pitch_lengths.max()]  # for data-parallel
        energy = energy[:, : energy_lengths.max()]  # for data-parallel
        # forward encoder
        x_masks = self._source_mask(text_lengths)
        hs, _ = self.encoder(text, x_masks)  # (B, T_text, adim)
        # integrate with GST (global style token from the reference features)
        if self.use_gst:
            style_embs = self.gst(feats)
            hs = hs + style_embs.unsqueeze(1)
        # integrate with SID and LID embeddings (broadcast over time axis)
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)
        # forward alignment module and obtain duration, averaged pitch, energy
        h_masks = make_pad_mask(text_lengths).to(hs.device)
        log_p_attn = self.alignment_module(hs, feats, h_masks)
        ds, bin_loss = viterbi_decode(log_p_attn, text_lengths, feats_lengths)
        ps = average_by_duration(
            ds, pitch.squeeze(-1), text_lengths, feats_lengths
        ).unsqueeze(-1)
        es = average_by_duration(
            ds, energy.squeeze(-1), text_lengths, feats_lengths
        ).unsqueeze(-1)
        # forward duration predictor and variance predictors
        # (optionally detach hs so predictor gradients do not reach the encoder)
        if self.stop_gradient_from_pitch_predictor:
            p_outs = self.pitch_predictor(hs.detach(), h_masks.unsqueeze(-1))
        else:
            p_outs = self.pitch_predictor(hs, h_masks.unsqueeze(-1))
        if self.stop_gradient_from_energy_predictor:
            e_outs = self.energy_predictor(hs.detach(), h_masks.unsqueeze(-1))
        else:
            e_outs = self.energy_predictor(hs, h_masks.unsqueeze(-1))
        d_outs = self.duration_predictor(hs, h_masks)
        # use groundtruth in training
        # (predicted d_outs/p_outs/e_outs are returned only for loss computation)
        p_embs = self.pitch_embed(ps.transpose(1, 2)).transpose(1, 2)
        e_embs = self.energy_embed(es.transpose(1, 2)).transpose(1, 2)
        hs = hs + e_embs + p_embs
        # upsampling from token rate to frame rate via the length regulator
        h_masks = make_non_pad_mask(feats_lengths).to(hs.device)
        d_masks = make_non_pad_mask(text_lengths).to(ds.device)
        hs = self.length_regulator(hs, ds, h_masks, d_masks)  # (B, T_feats, adim)
        # forward decoder
        h_masks = self._source_mask(feats_lengths)
        zs, _ = self.decoder(hs, h_masks)  # (B, T_feats, adim)
        # get random segments
        # (only a short random window is vocoded to keep GAN training tractable)
        z_segments, z_start_idxs = get_random_segments(
            zs.transpose(1, 2),
            feats_lengths,
            self.segment_size,
        )
        # forward generator
        wav = self.generator(z_segments)
        return (
            wav,
            bin_loss,
            log_p_attn,
            z_start_idxs,
            d_outs,
            ds,
            p_outs,
            ps,
            e_outs,
            es,
        )
    def inference(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        feats_lengths: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        use_teacher_forcing: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run inference.

        Same conditioning path as :meth:`forward`, but duration / pitch /
        energy are either predicted from text (default) or extracted from the
        reference ``feats`` when ``use_teacher_forcing`` is set, and the
        whole utterance (not a random segment) is vocoded.

        Args:
            text (Tensor): Input text index tensor (B, T_text,).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
                Required when ``use_teacher_forcing`` or ``self.use_gst``.
            feats_lengths (Tensor): Feature length tensor (B,).
            pitch (Tensor): Pitch tensor (B, T_feats, 1). Used only with
                teacher forcing.
            energy (Tensor): Energy tensor (B, T_feats, 1). Used only with
                teacher forcing.
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
            use_teacher_forcing (bool): Whether to use teacher forcing.

        Returns:
            Tensor: Generated waveform tensor (B, T_wav).
            Tensor: Duration tensor (B, T_text).

        """
        # forward encoder
        x_masks = self._source_mask(text_lengths)
        hs, _ = self.encoder(text, x_masks)  # (B, T_text, adim)
        # integrate with GST
        if self.use_gst:
            style_embs = self.gst(feats)
            hs = hs + style_embs.unsqueeze(1)
        # integrate with SID and LID embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)
        h_masks = make_pad_mask(text_lengths).to(hs.device)
        if use_teacher_forcing:
            # obtain ground-truth duration / pitch / energy via the alignment
            # module from the reference features
            log_p_attn = self.alignment_module(hs, feats, h_masks)
            d_outs, _ = viterbi_decode(log_p_attn, text_lengths, feats_lengths)
            p_outs = average_by_duration(
                d_outs, pitch.squeeze(-1), text_lengths, feats_lengths
            ).unsqueeze(-1)
            e_outs = average_by_duration(
                d_outs, energy.squeeze(-1), text_lengths, feats_lengths
            ).unsqueeze(-1)
        else:
            # predict duration / pitch / energy from the text encoding alone
            p_outs = self.pitch_predictor(hs, h_masks.unsqueeze(-1))
            e_outs = self.energy_predictor(hs, h_masks.unsqueeze(-1))
            d_outs = self.duration_predictor.inference(hs, h_masks)
        p_embs = self.pitch_embed(p_outs.transpose(1, 2)).transpose(1, 2)
        e_embs = self.energy_embed(e_outs.transpose(1, 2)).transpose(1, 2)
        hs = hs + e_embs + p_embs
        # upsampling (masks only available when feats_lengths is given)
        if feats_lengths is not None:
            h_masks = make_non_pad_mask(feats_lengths).to(hs.device)
        else:
            h_masks = None
        d_masks = make_non_pad_mask(text_lengths).to(d_outs.device)
        hs = self.length_regulator(hs, d_outs, h_masks, d_masks)  # (B, T_feats, adim)
        # forward decoder
        if feats_lengths is not None:
            h_masks = self._source_mask(feats_lengths)
        else:
            h_masks = None
        zs, _ = self.decoder(hs, h_masks)  # (B, T_feats, adim)
        # forward generator over the full utterance
        wav = self.generator(zs.transpose(1, 2))
        return wav.squeeze(1), d_outs
def _integrate_with_spk_embed(
self, hs: torch.Tensor, spembs: torch.Tensor
) -> torch.Tensor:
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, T_text, adim).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, T_text, adim).
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds and then apply projection
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
def _source_mask(self, ilens: torch.Tensor) -> torch.Tensor:
"""Make masks for self-attention.
Args:
ilens (LongTensor): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for self-attention.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
>>> ilens = [5, 3]
>>> self._source_mask(ilens)
tensor([[[1, 1, 1, 1, 1],
[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
return x_masks.unsqueeze(-2)
def _reset_parameters(
self, init_type: str, init_enc_alpha: float, init_dec_alpha: float
):
# initialize parameters
if init_type != "pytorch":
initialize(self, init_type)
# initialize alpha in scaled positional encoding
if self.encoder_type == "transformer" and self.use_scaled_pos_enc:
self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
if self.decoder_type == "transformer" and self.use_scaled_pos_enc:
self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
| 35,386 | 43.850444 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/jets/length_regulator.py | # Copyright 2022 Dan Lim
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import logging
import torch
class GaussianUpsampling(torch.nn.Module):
    """Gaussian upsampling with fixed temperature as in:

    https://arxiv.org/abs/2010.04301

    """

    def __init__(self, delta=0.1):
        """Initialize GaussianUpsampling module.

        Args:
            delta (float): Fixed temperature of the Gaussian kernel.

        """
        super().__init__()
        self.delta = delta

    def forward(self, hs, ds, h_masks=None, d_masks=None):
        """Upsample hidden states according to durations.

        Args:
            hs (Tensor): Batched hidden state to be expanded (B, T_text, adim).
            ds (Tensor): Batched token duration (B, T_text).
            h_masks (Tensor): Mask tensor (B, T_feats).
            d_masks (Tensor): Mask tensor (B, T_text).

        Returns:
            Tensor: Expanded hidden state (B, T_feat, adim).

        """
        batch = ds.size(0)
        device = ds.device

        if ds.sum() == 0:
            logging.warning(
                "predicted durations includes all 0 sequences. "
                "fill the first element with 1."
            )
            # NOTE(kan-bayashi): This case must not be happened in teacher forcing.
            #   It will be happened in inference with a bad duration predictor.
            #   So we do not need to care the padded sequence case here.
            ds[ds.sum(dim=1).eq(0)] = 1

        # total number of output frames
        T_feats = ds.sum().int() if h_masks is None else h_masks.size(-1)

        # frame-index grid (B, T_feats), zeroed on padded frames
        t = torch.arange(0, T_feats).unsqueeze(0).repeat(batch, 1).to(device).float()
        if h_masks is not None:
            t = t * h_masks.float()

        # center position of each token along the frame axis
        c = ds.cumsum(dim=-1) - ds / 2

        # Gaussian-shaped log-energy between every frame and every token center
        energy = -1 * self.delta * (t.unsqueeze(-1) - c.unsqueeze(1)) ** 2
        if d_masks is not None:
            # padded tokens must never receive attention mass
            energy = energy.masked_fill(
                ~(d_masks.unsqueeze(1).repeat(1, T_feats, 1)), -float("inf")
            )

        p_attn = torch.softmax(energy, dim=2)  # (B, T_feats, T_text)
        return torch.matmul(p_attn, hs)
| 2,017 | 30.53125 | 83 | py |
espnet | espnet-master/espnet2/gan_tts/melgan/residual_stack.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Residual stack module in MelGAN.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
from typing import Any, Dict
import torch
class ResidualStack(torch.nn.Module):
    """Residual stack module introduced in MelGAN."""

    def __init__(
        self,
        kernel_size: int = 3,
        channels: int = 32,
        dilation: int = 1,
        bias: bool = True,
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.2},
        pad: str = "ReflectionPad1d",
        pad_params: Dict[str, Any] = {},
    ):
        """Initialize ResidualStack module.

        Args:
            kernel_size (int): Kernel size of dilation convolution layer.
            channels (int): Number of channels of convolution layers.
            dilation (int): Dilation factor.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for
                activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (Dict[str, Any]): Hyperparameters for padding function.

        """
        super().__init__()
        assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."

        activation_cls = getattr(torch.nn, nonlinear_activation)
        pad_cls = getattr(torch.nn, pad)

        # residual branch: act -> pad -> dilated conv -> act -> 1x1 conv
        self.stack = torch.nn.Sequential(
            activation_cls(**nonlinear_activation_params),
            pad_cls((kernel_size - 1) // 2 * dilation, **pad_params),
            torch.nn.Conv1d(
                channels, channels, kernel_size, dilation=dilation, bias=bias
            ),
            activation_cls(**nonlinear_activation_params),
            torch.nn.Conv1d(channels, channels, 1, bias=bias),
        )

        # 1x1 conv on the skip connection
        self.skip_layer = torch.nn.Conv1d(channels, channels, 1, bias=bias)

    def forward(self, c: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, channels, T).

        Returns:
            Tensor: Output tensor (B, channels, T).

        """
        return self.skip_layer(c) + self.stack(c)
| 2,464 | 33.71831 | 85 | py |
espnet | espnet-master/espnet2/gan_tts/melgan/pqmf.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Pseudo QMF modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
import numpy as np
import torch
import torch.nn.functional as F
from scipy.signal import kaiser
def design_prototype_filter(
    taps: int = 62, cutoff_ratio: float = 0.142, beta: float = 9.0
) -> np.ndarray:
    """Design prototype filter for PQMF.

    This method is based on `A Kaiser window approach for the design of prototype
    filters of cosine modulated filterbanks`_.

    Args:
        taps (int): The number of filter taps (must be even).
        cutoff_ratio (float): Cut-off frequency ratio in (0, 1).
        beta (float): Beta coefficient for kaiser window.

    Returns:
        ndarray: Impulse response of prototype filter (taps + 1,).

    .. _`A Kaiser window approach for the design of prototype filters of cosine
        modulated filterbanks`: https://ieeexplore.ieee.org/abstract/document/681427

    """
    # check the arguments are valid
    assert taps % 2 == 0, "The number of taps must be even number."
    assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."

    # NOTE: `scipy.signal.kaiser` was removed in SciPy 1.13; the window now
    # lives in `scipy.signal.windows`. Import locally with a fallback so this
    # works on both old and new SciPy versions.
    try:
        from scipy.signal.windows import kaiser
    except ImportError:  # pragma: no cover - old SciPy
        from scipy.signal import kaiser

    # ideal lowpass impulse response (sinc), centered at taps / 2
    omega_c = np.pi * cutoff_ratio
    with np.errstate(invalid="ignore"):
        h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) / (
            np.pi * (np.arange(taps + 1) - 0.5 * taps)
        )
    h_i[taps // 2] = np.cos(0) * cutoff_ratio  # fix nan due to indeterminate form

    # apply kaiser window
    w = kaiser(taps + 1, beta)
    h = h_i * w

    return h
class PQMF(torch.nn.Module):
    """PQMF module.

    This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.

    .. _`Near-perfect-reconstruction pseudo-QMF banks`:
        https://ieeexplore.ieee.org/document/258122

    """

    def __init__(
        self,
        subbands: int = 4,
        taps: int = 62,
        cutoff_ratio: float = 0.142,
        beta: float = 9.0,
    ):
        """Initialize PQMF module.

        The cutoff_ratio and beta parameters are optimized for #subbands = 4.
        See discussion in https://github.com/kan-bayashi/ParallelWaveGAN/issues/195.

        Args:
            subbands (int): The number of subbands.
            taps (int): The number of filter taps.
            cutoff_ratio (float): Cut-off frequency ratio.
            beta (float): Beta coefficient for kaiser window.

        """
        super().__init__()
        # build analysis & synthesis filter coefficients by cosine-modulating
        # the shared lowpass prototype filter (one modulation per subband)
        h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
        h_analysis = np.zeros((subbands, len(h_proto)))
        h_synthesis = np.zeros((subbands, len(h_proto)))
        for k in range(subbands):
            # analysis/synthesis pair differ only in the sign of the phase term
            h_analysis[k] = (
                2
                * h_proto
                * np.cos(
                    (2 * k + 1)
                    * (np.pi / (2 * subbands))
                    * (np.arange(taps + 1) - (taps / 2))
                    + (-1) ** k * np.pi / 4
                )
            )
            h_synthesis[k] = (
                2
                * h_proto
                * np.cos(
                    (2 * k + 1)
                    * (np.pi / (2 * subbands))
                    * (np.arange(taps + 1) - (taps / 2))
                    - (-1) ** k * np.pi / 4
                )
            )
        # convert to tensor
        # analysis: (subbands, 1, taps + 1) conv weight;
        # synthesis: (1, subbands, taps + 1) conv weight
        analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1)
        synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0)
        # register coefficients as buffers (saved/moved with the module,
        # but never trained)
        self.register_buffer("analysis_filter", analysis_filter)
        self.register_buffer("synthesis_filter", synthesis_filter)
        # filter for downsampling & upsampling
        # (a per-subband identity kernel; conv with stride=subbands then
        # keeps/inserts every subbands-th sample)
        updown_filter = torch.zeros((subbands, subbands, subbands)).float()
        for k in range(subbands):
            updown_filter[k, k, 0] = 1.0
        self.register_buffer("updown_filter", updown_filter)
        self.subbands = subbands
        # keep padding info
        self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)

    def analysis(self, x: torch.Tensor) -> torch.Tensor:
        """Analysis with PQMF.

        Args:
            x (Tensor): Input tensor (B, 1, T).

        Returns:
            Tensor: Output tensor (B, subbands, T // subbands).

        """
        # filter into subbands, then decimate by the number of subbands
        x = F.conv1d(self.pad_fn(x), self.analysis_filter)
        return F.conv1d(x, self.updown_filter, stride=self.subbands)

    def synthesis(self, x: torch.Tensor) -> torch.Tensor:
        """Synthesis with PQMF.

        Args:
            x (Tensor): Input tensor (B, subbands, T // subbands).

        Returns:
            Tensor: Output tensor (B, 1, T).

        """
        # NOTE(kan-bayashi): Power will be decreased so here multiply by # subbands.
        #   Not sure this is the correct way, it is better to check again.
        # TODO(kan-bayashi): Understand the reconstruction procedure
        x = F.conv_transpose1d(
            x, self.updown_filter * self.subbands, stride=self.subbands
        )
        return F.conv1d(self.pad_fn(x), self.synthesis_filter)
| 5,141 | 31.1375 | 84 | py |
espnet | espnet-master/espnet2/gan_tts/melgan/melgan.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""MelGAN Modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
import logging
from typing import Any, Dict, List
import numpy as np
import torch
from espnet2.gan_tts.melgan.residual_stack import ResidualStack
class MelGANGenerator(torch.nn.Module):
    """MelGAN generator module."""

    def __init__(
        self,
        in_channels: int = 80,
        out_channels: int = 1,
        kernel_size: int = 7,
        channels: int = 512,
        bias: bool = True,
        upsample_scales: List[int] = [8, 8, 2, 2],
        stack_kernel_size: int = 3,
        stacks: int = 3,
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.2},
        pad: str = "ReflectionPad1d",
        pad_params: Dict[str, Any] = {},
        use_final_nonlinear_activation: bool = True,
        use_weight_norm: bool = True,
    ):
        """Initialize MelGANGenerator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of initial and final conv layer.
            channels (int): Initial number of channels for conv layer.
            bias (bool): Whether to add bias parameter in convolution layers.
            upsample_scales (List[int]): List of upsampling scales.
            stack_kernel_size (int): Kernel size of dilated conv layers in residual
                stack.
            stacks (int): Number of stacks in a single residual stack.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
                function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (Dict[str, Any]): Hyperparameters for padding function.
            use_final_nonlinear_activation (torch.nn.Module): Activation function for
                the final layer.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.

        """
        super().__init__()

        # check hyper parameters is valid
        assert channels >= np.prod(upsample_scales)
        assert channels % (2 ** len(upsample_scales)) == 0
        assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."

        # add initial layer
        layers = []
        layers += [
            getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
            torch.nn.Conv1d(in_channels, channels, kernel_size, bias=bias),
        ]

        # NOTE(review): the factor `out_channels` looks intended for multi-band
        # output (e.g. PQMF subbands counting toward the effective upsampling)
        # -- confirm against callers before relying on it.
        self.upsample_factor = int(np.prod(upsample_scales) * out_channels)
        for i, upsample_scale in enumerate(upsample_scales):
            # add upsampling layer; channels are halved at each stage
            layers += [
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)
            ]
            layers += [
                torch.nn.ConvTranspose1d(
                    channels // (2**i),
                    channels // (2 ** (i + 1)),
                    upsample_scale * 2,
                    stride=upsample_scale,
                    # padding/output_padding chosen so the output length is
                    # exactly input_length * upsample_scale for even and odd scales
                    padding=upsample_scale // 2 + upsample_scale % 2,
                    output_padding=upsample_scale % 2,
                    bias=bias,
                )
            ]

            # add residual stack with exponentially growing dilation
            for j in range(stacks):
                layers += [
                    ResidualStack(
                        kernel_size=stack_kernel_size,
                        channels=channels // (2 ** (i + 1)),
                        dilation=stack_kernel_size**j,
                        bias=bias,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                        pad=pad,
                        pad_params=pad_params,
                    )
                ]

        # add final layer
        # (`i` is the last loop index, so `channels // (2 ** (i + 1))` is the
        # channel count after the final upsampling block)
        layers += [
            getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)
        ]
        layers += [
            getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
            torch.nn.Conv1d(
                channels // (2 ** (i + 1)), out_channels, kernel_size, bias=bias
            ),
        ]
        if use_final_nonlinear_activation:
            layers += [torch.nn.Tanh()]

        # define the model as a single function
        self.melgan = torch.nn.Sequential(*layers)

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

    def forward(self, c: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, channels, T).

        Returns:
            Tensor: Output tensor (B, 1, T ** prod(upsample_scales)).

        """
        return self.melgan(c)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m: torch.nn.Module):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows official implementation manner.
        https://github.com/descriptinc/melgan-neurips/blob/master/mel2wav/modules.py

        """

        def _reset_parameters(m):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)

    def inference(self, c: torch.Tensor) -> torch.Tensor:
        """Perform inference.

        Args:
            c (Tensor): Input tensor (T, in_channels).

        Returns:
            Tensor: Output tensor (T ** prod(upsample_scales), out_channels).

        """
        # (T, in_channels) -> (1, in_channels, T) -> generator -> (T', out_channels)
        c = self.melgan(c.transpose(1, 0).unsqueeze(0))
        return c.squeeze(0).transpose(1, 0)
class MelGANDiscriminator(torch.nn.Module):
    """MelGAN discriminator module."""

    def __init__(
        self,
        in_channels: int = 1,
        out_channels: int = 1,
        kernel_sizes: List[int] = [5, 3],
        channels: int = 16,
        max_downsample_channels: int = 1024,
        bias: bool = True,
        downsample_scales: List[int] = [4, 4, 4, 4],
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.2},
        pad: str = "ReflectionPad1d",
        pad_params: Dict[str, Any] = {},
    ):
        """Initialize MelGANDiscriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_sizes (List[int]): List of two kernel sizes. The prod will be used
                for the first conv layer, and the first and the second kernel sizes
                will be used for the last two layers. For example if kernel_sizes =
                [5, 3], the first layer kernel size will be 5 * 3 = 15, the last two
                layers' kernel size will be 5 and 3, respectively.
            channels (int): Initial number of channels for conv layer.
            max_downsample_channels (int): Maximum number of channels for downsampling
                layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (List[int]): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for
                activation function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (Dict[str, Any]): Hyperparameters for padding function.

        """
        super().__init__()
        self.layers = torch.nn.ModuleList()

        # sanity-check kernel sizes (both must be odd for symmetric padding)
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1
        assert kernel_sizes[1] % 2 == 1

        activation_cls = getattr(torch.nn, nonlinear_activation)
        pad_cls = getattr(torch.nn, pad)

        # first layer: large receptive field via the product of kernel sizes
        init_kernel = int(np.prod(kernel_sizes))
        self.layers.append(
            torch.nn.Sequential(
                pad_cls((init_kernel - 1) // 2, **pad_params),
                torch.nn.Conv1d(in_channels, channels, init_kernel, bias=bias),
                activation_cls(**nonlinear_activation_params),
            )
        )

        # downsampling layers (grouped strided convolutions)
        in_chs = channels
        for scale in downsample_scales:
            out_chs = min(in_chs * scale, max_downsample_channels)
            self.layers.append(
                torch.nn.Sequential(
                    torch.nn.Conv1d(
                        in_chs,
                        out_chs,
                        kernel_size=scale * 10 + 1,
                        stride=scale,
                        padding=scale * 5,
                        groups=in_chs // 4,
                        bias=bias,
                    ),
                    activation_cls(**nonlinear_activation_params),
                )
            )
            in_chs = out_chs

        # two final layers: conv + activation, then a plain conv head
        out_chs = min(in_chs * 2, max_downsample_channels)
        self.layers.append(
            torch.nn.Sequential(
                torch.nn.Conv1d(
                    in_chs,
                    out_chs,
                    kernel_sizes[0],
                    padding=(kernel_sizes[0] - 1) // 2,
                    bias=bias,
                ),
                activation_cls(**nonlinear_activation_params),
            )
        )
        self.layers.append(
            torch.nn.Conv1d(
                out_chs,
                out_channels,
                kernel_sizes[1],
                padding=(kernel_sizes[1] - 1) // 2,
                bias=bias,
            )
        )

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List[Tensor]: List of output tensors of each layer
                (intermediate feature maps plus the final score map).

        """
        feats = []
        h = x
        for layer in self.layers:
            h = layer(h)
            feats.append(h)
        return feats
class MelGANMultiScaleDiscriminator(torch.nn.Module):
    """MelGAN multi-scale discriminator module.

    Runs several identically-configured :class:`MelGANDiscriminator` instances,
    each one on a progressively downsampled copy of the input waveform.
    """

    def __init__(
        self,
        in_channels: int = 1,
        out_channels: int = 1,
        scales: int = 3,
        downsample_pooling: str = "AvgPool1d",
        # follow the official implementation setting
        downsample_pooling_params: Dict[str, Any] = {
            "kernel_size": 4,
            "stride": 2,
            "padding": 1,
            "count_include_pad": False,
        },
        kernel_sizes: List[int] = [5, 3],
        channels: int = 16,
        max_downsample_channels: int = 1024,
        bias: bool = True,
        downsample_scales: List[int] = [4, 4, 4, 4],
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.2},
        pad: str = "ReflectionPad1d",
        pad_params: Dict[str, Any] = {},
        use_weight_norm: bool = True,
    ):
        """Initialize MelGANMultiScaleDiscriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            scales (int): Number of multi-scales.
            downsample_pooling (str): Pooling module name for downsampling of the
                inputs.
            downsample_pooling_params (Dict[str, Any]): Parameters for the above
                pooling module.
            kernel_sizes (List[int]): List of two kernel sizes. The sum will be used
                for the first conv layer, and the first and the second kernel sizes
                will be used for the last two layers.
            channels (int): Initial number of channels for conv layer.
            max_downsample_channels (int): Maximum number of channels for downsampling
                layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (List[int]): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
                function.
            pad (str): Padding function module name before dilated convolution layer.
            pad_params (Dict[str, Any]): Hyperparameters for padding function.
            use_weight_norm (bool): Whether to use weight norm.
        """
        super().__init__()
        # one identically-configured sub-discriminator per scale; during forward
        # each one sees the input downsampled one more time than the previous one
        self.discriminators = torch.nn.ModuleList(
            MelGANDiscriminator(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_sizes=kernel_sizes,
                channels=channels,
                max_downsample_channels=max_downsample_channels,
                bias=bias,
                downsample_scales=downsample_scales,
                nonlinear_activation=nonlinear_activation,
                nonlinear_activation_params=nonlinear_activation_params,
                pad=pad,
                pad_params=pad_params,
            )
            for _ in range(scales)
        )
        pooling_cls = getattr(torch.nn, downsample_pooling)
        self.pooling = pooling_cls(**downsample_pooling_params)

        # apply weight norm before the parameter reset, matching the original order
        if use_weight_norm:
            self.apply_weight_norm()
        self.reset_parameters()

    def forward(self, x: torch.Tensor) -> List[List[torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List[List[Tensor]]: Per-discriminator lists of layer output tensors.
        """
        all_outs: List[List[torch.Tensor]] = []
        for discriminator in self.discriminators:
            all_outs.append(discriminator(x))
            # downsample for the next (coarser) scale
            x = self.pooling(x)
        return all_outs

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m: torch.nn.Module):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:
                # this module didn't have weight norm: nothing to do
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows official implementation manner.
        https://github.com/descriptinc/melgan-neurips/blob/master/mel2wav/modules.py
        """

        def _reset_parameters(m: torch.nn.Module):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)
| 16,694 | 35.058315 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/wavenet/wavenet.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""WaveNet modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
import logging
import math
from typing import Optional
import torch
from espnet2.gan_tts.wavenet.residual_block import Conv1d1x1, ResidualBlock
class WaveNet(torch.nn.Module):
    """WaveNet with global conditioning."""
    def __init__(
        self,
        in_channels: int = 1,
        out_channels: int = 1,
        kernel_size: int = 3,
        layers: int = 30,
        stacks: int = 3,
        base_dilation: int = 2,
        residual_channels: int = 64,
        aux_channels: int = -1,
        gate_channels: int = 128,
        skip_channels: int = 64,
        global_channels: int = -1,
        dropout_rate: float = 0.0,
        bias: bool = True,
        use_weight_norm: bool = True,
        use_first_conv: bool = False,
        use_last_conv: bool = False,
        scale_residual: bool = False,
        scale_skip_connect: bool = False,
    ):
        """Initialize WaveNet module.
        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of dilated convolution.
            layers (int): Number of residual block layers.
            stacks (int): Number of stacks i.e., dilation cycles.
            base_dilation (int): Base dilation factor.
            residual_channels (int): Number of channels in residual conv.
            gate_channels (int): Number of channels in gated conv.
            skip_channels (int): Number of channels in skip conv.
            aux_channels (int): Number of channels for local conditioning feature.
            global_channels (int): Number of channels for global conditioning feature.
            dropout_rate (float): Dropout rate. 0.0 means no dropout applied.
            bias (bool): Whether to use bias parameter in conv layer.
            use_weight_norm (bool): Whether to use weight norm. If set to true, it will
                be applied to all of the conv layers.
            use_first_conv (bool): Whether to use the first conv layers.
            use_last_conv (bool): Whether to use the last conv layers.
            scale_residual (bool): Whether to scale the residual outputs.
            scale_skip_connect (bool): Whether to scale the skip connection outputs.
        """
        super().__init__()
        self.layers = layers
        self.stacks = stacks
        self.kernel_size = kernel_size
        self.base_dilation = base_dilation
        self.use_first_conv = use_first_conv
        self.use_last_conv = use_last_conv
        self.scale_skip_connect = scale_skip_connect
        # check the number of layers and stacks
        assert layers % stacks == 0
        layers_per_stack = layers // stacks
        # define first convolution
        # (maps the raw input to residual_channels; skipped when the caller
        # already provides features with residual_channels channels)
        if self.use_first_conv:
            self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True)
        # define residual blocks
        # dilation restarts from base_dilation**0 at the top of every stack,
        # giving the classic cyclic dilation pattern 1, b, b^2, ..., 1, b, ...
        self.conv_layers = torch.nn.ModuleList()
        for layer in range(layers):
            dilation = base_dilation ** (layer % layers_per_stack)
            conv = ResidualBlock(
                kernel_size=kernel_size,
                residual_channels=residual_channels,
                gate_channels=gate_channels,
                skip_channels=skip_channels,
                aux_channels=aux_channels,
                global_channels=global_channels,
                dilation=dilation,
                dropout_rate=dropout_rate,
                bias=bias,
                scale_residual=scale_residual,
            )
            self.conv_layers += [conv]
        # define output layers
        if self.use_last_conv:
            self.last_conv = torch.nn.Sequential(
                torch.nn.ReLU(inplace=True),
                Conv1d1x1(skip_channels, skip_channels, bias=True),
                torch.nn.ReLU(inplace=True),
                Conv1d1x1(skip_channels, out_channels, bias=True),
            )
        # apply weight norm
        # (must run after every conv submodule above has been created)
        if use_weight_norm:
            self.apply_weight_norm()
    def forward(
        self,
        x: torch.Tensor,
        x_mask: Optional[torch.Tensor] = None,
        c: Optional[torch.Tensor] = None,
        g: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Calculate forward propagation.
        Args:
            x (Tensor): Input noise signal (B, 1, T) if use_first_conv else
                (B, residual_channels, T).
            x_mask (Optional[Tensor]): Mask tensor (B, 1, T).
            c (Optional[Tensor]): Local conditioning features (B, aux_channels, T).
            g (Optional[Tensor]): Global conditioning features (B, global_channels, 1).
        Returns:
            Tensor: Output tensor (B, out_channels, T) if use_last_conv else
                (B, residual_channels, T).
        """
        # encode to hidden representation
        if self.use_first_conv:
            x = self.first_conv(x)
        # residual block
        # skips accumulates the skip-connection output of every block; x is the
        # residual path that flows from one block to the next
        skips = 0.0
        for f in self.conv_layers:
            x, h = f(x, x_mask=x_mask, c=c, g=g)
            skips = skips + h
        x = skips
        if self.scale_skip_connect:
            # keep the variance of the summed skips independent of depth
            x = x * math.sqrt(1.0 / len(self.conv_layers))
        # apply final layers
        if self.use_last_conv:
            x = self.last_conv(x)
        return x
    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""
        def _remove_weight_norm(m: torch.nn.Module):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return
        self.apply(_remove_weight_norm)
    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""
        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")
        self.apply(_apply_weight_norm)
    @staticmethod
    def _get_receptive_field_size(
        layers: int,
        stacks: int,
        kernel_size: int,
        base_dilation: int,
    ) -> int:
        # receptive field of stacked dilated convs: each layer widens the field
        # by (kernel_size - 1) * dilation samples, plus 1 for the center sample
        assert layers % stacks == 0
        layers_per_cycle = layers // stacks
        dilations = [base_dilation ** (i % layers_per_cycle) for i in range(layers)]
        return (kernel_size - 1) * sum(dilations) + 1
    @property
    def receptive_field_size(self) -> int:
        """Return receptive field size."""
        return self._get_receptive_field_size(
            self.layers, self.stacks, self.kernel_size, self.base_dilation
        )
| 6,901 | 34.394872 | 87 | py |
espnet | espnet-master/espnet2/gan_tts/wavenet/residual_block.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Residual block modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
import math
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
class Conv1d(torch.nn.Conv1d):
    """``torch.nn.Conv1d`` with Kaiming (He) weight initialization.

    The stock layer draws weights uniformly; this subclass overrides
    ``reset_parameters`` so weights follow a Kaiming normal distribution
    (tuned for ReLU activations) and the bias starts at zero.
    """

    def __init__(self, *args, **kwargs):
        """Initialize Conv1d module."""
        super().__init__(*args, **kwargs)

    def reset_parameters(self):
        """Re-initialize weights (Kaiming normal) and zero the bias, if any."""
        torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu")
        if self.bias is None:
            return
        torch.nn.init.constant_(self.bias, 0.0)
class Conv1d1x1(Conv1d):
    """1x1 Conv1d with customized initialization.

    A pointwise (kernel_size=1) convolution used for channel mixing; it
    inherits the Kaiming weight / zero bias initialization from ``Conv1d``.
    """
    def __init__(self, in_channels: int, out_channels: int, bias: bool):
        """Initialize 1x1 Conv1d module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            bias (bool): Whether to add a bias parameter.
        """
        super().__init__(
            in_channels, out_channels, kernel_size=1, padding=0, dilation=1, bias=bias
        )
class ResidualBlock(torch.nn.Module):
    """Residual block module in WaveNet.

    Implements a dilated conv followed by a gated tanh/sigmoid activation
    with optional local and global conditioning, producing both a residual
    output and a skip-connection output.
    """
    def __init__(
        self,
        kernel_size: int = 3,
        residual_channels: int = 64,
        gate_channels: int = 128,
        skip_channels: int = 64,
        aux_channels: int = 80,
        global_channels: int = -1,
        dropout_rate: float = 0.0,
        dilation: int = 1,
        bias: bool = True,
        scale_residual: bool = False,
    ):
        """Initialize ResidualBlock module.
        Args:
            kernel_size (int): Kernel size of dilation convolution layer.
            residual_channels (int): Number of channels for residual connection.
            gate_channels (int): Number of channels for the gated conv
                (split in half for the tanh and sigmoid branches; must be even).
            skip_channels (int): Number of channels for skip connection.
            aux_channels (int): Number of local conditioning channels
                (<= 0 disables local conditioning).
            global_channels (int): Number of global conditioning channels
                (<= 0 disables global conditioning).
            dropout_rate (float): Dropout probability.
            dilation (int): Dilation factor.
            bias (bool): Whether to add bias parameter in convolution layers.
            scale_residual (bool): Whether to scale the residual outputs.
        """
        super().__init__()
        self.dropout_rate = dropout_rate
        self.residual_channels = residual_channels
        self.skip_channels = skip_channels
        self.scale_residual = scale_residual
        # check
        assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
        assert gate_channels % 2 == 0
        # dilation conv
        # "same" padding for an odd kernel with the given dilation
        padding = (kernel_size - 1) // 2 * dilation
        self.conv = Conv1d(
            residual_channels,
            gate_channels,
            kernel_size,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        # local conditioning
        if aux_channels > 0:
            self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False)
        else:
            self.conv1x1_aux = None
        # global conditioning
        if global_channels > 0:
            self.conv1x1_glo = Conv1d1x1(global_channels, gate_channels, bias=False)
        else:
            self.conv1x1_glo = None
        # conv output is split into two groups
        gate_out_channels = gate_channels // 2
        # NOTE(kan-bayashi): concat two convs into a single conv for the efficiency
        # (integrate res 1x1 + skip 1x1 convs)
        self.conv1x1_out = Conv1d1x1(
            gate_out_channels, residual_channels + skip_channels, bias=bias
        )
    def forward(
        self,
        x: torch.Tensor,
        x_mask: Optional[torch.Tensor] = None,
        c: Optional[torch.Tensor] = None,
        g: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, residual_channels, T).
            x_mask Optional[torch.Tensor]: Mask tensor (B, 1, T).
            c (Optional[Tensor]): Local conditioning tensor (B, aux_channels, T).
            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).
        Returns:
            Tensor: Output tensor for residual connection (B, residual_channels, T).
            Tensor: Output tensor for skip connection (B, skip_channels, T).
        """
        residual = x
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        x = self.conv(x)
        # split into two part for gated activation
        splitdim = 1
        xa, xb = x.split(x.size(splitdim) // 2, dim=splitdim)
        # local conditioning
        # (conditioning is added to both gate branches before the nonlinearity)
        if c is not None:
            c = self.conv1x1_aux(c)
            ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim)
            xa, xb = xa + ca, xb + cb
        # global conditioning
        if g is not None:
            g = self.conv1x1_glo(g)
            ga, gb = g.split(g.size(splitdim) // 2, dim=splitdim)
            xa, xb = xa + ga, xb + gb
        # gated activation unit: tanh "filter" branch times sigmoid "gate" branch
        x = torch.tanh(xa) * torch.sigmoid(xb)
        # residual + skip 1x1 conv
        x = self.conv1x1_out(x)
        if x_mask is not None:
            x = x * x_mask
        # split integrated conv results
        x, s = x.split([self.residual_channels, self.skip_channels], dim=1)
        # for residual connection
        x = x + residual
        if self.scale_residual:
            # keep variance stable when residuals are summed across layers
            x = x * math.sqrt(0.5)
        return x, s
| 5,352 | 30.863095 | 86 | py |
espnet | espnet-master/espnet2/gan_tts/joint/joint_text2wav.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Joint text-to-wav module for end-to-end training."""
from typing import Any, Dict
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.gan_tts.hifigan import (
HiFiGANGenerator,
HiFiGANMultiPeriodDiscriminator,
HiFiGANMultiScaleDiscriminator,
HiFiGANMultiScaleMultiPeriodDiscriminator,
HiFiGANPeriodDiscriminator,
HiFiGANScaleDiscriminator,
)
from espnet2.gan_tts.hifigan.loss import (
DiscriminatorAdversarialLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
MelSpectrogramLoss,
)
from espnet2.gan_tts.melgan import MelGANGenerator, MelGANMultiScaleDiscriminator
from espnet2.gan_tts.melgan.pqmf import PQMF
from espnet2.gan_tts.parallel_wavegan import (
ParallelWaveGANDiscriminator,
ParallelWaveGANGenerator,
)
from espnet2.gan_tts.style_melgan import StyleMelGANDiscriminator, StyleMelGANGenerator
from espnet2.gan_tts.utils import get_random_segments, get_segments
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.tts.fastspeech import FastSpeech
from espnet2.tts.fastspeech2 import FastSpeech2
from espnet2.tts.tacotron2 import Tacotron2
from espnet2.tts.transformer import Transformer
# Registry of text2mel (acoustic) model classes selectable by name
# via JointText2Wav's ``text2mel_type`` argument.
AVAILABLE_TEXT2MEL = {
    "tacotron2": Tacotron2,
    "transformer": Transformer,
    "fastspeech": FastSpeech,
    "fastspeech2": FastSpeech2,
}
# Registry of vocoder generator classes selectable by name
# via JointText2Wav's ``vocoder_type`` argument.
AVAILABLE_VOCODER = {
    "hifigan_generator": HiFiGANGenerator,
    "melgan_generator": MelGANGenerator,
    "parallel_wavegan_generator": ParallelWaveGANGenerator,
    "style_melgan_generator": StyleMelGANGenerator,
}
# Registry of discriminator classes selectable by name
# via JointText2Wav's ``discriminator_type`` argument.
AVAILABLE_DISCRIMINATORS = {
    "hifigan_period_discriminator": HiFiGANPeriodDiscriminator,
    "hifigan_scale_discriminator": HiFiGANScaleDiscriminator,
    "hifigan_multi_period_discriminator": HiFiGANMultiPeriodDiscriminator,
    "hifigan_multi_scale_discriminator": HiFiGANMultiScaleDiscriminator,
    "hifigan_multi_scale_multi_period_discriminator": HiFiGANMultiScaleMultiPeriodDiscriminator,  # NOQA
    "melgan_multi_scale_discriminator": MelGANMultiScaleDiscriminator,
    "parallel_wavegan_discriminator": ParallelWaveGANDiscriminator,
    "style_melgan_discriminator": StyleMelGANDiscriminator,
}
class JointText2Wav(AbsGANTTS):
    """General class to jointly train text2mel and vocoder parts."""
    def __init__(
        self,
        # generator (text2mel + vocoder) related
        idim: int,
        odim: int,
        segment_size: int = 32,
        sampling_rate: int = 22050,
        text2mel_type: str = "fastspeech2",
        text2mel_params: Dict[str, Any] = {
            "adim": 384,
            "aheads": 2,
            "elayers": 4,
            "eunits": 1536,
            "dlayers": 4,
            "dunits": 1536,
            "postnet_layers": 5,
            "postnet_chans": 512,
            "postnet_filts": 5,
            "postnet_dropout_rate": 0.5,
            "positionwise_layer_type": "conv1d",
            "positionwise_conv_kernel_size": 1,
            "use_scaled_pos_enc": True,
            "use_batch_norm": True,
            "encoder_normalize_before": True,
            "decoder_normalize_before": True,
            "encoder_concat_after": False,
            "decoder_concat_after": False,
            "reduction_factor": 1,
            "encoder_type": "conformer",
            "decoder_type": "conformer",
            "transformer_enc_dropout_rate": 0.1,
            "transformer_enc_positional_dropout_rate": 0.1,
            "transformer_enc_attn_dropout_rate": 0.1,
            "transformer_dec_dropout_rate": 0.1,
            "transformer_dec_positional_dropout_rate": 0.1,
            "transformer_dec_attn_dropout_rate": 0.1,
            "conformer_rel_pos_type": "latest",
            "conformer_pos_enc_layer_type": "rel_pos",
            "conformer_self_attn_layer_type": "rel_selfattn",
            "conformer_activation_type": "swish",
            "use_macaron_style_in_conformer": True,
            "use_cnn_in_conformer": True,
            "zero_triu": False,
            "conformer_enc_kernel_size": 7,
            "conformer_dec_kernel_size": 31,
            "duration_predictor_layers": 2,
            "duration_predictor_chans": 384,
            "duration_predictor_kernel_size": 3,
            "duration_predictor_dropout_rate": 0.1,
            "energy_predictor_layers": 2,
            "energy_predictor_chans": 384,
            "energy_predictor_kernel_size": 3,
            "energy_predictor_dropout": 0.5,
            "energy_embed_kernel_size": 1,
            "energy_embed_dropout": 0.5,
            "stop_gradient_from_energy_predictor": False,
            "pitch_predictor_layers": 5,
            "pitch_predictor_chans": 384,
            "pitch_predictor_kernel_size": 5,
            "pitch_predictor_dropout": 0.5,
            "pitch_embed_kernel_size": 1,
            "pitch_embed_dropout": 0.5,
            "stop_gradient_from_pitch_predictor": True,
            "spks": -1,
            "langs": -1,
            "spk_embed_dim": None,
            "spk_embed_integration_type": "add",
            "use_gst": False,
            "gst_tokens": 10,
            "gst_heads": 4,
            "gst_conv_layers": 6,
            "gst_conv_chans_list": [32, 32, 64, 64, 128, 128],
            "gst_conv_kernel_size": 3,
            "gst_conv_stride": 2,
            "gst_gru_layers": 1,
            "gst_gru_units": 128,
            "init_type": "xavier_uniform",
            "init_enc_alpha": 1.0,
            "init_dec_alpha": 1.0,
            "use_masking": False,
            "use_weighted_masking": False,
        },
        vocoder_type: str = "hifigan_generator",
        vocoder_params: Dict[str, Any] = {
            "out_channels": 1,
            "channels": 512,
            "global_channels": -1,
            "kernel_size": 7,
            "upsample_scales": [8, 8, 2, 2],
            "upsample_kernel_sizes": [16, 16, 4, 4],
            "resblock_kernel_sizes": [3, 7, 11],
            "resblock_dilations": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
            "use_additional_convs": True,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
        },
        use_pqmf: bool = False,
        pqmf_params: Dict[str, Any] = {
            "subbands": 4,
            "taps": 62,
            "cutoff_ratio": 0.142,
            "beta": 9.0,
        },
        # discriminator related
        discriminator_type: str = "hifigan_multi_scale_multi_period_discriminator",
        discriminator_params: Dict[str, Any] = {
            "scales": 1,
            "scale_downsample_pooling": "AvgPool1d",
            "scale_downsample_pooling_params": {
                "kernel_size": 4,
                "stride": 2,
                "padding": 2,
            },
            "scale_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [15, 41, 5, 3],
                "channels": 128,
                "max_downsample_channels": 1024,
                "max_groups": 16,
                "bias": True,
                "downsample_scales": [2, 2, 4, 4, 1],
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
                "use_weight_norm": True,
                "use_spectral_norm": False,
            },
            "follow_official_norm": False,
            "periods": [2, 3, 5, 7, 11],
            "period_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [5, 3],
                "channels": 32,
                "downsample_scales": [3, 3, 3, 3, 1],
                "max_downsample_channels": 1024,
                "bias": True,
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
                "use_weight_norm": True,
                "use_spectral_norm": False,
            },
        },
        # loss related
        generator_adv_loss_params: Dict[str, Any] = {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        discriminator_adv_loss_params: Dict[str, Any] = {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        use_feat_match_loss: bool = True,
        feat_match_loss_params: Dict[str, Any] = {
            "average_by_discriminators": False,
            "average_by_layers": False,
            "include_final_outputs": True,
        },
        use_mel_loss: bool = True,
        mel_loss_params: Dict[str, Any] = {
            "fs": 22050,
            "n_fft": 1024,
            "hop_length": 256,
            "win_length": None,
            "window": "hann",
            "n_mels": 80,
            "fmin": 0,
            "fmax": None,
            "log_base": None,
        },
        lambda_text2mel: float = 1.0,
        lambda_adv: float = 1.0,
        lambda_feat_match: float = 2.0,
        lambda_mel: float = 45.0,
        cache_generator_outputs: bool = False,
    ):
        """Initialize JointText2Wav module.
        Args:
            idim (int): Input vocabulary size.
            odim (int): Acoustic feature dimension. The actual output channels will
                be 1 since the model is the end-to-end text-to-wave model but for the
                compatibility odim is used to indicate the acoustic feature dimension.
            segment_size (int): Segment size for random windowed inputs.
            sampling_rate (int): Sampling rate, not used for the training but it will
                be referred in saving waveform during the inference.
            text2mel_type (str): The text2mel model type.
            text2mel_params (Dict[str, Any]): Parameter dict for text2mel model.
            use_pqmf (bool): Whether to use PQMF for multi-band vocoder.
            pqmf_params (Dict[str, Any]): Parameter dict for PQMF module.
            vocoder_type (str): The vocoder model type.
            vocoder_params (Dict[str, Any]): Parameter dict for vocoder model.
            discriminator_type (str): Discriminator type.
            discriminator_params (Dict[str, Any]): Parameter dict for discriminator.
            generator_adv_loss_params (Dict[str, Any]): Parameter dict for generator
                adversarial loss.
            discriminator_adv_loss_params (Dict[str, Any]): Parameter dict for
                discriminator adversarial loss.
            use_feat_match_loss (bool): Whether to use feat match loss.
            feat_match_loss_params (Dict[str, Any]): Parameter dict for feat match loss.
            use_mel_loss (bool): Whether to use mel loss.
            mel_loss_params (Dict[str, Any]): Parameter dict for mel loss.
            lambda_text2mel (float): Loss scaling coefficient for text2mel model loss.
            lambda_adv (float): Loss scaling coefficient for adversarial loss.
            lambda_feat_match (float): Loss scaling coefficient for feat match loss.
            lambda_mel (float): Loss scaling coefficient for mel loss.
            cache_generator_outputs (bool): Whether to cache generator outputs.
        """
        assert check_argument_types()
        super().__init__()
        self.segment_size = segment_size
        self.use_pqmf = use_pqmf
        # define modules
        self.generator = torch.nn.ModuleDict()
        text2mel_class = AVAILABLE_TEXT2MEL[text2mel_type]
        # copy before injecting idim/odim so that neither the caller's dict nor
        # the shared mutable default dict is mutated in place
        text2mel_params = {**text2mel_params, "idim": idim, "odim": odim}
        self.generator["text2mel"] = text2mel_class(
            **text2mel_params,
        )
        vocoder_class = AVAILABLE_VOCODER[vocoder_type]
        # same copy-on-write treatment for the vocoder parameter dict
        if vocoder_type in ["hifigan_generator", "melgan_generator"]:
            vocoder_params = {**vocoder_params, "in_channels": odim}
        elif vocoder_type in ["parallel_wavegan_generator", "style_melgan_generator"]:
            vocoder_params = {**vocoder_params, "aux_channels": odim}
        self.generator["vocoder"] = vocoder_class(
            **vocoder_params,
        )
        if self.use_pqmf:
            self.pqmf = PQMF(**pqmf_params)
        discriminator_class = AVAILABLE_DISCRIMINATORS[discriminator_type]
        self.discriminator = discriminator_class(
            **discriminator_params,
        )
        self.generator_adv_loss = GeneratorAdversarialLoss(
            **generator_adv_loss_params,
        )
        self.discriminator_adv_loss = DiscriminatorAdversarialLoss(
            **discriminator_adv_loss_params,
        )
        self.use_feat_match_loss = use_feat_match_loss
        if self.use_feat_match_loss:
            self.feat_match_loss = FeatureMatchLoss(
                **feat_match_loss_params,
            )
        self.use_mel_loss = use_mel_loss
        if self.use_mel_loss:
            self.mel_loss = MelSpectrogramLoss(
                **mel_loss_params,
            )
        # coefficients
        self.lambda_text2mel = lambda_text2mel
        self.lambda_adv = lambda_adv
        if self.use_feat_match_loss:
            self.lambda_feat_match = lambda_feat_match
        if self.use_mel_loss:
            self.lambda_mel = lambda_mel
        # cache
        self.cache_generator_outputs = cache_generator_outputs
        self._cache = None
        # store sampling rate for saving wav file
        # (not used for the training)
        self.fs = sampling_rate
        # store parameters for test compatibility
        self.spks = self.generator["text2mel"].spks
        self.langs = self.generator["text2mel"].langs
        self.spk_embed_dim = self.generator["text2mel"].spk_embed_dim
    @property
    def require_raw_speech(self):
        """Return whether or not speech is required."""
        return True
    @property
    def require_vocoder(self):
        """Return whether or not vocoder is required."""
        return False
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        forward_generator: bool = True,
        **kwargs,
    ) -> Dict[str, Any]:
        """Perform generator forward.
        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
            forward_generator (bool): Whether to forward generator.
        Returns:
            Dict[str, Any]:
                - loss (Tensor): Loss scalar tensor.
                - stats (Dict[str, float]): Statistics to be monitored.
                - weight (Tensor): Weight tensor to summarize losses.
                - optim_idx (int): Optimizer index (0 for G and 1 for D).
        """
        if forward_generator:
            return self._forward_generator(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                speech=speech,
                speech_lengths=speech_lengths,
                **kwargs,
            )
        else:
            return self._forward_discrminator(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                speech=speech,
                speech_lengths=speech_lengths,
                **kwargs,
            )
    def _forward_generator(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        **kwargs,
    ) -> Dict[str, Any]:
        """Perform generator forward.
        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
        Returns:
            Dict[str, Any]:
                * loss (Tensor): Loss scalar tensor.
                * stats (Dict[str, float]): Statistics to be monitored.
                * weight (Tensor): Weight tensor to summarize losses.
                * optim_idx (int): Optimizer index (0 for G and 1 for D).
        """
        # setup
        batch_size = text.size(0)
        speech = speech.unsqueeze(1)
        # calculate generator outputs
        reuse_cache = True
        if not self.cache_generator_outputs or self._cache is None:
            reuse_cache = False
            # calculate text2mel outputs
            text2mel_loss, stats, feats_gen = self.generator["text2mel"](
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                joint_training=True,
                **kwargs,
            )
            # get random segments
            feats_gen_, start_idxs = get_random_segments(
                x=feats_gen.transpose(1, 2),
                x_lengths=feats_lengths,
                segment_size=self.segment_size,
            )
            # calculate vocoder outputs
            speech_hat_ = self.generator["vocoder"](feats_gen_)
            if self.use_pqmf:
                speech_hat_ = self.pqmf.synthesis(speech_hat_)
        else:
            text2mel_loss, stats, speech_hat_, start_idxs = self._cache
        # store cache
        if self.training and self.cache_generator_outputs and not reuse_cache:
            self._cache = (text2mel_loss, stats, speech_hat_, start_idxs)
        # cut the ground-truth waveform segment that matches the generated one
        # (feature frame indices are scaled by the vocoder's upsampling factor)
        speech_ = get_segments(
            x=speech,
            start_idxs=start_idxs * self.generator["vocoder"].upsample_factor,
            segment_size=self.segment_size * self.generator["vocoder"].upsample_factor,
        )
        # calculate discriminator outputs
        p_hat = self.discriminator(speech_hat_)
        with torch.no_grad():
            # do not store discriminator gradient in generator turn
            p = self.discriminator(speech_)
        # calculate losses
        adv_loss = self.generator_adv_loss(p_hat)
        adv_loss = adv_loss * self.lambda_adv
        text2mel_loss = text2mel_loss * self.lambda_text2mel
        loss = adv_loss + text2mel_loss
        if self.use_feat_match_loss:
            feat_match_loss = self.feat_match_loss(p_hat, p)
            feat_match_loss = feat_match_loss * self.lambda_feat_match
            loss = loss + feat_match_loss
            stats.update(feat_match_loss=feat_match_loss.item())
        if self.use_mel_loss:
            mel_loss = self.mel_loss(speech_hat_, speech_)
            mel_loss = self.lambda_mel * mel_loss
            loss = loss + mel_loss
            stats.update(mel_loss=mel_loss.item())
        stats.update(
            adv_loss=adv_loss.item(),
            text2mel_loss=text2mel_loss.item(),
            loss=loss.item(),
        )
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        # reset cache
        if reuse_cache or not self.training:
            self._cache = None
        return {
            "loss": loss,
            "stats": stats,
            "weight": weight,
            "optim_idx": 0,  # needed for trainer
        }
    # NOTE: the method name keeps the historical misspelling ("discrminator")
    # for backward compatibility with external callers.
    def _forward_discrminator(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        **kwargs,
    ) -> Dict[str, Any]:
        """Perform discriminator forward.
        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
        Returns:
            Dict[str, Any]:
                * loss (Tensor): Loss scalar tensor.
                * stats (Dict[str, float]): Statistics to be monitored.
                * weight (Tensor): Weight tensor to summarize losses.
                * optim_idx (int): Optimizer index (0 for G and 1 for D).
        """
        # setup
        batch_size = text.size(0)
        speech = speech.unsqueeze(1)
        # calculate generator outputs
        reuse_cache = True
        if not self.cache_generator_outputs or self._cache is None:
            reuse_cache = False
            # calculate text2mel outputs
            text2mel_loss, stats, feats_gen = self.generator["text2mel"](
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                joint_training=True,
                **kwargs,
            )
            # get random segments
            feats_gen_, start_idxs = get_random_segments(
                x=feats_gen.transpose(1, 2),
                x_lengths=feats_lengths,
                segment_size=self.segment_size,
            )
            # calculate vocoder outputs
            speech_hat_ = self.generator["vocoder"](feats_gen_)
            if self.use_pqmf:
                speech_hat_ = self.pqmf.synthesis(speech_hat_)
        else:
            _, _, speech_hat_, start_idxs = self._cache
        # store cache
        if self.cache_generator_outputs and not reuse_cache:
            self._cache = (text2mel_loss, stats, speech_hat_, start_idxs)
        # parse outputs
        speech_ = get_segments(
            x=speech,
            start_idxs=start_idxs * self.generator["vocoder"].upsample_factor,
            segment_size=self.segment_size * self.generator["vocoder"].upsample_factor,
        )
        # calculate discriminator outputs
        # (detach so the discriminator update does not backprop into the generator)
        p_hat = self.discriminator(speech_hat_.detach())
        p = self.discriminator(speech_)
        # calculate losses
        real_loss, fake_loss = self.discriminator_adv_loss(p_hat, p)
        loss = real_loss + fake_loss
        stats = dict(
            discriminator_loss=loss.item(),
            real_loss=real_loss.item(),
            fake_loss=fake_loss.item(),
        )
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        # reset cache
        if reuse_cache or not self.training:
            self._cache = None
        return {
            "loss": loss,
            "stats": stats,
            "weight": weight,
            "optim_idx": 1,  # needed for trainer
        }
    def inference(
        self,
        text: torch.Tensor,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Run inference.
        Args:
            text (Tensor): Input text index tensor (T_text,).
        Returns:
            Dict[str, Tensor]:
                * wav (Tensor): Generated waveform tensor (T_wav,).
                * feat_gen (Tensor): Generated feature tensor (T_text, C).
        """
        output_dict = self.generator["text2mel"].inference(
            text=text,
            **kwargs,
        )
        wav = self.generator["vocoder"].inference(output_dict["feat_gen"])
        if self.use_pqmf:
            wav = self.pqmf.synthesis(wav.unsqueeze(0).transpose(1, 2))
            wav = wav.squeeze(0).transpose(0, 1)
        output_dict.update(wav=wav)
        return output_dict
| 24,009 | 36.870662 | 104 | py |
espnet | espnet-master/espnet2/gan_tts/style_melgan/tade_res_block.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""StyleMelGAN's TADEResBlock Modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
from functools import partial
import torch
class TADELayer(torch.nn.Module):
    """TADE (Temporal Adaptive DE-normalization) layer module."""

    def __init__(
        self,
        in_channels: int = 64,
        aux_channels: int = 80,
        kernel_size: int = 9,
        bias: bool = True,
        upsample_factor: int = 2,
        upsample_mode: str = "nearest",
    ):
        """Initialize TADELayer module.

        Args:
            in_channels (int): Number of input channels.
            aux_channels (int): Number of auxiliary channels.
            kernel_size (int): Kernel size.
            bias (bool): Whether to use bias parameter in conv.
            upsample_factor (int): Upsample factor.
            upsample_mode (str): Upsample mode.

        """
        super().__init__()
        same_pad = (kernel_size - 1) // 2
        self.norm = torch.nn.InstanceNorm1d(in_channels)
        # projects the auxiliary features to the input channel size
        self.aux_conv = torch.nn.Sequential(
            torch.nn.Conv1d(
                aux_channels,
                in_channels,
                kernel_size,
                1,
                bias=bias,
                padding=same_pad,
            ),
            # NOTE(kan-bayashi): Use non-linear activation?
        )
        # produces the (scale, shift) modulation pair with a single conv
        self.gated_conv = torch.nn.Sequential(
            torch.nn.Conv1d(
                in_channels,
                in_channels * 2,
                kernel_size,
                1,
                bias=bias,
                padding=same_pad,
            ),
            # NOTE(kan-bayashi): Use non-linear activation?
        )
        self.upsample = torch.nn.Upsample(
            scale_factor=upsample_factor, mode=upsample_mode
        )

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            c (Tensor): Auxiliary input tensor (B, aux_channels, T').

        Returns:
            Tensor: Output tensor (B, in_channels, T * in_upsample_factor).
            Tensor: Upsampled aux tensor (B, in_channels, T * aux_upsample_factor).

        """
        normalized = self.norm(x)
        c_up = self.aux_conv(self.upsample(c))
        # split the doubled channels into a multiplicative and additive part
        scale, shift = self.gated_conv(c_up).chunk(2, dim=1)
        # NOTE(kan-bayashi): Use upsample for noise input here?
        y = scale * self.upsample(normalized) + shift
        # NOTE(kan-bayashi): Return upsampled aux here?
        return y, c_up
class TADEResBlock(torch.nn.Module):
    """TADEResBlock module."""

    def __init__(
        self,
        in_channels: int = 64,
        aux_channels: int = 80,
        kernel_size: int = 9,
        dilation: int = 2,
        bias: bool = True,
        upsample_factor: int = 2,
        upsample_mode: str = "nearest",
        gated_function: str = "softmax",
    ):
        """Initialize TADEResBlock module.

        Args:
            in_channels (int): Number of input channels.
            aux_channels (int): Number of auxiliary channels.
            kernel_size (int): Kernel size.
            dilation (int): Dilation factor of the second gated conv.
            bias (bool): Whether to use bias parameter in conv.
            upsample_factor (int): Upsample factor.
            upsample_mode (str): Upsample mode.
            gated_function (str): Gated function type ("softmax" or "sigmoid").

        """
        super().__init__()
        self.tade1 = TADELayer(
            in_channels=in_channels,
            aux_channels=aux_channels,
            kernel_size=kernel_size,
            bias=bias,
            # NOTE(kan-bayashi): Use upsample in the first TADE layer?
            upsample_factor=1,
            upsample_mode=upsample_mode,
        )
        self.gated_conv1 = torch.nn.Conv1d(
            in_channels,
            in_channels * 2,
            kernel_size,
            1,
            bias=bias,
            padding=(kernel_size - 1) // 2,
        )
        self.tade2 = TADELayer(
            in_channels=in_channels,
            aux_channels=in_channels,
            kernel_size=kernel_size,
            bias=bias,
            upsample_factor=upsample_factor,
            upsample_mode=upsample_mode,
        )
        self.gated_conv2 = torch.nn.Conv1d(
            in_channels,
            in_channels * 2,
            kernel_size,
            1,
            bias=bias,
            dilation=dilation,
            padding=(kernel_size - 1) // 2 * dilation,
        )
        self.upsample = torch.nn.Upsample(
            scale_factor=upsample_factor, mode=upsample_mode
        )
        gate_fns = {
            "softmax": partial(torch.softmax, dim=1),
            "sigmoid": torch.sigmoid,
        }
        if gated_function not in gate_fns:
            raise ValueError(f"{gated_function} is not supported.")
        self.gated_function = gate_fns[gated_function]

    def _gated_activation(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the gated activation on the two channel halves of ``x``."""
        gate, content = x.chunk(2, dim=1)
        return self.gated_function(gate) * torch.tanh(content)

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            c (Tensor): Auxiliary input tensor (B, aux_channels, T').

        Returns:
            Tensor: Output tensor (B, in_channels, T * in_upsample_factor).
            Tensor: Upsampled auxiliary tensor (B, in_channels, T * in_upsample_factor).

        """
        residual = x
        x, c = self.tade1(x, c)
        x = self._gated_activation(self.gated_conv1(x))
        x, c = self.tade2(x, c)
        x = self._gated_activation(self.gated_conv2(x))
        # NOTE(kan-bayashi): Return upsampled aux here?
        return self.upsample(residual) + x, c
| 5,864 | 30.532258 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/style_melgan/style_melgan.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""StyleMelGAN Modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
import copy
import logging
import math
from typing import Any, Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from espnet2.gan_tts.melgan import MelGANDiscriminator as BaseDiscriminator
from espnet2.gan_tts.melgan.pqmf import PQMF
from espnet2.gan_tts.style_melgan.tade_res_block import TADEResBlock
class StyleMelGANGenerator(torch.nn.Module):
    """Style MelGAN generator module."""

    def __init__(
        self,
        in_channels: int = 128,
        aux_channels: int = 80,
        channels: int = 64,
        out_channels: int = 1,
        kernel_size: int = 9,
        dilation: int = 2,
        bias: bool = True,
        noise_upsample_scales: List[int] = [11, 2, 2, 2],
        noise_upsample_activation: str = "LeakyReLU",
        noise_upsample_activation_params: Dict[str, Any] = {"negative_slope": 0.2},
        upsample_scales: List[int] = [2, 2, 2, 2, 2, 2, 2, 2, 1],
        upsample_mode: str = "nearest",
        gated_function: str = "softmax",
        use_weight_norm: bool = True,
    ):
        """Initialize StyleMelGANGenerator module.

        Args:
            in_channels (int): Number of input noise channels.
            aux_channels (int): Number of auxiliary input channels.
            channels (int): Number of channels for conv layer.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of conv layers.
            dilation (int): Dilation factor for conv layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            noise_upsample_scales (List[int]): List of noise upsampling scales.
            noise_upsample_activation (str): Activation function module name for noise
                upsampling.
            noise_upsample_activation_params (Dict[str, Any]): Hyperparameters for the
                above activation function.
            upsample_scales (List[int]): List of upsampling scales.
            upsample_mode (str): Upsampling mode in TADE layer.
            gated_function (str): Gated function used in TADEResBlock
                ("softmax" or "sigmoid").
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.

        """
        super().__init__()
        self.in_channels = in_channels
        # Transposed-conv stack that stretches the (B, in_channels, 1) noise
        # vector by prod(noise_upsample_scales) along the time axis.
        noise_upsample = []
        in_chs = in_channels
        for noise_upsample_scale in noise_upsample_scales:
            # NOTE(kan-bayashi): How should we design noise upsampling part?
            noise_upsample += [
                torch.nn.ConvTranspose1d(
                    in_chs,
                    channels,
                    noise_upsample_scale * 2,
                    stride=noise_upsample_scale,
                    # padding/output_padding chosen so the output length is
                    # exactly input_length * noise_upsample_scale for both
                    # odd and even scales
                    padding=noise_upsample_scale // 2 + noise_upsample_scale % 2,
                    output_padding=noise_upsample_scale % 2,
                    bias=bias,
                )
            ]
            noise_upsample += [
                getattr(torch.nn, noise_upsample_activation)(
                    **noise_upsample_activation_params
                )
            ]
            in_chs = channels
        self.noise_upsample = torch.nn.Sequential(*noise_upsample)
        self.noise_upsample_factor = int(np.prod(noise_upsample_scales))
        # TADE residual blocks: the first is conditioned on the raw auxiliary
        # features; each later block receives the previous block's upsampled
        # (channels-sized) aux output.
        self.blocks = torch.nn.ModuleList()
        aux_chs = aux_channels
        for upsample_scale in upsample_scales:
            self.blocks += [
                TADEResBlock(
                    in_channels=channels,
                    aux_channels=aux_chs,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    bias=bias,
                    upsample_factor=upsample_scale,
                    upsample_mode=upsample_mode,
                    gated_function=gated_function,
                ),
            ]
            aux_chs = channels
        # NOTE(review): out_channels is folded into the factor — presumably for
        # multi-band (PQMF) setups where each output channel is a sub-band;
        # confirm against the vocoder wrapper.
        self.upsample_factor = int(np.prod(upsample_scales) * out_channels)
        self.output_conv = torch.nn.Sequential(
            torch.nn.Conv1d(
                channels,
                out_channels,
                kernel_size,
                1,
                bias=bias,
                padding=(kernel_size - 1) // 2,
            ),
            torch.nn.Tanh(),
        )

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

    def forward(
        self, c: torch.Tensor, z: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            c (Tensor): Auxiliary input tensor (B, channels, T).
            z (Tensor): Input noise tensor (B, in_channels, 1).

        Returns:
            Tensor: Output tensor (B, out_channels, T * prod(upsample_scales)).

        """
        # draw noise on the fly when the caller does not supply it
        if z is None:
            z = torch.randn(c.size(0), self.in_channels, 1).to(
                device=c.device,
                dtype=c.dtype,
            )
        x = self.noise_upsample(z)
        for block in self.blocks:
            x, c = block(x, c)
        x = self.output_conv(x)
        return x

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m: torch.nn.Module):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def reset_parameters(self):
        """Reset parameters of all conv / transposed-conv layers."""

        def _reset_parameters(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)

    def inference(self, c: torch.Tensor) -> torch.Tensor:
        """Perform inference.

        Args:
            c (Tensor): Input tensor (T, in_channels).

        Returns:
            Tensor: Output tensor (T * prod(upsample_scales), out_channels).

        """
        c = c.transpose(1, 0).unsqueeze(0)
        # prepare a noise input whose upsampled length covers the feature length
        noise_size = (
            1,
            self.in_channels,
            math.ceil(c.size(2) / self.noise_upsample_factor),
        )
        noise = torch.randn(*noise_size, dtype=torch.float).to(
            next(self.parameters()).device
        )
        x = self.noise_upsample(noise)
        # NOTE(kan-bayashi): To remove pop noise at the end of audio, perform padding
        # for feature sequence and after generation cut the generated audio. This
        # requires additional computation but it can prevent pop noise.
        total_length = c.size(2) * self.upsample_factor
        # pad the aux features up to the (possibly longer) upsampled-noise length
        c = F.pad(c, (0, x.size(2) - c.size(2)), "replicate")
        # This version causes pop noise.
        # x = x[:, :, :c.size(2)]
        for block in self.blocks:
            x, c = block(x, c)
        # cut back to the exact expected length after generation
        x = self.output_conv(x)[..., :total_length]
        return x.squeeze(0).transpose(1, 0)
class StyleMelGANDiscriminator(torch.nn.Module):
    """Style MelGAN discriminator module (random window discriminator)."""

    def __init__(
        self,
        repeats: int = 2,
        window_sizes: List[int] = [512, 1024, 2048, 4096],
        pqmf_params: List[List[int]] = [
            [1, None, None, None],
            [2, 62, 0.26700, 9.0],
            [4, 62, 0.14200, 9.0],
            [8, 62, 0.07949, 9.0],
        ],
        discriminator_params: Dict[str, Any] = {
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 16,
            "max_downsample_channels": 512,
            "bias": True,
            "downsample_scales": [4, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.2},
            "pad": "ReflectionPad1d",
            "pad_params": {},
        },
        use_weight_norm: bool = True,
    ):
        """Initialize StyleMelGANDiscriminator module.

        Args:
            repeats (int): Number of repititons to apply RWD.
            window_sizes (List[int]): List of random window sizes.
            pqmf_params (List[List[int]]): List of list of Parameters for PQMF modules
            discriminator_params (Dict[str, Any]): Parameters for base discriminator
                module.
            use_weight_norm (bool): Whether to apply weight normalization.

        """
        super().__init__()
        # window size check
        assert len(window_sizes) == len(pqmf_params)
        # each window size divided by its number of PQMF sub-bands must be
        # identical, so every base discriminator sees the same input length
        sizes = [ws // p[0] for ws, p in zip(window_sizes, pqmf_params)]
        assert len(window_sizes) == sum([sizes[0] == size for size in sizes])
        self.repeats = repeats
        self.window_sizes = window_sizes
        self.pqmfs = torch.nn.ModuleList()
        self.discriminators = torch.nn.ModuleList()
        for pqmf_param in pqmf_params:
            # copy so the shared default dict is never mutated across entries
            d_params = copy.deepcopy(discriminator_params)
            d_params["in_channels"] = pqmf_param[0]
            if pqmf_param[0] == 1:
                # single band: no analysis filter needed, pass through as-is
                self.pqmfs += [torch.nn.Identity()]
            else:
                self.pqmfs += [PQMF(*pqmf_param)]
            self.discriminators += [BaseDiscriminator(**d_params)]

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, 1, T).

        Returns:
            List: List of discriminator outputs, #items in the list will be
                equal to repeats * #discriminators.

        """
        outs = []
        for _ in range(self.repeats):
            # each repeat draws fresh random windows inside _forward
            outs += self._forward(x)
        return outs

    def _forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        # Run every (window size, PQMF, discriminator) triple on a randomly
        # cropped segment of the input.
        outs = []
        for idx, (ws, pqmf, disc) in enumerate(
            zip(self.window_sizes, self.pqmfs, self.discriminators)
        ):
            # NOTE(kan-bayashi): Is it ok to apply different window for real and fake
            #   samples?
            start_idx = np.random.randint(x.size(-1) - ws)
            x_ = x[:, :, start_idx : start_idx + ws]
            if idx == 0:
                # first entry is an Identity module (full-band input)
                x_ = pqmf(x_)
            else:
                # split the segment into PQMF sub-bands before discriminating
                x_ = pqmf.analysis(x_)
            outs += [disc(x_)]
        return outs

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def reset_parameters(self):
        """Reset parameters of all conv / transposed-conv layers."""

        def _reset_parameters(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                m.weight.data.normal_(0.0, 0.02)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)
| 12,076 | 33.309659 | 86 | py |
espnet | espnet-master/espnet2/gan_tts/hifigan/loss.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""HiFiGAN-related loss modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from espnet2.tts.feats_extract.log_mel_fbank import LogMelFbank
class GeneratorAdversarialLoss(torch.nn.Module):
    """Generator adversarial loss module."""

    def __init__(
        self,
        average_by_discriminators: bool = True,
        loss_type: str = "mse",
    ):
        """Initialize GeneratorAversarialLoss module.

        Args:
            average_by_discriminators (bool): Whether to average the loss by
                the number of discriminators.
            loss_type (str): Loss type, "mse" or "hinge".

        """
        super().__init__()
        self.average_by_discriminators = average_by_discriminators
        assert loss_type in ["mse", "hinge"], f"{loss_type} is not supported."
        self.criterion = self._mse_loss if loss_type == "mse" else self._hinge_loss

    def forward(
        self,
        outputs: Union[List[List[torch.Tensor]], List[torch.Tensor], torch.Tensor],
    ) -> torch.Tensor:
        """Calcualate generator adversarial loss.

        Args:
            outputs (Union[List[List[Tensor]], List[Tensor], Tensor]): Discriminator
                outputs, list of discriminator outputs, or list of list of
                discriminator outputs.

        Returns:
            Tensor: Generator adversarial loss value.

        """
        # single-discriminator case: a raw prediction tensor
        if not isinstance(outputs, (tuple, list)):
            return self.criterion(outputs)
        per_disc = []
        for out in outputs:
            if isinstance(out, (tuple, list)):
                # case including feature maps: prediction is the last element
                out = out[-1]
            per_disc.append(self.criterion(out))
        adv_loss = sum(per_disc)
        if self.average_by_discriminators:
            adv_loss = adv_loss / len(per_disc)
        return adv_loss

    def _mse_loss(self, x):
        return F.mse_loss(x, torch.ones_like(x))

    def _hinge_loss(self, x):
        return -torch.mean(x)
class DiscriminatorAdversarialLoss(torch.nn.Module):
    """Discriminator adversarial loss module."""

    def __init__(
        self,
        average_by_discriminators: bool = True,
        loss_type: str = "mse",
    ):
        """Initialize DiscriminatorAversarialLoss module.

        Args:
            average_by_discriminators (bool): Whether to average the loss by
                the number of discriminators.
            loss_type (str): Loss type, "mse" or "hinge".

        """
        super().__init__()
        self.average_by_discriminators = average_by_discriminators
        assert loss_type in ["mse", "hinge"], f"{loss_type} is not supported."
        is_mse = loss_type == "mse"
        self.fake_criterion = self._mse_fake_loss if is_mse else self._hinge_fake_loss
        self.real_criterion = self._mse_real_loss if is_mse else self._hinge_real_loss

    def forward(
        self,
        outputs_hat: Union[List[List[torch.Tensor]], List[torch.Tensor], torch.Tensor],
        outputs: Union[List[List[torch.Tensor]], List[torch.Tensor], torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Calcualate discriminator adversarial loss.

        Args:
            outputs_hat (Union[List[List[Tensor]], List[Tensor], Tensor]):
                Discriminator outputs (any nesting) calculated from generator.
            outputs (Union[List[List[Tensor]], List[Tensor], Tensor]):
                Discriminator outputs (any nesting) calculated from groundtruth.

        Returns:
            Tensor: Discriminator real loss value.
            Tensor: Discriminator fake loss value.

        """
        # single-discriminator case: raw prediction tensors
        if not isinstance(outputs, (tuple, list)):
            return self.real_criterion(outputs), self.fake_criterion(outputs_hat)
        real_losses = []
        fake_losses = []
        for fake_out, real_out in zip(outputs_hat, outputs):
            if isinstance(fake_out, (tuple, list)):
                # case including feature maps: predictions are the last elements
                fake_out = fake_out[-1]
                real_out = real_out[-1]
            real_losses.append(self.real_criterion(real_out))
            fake_losses.append(self.fake_criterion(fake_out))
        real_loss = sum(real_losses)
        fake_loss = sum(fake_losses)
        if self.average_by_discriminators:
            num_discs = len(real_losses)
            real_loss = real_loss / num_discs
            fake_loss = fake_loss / num_discs
        return real_loss, fake_loss

    def _mse_real_loss(self, x: torch.Tensor) -> torch.Tensor:
        return F.mse_loss(x, torch.ones_like(x))

    def _mse_fake_loss(self, x: torch.Tensor) -> torch.Tensor:
        return F.mse_loss(x, torch.zeros_like(x))

    def _hinge_real_loss(self, x: torch.Tensor) -> torch.Tensor:
        return -torch.mean(torch.min(x - 1, torch.zeros_like(x)))

    def _hinge_fake_loss(self, x: torch.Tensor) -> torch.Tensor:
        return -torch.mean(torch.min(-x - 1, torch.zeros_like(x)))
class FeatureMatchLoss(torch.nn.Module):
    """Feature matching loss module."""

    def __init__(
        self,
        average_by_layers: bool = True,
        average_by_discriminators: bool = True,
        include_final_outputs: bool = False,
    ):
        """Initialize FeatureMatchLoss module.

        Args:
            average_by_layers (bool): Whether to average the loss by the number
                of layers.
            average_by_discriminators (bool): Whether to average the loss by
                the number of discriminators.
            include_final_outputs (bool): Whether to include the final output of
                each discriminator for loss calculation.

        """
        super().__init__()
        self.average_by_layers = average_by_layers
        self.average_by_discriminators = average_by_discriminators
        self.include_final_outputs = include_final_outputs

    def forward(
        self,
        feats_hat: Union[List[List[torch.Tensor]], List[torch.Tensor]],
        feats: Union[List[List[torch.Tensor]], List[torch.Tensor]],
    ) -> torch.Tensor:
        """Calculate feature matching loss.

        Args:
            feats_hat (Union[List[List[Tensor]], List[Tensor]]): List of list of
                discriminator outputs or list of discriminator outputs calcuated
                from generator's outputs.
            feats (Union[List[List[Tensor]], List[Tensor]]): List of list of
                discriminator outputs or list of discriminator outputs calcuated
                from groundtruth.

        Returns:
            Tensor: Feature matching loss value.

        """
        feat_match_loss = 0.0
        num_discs = 0
        for feats_hat_, feats_ in zip(feats_hat, feats):
            num_discs += 1
            feat_match_loss_ = 0.0
            if not self.include_final_outputs:
                # the final element is the discriminator prediction, not a fmap
                feats_hat_ = feats_hat_[:-1]
                feats_ = feats_[:-1]
            num_layers = 0
            for feat_hat_, feat_ in zip(feats_hat_, feats_):
                num_layers += 1
                # stop gradients through the groundtruth branch
                feat_match_loss_ += F.l1_loss(feat_hat_, feat_.detach())
            # BUGFIX: the previous implementation read the inner loop variable
            # after the loop, raising NameError when the (possibly truncated)
            # feature list was empty; guard the averaging with the counters.
            if self.average_by_layers and num_layers > 0:
                feat_match_loss_ /= num_layers
            feat_match_loss += feat_match_loss_
        if self.average_by_discriminators and num_discs > 0:
            feat_match_loss /= num_discs
        return feat_match_loss
class MelSpectrogramLoss(torch.nn.Module):
    """Mel-spectrogram loss."""

    def __init__(
        self,
        fs: int = 22050,
        n_fft: int = 1024,
        hop_length: int = 256,
        win_length: Optional[int] = None,
        window: str = "hann",
        n_mels: int = 80,
        fmin: Optional[int] = 0,
        fmax: Optional[int] = None,
        center: bool = True,
        normalized: bool = False,
        onesided: bool = True,
        log_base: Optional[float] = 10.0,
    ):
        """Initialize Mel-spectrogram loss.

        Args:
            fs (int): Sampling rate.
            n_fft (int): FFT points.
            hop_length (int): Hop length.
            win_length (Optional[int]): Window length.
            window (str): Window type.
            n_mels (int): Number of Mel basis.
            fmin (Optional[int]): Minimum frequency for Mel.
            fmax (Optional[int]): Maximum frequency for Mel.
            center (bool): Whether to use center window.
            normalized (bool): Whether to use normalized one.
            onesided (bool): Whether to use oneseded one.
            log_base (Optional[float]): Log base value.

        """
        super().__init__()
        # reuse the TTS feature extractor so the loss features match training
        self.wav_to_mel = LogMelFbank(
            fs=fs,
            n_fft=n_fft,
            hop_length=hop_length,
            win_length=win_length,
            window=window,
            n_mels=n_mels,
            fmin=fmin,
            fmax=fmax,
            center=center,
            normalized=normalized,
            onesided=onesided,
            log_base=log_base,
        )

    def forward(
        self,
        y_hat: torch.Tensor,
        y: torch.Tensor,
        spec: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Calculate Mel-spectrogram loss.

        Args:
            y_hat (Tensor): Generated waveform tensor (B, 1, T).
            y (Tensor): Groundtruth waveform tensor (B, 1, T).
            spec (Optional[Tensor]): Groundtruth linear amplitude spectrum tensor
                (B, n_fft, T). if provided, use it instead of groundtruth waveform.

        Returns:
            Tensor: Mel-spectrogram loss value.

        """
        mel_hat, _ = self.wav_to_mel(y_hat.squeeze(1))
        if spec is not None:
            # reuse the precomputed linear spectrogram for the reference mel
            mel, _ = self.wav_to_mel.logmel(spec)
        else:
            mel, _ = self.wav_to_mel(y.squeeze(1))
        return F.l1_loss(mel_hat, mel)
| 10,182 | 33.636054 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/hifigan/residual_block.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""HiFiGAN Residual block modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
from typing import Any, Dict, List
import torch
class ResidualBlock(torch.nn.Module):
    """Residual block module in HiFiGAN."""

    def __init__(
        self,
        kernel_size: int = 3,
        channels: int = 512,
        dilations: List[int] = [1, 3, 5],
        bias: bool = True,
        use_additional_convs: bool = True,
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.1},
    ):
        """Initialize ResidualBlock module.

        Args:
            kernel_size (int): Kernel size of dilation convolution layer.
            channels (int): Number of channels for convolution layer.
            dilations (List[int]): List of dilation factors.
            use_additional_convs (bool): Whether to use additional convolution
                layers.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for
                activation function.

        """
        super().__init__()
        self.use_additional_convs = use_additional_convs
        self.convs1 = torch.nn.ModuleList()
        if use_additional_convs:
            self.convs2 = torch.nn.ModuleList()
        assert kernel_size % 2 == 1, "Kernel size must be odd number."

        def _activation_and_conv(dilation: int) -> torch.nn.Sequential:
            # "same" padding so the temporal length is preserved
            return torch.nn.Sequential(
                getattr(torch.nn, nonlinear_activation)(
                    **nonlinear_activation_params
                ),
                torch.nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    1,
                    dilation=dilation,
                    bias=bias,
                    padding=(kernel_size - 1) // 2 * dilation,
                ),
            )

        for dilation in dilations:
            self.convs1.append(_activation_and_conv(dilation))
            if use_additional_convs:
                self.convs2.append(_activation_and_conv(1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, channels, T).

        Returns:
            Tensor: Output tensor (B, channels, T).

        """
        for idx, conv1 in enumerate(self.convs1):
            residual = x
            x = conv1(x)
            if self.use_additional_convs:
                x = self.convs2[idx](x)
            x = x + residual
        return x
| 3,313 | 32.816327 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/hifigan/hifigan.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""HiFi-GAN Modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
import copy
import logging
from typing import Any, Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from espnet2.gan_tts.hifigan.residual_block import ResidualBlock
class HiFiGANGenerator(torch.nn.Module):
    """HiFiGAN generator module."""

    def __init__(
        self,
        in_channels: int = 80,
        out_channels: int = 1,
        channels: int = 512,
        global_channels: int = -1,
        kernel_size: int = 7,
        upsample_scales: List[int] = [8, 8, 2, 2],
        upsample_kernel_sizes: List[int] = [16, 16, 4, 4],
        resblock_kernel_sizes: List[int] = [3, 7, 11],
        resblock_dilations: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        use_additional_convs: bool = True,
        bias: bool = True,
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.1},
        use_weight_norm: bool = True,
    ):
        """Initialize HiFiGANGenerator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            channels (int): Number of hidden representation channels.
            global_channels (int): Number of global conditioning channels.
            kernel_size (int): Kernel size of initial and final conv layer.
            upsample_scales (List[int]): List of upsampling scales.
            upsample_kernel_sizes (List[int]): List of kernel sizes for upsample layers.
            resblock_kernel_sizes (List[int]): List of kernel sizes for residual blocks.
            resblock_dilations (List[List[int]]): List of list of dilations for residual
                blocks.
            use_additional_convs (bool): Whether to use additional conv layers in
                residual blocks.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
                function.
            use_weight_norm (bool): Whether to use weight norm. If set to true, it will
                be applied to all of the conv layers.

        """
        super().__init__()

        # check hyperparameters are valid
        assert kernel_size % 2 == 1, "Kernel size must be odd number."
        assert len(upsample_scales) == len(upsample_kernel_sizes)
        assert len(resblock_dilations) == len(resblock_kernel_sizes)

        # define modules
        self.upsample_factor = int(np.prod(upsample_scales) * out_channels)
        self.num_upsamples = len(upsample_kernel_sizes)
        self.num_blocks = len(resblock_kernel_sizes)
        self.input_conv = torch.nn.Conv1d(
            in_channels,
            channels,
            kernel_size,
            1,
            padding=(kernel_size - 1) // 2,
        )
        self.upsamples = torch.nn.ModuleList()
        # flat list of resblocks: the j-th resblock of the i-th upsample stage
        # is stored at index i * num_blocks + j (see forward()).
        self.blocks = torch.nn.ModuleList()
        for i in range(len(upsample_kernel_sizes)):
            assert upsample_kernel_sizes[i] == 2 * upsample_scales[i]
            self.upsamples += [
                torch.nn.Sequential(
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    ),
                    torch.nn.ConvTranspose1d(
                        # channel count halves at every upsample stage
                        channels // (2**i),
                        channels // (2 ** (i + 1)),
                        upsample_kernel_sizes[i],
                        upsample_scales[i],
                        padding=upsample_scales[i] // 2 + upsample_scales[i] % 2,
                        output_padding=upsample_scales[i] % 2,
                    ),
                )
            ]
            for j in range(len(resblock_kernel_sizes)):
                self.blocks += [
                    ResidualBlock(
                        kernel_size=resblock_kernel_sizes[j],
                        channels=channels // (2 ** (i + 1)),
                        dilations=resblock_dilations[j],
                        bias=bias,
                        use_additional_convs=use_additional_convs,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                    )
                ]
        # NOTE: `i` deliberately carries over from the loop above so the output
        # conv consumes the channel count of the last upsample stage.
        self.output_conv = torch.nn.Sequential(
            # NOTE(kan-bayashi): follow official implementation but why
            #   using different slope parameter here? (0.1 vs. 0.01)
            torch.nn.LeakyReLU(),
            torch.nn.Conv1d(
                channels // (2 ** (i + 1)),
                out_channels,
                kernel_size,
                1,
                padding=(kernel_size - 1) // 2,
            ),
            torch.nn.Tanh(),
        )
        if global_channels > 0:
            # 1x1 conv projecting the global conditioning vector onto `channels`
            self.global_conv = torch.nn.Conv1d(global_channels, channels, 1)

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # reset parameters
        self.reset_parameters()

    def forward(
        self, c: torch.Tensor, g: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, in_channels, T).
            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).

        Returns:
            Tensor: Output tensor (B, out_channels, T).

        """
        c = self.input_conv(c)
        if g is not None:
            # add (broadcast over time) the global conditioning embedding
            c = c + self.global_conv(g)
        for i in range(self.num_upsamples):
            c = self.upsamples[i](c)
            # average the outputs of this stage's parallel residual blocks
            cs = 0.0  # initialize
            for j in range(self.num_blocks):
                cs += self.blocks[i * self.num_blocks + j](c)
            c = cs / self.num_blocks
        c = self.output_conv(c)
        return c

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows the official implementation manner.
        https://github.com/jik876/hifi-gan/blob/master/models.py

        """

        def _reset_parameters(m: torch.nn.Module):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                m.weight.data.normal_(0.0, 0.01)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m: torch.nn.Module):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def inference(
        self, c: torch.Tensor, g: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Perform inference.

        Args:
            c (torch.Tensor): Input tensor (T, in_channels).
            g (Optional[Tensor]): Global conditioning tensor (global_channels, 1).

        Returns:
            Tensor: Output tensor (T * upsample_factor, out_channels).

        """
        # add/remove the batch dimension around the batched forward()
        if g is not None:
            g = g.unsqueeze(0)
        c = self.forward(c.transpose(1, 0).unsqueeze(0), g=g)
        return c.squeeze(0).transpose(1, 0)
class HiFiGANPeriodDiscriminator(torch.nn.Module):
    """HiFiGAN period discriminator module.

    Folds the 1D input into a 2D (T/period, period) view and applies a stack
    of 2D convolutions that downsample only along the time axis.
    """

    def __init__(
        self,
        in_channels: int = 1,
        out_channels: int = 1,
        period: int = 3,
        kernel_sizes: List[int] = [5, 3],
        channels: int = 32,
        downsample_scales: List[int] = [3, 3, 3, 3, 1],
        max_downsample_channels: int = 1024,
        bias: bool = True,
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.1},
        use_weight_norm: bool = True,
        use_spectral_norm: bool = False,
    ):
        """Initialize HiFiGANPeriodDiscriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            period (int): Period.
            kernel_sizes (list): Kernel sizes of initial conv layers and the final conv
                layer.
            channels (int): Number of initial channels.
            downsample_scales (List[int]): List of downsampling scales.
            max_downsample_channels (int): Number of maximum downsampling channels.
            bias (bool): Whether to add bias parameter in convolution layers.
                NOTE(review): currently accepted but not forwarded to the conv
                layers below — convs always use the Conv2d default; confirm
                against the reference implementation.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
                function.
            use_weight_norm (bool): Whether to use weight norm.
                If set to true, it will be applied to all of the conv layers.
            use_spectral_norm (bool): Whether to use spectral norm.
                If set to true, it will be applied to all of the conv layers.

        """
        super().__init__()
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1, "Kernel size must be odd number."
        assert kernel_sizes[1] % 2 == 1, "Kernel size must be odd number."
        self.period = period
        self.convs = torch.nn.ModuleList()
        in_chs = in_channels
        out_chs = channels
        for downsample_scale in downsample_scales:
            # Each stage strides only along the folded time axis; the period
            # axis (last dimension of the 2D view) is never downsampled.
            self.convs += [
                torch.nn.Sequential(
                    torch.nn.Conv2d(
                        in_chs,
                        out_chs,
                        (kernel_sizes[0], 1),
                        (downsample_scale, 1),
                        padding=((kernel_sizes[0] - 1) // 2, 0),
                    ),
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    ),
                )
            ]
            in_chs = out_chs
            # NOTE(kan-bayashi): Use downsample_scale + 1?
            out_chs = min(out_chs * 4, max_downsample_channels)
        self.output_conv = torch.nn.Conv2d(
            out_chs,
            out_channels,
            # NOTE(review): kernel height is kernel_sizes[1] - 1 (even) while
            # padding assumes kernel_sizes[1]; this follows the upstream code
            # as-is — confirm against the reference implementation.
            (kernel_sizes[1] - 1, 1),
            1,
            padding=((kernel_sizes[1] - 1) // 2, 0),
        )
        # weight norm and spectral norm are mutually exclusive
        if use_weight_norm and use_spectral_norm:
            raise ValueError("Either use use_weight_norm or use_spectral_norm.")

        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

        # apply spectral norm
        if use_spectral_norm:
            self.apply_spectral_norm()

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).

        Returns:
            list: List of each layer's tensors.

        """
        # transform 1d to 2d -> (B, C, T/P, P)
        b, c, t = x.shape
        if t % self.period != 0:
            # Reflection-pad so T becomes divisible by the period.
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t += n_pad
        x = x.view(b, c, t // self.period, self.period)

        # forward conv, collecting every intermediate feature map for
        # feature-matching losses
        outs = []
        for layer in self.convs:
            x = layer(x)
            outs += [x]
        x = self.output_conv(x)
        x = torch.flatten(x, 1, -1)
        outs += [x]

        return outs

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def apply_spectral_norm(self):
        """Apply spectral normalization module from all of the layers."""

        def _apply_spectral_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.utils.spectral_norm(m)
                logging.debug(f"Spectral norm is applied to {m}.")

        self.apply(_apply_spectral_norm)
class HiFiGANMultiPeriodDiscriminator(torch.nn.Module):
    """HiFiGAN multi-period discriminator module."""

    def __init__(
        self,
        periods: List[int] = [2, 3, 5, 7, 11],
        discriminator_params: Dict[str, Any] = {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 32,
            "downsample_scales": [3, 3, 3, 3, 1],
            "max_downsample_channels": 1024,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
    ):
        """Initialize HiFiGANMultiPeriodDiscriminator module.

        Args:
            periods (List[int]): List of periods.
            discriminator_params (Dict[str, Any]): Parameters for hifi-gan period
                discriminator module. The period parameter will be overwritten.

        """
        super().__init__()
        # One period discriminator per period; only the ``period`` entry of
        # the shared parameter set is overridden for each instance.
        self.discriminators = torch.nn.ModuleList(
            [
                HiFiGANPeriodDiscriminator(
                    **{**copy.deepcopy(discriminator_params), "period": p}
                )
                for p in periods
            ]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List: List of list of each discriminator outputs, which consists of each
                layer output tensors.

        """
        return [disc(x) for disc in self.discriminators]
class HiFiGANScaleDiscriminator(torch.nn.Module):
    """HiFi-GAN scale discriminator module.

    A stack of grouped 1D convolutions operating on the raw (or pooled)
    waveform; all intermediate feature maps are returned for feature matching.
    """

    def __init__(
        self,
        in_channels: int = 1,
        out_channels: int = 1,
        kernel_sizes: List[int] = [15, 41, 5, 3],
        channels: int = 128,
        max_downsample_channels: int = 1024,
        max_groups: int = 16,
        bias: bool = True,
        downsample_scales: List[int] = [2, 2, 4, 4, 1],
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.1},
        use_weight_norm: bool = True,
        use_spectral_norm: bool = False,
    ):
        """Initilize HiFiGAN scale discriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_sizes (List[int]): List of four kernel sizes. The first will be used
                for the first conv layer, and the second is for downsampling part, and
                the remaining two are for the last two output layers.
            channels (int): Initial number of channels for conv layer.
            max_downsample_channels (int): Maximum number of channels for downsampling
                layers.
            max_groups (int): Maximum number of groups used in the grouped
                downsampling convolutions.
            bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (List[int]): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
                function.
            use_weight_norm (bool): Whether to use weight norm. If set to true, it will
                be applied to all of the conv layers.
            use_spectral_norm (bool): Whether to use spectral norm. If set to true, it
                will be applied to all of the conv layers.

        """
        super().__init__()
        self.layers = torch.nn.ModuleList()

        # check kernel size is valid
        assert len(kernel_sizes) == 4
        for ks in kernel_sizes:
            assert ks % 2 == 1

        # add first layer
        self.layers += [
            torch.nn.Sequential(
                torch.nn.Conv1d(
                    in_channels,
                    channels,
                    # NOTE(kan-bayashi): Use always the same kernel size
                    kernel_sizes[0],
                    bias=bias,
                    padding=(kernel_sizes[0] - 1) // 2,
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]

        # add downsample layers
        in_chs = channels
        out_chs = channels
        # NOTE(kan-bayashi): Remove hard coding?
        groups = 4
        for downsample_scale in downsample_scales:
            self.layers += [
                torch.nn.Sequential(
                    torch.nn.Conv1d(
                        in_chs,
                        out_chs,
                        kernel_size=kernel_sizes[1],
                        stride=downsample_scale,
                        padding=(kernel_sizes[1] - 1) // 2,
                        groups=groups,
                        bias=bias,
                    ),
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    ),
                )
            ]
            in_chs = out_chs
            # NOTE(kan-bayashi): Remove hard coding?
            out_chs = min(in_chs * 2, max_downsample_channels)
            # NOTE(kan-bayashi): Remove hard coding?
            groups = min(groups * 4, max_groups)

        # add final layers
        out_chs = min(in_chs * 2, max_downsample_channels)
        self.layers += [
            torch.nn.Sequential(
                torch.nn.Conv1d(
                    in_chs,
                    out_chs,
                    kernel_size=kernel_sizes[2],
                    stride=1,
                    padding=(kernel_sizes[2] - 1) // 2,
                    bias=bias,
                ),
                getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
            )
        ]
        self.layers += [
            torch.nn.Conv1d(
                out_chs,
                out_channels,
                kernel_size=kernel_sizes[3],
                stride=1,
                padding=(kernel_sizes[3] - 1) // 2,
                bias=bias,
            ),
        ]

        # weight norm and spectral norm are mutually exclusive
        if use_weight_norm and use_spectral_norm:
            raise ValueError("Either use use_weight_norm or use_spectral_norm.")

        # apply weight norm
        self.use_weight_norm = use_weight_norm
        if use_weight_norm:
            self.apply_weight_norm()

        # apply spectral norm
        self.use_spectral_norm = use_spectral_norm
        if use_spectral_norm:
            self.apply_spectral_norm()

        # backward compatibility
        self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List[Tensor]: List of output tensors of each layer.

        """
        outs = []
        for f in self.layers:
            x = f(x)
            outs += [x]

        return outs

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)

    def apply_spectral_norm(self):
        """Apply spectral normalization module from all of the layers."""

        def _apply_spectral_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d):
                torch.nn.utils.spectral_norm(m)
                logging.debug(f"Spectral norm is applied to {m}.")

        self.apply(_apply_spectral_norm)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def remove_spectral_norm(self):
        """Remove spectral normalization module from all of the layers."""

        def _remove_spectral_norm(m):
            try:
                logging.debug(f"Spectral norm is removed from {m}.")
                torch.nn.utils.remove_spectral_norm(m)
            except ValueError:  # this module didn't have spectral norm
                return

        self.apply(_remove_spectral_norm)

    def _load_state_dict_pre_hook(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """Fix the compatibility of weight / spectral normalization issue.

        Some pretrained models are trained with configs that use weight / spectral
        normalization, but actually, the norm is not applied. This causes the mismatch
        of the parameters with configs. To solve this issue, when parameter mismatch
        happens in loading pretrained model, we remove the norm from the current model.

        See also:
            - https://github.com/espnet/espnet/pull/5240
            - https://github.com/espnet/espnet/pull/5249
            - https://github.com/kan-bayashi/ParallelWaveGAN/pull/409

        """
        current_module_keys = [x for x in state_dict.keys() if x.startswith(prefix)]
        # NOTE(review): presence of plain ``...weight`` keys is used as the
        # signal that the checkpoint was saved WITHOUT the norm applied
        # (see the PRs referenced above) — confirm before relying on this.
        if self.use_weight_norm and any(
            [k.endswith("weight") for k in current_module_keys]
        ):
            logging.warning(
                "It seems weight norm is not applied in the pretrained model but the"
                " current model uses it. To keep the compatibility, we remove the norm"
                " from the current model. This may cause unexpected behavior due to the"
                " parameter mismatch in finetuning. To avoid this issue, please change"
                " the following parameters in config to false:\n"
                " - discriminator_params.follow_official_norm\n"
                " - discriminator_params.scale_discriminator_params.use_weight_norm\n"
                " - discriminator_params.scale_discriminator_params.use_spectral_norm\n"
                "\n"
                "See also:\n"
                " - https://github.com/espnet/espnet/pull/5240\n"
                " - https://github.com/espnet/espnet/pull/5249"
            )
            self.remove_weight_norm()
            self.use_weight_norm = False
            # drop norm-specific keys so strict loading does not fail
            for k in current_module_keys:
                if k.endswith("weight_g") or k.endswith("weight_v"):
                    del state_dict[k]

        if self.use_spectral_norm and any(
            [k.endswith("weight") for k in current_module_keys]
        ):
            logging.warning(
                "It seems spectral norm is not applied in the pretrained model but the"
                " current model uses it. To keep the compatibility, we remove the norm"
                " from the current model. This may cause unexpected behavior due to the"
                " parameter mismatch in finetuning. To avoid this issue, please change"
                " the following parameters in config to false:\n"
                " - discriminator_params.follow_official_norm\n"
                " - discriminator_params.scale_discriminator_params.use_weight_norm\n"
                " - discriminator_params.scale_discriminator_params.use_spectral_norm\n"
                "\n"
                "See also:\n"
                " - https://github.com/espnet/espnet/pull/5240\n"
                " - https://github.com/espnet/espnet/pull/5249"
            )
            self.remove_spectral_norm()
            self.use_spectral_norm = False
            # drop norm-specific keys so strict loading does not fail
            for k in current_module_keys:
                if (
                    k.endswith("weight_u")
                    or k.endswith("weight_v")
                    or k.endswith("weight_orig")
                ):
                    del state_dict[k]
class HiFiGANMultiScaleDiscriminator(torch.nn.Module):
    """HiFi-GAN multi-scale discriminator module."""

    def __init__(
        self,
        scales: int = 3,
        downsample_pooling: str = "AvgPool1d",
        # follow the official implementation setting
        downsample_pooling_params: Dict[str, Any] = {
            "kernel_size": 4,
            "stride": 2,
            "padding": 2,
        },
        discriminator_params: Dict[str, Any] = {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [15, 41, 5, 3],
            "channels": 128,
            "max_downsample_channels": 1024,
            "max_groups": 16,
            "bias": True,
            "downsample_scales": [2, 2, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
        },
        follow_official_norm: bool = False,
    ):
        """Initilize HiFiGAN multi-scale discriminator module.

        Args:
            scales (int): Number of multi-scales.
            downsample_pooling (str): Pooling module name for downsampling of the
                inputs.
            downsample_pooling_params (Dict[str, Any]): Parameters for the above pooling
                module.
            discriminator_params (Dict[str, Any]): Parameters for hifi-gan scale
                discriminator module.
            follow_official_norm (bool): Whether to follow the norm setting of the
                official implementaion. The first discriminator uses spectral norm
                and the other discriminators use weight norm.

        """
        super().__init__()
        self.discriminators = torch.nn.ModuleList()
        for idx in range(scales):
            params = copy.deepcopy(discriminator_params)
            if follow_official_norm:
                # Official setting: spectral norm on the first (raw-scale)
                # discriminator, weight norm on the downsampled ones.
                params["use_weight_norm"] = idx != 0
                params["use_spectral_norm"] = idx == 0
            self.discriminators.append(HiFiGANScaleDiscriminator(**params))
        # Pooling is only needed when more than one scale is used.
        self.pooling = (
            getattr(torch.nn, downsample_pooling)(**downsample_pooling_params)
            if scales > 1
            else None
        )

    def forward(self, x: torch.Tensor) -> List[List[torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List[List[torch.Tensor]]: List of list of each discriminator outputs,
                which consists of eachlayer output tensors.

        """
        outs = []
        for disc in self.discriminators:
            outs.append(disc(x))
            if self.pooling is not None:
                # Downsample the input for the next (coarser) scale.
                x = self.pooling(x)
        return outs
class HiFiGANMultiScaleMultiPeriodDiscriminator(torch.nn.Module):
    """HiFi-GAN multi-scale + multi-period discriminator module."""

    def __init__(
        self,
        # Multi-scale discriminator related
        scales: int = 3,
        scale_downsample_pooling: str = "AvgPool1d",
        scale_downsample_pooling_params: Dict[str, Any] = {
            "kernel_size": 4,
            "stride": 2,
            "padding": 2,
        },
        scale_discriminator_params: Dict[str, Any] = {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [15, 41, 5, 3],
            "channels": 128,
            "max_downsample_channels": 1024,
            "max_groups": 16,
            "bias": True,
            "downsample_scales": [2, 2, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
        },
        follow_official_norm: bool = True,
        # Multi-period discriminator related
        periods: List[int] = [2, 3, 5, 7, 11],
        period_discriminator_params: Dict[str, Any] = {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 32,
            "downsample_scales": [3, 3, 3, 3, 1],
            "max_downsample_channels": 1024,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
    ):
        """Initilize HiFiGAN multi-scale + multi-period discriminator module.

        Args:
            scales (int): Number of multi-scales.
            scale_downsample_pooling (str): Pooling module name for downsampling of the
                inputs.
            scale_downsample_pooling_params (dict): Parameters for the above pooling
                module.
            scale_discriminator_params (dict): Parameters for hifi-gan scale
                discriminator module.
            follow_official_norm (bool): Whether to follow the norm setting of the
                official implementaion. The first discriminator uses spectral norm and
                the other discriminators use weight norm.
            periods (list): List of periods.
            period_discriminator_params (dict): Parameters for hifi-gan period
                discriminator module. The period parameter will be overwritten.

        """
        super().__init__()
        # Multi-scale branch: discriminators over progressively pooled signals.
        self.msd = HiFiGANMultiScaleDiscriminator(
            scales=scales,
            downsample_pooling=scale_downsample_pooling,
            downsample_pooling_params=scale_downsample_pooling_params,
            discriminator_params=scale_discriminator_params,
            follow_official_norm=follow_official_norm,
        )
        # Multi-period branch: discriminators over period-folded 2D views.
        self.mpd = HiFiGANMultiPeriodDiscriminator(
            periods=periods,
            discriminator_params=period_discriminator_params,
        )

    def forward(self, x: torch.Tensor) -> List[List[torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            List[List[Tensor]]: List of list of each discriminator outputs,
                which consists of each layer output tensors. Multi scale and
                multi period ones are concatenated.

        """
        return self.msd(x) + self.mpd(x)
| 31,567 | 36.182568 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/utils/get_random_segments.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Function to get random segments."""
from typing import Optional, Tuple
import torch
def get_random_segments(
    x: torch.Tensor,
    x_lengths: torch.Tensor,
    segment_size: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Get random segments.

    Args:
        x (Tensor): Input tensor (B, C, T).
        x_lengths (Tensor): Length tensor (B,).
        segment_size (int): Segment size.

    Returns:
        Tensor: Segmented tensor (B, C, segment_size).
        Tensor: Start index tensor (B,).

    """
    b, c, t = x.size()
    # Clamp at zero so that sequences shorter than segment_size cannot
    # produce a negative start index (which would silently slice from the
    # end of the tensor); such sequences start at index 0 instead.
    max_start_idx = torch.clamp(x_lengths - segment_size, min=0)
    start_idxs = (torch.rand([b]).to(x.device) * max_start_idx).to(
        dtype=torch.long,
    )
    segments = get_segments(x, start_idxs, segment_size)
    return segments, start_idxs


def get_segments(
    x: torch.Tensor,
    start_idxs: torch.Tensor,
    segment_size: int,
) -> torch.Tensor:
    """Get segments.

    Args:
        x (Tensor): Input tensor (B, C, T).
        start_idxs (Tensor): Start index tensor (B,).
        segment_size (int): Segment size.

    Returns:
        Tensor: Segmented tensor (B, C, segment_size).

    """
    b, c, t = x.size()
    segments = x.new_zeros(b, c, segment_size)
    # Extract one contiguous slice per batch element.
    for i, start_idx in enumerate(start_idxs):
        segments[i] = x[i, :, start_idx : start_idx + segment_size]
    return segments
| 1,440 | 23.423729 | 67 | py |
espnet | espnet-master/espnet2/gan_tts/vits/residual_coupling.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Residual affine coupling modules in VITS.
This code is based on https://github.com/jaywalnut310/vits.
"""
from typing import Optional, Tuple, Union
import torch
from espnet2.gan_tts.vits.flow import FlipFlow
from espnet2.gan_tts.wavenet import WaveNet
class ResidualAffineCouplingBlock(torch.nn.Module):
    """Residual affine coupling block module.

    This is a module of residual affine coupling block, which used as "Flow" in
    `Conditional Variational Autoencoder with Adversarial Learning for End-to-End
    Text-to-Speech`_.

    .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
        Text-to-Speech`: https://arxiv.org/abs/2006.04558

    """

    def __init__(
        self,
        in_channels: int = 192,
        hidden_channels: int = 192,
        flows: int = 4,
        kernel_size: int = 5,
        base_dilation: int = 1,
        layers: int = 4,
        global_channels: int = -1,
        dropout_rate: float = 0.0,
        use_weight_norm: bool = True,
        bias: bool = True,
        use_only_mean: bool = True,
    ):
        """Initilize ResidualAffineCouplingBlock module.

        Args:
            in_channels (int): Number of input channels.
            hidden_channels (int): Number of hidden channels.
            flows (int): Number of flows.
            kernel_size (int): Kernel size for WaveNet.
            base_dilation (int): Base dilation factor for WaveNet.
            layers (int): Number of layers of WaveNet.
            global_channels (int): Number of global channels.
            dropout_rate (float): Dropout rate.
            use_weight_norm (bool): Whether to use weight normalization in WaveNet.
            bias (bool): Whether to use bias paramters in WaveNet.
            use_only_mean (bool): Whether to estimate only mean.

        """
        super().__init__()
        self.flows = torch.nn.ModuleList()
        for i in range(flows):
            # Each coupling layer is followed by a flip so both channel halves
            # get transformed over successive flows.
            self.flows += [
                ResidualAffineCouplingLayer(
                    in_channels=in_channels,
                    hidden_channels=hidden_channels,
                    kernel_size=kernel_size,
                    base_dilation=base_dilation,
                    # NOTE: number of WaveNet stacks is fixed to 1 here
                    layers=layers,
                    stacks=1,
                    global_channels=global_channels,
                    dropout_rate=dropout_rate,
                    use_weight_norm=use_weight_norm,
                    bias=bias,
                    use_only_mean=use_only_mean,
                )
            ]
            self.flows += [FlipFlow()]

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        g: Optional[torch.Tensor] = None,
        inverse: bool = False,
    ) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            x_mask (Tensor): Mask tensor (B, 1, T).
            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).
            inverse (bool): Whether to inverse the flow.

        Returns:
            Tensor: Output tensor (B, in_channels, T).

        """
        if not inverse:
            # Forward direction: flows return (x, logdet); logdet is discarded.
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, inverse=inverse)
        else:
            # Inverse direction: apply the flows in reverse order.
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, inverse=inverse)
        return x
class ResidualAffineCouplingLayer(torch.nn.Module):
    """Residual affine coupling layer.

    Transforms one half of the channels conditioned on the other half via a
    WaveNet-based encoder (an affine coupling in the normalizing-flow sense).
    """

    def __init__(
        self,
        in_channels: int = 192,
        hidden_channels: int = 192,
        kernel_size: int = 5,
        base_dilation: int = 1,
        layers: int = 5,
        stacks: int = 1,
        global_channels: int = -1,
        dropout_rate: float = 0.0,
        use_weight_norm: bool = True,
        bias: bool = True,
        use_only_mean: bool = True,
    ):
        """Initialzie ResidualAffineCouplingLayer module.

        Args:
            in_channels (int): Number of input channels.
            hidden_channels (int): Number of hidden channels.
            kernel_size (int): Kernel size for WaveNet.
            base_dilation (int): Base dilation factor for WaveNet.
            layers (int): Number of layers of WaveNet.
            stacks (int): Number of stacks of WaveNet.
            global_channels (int): Number of global channels.
            dropout_rate (float): Dropout rate.
            use_weight_norm (bool): Whether to use weight normalization in WaveNet.
            bias (bool): Whether to use bias paramters in WaveNet.
            use_only_mean (bool): Whether to estimate only mean.

        """
        assert in_channels % 2 == 0, "in_channels should be divisible by 2"
        super().__init__()
        self.half_channels = in_channels // 2
        self.use_only_mean = use_only_mean

        # define modules
        self.input_conv = torch.nn.Conv1d(
            self.half_channels,
            hidden_channels,
            1,
        )
        # WaveNet used purely as a conditioning encoder: first/last convs are
        # disabled because input/output projections are handled here.
        self.encoder = WaveNet(
            in_channels=-1,
            out_channels=-1,
            kernel_size=kernel_size,
            layers=layers,
            stacks=stacks,
            base_dilation=base_dilation,
            residual_channels=hidden_channels,
            aux_channels=-1,
            gate_channels=hidden_channels * 2,
            skip_channels=hidden_channels,
            global_channels=global_channels,
            dropout_rate=dropout_rate,
            bias=bias,
            use_weight_norm=use_weight_norm,
            use_first_conv=False,
            use_last_conv=False,
            scale_residual=False,
            scale_skip_connect=True,
        )
        if use_only_mean:
            # predict mean only (scale fixed to 1)
            self.proj = torch.nn.Conv1d(
                hidden_channels,
                self.half_channels,
                1,
            )
        else:
            # predict mean and log-scale
            self.proj = torch.nn.Conv1d(
                hidden_channels,
                self.half_channels * 2,
                1,
            )
        # Zero-init makes stats == 0 (m == 0, logs == 0), so the coupling
        # layer starts as the identity transform.
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        g: Optional[torch.Tensor] = None,
        inverse: bool = False,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            x_mask (Tensor): Mask tensor (B, 1, T).
            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).
            inverse (bool): Whether to inverse the flow.

        Returns:
            Tensor: Output tensor (B, in_channels, T).
            Tensor: Log-determinant tensor for NLL (B,) if not inverse.

        """
        # Split channels: xa conditions the transform applied to xb.
        xa, xb = x.split(x.size(1) // 2, dim=1)
        h = self.input_conv(xa) * x_mask
        h = self.encoder(h, x_mask, g=g)
        stats = self.proj(h) * x_mask
        if not self.use_only_mean:
            m, logs = stats.split(stats.size(1) // 2, dim=1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not inverse:
            # affine transform: xb' = m + xb * exp(logs)
            xb = m + xb * torch.exp(logs) * x_mask
            x = torch.cat([xa, xb], 1)
            # log-determinant of the affine scale, summed over channels/time
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            # inverse affine transform: xb = (xb' - m) * exp(-logs)
            xb = (xb - m) * torch.exp(-logs) * x_mask
            x = torch.cat([xa, xb], 1)
            return x
| 7,596 | 32.320175 | 85 | py |
espnet | espnet-master/espnet2/gan_tts/vits/flow.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Basic Flow modules used in VITS.
This code is based on https://github.com/jaywalnut310/vits.
"""
import math
from typing import Optional, Tuple, Union
import torch
from espnet2.gan_tts.vits.transform import piecewise_rational_quadratic_transform
class FlipFlow(torch.nn.Module):
    """Flip flow module."""

    def forward(
        self, x: torch.Tensor, *args, inverse: bool = False, **kwargs
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, channels, T).
            inverse (bool): Whether to inverse the flow.

        Returns:
            Tensor: Flipped tensor (B, channels, T).
            Tensor: Log-determinant tensor for NLL (B,) if not inverse.

        """
        flipped = torch.flip(x, [1])
        if inverse:
            return flipped
        # Channel flipping is volume-preserving, hence zero log-determinant.
        logdet = flipped.new_zeros(flipped.size(0))
        return flipped, logdet
class LogFlow(torch.nn.Module):
    """Log flow module."""

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        inverse: bool = False,
        eps: float = 1e-5,
        **kwargs
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, channels, T).
            x_mask (Tensor): Mask tensor (B, 1, T).
            inverse (bool): Whether to inverse the flow.
            eps (float): Epsilon for log.

        Returns:
            Tensor: Output tensor (B, channels, T).
            Tensor: Log-determinant tensor for NLL (B,) if not inverse.

        """
        if inverse:
            return torch.exp(x) * x_mask
        # Clamp keeps log() finite for non-positive inputs; masked positions
        # are zeroed out afterwards.
        y = torch.log(torch.clamp_min(x, eps)) * x_mask
        logdet = torch.sum(-y, [1, 2])
        return y, logdet
class ElementwiseAffineFlow(torch.nn.Module):
    """Elementwise affine flow module."""

    def __init__(self, channels: int):
        """Initialize ElementwiseAffineFlow module.

        Args:
            channels (int): Number of channels.

        """
        super().__init__()
        self.channels = channels
        # Per-channel shift (m) and log-scale (logs), both initialized to
        # zero so the flow starts as the identity transform.
        self.register_parameter("m", torch.nn.Parameter(torch.zeros(channels, 1)))
        self.register_parameter("logs", torch.nn.Parameter(torch.zeros(channels, 1)))

    def forward(
        self, x: torch.Tensor, x_mask: torch.Tensor, inverse: bool = False, **kwargs
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, channels, T).
            x_mask (Tensor): Mask tensor (B, 1, T).
            inverse (bool): Whether to inverse the flow.

        Returns:
            Tensor: Output tensor (B, channels, T).
            Tensor: Log-determinant tensor for NLL (B,) if not inverse.

        """
        if inverse:
            return (x - self.m) * torch.exp(-self.logs) * x_mask
        y = (self.m + torch.exp(self.logs) * x) * x_mask
        # log|det| of an elementwise affine map is the sum of log-scales
        # over the unmasked positions.
        logdet = torch.sum(self.logs * x_mask, [1, 2])
        return y, logdet
class Transpose(torch.nn.Module):
    """Transpose module for torch.nn.Sequential()."""

    def __init__(self, dim1: int, dim2: int):
        """Initialize Transpose module.

        Args:
            dim1 (int): First dimension to swap.
            dim2 (int): Second dimension to swap.

        """
        super().__init__()
        self.dim1 = dim1
        self.dim2 = dim2

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Swap ``dim1`` and ``dim2`` of the input tensor."""
        return torch.transpose(x, self.dim1, self.dim2)
class DilatedDepthSeparableConv(torch.nn.Module):
    """Dilated depth-separable conv module."""

    def __init__(
        self,
        channels: int,
        kernel_size: int,
        layers: int,
        dropout_rate: float = 0.0,
        eps: float = 1e-5,
    ):
        """Initialize DilatedDepthSeparableConv module.

        Args:
            channels (int): Number of channels.
            kernel_size (int): Kernel size.
            layers (int): Number of layers.
            dropout_rate (float): Dropout rate.
            eps (float): Epsilon for layer norm.

        """
        super().__init__()
        self.convs = torch.nn.ModuleList()
        for i in range(layers):
            # Exponentially increasing dilation; padding keeps length fixed.
            dilation = kernel_size**i
            padding = (kernel_size * dilation - dilation) // 2
            # Depthwise (grouped) conv + pointwise 1x1 conv, each followed by
            # LayerNorm (applied over channels, hence the transposes) + GELU;
            # dropout closes the block.
            self.convs += [
                torch.nn.Sequential(
                    torch.nn.Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        groups=channels,
                        dilation=dilation,
                        padding=padding,
                    ),
                    Transpose(1, 2),
                    torch.nn.LayerNorm(
                        channels,
                        eps=eps,
                        elementwise_affine=True,
                    ),
                    Transpose(1, 2),
                    torch.nn.GELU(),
                    torch.nn.Conv1d(
                        channels,
                        channels,
                        1,
                    ),
                    Transpose(1, 2),
                    torch.nn.LayerNorm(
                        channels,
                        eps=eps,
                        elementwise_affine=True,
                    ),
                    Transpose(1, 2),
                    torch.nn.GELU(),
                    torch.nn.Dropout(dropout_rate),
                )
            ]

    def forward(
        self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            x_mask (Tensor): Mask tensor (B, 1, T).
            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).

        Returns:
            Tensor: Output tensor (B, channels, T).

        """
        if g is not None:
            x = x + g
        for f in self.convs:
            # Residual connection around each depth-separable block; the
            # input to each block is masked first.
            y = f(x * x_mask)
            x = x + y
        return x * x_mask
class ConvFlow(torch.nn.Module):
    """Convolutional flow module.

    Transforms half of the channels with a piecewise rational-quadratic
    spline whose parameters are predicted from the other half.
    """

    def __init__(
        self,
        in_channels: int,
        hidden_channels: int,
        kernel_size: int,
        layers: int,
        bins: int = 10,
        tail_bound: float = 5.0,
    ):
        """Initialize ConvFlow module.

        Args:
            in_channels (int): Number of input channels.
            hidden_channels (int): Number of hidden channels.
            kernel_size (int): Kernel size.
            layers (int): Number of layers.
            bins (int): Number of bins.
            tail_bound (float): Tail bound value.

        """
        super().__init__()
        self.half_channels = in_channels // 2
        self.hidden_channels = hidden_channels
        self.bins = bins
        self.tail_bound = tail_bound

        self.input_conv = torch.nn.Conv1d(
            self.half_channels,
            hidden_channels,
            1,
        )
        self.dds_conv = DilatedDepthSeparableConv(
            hidden_channels,
            kernel_size,
            layers,
            dropout_rate=0.0,
        )
        # Per position and half-channel: bins widths + bins heights +
        # (bins - 1) interior knot derivatives = bins * 3 - 1 parameters.
        self.proj = torch.nn.Conv1d(
            hidden_channels,
            self.half_channels * (bins * 3 - 1),
            1,
        )
        # Zero-init so the predicted spline starts near the identity.
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        g: Optional[torch.Tensor] = None,
        inverse: bool = False,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, channels, T).
            x_mask (Tensor): Mask tensor (B,).
            g (Optional[Tensor]): Global conditioning tensor (B, channels, 1).
            inverse (bool): Whether to inverse the flow.

        Returns:
            Tensor: Output tensor (B, channels, T).
            Tensor: Log-determinant tensor for NLL (B,) if not inverse.

        """
        # xa conditions the spline applied to xb.
        xa, xb = x.split(x.size(1) // 2, 1)
        h = self.input_conv(xa)
        h = self.dds_conv(h, x_mask, g=g)
        h = self.proj(h) * x_mask  # (B, half_channels * (bins * 3 - 1), T)

        b, c, t = xa.shape
        # (B, half_channels, bins * 3 - 1, T) -> (B, half_channels, T, bins * 3 - 1)
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)

        # NOTE(review): scaling by sqrt(hidden_channels) follows the original
        # VITS implementation — presumably to keep the spline near identity at
        # init; confirm against the upstream reference.
        denom = math.sqrt(self.hidden_channels)
        unnorm_widths = h[..., : self.bins] / denom
        unnorm_heights = h[..., self.bins : 2 * self.bins] / denom
        unnorm_derivatives = h[..., 2 * self.bins :]
        xb, logdet_abs = piecewise_rational_quadratic_transform(
            xb,
            unnorm_widths,
            unnorm_heights,
            unnorm_derivatives,
            inverse=inverse,
            tails="linear",
            tail_bound=self.tail_bound,
        )
        x = torch.cat([xa, xb], 1) * x_mask
        logdet = torch.sum(logdet_abs * x_mask, [1, 2])
        if not inverse:
            return x, logdet
        else:
            return x
| 9,380 | 29.163987 | 85 | py |
espnet | espnet-master/espnet2/gan_tts/vits/transform.py | """Flow-related transformation.
This code is derived from https://github.com/bayesiains/nflows.
"""
import numpy as np
import torch
from torch.nn import functional as F
# Lower bounds used to keep the spline bin widths/heights and knot
# derivatives strictly positive for numerical stability.
DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3
def piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails=None,
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Apply a piecewise rational-quadratic spline transform.

    Dispatches to the plain spline when ``tails`` is None, otherwise to the
    unconstrained variant, which also handles inputs outside
    ``[-tail_bound, tail_bound]``.
    """
    if tails is None:
        # Inputs are assumed to lie within the spline interval.
        return rational_quadratic_spline(
            inputs=inputs,
            unnormalized_widths=unnormalized_widths,
            unnormalized_heights=unnormalized_heights,
            unnormalized_derivatives=unnormalized_derivatives,
            inverse=inverse,
            min_bin_width=min_bin_width,
            min_bin_height=min_bin_height,
            min_derivative=min_derivative,
        )
    return unconstrained_rational_quadratic_spline(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        tails=tails,
        tail_bound=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )
# TODO(kan-bayashi): Documentation and type hint
def unconstrained_rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails="linear",
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Spline transform with linear (identity) tails outside the interval.

    Elements inside [-tail_bound, tail_bound] go through the
    rational-quadratic spline; elements outside are passed through
    unchanged with a zero log-determinant contribution.

    """
    if tails != "linear":
        raise RuntimeError("{} tails are not implemented.".format(tails))

    inside = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside = ~inside

    # Pad the derivative parameters with a constant chosen so that
    # min_derivative + softplus(constant) == 1, i.e. the boundary knot
    # derivatives match the slope of the identity tails.
    unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
    boundary = np.log(np.exp(1 - min_derivative) - 1)
    unnormalized_derivatives[..., 0] = boundary
    unnormalized_derivatives[..., -1] = boundary

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    # Identity map (zero logdet) for out-of-interval elements.
    outputs[outside] = inputs[outside]
    logabsdet[outside] = 0

    outputs[inside], logabsdet[inside] = rational_quadratic_spline(
        inputs=inputs[inside],
        unnormalized_widths=unnormalized_widths[inside, :],
        unnormalized_heights=unnormalized_heights[inside, :],
        unnormalized_derivatives=unnormalized_derivatives[inside, :],
        inverse=inverse,
        left=-tail_bound,
        right=tail_bound,
        bottom=-tail_bound,
        top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )
    return outputs, logabsdet
# TODO(kan-bayashi): Documentation and type hint
def rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    left=0.0,
    right=1.0,
    bottom=0.0,
    top=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Monotonic piecewise rational-quadratic spline transform.

    Maps ``inputs`` from [left, right] to [bottom, top] (or the inverse map
    when ``inverse`` is True) and returns the transformed values together
    with the per-element log|det J|.
    """
    # Domain check: every input must lie inside the spline interval.
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError("Input to a transform is not within its domain")

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError("Minimal bin width too large for the number of bins")
    if min_bin_height * num_bins > 1.0:
        raise ValueError("Minimal bin height too large for the number of bins")

    # Bin widths: softmax over bins, floored at min_bin_width, then turned
    # into cumulative knot x-positions spanning [left, right].
    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
    cumwidths = (right - left) * cumwidths + left
    # Pin the endpoints exactly to avoid floating-point drift at the borders.
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    # Knot derivatives: softplus keeps them positive; min_derivative floors them.
    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    # Bin heights: same construction as widths, on the y-axis [bottom, top].
    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    # Locate the bin each input falls into: search the y-knots for the
    # inverse map, the x-knots for the forward map.
    if inverse:
        bin_idx = _searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = _searchsorted(cumwidths, inputs)[..., None]

    # Gather the per-input bin parameters.
    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths  # average slope of each bin
    input_delta = delta.gather(-1, bin_idx)[..., 0]
    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        # Invert the rational-quadratic map by solving a quadratic in the
        # normalized bin position theta (a*theta^2 + b*theta + c = 0).
        a = (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        ) + input_heights * (input_delta - input_derivatives)
        b = input_heights * input_derivatives - (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        )
        c = -input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        # Numerically stable quadratic root (Citardauq form).
        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * root.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - root).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
        # Negated: logdet of the inverse map.
        return outputs, -logabsdet
    else:
        # Normalized position of the input within its bin, in [0, 1].
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (
            input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
        )
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * theta.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - theta).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
        return outputs, logabsdet
def _searchsorted(bin_locations, inputs, eps=1e-6):
bin_locations[..., -1] += eps
return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
| 7,504 | 33.585253 | 83 | py |
espnet | espnet-master/espnet2/gan_tts/vits/loss.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""VITS-related loss modules.
This code is based on https://github.com/jaywalnut310/vits.
"""
import torch
import torch.distributions as D
class KLDivergenceLoss(torch.nn.Module):
    """KL divergence loss."""

    def forward(
        self,
        z_p: torch.Tensor,
        logs_q: torch.Tensor,
        m_p: torch.Tensor,
        logs_p: torch.Tensor,
        z_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Calculate KL divergence loss.

        Args:
            z_p (Tensor): Flow hidden representation (B, H, T_feats).
            logs_q (Tensor): Posterior encoder projected scale (B, H, T_feats).
            m_p (Tensor): Expanded text encoder projected mean (B, H, T_feats).
            logs_p (Tensor): Expanded text encoder projected scale (B, H, T_feats).
            z_mask (Tensor): Mask tensor (B, 1, T_feats).

        Returns:
            Tensor: KL divergence loss.

        """
        # Compute in float32 regardless of the incoming (possibly AMP) dtype.
        z_p, logs_q, m_p, logs_p, z_mask = (
            t.float() for t in (z_p, logs_q, m_p, logs_p, z_mask)
        )
        # Per-element KL between the two diagonal Gaussians
        # (scales are stored in the log domain).
        elementwise_kl = logs_p - logs_q - 0.5
        elementwise_kl = elementwise_kl + 0.5 * ((z_p - m_p) ** 2) * torch.exp(
            -2.0 * logs_p
        )
        # Average over valid (unmasked) positions only.
        masked_total = torch.sum(elementwise_kl * z_mask)
        return masked_total / torch.sum(z_mask)
class KLDivergenceLossWithoutFlow(torch.nn.Module):
    """KL divergence loss without flow."""

    def forward(
        self,
        m_q: torch.Tensor,
        logs_q: torch.Tensor,
        m_p: torch.Tensor,
        logs_p: torch.Tensor,
    ) -> torch.Tensor:
        """Calculate KL divergence loss without flow.

        Args:
            m_q (Tensor): Posterior encoder projected mean (B, H, T_feats).
            logs_q (Tensor): Posterior encoder projected scale (B, H, T_feats).
            m_p (Tensor): Expanded text encoder projected mean (B, H, T_feats).
            logs_p (Tensor): Expanded text encoder projected scale (B, H, T_feats).

        Returns:
            Tensor: Scalar KL divergence averaged over all elements.

        """
        # Build the two diagonal Gaussians; scales are stored in log domain.
        posterior = D.Normal(m_q, torch.exp(logs_q))
        prior = D.Normal(m_p, torch.exp(logs_p))
        # Analytic KL per element, reduced to a scalar mean.
        return D.kl_divergence(posterior, prior).mean()
| 2,215 | 29.356164 | 83 | py |
espnet | espnet-master/espnet2/gan_tts/vits/duration_predictor.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Stochastic duration predictor modules in VITS.
This code is based on https://github.com/jaywalnut310/vits.
"""
import math
from typing import Optional
import torch
import torch.nn.functional as F
from espnet2.gan_tts.vits.flow import (
ConvFlow,
DilatedDepthSeparableConv,
ElementwiseAffineFlow,
FlipFlow,
LogFlow,
)
class StochasticDurationPredictor(torch.nn.Module):
    """Stochastic duration predictor module.

    This is a module of stochastic duration predictor described in `Conditional
    Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech`_.

    .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
        Text-to-Speech`: https://arxiv.org/abs/2006.04558

    """

    def __init__(
        self,
        channels: int = 192,
        kernel_size: int = 3,
        dropout_rate: float = 0.5,
        flows: int = 4,
        dds_conv_layers: int = 3,
        global_channels: int = -1,
    ):
        """Initialize StochasticDurationPredictor module.

        Args:
            channels (int): Number of channels.
            kernel_size (int): Kernel size.
            dropout_rate (float): Dropout rate.
            flows (int): Number of flows.
            dds_conv_layers (int): Number of conv layers in DDS conv.
            global_channels (int): Number of global conditioning channels.

        """
        super().__init__()

        # Text-conditioning path: projects and processes the input features
        # that condition both flow chains.
        self.pre = torch.nn.Conv1d(channels, channels, 1)
        self.dds = DilatedDepthSeparableConv(
            channels,
            kernel_size,
            layers=dds_conv_layers,
            dropout_rate=dropout_rate,
        )
        self.proj = torch.nn.Conv1d(channels, channels, 1)

        # Main flow chain: elementwise affine, then alternating conv flows
        # and channel flips. Used for the NLL (training) and sampling
        # (inference) of log-durations.
        self.log_flow = LogFlow()
        self.flows = torch.nn.ModuleList()
        self.flows += [ElementwiseAffineFlow(2)]
        for i in range(flows):
            self.flows += [
                ConvFlow(
                    2,
                    channels,
                    kernel_size,
                    layers=dds_conv_layers,
                )
            ]
            self.flows += [FlipFlow()]

        # Posterior path (training only): encodes the ground-truth durations
        # used to condition the posterior flow chain below.
        self.post_pre = torch.nn.Conv1d(1, channels, 1)
        self.post_dds = DilatedDepthSeparableConv(
            channels,
            kernel_size,
            layers=dds_conv_layers,
            dropout_rate=dropout_rate,
        )
        self.post_proj = torch.nn.Conv1d(channels, channels, 1)
        self.post_flows = torch.nn.ModuleList()
        self.post_flows += [ElementwiseAffineFlow(2)]
        for i in range(flows):
            self.post_flows += [
                ConvFlow(
                    2,
                    channels,
                    kernel_size,
                    layers=dds_conv_layers,
                )
            ]
            self.post_flows += [FlipFlow()]

        # Optional global (e.g. speaker) conditioning projection.
        if global_channels > 0:
            self.global_conv = torch.nn.Conv1d(global_channels, channels, 1)

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        w: Optional[torch.Tensor] = None,
        g: Optional[torch.Tensor] = None,
        inverse: bool = False,
        noise_scale: float = 1.0,
    ) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, channels, T_text).
            x_mask (Tensor): Mask tensor (B, 1, T_text).
            w (Optional[Tensor]): Duration tensor (B, 1, T_text).
            g (Optional[Tensor]): Global conditioning tensor (B, channels, 1)
            inverse (bool): Whether to inverse the flow.
            noise_scale (float): Noise scale value.

        Returns:
            Tensor: If not inverse, negative log-likelihood (NLL) tensor (B,).
                If inverse, log-duration tensor (B, 1, T_text).

        """
        # Conditioning features are detached so duration loss gradients do
        # not flow back into the text encoder.
        x = x.detach()  # stop gradient
        x = self.pre(x)
        if g is not None:
            x = x + self.global_conv(g.detach())  # stop gradient
        x = self.dds(x, x_mask)
        x = self.proj(x) * x_mask
        if not inverse:
            assert w is not None, "w must be provided."
            # Encode the ground-truth durations for posterior conditioning.
            h_w = self.post_pre(w)
            h_w = self.post_dds(h_w, x_mask)
            h_w = self.post_proj(h_w) * x_mask
            # Sample posterior noise and push it through the posterior flows,
            # conditioned on text features plus duration encoding.
            e_q = (
                torch.randn(
                    w.size(0),
                    2,
                    w.size(2),
                ).to(device=x.device, dtype=x.dtype)
                * x_mask
            )
            z_q = e_q
            logdet_tot_q = 0.0
            for flow in self.post_flows:
                z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
                logdet_tot_q += logdet_q
            z_u, z1 = torch.split(z_q, [1, 1], 1)
            u = torch.sigmoid(z_u) * x_mask
            z0 = (w - u) * x_mask
            # Log-determinant of the sigmoid applied to z_u.
            logdet_tot_q += torch.sum(
                (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]
            )
            # log q(u | x, w): standard-normal log-density of e_q minus the
            # accumulated posterior-flow log-determinant.
            logq = (
                torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2])
                - logdet_tot_q
            )
            # Push (z0, z1) through the main flow chain and accumulate the
            # total log-determinant for the NLL.
            logdet_tot = 0
            z0, logdet = self.log_flow(z0, x_mask)
            logdet_tot += logdet
            z = torch.cat([z0, z1], 1)
            for flow in self.flows:
                z, logdet = flow(z, x_mask, g=x, inverse=inverse)
                logdet_tot = logdet_tot + logdet
            nll = (
                torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2])
                - logdet_tot
            )
            return nll + logq  # (B,)
        else:
            # Inference: run the main flows in reverse order on scaled noise.
            flows = list(reversed(self.flows))
            flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
            z = (
                torch.randn(
                    x.size(0),
                    2,
                    x.size(2),
                ).to(device=x.device, dtype=x.dtype)
                * noise_scale
            )
            for flow in flows:
                z = flow(z, x_mask, g=x, inverse=inverse)
            # First channel carries the predicted log-duration.
            z0, z1 = z.split(1, 1)
            logw = z0
            return logw
| 6,185 | 31.051813 | 86 | py |
espnet | espnet-master/espnet2/gan_tts/vits/vits.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""VITS module for GAN-TTS task."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any, Dict, Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.gan_tts.hifigan import (
HiFiGANMultiPeriodDiscriminator,
HiFiGANMultiScaleDiscriminator,
HiFiGANMultiScaleMultiPeriodDiscriminator,
HiFiGANPeriodDiscriminator,
HiFiGANScaleDiscriminator,
)
from espnet2.gan_tts.hifigan.loss import (
DiscriminatorAdversarialLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
MelSpectrogramLoss,
)
from espnet2.gan_tts.utils import get_segments
from espnet2.gan_tts.vits.generator import VITSGenerator
from espnet2.gan_tts.vits.loss import KLDivergenceLoss
from espnet2.torch_utils.device_funcs import force_gatherable
# Registry mapping generator type names (as given in the training config)
# to their classes.
AVAILABLE_GENERATERS = {
    "vits_generator": VITSGenerator,
}
# Registry mapping discriminator type names to their classes.
AVAILABLE_DISCRIMINATORS = {
    "hifigan_period_discriminator": HiFiGANPeriodDiscriminator,
    "hifigan_scale_discriminator": HiFiGANScaleDiscriminator,
    "hifigan_multi_period_discriminator": HiFiGANMultiPeriodDiscriminator,
    "hifigan_multi_scale_discriminator": HiFiGANMultiScaleDiscriminator,
    "hifigan_multi_scale_multi_period_discriminator": HiFiGANMultiScaleMultiPeriodDiscriminator,  # NOQA
}
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):  # NOQA
        """No-op stand-in for AMP autocast on torch<1.6.0."""
        yield
class VITS(AbsGANTTS):
    """VITS module (generator + discriminator).

    This is a module of VITS described in `Conditional Variational Autoencoder
    with Adversarial Learning for End-to-End Text-to-Speech`_.

    .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
        Text-to-Speech`: https://arxiv.org/abs/2006.04558

    """

    def __init__(
        self,
        # generator related
        idim: int,
        odim: int,
        sampling_rate: int = 22050,
        generator_type: str = "vits_generator",
        generator_params: Dict[str, Any] = {
            "hidden_channels": 192,
            "spks": None,
            "langs": None,
            "spk_embed_dim": None,
            "global_channels": -1,
            "segment_size": 32,
            "text_encoder_attention_heads": 2,
            "text_encoder_ffn_expand": 4,
            "text_encoder_blocks": 6,
            "text_encoder_positionwise_layer_type": "conv1d",
            "text_encoder_positionwise_conv_kernel_size": 1,
            "text_encoder_positional_encoding_layer_type": "rel_pos",
            "text_encoder_self_attention_layer_type": "rel_selfattn",
            "text_encoder_activation_type": "swish",
            "text_encoder_normalize_before": True,
            "text_encoder_dropout_rate": 0.1,
            "text_encoder_positional_dropout_rate": 0.0,
            "text_encoder_attention_dropout_rate": 0.0,
            "text_encoder_conformer_kernel_size": 7,
            "use_macaron_style_in_text_encoder": True,
            "use_conformer_conv_in_text_encoder": True,
            "decoder_kernel_size": 7,
            "decoder_channels": 512,
            "decoder_upsample_scales": [8, 8, 2, 2],
            "decoder_upsample_kernel_sizes": [16, 16, 4, 4],
            "decoder_resblock_kernel_sizes": [3, 7, 11],
            "decoder_resblock_dilations": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
            "use_weight_norm_in_decoder": True,
            "posterior_encoder_kernel_size": 5,
            "posterior_encoder_layers": 16,
            "posterior_encoder_stacks": 1,
            "posterior_encoder_base_dilation": 1,
            "posterior_encoder_dropout_rate": 0.0,
            "use_weight_norm_in_posterior_encoder": True,
            "flow_flows": 4,
            "flow_kernel_size": 5,
            "flow_base_dilation": 1,
            "flow_layers": 4,
            "flow_dropout_rate": 0.0,
            "use_weight_norm_in_flow": True,
            "use_only_mean_in_flow": True,
            "stochastic_duration_predictor_kernel_size": 3,
            "stochastic_duration_predictor_dropout_rate": 0.5,
            "stochastic_duration_predictor_flows": 4,
            "stochastic_duration_predictor_dds_conv_layers": 3,
        },
        # discriminator related
        discriminator_type: str = "hifigan_multi_scale_multi_period_discriminator",
        discriminator_params: Dict[str, Any] = {
            "scales": 1,
            "scale_downsample_pooling": "AvgPool1d",
            "scale_downsample_pooling_params": {
                "kernel_size": 4,
                "stride": 2,
                "padding": 2,
            },
            "scale_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [15, 41, 5, 3],
                "channels": 128,
                "max_downsample_channels": 1024,
                "max_groups": 16,
                "bias": True,
                "downsample_scales": [2, 2, 4, 4, 1],
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
                "use_weight_norm": True,
                "use_spectral_norm": False,
            },
            "follow_official_norm": False,
            "periods": [2, 3, 5, 7, 11],
            "period_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [5, 3],
                "channels": 32,
                "downsample_scales": [3, 3, 3, 3, 1],
                "max_downsample_channels": 1024,
                "bias": True,
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
                "use_weight_norm": True,
                "use_spectral_norm": False,
            },
        },
        # loss related
        generator_adv_loss_params: Dict[str, Any] = {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        discriminator_adv_loss_params: Dict[str, Any] = {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        feat_match_loss_params: Dict[str, Any] = {
            "average_by_discriminators": False,
            "average_by_layers": False,
            "include_final_outputs": True,
        },
        mel_loss_params: Dict[str, Any] = {
            "fs": 22050,
            "n_fft": 1024,
            "hop_length": 256,
            "win_length": None,
            "window": "hann",
            "n_mels": 80,
            "fmin": 0,
            "fmax": None,
            "log_base": None,
        },
        lambda_adv: float = 1.0,
        lambda_mel: float = 45.0,
        lambda_feat_match: float = 2.0,
        lambda_dur: float = 1.0,
        lambda_kl: float = 1.0,
        cache_generator_outputs: bool = True,
    ):
        """Initialize VITS module.

        Args:
            idim (int): Input vocabrary size.
            odim (int): Acoustic feature dimension. The actual output channels will
                be 1 since VITS is the end-to-end text-to-wave model but for the
                compatibility odim is used to indicate the acoustic feature dimension.
            sampling_rate (int): Sampling rate, not used for the training but it will
                be referred in saving waveform during the inference.
            generator_type (str): Generator type.
            generator_params (Dict[str, Any]): Parameter dict for generator.
            discriminator_type (str): Discriminator type.
            discriminator_params (Dict[str, Any]): Parameter dict for discriminator.
            generator_adv_loss_params (Dict[str, Any]): Parameter dict for generator
                adversarial loss.
            discriminator_adv_loss_params (Dict[str, Any]): Parameter dict for
                discriminator adversarial loss.
            feat_match_loss_params (Dict[str, Any]): Parameter dict for feat match loss.
            mel_loss_params (Dict[str, Any]): Parameter dict for mel loss.
            lambda_adv (float): Loss scaling coefficient for adversarial loss.
            lambda_mel (float): Loss scaling coefficient for mel spectrogram loss.
            lambda_feat_match (float): Loss scaling coefficient for feat match loss.
            lambda_dur (float): Loss scaling coefficient for duration loss.
            lambda_kl (float): Loss scaling coefficient for KL divergence loss.
            cache_generator_outputs (bool): Whether to cache generator outputs.

        """
        assert check_argument_types()
        super().__init__()

        # define modules
        generator_class = AVAILABLE_GENERATERS[generator_type]
        if generator_type == "vits_generator":
            # NOTE(kan-bayashi): Update parameters for the compatibility.
            #   The idim and odim is automatically decided from input data,
            #   where idim represents #vocabularies and odim represents
            #   the input acoustic feature dimension.
            generator_params.update(vocabs=idim, aux_channels=odim)
        self.generator = generator_class(
            **generator_params,
        )
        discriminator_class = AVAILABLE_DISCRIMINATORS[discriminator_type]
        self.discriminator = discriminator_class(
            **discriminator_params,
        )
        self.generator_adv_loss = GeneratorAdversarialLoss(
            **generator_adv_loss_params,
        )
        self.discriminator_adv_loss = DiscriminatorAdversarialLoss(
            **discriminator_adv_loss_params,
        )
        self.feat_match_loss = FeatureMatchLoss(
            **feat_match_loss_params,
        )
        self.mel_loss = MelSpectrogramLoss(
            **mel_loss_params,
        )
        self.kl_loss = KLDivergenceLoss()

        # coefficients
        self.lambda_adv = lambda_adv
        self.lambda_mel = lambda_mel
        self.lambda_kl = lambda_kl
        self.lambda_feat_match = lambda_feat_match
        self.lambda_dur = lambda_dur

        # cache
        self.cache_generator_outputs = cache_generator_outputs
        self._cache = None

        # store sampling rate for saving wav file
        # (not used for the training)
        self.fs = sampling_rate

        # store parameters for test compatibility
        self.spks = self.generator.spks
        self.langs = self.generator.langs
        self.spk_embed_dim = self.generator.spk_embed_dim

    @property
    def require_raw_speech(self):
        """Return whether or not speech is required."""
        return True

    @property
    def require_vocoder(self):
        """Return whether or not vocoder is required."""
        return False

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        forward_generator: bool = True,
    ) -> Dict[str, Any]:
        """Perform generator forward.

        Dispatches to the generator or discriminator turn depending on
        ``forward_generator`` (the trainer alternates between the two).

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
            forward_generator (bool): Whether to forward generator.

        Returns:
            Dict[str, Any]:
                - loss (Tensor): Loss scalar tensor.
                - stats (Dict[str, float]): Statistics to be monitored.
                - weight (Tensor): Weight tensor to summarize losses.
                - optim_idx (int): Optimizer index (0 for G and 1 for D).

        """
        if forward_generator:
            return self._forward_generator(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                speech=speech,
                speech_lengths=speech_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
            )
        else:
            return self._forward_discrminator(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                speech=speech,
                speech_lengths=speech_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
            )

    def _forward_generator(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
    ) -> Dict[str, Any]:
        """Perform generator forward.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).

        Returns:
            Dict[str, Any]:
                * loss (Tensor): Loss scalar tensor.
                * stats (Dict[str, float]): Statistics to be monitored.
                * weight (Tensor): Weight tensor to summarize losses.
                * optim_idx (int): Optimizer index (0 for G and 1 for D).

        """
        # setup
        batch_size = text.size(0)
        # (B, T_feats, aux_channels) -> (B, aux_channels, T_feats)
        feats = feats.transpose(1, 2)
        # (B, T_wav) -> (B, 1, T_wav)
        speech = speech.unsqueeze(1)

        # calculate generator outputs; reuse the outputs cached in the
        # preceding discriminator turn when caching is enabled.
        reuse_cache = True
        if not self.cache_generator_outputs or self._cache is None:
            reuse_cache = False
            outs = self.generator(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
            )
        else:
            outs = self._cache

        # store cache
        if self.training and self.cache_generator_outputs and not reuse_cache:
            self._cache = outs

        # parse outputs
        speech_hat_, dur_nll, _, start_idxs, _, z_mask, outs_ = outs
        _, z_p, m_p, logs_p, _, logs_q = outs_
        # Extract the real-speech segments matching the randomly chosen
        # generator segments (start indices are in frame units; scale to
        # waveform samples via the decoder upsampling factor).
        speech_ = get_segments(
            x=speech,
            start_idxs=start_idxs * self.generator.upsample_factor,
            segment_size=self.generator.segment_size * self.generator.upsample_factor,
        )

        # calculate discriminator outputs
        p_hat = self.discriminator(speech_hat_)
        with torch.no_grad():
            # do not store discriminator gradient in generator turn
            p = self.discriminator(speech_)

        # calculate losses (in full precision even under AMP)
        with autocast(enabled=False):
            mel_loss = self.mel_loss(speech_hat_, speech_)
            kl_loss = self.kl_loss(z_p, logs_q, m_p, logs_p, z_mask)
            dur_loss = torch.sum(dur_nll.float())
            adv_loss = self.generator_adv_loss(p_hat)
            feat_match_loss = self.feat_match_loss(p_hat, p)

            mel_loss = mel_loss * self.lambda_mel
            kl_loss = kl_loss * self.lambda_kl
            dur_loss = dur_loss * self.lambda_dur
            adv_loss = adv_loss * self.lambda_adv
            feat_match_loss = feat_match_loss * self.lambda_feat_match
            loss = mel_loss + kl_loss + dur_loss + adv_loss + feat_match_loss

        stats = dict(
            generator_loss=loss.item(),
            generator_mel_loss=mel_loss.item(),
            generator_kl_loss=kl_loss.item(),
            generator_dur_loss=dur_loss.item(),
            generator_adv_loss=adv_loss.item(),
            generator_feat_match_loss=feat_match_loss.item(),
        )

        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)

        # reset cache
        if reuse_cache or not self.training:
            self._cache = None

        return {
            "loss": loss,
            "stats": stats,
            "weight": weight,
            "optim_idx": 0,  # needed for trainer
        }

    def _forward_discrminator(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
    ) -> Dict[str, Any]:
        """Perform discriminator forward.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).

        Returns:
            Dict[str, Any]:
                * loss (Tensor): Loss scalar tensor.
                * stats (Dict[str, float]): Statistics to be monitored.
                * weight (Tensor): Weight tensor to summarize losses.
                * optim_idx (int): Optimizer index (0 for G and 1 for D).

        """
        # setup
        batch_size = text.size(0)
        # (B, T_feats, aux_channels) -> (B, aux_channels, T_feats)
        feats = feats.transpose(1, 2)
        # (B, T_wav) -> (B, 1, T_wav)
        speech = speech.unsqueeze(1)

        # calculate generator outputs (cached for reuse in the generator turn)
        reuse_cache = True
        if not self.cache_generator_outputs or self._cache is None:
            reuse_cache = False
            outs = self.generator(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
            )
        else:
            outs = self._cache

        # store cache
        if self.cache_generator_outputs and not reuse_cache:
            self._cache = outs

        # parse outputs
        speech_hat_, _, _, start_idxs, *_ = outs
        speech_ = get_segments(
            x=speech,
            start_idxs=start_idxs * self.generator.upsample_factor,
            segment_size=self.generator.segment_size * self.generator.upsample_factor,
        )

        # calculate discriminator outputs
        # (detach so the discriminator turn does not update the generator)
        p_hat = self.discriminator(speech_hat_.detach())
        p = self.discriminator(speech_)

        # calculate losses (in full precision even under AMP)
        with autocast(enabled=False):
            real_loss, fake_loss = self.discriminator_adv_loss(p_hat, p)
            loss = real_loss + fake_loss

        stats = dict(
            discriminator_loss=loss.item(),
            discriminator_real_loss=real_loss.item(),
            discriminator_fake_loss=fake_loss.item(),
        )
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)

        # reset cache
        if reuse_cache or not self.training:
            self._cache = None

        return {
            "loss": loss,
            "stats": stats,
            "weight": weight,
            "optim_idx": 1,  # needed for trainer
        }

    def inference(
        self,
        text: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        durations: Optional[torch.Tensor] = None,
        noise_scale: float = 0.667,
        noise_scale_dur: float = 0.8,
        alpha: float = 1.0,
        max_len: Optional[int] = None,
        use_teacher_forcing: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Run inference.

        Args:
            text (Tensor): Input text index tensor (T_text,).
            feats (Tensor): Feature tensor (T_feats, aux_channels).
            sids (Tensor): Speaker index tensor (1,).
            spembs (Optional[Tensor]): Speaker embedding tensor (spk_embed_dim,).
            lids (Tensor): Language index tensor (1,).
            durations (Tensor): Ground-truth duration tensor (T_text,).
            noise_scale (float): Noise scale value for flow.
            noise_scale_dur (float): Noise scale value for duration predictor.
            alpha (float): Alpha parameter to control the speed of generated speech.
            max_len (Optional[int]): Maximum length.
            use_teacher_forcing (bool): Whether to use teacher forcing.

        Returns:
            Dict[str, Tensor]:
                * wav (Tensor): Generated waveform tensor (T_wav,).
                * att_w (Tensor): Monotonic attention weight tensor (T_feats, T_text).
                * duration (Tensor): Predicted duration tensor (T_text,).

        """
        # setup: add a batch dimension to the single-utterance inputs
        text = text[None]
        text_lengths = torch.tensor(
            [text.size(1)],
            dtype=torch.long,
            device=text.device,
        )
        if sids is not None:
            sids = sids.view(1)
        if lids is not None:
            lids = lids.view(1)
        if durations is not None:
            durations = durations.view(1, 1, -1)

        # inference
        if use_teacher_forcing:
            # Teacher forcing requires the ground-truth features.
            assert feats is not None
            feats = feats[None].transpose(1, 2)
            feats_lengths = torch.tensor(
                [feats.size(2)],
                dtype=torch.long,
                device=feats.device,
            )
            wav, att_w, dur = self.generator.inference(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
                max_len=max_len,
                use_teacher_forcing=use_teacher_forcing,
            )
        else:
            wav, att_w, dur = self.generator.inference(
                text=text,
                text_lengths=text_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
                dur=durations,
                noise_scale=noise_scale,
                noise_scale_dur=noise_scale_dur,
                alpha=alpha,
                max_len=max_len,
            )
        # Drop the batch dimension before returning.
        return dict(wav=wav.view(-1), att_w=att_w[0], duration=dur[0])
| 23,742 | 36.9888 | 104 | py |
espnet | espnet-master/espnet2/gan_tts/vits/generator.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Generator module in VITS.
This code is based on https://github.com/jaywalnut310/vits.
"""
import math
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from espnet2.gan_tts.hifigan import HiFiGANGenerator
from espnet2.gan_tts.utils import get_random_segments
from espnet2.gan_tts.vits.duration_predictor import StochasticDurationPredictor
from espnet2.gan_tts.vits.posterior_encoder import PosteriorEncoder
from espnet2.gan_tts.vits.residual_coupling import ResidualAffineCouplingBlock
from espnet2.gan_tts.vits.text_encoder import TextEncoder
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class VITSGenerator(torch.nn.Module):
"""Generator module in VITS.
This is a module of VITS described in `Conditional Variational Autoencoder
with Adversarial Learning for End-to-End Text-to-Speech`_.
As text encoder, we use conformer architecture instead of the relative positional
Transformer, which contains additional convolution layers.
.. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
Text-to-Speech`: https://arxiv.org/abs/2006.04558
"""
def __init__(
self,
vocabs: int,
aux_channels: int = 513,
hidden_channels: int = 192,
spks: Optional[int] = None,
langs: Optional[int] = None,
spk_embed_dim: Optional[int] = None,
global_channels: int = -1,
segment_size: int = 32,
text_encoder_attention_heads: int = 2,
text_encoder_ffn_expand: int = 4,
text_encoder_blocks: int = 6,
text_encoder_positionwise_layer_type: str = "conv1d",
text_encoder_positionwise_conv_kernel_size: int = 1,
text_encoder_positional_encoding_layer_type: str = "rel_pos",
text_encoder_self_attention_layer_type: str = "rel_selfattn",
text_encoder_activation_type: str = "swish",
text_encoder_normalize_before: bool = True,
text_encoder_dropout_rate: float = 0.1,
text_encoder_positional_dropout_rate: float = 0.0,
text_encoder_attention_dropout_rate: float = 0.0,
text_encoder_conformer_kernel_size: int = 7,
use_macaron_style_in_text_encoder: bool = True,
use_conformer_conv_in_text_encoder: bool = True,
decoder_kernel_size: int = 7,
decoder_channels: int = 512,
decoder_upsample_scales: List[int] = [8, 8, 2, 2],
decoder_upsample_kernel_sizes: List[int] = [16, 16, 4, 4],
decoder_resblock_kernel_sizes: List[int] = [3, 7, 11],
decoder_resblock_dilations: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
use_weight_norm_in_decoder: bool = True,
posterior_encoder_kernel_size: int = 5,
posterior_encoder_layers: int = 16,
posterior_encoder_stacks: int = 1,
posterior_encoder_base_dilation: int = 1,
posterior_encoder_dropout_rate: float = 0.0,
use_weight_norm_in_posterior_encoder: bool = True,
flow_flows: int = 4,
flow_kernel_size: int = 5,
flow_base_dilation: int = 1,
flow_layers: int = 4,
flow_dropout_rate: float = 0.0,
use_weight_norm_in_flow: bool = True,
use_only_mean_in_flow: bool = True,
stochastic_duration_predictor_kernel_size: int = 3,
stochastic_duration_predictor_dropout_rate: float = 0.5,
stochastic_duration_predictor_flows: int = 4,
stochastic_duration_predictor_dds_conv_layers: int = 3,
):
"""Initialize VITS generator module.
Args:
vocabs (int): Input vocabulary size.
aux_channels (int): Number of acoustic feature channels.
hidden_channels (int): Number of hidden channels.
spks (Optional[int]): Number of speakers. If set to > 1, assume that the
sids will be provided as the input and use sid embedding layer.
langs (Optional[int]): Number of languages. If set to > 1, assume that the
lids will be provided as the input and use sid embedding layer.
spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
assume that spembs will be provided as the input.
global_channels (int): Number of global conditioning channels.
segment_size (int): Segment size for decoder.
text_encoder_attention_heads (int): Number of heads in conformer block
of text encoder.
text_encoder_ffn_expand (int): Expansion ratio of FFN in conformer block
of text encoder.
text_encoder_blocks (int): Number of conformer blocks in text encoder.
text_encoder_positionwise_layer_type (str): Position-wise layer type in
conformer block of text encoder.
text_encoder_positionwise_conv_kernel_size (int): Position-wise convolution
kernel size in conformer block of text encoder. Only used when the
above layer type is conv1d or conv1d-linear.
text_encoder_positional_encoding_layer_type (str): Positional encoding layer
type in conformer block of text encoder.
text_encoder_self_attention_layer_type (str): Self-attention layer type in
conformer block of text encoder.
text_encoder_activation_type (str): Activation function type in conformer
block of text encoder.
text_encoder_normalize_before (bool): Whether to apply layer norm before
self-attention in conformer block of text encoder.
text_encoder_dropout_rate (float): Dropout rate in conformer block of
text encoder.
text_encoder_positional_dropout_rate (float): Dropout rate for positional
encoding in conformer block of text encoder.
text_encoder_attention_dropout_rate (float): Dropout rate for attention in
conformer block of text encoder.
text_encoder_conformer_kernel_size (int): Conformer conv kernel size. It
will be used when only use_conformer_conv_in_text_encoder = True.
use_macaron_style_in_text_encoder (bool): Whether to use macaron style FFN
in conformer block of text encoder.
use_conformer_conv_in_text_encoder (bool): Whether to use covolution in
conformer block of text encoder.
decoder_kernel_size (int): Decoder kernel size.
decoder_channels (int): Number of decoder initial channels.
decoder_upsample_scales (List[int]): List of upsampling scales in decoder.
decoder_upsample_kernel_sizes (List[int]): List of kernel size for
upsampling layers in decoder.
decoder_resblock_kernel_sizes (List[int]): List of kernel size for resblocks
in decoder.
decoder_resblock_dilations (List[List[int]]): List of list of dilations for
resblocks in decoder.
use_weight_norm_in_decoder (bool): Whether to apply weight normalization in
decoder.
posterior_encoder_kernel_size (int): Posterior encoder kernel size.
posterior_encoder_layers (int): Number of layers of posterior encoder.
posterior_encoder_stacks (int): Number of stacks of posterior encoder.
posterior_encoder_base_dilation (int): Base dilation of posterior encoder.
posterior_encoder_dropout_rate (float): Dropout rate for posterior encoder.
use_weight_norm_in_posterior_encoder (bool): Whether to apply weight
normalization in posterior encoder.
flow_flows (int): Number of flows in flow.
flow_kernel_size (int): Kernel size in flow.
flow_base_dilation (int): Base dilation in flow.
flow_layers (int): Number of layers in flow.
flow_dropout_rate (float): Dropout rate in flow
use_weight_norm_in_flow (bool): Whether to apply weight normalization in
flow.
use_only_mean_in_flow (bool): Whether to use only mean in flow.
stochastic_duration_predictor_kernel_size (int): Kernel size in stochastic
duration predictor.
stochastic_duration_predictor_dropout_rate (float): Dropout rate in
stochastic duration predictor.
stochastic_duration_predictor_flows (int): Number of flows in stochastic
duration predictor.
stochastic_duration_predictor_dds_conv_layers (int): Number of DDS conv
layers in stochastic duration predictor.
"""
super().__init__()
self.segment_size = segment_size
self.text_encoder = TextEncoder(
vocabs=vocabs,
attention_dim=hidden_channels,
attention_heads=text_encoder_attention_heads,
linear_units=hidden_channels * text_encoder_ffn_expand,
blocks=text_encoder_blocks,
positionwise_layer_type=text_encoder_positionwise_layer_type,
positionwise_conv_kernel_size=text_encoder_positionwise_conv_kernel_size,
positional_encoding_layer_type=text_encoder_positional_encoding_layer_type,
self_attention_layer_type=text_encoder_self_attention_layer_type,
activation_type=text_encoder_activation_type,
normalize_before=text_encoder_normalize_before,
dropout_rate=text_encoder_dropout_rate,
positional_dropout_rate=text_encoder_positional_dropout_rate,
attention_dropout_rate=text_encoder_attention_dropout_rate,
conformer_kernel_size=text_encoder_conformer_kernel_size,
use_macaron_style=use_macaron_style_in_text_encoder,
use_conformer_conv=use_conformer_conv_in_text_encoder,
)
self.decoder = HiFiGANGenerator(
in_channels=hidden_channels,
out_channels=1,
channels=decoder_channels,
global_channels=global_channels,
kernel_size=decoder_kernel_size,
upsample_scales=decoder_upsample_scales,
upsample_kernel_sizes=decoder_upsample_kernel_sizes,
resblock_kernel_sizes=decoder_resblock_kernel_sizes,
resblock_dilations=decoder_resblock_dilations,
use_weight_norm=use_weight_norm_in_decoder,
)
self.posterior_encoder = PosteriorEncoder(
in_channels=aux_channels,
out_channels=hidden_channels,
hidden_channels=hidden_channels,
kernel_size=posterior_encoder_kernel_size,
layers=posterior_encoder_layers,
stacks=posterior_encoder_stacks,
base_dilation=posterior_encoder_base_dilation,
global_channels=global_channels,
dropout_rate=posterior_encoder_dropout_rate,
use_weight_norm=use_weight_norm_in_posterior_encoder,
)
self.flow = ResidualAffineCouplingBlock(
in_channels=hidden_channels,
hidden_channels=hidden_channels,
flows=flow_flows,
kernel_size=flow_kernel_size,
base_dilation=flow_base_dilation,
layers=flow_layers,
global_channels=global_channels,
dropout_rate=flow_dropout_rate,
use_weight_norm=use_weight_norm_in_flow,
use_only_mean=use_only_mean_in_flow,
)
# TODO(kan-bayashi): Add deterministic version as an option
self.duration_predictor = StochasticDurationPredictor(
channels=hidden_channels,
kernel_size=stochastic_duration_predictor_kernel_size,
dropout_rate=stochastic_duration_predictor_dropout_rate,
flows=stochastic_duration_predictor_flows,
dds_conv_layers=stochastic_duration_predictor_dds_conv_layers,
global_channels=global_channels,
)
self.upsample_factor = int(np.prod(decoder_upsample_scales))
self.spks = None
if spks is not None and spks > 1:
assert global_channels > 0
self.spks = spks
self.global_emb = torch.nn.Embedding(spks, global_channels)
self.spk_embed_dim = None
if spk_embed_dim is not None and spk_embed_dim > 0:
assert global_channels > 0
self.spk_embed_dim = spk_embed_dim
self.spemb_proj = torch.nn.Linear(spk_embed_dim, global_channels)
self.langs = None
if langs is not None and langs > 1:
assert global_channels > 0
self.langs = langs
self.lang_emb = torch.nn.Embedding(langs, global_channels)
# delayed import
from espnet2.gan_tts.vits.monotonic_align import maximum_path
self.maximum_path = maximum_path
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
    ) -> Tuple[
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        Tuple[
            torch.Tensor,
            torch.Tensor,
            torch.Tensor,
            torch.Tensor,
            torch.Tensor,
            torch.Tensor,
        ],
    ]:
        """Calculate forward propagation.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, aux_channels, T_feats).
            feats_lengths (Tensor): Feature length tensor (B,).
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).

        Returns:
            Tensor: Waveform tensor (B, 1, segment_size * upsample_factor).
            Tensor: Duration negative log-likelihood (NLL) tensor (B,).
            Tensor: Monotonic attention weight tensor (B, 1, T_feats, T_text).
            Tensor: Segments start index tensor (B,).
            Tensor: Text mask tensor (B, 1, T_text).
            Tensor: Feature mask tensor (B, 1, T_feats).
            tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
                - Tensor: Posterior encoder hidden representation (B, H, T_feats).
                - Tensor: Flow hidden representation (B, H, T_feats).
                - Tensor: Expanded text encoder projected mean (B, H, T_feats).
                - Tensor: Expanded text encoder projected scale (B, H, T_feats).
                - Tensor: Posterior encoder projected mean (B, H, T_feats).
                - Tensor: Posterior encoder projected scale (B, H, T_feats).

        """
        # forward text encoder
        x, m_p, logs_p, x_mask = self.text_encoder(text, text_lengths)
        # calculate global conditioning (sum of whichever inputs are enabled)
        g = None
        if self.spks is not None:
            # speaker one-hot vector embedding: (B, global_channels, 1)
            g = self.global_emb(sids.view(-1)).unsqueeze(-1)
        if self.spk_embed_dim is not None:
            # pretrained speaker embedding, e.g., X-vector (B, global_channels, 1)
            g_ = self.spemb_proj(F.normalize(spembs)).unsqueeze(-1)
            if g is None:
                g = g_
            else:
                g = g + g_
        if self.langs is not None:
            # language one-hot vector embedding: (B, global_channels, 1)
            g_ = self.lang_emb(lids.view(-1)).unsqueeze(-1)
            if g is None:
                g = g_
            else:
                g = g + g_
        # forward posterior encoder
        z, m_q, logs_q, y_mask = self.posterior_encoder(feats, feats_lengths, g=g)
        # forward flow
        z_p = self.flow(z, y_mask, g=g)  # (B, H, T_feats)
        # monotonic alignment search (MAS): find the alignment between text and
        # features that maximizes the Gaussian log-likelihood of z_p under the
        # text-side prior. No gradients flow through the search itself.
        with torch.no_grad():
            # negative cross-entropy, decomposed into four terms of
            # log N(z_p; m_p, exp(logs_p)) so each can be computed as a matmul
            s_p_sq_r = torch.exp(-2 * logs_p)  # inverse variance (B, H, T_text)
            # (B, 1, T_text)
            neg_x_ent_1 = torch.sum(
                -0.5 * math.log(2 * math.pi) - logs_p,
                [1],
                keepdim=True,
            )
            # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text)
            neg_x_ent_2 = torch.matmul(
                -0.5 * (z_p**2).transpose(1, 2),
                s_p_sq_r,
            )
            # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text)
            neg_x_ent_3 = torch.matmul(
                z_p.transpose(1, 2),
                (m_p * s_p_sq_r),
            )
            # (B, 1, T_text)
            neg_x_ent_4 = torch.sum(
                -0.5 * (m_p**2) * s_p_sq_r,
                [1],
                keepdim=True,
            )
            # (B, T_feats, T_text)
            neg_x_ent = neg_x_ent_1 + neg_x_ent_2 + neg_x_ent_3 + neg_x_ent_4
            # (B, 1, T_feats, T_text)
            attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
            # monotonic attention weight: (B, 1, T_feats, T_text)
            attn = (
                self.maximum_path(
                    neg_x_ent,
                    attn_mask.squeeze(1),
                )
                .unsqueeze(1)
                .detach()
            )
        # forward duration predictor; per-token durations are the number of
        # frames each text token was assigned by MAS
        w = attn.sum(2)  # (B, 1, T_text)
        dur_nll = self.duration_predictor(x, x_mask, w=w, g=g)
        # normalize by the total number of non-padded text tokens
        dur_nll = dur_nll / torch.sum(x_mask)
        # expand the length to match with the feature sequence
        # (B, T_feats, T_text) x (B, T_text, H) -> (B, H, T_feats)
        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
        # (B, T_feats, T_text) x (B, T_text, H) -> (B, H, T_feats)
        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
        # get random segments (decoding full utterances would be too expensive)
        z_segments, z_start_idxs = get_random_segments(
            z,
            feats_lengths,
            self.segment_size,
        )
        # forward decoder with random segments
        wav = self.decoder(z_segments, g=g)
        return (
            wav,
            dur_nll,
            attn,
            z_start_idxs,
            x_mask,
            y_mask,
            (z, z_p, m_p, logs_p, m_q, logs_q),
        )
    def inference(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        feats_lengths: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        dur: Optional[torch.Tensor] = None,
        noise_scale: float = 0.667,
        noise_scale_dur: float = 0.8,
        alpha: float = 1.0,
        max_len: Optional[int] = None,
        use_teacher_forcing: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Run inference.

        Args:
            text (Tensor): Input text index tensor (B, T_text,).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Optional[Tensor]): Feature tensor (B, aux_channels, T_feats,).
                Required only when use_teacher_forcing is True.
            feats_lengths (Optional[Tensor]): Feature length tensor (B,).
                Required only when use_teacher_forcing is True.
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
            dur (Optional[Tensor]): Ground-truth duration (B, T_text,). If provided,
                skip the prediction of durations (i.e., teacher forcing).
            noise_scale (float): Noise scale parameter for flow.
            noise_scale_dur (float): Noise scale parameter for duration predictor.
            alpha (float): Alpha parameter to control the speed of generated speech.
            max_len (Optional[int]): Maximum length of acoustic feature sequence.
            use_teacher_forcing (bool): Whether to use teacher forcing.

        Returns:
            Tensor: Generated waveform tensor (B, T_wav).
            Tensor: Monotonic attention weight tensor (B, T_feats, T_text).
            Tensor: Duration tensor (B, T_text).

        """
        # encoder
        x, m_p, logs_p, x_mask = self.text_encoder(text, text_lengths)
        # calculate global conditioning (sum of whichever inputs are enabled)
        g = None
        if self.spks is not None:
            # (B, global_channels, 1)
            g = self.global_emb(sids.view(-1)).unsqueeze(-1)
        if self.spk_embed_dim is not None:
            # (B, global_channels, 1)
            # NOTE(review): unlike forward(), spembs is unsqueezed here — this path
            # presumably expects a single unbatched embedding (spk_embed_dim,)
            g_ = self.spemb_proj(F.normalize(spembs.unsqueeze(0))).unsqueeze(-1)
            if g is None:
                g = g_
            else:
                g = g + g_
        if self.langs is not None:
            # (B, global_channels, 1)
            g_ = self.lang_emb(lids.view(-1)).unsqueeze(-1)
            if g is None:
                g = g_
            else:
                g = g + g_
        if use_teacher_forcing:
            # forward posterior encoder
            z, m_q, logs_q, y_mask = self.posterior_encoder(feats, feats_lengths, g=g)
            # forward flow
            z_p = self.flow(z, y_mask, g=g)  # (B, H, T_feats)
            # monotonic alignment search: same negative cross-entropy
            # decomposition as in forward(), but without no_grad/detach since
            # inference already runs without gradients
            s_p_sq_r = torch.exp(-2 * logs_p)  # (B, H, T_text)
            # (B, 1, T_text)
            neg_x_ent_1 = torch.sum(
                -0.5 * math.log(2 * math.pi) - logs_p,
                [1],
                keepdim=True,
            )
            # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text)
            neg_x_ent_2 = torch.matmul(
                -0.5 * (z_p**2).transpose(1, 2),
                s_p_sq_r,
            )
            # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text)
            neg_x_ent_3 = torch.matmul(
                z_p.transpose(1, 2),
                (m_p * s_p_sq_r),
            )
            # (B, 1, T_text)
            neg_x_ent_4 = torch.sum(
                -0.5 * (m_p**2) * s_p_sq_r,
                [1],
                keepdim=True,
            )
            # (B, T_feats, T_text)
            neg_x_ent = neg_x_ent_1 + neg_x_ent_2 + neg_x_ent_3 + neg_x_ent_4
            # (B, 1, T_feats, T_text)
            attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
            # monotonic attention weight: (B, 1, T_feats, T_text)
            attn = self.maximum_path(
                neg_x_ent,
                attn_mask.squeeze(1),
            ).unsqueeze(1)
            dur = attn.sum(2)  # (B, 1, T_text)
            # forward decoder with the posterior latent (whole sequence)
            wav = self.decoder(z * y_mask, g=g)
        else:
            # duration: predict if not given, otherwise use the supplied one
            if dur is None:
                # stochastic duration predictor in inverse mode returns log durations
                logw = self.duration_predictor(
                    x,
                    x_mask,
                    g=g,
                    inverse=True,
                    noise_scale=noise_scale_dur,
                )
                # alpha < 1.0 shortens durations, i.e., speeds up the speech
                w = torch.exp(logw) * x_mask * alpha
                dur = torch.ceil(w)
            # total output length, clamped so it is at least one frame
            y_lengths = torch.clamp_min(torch.sum(dur, [1, 2]), 1).long()
            y_mask = make_non_pad_mask(y_lengths).unsqueeze(1).to(text.device)
            attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
            # build a hard monotonic alignment from the integer durations
            attn = self._generate_path(dur, attn_mask)
            # expand the length to match with the feature sequence
            # (B, T_feats, T_text) x (B, T_text, H) -> (B, H, T_feats)
            m_p = torch.matmul(
                attn.squeeze(1),
                m_p.transpose(1, 2),
            ).transpose(1, 2)
            # (B, T_feats, T_text) x (B, T_text, H) -> (B, H, T_feats)
            logs_p = torch.matmul(
                attn.squeeze(1),
                logs_p.transpose(1, 2),
            ).transpose(1, 2)
            # decoder: sample from the expanded prior, invert the flow, vocode
            z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
            z = self.flow(z_p, y_mask, g=g, inverse=True)
            wav = self.decoder((z * y_mask)[:, :, :max_len], g=g)
        return wav.squeeze(1), attn.squeeze(1), dur.squeeze(1)
def _generate_path(self, dur: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""Generate path a.k.a. monotonic attention.
Args:
dur (Tensor): Duration tensor (B, 1, T_text).
mask (Tensor): Attention mask tensor (B, 1, T_feats, T_text).
Returns:
Tensor: Path tensor (B, 1, T_feats, T_text).
"""
b, _, t_y, t_x = mask.shape
cum_dur = torch.cumsum(dur, -1)
cum_dur_flat = cum_dur.view(b * t_x)
path = torch.arange(t_y, dtype=dur.dtype, device=dur.device)
path = path.unsqueeze(0) < cum_dur_flat.unsqueeze(1)
path = path.view(b, t_x, t_y).to(dtype=mask.dtype)
# path will be like (t_x = 3, t_y = 5):
# [[[1., 1., 0., 0., 0.], [[[1., 1., 0., 0., 0.],
# [1., 1., 1., 1., 0.], --> [0., 0., 1., 1., 0.],
# [1., 1., 1., 1., 1.]]] [0., 0., 0., 0., 1.]]]
path = path - F.pad(path, [0, 0, 1, 0, 0, 0])[:, :-1]
return path.unsqueeze(1).transpose(2, 3) * mask
| 25,456 | 43.273043 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/vits/text_encoder.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Text encoder module in VITS.
This code is based on https://github.com/jaywalnut310/vits.
"""
import math
from typing import Tuple
import torch
from espnet.nets.pytorch_backend.conformer.encoder import Encoder
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class TextEncoder(torch.nn.Module):
    """Text encoder module in VITS.

    This is a module of text encoder described in `Conditional Variational Autoencoder
    with Adversarial Learning for End-to-End Text-to-Speech`_.

    Instead of the relative positional Transformer, we use conformer architecture as
    the encoder module, which contains additional convolution layers.

    .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
        Text-to-Speech`: https://arxiv.org/abs/2006.04558

    """

    def __init__(
        self,
        vocabs: int,
        attention_dim: int = 192,
        attention_heads: int = 2,
        linear_units: int = 768,
        blocks: int = 6,
        positionwise_layer_type: str = "conv1d",
        positionwise_conv_kernel_size: int = 3,
        positional_encoding_layer_type: str = "rel_pos",
        self_attention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        normalize_before: bool = True,
        use_macaron_style: bool = False,
        use_conformer_conv: bool = False,
        conformer_kernel_size: int = 7,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.0,
        attention_dropout_rate: float = 0.0,
    ):
        """Initialize TextEncoder module.

        Args:
            vocabs (int): Vocabulary size.
            attention_dim (int): Attention dimension.
            attention_heads (int): Number of attention heads.
            linear_units (int): Number of linear units of positionwise layers.
            blocks (int): Number of encoder blocks.
            positionwise_layer_type (str): Positionwise layer type.
            positionwise_conv_kernel_size (int): Positionwise layer's kernel size.
            positional_encoding_layer_type (str): Positional encoding layer type.
            self_attention_layer_type (str): Self-attention layer type.
            activation_type (str): Activation function type.
            normalize_before (bool): Whether to apply LayerNorm before attention.
            use_macaron_style (bool): Whether to use macaron style components.
            use_conformer_conv (bool): Whether to use conformer conv layers.
            conformer_kernel_size (int): Conformer's conv kernel size.
            dropout_rate (float): Dropout rate.
            positional_dropout_rate (float): Dropout rate for positional encoding.
            attention_dropout_rate (float): Dropout rate for attention.

        """
        super().__init__()
        # store for forward (used to scale the embedding output)
        self.attention_dim = attention_dim
        # define modules
        self.emb = torch.nn.Embedding(vocabs, attention_dim)
        # Transformer-style init: std = attention_dim ** -0.5
        torch.nn.init.normal_(self.emb.weight, 0.0, attention_dim**-0.5)
        # idim=-1 and input_layer=None because the embedding is applied here,
        # so the encoder receives already-embedded inputs
        self.encoder = Encoder(
            idim=-1,
            input_layer=None,
            attention_dim=attention_dim,
            attention_heads=attention_heads,
            linear_units=linear_units,
            num_blocks=blocks,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            attention_dropout_rate=attention_dropout_rate,
            normalize_before=normalize_before,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            macaron_style=use_macaron_style,
            pos_enc_layer_type=positional_encoding_layer_type,
            selfattention_layer_type=self_attention_layer_type,
            activation_type=activation_type,
            use_cnn_module=use_conformer_conv,
            cnn_module_kernel=conformer_kernel_size,
        )
        # projects hidden states to concatenated [mean; log-scale] statistics
        self.proj = torch.nn.Conv1d(attention_dim, attention_dim * 2, 1)

    def forward(
        self,
        x: torch.Tensor,
        x_lengths: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input index tensor (B, T_text).
            x_lengths (Tensor): Length tensor (B,).

        Returns:
            Tensor: Encoded hidden representation (B, attention_dim, T_text).
            Tensor: Projected mean tensor (B, attention_dim, T_text).
            Tensor: Projected scale tensor (B, attention_dim, T_text).
            Tensor: Mask tensor for input tensor (B, 1, T_text).

        """
        # scale embeddings as in Transformer to balance with positional encoding
        x = self.emb(x) * math.sqrt(self.attention_dim)
        x_mask = (
            make_non_pad_mask(x_lengths)
            .to(
                device=x.device,
                dtype=x.dtype,
            )
            .unsqueeze(1)
        )
        # encoder assume the channel last (B, T_text, attention_dim)
        # but mask shape should be (B, 1, T_text)
        x, _ = self.encoder(x, x_mask)
        # convert the channel first (B, attention_dim, T_text)
        x = x.transpose(1, 2)
        stats = self.proj(x) * x_mask
        # split the stacked statistics into mean and log-scale halves
        m, logs = stats.split(stats.size(1) // 2, dim=1)
        return x, m, logs, x_mask
| 5,385 | 37.198582 | 86 | py |
espnet | espnet-master/espnet2/gan_tts/vits/posterior_encoder.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Posterior encoder module in VITS.
This code is based on https://github.com/jaywalnut310/vits.
"""
from typing import Optional, Tuple
import torch
from espnet2.gan_tts.wavenet import WaveNet
from espnet2.gan_tts.wavenet.residual_block import Conv1d
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class PosteriorEncoder(torch.nn.Module):
    """Posterior encoder module in VITS.

    This is a module of posterior encoder described in `Conditional Variational
    Autoencoder with Adversarial Learning for End-to-End Text-to-Speech`_.

    .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
        Text-to-Speech`: https://arxiv.org/abs/2006.04558

    """

    def __init__(
        self,
        in_channels: int = 513,
        out_channels: int = 192,
        hidden_channels: int = 192,
        kernel_size: int = 5,
        layers: int = 16,
        stacks: int = 1,
        base_dilation: int = 1,
        global_channels: int = -1,
        dropout_rate: float = 0.0,
        bias: bool = True,
        use_weight_norm: bool = True,
    ):
        """Initialize PosteriorEncoder module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            hidden_channels (int): Number of hidden channels.
            kernel_size (int): Kernel size in WaveNet.
            layers (int): Number of layers of WaveNet.
            stacks (int): Number of repeat stacking of WaveNet.
            base_dilation (int): Base dilation factor.
            global_channels (int): Number of global conditioning channels.
            dropout_rate (float): Dropout rate.
            bias (bool): Whether to use bias parameters in conv.
            use_weight_norm (bool): Whether to apply weight norm.

        """
        super().__init__()

        # define modules
        self.input_conv = Conv1d(in_channels, hidden_channels, 1)
        # the WaveNet is used purely as a feature extractor: first/last convs
        # are disabled (handled by input_conv / proj) and aux input is unused
        self.encoder = WaveNet(
            in_channels=-1,
            out_channels=-1,
            kernel_size=kernel_size,
            layers=layers,
            stacks=stacks,
            base_dilation=base_dilation,
            residual_channels=hidden_channels,
            aux_channels=-1,
            gate_channels=hidden_channels * 2,
            skip_channels=hidden_channels,
            global_channels=global_channels,
            dropout_rate=dropout_rate,
            bias=bias,
            use_weight_norm=use_weight_norm,
            use_first_conv=False,
            use_last_conv=False,
            scale_residual=False,
            scale_skip_connect=True,
        )
        # projects hidden states to concatenated [mean; log-scale] statistics
        self.proj = Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(
        self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, in_channels, T_feats).
            x_lengths (Tensor): Length tensor (B,).
            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).

        Returns:
            Tensor: Encoded hidden representation tensor (B, out_channels, T_feats).
            Tensor: Projected mean tensor (B, out_channels, T_feats).
            Tensor: Projected scale tensor (B, out_channels, T_feats).
            Tensor: Mask tensor for input tensor (B, 1, T_feats).

        """
        x_mask = (
            make_non_pad_mask(x_lengths)
            .unsqueeze(1)
            .to(
                dtype=x.dtype,
                device=x.device,
            )
        )
        x = self.input_conv(x) * x_mask
        x = self.encoder(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = stats.split(stats.size(1) // 2, dim=1)
        # reparameterization trick: z = m + eps * exp(logs), masked to length
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask
| 4,037 | 33.512821 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/vits/monotonic_align/__init__.py | """Maximum path calculation module.
This code is based on https://github.com/jaywalnut310/vits.
"""
import warnings
import numpy as np
import torch
from numba import njit, prange
try:
from .core import maximum_path_c
is_cython_avalable = True
except ImportError:
is_cython_avalable = False
warnings.warn(
"Cython version is not available. Fallback to 'EXPERIMETAL' numba version. "
"If you want to use the cython version, please build it as follows: "
"`cd espnet2/gan_tts/vits/monotonic_align; python setup.py build_ext --inplace`"
)
def maximum_path(neg_x_ent: torch.Tensor, attn_mask: torch.Tensor) -> torch.Tensor:
    """Calculate maximum path.

    Args:
        neg_x_ent (Tensor): Negative X entropy tensor (B, T_feats, T_text).
        attn_mask (Tensor): Attention mask (B, T_feats, T_text).

    Returns:
        Tensor: Maximum path tensor (B, T_feats, T_text).

    """
    # remember where/what the result must be restored to
    orig_device = neg_x_ent.device
    orig_dtype = neg_x_ent.dtype
    # run the search on CPU over float32 values with an int32 output buffer
    values = neg_x_ent.cpu().numpy().astype(np.float32)
    out = np.zeros(values.shape, dtype=np.int32)
    # per-batch valid lengths along each axis, derived from the attention mask
    t_t_max = attn_mask.sum(1)[:, 0].cpu().numpy().astype(np.int32)
    t_s_max = attn_mask.sum(2)[:, 0].cpu().numpy().astype(np.int32)
    # prefer the compiled cython backend; otherwise use the numba fallback
    backend = maximum_path_c if is_cython_avalable else maximum_path_numba
    backend(out, values, t_t_max, t_s_max)
    return torch.from_numpy(out).to(device=orig_device, dtype=orig_dtype)
@njit
def maximum_path_each_numba(path, value, t_y, t_x, max_neg_val=-np.inf):
    """Calculate a single maximum path with numba.

    Viterbi-style search: the forward pass accumulates best scores in-place
    into ``value``; the backward pass writes the chosen monotonic path into
    ``path`` (both arrays are mutated, nothing is returned).
    """
    index = t_x - 1
    # forward pass: value[y, x] += best predecessor score, where a step may
    # either stay on the same text position (x) or advance by one (x - 1)
    for y in range(t_y):
        for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
            if x == y:
                # staying is impossible on the diagonal boundary
                v_cur = max_neg_val
            else:
                v_cur = value[y - 1, x]
            if x == 0:
                if y == 0:
                    # origin cell has no predecessor cost
                    v_prev = 0.0
                else:
                    v_prev = max_neg_val
            else:
                v_prev = value[y - 1, x - 1]
            value[y, x] += max(v_prev, v_cur)
    # backward pass: trace the best path from the last cell, moving left
    # whenever the diagonal predecessor scored at least as well
    for y in range(t_y - 1, -1, -1):
        path[y, index] = 1
        if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
            index = index - 1
@njit(parallel=True)
def maximum_path_numba(paths, values, t_ys, t_xs):
    """Calculate batch maximum path with numba.

    Each batch element is independent, so ``prange`` runs them in parallel;
    ``paths`` and ``values`` are mutated in-place.
    """
    for i in prange(paths.shape[0]):
        maximum_path_each_numba(paths[i], values[i], t_ys[i], t_xs[i])
| 2,493 | 30.175 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/parallel_wavegan/parallel_wavegan.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Parallel WaveGAN Modules.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
import logging
import math
from typing import Any, Dict, Optional
import numpy as np
import torch
from espnet2.gan_tts.parallel_wavegan import upsample
from espnet2.gan_tts.wavenet.residual_block import Conv1d, Conv1d1x1, ResidualBlock
class ParallelWaveGANGenerator(torch.nn.Module):
"""Parallel WaveGAN Generator module."""
def __init__(
self,
in_channels: int = 1,
out_channels: int = 1,
kernel_size: int = 3,
layers: int = 30,
stacks: int = 3,
residual_channels: int = 64,
gate_channels: int = 128,
skip_channels: int = 64,
aux_channels: int = 80,
aux_context_window: int = 2,
dropout_rate: float = 0.0,
bias: bool = True,
use_weight_norm: bool = True,
upsample_conditional_features: bool = True,
upsample_net: str = "ConvInUpsampleNetwork",
upsample_params: Dict[str, Any] = {"upsample_scales": [4, 4, 4, 4]},
):
"""Initialize ParallelWaveGANGenerator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
kernel_size (int): Kernel size of dilated convolution.
layers (int): Number of residual block layers.
stacks (int): Number of stacks i.e., dilation cycles.
residual_channels (int): Number of channels in residual conv.
gate_channels (int): Number of channels in gated conv.
skip_channels (int): Number of channels in skip conv.
aux_channels (int): Number of channels for auxiliary feature conv.
aux_context_window (int): Context window size for auxiliary feature.
dropout_rate (float): Dropout rate. 0.0 means no dropout applied.
bias (bool): Whether to use bias parameter in conv layer.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
upsample_conditional_features (bool): Whether to use upsampling network.
upsample_net (str): Upsampling network architecture.
upsample_params (Dict[str, Any]): Upsampling network parameters.
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.aux_channels = aux_channels
self.aux_context_window = aux_context_window
self.layers = layers
self.stacks = stacks
self.kernel_size = kernel_size
# check the number of layers and stacks
assert layers % stacks == 0
layers_per_stack = layers // stacks
# define first convolution
self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True)
# define conv + upsampling network
if upsample_conditional_features:
if upsample_net == "ConvInUpsampleNetwork":
upsample_params.update(
{
"aux_channels": aux_channels,
"aux_context_window": aux_context_window,
}
)
self.upsample_net = getattr(upsample, upsample_net)(**upsample_params)
self.upsample_factor = int(np.prod(upsample_params["upsample_scales"]))
else:
self.upsample_net = None
self.upsample_factor = out_channels
# define residual blocks
self.conv_layers = torch.nn.ModuleList()
for layer in range(layers):
dilation = 2 ** (layer % layers_per_stack)
conv = ResidualBlock(
kernel_size=kernel_size,
residual_channels=residual_channels,
gate_channels=gate_channels,
skip_channels=skip_channels,
aux_channels=aux_channels,
dilation=dilation,
dropout_rate=dropout_rate,
bias=bias,
scale_residual=True,
)
self.conv_layers += [conv]
# define output layers
self.last_conv_layers = torch.nn.ModuleList(
[
torch.nn.ReLU(),
Conv1d1x1(skip_channels, skip_channels, bias=True),
torch.nn.ReLU(),
Conv1d1x1(skip_channels, out_channels, bias=True),
]
)
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# NOTE(kan-bayashi): register pre hook function for the compatibility with
# parallel_wavegan repo
self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook)
def forward(
self, c: torch.Tensor, z: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Calculate forward propagation.
Args:
c (Tensor): Local conditioning auxiliary features (B, C ,T_feats).
z (Tensor): Input noise signal (B, 1, T_wav).
Returns:
Tensor: Output tensor (B, out_channels, T_wav)
"""
if z is None:
b, _, t = c.size()
z = torch.randn(b, 1, t * self.upsample_factor).to(
device=c.device, dtype=c.dtype
)
# perform upsampling
if self.upsample_net is not None:
c = self.upsample_net(c)
assert c.size(-1) == z.size(-1)
# encode to hidden representation
x = self.first_conv(z)
skips = 0
for f in self.conv_layers:
x, h = f(x=x, x_mask=None, c=c)
skips += h
skips *= math.sqrt(1.0 / len(self.conv_layers))
# apply final layers
x = skips
for f in self.last_conv_layers:
x = f(x)
return x
def remove_weight_norm(self):
"""Remove weight normalization module from all of the layers."""
def _remove_weight_norm(m: torch.nn.Module):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm)
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m: torch.nn.Module):
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
@staticmethod
def _get_receptive_field_size(
layers, stacks, kernel_size, dilation=lambda x: 2**x
):
assert layers % stacks == 0
layers_per_cycle = layers // stacks
dilations = [dilation(i % layers_per_cycle) for i in range(layers)]
return (kernel_size - 1) * sum(dilations) + 1
@property
def receptive_field_size(self):
"""Return receptive field size."""
return self._get_receptive_field_size(
self.layers, self.stacks, self.kernel_size
)
def inference(
self, c: torch.Tensor, z: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Perform inference.
Args:
c (Tensor): Local conditioning auxiliary features (T_feats ,C).
z (Optional[Tensor]): Input noise signal (T_wav, 1).
Returns:
Tensor: Output tensor (T_wav, out_channels)
"""
if z is not None:
z = z.transpose(1, 0).unsqueeze(0)
c = c.transpose(1, 0).unsqueeze(0)
return self.forward(c, z).squeeze(0).transpose(1, 0)
def _load_state_dict_pre_hook(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
"""Apply pre hook function before loading state dict."""
keys = list(state_dict.keys())
for k in keys:
if "conv1x1_skip" in k.replace(prefix, ""):
v_skip = state_dict.pop(k)
v_out = state_dict[k.replace("skip", "out")]
state_dict[k.replace("skip", "out")] = torch.cat([v_out, v_skip], dim=0)
class ParallelWaveGANDiscriminator(torch.nn.Module):
    """Parallel WaveGAN Discriminator module.

    A stack of non-conditional dilated 1D convolutions mapping a raw waveform
    to a frame-wise score sequence.
    """

    def __init__(
        self,
        in_channels: int = 1,
        out_channels: int = 1,
        kernel_size: int = 3,
        layers: int = 10,
        conv_channels: int = 64,
        dilation_factor: int = 1,
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.2},
        bias: bool = True,
        use_weight_norm: bool = True,
    ):
        """Initialize ParallelWaveGANDiscriminator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of the conv layers (must be odd).
            layers (int): Number of conv layers.
            conv_channels (int): Number of hidden channels.
            dilation_factor (int): Dilation factor. For example, if
                dilation_factor = 2, the dilation will be 2, 4, 8, ...,
                and so on; if 1, the dilation grows linearly (1, 1, 2, 3, ...).
            nonlinear_activation (str): Nonlinear function after each conv.
            nonlinear_activation_params (Dict[str, Any]): Nonlinear function
                parameters.
            bias (bool): Whether to use bias parameter in conv.
            use_weight_norm (bool): Whether to use weight norm. If set to true,
                it will be applied to all of the conv layers.

        """
        super().__init__()
        assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
        assert dilation_factor > 0, "Dilation factor must be > 0."
        self.conv_layers = torch.nn.ModuleList()
        prev_channels = in_channels
        for idx in range(layers - 1):
            if idx == 0:
                dilation = 1
            else:
                # linear (factor == 1) or exponential dilation growth; hidden
                # layers after the first all take conv_channels as input
                dilation = idx if dilation_factor == 1 else dilation_factor**idx
                prev_channels = conv_channels
            self.conv_layers += [
                Conv1d(
                    prev_channels,
                    conv_channels,
                    kernel_size=kernel_size,
                    padding=(kernel_size - 1) // 2 * dilation,
                    dilation=dilation,
                    bias=bias,
                ),
                getattr(torch.nn, nonlinear_activation)(
                    inplace=True, **nonlinear_activation_params
                ),
            ]
        # final projection to the score sequence (no dilation, no activation)
        self.conv_layers += [
            Conv1d(
                prev_channels,
                out_channels,
                kernel_size=kernel_size,
                padding=(kernel_size - 1) // 2,
                bias=bias,
            )
        ]
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, 1, T).

        Returns:
            Tensor: Output tensor (B, 1, T).

        """
        for layer in self.conv_layers:
            x = layer(x)
        return x

    def apply_weight_norm(self):
        """Apply weight normalization to all 1D/2D convolution layers."""

        def _add_weight_norm(m: torch.nn.Module):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_add_weight_norm)

    def remove_weight_norm(self):
        """Remove weight normalization from every layer that has it."""

        def _strip(m: torch.nn.Module):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:
                # this module did not have weight norm applied
                return

        self.apply(_strip)
| 12,423 | 34.195467 | 88 | py |
espnet | espnet-master/espnet2/gan_tts/parallel_wavegan/upsample.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Upsampling module.
This code is modified from https://github.com/kan-bayashi/ParallelWaveGAN.
"""
from typing import Any, Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from espnet2.gan_tts.wavenet.residual_block import Conv1d
class Stretch2d(torch.nn.Module):
    """Stretch2d module.

    Upsamples a (B, C, F, T) map by integer factors along the frequency and
    time axes via ``torch.nn.functional.interpolate``.
    """

    def __init__(self, x_scale: int, y_scale: int, mode: str = "nearest"):
        """Initialize Stretch2d module.

        Args:
            x_scale (int): X scaling factor (Time axis in spectrogram).
            y_scale (int): Y scaling factor (Frequency axis in spectrogram).
            mode (str): Interpolation mode.

        """
        super().__init__()
        self.x_scale = x_scale
        self.y_scale = y_scale
        self.mode = mode

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, C, F, T).

        Returns:
            Tensor: Interpolated tensor (B, C, F * y_scale, T * x_scale).

        """
        scale = (self.y_scale, self.x_scale)
        return F.interpolate(x, scale_factor=scale, mode=self.mode)
class Conv2d(torch.nn.Conv2d):
    """Conv2d module with customized initialization.

    ``reset_parameters`` makes the kernel a moving-average filter (every tap
    equals ``1 / prod(kernel_size)``) and zeroes the bias, so the layer starts
    out as a smoothing operator.
    """

    def __init__(self, *args, **kwargs):
        """Initialize Conv2d module."""
        super().__init__(*args, **kwargs)

    def reset_parameters(self):
        """Reset parameters to an averaging kernel with zero bias."""
        num_taps = float(np.prod(self.kernel_size))
        self.weight.data.fill_(1.0 / num_taps)
        if self.bias is not None:
            torch.nn.init.constant_(self.bias, 0.0)
class UpsampleNetwork(torch.nn.Module):
    """Upsampling network module.

    Stretches features along the time axis by ``prod(upsample_scales)`` using
    repeated interpolation + smoothing-convolution pairs.
    """

    def __init__(
        self,
        upsample_scales: List[int],
        nonlinear_activation: Optional[str] = None,
        nonlinear_activation_params: Dict[str, Any] = {},
        interpolate_mode: str = "nearest",
        freq_axis_kernel_size: int = 1,
    ):
        """Initialize UpsampleNetwork module.

        Args:
            upsample_scales (List[int]): List of upsampling scales.
            nonlinear_activation (Optional[str]): Activation function name.
            nonlinear_activation_params (Dict[str, Any]): Arguments for the
                specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of
                frequency axis (must be odd).

        """
        super().__init__()
        assert (
            freq_axis_kernel_size - 1
        ) % 2 == 0, "Not support even number freq axis kernel size."
        freq_pad = (freq_axis_kernel_size - 1) // 2
        self.up_layers = torch.nn.ModuleList()
        for scale in upsample_scales:
            # interpolation along the time axis only
            self.up_layers += [Stretch2d(scale, 1, interpolate_mode)]
            # smoothing conv whose time kernel covers the stretched window
            self.up_layers += [
                Conv2d(
                    1,
                    1,
                    kernel_size=(freq_axis_kernel_size, scale * 2 + 1),
                    padding=(freq_pad, scale),
                    bias=False,
                )
            ]
            # optional nonlinearity after each stage
            if nonlinear_activation is not None:
                self.up_layers += [
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    )
                ]

    def forward(self, c: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, C, T_feats).

        Returns:
            Tensor: Upsampled tensor (B, C, T_wav).

        """
        c = c.unsqueeze(1)  # (B, 1, C, T): treat features as a 2D "image"
        for layer in self.up_layers:
            c = layer(c)
        return c.squeeze(1)  # (B, C, T_wav)
class ConvInUpsampleNetwork(torch.nn.Module):
    """Convolution + upsampling network module.

    A context-gathering 1D convolution over the auxiliary features followed by
    an upsampling network that brings them to waveform resolution.
    """

    def __init__(
        self,
        upsample_scales: List[int],
        nonlinear_activation: Optional[str] = None,
        nonlinear_activation_params: Dict[str, Any] = {},
        interpolate_mode: str = "nearest",
        freq_axis_kernel_size: int = 1,
        aux_channels: int = 80,
        aux_context_window: int = 0,
    ):
        """Initialize ConvInUpsampleNetwork module.

        Args:
            upsample_scales (list): List of upsampling scales.
            nonlinear_activation (Optional[str]): Activation function name.
            nonlinear_activation_params (Dict[str, Any]): Arguments for the
                specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of
                frequency axis.
            aux_channels (int): Number of channels of pre-conv layer.
            aux_context_window (int): Context window size of the pre-conv
                layer.

        """
        super().__init__()
        self.aux_context_window = aux_context_window
        # The pre-conv kernel spans +/- aux_context_window frames to capture
        # wide-context information in the conditional features.
        # NOTE(kan-bayashi): Use pad here, which is not used in
        # parallel_wavegan.
        self.pad = torch.nn.ReplicationPad1d(aux_context_window)
        self.conv_in = Conv1d(
            aux_channels,
            aux_channels,
            kernel_size=2 * aux_context_window + 1,
            bias=False,
        )
        self.upsample = UpsampleNetwork(
            upsample_scales=upsample_scales,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
        )

    def forward(self, c: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, C, T_feats).

        Returns:
            Tensor: Upsampled tensor (B, C, T_wav),
                where T_wav = T_feats * prod(upsample_scales).

        """
        padded = self.pad(c)
        return self.upsample(self.conv_in(padded))
| 6,161 | 31.951872 | 88 | py |
espnet | espnet-master/espnet2/gan_svs/espnet_model.py | # Copyright 2021 Tomoki Hayashi
# Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based Singing-voice-synthesis ESPnet model."""
from contextlib import contextmanager
from typing import Any, Dict, Optional
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.gan_svs.abs_gan_svs import AbsGANSVS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.svs.feats_extract.score_feats_extract import (
FrameScoreFeats,
SyllableScoreFeats,
expand_to_frame,
)
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
# AMP (automatic mixed precision) autocast exists only in torch>=1.6.0;
# otherwise fall back to a no-op context manager with the same signature.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch < 1.6.0
    @contextmanager
    def autocast(enabled=True):  # NOQA
        yield
class ESPnetGANSVSModel(AbsGANESPnetModel):
"""ESPnet model for GAN-based singing voice synthesis task."""
def __init__(
self,
text_extract: Optional[AbsFeatsExtract],
feats_extract: Optional[AbsFeatsExtract],
score_feats_extract: Optional[AbsFeatsExtract],
label_extract: Optional[AbsFeatsExtract],
pitch_extract: Optional[AbsFeatsExtract],
ying_extract: Optional[AbsFeatsExtract],
duration_extract: Optional[AbsFeatsExtract],
energy_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
pitch_normalize: Optional[AbsNormalize and InversibleInterface],
energy_normalize: Optional[AbsNormalize and InversibleInterface],
svs: AbsGANSVS,
):
"""Initialize ESPnetGANSVSModel module."""
assert check_argument_types()
super().__init__()
self.text_extract = text_extract
self.feats_extract = feats_extract
self.score_feats_extract = score_feats_extract
self.label_extract = label_extract
self.pitch_extract = pitch_extract
self.duration_extract = duration_extract
self.energy_extract = energy_extract
self.ying_extract = ying_extract
self.normalize = normalize
self.pitch_normalize = pitch_normalize
self.energy_normalize = energy_normalize
self.svs = svs
assert hasattr(
svs, "generator"
), "generator module must be registered as svs.generator"
assert hasattr(
svs, "discriminator"
), "discriminator module must be registered as svs.discriminator"
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        singing: torch.Tensor,
        singing_lengths: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        feats_lengths: Optional[torch.Tensor] = None,
        label: Optional[torch.Tensor] = None,
        label_lengths: Optional[torch.Tensor] = None,
        phn_cnt: Optional[torch.Tensor] = None,
        midi: Optional[torch.Tensor] = None,
        midi_lengths: Optional[torch.Tensor] = None,
        duration_phn: Optional[torch.Tensor] = None,
        duration_phn_lengths: Optional[torch.Tensor] = None,
        duration_ruled_phn: Optional[torch.Tensor] = None,
        duration_ruled_phn_lengths: Optional[torch.Tensor] = None,
        duration_syb: Optional[torch.Tensor] = None,
        duration_syb_lengths: Optional[torch.Tensor] = None,
        slur: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        energy_lengths: Optional[torch.Tensor] = None,
        ying: Optional[torch.Tensor] = None,
        ying_lengths: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        forward_generator: bool = True,
        **kwargs,
    ) -> Dict[str, Any]:
        """Return generator or discriminator loss with dict format.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            singing (Tensor): Singing waveform tensor (B, T_wav).
            singing_lengths (Tensor): Singing length tensor (B,).
            feats (Optional[Tensor]): Acoustic features; extracted from
                ``singing`` when not given.
            feats_lengths (Optional[Tensor]): Feature length tensor (B,).
            label (Optional[Tensor]): Label tensor (B, T_label).
            label_lengths (Optional[Tensor]): Label length tensor (B,).
            phn_cnt (Optional[Tensor]): Number of phones in each syllable (B, T_syb).
            midi (Optional[Tensor]): Midi tensor (B, T_label).
            midi_lengths (Optional[Tensor]): Midi length tensor (B,).
            duration_phn (Optional[Tensor]): Duration tensor (B, T_label).
            duration_phn_lengths (Optional[Tensor]): Duration length tensor (B,).
            duration_ruled_phn (Optional[Tensor]): Duration tensor (B, T_phone).
            duration_ruled_phn_lengths (Optional[Tensor]): Duration length tensor (B,).
            duration_syb (Optional[Tensor]): Duration tensor (B, T_syllable).
            duration_syb_lengths (Optional[Tensor]): Duration length tensor (B,).
            slur (Optional[Tensor]): Slur tensor (B, T_slur).
            pitch (Optional[Tensor]): Pitch tensor (B, T_wav) - f0 sequence.
            pitch_lengths (Optional[Tensor]): Pitch length tensor (B,).
            energy (Optional[Tensor]): Energy tensor.
            energy_lengths (Optional[Tensor]): Energy length tensor (B,).
            ying (Optional[Tensor]): Ying feature tensor; extracted when not given.
            ying_lengths (Optional[Tensor]): Ying feature length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker ID tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).
            forward_generator (bool): Whether to forward generator.
            kwargs: "utt_id" is among the input.

        Returns:
            Dict[str, Any]:
                - loss (Tensor): Loss scalar tensor.
                - stats (Dict[str, float]): Statistics to be monitored.
                - weight (Tensor): Weight tensor to summarize losses.
                - optim_idx (int): Optimizer index (0 for G and 1 for D).

        """
        # Feature extraction runs with autocast disabled (always fp32).
        with autocast(False):
            # Extract acoustic features from the raw waveform if not given.
            if self.feats_extract is not None and feats is None:
                feats, feats_lengths = self.feats_extract(
                    singing,
                    singing_lengths,
                )
            # Extract auxiliary features:
            #   score: 128 midi pitch
            #   duration: phone-id sequence in, frame-level (mode over a
            #   window) or syllable-level durations out.
            #
            # Reconcile feature lengths with the total phone durations:
            # either clip the features to the duration sum, or shorten the
            # trailing durations (walking backwards, zeroing entries) until
            # they sum to the feature length.
            for i in range(feats.size(0)):
                dur_len = sum(duration_phn[i])
                if feats_lengths[i] > dur_len:
                    feats_lengths[i] = dur_len
                else:  # decrease duration at the end of sequence
                    delta = dur_len - feats_lengths[i]
                    end = duration_phn_lengths[i] - 1
                    while delta > 0 and end >= 0:
                        new = duration_phn[i][end] - delta
                        if new < 0:  # keep on decreasing the previous one
                            delta -= duration_phn[i][end]
                            duration_phn[i][end] = 0
                            end -= 1
                        else:  # stop
                            delta -= duration_phn[i][end] - new
                            duration_phn[i][end] = new
            feats = feats[:, : feats_lengths.max()]
            # Expand label/midi/duration to frame level, or keep them at
            # syllable level, depending on the score feats extractor type.
            if isinstance(self.score_feats_extract, FrameScoreFeats):
                (
                    label_lab,
                    label_lab_lengths,
                    midi_lab,
                    midi_lab_lengths,
                    duration_lab,
                    duration_lab_lengths,
                ) = expand_to_frame(
                    duration_phn, duration_phn_lengths, label, midi, duration_phn
                )
                # for data-parallel
                label_lab = label_lab[:, : label_lab_lengths.max()]
                midi_lab = midi_lab[:, : midi_lab_lengths.max()]
                duration_lab = duration_lab[:, : duration_lab_lengths.max()]
                (
                    label_score,
                    label_score_lengths,
                    midi_score,
                    midi_score_lengths,
                    duration_score,
                    duration_score_phn_lengths,
                ) = expand_to_frame(
                    duration_ruled_phn,
                    duration_ruled_phn_lengths,
                    label,
                    midi,
                    duration_ruled_phn,
                )
                # for data-parallel
                label_score = label_score[:, : label_score_lengths.max()]
                midi_score = midi_score[:, : midi_score_lengths.max()]
                duration_score = duration_score[:, : duration_score_phn_lengths.max()]
                duration_score_syb = None
            elif isinstance(self.score_feats_extract, SyllableScoreFeats):
                # Syllable level: no frame expansion, just clip for
                # data-parallel consistency.
                label_lab_lengths = label_lengths
                midi_lab_lengths = midi_lengths
                duration_lab_lengths = duration_phn_lengths
                label_lab = label[:, : label_lab_lengths.max()]
                midi_lab = midi[:, : midi_lab_lengths.max()]
                duration_lab = duration_phn[:, : duration_lab_lengths.max()]
                label_score_lengths = label_lengths
                midi_score_lengths = midi_lengths
                duration_score_phn_lengths = duration_ruled_phn_lengths
                duration_score_syb_lengths = duration_syb_lengths
                label_score = label[:, : label_score_lengths.max()]
                midi_score = midi[:, : midi_score_lengths.max()]
                duration_score = duration_ruled_phn[
                    :, : duration_score_phn_lengths.max()
                ]
                duration_score_syb = duration_syb[:, : duration_score_syb_lengths.max()]
                slur = slur[:, : label_score_lengths.max()]
            # Extract pitch/energy/ying aligned to the feature lengths.
            if self.pitch_extract is not None and pitch is None:
                pitch, pitch_lengths = self.pitch_extract(
                    input=singing,
                    input_lengths=singing_lengths,
                    feats_lengths=feats_lengths,
                )
            if self.energy_extract is not None and energy is None:
                energy, energy_lengths = self.energy_extract(
                    singing,
                    singing_lengths,
                    feats_lengths=feats_lengths,
                )
            if self.ying_extract is not None and ying is None:
                ying, ying_lengths = self.ying_extract(
                    singing,
                    singing_lengths,
                    feats_lengths=feats_lengths,
                )
            # Normalize
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)
            if self.pitch_normalize is not None:
                pitch, pitch_lengths = self.pitch_normalize(pitch, pitch_lengths)
            if self.energy_normalize is not None:
                energy, energy_lengths = self.energy_normalize(energy, energy_lengths)
        # Make batch for svs inputs
        batch = dict(
            text=text,
            text_lengths=text_lengths,
            forward_generator=forward_generator,
        )
        # label
        # NOTE(Yuning): Label can be word, syllable or phoneme,
        # which is determined by annotation file.
        label = dict()
        label_lengths = dict()
        if label_lab is not None:
            label_lab = label_lab.to(dtype=torch.long)
            label.update(lab=label_lab)
            label_lengths.update(lab=label_lab_lengths)
        if label_score is not None:
            label_score = label_score.to(dtype=torch.long)
            label.update(score=label_score)
            label_lengths.update(score=label_score_lengths)
        batch.update(label=label, label_lengths=label_lengths)
        # melody
        melody = dict()
        if midi_lab is not None:
            midi_lab = midi_lab.to(dtype=torch.long)
            melody.update(lab=midi_lab)
        if midi_score is not None:
            midi_score = midi_score.to(dtype=torch.long)
            melody.update(score=midi_score)
        batch.update(melody=melody)
        # duration
        # NOTE(Yuning): duration = duration_time / time_shift (same as Xiaoice paper)
        duration = dict()
        if duration_lab is not None:
            duration_lab = duration_lab.to(dtype=torch.long)
            duration.update(lab=duration_lab)
        if duration_score is not None:
            duration_phn_score = duration_score.to(dtype=torch.long)
            duration.update(score_phn=duration_phn_score)
        if duration_score_syb is not None:
            duration_syb_score = duration_score_syb.to(dtype=torch.long)
            duration.update(score_syb=duration_syb_score)
        batch.update(duration=duration)
        # Optional conditioning inputs are only added when available.
        if slur is not None:
            batch.update(slur=slur)
        if spembs is not None:
            batch.update(spembs=spembs)
        if sids is not None:
            batch.update(sids=sids)
        if lids is not None:
            batch.update(lids=lids)
        if feats is not None:
            batch.update(feats=feats, feats_lengths=feats_lengths)
        if self.pitch_extract is not None and pitch is not None:
            batch.update(pitch=pitch)
        if self.energy_extract is not None and energy is not None:
            batch.update(energy=energy)
        if self.ying_extract is not None and ying is not None:
            batch.update(ying=ying)
        # Some SVS modules (e.g. GAN vocoder training) need the raw waveform.
        if self.svs.require_raw_singing:
            batch.update(singing=singing, singing_lengths=singing_lengths)
        return self.svs(**batch)
    def collect_feats(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        singing: torch.Tensor,
        singing_lengths: torch.Tensor,
        label: Optional[torch.Tensor] = None,
        label_lengths: Optional[torch.Tensor] = None,
        phn_cnt: Optional[torch.Tensor] = None,
        midi: Optional[torch.Tensor] = None,
        midi_lengths: Optional[torch.Tensor] = None,
        duration_phn: Optional[torch.Tensor] = None,
        duration_phn_lengths: Optional[torch.Tensor] = None,
        duration_ruled_phn: Optional[torch.Tensor] = None,
        duration_ruled_phn_lengths: Optional[torch.Tensor] = None,
        duration_syb: Optional[torch.Tensor] = None,
        duration_syb_lengths: Optional[torch.Tensor] = None,
        slur: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        energy_lengths: Optional[torch.Tensor] = None,
        ying: Optional[torch.Tensor] = None,
        ying_lengths: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Calculate features and return them as a dict.

        Used e.g. for collecting feature statistics; only acoustic, pitch,
        energy, and ying features are extracted here (no normalization).

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            singing (Tensor): Singing waveform tensor (B, T_wav).
            singing_lengths (Tensor): Singing length tensor (B,).
            label (Optional[Tensor]): Label tensor (B, T_label).
            label_lengths (Optional[Tensor]): Label length tensor (B,).
            phn_cnt (Optional[Tensor]): Number of phones in each syllable (B, T_syb).
            midi (Optional[Tensor]): Midi tensor (B, T_label).
            midi_lengths (Optional[Tensor]): Midi length tensor (B,).
            duration_phn (Optional[Tensor]): Duration tensor (T_label).
            duration_ruled_phn (Optional[Tensor]): Duration tensor (T_phone).
            duration_syb (Optional[Tensor]): Duration tensor (T_phone).
            slur (Optional[Tensor]): Slur tensor (B, T_slur).
            pitch (Optional[Tensor]): Pitch tensor (B, T_wav) - f0 sequence.
            pitch_lengths (Optional[Tensor]): Pitch length tensor (B,).
            energy (Optional[Tensor]): Energy tensor.
            energy_lengths (Optional[Tensor]): Energy length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker ID tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).

        Returns:
            Dict[str, Tensor]: Dict of features.

        """
        feats = None
        if self.feats_extract is not None:
            feats, feats_lengths = self.feats_extract(
                singing,
                singing_lengths,
            )
            # Reconcile feature lengths with the total phone durations (same
            # trimming scheme as in forward()): clip features, or shorten the
            # trailing durations walking backwards until the sums match.
            for i in range(feats.size(0)):
                dur_len = sum(duration_phn[i])
                if feats_lengths[i] > dur_len:
                    feats_lengths[i] = dur_len
                else:  # decrease duration at the end of sequence
                    delta = dur_len - feats_lengths[i]
                    end = duration_phn_lengths[i] - 1
                    while delta > 0 and end >= 0:
                        new = duration_phn[i][end] - delta
                        if new < 0:  # keep on decreasing the previous one
                            delta -= duration_phn[i][end]
                            duration_phn[i][end] = 0
                            end -= 1
                        else:  # stop
                            delta -= duration_phn[i][end] - new
                            duration_phn[i][end] = new
            feats = feats[:, : feats_lengths.max()]
        # Extract auxiliary features aligned to the feature lengths.
        if self.pitch_extract is not None:
            pitch, pitch_lengths = self.pitch_extract(
                input=singing,
                input_lengths=singing_lengths,
                feats_lengths=feats_lengths,
            )
        if self.energy_extract is not None:
            energy, energy_lengths = self.energy_extract(
                singing,
                singing_lengths,
                feats_lengths=feats_lengths,
            )
        if self.ying_extract is not None and ying is None:
            ying, ying_lengths = self.ying_extract(
                singing,
                singing_lengths,
                feats_lengths=feats_lengths,
            )
        # store in dict
        feats_dict = {}
        if feats is not None:
            feats_dict.update(feats=feats, feats_lengths=feats_lengths)
        if pitch is not None:
            feats_dict.update(pitch=pitch, pitch_lengths=pitch_lengths)
        if energy is not None:
            feats_dict.update(energy=energy, energy_lengths=energy_lengths)
        if ying is not None:
            feats_dict.update(ying=ying, ying_lengths=ying_lengths)
        return feats_dict
| 19,103 | 42.124153 | 88 | py |
espnet | espnet-master/espnet2/gan_svs/abs_gan_svs.py | # Copyright 2021 Tomoki Hayashi
# Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based SVS abstrast class."""
from abc import ABC, abstractmethod
from typing import Dict, Union
import torch
from espnet2.svs.abs_svs import AbsSVS
class AbsGANSVS(AbsSVS, ABC):
    """GAN-based SVS model abstract class.

    Concrete subclasses implement :meth:`forward`, which returns either the
    generator or the discriminator loss depending on ``forward_generator``.
    """

    @abstractmethod
    def forward(
        self,
        forward_generator,
        *args,
        **kwargs,
    ) -> Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor], int]]:
        """Return generator or discriminator loss.

        Args:
            forward_generator: Whether to compute the generator loss (true)
                or the discriminator loss (false).

        """
        raise NotImplementedError
| 627 | 22.259259 | 70 | py |
espnet | espnet-master/espnet2/gan_svs/uhifigan/sine_generator.py | import numpy as np
import torch
class SineGen(torch.nn.Module):
    """Definition of sine generator.

    Generates harmonic sine waveforms (plus noise) from a frame-wise F0
    contour, as used in neural source-filter style vocoders.

    SineGen(sample_rate, harmonic_num=0, sine_amp=0.1, noise_std=0.003,
            voiced_threshold=0, flag_for_pulse=False)

    Args:
        sample_rate: Sampling rate in Hz.
        harmonic_num: Number of harmonic overtones (default 0).
        sine_amp: Amplitude of sine waveform (default 0.1).
        noise_std: Std of Gaussian noise (default 0.003).
        voiced_threshold: F0 threshold for U/V classification (default 0).
        flag_for_pulse: Whether this SineGen is used inside PulseGen
            (default False).

    Note: when flag_for_pulse is True, the first time step of a voiced
    segment is always sin(np.pi) or cos(0).
    """

    def __init__(
        self,
        sample_rate,
        harmonic_num=0,
        sine_amp=0.1,
        noise_std=0.003,
        voiced_threshold=0,
        flag_for_pulse=False,
    ):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        # output dim = fundamental + overtones
        self.dim = self.harmonic_num + 1
        self.sampling_rate = sample_rate
        self.voiced_threshold = voiced_threshold
        self.flag_for_pulse = flag_for_pulse

    def _f02uv(self, f0):
        """Return a voiced/unvoiced mask (1 where f0 > threshold, else 0)."""
        uv = torch.ones_like(f0)
        uv = uv * (f0 > self.voiced_threshold)
        return uv

    def _f02sine(self, f0_values):
        """Convert F0 trajectories to sine waveforms.

        Args:
            f0_values: (batchsize, length, dim), where dim indicates the
                fundamental tone and overtones.

        Returns:
            Tensor: Sine waveforms of the same shape.
        """
        # convert to F0 in rad. The integer part n can be ignored
        # because 2 * np.pi * n doesn't affect phase
        rad_values = (f0_values / self.sampling_rate) % 1
        # initial phase noise (no noise for fundamental component)
        rand_ini = torch.rand(
            f0_values.shape[0], f0_values.shape[2], device=f0_values.device
        )
        rand_ini[:, 0] = 0
        rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
        if not self.flag_for_pulse:
            # for normal case
            # To prevent torch.cumsum numerical overflow,
            # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
            # Buffer tmp_over_one_idx indicates the time step to add -1.
            # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
            tmp_over_one = torch.cumsum(rad_values, 1) % 1
            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
            cumsum_shift = torch.zeros_like(rad_values)
            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
            sines = torch.sin(
                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
            )
        else:
            # If necessary, make sure that the first time step of every
            # voiced segment is sin(pi) or cos(0).
            # This is used for pulse-train generation.
            # identify the last time step in unvoiced segments
            uv = self._f02uv(f0_values)
            uv_1 = torch.roll(uv, shifts=-1, dims=1)
            uv_1[:, -1, :] = 1
            u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
            tmp_cumsum = torch.cumsum(rad_values, dim=1)
            # different batch needs to be processed differently
            for idx in range(f0_values.shape[0]):
                temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
                temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
                # stores the accumulation of i.phase within
                # each voiced segment
                tmp_cumsum[idx, :, :] = 0
                tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
            # rad_values - tmp_cumsum: remove the accumulation of i.phase
            # within the previous voiced segment.
            i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
            # get the sines
            sines = torch.cos(i_phase * 2 * np.pi)
        return sines

    def forward(self, f0):
        """sine_tensor, uv, noise = forward(f0)

        Args:
            f0: F0 tensor (batchsize, length, dim=1); f0 for unvoiced
                steps should be 0.

        Returns:
            sine_tensor: Tensor (batchsize, length, dim).
            uv: Voiced/unvoiced mask (batchsize, length, 1).
            noise: Additive noise component (same shape as sine_tensor).
        """
        with torch.no_grad():
            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
            # fundamental component
            f0_buf[:, :, 0] = f0[:, :, 0]
            for idx in np.arange(self.harmonic_num):
                # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
            # generate sine waveforms
            sine_waves = self._f02sine(f0_buf) * self.sine_amp
            # generate uv signal
            uv = self._f02uv(f0)
            # noise: for unvoiced should be similar to sine_amp
            #        std = self.sine_amp/3 -> max value ~ self.sine_amp
            #        for voiced regions is self.noise_std
            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
            noise = noise_amp * torch.randn_like(sine_waves)
            # first: set the unvoiced part to 0 by uv
            # then: additive noise
            sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise
| 5,613 | 38.258741 | 86 | py |
espnet | espnet-master/espnet2/gan_svs/uhifigan/uhifigan.py | # -*- coding: utf-8 -*-
"""Unet-baed HiFi-GAN Modules.
This code is based on https://github.com/jik876/hifi-gan
and https://github.com/kan-bayashi/ParallelWaveGAN.
"""
import logging
from typing import List, Optional
import numpy as np
import torch
import torch.nn.functional as F
try:
from parallel_wavegan.layers import CausalConv1d, CausalConvTranspose1d
from parallel_wavegan.layers import HiFiGANResidualBlock as ResidualBlock
from parallel_wavegan.utils import read_hdf5
except ImportError:
CausalConv1d, CausalConvTranspose1d = None, None
ResidualBlock = None
read_hdf5 = None
class UHiFiGANGenerator(torch.nn.Module):
"""UHiFiGAN generator module."""
def __init__(
self,
in_channels=80,
out_channels=1,
channels=512,
global_channels: int = -1,
kernel_size=7,
downsample_scales=(2, 2, 8, 8),
downsample_kernel_sizes=(4, 4, 16, 16),
upsample_scales=(8, 8, 2, 2),
upsample_kernel_sizes=(16, 16, 4, 4),
resblock_kernel_sizes=(3, 7, 11),
resblock_dilations=[(1, 3, 5), (1, 3, 5), (1, 3, 5)],
projection_filters: List[int] = [0, 1, 1, 1],
projection_kernels: List[int] = [0, 5, 7, 11],
dropout=0.3,
use_additional_convs=True,
bias=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.1},
use_causal_conv=False,
use_weight_norm=True,
use_avocodo=False,
):
"""Initialize Unet-based HiFiGANGenerator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
channels (int): Number of hidden representation channels.
global_channels (int): Number of global conditioning channels.
kernel_size (int): Kernel size of initial and final conv layer.
upsample_scales (list): List of upsampling scales.
upsample_kernel_sizes (list): List of kernel sizes for upsampling layers.
resblock_kernel_sizes (list): List of kernel sizes for residual blocks.
resblock_dilations (list): List of dilation list for residual blocks.
use_additional_convs (bool): Whether to use additional conv layers
in residual blocks.
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
use_causal_conv (bool): Whether to use causal structure.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
"""
super().__init__()
# check hyperparameters are valid
assert kernel_size % 2 == 1, "Kernel size must be odd number."
assert len(upsample_scales) == len(upsample_kernel_sizes)
assert len(resblock_dilations) == len(resblock_kernel_sizes)
# define modules
self.num_upsamples = len(upsample_kernel_sizes)
self.num_blocks = len(resblock_kernel_sizes)
self.use_causal_conv = use_causal_conv
self.input_conv = None
self.downsamples = torch.nn.ModuleList()
self.downsamples_mrf = torch.nn.ModuleList()
self.hidden_conv = None
self.upsamples = torch.nn.ModuleList()
self.upsamples_mrf = torch.nn.ModuleList()
self.output_conv = None
self.use_avocodo = use_avocodo
if (
CausalConv1d is None
or CausalConvTranspose1d is None
or ResidualBlock is None
):
raise ImportError(
"`parallel_wavegan` is not installed. "
"Please install via `pip install -U parallel_wavegan`."
)
if not use_causal_conv:
self.input_conv = torch.nn.Sequential(
torch.nn.Conv1d(
out_channels,
channels,
kernel_size=kernel_size,
bias=bias,
padding=(kernel_size - 1) // 2,
),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
torch.nn.Dropout(dropout),
)
else:
self.input_conv = torch.nn.Sequential(
CausalConv1d(
out_channels,
channels,
kernel_size=kernel_size,
bias=bias,
padding=(kernel_size - 1) // 2,
),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
torch.nn.Dropout(dropout),
)
hidden_channels = channels
for i in range(len(downsample_scales)):
for j in range(len(resblock_kernel_sizes)):
self.downsamples_mrf += [
ResidualBlock(
kernel_size=resblock_kernel_sizes[j],
channels=channels,
# channels=channels * 2**i,
dilations=resblock_dilations[j],
bias=bias,
use_additional_convs=use_additional_convs,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
use_causal_conv=use_causal_conv,
)
]
if not use_causal_conv:
self.downsamples += [
torch.nn.Sequential(
torch.nn.Conv1d(
channels,
channels * 2,
# channels * (2 ** (i + 1)),
kernel_size=downsample_kernel_sizes[i],
stride=downsample_scales[i],
bias=bias,
padding=downsample_scales[i] // 2
+ downsample_scales[i] % 2,
),
getattr(torch.nn, nonlinear_activation)(
**nonlinear_activation_params
),
torch.nn.Dropout(dropout),
)
]
else:
self.downsamples += [
torch.nn.Sequential(
CausalConv1d(
channels,
channels * 2,
# channels * (2 ** (i + 1)),
kernel_size=downsample_kernel_sizes[i],
stride=downsample_scales[i],
bias=bias,
padding=downsample_scales[i] // 2
+ downsample_scales[i] % 2,
),
getattr(torch.nn, nonlinear_activation)(
**nonlinear_activation_params
),
torch.nn.Dropout(dropout),
)
]
channels = channels * 2
if not use_causal_conv:
self.hidden_conv = torch.nn.Conv1d(
in_channels,
channels,
kernel_size=kernel_size,
bias=bias,
padding=(kernel_size - 1) // 2,
)
else:
self.hidden_conv = CausalConv1d(
in_channels,
channels,
kernel_size=kernel_size,
bias=bias,
padding=(kernel_size - 1) // 2,
)
max_channels = channels
self.output_conv = torch.nn.ModuleList()
for i in range(len(upsample_kernel_sizes)):
# assert upsample_kernel_sizes[i] == 2 * upsample_scales[i]
if not use_causal_conv:
self.upsamples += [
torch.nn.Sequential(
getattr(torch.nn, nonlinear_activation)(
**nonlinear_activation_params
),
torch.nn.ConvTranspose1d(
channels * 2,
channels // 2,
# channels // (2 ** (i + 1)),
upsample_kernel_sizes[i],
upsample_scales[i],
padding=upsample_scales[i] // 2 + upsample_scales[i] % 2,
output_padding=upsample_scales[i] % 2,
bias=bias,
),
)
]
else:
self.upsamples += [
torch.nn.Sequential(
getattr(torch.nn, nonlinear_activation)(
**nonlinear_activation_params
),
CausalConvTranspose1d(
channels * 2,
channels // 2,
# channels // (2 ** (i + 1)),
upsample_kernel_sizes[i],
upsample_scales[i],
bias=bias,
),
)
]
# hidden_channel for MRF module
for j in range(len(resblock_kernel_sizes)):
self.upsamples_mrf += [
ResidualBlock(
kernel_size=resblock_kernel_sizes[j],
channels=channels // 2,
# channels=channels // (2 ** (i + 1)),
dilations=resblock_dilations[j],
bias=bias,
use_additional_convs=use_additional_convs,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
use_causal_conv=use_causal_conv,
)
]
channels = channels // 2
if use_avocodo:
if projection_filters[i] != 0:
self.output_conv.append(
torch.nn.Conv1d(
max_channels // (2 ** (i + 1)),
# channels // (2 ** (i + 1)),
projection_filters[i],
projection_kernels[i],
1,
padding=projection_kernels[i] // 2,
)
)
else:
self.output_conv.append(torch.nn.Identity())
if not use_avocodo:
if not use_causal_conv:
self.output_conv = torch.nn.Sequential(
# NOTE(kan-bayashi): follow official implementation but why
# using different slope parameter here? (0.1 vs. 0.01)
torch.nn.LeakyReLU(),
torch.nn.Conv1d(
channels,
out_channels,
kernel_size,
bias=bias,
padding=(kernel_size - 1) // 2,
),
torch.nn.Tanh(),
)
else:
self.output_conv = torch.nn.Sequential(
# NOTE(kan-bayashi): follow official implementation but why
# using different slope parameter here? (0.1 vs. 0.01)
torch.nn.LeakyReLU(),
CausalConv1d(
channels,
out_channels,
kernel_size,
bias=bias,
),
torch.nn.Tanh(),
)
if global_channels > 0:
self.global_conv = torch.nn.Conv1d(global_channels, in_channels, 1)
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# reset parameters
self.reset_parameters()
    def forward(
        self, c=None, f0=None, excitation=None, g: Optional[torch.Tensor] = None
    ):
        """Calculate forward propagation.

        Args:
            c (Tensor): Input mel tensor (B, in_channels, T).
            f0 (Tensor): Input tensor (B, 1, T).
                NOTE(review): f0 is accepted but not used in this method —
                conditioning appears to come from the excitation only; confirm
                with callers.
            excitation (Tensor): Input tensor (B, frame_len, T).
            g (Optional[Tensor]): Global conditioning tensor; added to ``c``
                through ``global_conv`` when given.

        Returns:
            Tensor: Output tensor (B, out_channels, T), or a list of
            per-scale output tensors when ``use_avocodo`` is True.
        """
        residual_results = []
        if self.use_avocodo:
            outs = []
        # contracting path: encode the excitation, keeping each stage's
        # activation for the U-Net skip connections
        hidden = self.input_conv(excitation)
        # TODO(yifeng): add global conv to hidden?
        if g is not None:
            c = c + self.global_conv(g)
        for i in range(len(self.downsamples)):
            cs = 0.0  # initialize
            for j in range(self.num_blocks):
                tc = self.downsamples_mrf[i * self.num_blocks + j](hidden)
                cs += tc
            # average the multi-receptive-field (MRF) branch outputs
            hidden = cs / self.num_blocks
            hidden = self.downsamples[i](hidden)
            residual_results.append(hidden)
        # deepest encoder feature must pair with the first upsampling stage
        residual_results.reverse()
        hidden_mel = self.hidden_conv(c)
        for i in range(len(self.upsamples)):
            # U-Net skip connection: concatenate along the channel axis
            hidden_mel = torch.cat((hidden_mel, residual_results[i]), dim=1)
            hidden_mel = self.upsamples[i](hidden_mel)
            cs = 0.0  # initialize
            for j in range(self.num_blocks):
                tc = self.upsamples_mrf[i * self.num_blocks + j](hidden_mel)
                cs += tc
            hidden_mel = cs / self.num_blocks
            if self.use_avocodo:
                if i >= (self.num_upsamples - 3):
                    # project the last three scales to waveforms
                    # (Avocodo-style multi-scale outputs)
                    _c = F.leaky_relu(hidden_mel)
                    _c = self.output_conv[i](_c)
                    _c = torch.tanh(_c)
                    outs.append(_c)
                else:
                    # earlier scales pass through Identity / projection only
                    hidden_mel = self.output_conv[i](hidden_mel)
        if self.use_avocodo:
            return outs
        else:
            return self.output_conv(hidden_mel)
def reset_parameters(self):
"""Reset parameters.
This initialization follows the official implementation manner.
https://github.com/jik876/hifi-gan/blob/master/models.py
"""
def _reset_parameters(m):
if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
m.weight.data.normal_(0.0, 0.01)
logging.debug(f"Reset parameters in {m}.")
self.apply(_reset_parameters)
def remove_weight_norm(self):
"""Remove weight normalization module from all of the layers."""
def _remove_weight_norm(m):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm)
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(
m, torch.nn.ConvTranspose1d
):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def register_stats(self, stats):
"""Register stats for de-normalization as buffer.
Args:
stats (str): Path of statistics file (".npy" or ".h5").
"""
assert stats.endswith(".h5") or stats.endswith(".npy")
if stats.endswith(".h5"):
mean = read_hdf5(stats, "mean").reshape(-1)
scale = read_hdf5(stats, "scale").reshape(-1)
else:
mean = np.load(stats)[0].reshape(-1)
scale = np.load(stats)[1].reshape(-1)
self.register_buffer("mean", torch.from_numpy(mean).float())
self.register_buffer("scale", torch.from_numpy(scale).float())
logging.info("Successfully registered stats as buffer.")
    def inference(self, excitation=None, f0=None, c=None, normalize_before=False):
        """Perform inference.

        Args:
            excitation (Union[Tensor, ndarray]): Excitation signal; reshaped
                to (1, 1, -1) before being fed to ``forward``.
            f0 (Union[Tensor, ndarray]): F0 sequence (T,).
            c (Union[Tensor, ndarray]): Input tensor (T, in_channels).
            normalize_before (bool): Whether to perform normalization.
                NOTE(review): currently unused in this method — confirm
                whether the registered mean/scale stats were meant to be
                applied here.

        Returns:
            Tensor: Output tensor (T ** prod(upsample_scales), out_channels).
        """
        # convert ndarray inputs to tensors on the model's device
        if c is not None and not isinstance(c, torch.Tensor):
            c = torch.tensor(c, dtype=torch.float).to(next(self.parameters()).device)
        if excitation is not None and not isinstance(excitation, torch.Tensor):
            excitation = torch.tensor(excitation, dtype=torch.float).to(
                next(self.parameters()).device
            )
        if f0 is not None and not isinstance(f0, torch.Tensor):
            f0 = torch.tensor(f0, dtype=torch.float).to(next(self.parameters()).device)
        # add the batch/channel axes expected by forward()
        c = self.forward(
            c.transpose(1, 0).unsqueeze(0),
            f0.unsqueeze(1).transpose(1, 0).unsqueeze(0),
            excitation.reshape(1, 1, -1),
        )
        return c.squeeze(0).transpose(1, 0)
| 19,291 | 37.738956 | 88 | py |
espnet | espnet-master/espnet2/gan_svs/joint/joint_score2wav.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Joint text-to-wav module for end-to-end training."""
from typing import Any, Dict, Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_svs.abs_gan_svs import AbsGANSVS
from espnet2.gan_tts.hifigan import (
HiFiGANGenerator,
HiFiGANMultiPeriodDiscriminator,
HiFiGANMultiScaleDiscriminator,
HiFiGANMultiScaleMultiPeriodDiscriminator,
HiFiGANPeriodDiscriminator,
HiFiGANScaleDiscriminator,
)
from espnet2.gan_tts.hifigan.loss import (
DiscriminatorAdversarialLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
MelSpectrogramLoss,
)
from espnet2.gan_tts.melgan import MelGANGenerator, MelGANMultiScaleDiscriminator
from espnet2.gan_tts.melgan.pqmf import PQMF
from espnet2.gan_tts.parallel_wavegan import (
ParallelWaveGANDiscriminator,
ParallelWaveGANGenerator,
)
from espnet2.gan_tts.style_melgan import StyleMelGANDiscriminator, StyleMelGANGenerator
from espnet2.gan_tts.utils import get_random_segments, get_segments
from espnet2.svs.naive_rnn.naive_rnn_dp import NaiveRNNDP
from espnet2.svs.xiaoice.XiaoiceSing import XiaoiceSing
from espnet2.torch_utils.device_funcs import force_gatherable
# Score-to-mel (acoustic) model classes selectable via ``score2mel_type``.
AVAILABLE_SCORE2MEL = {
    "xiaoice": XiaoiceSing,
    "naive_rnn_dp": NaiveRNNDP,
}
# Vocoder generator classes selectable via ``vocoder_type``.
AVAILABLE_VOCODER = {
    "hifigan_generator": HiFiGANGenerator,
    "melgan_generator": MelGANGenerator,
    "parallel_wavegan_generator": ParallelWaveGANGenerator,
    "style_melgan_generator": StyleMelGANGenerator,
}
# Discriminator classes selectable via ``discriminator_type``.
AVAILABLE_DISCRIMINATORS = {
    "hifigan_period_discriminator": HiFiGANPeriodDiscriminator,
    "hifigan_scale_discriminator": HiFiGANScaleDiscriminator,
    "hifigan_multi_period_discriminator": HiFiGANMultiPeriodDiscriminator,
    "hifigan_multi_scale_discriminator": HiFiGANMultiScaleDiscriminator,
    "hifigan_multi_scale_multi_period_discriminator": HiFiGANMultiScaleMultiPeriodDiscriminator,  # NOQA
    "melgan_multi_scale_discriminator": MelGANMultiScaleDiscriminator,
    "parallel_wavegan_discriminator": ParallelWaveGANDiscriminator,
    "style_melgan_discriminator": StyleMelGANDiscriminator,
}
class JointScore2Wav(AbsGANSVS):
"""General class to jointly train score2mel and vocoder parts."""
def __init__(
self,
# generator (score2mel + vocoder) related
idim: int,
odim: int,
segment_size: int = 32,
sampling_rate: int = 22050,
score2mel_type: str = "xiaoice",
score2mel_params: Dict[str, Any] = {
"midi_dim": 129,
"tempo_dim": 500,
"embed_dim": 512,
"adim": 384,
"aheads": 4,
"elayers": 6,
"eunits": 1536,
"dlayers": 6,
"dunits": 1536,
"postnet_layers": 5,
"postnet_chans": 512,
"postnet_filts": 5,
"postnet_dropout_rate": 0.5,
"positionwise_layer_type": "conv1d",
"positionwise_conv_kernel_size": 1,
"use_scaled_pos_enc": True,
"use_batch_norm": True,
"encoder_normalize_before": True,
"decoder_normalize_before": True,
"encoder_concat_after": False,
"decoder_concat_after": False,
"duration_predictor_layers": 2,
"duration_predictor_chans": 384,
"duration_predictor_kernel_size": 3,
"duration_predictor_dropout_rate": 0.1,
"reduction_factor": 1,
"encoder_type": "transformer",
"decoder_type": "transformer",
"transformer_enc_dropout_rate": 0.1,
"transformer_enc_positional_dropout_rate": 0.1,
"transformer_enc_attn_dropout_rate": 0.1,
"transformer_dec_dropout_rate": 0.1,
"transformer_dec_positional_dropout_rate": 0.1,
"transformer_dec_attn_dropout_rate": 0.1,
# only for conformer
"conformer_rel_pos_type": "latest",
"conformer_pos_enc_layer_type": "rel_pos",
"conformer_self_attn_layer_type": "rel_selfattn",
"conformer_activation_type": "swish",
"use_macaron_style_in_conformer": True,
"use_cnn_in_conformer": True,
"zero_triu": False,
"conformer_enc_kernel_size": 7,
"conformer_dec_kernel_size": 31,
# extra embedding related
"spks": None,
"langs": None,
"spk_embed_dim": None,
"spk_embed_integration_type": "add",
# training related
"init_type": "xavier_uniform",
"init_enc_alpha": 1.0,
"init_dec_alpha": 1.0,
"use_masking": False,
"use_weighted_masking": False,
"loss_type": "L1",
},
vocoder_type: str = "hifigan_generator",
vocoder_params: Dict[str, Any] = {
"out_channels": 1,
"channels": 512,
"global_channels": -1,
"kernel_size": 7,
"upsample_scales": [8, 8, 2, 2],
"upsample_kernel_sizes": [16, 16, 4, 4],
"resblock_kernel_sizes": [3, 7, 11],
"resblock_dilations": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
"use_additional_convs": True,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
},
use_pqmf: bool = False,
pqmf_params: Dict[str, Any] = {
"subbands": 4,
"taps": 62,
"cutoff_ratio": 0.142,
"beta": 9.0,
},
# discriminator related
discriminator_type: str = "hifigan_multi_scale_multi_period_discriminator",
discriminator_params: Dict[str, Any] = {
"scales": 1,
"scale_downsample_pooling": "AvgPool1d",
"scale_downsample_pooling_params": {
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
"scale_discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 128,
"max_downsample_channels": 1024,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 4, 4, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
"follow_official_norm": False,
"periods": [2, 3, 5, 7, 11],
"period_discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 32,
"downsample_scales": [3, 3, 3, 3, 1],
"max_downsample_channels": 1024,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
},
# loss related
generator_adv_loss_params: Dict[str, Any] = {
"average_by_discriminators": False,
"loss_type": "mse",
},
discriminator_adv_loss_params: Dict[str, Any] = {
"average_by_discriminators": False,
"loss_type": "mse",
},
use_feat_match_loss: bool = True,
feat_match_loss_params: Dict[str, Any] = {
"average_by_discriminators": False,
"average_by_layers": False,
"include_final_outputs": True,
},
use_mel_loss: bool = True,
mel_loss_params: Dict[str, Any] = {
"fs": 22050,
"n_fft": 1024,
"hop_length": 256,
"win_length": None,
"window": "hann",
"n_mels": 80,
"fmin": 0,
"fmax": None,
"log_base": None,
},
lambda_score2mel: float = 1.0,
lambda_adv: float = 1.0,
lambda_feat_match: float = 2.0,
lambda_mel: float = 45.0,
cache_generator_outputs: bool = False,
):
"""Initialize JointScore2Wav module.
Args:
idim (int): Input vocabrary size.
odim (int): Acoustic feature dimension. The actual output channels will
be 1 since the model is the end-to-end text-to-wave model but for the
compatibility odim is used to indicate the acoustic feature dimension.
segment_size (int): Segment size for random windowed inputs.
sampling_rate (int): Sampling rate, not used for the training but it will
be referred in saving waveform during the inference.
text2mel_type (str): The text2mel model type.
text2mel_params (Dict[str, Any]): Parameter dict for text2mel model.
use_pqmf (bool): Whether to use PQMF for multi-band vocoder.
pqmf_params (Dict[str, Any]): Parameter dict for PQMF module.
vocoder_type (str): The vocoder model type.
vocoder_params (Dict[str, Any]): Parameter dict for vocoder model.
discriminator_type (str): Discriminator type.
discriminator_params (Dict[str, Any]): Parameter dict for discriminator.
generator_adv_loss_params (Dict[str, Any]): Parameter dict for generator
adversarial loss.
discriminator_adv_loss_params (Dict[str, Any]): Parameter dict for
discriminator adversarial loss.
use_feat_match_loss (bool): Whether to use feat match loss.
feat_match_loss_params (Dict[str, Any]): Parameter dict for feat match loss.
use_mel_loss (bool): Whether to use mel loss.
mel_loss_params (Dict[str, Any]): Parameter dict for mel loss.
lambda_text2mel (float): Loss scaling coefficient for text2mel model loss.
lambda_adv (float): Loss scaling coefficient for adversarial loss.
lambda_feat_match (float): Loss scaling coefficient for feat match loss.
lambda_mel (float): Loss scaling coefficient for mel loss.
cache_generator_outputs (bool): Whether to cache generator outputs.
"""
assert check_argument_types()
super().__init__()
self.segment_size = segment_size
self.use_pqmf = use_pqmf
# define modules
self.generator = torch.nn.ModuleDict()
score2mel_class = AVAILABLE_SCORE2MEL[score2mel_type]
score2mel_params.update(idim=idim, odim=odim)
self.generator["score2mel"] = score2mel_class(
**score2mel_params,
)
vocoder_class = AVAILABLE_VOCODER[vocoder_type]
if vocoder_type in ["hifigan_generator", "melgan_generator"]:
vocoder_params.update(in_channels=odim)
elif vocoder_type in ["parallel_wavegan_generator", "style_melgan_generator"]:
vocoder_params.update(aux_channels=odim)
self.generator["vocoder"] = vocoder_class(
**vocoder_params,
)
if self.use_pqmf:
self.pqmf = PQMF(**pqmf_params)
discriminator_class = AVAILABLE_DISCRIMINATORS[discriminator_type]
self.discriminator = discriminator_class(
**discriminator_params,
)
self.generator_adv_loss = GeneratorAdversarialLoss(
**generator_adv_loss_params,
)
self.discriminator_adv_loss = DiscriminatorAdversarialLoss(
**discriminator_adv_loss_params,
)
self.use_feat_match_loss = use_feat_match_loss
if self.use_feat_match_loss:
self.feat_match_loss = FeatureMatchLoss(
**feat_match_loss_params,
)
self.use_mel_loss = use_mel_loss
if self.use_mel_loss:
self.mel_loss = MelSpectrogramLoss(
**mel_loss_params,
)
# coefficients
self.lambda_score2mel = lambda_score2mel
self.lambda_adv = lambda_adv
if self.use_feat_match_loss:
self.lambda_feat_match = lambda_feat_match
if self.use_mel_loss:
self.lambda_mel = lambda_mel
# cache
self.cache_generator_outputs = cache_generator_outputs
self._cache = None
# store sampling rate for saving wav file
# (not used for the training)
self.fs = sampling_rate
# store parameters for test compatibility
self.spks = self.generator["score2mel"].spks
self.langs = self.generator["score2mel"].langs
self.spk_embed_dim = self.generator["score2mel"].spk_embed_dim
@property
def require_raw_singing(self):
"""Return whether or not singing is required."""
return True
@property
def require_vocoder(self):
"""Return whether or not vocoder is required."""
return False
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        singing: torch.Tensor,
        singing_lengths: torch.Tensor,
        label: Optional[Dict[str, torch.Tensor]] = None,
        label_lengths: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        melody_lengths: Optional[Dict[str, torch.Tensor]] = None,
        pitch: torch.LongTensor = None,
        pitch_lengths: torch.Tensor = None,
        duration: Optional[Dict[str, torch.Tensor]] = None,
        duration_lengths: Optional[Dict[str, torch.Tensor]] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        forward_generator: bool = True,
    ) -> Dict[str, Any]:
        """Perform generator or discriminator forward.

        Args:
            text (LongTensor): Batch of padded character ids (B, Tmax).
            text_lengths (LongTensor): Batch of lengths of each input batch (B,).
            feats (Tensor): Batch of padded target features (B, Lmax, odim).
            feats_lengths (LongTensor): Batch of the lengths of each target (B,).
            singing (Tensor): Singing waveform tensor (B, T_wav).
            singing_lengths (Tensor): Singing length tensor (B,).
            label (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded label ids (B, Tmax).
            label_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded label ids (B, ).
            melody (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded melody (B, Tmax).
            melody_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded melody (B, ).
            pitch (FloatTensor): Batch of padded f0 (B, Tmax).
            pitch_lengths (LongTensor): Batch of the lengths of padded f0 (B, ).
            duration (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
                value (LongTensor): Batch of padded duration (B, Tmax).
            duration_lengths (Optional[Dict]): key is "lab", "score_phn" or
                "score_syb"; value (LongTensor): Batch of the lengths of padded
                duration (B, ).
            spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
            lids (Optional[Tensor]): Batch of language IDs (B, 1).
            forward_generator (bool): Whether to forward generator
                (True) or discriminator (False).

        Returns:
            Dict[str, Any]:
                - loss (Tensor): Loss scalar tensor.
                - stats (Dict[str, float]): Statistics to be monitored.
                - weight (Tensor): Weight tensor to summarize losses.
                - optim_idx (int): Optimizer index (0 for G and 1 for D).
        """
        # select the label-aligned durations; they serve as both "duration"
        # and "beat" for the downstream calls
        beat = duration["lab"]
        beat_lengths = duration_lengths["lab"]
        duration = duration["lab"]
        # labels and melody come from the score annotations
        label = label["score"]
        label_lengths = label_lengths["score"]
        melody = melody["score"]
        melody_lengths = melody_lengths["score"]
        if forward_generator:
            return self._forward_generator(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                singing=singing,
                singing_lengths=singing_lengths,
                duration=duration,
                label=label,
                label_lengths=label_lengths,
                melody=melody,
                melody_lengths=melody_lengths,
                beat=beat,
                beat_lengths=beat_lengths,
                pitch=pitch,
                pitch_lengths=pitch_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
            )
        else:
            return self._forward_discrminator(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                singing=singing,
                singing_lengths=singing_lengths,
                duration=duration,
                label=label,
                label_lengths=label_lengths,
                melody=melody,
                melody_lengths=melody_lengths,
                beat=beat,
                beat_lengths=beat_lengths,
                pitch=pitch,
                pitch_lengths=pitch_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
            )
    def _forward_generator(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        singing: torch.Tensor,
        singing_lengths: torch.Tensor,
        duration: torch.Tensor,
        label: Optional[Dict[str, torch.Tensor]] = None,
        label_lengths: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        melody_lengths: Optional[Dict[str, torch.Tensor]] = None,
        beat: Optional[Dict[str, torch.Tensor]] = None,
        beat_lengths: Optional[Dict[str, torch.Tensor]] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
    ) -> Dict[str, Any]:
        """Perform generator forward.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            singing (Tensor): Singing waveform tensor (B, T_wav).
            singing_lengths (Tensor): Singing length tensor (B,).
            duration (Tensor): Batch of padded durations (B, Tmax).
            label (Optional[Tensor]): Batch of padded label ids (B, Tmax).
            label_lengths (Optional[Tensor]): Lengths of padded label ids (B, ).
            melody (Optional[Tensor]): Batch of padded melody (B, Tmax).
            melody_lengths (Optional[Tensor]): Lengths of padded melody (B, ).
            beat (Optional[Tensor]): Batch of padded beat (B, Tmax).
            beat_lengths (Optional[Tensor]): Lengths of padded beat (B, ).
            pitch (FloatTensor): Batch of padded f0 (B, Tmax).
            pitch_lengths (LongTensor): Lengths of padded f0 (B, ).
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).

        Returns:
            Dict[str, Any]:
                * loss (Tensor): Loss scalar tensor.
                * stats (Dict[str, float]): Statistics to be monitored.
                * weight (Tensor): Weight tensor to summarize losses.
                * optim_idx (int): Optimizer index (0 for G and 1 for D).
        """
        # setup
        batch_size = text.size(0)
        # add channel axis for the waveform discriminators
        singing = singing.unsqueeze(1)

        # calculate generator outputs
        reuse_cache = True
        if not self.cache_generator_outputs or self._cache is None:
            reuse_cache = False
            # calculate score2mel outputs
            score2mel_loss, stats, feats_gen = self.generator["score2mel"](
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                label=label,
                label_lengths=label_lengths,
                melody=melody,
                melody_lengths=melody_lengths,
                duration=beat,
                duration_lengths=beat_lengths,
                pitch=pitch,
                pitch_lengths=pitch_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
                joint_training=True,
            )
            # get random segments so the vocoder only sees short windows
            feats_gen_, start_idxs = get_random_segments(
                x=feats_gen.transpose(1, 2),
                x_lengths=feats_lengths,
                segment_size=self.segment_size,
            )
            # calculate vocoder outputs
            singing_hat_ = self.generator["vocoder"](feats_gen_)
            if self.use_pqmf:
                singing_hat_ = self.pqmf.synthesis(singing_hat_)
        else:
            score2mel_loss, stats, singing_hat_, start_idxs = self._cache

        # store cache
        if self.training and self.cache_generator_outputs and not reuse_cache:
            self._cache = (score2mel_loss, stats, singing_hat_, start_idxs)

        # slice the matching ground-truth waveform windows
        singing_ = get_segments(
            x=singing,
            start_idxs=start_idxs * self.generator["vocoder"].upsample_factor,
            segment_size=self.segment_size * self.generator["vocoder"].upsample_factor,
        )

        # calculate discriminator outputs
        p_hat = self.discriminator(singing_hat_)
        with torch.no_grad():
            # do not store discriminator gradient in generator turn
            p = self.discriminator(singing_)

        # calculate losses
        adv_loss = self.generator_adv_loss(p_hat)
        adv_loss = adv_loss * self.lambda_adv
        score2mel_loss = score2mel_loss * self.lambda_score2mel
        loss = adv_loss + score2mel_loss
        if self.use_feat_match_loss:
            feat_match_loss = self.feat_match_loss(p_hat, p)
            feat_match_loss = feat_match_loss * self.lambda_feat_match
            loss = loss + feat_match_loss
            stats.update(feat_match_loss=feat_match_loss.item())
        if self.use_mel_loss:
            mel_loss = self.mel_loss(singing_hat_, singing_)
            mel_loss = self.lambda_mel * mel_loss
            loss = loss + mel_loss
            stats.update(mel_loss=mel_loss.item())

        stats.update(
            adv_loss=adv_loss.item(),
            score2mel_loss=score2mel_loss.item(),
            loss=loss.item(),
        )

        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)

        # reset cache
        if reuse_cache or not self.training:
            self._cache = None

        return {
            "loss": loss,
            "stats": stats,
            "weight": weight,
            "optim_idx": 0,  # needed for trainer
        }
    def _forward_discrminator(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        singing: torch.Tensor,
        singing_lengths: torch.Tensor,
        duration: torch.Tensor,
        label: Optional[Dict[str, torch.Tensor]] = None,
        label_lengths: Optional[Dict[str, torch.Tensor]] = None,
        melody: Optional[Dict[str, torch.Tensor]] = None,
        melody_lengths: Optional[Dict[str, torch.Tensor]] = None,
        beat: Optional[Dict[str, torch.Tensor]] = None,
        beat_lengths: Optional[Dict[str, torch.Tensor]] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
    ) -> Dict[str, Any]:
        """Perform discriminator forward.

        Note:
            The method name keeps the historical "discrminator" misspelling;
            external callers refer to it by this name, so it is not renamed.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            singing (Tensor): Singing waveform tensor (B, T_wav).
            singing_lengths (Tensor): Singing length tensor (B,).
            duration (Optional[Dict]): key is "phn", "syb";
                value (LongTensor): Batch of padded beat (B, Tmax).
            label (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded label ids (B, Tmax).
            label_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded label ids (B, ).
            melody (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of padded melody (B, Tmax).
            melody_lengths (Optional[Dict]): key is "lab" or "score";
                value (LongTensor): Batch of the lengths of padded melody (B, ).
            beat (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
                value (LongTensor): Batch of padded beat (B, Tmax).
            beat_lengths (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
                value (LongTensor): Batch of the lengths of padded beat (B, ).
            pitch (FloatTensor): Batch of padded f0 (B, Tmax).
            pitch_lengths (LongTensor): Batch of the lengths of padded f0 (B, ).
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).

        Returns:
            Dict[str, Any]:
                * loss (Tensor): Loss scalar tensor.
                * stats (Dict[str, float]): Statistics to be monitored.
                * weight (Tensor): Weight tensor to summarize losses.
                * optim_idx (int): Optimizer index (0 for G and 1 for D).

        """
        # setup
        batch_size = text.size(0)
        # add a channel dim: (B, T_wav) -> (B, 1, T_wav)
        singing = singing.unsqueeze(1)
        # calculate generator outputs; reuse the segments cached during the
        # generator turn of the same batch when caching is enabled
        reuse_cache = True
        if not self.cache_generator_outputs or self._cache is None:
            reuse_cache = False
            # calculate score2mel outputs
            score2mel_loss, stats, feats_gen = self.generator["score2mel"](
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                label=label,
                label_lengths=label_lengths,
                melody=melody,
                melody_lengths=melody_lengths,
                duration=beat,
                duration_lengths=beat_lengths,
                pitch=pitch,
                pitch_lengths=pitch_lengths,
                sids=sids,
                spembs=spembs,
                lids=lids,
                joint_training=True,
            )
            # get random segments; start_idxs are frame indices into the
            # (B, C, T_feats) feature sequence
            feats_gen_, start_idxs = get_random_segments(
                x=feats_gen.transpose(1, 2),
                x_lengths=feats_lengths,
                segment_size=self.segment_size,
            )
            # calculate vocoder outputs
            singing_hat_ = self.generator["vocoder"](feats_gen_)
            if self.use_pqmf:
                singing_hat_ = self.pqmf.synthesis(singing_hat_)
        else:
            # reuse results computed during the generator turn
            _, _, singing_hat_, start_idxs = self._cache
        # store cache
        if self.cache_generator_outputs and not reuse_cache:
            self._cache = (score2mel_loss, stats, singing_hat_, start_idxs)
        # parse outputs: crop the matching ground-truth waveform segment
        # (frame indices scaled to sample indices by the vocoder upsample factor)
        singing_ = get_segments(
            x=singing,
            start_idxs=start_idxs * self.generator["vocoder"].upsample_factor,
            segment_size=self.segment_size * self.generator["vocoder"].upsample_factor,
        )
        # calculate discriminator outputs
        # detach the generated waveform so no gradient reaches the generator
        p_hat = self.discriminator(singing_hat_.detach())
        p = self.discriminator(singing_)
        # calculate losses
        real_loss, fake_loss = self.discriminator_adv_loss(p_hat, p)
        loss = real_loss + fake_loss
        stats = dict(
            discriminator_loss=loss.item(),
            real_loss=real_loss.item(),
            fake_loss=fake_loss.item(),
        )
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        # reset cache
        if reuse_cache or not self.training:
            self._cache = None
        return {
            "loss": loss,
            "stats": stats,
            "weight": weight,
            "optim_idx": 1,  # needed for trainer
        }
def inference(
self,
text: torch.Tensor,
feats: Optional[torch.Tensor] = None,
label: Optional[Dict[str, torch.Tensor]] = None,
melody: Optional[Dict[str, torch.Tensor]] = None,
pitch: Optional[torch.Tensor] = None,
duration: Optional[Dict[str, torch.Tensor]] = None,
slur: Optional[Dict[str, torch.Tensor]] = None,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
noise_scale: float = 0.667,
noise_scale_dur: float = 0.8,
alpha: float = 1.0,
max_len: Optional[int] = None,
use_teacher_forcing: bool = False,
) -> Dict[str, torch.Tensor]:
"""Run inference.
Args:
text (Tensor): Input text index tensor (T_text,).
feats (Tensor): Feature tensor (T_feats, aux_channels).
label (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of padded label ids (B, Tmax).
melody (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of padded melody (B, Tmax).
tempo (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of padded tempo (B, Tmax).
beat (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
value (LongTensor): Batch of padded beat (B, Tmax).
pitch (FloatTensor): Batch of padded f0 (B, Tmax).
duration (Optional[Dict]): key is "phn", "syb";
value (LongTensor): Batch of padded beat (B, Tmax).
slur (LongTensor): Batch of padded slur (B, Tmax).
sids (Tensor): Speaker index tensor (1,).
spembs (Optional[Tensor]): Speaker embedding tensor (spk_embed_dim,).
lids (Tensor): Language index tensor (1,).
noise_scale (float): Noise scale value for flow.
noise_scale_dur (float): Noise scale value for duration predictor.
alpha (float): Alpha parameter to control the speed of generated singing.
max_len (Optional[int]): Maximum length.
use_teacher_forcing (bool): Whether to use teacher forcing.
Returns:
Dict[str, Tensor]:
* wav (Tensor): Generated waveform tensor (T_wav,).
* feat_gan (Tensor): Generated feature tensor (T_text, C).
"""
output_dict = self.generator["score2mel"].inference(
text=text,
feats=feats,
label=label,
melody=melody,
duration=duration,
pitch=pitch,
sids=sids,
spembs=spembs,
lids=lids,
joint_training=True,
)
wav = self.generator["vocoder"].inference(output_dict["feat_gen"])
if self.use_pqmf:
wav = self.pqmf.synthesis(wav.unsqueeze(0).transpose(1, 2))
wav = wav.squeeze(0).transpose(0, 1)
output_dict.update(wav=wav)
return output_dict
| 33,872 | 41.183064 | 104 | py |
espnet | espnet-master/espnet2/gan_svs/avocodo/avocodo.py | # Copyright 2023 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Avocodo Modules.
This code is modified from https://github.com/ncsoft/avocodo.
"""
import logging
from typing import Any, Dict, List, Optional
import torch
import torch.nn.functional as F
from torch.nn import Conv1d
from torch.nn.utils import spectral_norm, weight_norm
from espnet2.gan_svs.visinger2.visinger2_vocoder import MultiFrequencyDiscriminator
from espnet2.gan_tts.hifigan.residual_block import ResidualBlock
from espnet2.gan_tts.melgan.pqmf import PQMF
def get_padding(kernel_size, dilation=1):
    """Return the padding that keeps a dilated 1D conv length-preserving."""
    return (kernel_size - 1) * dilation // 2
class AvocodoGenerator(torch.nn.Module):
    """Avocodo generator module.

    HiFiGAN-style upsampling generator that additionally emits intermediate
    lower-resolution waveforms (for the collaborative multi-band
    discriminator) at the last three upsampling scales.
    """

    def __init__(
        self,
        in_channels: int = 80,
        out_channels: int = 1,
        channels: int = 512,
        global_channels: int = -1,
        kernel_size: int = 7,
        upsample_scales: List[int] = [8, 8, 2, 2],
        upsample_kernel_sizes: List[int] = [16, 16, 4, 4],
        resblock_kernel_sizes: List[int] = [3, 7, 11],
        resblock_dilations: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        projection_filters: List[int] = [0, 1, 1, 1],
        projection_kernels: List[int] = [0, 5, 7, 11],
        use_additional_convs: bool = True,
        bias: bool = True,
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.2},
        use_weight_norm: bool = True,
    ):
        """Initialize AvocodoGenerator module.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
                NOTE(review): not referenced in this implementation; output
                channel counts come from ``projection_filters``.
            channels (int): Number of hidden representation channels.
            global_channels (int): Number of global conditioning channels.
            kernel_size (int): Kernel size of initial and final conv layer.
            upsample_scales (List[int]): List of upsampling scales.
            upsample_kernel_sizes (List[int]): List of kernel sizes for upsample layers.
            resblock_kernel_sizes (List[int]): List of kernel sizes for residual blocks.
            resblock_dilations (List[List[int]]): List of list of dilations for residual
                blocks.
            projection_filters (List[int]): Output channels of the per-scale
                projection convs; an entry of 0 disables projection at that
                scale (an Identity is registered instead).
            projection_kernels (List[int]): Kernel sizes of the per-scale
                projection convs (ignored where the filter entry is 0).
            use_additional_convs (bool): Whether to use additional conv layers in
                residual blocks.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
                function.
            use_weight_norm (bool): Whether to use weight norm. If set to true, it will
                be applied to all of the conv layers.
        """
        super().__init__()
        # check hyperparameters are valid
        assert kernel_size % 2 == 1, "Kernel size must be odd number."
        assert len(upsample_scales) == len(upsample_kernel_sizes)
        assert len(resblock_dilations) == len(resblock_kernel_sizes)
        # define modules
        self.num_upsamples = len(upsample_kernel_sizes)
        self.num_blocks = len(resblock_kernel_sizes)
        self.input_conv = torch.nn.Conv1d(
            in_channels,
            channels,
            kernel_size,
            1,
            padding=(kernel_size - 1) // 2,
        )
        self.upsamples = torch.nn.ModuleList()
        self.blocks = torch.nn.ModuleList()
        self.output_conv = torch.nn.ModuleList()
        for i in range(len(upsample_kernel_sizes)):
            assert upsample_kernel_sizes[i] == 2 * upsample_scales[i]
            # channel count halves at every upsampling stage
            self.upsamples += [
                torch.nn.Sequential(
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    ),
                    torch.nn.ConvTranspose1d(
                        channels // (2**i),
                        channels // (2 ** (i + 1)),
                        upsample_kernel_sizes[i],
                        upsample_scales[i],
                        padding=upsample_scales[i] // 2 + upsample_scales[i] % 2,
                        output_padding=upsample_scales[i] % 2,
                    ),
                )
            ]
            # one flat list: blocks[i * num_blocks + j] belongs to scale i
            for j in range(len(resblock_kernel_sizes)):
                self.blocks += [
                    ResidualBlock(
                        kernel_size=resblock_kernel_sizes[j],
                        channels=channels // (2 ** (i + 1)),
                        dilations=resblock_dilations[j],
                        bias=bias,
                        use_additional_convs=use_additional_convs,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                    )
                ]
            if projection_filters[i] != 0:
                self.output_conv.append(
                    torch.nn.Conv1d(
                        channels // (2 ** (i + 1)),
                        projection_filters[i],
                        projection_kernels[i],
                        1,
                        padding=projection_kernels[i] // 2,
                    )
                )
            else:
                self.output_conv.append(torch.nn.Identity())
        if global_channels > 0:
            self.global_conv = torch.nn.Conv1d(global_channels, channels, 1)
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()
        # reset parameters
        self.reset_parameters()

    def forward(
        self, c: torch.Tensor, g: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            c (Tensor): Input tensor (B, in_channels, T).
            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).

        Returns:
            List[Tensor]: List of output tensors (B, out_channels, T), one per
            projected scale (lowest resolution first, full resolution last).
        """
        outs = []
        c = self.input_conv(c)
        if g is not None:
            c = c + self.global_conv(g)
        for i in range(self.num_upsamples):
            c = self.upsamples[i](c)
            cs = 0.0  # initialize
            # average the parallel residual blocks of this scale
            for j in range(self.num_blocks):
                cs += self.blocks[i * self.num_blocks + j](c)
            c = cs / self.num_blocks
            if i >= (self.num_upsamples - 3):
                # last three scales emit an intermediate waveform
                _c = F.leaky_relu(c)
                _c = self.output_conv[i](_c)
                _c = torch.tanh(_c)
                outs.append(_c)
            else:
                # earlier scales: Identity when projection_filters[i] == 0
                c = self.output_conv[i](c)
        return outs

    def reset_parameters(self):
        """Reset parameters.

        This initialization follows the official implementation manner.
        https://github.com/jik876/hifi-gan/blob/master/models.py
        """

        def _reset_parameters(m: torch.nn.Module):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                m.weight.data.normal_(0.0, 0.01)
                logging.debug(f"Reset parameters in {m}.")

        self.apply(_reset_parameters)

    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""

        def _remove_weight_norm(m: torch.nn.Module):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(_remove_weight_norm)

    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""

        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)
# CoMBD
class CoMBDBlock(torch.nn.Module):
    """CoMBD (Collaborative Multi-band Discriminator) block module"""

    def __init__(
        self,
        h_u: List[int],
        d_k: List[int],
        d_s: List[int],
        d_d: List[int],
        d_g: List[int],
        d_p: List[int],
        op_f: int,
        op_k: int,
        op_g: int,
        use_spectral_norm=False,
    ):
        """Initialize a CoMBD block.

        Args:
            h_u (List[int]): Hidden channel sizes, one per conv layer.
            d_k (List[int]): Kernel sizes per conv layer.
            d_s (List[int]): Strides per conv layer.
            d_d (List[int]): Dilations per conv layer.
            d_g (List[int]): Group counts per conv layer.
            d_p (List[int]): Paddings per conv layer.
            op_f (int): Output channels of the final projection conv.
            op_k (int): Kernel size of the final projection conv.
            op_g (int): Group count of the final projection conv.
            use_spectral_norm: Use spectral norm instead of weight norm.
        """
        super().__init__()
        norm_fn = spectral_norm if use_spectral_norm is not False else weight_norm
        # (in, out) channel pairs: mono input into the first layer, then chained
        channel_pairs = [[1, h_u[0]]]
        channel_pairs += [[h_u[i], h_u[i + 1]] for i in range(len(h_u) - 1)]
        self.convs = torch.nn.ModuleList(
            norm_fn(
                Conv1d(
                    in_channels=pair[0],
                    out_channels=pair[1],
                    kernel_size=k,
                    stride=s,
                    dilation=d,
                    groups=g,
                    padding=p,
                )
            )
            for pair, k, s, d, g, p in zip(channel_pairs, d_k, d_s, d_d, d_g, d_p)
        )
        self.projection_conv = norm_fn(
            Conv1d(
                in_channels=channel_pairs[-1][1],
                out_channels=op_f,
                kernel_size=op_k,
                groups=op_g,
            )
        )

    def forward(self, x):
        """Forward pass through the CoMBD block.

        Args:
            x (Tensor): Input tensor of shape (B, C_in, T_in).

        Returns:
            Tuple[Tensor, List[Tensor]]: Projection output of shape
            (B, C_out, T_out) and the post-activation feature maps collected
            after each conv layer.
        """
        fmap = []
        for conv in self.convs:
            x = F.leaky_relu(conv(x), 0.2)
            fmap.append(x)
        return self.projection_conv(x), fmap
class CoMBD(torch.nn.Module):
    """CoMBD (Collaborative Multi-band Discriminator) module
    from https://arxiv.org/abs/2206.13404"""

    def __init__(self, h, pqmf_list=None, use_spectral_norm=False):
        """Initialize CoMBD.

        Args:
            h (dict): Hyperparameter dict holding the "combd_*" entries and,
                when ``pqmf_list`` is not given, a "pqmf_config" entry with
                "lv1"/"lv2" PQMF parameter lists.
            pqmf_list (Optional[List]): Pre-built PQMF analysis modules
                (level-2 first, then level-1). Built from ``h`` when None.
            use_spectral_norm (bool): Currently unused; the blocks are created
                with their default normalization.
        """
        super(CoMBD, self).__init__()
        self.h = h
        if pqmf_list is not None:
            self.pqmf = pqmf_list
        else:
            # BUGFIX: ``h`` is indexed as a dict everywhere else in this
            # class; the previous attribute access ``h.pqmf_config`` raised
            # AttributeError for dict configs.
            self.pqmf = [
                PQMF(*h["pqmf_config"]["lv2"]),
                PQMF(*h["pqmf_config"]["lv1"]),
            ]
        self.blocks = torch.nn.ModuleList()
        for _h_u, _d_k, _d_s, _d_d, _d_g, _d_p, _op_f, _op_k, _op_g in zip(
            h["combd_h_u"],
            h["combd_d_k"],
            h["combd_d_s"],
            h["combd_d_d"],
            h["combd_d_g"],
            h["combd_d_p"],
            h["combd_op_f"],
            h["combd_op_k"],
            h["combd_op_g"],
        ):
            self.blocks.append(
                CoMBDBlock(
                    _h_u,
                    _d_k,
                    _d_s,
                    _d_d,
                    _d_g,
                    _d_p,
                    _op_f,
                    _op_k,
                    _op_g,
                )
            )

    def _block_forward(self, input, blocks, outs, f_maps):
        """Run each input through its matching block, appending results."""
        for x, block in zip(input, blocks):
            out, f_map = block(x)
            outs.append(out)
            f_maps.append(f_map)
        return outs, f_maps

    def _pqmf_forward(self, ys, ys_hat):
        """Run hierarchical and multi-scale passes for real and fake signals."""
        # preprocess for multi_scale forward: take the first sub-band of the
        # PQMF analysis of the full-band signal (last list entry)
        multi_scale_inputs = []
        multi_scale_inputs_hat = []
        for pqmf in self.pqmf:
            multi_scale_inputs.append(pqmf.to(ys[-1]).analysis(ys[-1])[:, :1, :])
            multi_scale_inputs_hat.append(
                pqmf.to(ys[-1]).analysis(ys_hat[-1])[:, :1, :]
            )
        outs_real = []
        f_maps_real = []
        # real
        # for hierarchical forward
        outs_real, f_maps_real = self._block_forward(
            ys, self.blocks, outs_real, f_maps_real
        )
        # for multi_scale forward (full-resolution block excluded)
        outs_real, f_maps_real = self._block_forward(
            multi_scale_inputs, self.blocks[:-1], outs_real, f_maps_real
        )
        outs_fake = []
        f_maps_fake = []
        # predicted
        # for hierarchical forward
        outs_fake, f_maps_fake = self._block_forward(
            ys_hat, self.blocks, outs_fake, f_maps_fake
        )
        # for multi_scale forward
        outs_fake, f_maps_fake = self._block_forward(
            multi_scale_inputs_hat, self.blocks[:-1], outs_fake, f_maps_fake
        )
        return outs_real, outs_fake, f_maps_real, f_maps_fake

    def forward(self, ys, ys_hat):
        """Calculate forward propagation.

        Args:
            ys (List[Tensor]): List of ground truth signals of shape (B, 1, T).
            ys_hat (List[Tensor]): List of predicted signals of shape (B, 1, T).

        Returns:
            Tuple[List[Tensor], List[Tensor], List[List[Tensor]], List[List[Tensor]]]:
                Tuple containing the list of output tensors of shape (B, C_out, T_out)
                for real and fake, respectively, and the list of feature maps of shape
                (B, C, T) at each Conv1d layer for real and fake, respectively.
        """
        outs_real, outs_fake, f_maps_real, f_maps_fake = self._pqmf_forward(ys, ys_hat)
        return outs_real, outs_fake, f_maps_real, f_maps_fake
# SBD
class MDC(torch.nn.Module):
    """Multiscale Dilated Convolution from https://arxiv.org/pdf/1609.07093.pdf"""

    def __init__(
        self,
        in_channels,
        out_channels,
        strides,
        kernel_size,
        dilations,
        use_spectral_norm=False,
    ):
        """Initialize MDC.

        Args:
            in_channels (int): Input channels.
            out_channels (int): Output channels of every dilated branch.
            strides (int): Stride of the post conv.
            kernel_size (List[int]): Kernel size per dilated branch.
            dilations (List[int]): Dilation per dilated branch.
            use_spectral_norm (bool): Use spectral norm instead of weight norm.
        """
        super().__init__()
        norm_fn = spectral_norm if use_spectral_norm else weight_norm
        self.d_convs = torch.nn.ModuleList()
        for k, d in zip(kernel_size, dilations):
            self.d_convs.append(
                norm_fn(
                    Conv1d(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=k,
                        dilation=d,
                        padding=get_padding(k, d),
                    )
                )
            )
        # NOTE(review): the padding below reuses the last (k, d) pair from the
        # loop above although this conv has kernel size 3 — kept as-is to
        # match the reference implementation.
        self.post_conv = norm_fn(
            Conv1d(
                in_channels=out_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=strides,
                padding=get_padding(k, d),
            )
        )
        # registered for compatibility; not applied in forward()
        self.softmax = torch.nn.Softmax(dim=-1)

    def forward(self, x):
        """Sum the leaky-ReLU'd dilated branches, then apply the post conv."""
        stacked = None
        for branch_conv in self.d_convs:
            branch = F.leaky_relu(torch.unsqueeze(branch_conv(x), -1), 0.2)
            if stacked is None:
                stacked = branch
            else:
                stacked = torch.cat([stacked, branch], dim=-1)
        x = torch.sum(stacked, dim=-1)
        x = F.leaky_relu(self.post_conv(x), 0.2)
        return x
class SBDBlock(torch.nn.Module):
    """SBD (Sub-band Discriminator) Block"""

    def __init__(
        self,
        segment_dim,
        strides,
        filters,
        kernel_size,
        dilations,
        use_spectral_norm=False,
    ):
        """Initialize an SBD block.

        Args:
            segment_dim (int): Channel size of the incoming sub-band segment.
            strides (List[int]): Stride per MDC layer.
            filters (List[int]): Output channels per MDC layer.
            kernel_size (List[List[int]]): Branch kernel sizes per MDC layer.
            dilations (List[List[int]]): Branch dilations per MDC layer.
            use_spectral_norm (bool): Use spectral norm instead of weight norm.
        """
        super().__init__()
        norm_fn = spectral_norm if use_spectral_norm else weight_norm
        in_out = [(segment_dim, filters[0])]
        in_out += [[filters[i], filters[i + 1]] for i in range(len(filters) - 1)]
        self.convs = torch.nn.ModuleList(
            MDC(
                in_channels=pair[0],
                out_channels=pair[1],
                strides=stride,
                kernel_size=k,
                dilations=d,
                use_spectral_norm=use_spectral_norm,
            )
            for stride, pair, k, d in zip(strides, in_out, kernel_size, dilations)
        )
        # output channels of the last layer actually consumed by zip() above
        last_out_channels = in_out[len(self.convs) - 1][1]
        self.post_conv = norm_fn(
            Conv1d(
                in_channels=last_out_channels,
                out_channels=1,
                kernel_size=3,
                stride=1,
                padding=3 // 2,
            )
        )  # @@

    def forward(self, x):
        """Return (score, feature_maps) for one sub-band segment."""
        fmap = []
        for mdc in self.convs:
            x = mdc(x)
            fmap.append(x)
        return self.post_conv(x), fmap
class MDCDConfig:
    """Unpack the SBD-related entries of a hyperparameter dict into attributes."""

    def __init__(self, h):
        self.pqmf_params = h["pqmf_config"]["sbd"]
        self.f_pqmf_params = h["pqmf_config"]["fsbd"]
        # plain key-to-attribute mapping; raises KeyError on missing entries
        for attr, key in (
            ("filters", "sbd_filters"),
            ("kernel_sizes", "sbd_kernel_sizes"),
            ("dilations", "sbd_dilations"),
            ("strides", "sbd_strides"),
            ("band_ranges", "sbd_band_ranges"),
            ("transpose", "sbd_transpose"),
            ("segment_size", "segment_size"),
        ):
            setattr(self, attr, h[key])
class SBD(torch.nn.Module):
    """SBD (Sub-band Discriminator) from https://arxiv.org/pdf/2206.13404.pdf"""

    def __init__(self, h, use_spectral_norm=False):
        """Initialize SBD.

        Args:
            h (dict): Hyperparameter dict with the "sbd_*", "pqmf_config" and
                "segment_size" entries (see MDCDConfig).
            use_spectral_norm (bool): Use spectral norm instead of weight norm
                in the sub-blocks.
        """
        super(SBD, self).__init__()
        self.config = MDCDConfig(h)
        self.pqmf = PQMF(*self.config.pqmf_params)
        # the fine PQMF is only built when at least one branch is transposed
        if True in h["sbd_transpose"]:
            self.f_pqmf = PQMF(*self.config.f_pqmf_params)
        else:
            self.f_pqmf = None
        self.discriminators = torch.nn.ModuleList()
        for _f, _k, _d, _s, _br, _tr in zip(
            self.config.filters,
            self.config.kernel_sizes,
            self.config.dilations,
            self.config.strides,
            self.config.band_ranges,
            self.config.transpose,
        ):
            if _tr:
                # transposed branch: time frames become channels
                # NOTE(review): parses as (segment_size // _br[1]) - _br[0] —
                # confirm this grouping is intended.
                segment_dim = self.config.segment_size // _br[1] - _br[0]
            else:
                segment_dim = _br[1] - _br[0]
            self.discriminators.append(
                SBDBlock(
                    segment_dim=segment_dim,
                    filters=_f,
                    kernel_size=_k,
                    dilations=_d,
                    strides=_s,
                    use_spectral_norm=use_spectral_norm,
                )
            )

    def forward(self, y, y_hat):
        """Discriminate real/generated waveforms on PQMF sub-band slices.

        Args:
            y (Tensor): Ground-truth waveform (B, 1, T).
            y_hat (Tensor): Generated waveform (B, 1, T).

        Returns:
            Tuple[List[Tensor], List[Tensor], List[List[Tensor]], List[List[Tensor]]]:
            Real outputs, fake outputs, real feature maps, fake feature maps.
        """
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        y_in = self.pqmf.analysis(y)
        y_hat_in = self.pqmf.analysis(y_hat)
        if self.f_pqmf is not None:
            y_in_f = self.f_pqmf.analysis(y)
            y_hat_in_f = self.f_pqmf.analysis(y_hat)
        for d, br, tr in zip(
            self.discriminators, self.config.band_ranges, self.config.transpose
        ):
            if tr:
                # y_in_f/y_hat_in_f exist whenever a transposed branch was
                # configured (guaranteed by the check in __init__)
                _y_in = y_in_f[:, br[0] : br[1], :]
                _y_hat_in = y_hat_in_f[:, br[0] : br[1], :]
                _y_in = torch.transpose(_y_in, 1, 2)
                _y_hat_in = torch.transpose(_y_hat_in, 1, 2)
            else:
                _y_in = y_in[:, br[0] : br[1], :]
                _y_hat_in = y_hat_in[:, br[0] : br[1], :]
            y_d_r, fmap_r = d(_y_in)
            y_d_g, fmap_g = d(_y_hat_in)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)
        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class AvocodoDiscriminator(torch.nn.Module):
    """Avocodo Discriminator module (CoMBD + SBD)."""

    def __init__(
        self,
        combd: Dict[str, Any] = {
            "combd_h_u": [
                [16, 64, 256, 1024, 1024, 1024],
                [16, 64, 256, 1024, 1024, 1024],
                [16, 64, 256, 1024, 1024, 1024],
            ],
            "combd_d_k": [
                [7, 11, 11, 11, 11, 5],
                [11, 21, 21, 21, 21, 5],
                [15, 41, 41, 41, 41, 5],
            ],
            "combd_d_s": [
                [1, 1, 4, 4, 4, 1],
                [1, 1, 4, 4, 4, 1],
                [1, 1, 4, 4, 4, 1],
            ],
            "combd_d_d": [
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
            ],
            "combd_d_g": [
                [1, 4, 16, 64, 256, 1],
                [1, 4, 16, 64, 256, 1],
                [1, 4, 16, 64, 256, 1],
            ],
            "combd_d_p": [
                [3, 5, 5, 5, 5, 2],
                [5, 10, 10, 10, 10, 2],
                [7, 20, 20, 20, 20, 2],
            ],
            "combd_op_f": [1, 1, 1],
            "combd_op_k": [3, 3, 3],
            "combd_op_g": [1, 1, 1],
        },
        sbd: Dict[str, Any] = {
            "use_sbd": True,
            "sbd_filters": [
                [64, 128, 256, 256, 256],
                [64, 128, 256, 256, 256],
                [64, 128, 256, 256, 256],
                [32, 64, 128, 128, 128],
            ],
            "sbd_strides": [
                [1, 1, 3, 3, 1],
                [1, 1, 3, 3, 1],
                [1, 1, 3, 3, 1],
                [1, 1, 3, 3, 1],
            ],
            "sbd_kernel_sizes": [
                [[7, 7, 7], [7, 7, 7], [7, 7, 7], [7, 7, 7], [7, 7, 7]],
                [[5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5]],
                [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
                [[5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5]],
            ],
            "sbd_dilations": [
                [[5, 7, 11], [5, 7, 11], [5, 7, 11], [5, 7, 11], [5, 7, 11]],
                [[3, 5, 7], [3, 5, 7], [3, 5, 7], [3, 5, 7], [3, 5, 7]],
                [[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]],
                [[1, 2, 3], [1, 2, 3], [1, 2, 3], [2, 3, 5], [2, 3, 5]],
            ],
            "sbd_band_ranges": [[0, 6], [0, 11], [0, 16], [0, 64]],
            "sbd_transpose": [False, False, False, True],
            "pqmf_config": {
                "sbd": [16, 256, 0.03, 10.0],
                "fsbd": [64, 256, 0.1, 9.0],
            },
            "segment_size": 8192,
        },
        pqmf_config: Dict[str, Any] = {
            "lv1": [2, 256, 0.25, 10.0],
            "lv2": [4, 192, 0.13, 10.0],
        },
        projection_filters: List[int] = [0, 1, 1, 1],
    ):
        """Initialize the Avocodo discriminator.

        Args:
            combd (Dict[str, Any]): CoMBD hyperparameters ("combd_*" lists and
                optionally "use_spectral_norm", defaulting to False).
            sbd (Dict[str, Any]): SBD hyperparameters ("sbd_*" lists, PQMF
                configs, "segment_size", optionally "use_spectral_norm").
            pqmf_config (Dict[str, Any]): PQMF parameters of the lv1/lv2
                analysis banks shared with CoMBD.
            projection_filters (List[int]): Channel counts used to slice the
                PQMF analysis outputs fed to CoMBD.
        """
        super(AvocodoDiscriminator, self).__init__()
        self.pqmf_lv2 = PQMF(*pqmf_config["lv2"])
        self.pqmf_lv1 = PQMF(*pqmf_config["lv1"])
        # BUGFIX: use .get() so configs without "use_spectral_norm" — including
        # the default dicts above, which do not contain the key — no longer
        # raise KeyError.
        self.combd = CoMBD(
            combd,
            [self.pqmf_lv2, self.pqmf_lv1],
            use_spectral_norm=combd.get("use_spectral_norm", False),
        )
        self.sbd = SBD(
            sbd,
            use_spectral_norm=sbd.get("use_spectral_norm", False),
        )
        self.projection_filters = projection_filters

    def forward(
        self, y: torch.Tensor, y_hats: torch.Tensor
    ) -> List[List[torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            y (Tensor): Ground-truth waveform (B, 1, T).
            y_hats (List[Tensor]): Generated multi-band waveforms; the last
                entry is the full-band signal.

        Returns:
            Tuple of (real outputs, fake outputs, real fmaps, fake fmaps),
            each concatenating the CoMBD and SBD results.
        """
        # multi-resolution views of the ground truth for hierarchical CoMBD
        ys = [
            self.pqmf_lv2.analysis(y)[:, : self.projection_filters[1]],
            self.pqmf_lv1.analysis(y)[:, : self.projection_filters[2]],
            y,
        ]
        (
            combd_outs_real,
            combd_outs_fake,
            combd_fmaps_real,
            combd_fmaps_fake,
        ) = self.combd(ys, y_hats)
        sbd_outs_real, sbd_outs_fake, sbd_fmaps_real, sbd_fmaps_fake = self.sbd(
            y, y_hats[-1]
        )
        # Combine the outputs of both discriminators
        outs_real = combd_outs_real + sbd_outs_real
        outs_fake = combd_outs_fake + sbd_outs_fake
        fmaps_real = combd_fmaps_real + sbd_fmaps_real
        fmaps_fake = combd_fmaps_fake + sbd_fmaps_fake
        return outs_real, outs_fake, fmaps_real, fmaps_fake
class AvocodoDiscriminatorPlus(torch.nn.Module):
    """Avocodo discriminator with additional MFD."""

    def __init__(
        self,
        combd: Dict[str, Any] = {
            "combd_h_u": [
                [16, 64, 256, 1024, 1024, 1024],
                [16, 64, 256, 1024, 1024, 1024],
                [16, 64, 256, 1024, 1024, 1024],
            ],
            "combd_d_k": [
                [7, 11, 11, 11, 11, 5],
                [11, 21, 21, 21, 21, 5],
                [15, 41, 41, 41, 41, 5],
            ],
            "combd_d_s": [
                [1, 1, 4, 4, 4, 1],
                [1, 1, 4, 4, 4, 1],
                [1, 1, 4, 4, 4, 1],
            ],
            "combd_d_d": [
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
            ],
            "combd_d_g": [
                [1, 4, 16, 64, 256, 1],
                [1, 4, 16, 64, 256, 1],
                [1, 4, 16, 64, 256, 1],
            ],
            "combd_d_p": [
                [3, 5, 5, 5, 5, 2],
                [5, 10, 10, 10, 10, 2],
                [7, 20, 20, 20, 20, 2],
            ],
            "combd_op_f": [1, 1, 1],
            "combd_op_k": [3, 3, 3],
            "combd_op_g": [1, 1, 1],
        },
        sbd: Dict[str, Any] = {
            "use_sbd": True,
            "sbd_filters": [
                [64, 128, 256, 256, 256],
                [64, 128, 256, 256, 256],
                [64, 128, 256, 256, 256],
                [32, 64, 128, 128, 128],
            ],
            "sbd_strides": [
                [1, 1, 3, 3, 1],
                [1, 1, 3, 3, 1],
                [1, 1, 3, 3, 1],
                [1, 1, 3, 3, 1],
            ],
            "sbd_kernel_sizes": [
                [[7, 7, 7], [7, 7, 7], [7, 7, 7], [7, 7, 7], [7, 7, 7]],
                [[5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5]],
                [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
                [[5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5]],
            ],
            "sbd_dilations": [
                [[5, 7, 11], [5, 7, 11], [5, 7, 11], [5, 7, 11], [5, 7, 11]],
                [[3, 5, 7], [3, 5, 7], [3, 5, 7], [3, 5, 7], [3, 5, 7]],
                [[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]],
                [[1, 2, 3], [1, 2, 3], [1, 2, 3], [2, 3, 5], [2, 3, 5]],
            ],
            "sbd_band_ranges": [[0, 6], [0, 11], [0, 16], [0, 64]],
            "sbd_transpose": [False, False, False, True],
            "pqmf_config": {
                "sbd": [16, 256, 0.03, 10.0],
                "fsbd": [64, 256, 0.1, 9.0],
            },
            "segment_size": 8192,
        },
        pqmf_config: Dict[str, Any] = {
            "lv1": [2, 256, 0.25, 10.0],
            "lv2": [4, 192, 0.13, 10.0],
        },
        projection_filters: List[int] = [0, 1, 1, 1],
        # Multi-frequency discriminator related
        sample_rate: int = 22050,
        multi_freq_disc_params: Dict[str, Any] = {
            "hop_length_factors": [4, 8, 16],
            "hidden_channels": [256, 512, 512],
            "domain": "double",
            "mel_scale": True,
            "divisors": [32, 16, 8, 4, 2, 1, 1],
            "strides": [1, 2, 1, 2, 1, 2, 1],
        },
    ):
        """Initialize the Avocodo discriminator with an extra MFD branch.

        Args:
            combd (Dict[str, Any]): CoMBD hyperparameters ("combd_*" lists and
                optionally "use_spectral_norm", defaulting to False).
            sbd (Dict[str, Any]): SBD hyperparameters.
            pqmf_config (Dict[str, Any]): PQMF parameters of the lv1/lv2
                analysis banks shared with CoMBD.
            projection_filters (List[int]): Channel counts used to slice the
                PQMF analysis outputs fed to CoMBD.
            sample_rate (int): Sampling rate, used to convert hop-length
                factors (in ms) into hop lengths (in samples).
            multi_freq_disc_params (Dict[str, Any]): Parameters forwarded to
                MultiFrequencyDiscriminator; "hop_length_factors" is converted
                to "hop_lengths" when the latter is absent.
        """
        super().__init__()
        self.pqmf_lv2 = PQMF(*pqmf_config["lv2"])
        self.pqmf_lv1 = PQMF(*pqmf_config["lv1"])
        # BUGFIX: use .get() so configs without "use_spectral_norm" — including
        # the default dicts above, which do not contain the key — no longer
        # raise KeyError.
        self.combd = CoMBD(
            combd,
            [self.pqmf_lv2, self.pqmf_lv1],
            use_spectral_norm=combd.get("use_spectral_norm", False),
        )
        self.sbd = SBD(
            sbd,
            use_spectral_norm=sbd.get("use_spectral_norm", False),
        )
        # Multi-frequency discriminator related
        # BUGFIX: copy before mutating — the default dict is shared across all
        # instantiations, and the previous in-place del/insert corrupted it.
        multi_freq_disc_params = dict(multi_freq_disc_params)
        if "hop_lengths" not in multi_freq_disc_params:
            # Transfer hop length factors (ms) to hop lengths (samples)
            multi_freq_disc_params["hop_lengths"] = [
                int(sample_rate * factor / 1000)
                for factor in multi_freq_disc_params["hop_length_factors"]
            ]
            del multi_freq_disc_params["hop_length_factors"]
        self.mfd = MultiFrequencyDiscriminator(
            **multi_freq_disc_params,
        )
        self.projection_filters = projection_filters

    def forward(
        self, y: torch.Tensor, y_hats: torch.Tensor
    ) -> List[List[torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            y (Tensor): Ground-truth waveform (B, 1, T).
            y_hats (List[Tensor]): Generated multi-band waveforms; the last
                entry is the full-band signal.

        Returns:
            Tuple of (real outputs, fake outputs, real fmaps, fake fmaps)
            combining CoMBD, SBD, and MFD results.
        """
        # multi-resolution views of the ground truth for hierarchical CoMBD
        ys = [
            self.pqmf_lv2.analysis(y)[:, : self.projection_filters[1]],
            self.pqmf_lv1.analysis(y)[:, : self.projection_filters[2]],
            y,
        ]
        (
            combd_outs_real,
            combd_outs_fake,
            combd_fmaps_real,
            combd_fmaps_fake,
        ) = self.combd(ys, y_hats)
        sbd_outs_real, sbd_outs_fake, sbd_fmaps_real, sbd_fmaps_fake = self.sbd(
            y, y_hats[-1]
        )
        mfd_fmaps_real = self.mfd(y)
        mfd_fmaps_fake = self.mfd(y_hats[-1])
        # NOTE(review): the last MFD feature map is treated as its output and
        # the full fmap list is also appended below — confirm this matches the
        # MultiFrequencyDiscriminator return convention.
        mfd_outs_real = mfd_fmaps_real[-1]
        mfd_outs_fake = mfd_fmaps_fake[-1]
        # Combine the outputs of both discriminators
        outs_real = combd_outs_real + sbd_outs_real + mfd_outs_real
        outs_fake = combd_outs_fake + sbd_outs_fake + mfd_outs_fake
        fmaps_real = combd_fmaps_real + sbd_fmaps_real + mfd_fmaps_real
        fmaps_fake = combd_fmaps_fake + sbd_fmaps_fake + mfd_fmaps_fake
        return outs_real, outs_fake, fmaps_real, fmaps_fake
| 29,046 | 33.172941 | 88 | py |
espnet | espnet-master/espnet2/gan_svs/pits/modules.py | import torch
import torch.nn as nn
class WN(torch.nn.Module):
    """WaveNet-style stack of gated dilated convolutions with skip outputs.

    Supports optional global conditioning via ``g``; every convolution is
    weight-normalized.
    """

    def __init__(
        self,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
        p_dropout=0,
    ):
        """Initialize WN.

        Args:
            hidden_channels (int): Number of hidden channels.
            kernel_size (int): Convolution kernel size (must be odd).
            dilation_rate (int): Base of the per-layer exponential dilation.
            n_layers (int): Number of gated convolution layers.
            gin_channels (int): Global conditioning channels (0 disables).
            p_dropout (float): Dropout probability on the gated activations.
        """
        super(WN, self).__init__()
        assert kernel_size % 2 == 1
        self.hidden_channels = hidden_channels
        # BUGFIX: a stray trailing comma previously stored the 1-tuple
        # ``(kernel_size,)``; store the int itself, consistent with the other
        # hyperparameter attributes.
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout
        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)
        if gin_channels > 0:
            # single conditioning projection shared by all layers; each layer
            # consumes its own 2*hidden_channels slice (filter + gate halves)
            cond_layer = torch.nn.Conv1d(
                gin_channels, 2 * hidden_channels * n_layers, 1
            )
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
        for i in range(n_layers):
            dilation = dilation_rate**i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(
                hidden_channels,
                2 * hidden_channels,
                kernel_size,
                dilation=dilation,
                padding=padding,
            )
            in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
            self.in_layers.append(in_layer)
            # last one is not necessary: the final layer has no residual path
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels
            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, hidden_channels, T).
            x_mask (Tensor): Mask tensor (B, 1, T).
            g (Optional[Tensor]): Global conditioning tensor
                (B, gin_channels, 1 or T).

        Returns:
            Tensor: Masked sum of skip outputs (B, hidden_channels, T).
        """
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])
        if g is not None:
            g = self.cond_layer(g)
        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                # slice out this layer's share of the conditioning projection
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)
            acts = self.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
            acts = self.drop(acts)
            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                # first half is the residual path, second half the skip path
                res_acts = res_skip_acts[:, : self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels :, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        """Remove weight normalization from all convolution layers."""
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for in_layer in self.in_layers:
            torch.nn.utils.remove_weight_norm(in_layer)
        for res_skip_layer in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(res_skip_layer)

    def fused_add_tanh_sigmoid_multiply(self, input_a, input_b, n_channels):
        """Gated activation: tanh(filter half) * sigmoid(gate half) of a + b."""
        n_channels_int = n_channels[0]
        in_act = input_a + input_b
        t_act = torch.tanh(in_act[:, :n_channels_int, :])
        s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
        acts = t_act * s_act
        return acts
| 3,542 | 34.787879 | 86 | py |
espnet | espnet-master/espnet2/gan_svs/pits/ying_decoder.py | import torch
import torch.nn as nn
import espnet2.gan_svs.pits.modules as modules
# TODO (Yifeng): This comment is generated by ChatGPT, which may not be accurate.
class YingDecoder(nn.Module):
    """Ying decoder module.

    Decodes a randomly shifted crop of the YIN/pitch representation from
    latent features with a WaveNet-style (WN) dilated convolution stack.
    """
    def __init__(
        self,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        yin_start,
        yin_scope,
        yin_shift_range,
        gin_channels=0,
    ):
        """Initialize the YingDecoder module.
        Args:
            hidden_channels (int): Number of hidden channels.
            kernel_size (int): Size of the convolutional kernel.
            dilation_rate (int): Dilation rate of the convolutional layers.
            n_layers (int): Number of convolutional layers.
            yin_start (int): Start point of the yin target signal.
            yin_scope (int): Scope of the yin target signal.
            yin_shift_range (int): Maximum number of frames to shift the yin
                target signal.
            gin_channels (int, optional): Number of global conditioning channels.
                Defaults to 0.
        """
        super().__init__()
        # Both decoder input and output carry `yin_scope` channels.
        self.in_channels = yin_scope
        self.out_channels = yin_scope
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.yin_start = yin_start
        self.yin_scope = yin_scope
        self.yin_shift_range = yin_shift_range
        self.pre = nn.Conv1d(self.in_channels, hidden_channels, 1)
        # WaveNet-style stack with optional global conditioning (gin_channels).
        self.dec = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, self.out_channels, 1)
    def crop_scope(self, x, yin_start, scope_shift):
        """Crop the input tensor.
        Args:
            x (torch.Tensor): Input tensor of shape [B, C, T].
            yin_start (int): Starting point of the yin target signal.
            scope_shift (torch.Tensor): Shift tensor of shape [B].
        Returns:
            torch.Tensor: Cropped tensor of shape [B, C, yin_scope].
        """
        # Per-sample window of length yin_scope, taken along dim 1 and
        # starting at yin_start + scope_shift[i] for batch element i.
        return torch.stack(
            [
                x[
                    i,
                    yin_start
                    + scope_shift[i] : yin_start
                    + self.yin_scope
                    + scope_shift[i],
                    :,
                ]
                for i in range(x.shape[0])
            ],
            dim=0,
        )
    def infer(self, z_yin, z_mask, g=None):
        """Generate yin prediction.
        Args:
            z_yin (torch.Tensor): Input yin target tensor of shape [B, yin_scope, C].
            z_mask (torch.Tensor): Input mask tensor of shape [B, yin_scope, 1].
            g (torch.Tensor, optional): Global conditioning tensor of shape
                [B, gin_channels, 1]. Defaults to None.
        Returns:
            torch.Tensor: Predicted yin tensor of shape [B, yin_scope, C].
        """
        B = z_yin.shape[0]
        # Random per-sample crop offset in [-yin_shift_range, yin_shift_range).
        scope_shift = torch.randint(
            -self.yin_shift_range, self.yin_shift_range, (B,), dtype=torch.int
        )
        z_yin_crop = self.crop_scope(z_yin, self.yin_start, scope_shift)
        x = self.pre(z_yin_crop) * z_mask
        x = self.dec(x, z_mask, g=g)
        yin_hat_crop = self.proj(x) * z_mask
        return yin_hat_crop
    def forward(self, z_yin, yin_gt, z_mask, g=None):
        """Forward pass of the decoder.
        Args:
            z_yin (torch.Tensor): The input yin note sequence of shape (B, C, T_yin).
            yin_gt (torch.Tensor): The ground truth yin note sequence of shape
                (B, C, T_yin).
            z_mask (torch.Tensor): The mask tensor of shape (B, 1, T_yin).
            g (torch.Tensor): The global conditioning tensor.
        Returns:
            torch.Tensor: The predicted yin note sequence of shape (B, C, T_yin).
            torch.Tensor: The shifted ground truth yin note sequence of shape
                (B, C, T_yin).
            torch.Tensor: The cropped ground truth yin note sequence of shape
                (B, C, T_yin).
            torch.Tensor: The cropped input yin note sequence of shape (B, C, T_yin).
            torch.Tensor: The scope shift tensor of shape (B,).
        """
        B = z_yin.shape[0]
        # Same random-shift augmentation as in infer().
        scope_shift = torch.randint(
            -self.yin_shift_range, self.yin_shift_range, (B,), dtype=torch.int
        )
        z_yin_crop = self.crop_scope(z_yin, self.yin_start, scope_shift)
        # Ground truth cropped with the same random shift as the latent...
        yin_gt_shifted_crop = self.crop_scope(yin_gt, self.yin_start, scope_shift)
        # ...and without any shift, as the unshifted reference target.
        yin_gt_crop = self.crop_scope(
            yin_gt, self.yin_start, torch.zeros_like(scope_shift)
        )
        x = self.pre(z_yin_crop) * z_mask
        x = self.dec(x, z_mask, g=g)
        yin_hat_crop = self.proj(x) * z_mask
        return yin_gt_crop, yin_gt_shifted_crop, yin_hat_crop, z_yin_crop, scope_shift
| 5,184 | 35.77305 | 86 | py |
espnet | espnet-master/espnet2/gan_svs/visinger2/visinger2_vocoder.py | # Copyright 2023 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""VISinger2 HiFi-GAN Modules.
This code is based on https://github.com/zhangyongmao/VISinger2
"""
import logging
import math
from typing import Any, Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from espnet2.gan_svs.visinger2.ddsp import (
remove_above_nyquist,
scale_function,
upsample,
)
from espnet2.gan_tts.hifigan import (
HiFiGANMultiPeriodDiscriminator,
HiFiGANMultiScaleDiscriminator,
HiFiGANMultiScaleMultiPeriodDiscriminator,
HiFiGANPeriodDiscriminator,
HiFiGANScaleDiscriminator,
)
from espnet2.gan_tts.hifigan.residual_block import ResidualBlock
class VISinger2VocoderGenerator(torch.nn.Module):
    """HiFi-GAN style generator with a DDSP side branch.

    In addition to the mel-style input, it consumes a DDSP source signal
    (harmonics + noise + f0, i.e. ``n_harmonic + 2`` channels at sample
    rate) that is progressively downsampled and fused with each stage of
    the upsampling branch.
    """
    def __init__(
        self,
        in_channels: int = 80,
        out_channels: int = 1,
        channels: int = 512,
        global_channels: int = -1,
        kernel_size: int = 7,
        upsample_scales: List[int] = [8, 8, 2, 2],
        upsample_kernel_sizes: List[int] = [16, 16, 4, 4],
        resblock_kernel_sizes: List[int] = [3, 7, 11],
        resblock_dilations: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        n_harmonic: int = 64,
        use_additional_convs: bool = True,
        bias: bool = True,
        nonlinear_activation: str = "LeakyReLU",
        nonlinear_activation_params: Dict[str, Any] = {"negative_slope": 0.1},
        use_weight_norm: bool = True,
    ):
        """Initialize HiFiGANGenerator module.
        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            channels (int): Number of hidden representation channels.
            global_channels (int): Number of global conditioning channels.
            kernel_size (int): Kernel size of initial and final conv layer.
            upsample_scales (List[int]): List of upsampling scales.
            upsample_kernel_sizes (List[int]): List of kernel sizes for upsample layers.
            resblock_kernel_sizes (List[int]): List of kernel sizes for residual blocks.
            resblock_dilations (List[List[int]]): List of list of dilations for residual
                blocks.
            n_harmonic (int): Number of harmonics used to synthesize a sound signal.
            use_additional_convs (bool): Whether to use additional conv layers in
                residual blocks.
            bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (Dict[str, Any]): Hyperparameters for activation
                function.
            use_weight_norm (bool): Whether to use weight norm. If set to true, it will
                be applied to all of the conv layers.
        """
        super().__init__()
        # check hyperparameters are valid
        assert kernel_size % 2 == 1, "Kernel size must be odd number."
        assert len(upsample_scales) == len(upsample_kernel_sizes)
        assert len(resblock_dilations) == len(resblock_kernel_sizes)
        # define modules
        self.upsample_factor = int(np.prod(upsample_scales) * out_channels)
        self.num_upsamples = len(upsample_kernel_sizes)
        self.num_blocks = len(resblock_kernel_sizes)
        self.input_conv = torch.nn.Conv1d(
            in_channels,
            channels,
            kernel_size,
            1,
            padding=(kernel_size - 1) // 2,
        )
        self.upsample_scales = upsample_scales
        self.downs = torch.nn.ModuleList()
        # The DDSP downsampling branch is built in *reverse* order of the
        # upsampling branch so that downs[i] matches the resolution of
        # upsample stage (num_upsamples - 1 - i) in forward().
        for i, (u, k) in enumerate(zip(upsample_scales, upsample_kernel_sizes)):
            # NOTE(review): the loop variables are deliberately overwritten
            # below to index the lists from the end (reverse order).
            i = self.num_upsamples - 1 - i
            u = upsample_scales[i]
            k = upsample_kernel_sizes[i]
            down = torch.nn.Conv1d(
                n_harmonic + 2,
                n_harmonic + 2,
                k,
                u,
                padding=k // 2 + k % 2,
            )
            self.downs.append(down)
        self.blocks_downs = torch.nn.ModuleList()
        for i in range(len(self.downs)):
            # NOTE(review): j is computed but never used.
            j = self.num_upsamples - 1 - i
            self.blocks_downs += [
                ResidualBlock(
                    kernel_size=3,
                    channels=n_harmonic + 2,
                    dilations=(1, 3),
                    bias=bias,
                    use_additional_convs=False,
                    nonlinear_activation=nonlinear_activation,
                    nonlinear_activation_params=nonlinear_activation_params,
                )
            ]
        # Fuses the hidden branch with the coarsest DDSP features.
        self.concat_pre = torch.nn.Conv1d(
            channels + n_harmonic + 2,
            channels,
            3,
            1,
            padding=1,
        )
        self.concat_conv = torch.nn.ModuleList()
        for i in range(self.num_upsamples):
            # Channel count halves at every upsampling stage.
            ch = channels // (2 ** (i + 1))
            self.concat_conv.append(
                torch.nn.Conv1d(ch + n_harmonic + 2, ch, 3, 1, padding=1, bias=bias)
            )
        self.upsamples = torch.nn.ModuleList()
        self.blocks = torch.nn.ModuleList()
        for i in range(len(upsample_kernel_sizes)):
            # This padding/output_padding arithmetic assumes kernel == 2 * stride.
            assert upsample_kernel_sizes[i] == 2 * upsample_scales[i]
            self.upsamples += [
                torch.nn.Sequential(
                    getattr(torch.nn, nonlinear_activation)(
                        **nonlinear_activation_params
                    ),
                    torch.nn.ConvTranspose1d(
                        channels // (2**i),
                        channels // (2 ** (i + 1)),
                        upsample_kernel_sizes[i],
                        upsample_scales[i],
                        padding=upsample_scales[i] // 2 + upsample_scales[i] % 2,
                        output_padding=upsample_scales[i] % 2,
                    ),
                )
            ]
            for j in range(len(resblock_kernel_sizes)):
                self.blocks += [
                    ResidualBlock(
                        kernel_size=resblock_kernel_sizes[j],
                        channels=channels // (2 ** (i + 1)),
                        dilations=resblock_dilations[j],
                        bias=bias,
                        use_additional_convs=use_additional_convs,
                        nonlinear_activation=nonlinear_activation,
                        nonlinear_activation_params=nonlinear_activation_params,
                    )
                ]
        self.output_conv = torch.nn.Sequential(
            # NOTE(kan-bayashi): follow official implementation but why
            # using different slope parameter here? (0.1 vs. 0.01)
            torch.nn.LeakyReLU(),
            torch.nn.Conv1d(
                channels // (2 ** (i + 1)),
                out_channels,
                kernel_size,
                1,
                padding=(kernel_size - 1) // 2,
            ),
            torch.nn.Tanh(),
        )
        if global_channels > 0:
            self.global_conv = torch.nn.Conv1d(global_channels, channels, 1)
        # apply weight norm
        if use_weight_norm:
            self.apply_weight_norm()
        # reset parameters
        self.reset_parameters()
    def forward(self, c, ddsp, g: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Calculate forward propagation.
        Args:
            c (Tensor): Input tensor (B, in_channels, T).
            ddsp (Tensor): Input tensor (B, n_harmonic + 2, T * hop_length).
            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).
        Returns:
            Tensor: Output tensor (B, out_channels, T).
        """
        c = self.input_conv(c)
        if g is not None:
            c = c + self.global_conv(g)
        se = ddsp
        # Downsample the DDSP source step by step, keeping every intermediate
        # resolution so it can be concatenated with the matching upsample
        # stage below (res_features[k] has the k-th coarsest resolution).
        res_features = [se]
        for i in range(self.num_upsamples):
            in_size = se.size(2)
            se = self.downs[i](se)
            se = self.blocks_downs[i](se)
            up_rate = self.upsample_scales[self.num_upsamples - 1 - i]
            # Trim extra frames introduced by the conv padding.
            se = se[:, :, : in_size // up_rate]
            res_features.append(se)
        c = torch.cat([c, se], 1)
        c = self.concat_pre(c)
        for i in range(self.num_upsamples):
            in_size = c.size(2)
            c = self.upsamples[i](c)
            # Trim to the exact expected length after the transposed conv.
            c = c[:, :, : in_size * self.upsample_scales[i]]
            c = torch.cat([c, res_features[self.num_upsamples - 1 - i]], 1)
            c = self.concat_conv[i](c)
            cs = 0.0  # initialize
            for j in range(self.num_blocks):
                cs = cs + self.blocks[i * self.num_blocks + j](c)
            # Average the parallel multi-receptive-field residual blocks.
            c = cs / self.num_blocks
        c = self.output_conv(c)
        return c
    def reset_parameters(self):
        """Reset parameters.
        This initialization follows the official implementation manner.
        https://github.com/jik876/hifi-gan/blob/master/models.py
        """
        def _reset_parameters(m: torch.nn.Module):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                m.weight.data.normal_(0.0, 0.01)
                logging.debug(f"Reset parameters in {m}.")
        self.apply(_reset_parameters)
    def remove_weight_norm(self):
        """Remove weight normalization module from all of the layers."""
        def _remove_weight_norm(m: torch.nn.Module):
            try:
                logging.debug(f"Weight norm is removed from {m}.")
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return
        self.apply(_remove_weight_norm)
    def apply_weight_norm(self):
        """Apply weight normalization module from all of the layers."""
        def _apply_weight_norm(m: torch.nn.Module):
            if isinstance(m, torch.nn.Conv1d) or isinstance(
                m, torch.nn.ConvTranspose1d
            ):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")
        self.apply(_apply_weight_norm)
class Generator_Harm(torch.nn.Module):
    """DDSP-style harmonic component generator.

    Predicts per-harmonic amplitudes from hidden features and additively
    synthesizes the harmonic stack from the F0 contour.
    """
    def __init__(
        self,
        hidden_channels: int = 192,
        n_harmonic: int = 64,
        kernel_size: int = 3,
        padding: int = 1,
        dropout_rate: float = 0.1,
        sample_rate: int = 22050,
        hop_size: int = 256,
    ):
        """Initialize harmonic generator module.
        Args:
            hidden_channels (int): Number of channels in the input and hidden layers.
            n_harmonic (int): Number of harmonic channels.
            kernel_size (int): Size of the convolutional kernel.
            padding (int): Amount of padding added to the input.
            dropout_rate (float): Dropout rate.
            sample_rate (int): Sampling rate of the input audio.
            hop_size (int): Hop size used in the analysis of the input audio.
        """
        super().__init__()
        self.prenet = torch.nn.Conv1d(
            hidden_channels, hidden_channels, kernel_size, padding=padding
        )
        self.net = ConvReluNorm(
            hidden_channels,
            hidden_channels,
            hidden_channels,
            kernel_size,
            8,
            dropout_rate,
        )
        # n_harmonic + 1 outputs: one global amplitude + per-harmonic weights.
        self.postnet = torch.nn.Conv1d(
            hidden_channels, n_harmonic + 1, kernel_size, padding=padding
        )
        self.sample_rate = sample_rate
        self.hop_size = hop_size
    def forward(self, f0, harm, mask):
        """Generate harmonics from F0 and harmonic data.
        Args:
            f0 (Tensor): Pitch (F0) tensor (B, 1, T).
            harm (Tensor): Harmonic data tensor (B, hidden_channels, T).
            mask (Tensor): Mask tensor for harmonic data (B, 1, T).
        Returns:
            Tensor: Harmonic signal tensor (B, n_harmonic, T * hop_length).
        """
        pitch = f0.transpose(1, 2)
        harm = self.prenet(harm)
        harm = self.net(harm) * mask
        harm = self.postnet(harm)
        harm = harm.transpose(1, 2)
        param = harm
        # Map raw network outputs to positive amplitudes (DDSP scaling).
        param = scale_function(param)
        # First entry is the global amplitude; the rest weight each harmonic.
        total_amp = param[..., :1]
        amplitudes = param[..., 1:]
        # Zero out (well, strongly attenuate) harmonics above Nyquist.
        amplitudes = remove_above_nyquist(
            amplitudes,
            pitch,
            self.sample_rate,
        )
        # Normalize the harmonic distribution, then apply the global amplitude.
        amplitudes /= amplitudes.sum(-1, keepdim=True)
        amplitudes *= total_amp
        # Expand frame-rate params to sample rate for additive synthesis.
        amplitudes = upsample(amplitudes, self.hop_size)
        pitch = upsample(pitch, self.hop_size)
        n_harmonic = amplitudes.shape[-1]
        # Integrate instantaneous frequency to phase (cumulative sum).
        omega = torch.cumsum(2 * math.pi * pitch / self.sample_rate, 1)
        omegas = omega * torch.arange(1, n_harmonic + 1).to(omega)
        signal_harmonics = torch.sin(omegas) * amplitudes
        signal_harmonics = signal_harmonics.transpose(1, 2)
        return signal_harmonics
class Generator_Noise(torch.nn.Module):
    """Stochastic (noise) component generator.

    Predicts a magnitude spectrogram from hidden features, pairs it with
    uniformly random phase, and resynthesizes a waveform via the inverse
    STFT.
    """

    def __init__(
        self,
        win_length: int = 1024,
        hop_length: int = 256,
        n_fft: int = 1024,
        hidden_channels: int = 192,
        kernel_size: int = 3,
        padding: int = 1,
        dropout_rate: float = 0.1,
    ):
        """Initialize the Generator_Noise module.

        Args:
            win_length (int, optional): Window length. If None, set to `n_fft`.
            hop_length (int): Hop length.
            n_fft (int): FFT size.
            hidden_channels (int): Number of hidden representation channels.
            kernel_size (int): Size of the convolutional kernel.
            padding (int): Size of the padding applied to the input.
            dropout_rate (float): Dropout rate.
        """
        super().__init__()
        self.win_size = win_length if win_length is not None else n_fft
        self.hop_size = hop_length
        self.fft_size = n_fft
        self.istft_pre = torch.nn.Conv1d(
            hidden_channels, hidden_channels, kernel_size, padding=padding
        )
        self.net = ConvReluNorm(
            hidden_channels,
            hidden_channels,
            hidden_channels,
            kernel_size,
            8,
            dropout_rate,
        )
        # Projects hidden features to one magnitude per frequency bin.
        # NOTE: the previous code passed `padding` positionally here, which
        # torch.nn.Conv1d interprets as `stride` (4th positional arg).  With
        # padding == 1 that accidentally meant stride=1 / padding=0 — the
        # correct geometry for a kernel-size-1 conv — so the defaults are
        # kept, just without the misleading positional argument.
        self.istft_amplitude = torch.nn.Conv1d(
            hidden_channels, self.fft_size // 2 + 1, 1
        )
        self.window = torch.hann_window(self.win_size)

    def forward(self, x, mask):
        """Synthesize the noise waveform.

        Args:
            x (Tensor): Input tensor (B, hidden_channels, T).
            mask (Tensor): Mask tensor (B, 1, T).

        Returns:
            Tensor: Output tensor (B, 1, T * hop_size).
        """
        istft_x = x
        istft_x = self.istft_pre(istft_x)
        istft_x = self.net(istft_x) * mask
        amp = self.istft_amplitude(istft_x).unsqueeze(-1)
        # Uniformly random phase in [-pi, pi) (was a hard-coded 3.14).
        phase = (torch.rand(amp.shape) * 2 * math.pi - math.pi).to(amp)
        real = amp * torch.cos(phase)
        imag = amp * torch.sin(phase)
        # Stack real/imag and view as a complex tensor for torch.istft.
        spec = torch.stack([real, imag], dim=-1)
        spec_complex = torch.view_as_complex(spec.squeeze(-2))
        istft_x = torch.istft(
            spec_complex,
            self.fft_size,
            self.hop_size,
            self.win_size,
            self.window.to(amp),
            True,
            length=x.shape[2] * self.hop_size,
            return_complex=False,
        )
        return istft_x.unsqueeze(1)
class MultiFrequencyDiscriminator(torch.nn.Module):
    """Multi-Frequency Discriminator module in UnivNet."""
    def __init__(
        self,
        sample_rate: int = 22050,
        hop_lengths=[128, 256, 512],
        hidden_channels=[256, 512, 512],
        domain="double",
        mel_scale=True,
        divisors=[32, 16, 8, 4, 2, 1, 1],
        strides=[1, 2, 1, 2, 1, 2, 1],
    ):
        """
        Initialize Multi-Frequency Discriminator module.
        Args:
            hop_lengths (list): List of hop lengths.
            hidden_channels (list): List of number of channels in hidden layers.
            domain (str): Domain of input signal. Default is "double".
            mel_scale (bool): Whether to use mel-scale frequency. Default is True.
            divisors (list): List of divisors for each layer in the discriminator.
                             Default is [32, 16, 8, 4, 2, 1, 1].
            strides (list): List of strides for each layer in the discriminator.
                            Default is [1, 2, 1, 2, 1, 2, 1].
        """
        super().__init__()
        # One STFT front-end per resolution; fft/window sizes are 4x the hop.
        # TODO (Yifeng): Maybe use LogMelFbank instead of TorchSTFT
        self.stfts = torch.nn.ModuleList(
            [
                TorchSTFT(
                    sample_rate=sample_rate,
                    fft_size=x * 4,
                    hop_size=x,
                    win_size=x * 4,
                    normalized=True,
                    domain=domain,
                    mel_scale=mel_scale,
                )
                for x in hop_lengths
            ]
        )
        self.domain = domain
        # "double" stacks linear + log magnitudes into 2 input channels.
        if domain == "double":
            self.discriminators = torch.nn.ModuleList(
                [
                    BaseFrequenceDiscriminator(2, c, divisors=divisors, strides=strides)
                    for x, c in zip(hop_lengths, hidden_channels)
                ]
            )
        else:
            self.discriminators = torch.nn.ModuleList(
                [
                    BaseFrequenceDiscriminator(1, c, divisors=divisors, strides=strides)
                    for x, c in zip(hop_lengths, hidden_channels)
                ]
            )
    def forward(self, x):
        """
        Forward pass of Multi-Frequency Discriminator module.
        Args:
            x (Tensor): Input tensor (B, 1, T * hop_size).
        Returns:
            List[Tensor]: List of feature maps.
        """
        feats = list()
        for stft, layer in zip(self.stfts, self.discriminators):
            # Phase is discarded; only magnitudes feed the discriminator.
            mag, phase = stft.transform(x.squeeze(1))
            if self.domain == "double":
                # Split the stacked linear/log magnitudes into 2 channels.
                mag = torch.stack(torch.chunk(mag, 2, dim=1), dim=1)
            else:
                mag = mag.unsqueeze(1)
            feat = layer(mag)
            feats.append(feat)
        return feats
class BaseFrequenceDiscriminator(torch.nn.Module):
    """Stack of weight-normalized 2-D conv stages over a spectrogram.

    Each stage is (LeakyReLU | Identity) -> ReflectionPad2d -> Conv2d; the
    feature map of every stage is returned, the last one having a single
    channel (the discriminator score map).
    """

    def __init__(
        self,
        in_channels,
        hidden_channels=512,
        divisors=[32, 16, 8, 4, 2, 1, 1],
        strides=[1, 2, 1, 2, 1, 2, 1],
    ):
        """Initialize BaseFrequenceDiscriminator.

        Args:
            in_channels (int): Number of input channels.
            hidden_channels (int, optional): Number of channels in hidden layers.
                Defaults to 512.
            divisors (List[int], optional): List of divisors for the number of
                channels in each layer; its length determines the number of
                layers. Defaults to [32, 16, 8, 4, 2, 1, 1].
            strides (List[int], optional): List of stride values for each layer.
                Defaults to [1, 2, 1, 2, 1, 2, 1].
        """
        super().__init__()
        # Build a per-layer (input, output, stride) plan: channel widths are
        # hidden_channels divided by the configured divisors, and the final
        # layer maps down to a single output channel.
        plan = []
        for idx in range(len(divisors) - 1):
            src = in_channels if idx == 0 else hidden_channels // divisors[idx - 1]
            plan.append((src, hidden_channels // divisors[idx], strides[idx]))
        plan.append((hidden_channels // divisors[-1], 1, strides[-1]))
        self.discriminators = torch.nn.ModuleList(
            torch.nn.Sequential(
                # No activation in front of the final (1-channel) stage.
                torch.nn.LeakyReLU(0.2, True) if dst != 1 else torch.nn.Identity(),
                torch.nn.ReflectionPad2d((1, 1, 1, 1)),
                torch.nn.utils.weight_norm(
                    torch.nn.Conv2d(
                        src,
                        dst,
                        kernel_size=(3, 3),
                        stride=(stride, stride),
                    )
                ),
            )
            for src, dst, stride in plan
        )

    def forward(self, x):
        """Run all conv stages, collecting every intermediate feature map.

        Args:
            x (torch.Tensor): Input tensor of shape
                (B, in_channels, freq_bins, time_steps).

        Returns:
            List[torch.Tensor]: Output of each stage in order; the last
            entry is the single-channel score map.
        """
        feature_maps = []
        hidden = x
        for stage in self.discriminators:
            hidden = stage(hidden)
            feature_maps.append(hidden)
        return feature_maps
class VISinger2Discriminator(torch.nn.Module):
    """Combined discriminator for VISinger2 (MSD + MPD + MFD)."""

    def __init__(
        self,
        # Multi-scale discriminator related
        scales: int = 1,
        scale_downsample_pooling: str = "AvgPool1d",
        scale_downsample_pooling_params: Dict[str, Any] = {
            "kernel_size": 4,
            "stride": 2,
            "padding": 2,
        },
        scale_discriminator_params: Dict[str, Any] = {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [15, 41, 5, 3],
            "channels": 128,
            "max_downsample_channels": 1024,
            "max_groups": 16,
            "bias": True,
            "downsample_scales": [2, 2, 4, 4, 1],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
        },
        follow_official_norm: bool = True,
        # Multi-period discriminator related
        periods: List[int] = [2, 3, 5, 7, 11],
        period_discriminator_params: Dict[str, Any] = {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 32,
            "downsample_scales": [3, 3, 3, 3, 1],
            "max_downsample_channels": 1024,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
        # Multi-frequency discriminator related
        multi_freq_disc_params: Dict[str, Any] = {
            "sample_rate": 22050,
            "hop_length_factors": [4, 8, 16],
            "hidden_channels": [256, 512, 512],
            "domain": "double",
            "mel_scale": True,
            "divisors": [32, 16, 8, 4, 2, 1, 1],
            "strides": [1, 2, 1, 2, 1, 2, 1],
        },
    ):
        """
        Discriminator module for VISinger2, including MSD, MPD, and MFD.

        Args:
            scales (int): Number of scales to be used in the multi-scale discriminator.
            scale_downsample_pooling (str): Type of pooling used for downsampling.
            scale_downsample_pooling_params (Dict[str, Any]): Parameters for the
                downsampling pooling layer.
            scale_discriminator_params (Dict[str, Any]): Parameters for the scale
                discriminator.
            follow_official_norm (bool): Whether to follow the official normalization.
            periods (List[int]): List of periods to be used in the multi-period
                discriminator.
            period_discriminator_params (Dict[str, Any]): Parameters for the period
                discriminator.
            multi_freq_disc_params (Dict[str, Any]): Parameters for the
                multi-frequency discriminator.
        """
        super().__init__()
        # Multi-scale discriminator related
        self.msd = HiFiGANMultiScaleDiscriminator(
            scales=scales,
            downsample_pooling=scale_downsample_pooling,
            downsample_pooling_params=scale_downsample_pooling_params,
            discriminator_params=scale_discriminator_params,
            follow_official_norm=follow_official_norm,
        )
        # Multi-period discriminator related
        self.mpd = HiFiGANMultiPeriodDiscriminator(
            periods=periods,
            discriminator_params=period_discriminator_params,
        )
        # Multi-frequency discriminator related.
        # Work on a shallow copy: `multi_freq_disc_params` is a mutable
        # default argument, and the previous in-place `del` mutated the
        # shared default dict, leaking state across instantiations.
        multi_freq_disc_params = dict(multi_freq_disc_params)
        if "hop_lengths" not in multi_freq_disc_params:
            # Convert hop-length factors (milliseconds) into sample counts.
            multi_freq_disc_params["hop_lengths"] = [
                int(multi_freq_disc_params["sample_rate"] * factor / 1000)
                for factor in multi_freq_disc_params["hop_length_factors"]
            ]
            del multi_freq_disc_params["hop_length_factors"]
        self.mfd = MultiFrequencyDiscriminator(
            **multi_freq_disc_params,
        )

    def forward(self, x):
        """Run all sub-discriminators on the input waveform.

        Args:
            x (Tensor): Input waveform tensor (B, 1, T).

        Returns:
            list: Concatenated outputs of the MSD, MPD, and MFD branches.
        """
        msd_outs = self.msd(x)
        mpd_outs = self.mpd(x)
        mfd_outs = self.mfd(x)
        return msd_outs + mpd_outs + mfd_outs
# TODO(Yifeng): Not sure if those modules exists in espnet.
class LayerNorm(torch.nn.Module):
    """Layer normalization over the channel axis of (B, C, ...) tensors.

    Moves the channel axis last, applies ``F.layer_norm`` with learnable
    gain (`gamma`) and bias (`beta`), then moves the axis back.
    """

    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps
        # Learnable affine parameters, initialized to the identity transform.
        self.gamma = torch.nn.Parameter(torch.ones(channels))
        self.beta = torch.nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        moved = x.transpose(1, -1)
        normed = F.layer_norm(moved, (self.channels,), self.gamma, self.beta, self.eps)
        return normed.transpose(1, -1)
class ConvReluNorm(torch.nn.Module):
    """Stack of Conv1d -> LayerNorm -> ReLU/Dropout blocks.

    Hidden layers are combined by residual *averaging*, and the output
    projection is zero-initialized so the module starts near-neutral.
    """

    def __init__(
        self,
        in_channels,
        hidden_channels,
        out_channels,
        kernel_size,
        n_layers,
        dropout_rate,
    ):
        """Initialize ConvReluNorm.

        Args:
            in_channels (int): Number of input channels.
            hidden_channels (int): Number of hidden channels.
            out_channels (int): Number of output channels.
            kernel_size (int): Convolution kernel size ("same" padding).
            n_layers (int): Number of conv layers; must be larger than 1.
            dropout_rate (float): Dropout probability.
        """
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.dropout_rate = dropout_rate
        # Fix: the check requires n_layers > 1, but the old message
        # claimed "larger than 0".
        assert n_layers > 1, "Number of layers should be larger than 1."
        self.conv_layers = torch.nn.ModuleList()
        self.norm_layers = torch.nn.ModuleList()
        self.conv_layers.append(
            torch.nn.Conv1d(
                in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
            )
        )
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = torch.nn.Sequential(
            torch.nn.ReLU(), torch.nn.Dropout(dropout_rate)
        )
        for _ in range(n_layers - 1):
            self.conv_layers.append(
                torch.nn.Conv1d(
                    hidden_channels,
                    hidden_channels,
                    kernel_size,
                    padding=kernel_size // 2,
                )
            )
            self.norm_layers.append(LayerNorm(hidden_channels))
        # Zero-init so the projection initially outputs zeros.
        self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x):
        """Apply the conv stack.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).

        Returns:
            Tensor: Output tensor (B, out_channels, T).
        """
        x = self.conv_layers[0](x)
        x = self.norm_layers[0](x)
        x = self.relu_drop(x)
        for i in range(1, self.n_layers):
            x_ = self.conv_layers[i](x)
            x_ = self.norm_layers[i](x_)
            x_ = self.relu_drop(x_)
            # Residual averaging keeps the activation scale bounded.
            x = (x + x_) / 2
        x = self.proj(x)
        return x
class TorchSTFT(torch.nn.Module):
    """STFT front-end with optional mel projection and log scaling."""

    def __init__(
        self,
        sample_rate,
        fft_size,
        hop_size,
        win_size,
        normalized=False,
        domain="linear",
        mel_scale=False,
        ref_level_db=20,
        min_level_db=-100,
    ):
        """Initialize TorchSTFT.

        Args:
            sample_rate (int): Sampling rate (used only for the mel filter bank).
            fft_size (int): FFT size.
            hop_size (int): Hop size.
            win_size (int): Window length (a Hann window is used).
            normalized (bool): Whether to use a normalized STFT.
            domain (str): "linear", "log", or "double" (linear + log stacked).
            mel_scale (bool): If True, project magnitudes with a mel filter bank.
            ref_level_db (int): Reference level (dB) subtracted in log domains.
            min_level_db (int): Minimum level (dB) used for log normalization.
        """
        super().__init__()
        self.fft_size = fft_size
        self.hop_size = hop_size
        self.win_size = win_size
        self.ref_level_db = ref_level_db
        self.min_level_db = min_level_db
        self.window = torch.hann_window(win_size)
        self.normalized = normalized
        self.domain = domain
        # NOTE: n_mels == n_stft, i.e. the "mel" projection keeps the bin
        # count and only warps the frequency axis.
        self.mel_scale = (
            MelScale(
                sample_rate=sample_rate,
                n_mels=(fft_size // 2 + 1),
                n_stft=(fft_size // 2 + 1),
            )
            if mel_scale
            else None
        )

    def transform(self, x):
        """Compute magnitude (optionally mel/log scaled) and phase.

        Args:
            x (Tensor): Waveform (B, T).

        Returns:
            Tuple[Tensor, Tensor]: Magnitude and phase spectrograms of shape
            (B, fft_size // 2 + 1, n_frames); in the "double" domain the
            magnitude has twice as many channels (linear stacked with log).
        """
        x_stft = torch.stft(
            x,
            self.fft_size,
            self.hop_size,
            self.win_size,
            self.window.type_as(x),
            normalized=self.normalized,
            return_complex=True,
        )
        real = torch.real(x_stft)
        imag = torch.imag(x_stft)
        # Clamp before sqrt for numerical stability near silence.
        mag = torch.clamp(real**2 + imag**2, min=1e-7)
        mag = torch.sqrt(mag)
        phase = torch.angle(x_stft)
        if self.mel_scale is not None:
            mag = self.mel_scale(mag)
        if self.domain == "log":
            mag = 20 * torch.log10(mag) - self.ref_level_db
            mag = torch.clamp((mag - self.min_level_db) / -self.min_level_db, 0, 1)
            return mag, phase
        elif self.domain == "linear":
            return mag, phase
        elif self.domain == "double":
            log_mag = 20 * torch.log10(mag) - self.ref_level_db
            log_mag = torch.clamp(
                (log_mag - self.min_level_db) / -self.min_level_db, 0, 1
            )
            return torch.cat((mag, log_mag), dim=1), phase
        # NOTE(review): any other domain falls through and returns None;
        # kept for backward compatibility.

    def complex(self, x):
        """Return the real and imaginary parts of the STFT.

        Args:
            x (Tensor): Waveform (B, T).

        Returns:
            Tuple[Tensor, Tensor]: Real and imaginary components.
        """
        # Fix: torch.stft requires `return_complex` for real inputs on
        # recent PyTorch; the previous real-view indexing [..., 0]/[..., 1]
        # is reproduced via torch.real / torch.imag.
        x_stft = torch.stft(
            x,
            self.fft_size,
            self.hop_size,
            self.win_size,
            self.window.type_as(x),
            normalized=self.normalized,
            return_complex=True,
        )
        return torch.real(x_stft), torch.imag(x_stft)
class MelScale(torch.nn.Module):
    """Turn a normal STFT into a mel frequency STFT, using a conversion
    matrix. This uses triangular filter banks.
    User can control which device the filter bank (fb) is (e.g. fb.to(spec_f.device)).
    Args:
        n_mels (int, optional): Number of mel filterbanks. (Default: 128)
        sample_rate (int, optional): Sample rate of audio signal. (Default: 16000)
        f_min (float, optional): Minimum frequency. (Default: 0.)
        f_max (float or None, optional): Maximum frequency.
            (Default: sample_rate // 2)
        n_stft (int, optional): Number of bins in STFT. Calculated from first input
            if None is given. See n_fft in :class:Spectrogram.
            (Default: None)
    """
    __constants__ = ["n_mels", "sample_rate", "f_min", "f_max"]
    def __init__(
        self,
        n_mels: int = 128,
        sample_rate: int = 24000,
        f_min: float = 0.0,
        f_max: Optional[float] = None,
        n_stft: Optional[int] = None,
    ) -> None:
        super(MelScale, self).__init__()
        self.n_mels = n_mels
        self.sample_rate = sample_rate
        self.f_max = f_max if f_max is not None else float(sample_rate // 2)
        self.f_min = f_min
        assert f_min <= self.f_max, "Require f_min: %f < f_max: %f" % (
            f_min,
            self.f_max,
        )
        # When n_stft is unknown, register an empty buffer; the filter bank
        # is built lazily from the first input in forward().
        fb = (
            torch.empty(0)
            if n_stft is None
            else create_fb_matrix(
                n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate
            )
        )
        self.register_buffer("fb", fb)
    def forward(self, specgram: torch.Tensor) -> torch.Tensor:
        """
        Args:
            specgram (Tensor): A spectrogram STFT of dimension (..., freq, time).
        Returns:
            Tensor: Mel frequency spectrogram of size (..., n_mels, time).
        """
        # pack batch
        shape = specgram.size()
        specgram = specgram.reshape(-1, shape[-2], shape[-1])
        if self.fb.numel() == 0:
            tmp_fb = create_fb_matrix(
                specgram.size(1), self.f_min, self.f_max, self.n_mels, self.sample_rate
            )
            # Attributes cannot be reassigned outside __init__ so workaround
            self.fb.resize_(tmp_fb.size())
            self.fb.copy_(tmp_fb)
        # (channel, frequency, time).transpose(...) dot (frequency, n_mels)
        # -> (channel, time, n_mels).transpose(...)
        mel_specgram = torch.matmul(specgram.transpose(1, 2), self.fb).transpose(1, 2)
        # unpack batch
        mel_specgram = mel_specgram.reshape(shape[:-2] + mel_specgram.shape[-2:])
        return mel_specgram
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None,
) -> torch.Tensor:
"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney',
divide the triangular mel weights by the width of the mel band
(area normalization). (Default: None)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (n_freqs, n_mels)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., n_freqs), the applied result would be
A * create_fb_matrix(A.size(-1), ...).
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
# hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
# mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.min(down_slopes, up_slopes)
fb = torch.clamp(fb, 1e-6, 1)
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2 : n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
return fb
| 35,285 | 34.145418 | 88 | py |
espnet | espnet-master/espnet2/gan_svs/visinger2/ddsp.py | import math
import librosa as li
import numpy as np
import torch
import torch.fft as fft
import torch.nn as nn
from scipy.signal import get_window
def safe_log(x):
    """Natural log with a small epsilon offset to avoid log(0)."""
    eps = 1e-7
    return torch.log(x + eps)
@torch.no_grad()
def mean_std_loudness(dataset):
    """Incremental mean of per-item loudness means and stds.

    Args:
        dataset: Iterable yielding 3-tuples whose last element is a
            loudness tensor.

    Returns:
        tuple: (mean of per-item means, mean of per-item stds), computed
        with a running average so the dataset is traversed only once.
    """
    running_mean = 0
    running_std = 0
    count = 0
    for _, _, loudness in dataset:
        count += 1
        running_mean += (loudness.mean().item() - running_mean) / count
        running_std += (loudness.std().item() - running_std) / count
    return running_mean, running_std
def multiscale_fft(signal, scales, overlap):
    """Magnitude STFTs of *signal* at several resolutions.

    Args:
        signal (Tensor): Input waveform.
        scales (Iterable[int]): FFT/window sizes, one per resolution.
        overlap (float): Window overlap fraction; hop is scale * (1 - overlap).

    Returns:
        list: One magnitude spectrogram per scale.
    """
    return [
        torch.stft(
            signal,
            scale,
            int(scale * (1 - overlap)),
            scale,
            torch.hann_window(scale).to(signal),
            True,
            normalized=True,
            return_complex=True,
        ).abs()
        for scale in scales
    ]
def resample(x, factor: int):
    """Upsample frame-rate features by *factor* with Hann-window smoothing.

    The signal is zero-stuffed to the target rate and convolved with a
    Hann window of length ``2 * factor``, interpolating between frames.

    Args:
        x (Tensor): Input tensor (batch, frame, channel).
        factor (int): Integer upsampling factor.

    Returns:
        Tensor: Output tensor (batch, frame * factor, channel).
    """
    batch, n_frames, n_channels = x.shape
    flat = x.permute(0, 2, 1).reshape(batch * n_channels, 1, n_frames)
    kernel = torch.hann_window(
        factor * 2,
        dtype=flat.dtype,
        device=flat.device,
    ).reshape(1, 1, -1)
    # Zero-stuff: place each frame every `factor` samples.
    sparse = torch.zeros(flat.shape[0], flat.shape[1], factor * flat.shape[2]).to(flat)
    sparse[..., ::factor] = flat
    # Pin the very last sample to the last frame value.
    sparse[..., -1:] = flat[..., -1:]
    padded = torch.nn.functional.pad(sparse, [factor, factor])
    smoothed = torch.nn.functional.conv1d(padded, kernel)[..., :-1]
    return smoothed.reshape(batch, n_channels, factor * n_frames).permute(0, 2, 1)
def upsample(signal, factor):
    """Nearest-neighbour upsampling along the frame axis of (B, T, C)."""
    transposed = signal.permute(0, 2, 1)
    stretched = nn.functional.interpolate(
        transposed, size=transposed.shape[-1] * factor
    )
    return stretched.permute(0, 2, 1)
def remove_above_nyquist(amplitudes, pitch, sampling_rate):
    """Attenuate harmonics whose frequency exceeds the Nyquist limit.

    Harmonic k has frequency (k + 1) * pitch; amplitudes above Nyquist are
    scaled to ~1e-4 instead of exactly zero to keep gradients alive.
    """
    n_harm = amplitudes.shape[-1]
    harmonic_freqs = pitch * torch.arange(1, n_harm + 1).to(pitch)
    keep = (harmonic_freqs < sampling_rate / 2).float() + 1e-4
    return amplitudes * keep
def scale_function(x):
    """Map raw network output to a positive amplitude (DDSP scaling)."""
    scaled = torch.sigmoid(x) ** math.log(10)
    return 2 * scaled + 1e-7
def extract_loudness(signal, sampling_rate, block_size, n_fft=2048):
    """A-weighted log-magnitude loudness curve of *signal*.

    Args:
        signal (np.ndarray): Audio waveform.
        sampling_rate (int): Sampling rate in Hz.
        block_size (int): STFT hop length in samples.
        n_fft (int): FFT size.

    Returns:
        np.ndarray: Per-frame loudness; the final frame is dropped.
    """
    S = li.stft(
        signal,
        n_fft=n_fft,
        hop_length=block_size,
        win_length=n_fft,
        center=True,
    )
    S = np.log(abs(S) + 1e-7)
    # Fix: librosa >= 0.10 makes these arguments keyword-only; the previous
    # positional call li.fft_frequencies(sampling_rate, n_fft) raises a
    # TypeError on current librosa.
    f = li.fft_frequencies(sr=sampling_rate, n_fft=n_fft)
    a_weight = li.A_weighting(f)
    # Apply the per-bin A-weighting curve, then average over frequency.
    S = S + a_weight.reshape(-1, 1)
    S = np.mean(S, 0)[..., :-1]
    return S
# TODO (Yifeng): Some functions are not used here such as crepe,
# maybe we can remove them later or only import used functions.
def extract_pitch(signal, sampling_rate, block_size):
    """Extract a frame-level F0 track with CREPE.

    NOTE(review): ``crepe`` is never imported in this module (see the TODO
    above), so calling this function raises ``NameError`` unless the caller
    makes ``crepe`` available in this module's namespace first.

    Args:
        signal: Audio waveform; only its last axis (time) is used.
        sampling_rate: Sampling rate in Hz.
        block_size: Hop size in samples; one F0 value is produced per block.

    Returns:
        F0 contour of length ``signal.shape[-1] // block_size``.
    """
    length = signal.shape[-1] // block_size
    f0 = crepe.predict(
        signal,
        sampling_rate,
        step_size=int(1000 * block_size / sampling_rate),
        verbose=1,
        center=True,
        viterbi=True,
    )
    # crepe.predict returns (time, frequency, confidence, activation);
    # keep the frequency track and drop the trailing frame.
    f0 = f0[1].reshape(-1)[:-1]
    # Resample linearly if the frame count does not match exactly.
    if f0.shape[-1] != length:
        f0 = np.interp(
            np.linspace(0, 1, length, endpoint=False),
            np.linspace(0, 1, f0.shape[-1], endpoint=False),
            f0,
        )
    return f0
def mlp(in_size, hidden_size, n_layers):
    """Build a stack of Linear -> LayerNorm -> LeakyReLU blocks.

    Args:
        in_size (int): Input feature size of the first block.
        hidden_size (int): Width of every hidden (and output) layer.
        n_layers (int): Number of blocks.

    Returns:
        nn.Sequential: The assembled network.
    """
    widths = [in_size] + [hidden_size] * n_layers
    layers = []
    for src, dst in zip(widths[:-1], widths[1:]):
        layers.extend([nn.Linear(src, dst), nn.LayerNorm(dst), nn.LeakyReLU()])
    return nn.Sequential(*layers)
def gru(n_input, hidden_size):
    """Single-layer batch-first GRU for ``n_input`` concatenated features."""
    return nn.GRU(
        input_size=n_input * hidden_size,
        hidden_size=hidden_size,
        batch_first=True,
    )
def harmonic_synth(pitch, amplitudes, sampling_rate):
    """Additive synthesis of a harmonic stack.

    Args:
        pitch (Tensor): Sample-rate F0 contour (B, T, 1).
        amplitudes (Tensor): Per-harmonic amplitudes (B, T, n_harmonic).
        sampling_rate (int): Sampling rate in Hz.

    Returns:
        Tensor: Synthesized signal (B, T, 1).
    """
    n_harmonic = amplitudes.shape[-1]
    # Integrate instantaneous frequency to obtain the fundamental phase.
    phase = torch.cumsum(2 * math.pi * pitch / sampling_rate, 1)
    harmonic_phases = phase * torch.arange(1, n_harmonic + 1).to(phase)
    return (torch.sin(harmonic_phases) * amplitudes).sum(-1, keepdim=True)
def amp_to_impulse_response(amp, target_size):
    """Convert a zero-phase magnitude response to a windowed impulse response.

    Args:
        amp (Tensor): Real magnitude response (..., n_bins).
        target_size (int): Length to zero-pad the impulse response to.

    Returns:
        Tensor: Impulse response of length ``target_size`` on the last axis.
    """
    # Treat the magnitudes as a real (zero-imaginary) spectrum.
    spectrum = torch.view_as_complex(torch.stack([amp, torch.zeros_like(amp)], -1))
    ir = fft.irfft(spectrum)
    filter_size = ir.shape[-1]
    # Rotate so the symmetric response is centred before windowing.
    ir = torch.roll(ir, filter_size // 2, -1)
    ir = ir * torch.hann_window(filter_size, dtype=ir.dtype, device=ir.device)
    ir = nn.functional.pad(ir, (0, int(target_size) - int(filter_size)))
    # Undo the rotation after zero-padding.
    return torch.roll(ir, -filter_size // 2, -1)
def fft_convolve(signal, kernel):
    """Convolve ``signal`` with ``kernel`` via the FFT.

    Both inputs are zero-padded to twice their length (signal on the right,
    kernel on the left) so the circular FFT product realizes a linear
    convolution; the first half of the result is then discarded.
    Assumes ``signal`` and ``kernel`` share the same trailing length so
    their spectra align -- TODO confirm against callers.

    Args:
        signal (Tensor): Input signal (..., T).
        kernel (Tensor): Convolution kernel (..., T).

    Returns:
        Tensor: Convolved signal (..., T).
    """
    padded_signal = nn.functional.pad(signal, (0, signal.shape[-1]))
    padded_kernel = nn.functional.pad(kernel, (kernel.shape[-1], 0))
    conv = fft.irfft(fft.rfft(padded_signal) * fft.rfft(padded_kernel))
    return conv[..., conv.shape[-1] // 2 :]
def init_kernels(win_len, win_inc, fft_len, win_type=None, invers=False):
    """Build (inverse) STFT convolution kernels and the analysis window.

    Args:
        win_len (int): Window length in samples.
        win_inc (int): Hop size in samples (kept for interface compatibility;
            not used when constructing the kernels).
        fft_len (int): FFT size.
        win_type (str, optional): scipy window name; ``None`` or ``"None"``
            yields a rectangular (all-ones) window.
        invers (bool): If True, return pseudo-inverse kernels (for iSTFT).

    Returns:
        Tuple[Tensor, Tensor]: Kernel of shape (fft_len + 2, 1, win_len) and
        window of shape (1, win_len, 1), both float32.
    """
    if win_type is None or win_type == "None":
        window = np.ones(win_len)
    else:
        window = get_window(win_type, win_len, fftbins=True)  # **0.5

    # Real/imaginary DFT basis, restricted to the window length and stacked
    # so real parts come first, imaginary parts second.
    basis = np.fft.rfft(np.eye(fft_len))[:win_len]
    kernel = np.concatenate([np.real(basis), np.imag(basis)], 1).T
    if invers:
        # The pseudo-inverse yields the synthesis (iSTFT) kernels.
        kernel = np.linalg.pinv(kernel).T

    kernel = (kernel * window)[:, None, :]
    return (
        torch.from_numpy(kernel.astype(np.float32)),
        torch.from_numpy(window[None, :, None].astype(np.float32)),
    )
| 5,159 | 25.597938 | 78 | py |
espnet | espnet-master/espnet2/gan_svs/utils/expand_f0.py | # Copyright 2023 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Function to get random segments."""
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
def expand_f0(f0_frame, hop_length, method="interpolation"):
    """Expand a frame-rate F0 contour to sample rate.

    Args:
        f0_frame (Tensor): Frame-level F0 of shape (B, 1, frame_len).
        hop_length (int): Number of samples per frame.
        method (str): "interpolation" (linear upsampling) or "repeat"
            (hold each frame value for ``hop_length`` samples).

    Returns:
        Tensor: Sample-level F0, squeezed and trimmed to
        ``frame_len * hop_length`` samples.
    """
    num_frames = f0_frame.size(2)
    num_samples = num_frames * hop_length
    if method == "interpolation":
        expanded = F.interpolate(
            f0_frame, size=num_samples, mode="linear", align_corners=False
        )
    elif method == "repeat":
        # NOTE(review): this slice acts on the batch dimension and is a
        # no-op in practice, since repeat_interleave already yields exactly
        # num_samples steps along dim 2 -- kept for behavior parity.
        expanded = f0_frame.repeat_interleave(hop_length, dim=2)[:num_samples]
    else:
        raise ValueError("Invalid method. Choose either 'interpolation' or 'repeat'.")
    # Drop singleton dimensions and trim to the expected sample count.
    return expanded.squeeze()[:num_samples]
| 1,165 | 29.684211 | 86 | py |
espnet | espnet-master/espnet2/gan_svs/vits/phoneme_predictor.py | # Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
from espnet.nets.pytorch_backend.conformer.encoder import Encoder
class PhonemePredictor(torch.nn.Module):
    """Phoneme predictor module in VISinger.

    A Conformer encoder followed by a linear projection that turns
    frame-level hidden features into per-frame phoneme log-probabilities,
    laid out as (length, B, vocabs) for CTC-style losses.
    """

    def __init__(
        self,
        vocabs: int,
        hidden_channels: int = 192,
        attention_dim: int = 192,
        attention_heads: int = 2,
        linear_units: int = 768,
        blocks: int = 2,
        positionwise_layer_type: str = "conv1d",
        positionwise_conv_kernel_size: int = 3,
        positional_encoding_layer_type: str = "rel_pos",
        self_attention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        normalize_before: bool = True,
        use_macaron_style: bool = False,
        use_conformer_conv: bool = False,
        conformer_kernel_size: int = 7,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.0,
        attention_dropout_rate: float = 0.0,
    ):
        """Initialize PhonemePredictor module.

        Args:
            vocabs (int): Vocabulary size.
            hidden_channels (int): Number of channels fed to the output
                projection.
            attention_dim (int): Attention dimension of the encoder.
            attention_heads (int): Number of attention heads.
            linear_units (int): Number of units in the feed-forward layers.
            blocks (int): Number of encoder blocks.
            positionwise_layer_type (str): Position-wise layer type.
            positionwise_conv_kernel_size (int): Kernel size of the
                position-wise convolution.
            positional_encoding_layer_type (str): Positional encoding type.
            self_attention_layer_type (str): Self-attention layer type.
            activation_type (str): Activation function type.
            normalize_before (bool): Whether to normalize before each block.
            use_macaron_style (bool): Whether to use macaron-style FFN.
            use_conformer_conv (bool): Whether to use the Conformer
                convolution module.
            conformer_kernel_size (int): Conformer convolution kernel size.
            dropout_rate (float): Dropout rate.
            positional_dropout_rate (float): Positional encoding dropout rate.
            attention_dropout_rate (float): Attention dropout rate.
        """
        super().__init__()
        # idim=-1 with input_layer=None: features are fed to the encoder
        # directly, without an embedding/subsampling front-end.
        self.phoneme_predictor = Encoder(
            idim=-1,
            input_layer=None,
            attention_dim=attention_dim,
            attention_heads=attention_heads,
            linear_units=linear_units,
            num_blocks=blocks,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            attention_dropout_rate=attention_dropout_rate,
            normalize_before=normalize_before,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            macaron_style=use_macaron_style,
            pos_enc_layer_type=positional_encoding_layer_type,
            selfattention_layer_type=self_attention_layer_type,
            activation_type=activation_type,
            use_cnn_module=use_conformer_conv,
            cnn_module_kernel=conformer_kernel_size,
        )
        self.linear1 = torch.nn.Linear(hidden_channels, vocabs)

    def forward(self, x, x_mask):
        """Predict per-frame phoneme log-probabilities.

        Args:
            x (Tensor): Input tensor of shape (B, dim, length).
            x_mask (Tensor): Mask tensor for the input of shape (B, length).

        Returns:
            Tensor: Predicted phoneme tensor of shape (length, B, vocab_size).
        """
        # Mask padded frames, then switch to (B, length, dim) for the encoder.
        masked = (x * x_mask).transpose(1, 2)
        encoded, _ = self.phoneme_predictor(masked, x_mask)
        # encoded: (B, length, dim) -> vocabulary log-probabilities.
        log_probs = self.linear1(encoded).log_softmax(2)
        # (B, length, vocabs) -> (length, B, vocabs).
        return log_probs.transpose(0, 1)
| 4,212 | 39.12381 | 88 | py |
espnet | espnet-master/espnet2/gan_svs/vits/prior_decoder.py | # Copyright 2023 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
from espnet.nets.pytorch_backend.conformer.encoder import Encoder
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class PriorDecoder(torch.nn.Module):
    """Prior decoder in VISinger.

    A Conv1d prenet followed by a Conformer encoder and a 1x1 projection,
    mapping frame-level features (plus two auxiliary channels) to the prior
    distribution parameters, optionally conditioned on a global embedding.
    """

    def __init__(
        self,
        out_channels: int = 192 * 2,
        attention_dim: int = 192,
        attention_heads: int = 2,
        linear_units: int = 768,
        blocks: int = 6,
        positionwise_layer_type: str = "conv1d",
        positionwise_conv_kernel_size: int = 3,
        positional_encoding_layer_type: str = "rel_pos",
        self_attention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        normalize_before: bool = True,
        use_macaron_style: bool = False,
        use_conformer_conv: bool = False,
        conformer_kernel_size: int = 7,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.0,
        attention_dropout_rate: float = 0.0,
        global_channels: int = 0,
    ):
        """
        Initialize prior decoder module.
        Args:
            out_channels (int): Output channels of the prior decoder. Defaults to 384.
            attention_dim (int): Dimension of the attention mechanism. Defaults to 192.
            attention_heads (int): Number of attention heads. Defaults to 2.
            linear_units (int): Number of units in the linear layer. Defaults to 768.
            blocks (int): Number of blocks in the encoder. Defaults to 6.
            positionwise_layer_type (str): Type of the positionwise layer.
                Defaults to "conv1d".
            positionwise_conv_kernel_size (int): Kernel size of the positionwise
                convolutional layer. Defaults to 3.
            positional_encoding_layer_type (str): Type of positional encoding layer.
                Defaults to "rel_pos".
            self_attention_layer_type (str): Type of self-attention layer.
                Defaults to "rel_selfattn".
            activation_type (str): Type of activation. Defaults to "swish".
            normalize_before (bool): Flag for normalization. Defaults to True.
            use_macaron_style (bool): Flag for macaron style. Defaults to False.
            use_conformer_conv (bool): Flag for using conformer convolution.
                Defaults to False.
            conformer_kernel_size (int): Kernel size for conformer convolution.
                Defaults to 7.
            dropout_rate (float): Dropout rate. Defaults to 0.1.
            positional_dropout_rate (float): Dropout rate for positional encoding.
                Defaults to 0.0.
            attention_dropout_rate (float): Dropout rate for attention.
                Defaults to 0.0.
            global_channels (int): Number of global channels. Defaults to 0;
                <= 0 disables global conditioning.
        """
        super().__init__()
        # Input has attention_dim + 2 channels; the two extra channels are
        # auxiliary features concatenated by the caller -- TODO confirm which.
        self.prenet = torch.nn.Conv1d(attention_dim + 2, attention_dim, 3, padding=1)
        # idim=-1 with input_layer=None: features are fed in directly.
        self.decoder = Encoder(
            idim=-1,
            input_layer=None,
            attention_dim=attention_dim,
            attention_heads=attention_heads,
            linear_units=linear_units,
            num_blocks=blocks,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            attention_dropout_rate=attention_dropout_rate,
            normalize_before=normalize_before,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            macaron_style=use_macaron_style,
            pos_enc_layer_type=positional_encoding_layer_type,
            selfattention_layer_type=self_attention_layer_type,
            activation_type=activation_type,
            use_cnn_module=use_conformer_conv,
            cnn_module_kernel=conformer_kernel_size,
        )
        self.proj = torch.nn.Conv1d(attention_dim, out_channels, 1)
        if global_channels > 0:
            # 1x1 conv that projects the global embedding into the prenet space.
            self.conv = torch.nn.Conv1d(global_channels, attention_dim, 1)

    def forward(self, x, x_lengths, g=None):
        """
        Forward pass of the PriorDecoder module.
        Args:
            x (Tensor): Input tensor (B, attention_dim + 2, T).
            x_lengths (Tensor): Length tensor (B,).
            g (Tensor): Tensor for multi-singer. (B, global_channels, 1)
        Returns:
            Tensor: Output tensor (B, out_channels, T).
            Tensor: Output mask tensor (B, 1, T).
        """
        # Build a (B, 1, T) validity mask from the per-utterance lengths.
        x_mask = (
            make_non_pad_mask(x_lengths)
            .to(
                device=x.device,
                dtype=x.dtype,
            )
            .unsqueeze(1)
        )
        x = self.prenet(x) * x_mask
        # multi-singer
        if g is not None:
            # Detach so gradients do not flow back into the global embedding.
            g = torch.detach(g)
            x = x + self.conv(g)
            x = x * x_mask
        # Encoder operates on (B, T, C); transpose in and back out.
        x = x.transpose(1, 2)
        x, _ = self.decoder(x, x_mask)
        x = x.transpose(1, 2)
        bn = self.proj(x) * x_mask
        return bn, x_mask
| 5,294 | 40.367188 | 87 | py |
espnet | espnet-master/espnet2/gan_svs/vits/modules.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
class Projection(torch.nn.Module):
    """Project hidden features into mean and log-scale of a distribution.

    A 1x1 convolution emits ``2 * out_channels`` channels, which are split
    into the mean and log-scale halves.
    """

    def __init__(self, hidden_channels, out_channels):
        """Initialize the projection layer.

        Args:
            hidden_channels (int): Number of input channels.
            out_channels (int): Number of channels in each output half.
        """
        super().__init__()
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_mask):
        """Compute the masked projection and split it into mean / log-scale.

        Args:
            x (Tensor): Input features (B, hidden_channels, T).
            x_mask (Tensor): Mask tensor (B, 1, T).

        Returns:
            Tuple[Tensor, Tensor]: Mean and log-scale, each (B, out_channels, T).
        """
        stats = self.proj(x) * x_mask
        mean, log_scale = stats.split(self.out_channels, dim=1)
        return mean, log_scale
def sequence_mask(length, max_length=None):
    """Create a boolean mask from per-sequence lengths.

    Args:
        length (Tensor): Lengths of each sequence (B,).
        max_length (int, optional): Mask width; defaults to ``length.max()``.

    Returns:
        Tensor: Boolean mask of shape (B, max_length); True marks valid steps.
    """
    if max_length is None:
        max_length = length.max()
    positions = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return positions.unsqueeze(0) < length.unsqueeze(1)
| 873 | 29.137931 | 74 | py |
espnet | espnet-master/espnet2/gan_svs/vits/pitch_predictor.py | # Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
from espnet.nets.pytorch_backend.conformer.encoder import Encoder
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class Decoder(torch.nn.Module):
    """Pitch or Mel decoder module in VISinger 2.

    A Conv1d prenet, a Conformer encoder, and a 1x1 output projection,
    optionally conditioned on a global (e.g. singer) embedding.
    """
    def __init__(
        self,
        out_channels: int = 192,
        attention_dim: int = 192,
        attention_heads: int = 2,
        linear_units: int = 768,
        blocks: int = 6,
        pw_layer_type: str = "conv1d",
        pw_conv_kernel_size: int = 3,
        pos_enc_layer_type: str = "rel_pos",
        self_attention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        normalize_before: bool = True,
        use_macaron_style: bool = False,
        use_conformer_conv: bool = False,
        conformer_kernel_size: int = 7,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.0,
        attention_dropout_rate: float = 0.0,
        global_channels: int = -1,
    ):
        """Initialize the Decoder module.

        Args:
            out_channels (int): The output dimension of the module.
            attention_dim (int): The dimension of the attention mechanism.
            attention_heads (int): The number of attention heads.
            linear_units (int): The number of units in the linear layer.
            blocks (int): The number of encoder blocks.
            pw_layer_type (str): The type of position-wise layer to use.
            pw_conv_kernel_size (int): The kernel size of the position-wise
                convolutional layer.
            pos_enc_layer_type (str): The type of positional encoding layer to use.
            self_attention_layer_type (str): The type of self-attention layer to use.
            activation_type (str): The type of activation function to use.
            normalize_before (bool): Whether to normalize the data before the
                position-wise layer or after.
            use_macaron_style (bool): Whether to use the macaron style or not.
            use_conformer_conv (bool): Whether to use Conformer style conv or not.
            conformer_kernel_size (int): The kernel size of the conformer
                convolutional layer.
            dropout_rate (float): The dropout rate to use.
            positional_dropout_rate (float): The positional dropout rate to use.
            attention_dropout_rate (float): The attention dropout rate to use.
            global_channels (int): The number of channels to use for global
                conditioning; <= 0 disables it.
        """
        super().__init__()
        # Input has attention_dim + 2 channels; the two extra channels are
        # auxiliary features concatenated by the caller -- TODO confirm which.
        self.prenet = torch.nn.Conv1d(attention_dim + 2, attention_dim, 3, padding=1)
        # idim=-1 with input_layer=None: features are fed in directly.
        self.decoder = Encoder(
            idim=-1,
            input_layer=None,
            attention_dim=attention_dim,
            attention_heads=attention_heads,
            linear_units=linear_units,
            num_blocks=blocks,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            attention_dropout_rate=attention_dropout_rate,
            normalize_before=normalize_before,
            positionwise_layer_type=pw_layer_type,
            positionwise_conv_kernel_size=pw_conv_kernel_size,
            macaron_style=use_macaron_style,
            pos_enc_layer_type=pos_enc_layer_type,
            selfattention_layer_type=self_attention_layer_type,
            activation_type=activation_type,
            use_cnn_module=use_conformer_conv,
            cnn_module_kernel=conformer_kernel_size,
        )
        self.proj = torch.nn.Conv1d(attention_dim, out_channels, 1)
        if global_channels > 0:
            # 1x1 conv projecting the global embedding into the prenet space.
            self.global_conv = torch.nn.Conv1d(global_channels, attention_dim, 1)
    def forward(self, x, x_lengths, g=None):
        """
        Forward pass of the Decoder.
        Args:
            x (Tensor): Input tensor (B, 2 + attention_dim, T).
            x_lengths (Tensor): Length tensor (B,).
            g (Tensor, optional): Global conditioning tensor (B, global_channels, 1).
        Returns:
            Tensor: Output tensor (B, 1, T).
            Tensor: Output mask (B, 1, T).
        """
        # Build a (B, 1, T) validity mask from the per-utterance lengths.
        x_mask = (
            make_non_pad_mask(x_lengths)
            .to(
                device=x.device,
                dtype=x.dtype,
            )
            .unsqueeze(1)
        )
        x = self.prenet(x) * x_mask
        if g is not None:
            # NOTE(review): unlike PriorDecoder/DurationPredictor, g is NOT
            # detached here, so gradients can reach the global embedding --
            # confirm whether this asymmetry is intentional.
            x = x + self.global_conv(g)
        # Encoder operates on (B, T, C); transpose in and back out.
        x = x.transpose(1, 2)
        x, _ = self.decoder(x, x_mask)
        x = x.transpose(1, 2)
        x = self.proj(x) * x_mask
        return x, x_mask
| 4,739 | 38.173554 | 85 | py |
espnet | espnet-master/espnet2/gan_svs/vits/duration_predictor.py | # Copyright 2021 Tomoki Hayashi
# Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Duration predictor modules in VISinger.
"""
import torch
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class DurationPredictor(torch.nn.Module):
    """Duration predictor in VISinger.

    Three masked Conv1d -> ReLU -> LayerNorm -> Dropout stages followed by
    a 1x1 projection emitting two channels per frame, optionally conditioned
    on a (detached) global embedding.
    """

    def __init__(
        self,
        channels,
        filter_channels,
        kernel_size,
        dropout_rate,
        global_channels=0,
    ):
        """Initialize duration predictor module.

        Args:
            channels (int): Number of input channels.
            filter_channels (int): Number of filter channels.
            kernel_size (int): Size of the convolutional kernel.
            dropout_rate (float): Dropout rate.
            global_channels (int, optional): Number of global conditioning
                channels; 0 disables global conditioning.
        """
        super().__init__()
        self.in_channels = channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.dropout_rate = dropout_rate
        pad = kernel_size // 2  # "same" padding for odd kernel sizes
        self.drop = torch.nn.Dropout(dropout_rate)
        self.conv_1 = torch.nn.Conv1d(
            channels, filter_channels, kernel_size, padding=pad
        )
        self.norm_1 = LayerNorm(filter_channels, dim=1)
        self.conv_2 = torch.nn.Conv1d(
            filter_channels, filter_channels, kernel_size, padding=pad
        )
        self.norm_2 = LayerNorm(filter_channels, dim=1)
        self.conv_3 = torch.nn.Conv1d(
            filter_channels, filter_channels, kernel_size, padding=pad
        )
        self.norm_3 = LayerNorm(filter_channels, dim=1)
        self.proj = torch.nn.Conv1d(filter_channels, 2, 1)
        if global_channels > 0:
            self.conv = torch.nn.Conv1d(global_channels, channels, 1)

    def forward(self, x, x_mask, g=None):
        """Predict durations from frame-level features.

        Args:
            x (Tensor): Input tensor (B, in_channels, T).
            x_mask (Tensor): Mask tensor (B, 1, T).
            g (Tensor, optional): Global condition tensor
                (B, global_channels, 1).

        Returns:
            Tensor: Predicted duration tensor (B, 2, T).
        """
        # multi-singer
        if g is not None:
            # Detach so gradients do not flow back into the global embedding.
            x = x + self.conv(torch.detach(g))
        for conv, norm in (
            (self.conv_1, self.norm_1),
            (self.conv_2, self.norm_2),
            (self.conv_3, self.norm_3),
        ):
            x = self.drop(norm(torch.relu(conv(x * x_mask))))
        return self.proj(x * x_mask) * x_mask
| 2,729 | 29 | 84 | py |
espnet | espnet-master/espnet2/gan_svs/vits/vits.py | # Copyright 2021 Tomoki Hayashi
# Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""VITS/VISinger module for GAN-SVS task."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any, Dict, Optional
import torch
from torch.nn import functional as F
from typeguard import check_argument_types
from espnet2.gan_svs.abs_gan_svs import AbsGANSVS
from espnet2.gan_svs.avocodo.avocodo import (
SBD,
AvocodoDiscriminator,
AvocodoDiscriminatorPlus,
CoMBD,
)
from espnet2.gan_svs.visinger2.visinger2_vocoder import VISinger2Discriminator
from espnet2.gan_svs.vits.generator import VISingerGenerator
from espnet2.gan_tts.hifigan import (
HiFiGANMultiPeriodDiscriminator,
HiFiGANMultiScaleDiscriminator,
HiFiGANMultiScaleMultiPeriodDiscriminator,
HiFiGANPeriodDiscriminator,
HiFiGANScaleDiscriminator,
)
from espnet2.gan_tts.hifigan.loss import (
DiscriminatorAdversarialLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
MelSpectrogramLoss,
)
from espnet2.gan_tts.utils import get_segments
from espnet2.gan_tts.vits.loss import KLDivergenceLoss, KLDivergenceLossWithoutFlow
from espnet2.torch_utils.device_funcs import force_gatherable
# Registry of generator classes selectable via the ``generator_type`` option.
# VISinger and VISinger 2 currently share the same generator implementation.
# NOTE(review): "GENERATERS" is misspelled; renaming would break any code
# that references this name, so it is left as-is.
AVAILABLE_GENERATERS = {
    "visinger": VISingerGenerator,
    # TODO(yifeng): add more generators
    "visinger2": VISingerGenerator,
    # "pisinger": PISingerGenerator,
}
# Registry of discriminator classes selectable via ``discriminator_type``.
AVAILABLE_DISCRIMINATORS = {
    "hifigan_period_discriminator": HiFiGANPeriodDiscriminator,
    "hifigan_scale_discriminator": HiFiGANScaleDiscriminator,
    "hifigan_multi_period_discriminator": HiFiGANMultiPeriodDiscriminator,
    "hifigan_multi_scale_discriminator": HiFiGANMultiScaleDiscriminator,
    "hifigan_multi_scale_multi_period_discriminator": HiFiGANMultiScaleMultiPeriodDiscriminator,  # NOQA
    "combd": CoMBD,
    "sbd": SBD,
    "avocodo": AvocodoDiscriminator,
    "visinger2": VISinger2Discriminator,
    "avocodo_plus": AvocodoDiscriminatorPlus,
}
# torch.cuda.amp.autocast exists only in torch>=1.6; provide a no-op
# context-manager fallback so call sites work on older versions.
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):  # NOQA
        yield
class VITS(AbsGANSVS):
"""VITS module (generator + discriminator).
This is a module of VITS described in `Conditional Variational Autoencoder
with Adversarial Learning for End-to-End Text-to-Speech`_.
.. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
Text-to-Speech`: https://arxiv.org/abs/2006.04558
"""
def __init__(
self,
# generator related
idim: int,
odim: int,
sampling_rate: int = 22050,
generator_type: str = "visinger",
vocoder_generator_type: str = "hifigan",
generator_params: Dict[str, Any] = {
"hidden_channels": 192,
"spks": None,
"langs": None,
"spk_embed_dim": None,
"global_channels": -1,
"segment_size": 32,
"text_encoder_attention_heads": 2,
"text_encoder_ffn_expand": 4,
"text_encoder_blocks": 6,
"text_encoder_positionwise_layer_type": "conv1d",
"text_encoder_positionwise_conv_kernel_size": 1,
"text_encoder_positional_encoding_layer_type": "rel_pos",
"text_encoder_self_attention_layer_type": "rel_selfattn",
"text_encoder_activation_type": "swish",
"text_encoder_normalize_before": True,
"text_encoder_dropout_rate": 0.1,
"text_encoder_positional_dropout_rate": 0.0,
"text_encoder_attention_dropout_rate": 0.0,
"text_encoder_conformer_kernel_size": 7,
"use_macaron_style_in_text_encoder": True,
"use_conformer_conv_in_text_encoder": True,
"decoder_kernel_size": 7,
"decoder_channels": 512,
"decoder_upsample_scales": [8, 8, 2, 2],
"decoder_upsample_kernel_sizes": [16, 16, 4, 4],
"decoder_resblock_kernel_sizes": [3, 7, 11],
"decoder_resblock_dilations": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
"projection_filters": [0, 1, 1, 1],
"projection_kernels": [0, 5, 7, 11],
"use_weight_norm_in_decoder": True,
"posterior_encoder_kernel_size": 5,
"posterior_encoder_layers": 16,
"posterior_encoder_stacks": 1,
"posterior_encoder_base_dilation": 1,
"posterior_encoder_dropout_rate": 0.0,
"use_weight_norm_in_posterior_encoder": True,
"flow_flows": 4,
"flow_kernel_size": 5,
"flow_base_dilation": 1,
"flow_layers": 4,
"flow_dropout_rate": 0.0,
"use_weight_norm_in_flow": True,
"use_only_mean_in_flow": True,
"expand_f0_method": "repeat",
"use_phoneme_predictor": False,
},
# discriminator related
discriminator_type: str = "hifigan_multi_scale_multi_period_discriminator",
discriminator_params: Dict[str, Any] = {
"hifigan_multi_scale_multi_period_discriminator": {
"scales": 1,
"scale_downsample_pooling": "AvgPool1d",
"scale_downsample_pooling_params": {
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
"scale_discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 128,
"max_downsample_channels": 1024,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 4, 4, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
"follow_official_norm": False,
"periods": [2, 3, 5, 7, 11],
"period_discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 32,
"downsample_scales": [3, 3, 3, 3, 1],
"max_downsample_channels": 1024,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
},
"avocodo": {
"combd": {
"combd_h_u": [
[16, 64, 256, 1024, 1024, 1024],
[16, 64, 256, 1024, 1024, 1024],
[16, 64, 256, 1024, 1024, 1024],
],
"combd_d_k": [
[7, 11, 11, 11, 11, 5],
[11, 21, 21, 21, 21, 5],
[15, 41, 41, 41, 41, 5],
],
"combd_d_s": [
[1, 1, 4, 4, 4, 1],
[1, 1, 4, 4, 4, 1],
[1, 1, 4, 4, 4, 1],
],
"combd_d_d": [
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
],
"combd_d_g": [
[1, 4, 16, 64, 256, 1],
[1, 4, 16, 64, 256, 1],
[1, 4, 16, 64, 256, 1],
],
"combd_d_p": [
[3, 5, 5, 5, 5, 2],
[5, 10, 10, 10, 10, 2],
[7, 20, 20, 20, 20, 2],
],
"combd_op_f": [1, 1, 1],
"combd_op_k": [3, 3, 3],
"combd_op_g": [1, 1, 1],
},
"sbd": {
"use_sbd": True,
"sbd_filters": [
[64, 128, 256, 256, 256],
[64, 128, 256, 256, 256],
[64, 128, 256, 256, 256],
[32, 64, 128, 128, 128],
],
"sbd_strides": [
[1, 1, 3, 3, 1],
[1, 1, 3, 3, 1],
[1, 1, 3, 3, 1],
[1, 1, 3, 3, 1],
],
"sbd_kernel_sizes": [
[[7, 7, 7], [7, 7, 7], [7, 7, 7], [7, 7, 7], [7, 7, 7]],
[[5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5]],
[[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
[[5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5], [5, 5, 5]],
],
"sbd_dilations": [
[[5, 7, 11], [5, 7, 11], [5, 7, 11], [5, 7, 11], [5, 7, 11]],
[[3, 5, 7], [3, 5, 7], [3, 5, 7], [3, 5, 7], [3, 5, 7]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3], [2, 3, 5], [2, 3, 5]],
],
"sbd_band_ranges": [[0, 6], [0, 11], [0, 16], [0, 64]],
"sbd_transpose": [False, False, False, True],
"pqmf_config": {
"sbd": [16, 256, 0.03, 10.0],
"fsbd": [64, 256, 0.1, 9.0],
},
},
"pqmf_config": {
"lv1": [2, 256, 0.25, 10.0],
"lv2": [4, 192, 0.13, 10.0],
},
},
},
# loss related
generator_adv_loss_params: Dict[str, Any] = {
"average_by_discriminators": False,
"loss_type": "mse",
},
discriminator_adv_loss_params: Dict[str, Any] = {
"average_by_discriminators": False,
"loss_type": "mse",
},
feat_match_loss_params: Dict[str, Any] = {
"average_by_discriminators": False,
"average_by_layers": False,
"include_final_outputs": True,
},
mel_loss_params: Dict[str, Any] = {
"fs": 22050,
"n_fft": 1024,
"hop_length": 256,
"win_length": None,
"window": "hann",
"n_mels": 80,
"fmin": 0,
"fmax": None,
"log_base": None,
},
lambda_adv: float = 1.0,
lambda_mel: float = 45.0,
lambda_feat_match: float = 2.0,
lambda_dur: float = 0.1,
lambda_kl: float = 1.0,
lambda_pitch: float = 10.0,
lambda_phoneme: float = 1.0,
lambda_c_yin: float = 45.0,
cache_generator_outputs: bool = True,
):
"""Initialize VITS module.
Args:
idim (int): Input vocabrary size.
odim (int): Acoustic feature dimension. The actual output channels will
be 1 since VITS is the end-to-end text-to-wave model but for the
compatibility odim is used to indicate the acoustic feature dimension.
sampling_rate (int): Sampling rate, not used for the training but it will
be referred in saving waveform during the inference.
generator_type (str): Generator type.
vocoder_generator_type (str): Type of vocoder generator to use in the model.
generator_params (Dict[str, Any]): Parameter dict for generator.
discriminator_type (str): Discriminator type.
discriminator_params (Dict[str, Any]): Parameter dict for discriminator.
generator_adv_loss_params (Dict[str, Any]): Parameter dict for generator
adversarial loss.
discriminator_adv_loss_params (Dict[str, Any]): Parameter dict for
discriminator adversarial loss.
feat_match_loss_params (Dict[str, Any]): Parameter dict for feat match loss.
mel_loss_params (Dict[str, Any]): Parameter dict for mel loss.
lambda_adv (float): Loss scaling coefficient for adversarial loss.
lambda_mel (float): Loss scaling coefficient for mel spectrogram loss.
lambda_feat_match (float): Loss scaling coefficient for feat match loss.
lambda_dur (float): Loss scaling coefficient for duration loss.
lambda_kl (float): Loss scaling coefficient for KL divergence loss.
lambda_pitch (float): Loss scaling coefficient for pitch loss.
lambda_phoneme (float): Loss scaling coefficient for phoneme loss.
lambda_c_yin (float): Loss scaling coefficient for yin loss.
cache_generator_outputs (bool): Whether to cache generator outputs.
"""
assert check_argument_types()
super().__init__()
# define modules
generator_class = AVAILABLE_GENERATERS[generator_type]
if "visinger" in generator_type or "pisinger" in generator_type:
# NOTE(kan-bayashi): Update parameters for the compatibility.
# The idim and odim is automatically decided from input data,
# where idim represents #vocabularies and odim represents
# the input acoustic feature dimension.
generator_params.update(vocabs=idim, aux_channels=odim)
self.generator_type = generator_type
self.use_flow = True if generator_params["flow_flows"] > 0 else False
self.use_phoneme_predictor = generator_params["use_phoneme_predictor"]
self.discriminator_type = discriminator_type
if "avocodo" in discriminator_type:
use_avocodo = True
vocoder_generator_type = "avocodo"
else:
use_avocodo = False
self.use_avocodo = use_avocodo
self.vocoder_generator_type = vocoder_generator_type
generator_params.update(generator_type=generator_type)
generator_params.update(vocoder_generator_type=vocoder_generator_type)
generator_params.update(fs=mel_loss_params["fs"])
generator_params.update(hop_length=mel_loss_params["hop_length"])
generator_params.update(win_length=mel_loss_params["win_length"])
generator_params.update(n_fft=mel_loss_params["n_fft"])
if vocoder_generator_type == "uhifigan" and use_avocodo:
generator_params.update(use_avocodo=use_avocodo)
self.generator = generator_class(
**generator_params,
)
discriminator_class = AVAILABLE_DISCRIMINATORS[self.discriminator_type]
if use_avocodo:
discriminator_params.update(
projection_filters=generator_params["projection_filters"]
)
discriminator_params["sbd"].update(
segment_size=generator_params["segment_size"]
* mel_loss_params["hop_length"]
)
if "visinger2" in discriminator_type:
discriminator_params["multi_freq_disc_params"].update(
sample_rate=sampling_rate
)
self.discriminator = discriminator_class(
**discriminator_params,
)
self.generator_adv_loss = GeneratorAdversarialLoss(
**generator_adv_loss_params,
)
self.discriminator_adv_loss = DiscriminatorAdversarialLoss(
**discriminator_adv_loss_params,
)
self.feat_match_loss = FeatureMatchLoss(
**feat_match_loss_params,
)
self.mel_loss = MelSpectrogramLoss(
**mel_loss_params,
)
if self.use_flow:
self.kl_loss = KLDivergenceLoss()
else:
self.kl_loss = KLDivergenceLossWithoutFlow()
self.ctc_loss = torch.nn.CTCLoss(idim - 1, reduction="mean")
self.mse_loss = torch.nn.MSELoss()
# coefficients
self.lambda_adv = lambda_adv
self.lambda_mel = lambda_mel
self.lambda_kl = lambda_kl
self.lambda_feat_match = lambda_feat_match
self.lambda_dur = lambda_dur
self.lambda_pitch = lambda_pitch
self.lambda_phoneme = lambda_phoneme
self.lambda_c_yin = lambda_c_yin
# cache
self.cache_generator_outputs = cache_generator_outputs
self._cache = None
# store sampling rate for saving wav file
# (not used for the training)
self.fs = sampling_rate
# store parameters for test compatibility
self.spks = self.generator.spks
self.langs = self.generator.langs
self.spk_embed_dim = self.generator.spk_embed_dim
@property
def require_raw_singing(self):
"""Return whether or not singing is required."""
return True
@property
def require_vocoder(self):
"""Return whether or not vocoder is required."""
return False
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
feats: torch.Tensor,
feats_lengths: torch.Tensor,
singing: torch.Tensor,
singing_lengths: torch.Tensor,
label: Optional[Dict[str, torch.Tensor]] = None,
label_lengths: Optional[Dict[str, torch.Tensor]] = None,
melody: Optional[Dict[str, torch.Tensor]] = None,
pitch: torch.LongTensor = None,
ying: torch.Tensor = None,
duration: Optional[Dict[str, torch.Tensor]] = None,
slur: torch.LongTensor = None,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
"""Perform generator forward.
Args:
text (LongTensor): Batch of padded character ids (B, T_text).
text_lengths (LongTensor): Batch of lengths of each input batch (B,).
feats (Tensor): Batch of padded target features (B, Lmax, odim).
feats_lengths (LongTensor): Batch of the lengths of each target (B,).
singing (Tensor): Singing waveform tensor (B, T_wav).
singing_lengths (Tensor): Singing length tensor (B,).
label (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of padded label ids (B, T_text).
label_lengths (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of the lengths of padded label ids (B, ).
melody (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of padded melody (B, T_text).
pitch (FloatTensor): Batch of padded f0 (B, T_feats).
duration (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
value (LongTensor): Batch of padded duration (B, T_text).
slur (FloatTensor): Batch of padded slur (B, T_text).
spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
lids (Optional[Tensor]): Batch of language IDs (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
score_dur = duration["score_syb"]
gt_dur = duration["lab"]
label = label["lab"]
label_lengths = label_lengths["lab"]
melody = melody["lab"]
if forward_generator:
return self._forward_generator(
text=text,
text_lengths=text_lengths,
feats=feats,
feats_lengths=feats_lengths,
singing=singing,
singing_lengths=singing_lengths,
label=label,
label_lengths=label_lengths,
melody=melody,
gt_dur=gt_dur,
score_dur=score_dur,
slur=slur,
pitch=pitch,
ying=ying,
sids=sids,
spembs=spembs,
lids=lids,
)
else:
return self._forward_discrminator(
text=text,
text_lengths=text_lengths,
feats=feats,
feats_lengths=feats_lengths,
singing=singing,
singing_lengths=singing_lengths,
label=label,
label_lengths=label_lengths,
melody=melody,
gt_dur=gt_dur,
score_dur=score_dur,
slur=slur,
pitch=pitch,
ying=ying,
sids=sids,
spembs=spembs,
lids=lids,
)
    def _forward_generator(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        singing: torch.Tensor,
        singing_lengths: torch.Tensor,
        label: torch.Tensor = None,
        label_lengths: torch.Tensor = None,
        melody: torch.Tensor = None,
        gt_dur: torch.Tensor = None,
        score_dur: torch.Tensor = None,
        slur: torch.Tensor = None,
        pitch: torch.Tensor = None,
        ying: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
    ) -> Dict[str, Any]:
        """Perform generator forward.

        Runs the generator, scores the generated waveform segment with the
        discriminator, and sums the weighted generator-side losses (mel, KL,
        adversarial, feature matching, pitch, durations, plus optional CTC,
        DDSP-mel, AM-mel, and yin-decoder terms depending on the configured
        generator / vocoder / discriminator types).

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            feats (Tensor): Feature tensor (B, T_feats, aux_channels).
            feats_lengths (Tensor): Feature length tensor (B,).
            singing (Tensor): Singing waveform tensor (B, T_wav).
            singing_lengths (Tensor): Singing length tensor (B,).
            label (Tensor): Label index tensor (B, T_text).
            label_lengths (Tensor): Label length tensor (B,).
            melody (Tensor): Melody index tensor (B, T_text).
            gt_dur (Tensor): Groundtruth duration tensor (B, T_text).
            score_dur (Tensor): Score duration tensor (B, T_text).
            slur (Tensor): Slur index tensor (B, T_text).
            pitch (FloatTensor): Batch of padded f0 (B, T_feats).
            ying (Optional[Tensor]): Yin pitch tensor (B, T_feats).
            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).

        Returns:
            Dict[str, Any]:
                * loss (Tensor): Loss scalar tensor.
                * stats (Dict[str, float]): Statistics to be monitored.
                * weight (Tensor): Weight tensor to summarize losses.
                * optim_idx (int): Optimizer index (0 for G and 1 for D).
        """
        # setup: match the layouts expected by the sub-modules
        batch_size = text.size(0)
        feats = feats.transpose(1, 2)  # (B, T_feats, aux) -> (B, aux, T_feats)
        singing = singing.unsqueeze(1)  # (B, T_wav) -> (B, 1, T_wav)
        # calculate generator outputs (reuse cached outputs from a previous
        # turn when output caching is enabled)
        reuse_cache = True
        if not self.cache_generator_outputs or self._cache is None:
            reuse_cache = False
            outs = self.generator(
                text=text,
                text_lengths=text_lengths,
                feats=feats,
                feats_lengths=feats_lengths,
                label=label,
                label_lengths=label_lengths,
                melody=melody,
                gt_dur=gt_dur,
                score_dur=score_dur,
                slur=slur,
                pitch=pitch,
                ying=ying,
                sids=sids,
                spembs=spembs,
                lids=lids,
            )
        else:
            outs = self._cache
        # store cache so the discriminator turn can reuse these outputs
        if self.training and self.cache_generator_outputs and not reuse_cache:
            self._cache = outs
        # parse outputs: the shape of ``outs`` depends on generator_type and
        # vocoder_generator_type, so unpack type by type
        if "visinger" in self.generator_type:
            singing_hat_, start_idxs, _, z_mask, outs_, *extra_outs = outs
            if (
                self.vocoder_generator_type == "visinger2"
                and self.generator_type == "visinger2"
            ):
                singing_hat_ddsp_, predict_mel = extra_outs
            elif self.vocoder_generator_type == "visinger2":
                singing_hat_ddsp_ = extra_outs[0]
            elif self.generator_type == "visinger2":
                predict_mel = extra_outs[0]
        elif "pisinger" in self.generator_type:
            if self.vocoder_generator_type == "visinger2":
                (
                    singing_hat_,
                    start_idxs,
                    _,
                    z_mask,
                    outs_,
                    singing_hat_ddsp_,
                    outs2_,
                ) = outs
            else:
                singing_hat_, start_idxs, _, z_mask, outs_, outs2_ = outs
            # pisinger-only yin-related outputs
            (
                yin_gt_crop,
                yin_gt_shifted_crop,
                yin_dec_crop,
                z_yin_crop_shifted,
                scope_shift,
            ) = outs2_
        # NOTE: the ``gt_dur`` argument is intentionally overwritten here by
        # the generator-aligned duration returned in ``outs_``
        (
            _,
            z_p,
            m_p,
            logs_p,
            m_q,
            logs_q,
            pred_pitch,
            gt_pitch,
            pred_dur,
            gt_dur,
            log_probs,
        ) = outs_
        # crop the ground-truth waveform to the same random segment the
        # generator decoded (frame indices -> sample indices)
        singing_ = get_segments(
            x=singing,
            start_idxs=start_idxs * self.generator.upsample_factor,
            segment_size=self.generator.segment_size * self.generator.upsample_factor,
        )
        # calculate discriminator outputs
        if "avocodo" in self.discriminator_type:
            p, p_hat, fmaps_real, fmaps_fake = self.discriminator(
                singing_, singing_hat_
            )
        else:
            p_hat = self.discriminator(singing_hat_)
            with torch.no_grad():
                # do not store discriminator gradient in generator turn
                p = self.discriminator(singing_)
        # calculate losses in full precision even under AMP
        with autocast(enabled=False):
            if "pisinger" in self.generator_type:
                yin_dec_loss = (
                    F.l1_loss(yin_gt_shifted_crop, yin_dec_crop) * self.lambda_c_yin
                )
                # TODO(yifeng): add yin shift loss later
                # loss_yin_shift = (
                #     F.l1_loss(torch.exp(-yin_gt_crop), torch.exp(-yin_hat_crop))
                #     * self.lambda_c_yin
                #     + F.l1_loss(
                #         torch.exp(-yin_hat_shifted),
                #         torch.exp(-(torch.chunk(yin_hat_crop, 2, dim=0)[1])),
                #     )
                #     * self.lambda_c_yin
                # )
            if self.use_avocodo:
                # avocodo produces multiple outputs; the last is the full-band one
                mel_loss = self.mel_loss(singing_hat_[-1], singing_)
            elif self.vocoder_generator_type == "visinger2":
                mel_loss = self.mel_loss(singing_hat_, singing_)
                ddsp_mel_loss = self.mel_loss(singing_hat_ddsp_, singing_)
            else:
                mel_loss = self.mel_loss(singing_hat_, singing_)
            if self.use_flow:
                kl_loss = self.kl_loss(z_p, logs_q, m_p, logs_p, z_mask)
            else:
                kl_loss = self.kl_loss(m_q, logs_q, m_p, logs_p)
            if "avocodo" in self.discriminator_type:
                adv_loss = self.generator_adv_loss(p_hat)
                feat_match_loss = self.feat_match_loss(fmaps_fake, fmaps_real)
            else:
                adv_loss = self.generator_adv_loss(p_hat)
                feat_match_loss = self.feat_match_loss(p_hat, p)
            pitch_loss = self.mse_loss(pred_pitch, gt_pitch)
            # pred_dur carries two channels: [:, 0] phoneme-level, [:, 1] score-level
            phoneme_dur_loss = self.mse_loss(
                pred_dur[:, 0, :].squeeze(1), gt_dur.float()
            )
            # NOTE(review): the score-duration channel is also regressed toward
            # gt_dur (not score_dur) -- confirm this is intended
            score_dur_loss = self.mse_loss(pred_dur[:, 1, :].squeeze(1), gt_dur.float())
            if self.use_phoneme_predictor:
                ctc_loss = self.ctc_loss(log_probs, label, feats_lengths, label_lengths)
            # apply configured loss weights
            mel_loss = mel_loss * self.lambda_mel
            kl_loss = kl_loss * self.lambda_kl
            adv_loss = adv_loss * self.lambda_adv
            feat_match_loss = feat_match_loss * self.lambda_feat_match
            pitch_loss = pitch_loss * self.lambda_pitch
            phoneme_dur_loss = phoneme_dur_loss * self.lambda_dur
            score_dur_loss = score_dur_loss * self.lambda_dur
            if self.use_phoneme_predictor:
                ctc_loss = ctc_loss * self.lambda_phoneme
            loss = mel_loss + kl_loss + adv_loss + feat_match_loss
            if self.vocoder_generator_type == "visinger2":
                ddsp_mel_loss = ddsp_mel_loss * self.lambda_mel
                loss = loss + ddsp_mel_loss
            if self.generator_type == "visinger2":
                # auxiliary acoustic-model mel loss, masked to valid frames
                loss_mel_am = self.mse_loss(feats * z_mask, predict_mel * z_mask)
                loss = loss + loss_mel_am
            loss = loss + pitch_loss
            loss = loss + phoneme_dur_loss
            loss = loss + score_dur_loss
            if self.use_phoneme_predictor:
                loss = loss + ctc_loss
            if "pisinger" in self.generator_type:
                loss = loss + yin_dec_loss
        stats = dict(
            generator_loss=loss.item(),
            generator_mel_loss=mel_loss.item(),
            generator_phn_dur_loss=phoneme_dur_loss.item(),
            generator_score_dur_loss=score_dur_loss.item(),
            generator_adv_loss=adv_loss.item(),
            generator_feat_match_loss=feat_match_loss.item(),
            generator_pitch_loss=pitch_loss.item(),
            generator_kl_loss=kl_loss.item(),
        )
        # optional statistics, reported only for the matching configurations
        if self.use_phoneme_predictor:
            stats.update(
                dict(
                    generator_phoneme_loss=ctc_loss.item(),
                )
            )
        if self.vocoder_generator_type == "visinger2":
            stats.update(
                dict(
                    generator_mel_ddsp_loss=ddsp_mel_loss.item(),
                )
            )
        if self.generator_type == "visinger2":
            stats.update(
                dict(
                    generator_mel_am_loss=loss_mel_am.item(),
                )
            )
        if "pisinger" in self.generator_type:
            stats.update(
                dict(
                    generator_yin_dec_loss=yin_dec_loss.item(),
                )
            )
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        # reset cache after the reuse turn, or always when not training
        if reuse_cache or not self.training:
            self._cache = None
        return {
            "loss": loss,
            "stats": stats,
            "weight": weight,
            "optim_idx": 0,  # needed for trainer
        }
def _forward_discrminator(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
feats: torch.Tensor,
feats_lengths: torch.Tensor,
singing: torch.Tensor,
singing_lengths: torch.Tensor,
label: torch.Tensor = None,
label_lengths: torch.Tensor = None,
melody: torch.Tensor = None,
gt_dur: torch.Tensor = None,
score_dur: torch.Tensor = None,
slur: torch.Tensor = None,
pitch: torch.Tensor = None,
ying: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
spembs: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, Any]:
"""Perform discriminator forward.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
feats (Tensor): Feature tensor (B, T_feats, aux_channels).
feats_lengths (Tensor): Feature length tensor (B,).
singing (Tensor): Singing waveform tensor (B, T_wav).
singing_lengths (Tensor): Singing length tensor (B,).
label (Tensor): Label index tensor (B, T_text).
label_lengths (Tensor): Label length tensor (B,).
melody (Tensor): Melody index tensor (B, T_text).
gt_dur (Tensor): Groundtruth duration tensor (B, T_text).
score_dur (Tensor): Score duration tensor (B, T_text).
slur (Tensor): Slur index tensor (B, T_text).
pitch (FloatTensor): Batch of padded f0 (B, T_feats).
ying (Optional[Tensor]): Yin pitch tensor (B, T_feats).
sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
Returns:
Dict[str, Any]:
* loss (Tensor): Loss scalar tensor.
* stats (Dict[str, float]): Statistics to be monitored.
* weight (Tensor): Weight tensor to summarize losses.
* optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
# setup
batch_size = text.size(0)
feats = feats.transpose(1, 2)
singing = singing.unsqueeze(1)
# calculate generator outputs
reuse_cache = True
if not self.cache_generator_outputs or self._cache is None:
reuse_cache = False
outs = self.generator(
text=text,
text_lengths=text_lengths,
feats=feats,
feats_lengths=feats_lengths,
gt_dur=gt_dur,
label=label,
label_lengths=label_lengths,
melody=melody,
score_dur=score_dur,
slur=slur,
pitch=pitch,
ying=ying,
sids=sids,
spembs=spembs,
lids=lids,
)
else:
outs = self._cache
# store cache
if self.cache_generator_outputs and not reuse_cache:
self._cache = outs
# parse outputs
# remove dp loss
singing_hat_, start_idxs, *_ = outs
singing_ = get_segments(
x=singing,
start_idxs=start_idxs * self.generator.upsample_factor,
segment_size=self.generator.segment_size * self.generator.upsample_factor,
)
# calculate discriminator outputs
if "avocodo" in self.discriminator_type:
detached_singing_hat_ = [x.detach() for x in singing_hat_]
p, p_hat, fmaps_real, fmaps_fake = self.discriminator(
singing_, detached_singing_hat_
)
else:
p_hat = self.discriminator(singing_hat_.detach())
p = self.discriminator(singing_)
# calculate losses
with autocast(enabled=False):
real_loss, fake_loss = self.discriminator_adv_loss(p_hat, p)
loss = real_loss + fake_loss
stats = dict(
discriminator_loss=loss.item(),
discriminator_real_loss=real_loss.item(),
discriminator_fake_loss=fake_loss.item(),
)
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
# reset cache
if reuse_cache or not self.training:
self._cache = None
return {
"loss": loss,
"stats": stats,
"weight": weight,
"optim_idx": 1, # needed for trainer
}
def inference(
self,
text: torch.Tensor,
feats: Optional[torch.Tensor] = None,
label: Optional[Dict[str, torch.Tensor]] = None,
melody: Optional[Dict[str, torch.Tensor]] = None,
pitch: Optional[torch.Tensor] = None,
duration: Optional[Dict[str, torch.Tensor]] = None,
slur: Optional[Dict[str, torch.Tensor]] = None,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
noise_scale: float = 0.667,
noise_scale_dur: float = 0.8,
alpha: float = 1.0,
max_len: Optional[int] = None,
use_teacher_forcing: bool = False,
) -> Dict[str, torch.Tensor]:
"""Run inference.
Args:
text (Tensor): Input text index tensor (T_text,).
feats (Tensor): Feature tensor (T_feats, aux_channels).
label (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of padded label ids (B, T_text).
melody (Optional[Dict]): key is "lab" or "score";
value (LongTensor): Batch of padded melody (B, T_text).
pitch (FloatTensor): Batch of padded f0 (B, T_feats).
slur (LongTensor): Batch of padded slur (B, T_text).
sids (Tensor): Speaker index tensor (1,).
spembs (Optional[Tensor]): Speaker embedding tensor (spk_embed_dim,).
lids (Tensor): Language index tensor (1,).
noise_scale (float): Noise scale value for flow.
noise_scale_dur (float): Noise scale value for duration predictor.
alpha (float): Alpha parameter to control the speed of generated singing.
max_len (Optional[int]): Maximum length.
use_teacher_forcing (bool): Whether to use teacher forcing.
duration (Optional[Dict]): key is "lab", "score_phn" or "score_syb";
value (LongTensor): Batch of padded duration (B, T_text).
Returns:
Dict[str, Tensor]:
* wav (Tensor): Generated waveform tensor (T_wav,).
"""
# setup
label = label["lab"]
melody = melody["lab"]
score_dur = duration["score_syb"]
gt_dur = duration["lab"]
text = text[None]
text_lengths = torch.tensor(
[text.size(1)],
dtype=torch.long,
device=text.device,
)
label_lengths = torch.tensor(
[label.size(1)],
dtype=torch.long,
device=text.device,
)
if sids is not None:
sids = sids.view(1)
if lids is not None:
lids = lids.view(1)
# inference
if use_teacher_forcing:
assert feats is not None
assert pitch is not None
feats = feats[None].transpose(1, 2)
feats_lengths = torch.tensor(
[feats.size(2)],
dtype=torch.long,
device=feats.device,
)
wav = self.generator.inference(
text=text,
text_lengths=text_lengths,
feats=feats,
feats_lengths=feats_lengths,
label=label,
label_lengths=label_lengths,
melody=melody,
score_dur=score_dur,
slur=slur,
gt_dur=gt_dur,
pitch=pitch,
sids=sids,
spembs=spembs,
lids=lids,
noise_scale=noise_scale,
noise_scale_dur=noise_scale_dur,
alpha=alpha,
max_len=max_len,
use_teacher_forcing=use_teacher_forcing,
)
else:
wav = self.generator.inference(
text=text,
text_lengths=text_lengths,
label=label,
label_lengths=label_lengths,
melody=melody,
score_dur=score_dur,
slur=slur,
sids=sids,
spembs=spembs,
lids=lids,
noise_scale=noise_scale,
noise_scale_dur=noise_scale_dur,
alpha=alpha,
max_len=max_len,
)
return dict(wav=wav.view(-1))
| 40,571 | 38.390291 | 104 | py |
espnet | espnet-master/espnet2/gan_svs/vits/generator.py | # Copyright 2021 Tomoki Hayashi
# Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Generator module in VISinger.
This code is based on https://github.com/jaywalnut310/vits.
This is a module of VISinger described in `VISinger: Variational Inference
with Adversarial Learning for End-to-End Singing Voice Synthesis`_.
.. _`VISinger: Variational Inference with Adversarial Learning for
End-to-End Singing Voice Synthesis`: https://arxiv.org/abs/2110.08813
"""
import math
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from espnet2.gan_svs.avocodo import AvocodoGenerator
from espnet2.gan_svs.uhifigan import UHiFiGANGenerator
from espnet2.gan_svs.uhifigan.sine_generator import SineGen
from espnet2.gan_svs.utils.expand_f0 import expand_f0
from espnet2.gan_svs.visinger2 import (
Generator_Harm,
Generator_Noise,
VISinger2VocoderGenerator,
)
from espnet2.gan_svs.visinger2.ddsp import upsample
from espnet2.gan_svs.vits.duration_predictor import DurationPredictor
from espnet2.gan_svs.vits.length_regulator import LengthRegulator
from espnet2.gan_svs.vits.phoneme_predictor import PhonemePredictor
from espnet2.gan_svs.vits.pitch_predictor import Decoder
from espnet2.gan_svs.vits.prior_decoder import PriorDecoder
from espnet2.gan_svs.vits.text_encoder import TextEncoder
from espnet2.gan_tts.hifigan import HiFiGANGenerator
from espnet2.gan_tts.utils import get_random_segments, get_segments
from espnet2.gan_tts.vits.posterior_encoder import PosteriorEncoder
from espnet2.gan_tts.vits.residual_coupling import ResidualAffineCouplingBlock
class VISingerGenerator(torch.nn.Module):
"""Generator module in VISinger."""
    def __init__(
        self,
        vocabs: int,
        aux_channels: int = 513,
        hidden_channels: int = 192,
        spks: Optional[int] = None,
        langs: Optional[int] = None,
        spk_embed_dim: Optional[int] = None,
        global_channels: int = -1,
        segment_size: int = 32,
        text_encoder_attention_heads: int = 2,
        text_encoder_ffn_expand: int = 4,
        text_encoder_blocks: int = 6,
        text_encoder_positionwise_layer_type: str = "conv1d",
        text_encoder_positionwise_conv_kernel_size: int = 1,
        text_encoder_positional_encoding_layer_type: str = "rel_pos",
        text_encoder_self_attention_layer_type: str = "rel_selfattn",
        text_encoder_activation_type: str = "swish",
        text_encoder_normalize_before: bool = True,
        text_encoder_dropout_rate: float = 0.1,
        text_encoder_positional_dropout_rate: float = 0.0,
        text_encoder_attention_dropout_rate: float = 0.0,
        text_encoder_conformer_kernel_size: int = 7,
        use_macaron_style_in_text_encoder: bool = True,
        use_conformer_conv_in_text_encoder: bool = True,
        decoder_kernel_size: int = 7,
        decoder_channels: int = 512,
        decoder_downsample_scales: List[int] = [2, 2, 8, 8],
        decoder_downsample_kernel_sizes: List[int] = [4, 4, 16, 16],
        decoder_upsample_scales: List[int] = [8, 8, 2, 2],
        decoder_upsample_kernel_sizes: List[int] = [16, 16, 4, 4],
        decoder_resblock_kernel_sizes: List[int] = [3, 7, 11],
        decoder_resblock_dilations: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        # avocodo
        use_avocodo=False,
        projection_filters: List[int] = [0, 1, 1, 1],
        projection_kernels: List[int] = [0, 5, 7, 11],
        # visinger 2
        n_harmonic: int = 64,
        use_weight_norm_in_decoder: bool = True,
        posterior_encoder_kernel_size: int = 5,
        posterior_encoder_layers: int = 16,
        posterior_encoder_stacks: int = 1,
        posterior_encoder_base_dilation: int = 1,
        posterior_encoder_dropout_rate: float = 0.0,
        use_weight_norm_in_posterior_encoder: bool = True,
        flow_flows: int = 4,
        flow_kernel_size: int = 5,
        flow_base_dilation: int = 1,
        flow_layers: int = 4,
        flow_dropout_rate: float = 0.0,
        use_weight_norm_in_flow: bool = True,
        use_only_mean_in_flow: bool = True,
        generator_type: str = "visinger",
        vocoder_generator_type: str = "hifigan",
        fs: int = 22050,
        hop_length: int = 256,
        win_length: int = 1024,
        n_fft: int = 1024,
        use_phoneme_predictor: bool = False,
        expand_f0_method: str = "repeat",
    ):
        """Initialize VITS generator module.
        Args:
            vocabs (int): Input vocabulary size.
            aux_channels (int): Number of acoustic feature channels.
            hidden_channels (int): Number of hidden channels.
            spks (Optional[int]): Number of speakers. If set to > 1, assume that the
                sids will be provided as the input and use sid embedding layer.
            langs (Optional[int]): Number of languages. If set to > 1, assume that the
                lids will be provided as the input and use sid embedding layer.
            spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
                assume that spembs will be provided as the input.
            global_channels (int): Number of global conditioning channels.
            segment_size (int): Segment size for decoder.
            text_encoder_attention_heads (int): Number of heads in conformer block
                of text encoder.
            text_encoder_ffn_expand (int): Expansion ratio of FFN in conformer block
                of text encoder.
            text_encoder_blocks (int): Number of conformer blocks in text encoder.
            text_encoder_positionwise_layer_type (str): Position-wise layer type in
                conformer block of text encoder.
            text_encoder_positionwise_conv_kernel_size (int): Position-wise convolution
                kernel size in conformer block of text encoder. Only used when the
                above layer type is conv1d or conv1d-linear.
            text_encoder_positional_encoding_layer_type (str): Positional encoding layer
                type in conformer block of text encoder.
            text_encoder_self_attention_layer_type (str): Self-attention layer type in
                conformer block of text encoder.
            text_encoder_activation_type (str): Activation function type in conformer
                block of text encoder.
            text_encoder_normalize_before (bool): Whether to apply layer norm before
                self-attention in conformer block of text encoder.
            text_encoder_dropout_rate (float): Dropout rate in conformer block of
                text encoder.
            text_encoder_positional_dropout_rate (float): Dropout rate for positional
                encoding in conformer block of text encoder.
            text_encoder_attention_dropout_rate (float): Dropout rate for attention in
                conformer block of text encoder.
            text_encoder_conformer_kernel_size (int): Conformer conv kernel size. It
                will be used when only use_conformer_conv_in_text_encoder = True.
            use_macaron_style_in_text_encoder (bool): Whether to use macaron style FFN
                in conformer block of text encoder.
            use_conformer_conv_in_text_encoder (bool): Whether to use covolution in
                conformer block of text encoder.
            decoder_kernel_size (int): Decoder kernel size.
            decoder_channels (int): Number of decoder initial channels.
            decoder_downsample_scales (List[int]): List of downsampling scales in
                decoder.
            decoder_downsample_kernel_sizes (List[int]): List of kernel sizes for
                downsampling layers in decoder.
            decoder_upsample_scales (List[int]): List of upsampling scales in decoder.
            decoder_upsample_kernel_sizes (List[int]): List of kernel sizes for
                upsampling layers in decoder.
            decoder_resblock_kernel_sizes (List[int]): List of kernel sizes for
                resblocks in decoder.
            decoder_resblock_dilations (List[List[int]]): List of list of dilations for
                resblocks in decoder.
            use_avocodo (bool): Whether to use Avocodo model in the generator.
            projection_filters (List[int]): List of projection filter sizes.
            projection_kernels (List[int]): List of projection kernel sizes.
            n_harmonic (int): Number of harmonic components.
            use_weight_norm_in_decoder (bool): Whether to apply weight normalization in
                decoder.
            posterior_encoder_kernel_size (int): Posterior encoder kernel size.
            posterior_encoder_layers (int): Number of layers of posterior encoder.
            posterior_encoder_stacks (int): Number of stacks of posterior encoder.
            posterior_encoder_base_dilation (int): Base dilation of posterior encoder.
            posterior_encoder_dropout_rate (float): Dropout rate for posterior encoder.
            use_weight_norm_in_posterior_encoder (bool): Whether to apply weight
                normalization in posterior encoder.
            flow_flows (int): Number of flows in flow.
            flow_kernel_size (int): Kernel size in flow.
            flow_base_dilation (int): Base dilation in flow.
            flow_layers (int): Number of layers in flow.
            flow_dropout_rate (float): Dropout rate in flow
            use_weight_norm_in_flow (bool): Whether to apply weight normalization in
                flow.
            use_only_mean_in_flow (bool): Whether to use only mean in flow.
            generator_type (str): Type of generator to use for the model.
            vocoder_generator_type (str): Type of vocoder generator to use for the
                model.
            fs (int): Sample rate of the audio.
            hop_length (int): Number of samples between successive frames in STFT.
            win_length (int): Window size of the STFT.
            n_fft (int): Length of the FFT window to be used.
            use_phoneme_predictor (bool): Whether to use phoneme predictor in the model.
            expand_f0_method (str): The method used to expand F0. Use "repeat" or
                "interpolation".
        """
        super().__init__()
        # NOTE(review): several defaults above are mutable lists; they are only
        # read here, but the shared-default pitfall applies if ever mutated --
        # consider tuples.
        # keep frequently used hyperparameters as attributes
        self.aux_channels = aux_channels
        self.hidden_channels = hidden_channels
        self.generator_type = generator_type
        self.segment_size = segment_size
        self.sample_rate = fs
        self.hop_length = hop_length
        self.use_avocodo = use_avocodo
        self.use_flow = True if flow_flows > 0 else False
        self.use_phoneme_predictor = use_phoneme_predictor
        # conformer-based text (phoneme/score) encoder
        self.text_encoder = TextEncoder(
            vocabs=vocabs,
            attention_dim=hidden_channels,
            attention_heads=text_encoder_attention_heads,
            linear_units=hidden_channels * text_encoder_ffn_expand,
            blocks=text_encoder_blocks,
            positionwise_layer_type=text_encoder_positionwise_layer_type,
            positionwise_conv_kernel_size=text_encoder_positionwise_conv_kernel_size,
            positional_encoding_layer_type=text_encoder_positional_encoding_layer_type,
            self_attention_layer_type=text_encoder_self_attention_layer_type,
            activation_type=text_encoder_activation_type,
            normalize_before=text_encoder_normalize_before,
            dropout_rate=text_encoder_dropout_rate,
            positional_dropout_rate=text_encoder_positional_dropout_rate,
            attention_dropout_rate=text_encoder_attention_dropout_rate,
            conformer_kernel_size=text_encoder_conformer_kernel_size,
            use_macaron_style=use_macaron_style_in_text_encoder,
            use_conformer_conv=use_conformer_conv_in_text_encoder,
        )
        # waveform decoder (vocoder); the chosen type decides extra submodules
        if vocoder_generator_type == "uhifigan":
            self.decoder = UHiFiGANGenerator(
                in_channels=hidden_channels,
                out_channels=1,
                channels=decoder_channels,
                global_channels=global_channels,
                kernel_size=decoder_kernel_size,
                downsample_scales=decoder_downsample_scales,
                downsample_kernel_sizes=decoder_downsample_kernel_sizes,
                upsample_scales=decoder_upsample_scales,
                upsample_kernel_sizes=decoder_upsample_kernel_sizes,
                resblock_kernel_sizes=decoder_resblock_kernel_sizes,
                resblock_dilations=decoder_resblock_dilations,
                use_weight_norm=use_weight_norm_in_decoder,
                use_avocodo=use_avocodo,
            )
            # sine excitation source for the uhifigan decoder
            self.sine_generator = SineGen(
                sample_rate=fs,
            )
        elif vocoder_generator_type == "hifigan":
            self.decoder = HiFiGANGenerator(
                in_channels=hidden_channels,
                out_channels=1,
                channels=decoder_channels,
                global_channels=global_channels,
                kernel_size=decoder_kernel_size,
                upsample_scales=decoder_upsample_scales,
                upsample_kernel_sizes=decoder_upsample_kernel_sizes,
                resblock_kernel_sizes=decoder_resblock_kernel_sizes,
                resblock_dilations=decoder_resblock_dilations,
                use_weight_norm=use_weight_norm_in_decoder,
            )
        elif vocoder_generator_type == "avocodo":
            self.decoder = AvocodoGenerator(
                in_channels=hidden_channels,
                out_channels=1,
                channels=decoder_channels,
                global_channels=global_channels,
                kernel_size=decoder_kernel_size,
                upsample_scales=decoder_upsample_scales,
                upsample_kernel_sizes=decoder_upsample_kernel_sizes,
                resblock_kernel_sizes=decoder_resblock_kernel_sizes,
                resblock_dilations=decoder_resblock_dilations,
                projection_filters=projection_filters,
                projection_kernels=projection_kernels,
                use_weight_norm=use_weight_norm_in_decoder,
            )
        elif vocoder_generator_type == "visinger2":
            self.decoder = VISinger2VocoderGenerator(
                in_channels=hidden_channels,
                out_channels=1,
                channels=decoder_channels,
                global_channels=global_channels,
                kernel_size=decoder_kernel_size,
                upsample_scales=decoder_upsample_scales,
                upsample_kernel_sizes=decoder_upsample_kernel_sizes,
                resblock_kernel_sizes=decoder_resblock_kernel_sizes,
                resblock_dilations=decoder_resblock_dilations,
                use_weight_norm=use_weight_norm_in_decoder,
                n_harmonic=n_harmonic,
            )
            # DSP-based harmonic and noise generators used by VISinger2
            self.dec_harm = Generator_Harm(
                hidden_channels=hidden_channels,
                n_harmonic=n_harmonic,
                kernel_size=3,
                padding=1,
                dropout_rate=0.1,
                sample_rate=fs,
                hop_size=hop_length,
            )
            self.dec_noise = Generator_Noise(
                win_length=win_length,
                hop_length=hop_length,
                n_fft=n_fft,
                hidden_channels=hidden_channels,
                kernel_size=3,
                padding=1,
                dropout_rate=0.1,
            )
            # projects the sine excitation to n_harmonic + 2 channels
            self.sin_prenet = torch.nn.Conv1d(1, n_harmonic + 2, 3, padding=1)
        else:
            raise ValueError(
                f"Not supported vocoder generator type: {vocoder_generator_type}"
            )
        # posterior encoder q(z|x) over the acoustic features
        self.posterior_encoder = PosteriorEncoder(
            in_channels=aux_channels,
            out_channels=hidden_channels,
            hidden_channels=hidden_channels,
            kernel_size=posterior_encoder_kernel_size,
            layers=posterior_encoder_layers,
            stacks=posterior_encoder_stacks,
            base_dilation=posterior_encoder_base_dilation,
            global_channels=global_channels,
            dropout_rate=posterior_encoder_dropout_rate,
            use_weight_norm=use_weight_norm_in_posterior_encoder,
        )
        # optional normalizing flow between prior and posterior latents
        if self.use_flow:
            self.flow = ResidualAffineCouplingBlock(
                in_channels=hidden_channels,
                hidden_channels=hidden_channels,
                flows=flow_flows,
                kernel_size=flow_kernel_size,
                base_dilation=flow_base_dilation,
                layers=flow_layers,
                global_channels=global_channels,
                dropout_rate=flow_dropout_rate,
                use_weight_norm=use_weight_norm_in_flow,
            )
        # prenets projecting conditioning signals to hidden_channels + 2
        # channels (NOTE(review): the "+ 2" presumably matches the prior
        # decoder's expected input width -- confirm against PriorDecoder)
        self.f0_prenet = torch.nn.Conv1d(1, hidden_channels + 2, 3, padding=1)
        if generator_type == "visinger2":
            self.energy_prenet = torch.nn.Conv1d(1, hidden_channels + 2, 3, padding=1)
            self.mel_prenet = torch.nn.Conv1d(
                aux_channels, hidden_channels + 2, 3, padding=1
            )
        # TODO(kan-bayashi): Add deterministic version as an option
        self.duration_predictor = DurationPredictor(
            channels=hidden_channels,
            filter_channels=256,
            kernel_size=3,
            dropout_rate=0.5,
            global_channels=global_channels,
        )
        # expands phoneme-level features to frame level using durations
        self.lr = LengthRegulator()
        if self.use_phoneme_predictor:
            self.phoneme_predictor = PhonemePredictor(
                vocabs=vocabs,
                hidden_channels=hidden_channels,
                attention_dim=hidden_channels,
                blocks=2,
            )
        # frame-level f0 decoder (1-dim output)
        self.f0_decoder = Decoder(
            1,
            attention_dim=hidden_channels,
            attention_heads=text_encoder_attention_heads,
            linear_units=hidden_channels * text_encoder_ffn_expand,
            blocks=text_encoder_blocks,
            pw_layer_type=text_encoder_positionwise_layer_type,
            pw_conv_kernel_size=text_encoder_positionwise_conv_kernel_size,
            pos_enc_layer_type=text_encoder_positional_encoding_layer_type,
            self_attention_layer_type=text_encoder_self_attention_layer_type,
            activation_type=text_encoder_activation_type,
            normalize_before=text_encoder_normalize_before,
            dropout_rate=text_encoder_dropout_rate,
            positional_dropout_rate=text_encoder_positional_dropout_rate,
            attention_dropout_rate=text_encoder_attention_dropout_rate,
            conformer_kernel_size=text_encoder_conformer_kernel_size,
            use_macaron_style=use_macaron_style_in_text_encoder,
            use_conformer_conv=use_conformer_conv_in_text_encoder,
            global_channels=global_channels,
        )
        # VISinger2 adds an auxiliary mel decoder ("AM" branch)
        if self.generator_type == "visinger2":
            self.mel_decoder = Decoder(
                out_channels=aux_channels,
                attention_dim=hidden_channels,
                attention_heads=text_encoder_attention_heads,
                linear_units=hidden_channels * text_encoder_ffn_expand,
                blocks=text_encoder_blocks,
                pw_layer_type=text_encoder_positionwise_layer_type,
                pw_conv_kernel_size=text_encoder_positionwise_conv_kernel_size,
                pos_enc_layer_type=text_encoder_positional_encoding_layer_type,
                self_attention_layer_type=text_encoder_self_attention_layer_type,
                activation_type=text_encoder_activation_type,
                normalize_before=text_encoder_normalize_before,
                dropout_rate=text_encoder_dropout_rate,
                positional_dropout_rate=text_encoder_positional_dropout_rate,
                attention_dropout_rate=text_encoder_attention_dropout_rate,
                conformer_kernel_size=text_encoder_conformer_kernel_size,
                use_macaron_style=use_macaron_style_in_text_encoder,
                use_conformer_conv=use_conformer_conv_in_text_encoder,
                global_channels=global_channels,
            )
        # prior decoder producing mean and log-scale (hence 2 * hidden_channels)
        self.prior_decoder = PriorDecoder(
            out_channels=hidden_channels * 2,
            attention_dim=hidden_channels,
            attention_heads=text_encoder_attention_heads,
            linear_units=hidden_channels * text_encoder_ffn_expand,
            blocks=text_encoder_blocks,
            positionwise_layer_type=text_encoder_positionwise_layer_type,
            positionwise_conv_kernel_size=text_encoder_positionwise_conv_kernel_size,
            positional_encoding_layer_type=text_encoder_positional_encoding_layer_type,
            self_attention_layer_type=text_encoder_self_attention_layer_type,
            activation_type=text_encoder_activation_type,
            normalize_before=text_encoder_normalize_before,
            dropout_rate=text_encoder_dropout_rate,
            positional_dropout_rate=text_encoder_positional_dropout_rate,
            attention_dropout_rate=text_encoder_attention_dropout_rate,
            conformer_kernel_size=text_encoder_conformer_kernel_size,
            use_macaron_style=use_macaron_style_in_text_encoder,
            use_conformer_conv=use_conformer_conv_in_text_encoder,
            global_channels=global_channels,
        )
        # total upsampling ratio from frames to waveform samples
        self.upsample_factor = int(np.prod(decoder_upsample_scales))
        # optional global conditioning: speaker id, speaker embedding, language id
        self.spks = None
        if spks is not None and spks > 1:
            assert global_channels > 0
            self.spks = spks
            self.global_emb = torch.nn.Embedding(spks, global_channels)
        self.spk_embed_dim = None
        if spk_embed_dim is not None and spk_embed_dim > 0:
            assert global_channels > 0
            self.spk_embed_dim = spk_embed_dim
            self.spemb_proj = torch.nn.Linear(spk_embed_dim, global_channels)
        self.langs = None
        if langs is not None and langs > 1:
            assert global_channels > 0
            self.langs = langs
            self.lang_emb = torch.nn.Embedding(langs, global_channels)
        self.vocoder_generator_type = vocoder_generator_type
        self.dropout = torch.nn.Dropout(0.2)
        self.expand_f0_method = expand_f0_method
def forward(
    self,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    feats: torch.Tensor,
    feats_lengths: torch.Tensor,
    label: torch.Tensor = None,
    label_lengths: torch.Tensor = None,
    melody: torch.Tensor = None,
    gt_dur: torch.Tensor = None,
    score_dur: torch.Tensor = None,
    slur: torch.Tensor = None,
    pitch: torch.Tensor = None,
    ying: Optional[torch.Tensor] = None,
    sids: Optional[torch.Tensor] = None,
    spembs: Optional[torch.Tensor] = None,
    lids: Optional[torch.Tensor] = None,
) -> Tuple[
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    Tuple[
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
    ],
]:
    """Calculate forward propagation.

    Args:
        text (LongTensor): Batch of padded character ids (B, Tmax).
        text_lengths (LongTensor): Batch of lengths of each input batch (B,).
        feats (Tensor): Batch of padded target features (B, Lmax, odim).
        feats_lengths (LongTensor): Batch of the lengths of each target (B,).
        label (LongTensor): Batch of padded label ids (B, Tmax).
        label_lengths (LongTensor): Batch of the lengths of padded label ids (B, ).
        melody (LongTensor): Batch of padded midi (B, Tmax).
        gt_dur (LongTensor): Batch of padded ground truth duration (B, Tmax).
        score_dur (LongTensor): Batch of padded score duration (B, Tmax).
        slur (LongTensor): Batch of padded slur flags (B, Tmax).
        pitch (FloatTensor): Batch of padded f0 (B, Tmax).
        ying (Optional[Tensor]): Batch of padded ying (B, Tmax).
        spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
        sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
        lids (Optional[Tensor]): Batch of language IDs (B, 1).

    Returns:
        Tensor: Waveform tensor (B, 1, segment_size * upsample_factor).
        Tensor: Duration negative log-likelihood (NLL) tensor (B,).
        Tensor: Monotonic attention weight tensor (B, 1, T_feats, T_text).
        Tensor: Segments start index tensor (B,).
        Tensor: Text mask tensor (B, 1, T_text).
        Tensor: Feature mask tensor (B, 1, T_feats).
        tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
            - Tensor: Posterior encoder hidden representation (B, H, T_feats).
            - Tensor: Flow hidden representation (B, H, T_feats).
            - Tensor: Expanded text encoder projected mean (B, H, T_feats).
            - Tensor: Expanded text encoder projected scale (B, H, T_feats).
            - Tensor: Posterior encoder projected mean (B, H, T_feats).
            - Tensor: Posterior encoder projected scale (B, H, T_feats).
    """
    # calculate global conditioning
    # Speaker / x-vector / language embeddings are summed into a single
    # conditioning vector g of shape (B, global_channels, 1).
    g = None
    if self.spks is not None:
        # speaker one-hot vector embedding: (B, global_channels, 1)
        g = self.global_emb(sids.view(-1)).unsqueeze(-1)
    if self.spk_embed_dim is not None:
        # pretrained speaker embedding, e.g., X-vector (B, global_channels, 1)
        g_ = self.spemb_proj(F.normalize(spembs)).unsqueeze(-1)
        if g is None:
            g = g_
        else:
            g = g + g_
    if self.langs is not None:
        # language one-hot vector embedding: (B, global_channels, 1)
        g_ = self.lang_emb(lids.view(-1)).unsqueeze(-1)
        if g is None:
            g = g_
        else:
            g = g + g_

    # forward text encoder
    # Encoder
    x, x_mask, dur_input, x_pitch = self.text_encoder(
        label, label_lengths, melody, score_dur, slur
    )

    # dur
    # Note this is different, we use frame level duration not time level
    # but it has no big difference on performance
    predict_dur = self.duration_predictor(dur_input, x_mask, g=g)
    # predictor outputs a log-compressed duration; undo with exp(.) - 1
    predict_dur = (torch.exp(predict_dur) - 1) * x_mask
    # convert seconds to frames (sample_rate / hop_length frames per second)
    predict_dur = predict_dur * self.sample_rate / self.hop_length

    # LR
    # Length-regulate phone-level features to frame level using ground-truth
    # durations (teacher forcing during training).
    decoder_input, mel_len = self.lr(x, gt_dur, use_state_info=True)
    decoder_input_pitch, mel_len = self.lr(x_pitch, gt_dur, use_state_info=True)

    # Hz -> mel-scale log-F0 (2595 * log10(1 + f/700)); /500 matches the
    # F0_std = 500 de-normalization used in inference().
    LF0 = 2595.0 * torch.log10(1.0 + pitch / 700.0)
    LF0 = LF0 / 500
    LF0 = LF0.transpose(1, 2)

    predict_lf0, predict_bn_mask = self.f0_decoder(
        decoder_input + decoder_input_pitch, feats_lengths, g=g
    )
    # clamp predicted log-F0 to be non-negative
    predict_lf0 = torch.max(
        predict_lf0, torch.zeros_like(predict_lf0).to(predict_lf0)
    )

    if self.generator_type == "visinger2":
        # VISinger2 additionally predicts a mel spectrogram and derives a
        # frame-level energy as the mean over mel bins (sum / aux_channels).
        predict_mel, predict_bn_mask = self.mel_decoder(
            decoder_input + self.f0_prenet(LF0), feats_lengths, g=g
        )
        predict_energy = (
            predict_mel.detach().sum(1).unsqueeze(1) / self.aux_channels
        )

    # condition prior decoder on ground-truth log-F0 (teacher forcing)
    decoder_input = decoder_input + self.f0_prenet(LF0)
    if self.generator_type == "visinger2":
        decoder_input = (
            decoder_input
            + self.energy_prenet(predict_energy)
            + self.mel_prenet(predict_mel.detach())
        )
    decoder_output, predict_bn_mask = self.prior_decoder(
        decoder_input, feats_lengths, g=g
    )

    # split projected prior into mean / log-std halves along channels
    prior_info = decoder_output
    prior_mean = prior_info[:, : self.hidden_channels, :]
    prior_logstd = prior_info[:, self.hidden_channels :, :]

    # forward posterior encoder
    posterior_z, posterior_mean, posterior_logstd, y_mask = self.posterior_encoder(
        feats, feats_lengths, g=g
    )

    if self.use_flow:
        z_flow = self.flow(posterior_z, y_mask, g=g)
    else:
        z_flow = None

    # phoneme predictor
    if self.use_phoneme_predictor:
        log_probs = self.phoneme_predictor(posterior_z, y_mask)
    else:
        log_probs = None

    p_z = posterior_z
    p_z = self.dropout(p_z)

    # get random segments
    # Only a random slice of frames is vocoded to keep GAN training cheap.
    z_segments, z_start_idxs = get_random_segments(
        p_z, feats_lengths, self.segment_size
    )

    if self.vocoder_generator_type == "uhifigan":
        # get sine wave

        # def plot_sine_waves(sine_waves, name):
        #     import matplotlib.pyplot as plt

        #     sine_waves_np = sine_waves[0].detach().cpu().numpy()
        #     plt.plot(sine_waves_np)
        #     plt.xlabel("Time (samples)")
        #     plt.ylabel("Amplitude")
        #     plt.title("Sine Wave")
        #     plt.savefig(name + ".png")
        #     plt.close()

        # plot_sine_waves(pitch_segments[0], "pitch_segments")

        # slice F0 to the same random segment, then upsample to sample level
        pitch_segments = get_segments(
            pitch.transpose(1, 2), z_start_idxs, self.segment_size
        )
        pitch_segments_expended = expand_f0(
            pitch_segments, self.hop_length, method="repeat"
        )
        # plot_sine_waves(
        #     pitch_segments_expended[0].unsqueeze(0), "pitch_segments_expended"
        # )
        pitch_segments_expended = pitch_segments_expended.reshape(
            -1, pitch_segments_expended.shape[-1], 1
        )
        # sine excitation driven by F0 for the source-filter style vocoder
        sine_waves, uv, noise = self.sine_generator(pitch_segments_expended)
        sine_waves = sine_waves.transpose(1, 2)
        wav = self.decoder(z_segments, excitation=sine_waves, g=g)
    elif self.vocoder_generator_type == "visinger2":
        # build a sample-level sine from the cumulative phase of F0
        pitch_ = upsample(pitch, self.hop_length)
        omega = torch.cumsum(2 * math.pi * pitch_ / self.sample_rate, 1)
        sin = torch.sin(omega).transpose(1, 2)

        # dsp synthesize
        pitch = pitch.transpose(1, 2)
        noise_x = self.dec_noise(posterior_z, y_mask)
        harm_x = self.dec_harm(pitch, posterior_z, y_mask)

        # dsp waveform
        dsp_o = torch.cat([harm_x, noise_x], axis=1)

        # decoder_condition = torch.cat([harm_x, noise_x, sin], axis=1)
        decoder_condition = self.sin_prenet(sin)

        # dsp based HiFiGAN vocoder
        # frame-indexed starts must be scaled by hop_length for sample-level
        # tensors so both slices cover the same audio region
        F0_slice = get_segments(pitch, z_start_idxs, self.segment_size)
        dsp_slice = get_segments(
            dsp_o,
            z_start_idxs * self.hop_length,
            self.segment_size * self.hop_length,
        )
        condition_slice = get_segments(
            decoder_condition,
            z_start_idxs * self.hop_length,
            self.segment_size * self.hop_length,
        )
        wav = self.decoder(z_segments, condition_slice, g=g)
    else:
        wav = self.decoder(z_segments, g=g)
        # wav = dsp_slice.sum(1, keepdim=True)

    # NOTE: the element order of this tuple is consumed by the loss wrapper —
    # keep it stable.
    common_tuple = (
        posterior_z,
        z_flow,
        prior_mean,
        prior_logstd,
        posterior_mean,
        posterior_logstd,
        predict_lf0,
        LF0 * predict_bn_mask,
        predict_dur,
        gt_dur,
        log_probs,
    )
    output = (wav, z_start_idxs, x_mask, y_mask, common_tuple)
    # optional extras appended only for the corresponding generator types
    if self.vocoder_generator_type == "visinger2":
        output = output + (dsp_slice.sum(1),)
    if self.generator_type == "visinger2":
        output = output + (predict_mel,)
    return output
def inference(
    self,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    feats: Optional[torch.Tensor] = None,
    feats_lengths: Optional[torch.Tensor] = None,
    label: torch.Tensor = None,
    label_lengths: torch.Tensor = None,
    melody: torch.Tensor = None,
    score_dur: torch.Tensor = None,
    slur: torch.Tensor = None,
    gt_dur: Optional[torch.Tensor] = None,
    pitch: Optional[torch.Tensor] = None,
    sids: Optional[torch.Tensor] = None,
    spembs: Optional[torch.Tensor] = None,
    lids: Optional[torch.Tensor] = None,
    noise_scale: float = 0.667,
    noise_scale_dur: float = 0.8,
    alpha: float = 1.0,
    max_len: Optional[int] = None,
    use_teacher_forcing: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Run inference.

    Args:
        text (LongTensor): Batch of padded character ids (B, Tmax).
        text_lengths (LongTensor): Batch of lengths of each input batch (B,).
        feats (Tensor): Batch of padded target features (B, Lmax, odim).
        feats_lengths (LongTensor): Batch of the lengths of each target (B,).
        label (LongTensor): Batch of padded label ids (B, Tmax).
        label_lengths (LongTensor): Batch of the lengths of padded label ids (B, ).
        melody (LongTensor): Batch of padded midi (B, Tmax).
        gt_dur (LongTensor): Batch of padded ground truth duration (B, Tmax).
        score_dur (LongTensor): Batch of padded score duration (B, Tmax).
        slur (LongTensor): Batch of padded slur flags (B, Tmax).
        pitch (FloatTensor): Batch of padded f0 (B, Tmax).
        ying (Optional[Tensor]): Batch of padded ying (B, Tmax).
        spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
        sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
        lids (Optional[Tensor]): Batch of language IDs (B, 1).
        noise_scale (float): Noise scale parameter for flow.
        noise_scale_dur (float): Noise scale parameter for duration predictor.
        alpha (float): Alpha parameter to control the speed of generated speech.
        max_len (Optional[int]): Maximum length of acoustic feature sequence.
        use_teacher_forcing (bool): Whether to use teacher forcing.

    Returns:
        Tensor: Generated waveform tensor (B, T_wav).
    """
    # encoder
    x, x_mask, dur_input, x_pitch = self.text_encoder(
        label, label_lengths, melody, score_dur, slur
    )
    # build global conditioning vector (same scheme as forward())
    g = None
    if self.spks is not None:
        # (B, global_channels, 1)
        g = self.global_emb(sids.view(-1)).unsqueeze(-1)
    if self.spk_embed_dim is not None:
        # (B, global_channels, 1)
        g_ = self.spemb_proj(F.normalize(spembs.unsqueeze(0))).unsqueeze(-1)
        if g is None:
            g = g_
        else:
            g = g + g_
    if self.langs is not None:
        # (B, global_channels, 1)
        g_ = self.lang_emb(lids.view(-1)).unsqueeze(-1)
        if g is None:
            g = g_
        else:
            g = g + g_

    if use_teacher_forcing:
        # forward posterior encoder
        z, m_q, logs_q, y_mask = self.posterior_encoder(feats, feats_lengths, g=g)

        # forward flow
        if self.use_flow:
            # NOTE(review): z_p is computed but never used below (the decoder
            # consumes z directly) — looks like dead computation; confirm.
            z_p = self.flow(z, y_mask, g=g)  # (B, H, T_feats)

        # decoder
        # assumes batch size 1: flattens (T, 1) pitch into (1, 1, T)
        pitch = pitch.transpose(0, 1).reshape(1, 1, -1)
        if self.vocoder_generator_type == "uhifigan":
            # sine excitation from ground-truth F0
            pitch_segments_expended = expand_f0(
                pitch, self.hop_length, method=self.expand_f0_method
            )
            pitch_segments_expended = pitch_segments_expended.reshape(
                -1, pitch_segments_expended.shape[-1], 1
            )
            sine_waves, uv, noise = self.sine_generator(pitch_segments_expended)
            sine_waves = sine_waves.transpose(1, 2)
            wav = self.decoder(
                (z * y_mask)[:, :, :max_len], excitation=sine_waves, g=g
            )
        elif self.vocoder_generator_type == "avocodo":
            # avocodo returns multi-band outputs; keep only the final band
            wav = self.decoder((z * y_mask)[:, :, :max_len], g=g)[-1]
        elif self.vocoder_generator_type == "visinger2":
            # sample-level sine from cumulative phase of F0
            pitch_ = upsample(pitch.transpose(1, 2), self.hop_length)
            omega = torch.cumsum(2 * math.pi * pitch_ / self.sample_rate, 1)
            sin = torch.sin(omega).transpose(1, 2)

            # dsp synthesize
            noise_x = self.dec_noise(z, y_mask)
            harm_x = self.dec_harm(pitch, z, y_mask)

            # dsp waveform
            dsp_o = torch.cat([harm_x, noise_x], axis=1)

            # decoder_condition = torch.cat([harm_x, noise_x, sin], axis=1)
            decoder_condition = self.sin_prenet(sin)

            # dsp based HiFiGAN vocoder
            wav = self.decoder((z * y_mask)[:, :, :max_len], decoder_condition, g=g)
            # wav = dsp_o.sum(1)
            # wav = noise_x
            # wav = harm_x.sum(1)
        else:
            wav = self.decoder((z * y_mask)[:, :, :max_len], g=g)
    else:
        # dur
        # invert the log-compressed prediction and convert seconds -> frames,
        # then force at least one frame per phone
        predict_dur = self.duration_predictor(dur_input, x_mask, g=g)
        predict_dur = (torch.exp(predict_dur) - 1) * x_mask
        predict_dur = predict_dur * self.sample_rate / self.hop_length
        predict_dur = torch.max(predict_dur, torch.ones_like(predict_dur).to(x))
        predict_dur = torch.ceil(predict_dur).long()
        predict_dur = predict_dur[:, 0, :]

        y_lengths = torch.clamp_min(torch.sum(predict_dur, [1]), 1).long()

        # LR
        decoder_input, mel_len = self.lr(x, predict_dur, use_state_info=True)
        decoder_input_pitch, mel_len = self.lr(
            x_pitch, predict_dur, use_state_info=True
        )

        # aam
        predict_lf0, predict_bn_mask = self.f0_decoder(
            decoder_input + decoder_input_pitch, y_lengths, g=g
        )

        if self.generator_type == "visinger2":
            predict_mel, predict_bn_mask = self.mel_decoder(
                decoder_input + self.f0_prenet(predict_lf0),
                y_lengths,
                g=g,
            )
            # frame energy = mean over mel bins
            predict_energy = predict_mel.sum(1).unsqueeze(1) / self.aux_channels

        # clamp predicted log-F0 to be non-negative
        predict_lf0 = torch.max(
            predict_lf0, torch.zeros_like(predict_lf0).to(predict_lf0)
        )

        decoder_input = decoder_input + self.f0_prenet(predict_lf0)
        if self.generator_type == "visinger2":
            decoder_input = (
                decoder_input
                + self.energy_prenet(predict_energy)
                + self.mel_prenet(predict_mel)
            )
        decoder_output, y_mask = self.prior_decoder(decoder_input, y_lengths, g=g)

        # split projected prior into mean / log-std halves along channels
        prior_info = decoder_output
        m_p = prior_info[:, : self.hidden_channels, :]
        logs_p = prior_info[:, self.hidden_channels :, :]

        # decoder
        # sample latent from the prior; noise_scale trades naturalness/stability
        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
        if self.use_flow:
            z = self.flow(z_p, y_mask, g=g, inverse=True)
        else:
            z = z_p

        # undo the training-time normalization: LF0 = (2595*log10(1+f/700))/500
        F0_std = 500
        F0 = predict_lf0 * F0_std
        F0 = F0 / 2595
        F0 = torch.pow(10, F0)
        F0 = (F0 - 1) * 700.0

        if self.vocoder_generator_type == "uhifigan":
            # sine excitation from predicted F0
            pitch_segments_expended = expand_f0(
                F0, self.hop_length, method=self.expand_f0_method
            )
            pitch_segments_expended = pitch_segments_expended.reshape(
                -1, pitch_segments_expended.shape[-1], 1
            )
            sine_waves, uv, noise = self.sine_generator(pitch_segments_expended)
            sine_waves = sine_waves.transpose(1, 2)
            wav = self.decoder(
                (z * y_mask)[:, :, :max_len], excitation=sine_waves, g=g
            )
        elif self.vocoder_generator_type == "avocodo":
            wav = self.decoder((z * y_mask)[:, :, :max_len], g=g)[-1]
        elif self.vocoder_generator_type == "visinger2":
            pitch_ = upsample(F0.transpose(1, 2), self.hop_length)
            omega = torch.cumsum(2 * math.pi * pitch_ / self.sample_rate, 1)
            sin = torch.sin(omega).transpose(1, 2)

            # dsp synthesize
            noise_x = self.dec_noise(z, y_mask)
            harm_x = self.dec_harm(F0, z, y_mask)

            # dsp waveform
            dsp_o = torch.cat([harm_x, noise_x], axis=1)

            # decoder_condition = torch.cat([harm_x, noise_x, sin], axis=1)
            decoder_condition = self.sin_prenet(sin)

            # dsp based HiFiGAN vocoder
            wav = self.decoder((z * y_mask)[:, :, :max_len], decoder_condition, g=g)
            # wav = dsp_o.sum(1)
            # wav = noise_x
            # wav = harm_x.sum(1)
        else:
            wav = self.decoder((z * y_mask)[:, :, :max_len], g=g)

    return wav.squeeze(1)
| 41,328 | 43.53556 | 88 | py |
espnet | espnet-master/espnet2/gan_svs/vits/text_encoder.py | # Copyright 2021 Tomoki Hayashi
# Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Text encoder module in VISinger.
This code is based on https://github.com/jaywalnut310/vits
and https://github.com/zhangyongmao/VISinger2.
"""
import math
from typing import Optional, Tuple
import torch
from espnet.nets.pytorch_backend.conformer.encoder import Encoder
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class TextEncoder(torch.nn.Module):
    """Text encoder module in VISinger.

    This is a module of text encoder described in `Conditional Variational Autoencoder
    with Adversarial Learning for End-to-End Text-to-Speech`_.

    Instead of the relative positional Transformer, we use conformer architecture as
    the encoder module, which contains additional convolution layers.

    .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
        Text-to-Speech`: https://arxiv.org/abs/2006.04558

    """

    def __init__(
        self,
        vocabs: int,
        attention_dim: int = 192,
        attention_heads: int = 2,
        linear_units: int = 768,
        blocks: int = 6,
        positionwise_layer_type: str = "conv1d",
        positionwise_conv_kernel_size: int = 3,
        positional_encoding_layer_type: str = "rel_pos",
        self_attention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        normalize_before: bool = True,
        use_macaron_style: bool = False,
        use_conformer_conv: bool = False,
        conformer_kernel_size: int = 7,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.0,
        attention_dropout_rate: float = 0.0,
        use_slur=True,
    ):
        """Initialize TextEncoder module.

        Args:
            vocabs (int): Vocabulary size.
            attention_dim (int): Attention dimension.
            attention_heads (int): Number of attention heads.
            linear_units (int): Number of linear units of positionwise layers.
            blocks (int): Number of encoder blocks.
            positionwise_layer_type (str): Positionwise layer type.
            positionwise_conv_kernel_size (int): Positionwise layer's kernel size.
            positional_encoding_layer_type (str): Positional encoding layer type.
            self_attention_layer_type (str): Self-attention layer type.
            activation_type (str): Activation function type.
            normalize_before (bool): Whether to apply LayerNorm before attention.
            use_macaron_style (bool): Whether to use macaron style components.
            use_conformer_conv (bool): Whether to use conformer conv layers.
            conformer_kernel_size (int): Conformer's conv kernel size.
            dropout_rate (float): Dropout rate.
            positional_dropout_rate (float): Dropout rate for positional encoding.
            attention_dropout_rate (float): Dropout rate for attention.
            use_slur (bool): Whether to use slur embedding.

        """
        super().__init__()
        # store for forward
        self.attention_dim = attention_dim

        # define modules
        # idim=-1 with input_layer=None: the conformer receives already
        # projected inputs from pre_net instead of building its own embedding.
        self.encoder = Encoder(
            idim=-1,
            input_layer=None,
            attention_dim=attention_dim,
            attention_heads=attention_heads,
            linear_units=linear_units,
            num_blocks=blocks,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            attention_dropout_rate=attention_dropout_rate,
            normalize_before=normalize_before,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            macaron_style=use_macaron_style,
            pos_enc_layer_type=positional_encoding_layer_type,
            selfattention_layer_type=self_attention_layer_type,
            activation_type=activation_type,
            use_cnn_module=use_conformer_conv,
            cnn_module_kernel=conformer_kernel_size,
        )

        # NOTE: the concatenated per-phone embedding sizes must total 512, the
        # fixed input size of pre_net / pre_dur_net below:
        #   phone (256) + pitch (128) + slur (64) + dur (64)  when use_slur
        #   phone (256) + pitch (128) + dur (128)             otherwise
        self.emb_phone_dim = 256
        self.emb_phone = torch.nn.Embedding(vocabs, self.emb_phone_dim)
        # scaled-normal init (std = dim ** -0.5), Transformer-embedding style
        torch.nn.init.normal_(self.emb_phone.weight, 0.0, self.emb_phone_dim**-0.5)

        self.emb_pitch_dim = 128
        self.emb_pitch = torch.nn.Embedding(
            129, self.emb_pitch_dim
        )  # Should we count the number of midis instead of 129?
        torch.nn.init.normal_(self.emb_pitch.weight, 0.0, self.emb_pitch_dim**-0.5)

        if use_slur:
            # binary slur flag embedding
            self.emb_slur = torch.nn.Embedding(2, 64)
            torch.nn.init.normal_(self.emb_slur.weight, 0.0, 64**-0.5)

        # duration embedding absorbs the slot freed when slur is disabled
        if use_slur:
            self.emb_dur = torch.nn.Linear(1, 64)
        else:
            self.emb_dur = torch.nn.Linear(1, 128)
        self.pre_net = torch.nn.Linear(512, attention_dim)
        self.pre_dur_net = torch.nn.Linear(512, attention_dim)

        self.proj = torch.nn.Conv1d(attention_dim, attention_dim, 1)
        self.proj_pitch = torch.nn.Conv1d(self.emb_pitch_dim, attention_dim, 1)

    def forward(
        self,
        phone: torch.Tensor,
        phone_lengths: torch.Tensor,
        midi_id: torch.Tensor,
        dur: torch.Tensor,
        slur: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Calculate forward propagation.

        Args:
            phone (Tensor): Input index tensor (B, T_text).
            phone_lengths (Tensor): Length tensor (B,).
            midi_id (Tensor): Input midi tensor (B, T_text).
            dur (Tensor): Input duration tensor (B, T_text).
            slur (Optional[Tensor]): Input slur flag tensor (B, T_text).

        Returns:
            Tensor: Encoded hidden representation (B, attention_dim, T_text).
            Tensor: Mask tensor for padded part (B, 1, T_text).
            Tensor: Encoded hidden representation for duration
                (B, attention_dim, T_text).
            Tensor: Encoded hidden representation for pitch
                (B, attention_dim, T_text).

        """
        # embeddings are scaled by sqrt(dim), as in Transformer
        phone_end = self.emb_phone(phone) * math.sqrt(self.emb_phone_dim)
        pitch_end = self.emb_pitch(midi_id) * math.sqrt(self.emb_pitch_dim)
        if slur is not None:
            slur_end = self.emb_slur(slur) * math.sqrt(64)
        dur = dur.float()
        dur_end = self.emb_dur(dur.unsqueeze(-1))
        # concatenate to a fixed 512-dim per-phone feature (see __init__ note)
        if slur is not None:
            x = torch.cat([phone_end, pitch_end, slur_end, dur_end], dim=-1)
        else:
            x = torch.cat([phone_end, pitch_end, dur_end], dim=-1)
        # separate projection for the duration predictor branch
        dur_input = self.pre_dur_net(x)
        dur_input = torch.transpose(dur_input, 1, -1)

        x = self.pre_net(x)
        # x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = (
            make_non_pad_mask(phone_lengths)
            .to(
                device=x.device,
                dtype=x.dtype,
            )
            .unsqueeze(1)
        )
        # encoder assume the channel last (B, T_text, attention_dim)
        # but mask shape shoud be (B, 1, T_text)
        x, _ = self.encoder(x, x_mask)
        # convert the channel first to (B, attention_dim, T_text)
        x = x.transpose(1, 2)
        x = self.proj(x) * x_mask

        # pitch-only path, used for the separate F0 decoder input
        pitch_info = self.proj_pitch(pitch_end.transpose(1, 2))

        return x, x_mask, dur_input, pitch_info
| 7,323 | 36.948187 | 86 | py |
espnet | espnet-master/espnet2/gan_svs/vits/length_regulator.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Length regulator related modules."""
import logging
import torch
from espnet.nets.pytorch_backend.nets_utils import pad_list
class LengthRegulator(torch.nn.Module):
    """Length regulator.

    Repeats each phone-level vector along the time axis according to its
    duration, producing a frame-level sequence (FastSpeech-style LR).
    """

    def __init__(self, pad_value=0.0):
        """Initilize length regulator module.

        Args:
            pad_value (float, optional): Value used for padding.
        """
        super().__init__()
        self.pad_value = pad_value

    def LR(self, x, duration, use_state_info=False):
        """Length regulates input mel-spectrograms to match duration.

        Args:
            x (Tensor): Input tensor (B, dim, T).
            duration (Tensor): Duration tensor (B, T).
            use_state_info (bool, optional): Whether to use position information or not.

        Returns:
            Tensor: Output tensor (B, dim, D_frame).
            Tensor: Output length (B,).
        """
        # expand every sequence independently, then pad to a common length
        expanded_seqs = [
            self.expand(seq, dur, use_state_info=use_state_info)
            for seq, dur in zip(torch.transpose(x, 1, 2), duration)
        ]
        frame_lens = [seq.shape[0] for seq in expanded_seqs]
        padded = pad_list(expanded_seqs, self.pad_value)  # (B, D_frame, dim)
        return torch.transpose(padded, 1, 2), torch.LongTensor(frame_lens)

    def expand(self, batch, predicted, use_state_info=False):
        """Expand input mel-spectrogram based on the predicted duration.

        Args:
            batch (Tensor): Input tensor (T, dim).
            predicted (Tensor): Predicted duration tensor (T,).
            use_state_info (bool, optional): Whether to use position information or not.

        Returns:
            Tensor: Output tensor (D_frame, dim).
        """
        pieces = []
        for idx, vec in enumerate(batch):
            # clamp negative durations to zero repeats
            n_frames = max(int(predicted[idx].item()), 0)
            repeated = vec.expand(n_frames, -1)
            if use_state_info:
                # append (frame index, total frames) columns to each repeat
                pos_col = torch.arange(0, n_frames).unsqueeze(1).float()
                len_col = torch.full((n_frames, 1), float(n_frames))
                state = torch.cat([pos_col, len_col], 1).to(vec.device)
                repeated = torch.cat([repeated, state], 1)
            pieces.append(repeated)
        return torch.cat(pieces, 0)

    def forward(self, x, duration, use_state_info=False):
        """Forward pass through the length regulator module.

        Args:
            x (Tensor): Input tensor (B, dim, T).
            duration (Tensor): Duration tensor (B, T).
            use_state_info (bool, optional): Whether to use position information or not.

        Returns:
            Tensor: Output tensor (B, dim, D_frame).
            Tensor: Output length (B,).
        """
        if duration.sum() == 0:
            logging.warning(
                "predicted durations includes all 0 sequences. "
                "fill the first element with 1."
            )
            # in-place: rows whose durations sum to zero are set to all ones
            duration[duration.sum(dim=1).eq(0)] = 1
        return self.LR(x, duration, use_state_info=use_state_info)
| 3,570 | 31.463636 | 88 | py |
espnet | espnet-master/espnet2/mt/espnet_model.py | import logging
from contextlib import contextmanager
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.e2e_mt_common import ErrorCalculator as MTErrorCalculator
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
if V(torch.__version__) >= V("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch<1.6.0
@contextmanager
def autocast(enabled=True):
yield
class ESPnetMTModel(AbsESPnetModel):
    """Encoder-Decoder model.

    Attention-based machine translation model: token-embedding frontend ->
    (optional pre-encoder) -> encoder -> (optional post-encoder) -> decoder,
    trained with label-smoothed cross entropy.
    """

    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        preencoder: Optional[AbsPreEncoder],
        encoder: AbsEncoder,
        postencoder: Optional[AbsPostEncoder],
        decoder: AbsDecoder,
        src_vocab_size: int = 0,
        src_token_list: Union[Tuple[str, ...], List[str]] = [],
        ignore_id: int = -1,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        report_bleu: bool = True,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        extract_feats_in_collect_stats: bool = True,
        share_decoder_input_output_embed: bool = False,
        share_encoder_decoder_input_embed: bool = False,
    ):
        """Initialize ESPnetMTModel.

        Args:
            vocab_size: Target vocabulary size (sos/eos share the last id).
            token_list: Target token list (copied, not referenced).
            frontend: Source token embedding frontend (may be None).
            preencoder / encoder / postencoder / decoder: Model components.
            src_vocab_size: Source vocabulary size; 0 disables src sos/eos.
            src_token_list: Source token list (currently unused here).
            ignore_id: Padding id ignored by the loss.
            lsm_weight: Label-smoothing weight.
            length_normalized_loss: Normalize loss by sequence length.
            report_bleu: Whether to compute BLEU during validation.
            sym_space / sym_blank: Special symbols for the error calculator.
            extract_feats_in_collect_stats: Run the frontend in collect_feats.
            share_decoder_input_output_embed: Tie decoder in/out embeddings.
            share_encoder_decoder_input_embed: Tie encoder/decoder input
                embeddings (requires equal vocab sizes).
        """
        assert check_argument_types()
        super().__init__()
        # note that eos is the same as sos (equivalent ID)
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        # src sos/eos only exist when a source vocabulary is configured
        self.src_sos = src_vocab_size - 1 if src_vocab_size else None
        self.src_eos = src_vocab_size - 1 if src_vocab_size else None
        self.vocab_size = vocab_size
        self.src_vocab_size = src_vocab_size
        self.ignore_id = ignore_id
        self.token_list = token_list.copy()

        # weight tying: decoder output projection <- decoder input embedding
        if share_decoder_input_output_embed:
            if decoder.output_layer is not None:
                decoder.output_layer.weight = decoder.embed[0].weight
                logging.info(
                    "Decoder input embedding and output linear layer are shared"
                )
            else:
                logging.warning(
                    "Decoder has no output layer, so it cannot be shared "
                    "with input embedding"
                )

        # weight tying: encoder (frontend) input embedding <- decoder embedding
        if share_encoder_decoder_input_embed:
            if src_vocab_size == vocab_size:
                frontend.embed[0].weight = decoder.embed[0].weight
                logging.info("Encoder and decoder input embeddings are shared")
            else:
                logging.warning(
                    f"src_vocab_size ({src_vocab_size}) does not equal tgt_vocab_size"
                    f" ({vocab_size}), so the encoder and decoder input embeddings "
                    "cannot be shared"
                )

        self.frontend = frontend
        self.preencoder = preencoder
        self.postencoder = postencoder
        self.encoder = encoder
        self.decoder = decoder
        self.criterion_mt = LabelSmoothingLoss(
            size=vocab_size,
            padding_idx=ignore_id,
            smoothing=lsm_weight,
            normalize_length=length_normalized_loss,
        )

        # MT error calculator
        if report_bleu:
            self.mt_error_calculator = MTErrorCalculator(
                token_list, sym_space, sym_blank, report_bleu
            )
        else:
            self.mt_error_calculator = None

        self.extract_feats_in_collect_stats = extract_feats_in_collect_stats

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        src_text: torch.Tensor,
        src_text_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            text: (Batch, Length)
            text_lengths: (Batch,)
            src_text: (Batch, length)
            src_text_lengths: (Batch,)
            kwargs: "utt_id" is among the input.

        Returns:
            Tuple of (loss, stats dict, batch-size weight) gathered for
            DataParallel.
        """
        assert text_lengths.dim() == 1, text_lengths.shape
        # Check that batch_size is unified
        assert (
            text.shape[0]
            == text_lengths.shape[0]
            == src_text.shape[0]
            == src_text_lengths.shape[0]
        ), (text.shape, text_lengths.shape, src_text.shape, src_text_lengths.shape)
        batch_size = src_text.shape[0]

        # for data-parallel: trim padding to the longest sequence in the batch
        text = text[:, : text_lengths.max()]
        src_text = src_text[:, : src_text_lengths.max()]

        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(src_text, src_text_lengths)

        # 2a. Attention-decoder branch (MT)
        loss_mt_att, acc_mt_att, bleu_mt_att = self._calc_mt_att_loss(
            encoder_out, encoder_out_lens, text, text_lengths
        )

        # 3. Loss computation
        loss = loss_mt_att

        stats = dict(
            loss=loss.detach(),
            acc=acc_mt_att,
            bleu=bleu_mt_att,
        )

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight

    def collect_feats(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        src_text: torch.Tensor,
        src_text_lengths: torch.Tensor,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Return source features for statistics collection."""
        if self.extract_feats_in_collect_stats:
            feats, feats_lengths = self._extract_feats(src_text, src_text_lengths)
        else:
            # Generate dummy stats if extract_feats_in_collect_stats is False
            logging.warning(
                "Generating dummy stats for feats and feats_lengths, "
                "because encoder_conf.extract_feats_in_collect_stats is "
                f"{self.extract_feats_in_collect_stats}"
            )
            feats, feats_lengths = src_text, src_text_lengths
        return {"feats": feats, "feats_lengths": feats_lengths}

    def encode(
        self, src_text: torch.Tensor, src_text_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Frontend + Encoder. Note that this method is used by mt_inference.py

        Args:
            src_text: (Batch, Length, ...)
            src_text_lengths: (Batch, )

        Returns:
            Tuple of encoder output (Batch, Length2, Dim2) and its lengths.
        """
        # embedding lookup runs in fp32 even under mixed precision
        with autocast(False):
            # 1. Extract feats
            feats, feats_lengths = self._extract_feats(src_text, src_text_lengths)

            # 2. Data augmentation
            # if self.specaug is not None and self.training:
            #     feats, feats_lengths = self.specaug(feats, feats_lengths)

        # Pre-encoder, e.g. used for raw input data
        if self.preencoder is not None:
            feats, feats_lengths = self.preencoder(feats, feats_lengths)

        # 4. Forward encoder
        # feats: (Batch, Length, Dim)
        # -> encoder_out: (Batch, Length2, Dim2)
        encoder_out, encoder_out_lens, _ = self.encoder(feats, feats_lengths)

        # Post-encoder, e.g. NLU
        if self.postencoder is not None:
            encoder_out, encoder_out_lens = self.postencoder(
                encoder_out, encoder_out_lens
            )

        assert encoder_out.size(0) == src_text.size(0), (
            encoder_out.size(),
            src_text.size(0),
        )
        assert encoder_out.size(1) <= encoder_out_lens.max(), (
            encoder_out.size(),
            encoder_out_lens.max(),
        )

        return encoder_out, encoder_out_lens

    def _extract_feats(
        self, src_text: torch.Tensor, src_text_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Add src sos/eos and embed source tokens via the frontend."""
        assert src_text_lengths.dim() == 1, src_text_lengths.shape

        # for data-parallel
        src_text = src_text[:, : src_text_lengths.max()]
        # prepend sos (length grows by one)
        src_text, _ = add_sos_eos(src_text, self.src_sos, self.src_eos, self.ignore_id)
        src_text_lengths = src_text_lengths + 1

        if self.frontend is not None:
            # Frontend
            #  e.g. Embedding Lookup
            # src_text (Batch, NSamples) -> feats: (Batch, NSamples, Dim)
            feats, feats_lengths = self.frontend(src_text, src_text_lengths)
        else:
            # No frontend and no feature extract
            feats, feats_lengths = src_text, src_text_lengths
        return feats, feats_lengths

    def _calc_mt_att_loss(
        self,
        encoder_out: torch.Tensor,
        encoder_out_lens: torch.Tensor,
        ys_pad: torch.Tensor,
        ys_pad_lens: torch.Tensor,
    ):
        """Compute label-smoothed CE loss, accuracy, and (eval-only) BLEU."""
        # teacher forcing: sos-prefixed input vs eos-suffixed target
        ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
        ys_in_lens = ys_pad_lens + 1

        # 1. Forward decoder
        decoder_out, _ = self.decoder(
            encoder_out, encoder_out_lens, ys_in_pad, ys_in_lens
        )

        # 2. Compute attention loss
        loss_att = self.criterion_mt(decoder_out, ys_out_pad)
        acc_att = th_accuracy(
            decoder_out.view(-1, self.vocab_size),
            ys_out_pad,
            ignore_label=self.ignore_id,
        )

        # Compute cer/wer using attention-decoder
        # BLEU is skipped in training mode to avoid the decode overhead
        if self.training or self.mt_error_calculator is None:
            bleu_att = None
        else:
            ys_hat = decoder_out.argmax(dim=-1)
            bleu_att = self.mt_error_calculator(ys_hat.cpu(), ys_pad.cpu())

        return loss_att, acc_att, bleu_att
| 10,123 | 34.900709 | 88 | py |
espnet | espnet-master/espnet2/mt/frontend/embedding.py | #!/usr/bin/env python3
# 2020, Technische Universität München; Ludwig Kürzinger
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Embedding Frontend for text based inputs."""
from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
class Embedding(AbsFrontend):
    """Embedding Frontend for text based inputs."""

    def __init__(
        self,
        input_size: int = 400,
        embed_dim: int = 400,
        pos_enc_class=PositionalEncoding,
        positional_dropout_rate: float = 0.1,
    ):
        """Initialize.

        Args:
            input_size: Number of input tokens.
            embed_dim: Embedding Size.
            pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
            positional_dropout_rate: dropout rate after adding positional encoding
        """
        assert check_argument_types()
        super().__init__()
        self.embed_dim = embed_dim
        # TODO(sdalmia): check for padding idx
        token_embedding = torch.nn.Embedding(input_size, embed_dim)
        positional_encoding = pos_enc_class(embed_dim, positional_dropout_rate)
        self.embed = torch.nn.Sequential(token_embedding, positional_encoding)

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Embed the input tokens and add positional encodings.

        Args:
            input: Input (B, T) or (B, T,D), with D.
            input_lengths: Input lengths within batch.

        Returns:
            Tensor: Output with dimensions (B, T, D).
            Tensor: Output lengths within batch.
        """
        # lengths are unchanged: embedding is a per-token operation
        embedded = self.embed(input)
        return embedded, input_lengths

    def output_size(self) -> int:
        """Return output length of feature dimension D, i.e. the embedding dim."""
        return self.embed_dim
| 1,938 | 29.777778 | 82 | py |
espnet | espnet-master/espnet2/enh/espnet_model.py | """Enhancement model module."""
import contextlib
from typing import Dict, List, Optional, OrderedDict, Tuple
import numpy as np
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.diar.layers.abs_mask import AbsMask
from espnet2.enh.decoder.abs_decoder import AbsDecoder
from espnet2.enh.encoder.abs_encoder import AbsEncoder
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainLoss
from espnet2.enh.loss.criterions.time_domain import TimeDomainLoss
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet2.enh.separator.dan_separator import DANSeparator
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
# True when the installed torch version is at least 1.9.0.
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")

# Smallest positive increment of the default floating dtype; used to avoid
# division by zero / log(0) in loss computations.
EPS = torch.finfo(torch.get_default_dtype()).eps
class ESPnetEnhancementModel(AbsESPnetModel):
"""Speech enhancement or separation Frontend model"""
def __init__(
self,
encoder: AbsEncoder,
separator: AbsSeparator,
decoder: AbsDecoder,
mask_module: Optional[AbsMask],
loss_wrappers: List[AbsLossWrapper],
stft_consistency: bool = False,
loss_type: str = "mask_mse",
mask_type: Optional[str] = None,
extract_feats_in_collect_stats: bool = False,
):
assert check_argument_types()
super().__init__()
self.encoder = encoder
self.separator = separator
self.decoder = decoder
self.mask_module = mask_module
self.num_spk = separator.num_spk
self.num_noise_type = getattr(self.separator, "num_noise_type", 1)
self.loss_wrappers = loss_wrappers
names = [w.criterion.name for w in self.loss_wrappers]
if len(set(names)) != len(names):
raise ValueError("Duplicated loss names are not allowed: {}".format(names))
# get mask type for TF-domain models
# (only used when loss_type="mask_*") (deprecated, keep for compatibility)
self.mask_type = mask_type.upper() if mask_type else None
# get loss type for model training (deprecated, keep for compatibility)
self.loss_type = loss_type
# whether to compute the TF-domain loss while enforcing STFT consistency
# (deprecated, keep for compatibility)
# NOTE: STFT consistency is now always used for frequency-domain spectrum losses
self.stft_consistency = stft_consistency
# for multi-channel signal
self.ref_channel = getattr(self.separator, "ref_channel", None)
if self.ref_channel is None:
self.ref_channel = 0
# Used in espnet2/tasks/abs_task.py for determining whether or not to do
# collect_feats during collect stats (stage 5).
self.extract_feats_in_collect_stats = extract_feats_in_collect_stats
    def forward(
        self,
        speech_mix: torch.Tensor,
        speech_mix_lengths: torch.Tensor = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            speech_mix: (Batch, samples) or (Batch, samples, channels)
            speech_ref: (Batch, num_speaker, samples)
                        or (Batch, num_speaker, samples, channels)
            speech_mix_lengths: (Batch,), default None for chunk interator,
                        because the chunk-iterator does not have the
                        speech_lengths returned. see in
                        espnet2/iterators/chunk_iter_factory.py
            kwargs: "utt_id" is among the input.

        Returns:
            loss: scalar training loss
            stats: dict of detached statistics for logging
            weight: batch weight for DataParallel gathering
        """
        # reference speech signal of each speaker; missing speakers fall back
        # to all-zero references shaped like speaker 1.
        assert "speech_ref1" in kwargs, "At least 1 reference signal input is required."
        speech_ref = [
            kwargs.get(
                f"speech_ref{spk + 1}",
                torch.zeros_like(kwargs["speech_ref1"]),
            )
            for spk in range(self.num_spk)
        ]
        # (Batch, num_speaker, samples) or (Batch, num_speaker, samples, channels)
        speech_ref = torch.stack(speech_ref, dim=1)

        if "noise_ref1" in kwargs:
            # noise signal (optional, required when using beamforming-based
            # frontend models)
            noise_ref = [
                kwargs["noise_ref{}".format(n + 1)] for n in range(self.num_noise_type)
            ]
            # (Batch, num_noise_type, samples) or
            # (Batch, num_noise_type, samples, channels)
            noise_ref = torch.stack(noise_ref, dim=1)
        else:
            noise_ref = None

        # dereverberated (noisy) signal
        # (optional, only used for frontend models with WPE)
        if "dereverb_ref1" in kwargs:
            # noise signal (optional, required when using
            # frontend models with beamformering)
            dereverb_speech_ref = [
                kwargs["dereverb_ref{}".format(n + 1)]
                for n in range(self.num_spk)
                if "dereverb_ref{}".format(n + 1) in kwargs
            ]
            # either one shared dereverb reference or one per speaker
            assert len(dereverb_speech_ref) in (1, self.num_spk), len(
                dereverb_speech_ref
            )
            # (Batch, N, samples) or (Batch, N, samples, channels)
            dereverb_speech_ref = torch.stack(dereverb_speech_ref, dim=1)
        else:
            dereverb_speech_ref = None

        batch_size = speech_mix.shape[0]
        # When lengths are not provided (chunk iterator), every utterance is
        # assumed to span the full padded length.
        speech_lengths = (
            speech_mix_lengths
            if speech_mix_lengths is not None
            else torch.ones(batch_size).int().fill_(speech_mix.shape[1])
        )
        assert speech_lengths.dim() == 1, speech_lengths.shape
        # Check that batch_size is unified
        assert speech_mix.shape[0] == speech_ref.shape[0] == speech_lengths.shape[0], (
            speech_mix.shape,
            speech_ref.shape,
            speech_lengths.shape,
        )

        # for data-parallel: truncate everything to the longest real length
        speech_ref = speech_ref[..., : speech_lengths.max()].unbind(dim=1)
        if noise_ref is not None:
            noise_ref = noise_ref[..., : speech_lengths.max()].unbind(dim=1)
        if dereverb_speech_ref is not None:
            dereverb_speech_ref = dereverb_speech_ref[..., : speech_lengths.max()]
            dereverb_speech_ref = dereverb_speech_ref.unbind(dim=1)

        additional = {}
        # Additional data is required in Deep Attractor Network
        if isinstance(self.separator, DANSeparator):
            additional["feature_ref"] = [
                self.encoder(r, speech_lengths)[0] for r in speech_ref
            ]

        speech_mix = speech_mix[:, : speech_lengths.max()]

        # model forward
        speech_pre, feature_mix, feature_pre, others = self.forward_enhance(
            speech_mix, speech_lengths, additional
        )

        # loss computation
        loss, stats, weight, perm = self.forward_loss(
            speech_pre,
            speech_lengths,
            feature_mix,
            feature_pre,
            others,
            speech_ref,
            noise_ref,
            dereverb_speech_ref,
        )
        return loss, stats, weight
def forward_enhance(
self,
speech_mix: torch.Tensor,
speech_lengths: torch.Tensor,
additional: Optional[Dict] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
feature_mix, flens = self.encoder(speech_mix, speech_lengths)
if self.mask_module is None:
feature_pre, flens, others = self.separator(feature_mix, flens, additional)
else:
# Obtain bottleneck_feats from separator.
# This is used for the input of diarization module in "enh + diar" task
bottleneck_feats, bottleneck_feats_lengths = self.separator(
feature_mix, flens
)
if additional.get("num_spk") is not None:
feature_pre, flens, others = self.mask_module(
feature_mix, flens, bottleneck_feats, additional["num_spk"]
)
others["bottleneck_feats"] = bottleneck_feats
others["bottleneck_feats_lengths"] = bottleneck_feats_lengths
else:
feature_pre = None
others = {
"bottleneck_feats": bottleneck_feats,
"bottleneck_feats_lengths": bottleneck_feats_lengths,
}
if feature_pre is not None:
# for models like SVoice that output multiple lists of separated signals
pre_is_multi_list = isinstance(feature_pre[0], (list, tuple))
if pre_is_multi_list:
speech_pre = [
[self.decoder(p, speech_lengths)[0] for p in ps]
for ps in feature_pre
]
else:
speech_pre = [self.decoder(ps, speech_lengths)[0] for ps in feature_pre]
else:
# some models (e.g. neural beamformer trained with mask loss)
# do not predict time-domain signal in the training stage
speech_pre = None
return speech_pre, feature_mix, feature_pre, others
    def forward_loss(
        self,
        speech_pre: torch.Tensor,
        speech_lengths: torch.Tensor,
        feature_mix: torch.Tensor,
        feature_pre: torch.Tensor,
        others: OrderedDict,
        speech_ref: torch.Tensor,
        noise_ref: torch.Tensor = None,
        dereverb_speech_ref: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Accumulate every configured loss wrapper into one training loss.

        Each wrapper's criterion is dispatched on its type (time-domain vs
        frequency-domain) and on its flags (is_noise_loss / is_dereverb_loss),
        selecting the matching reference/estimate pair.  Wrappers with weight
        0.0 are still evaluated (for logging) but under torch.no_grad().

        Args:
            speech_pre: list of decoded signals from forward_enhance (or None)
            speech_lengths: (Batch,)
            feature_mix: encoded mixture feature
            feature_pre: separated features (or None)
            others: auxiliary separator outputs (masks, predicted noise, ...)
            speech_ref: list of reference speech signals
            noise_ref: list of reference noise signals, or None
            dereverb_speech_ref: list of dereverberated references, or None

        Returns:
            loss, stats, weight (force_gatherable) and the first permutation
            found in any wrapper output (or None).
        """
        # for calculating loss on estimated noise signals
        if getattr(self.separator, "predict_noise", False):
            assert "noise1" in others, others.keys()
        if noise_ref is not None and "noise1" in others:
            # decode predicted noise features to the time domain in place
            for n in range(self.num_noise_type):
                key = "noise{}".format(n + 1)
                others[key] = self.decoder(others[key], speech_lengths)[0]
        # for calculating loss on dereverberated signals
        if getattr(self.separator, "predict_dereverb", False):
            assert "dereverb1" in others, others.keys()
        if dereverb_speech_ref is not None and "dereverb1" in others:
            for spk in range(self.num_spk):
                key = "dereverb{}".format(spk + 1)
                if key in others:
                    others[key] = self.decoder(others[key], speech_lengths)[0]

        loss = 0.0
        stats = {}
        o = {}
        perm = None
        for loss_wrapper in self.loss_wrappers:
            criterion = loss_wrapper.criterion
            # test-only criteria are skipped during training
            if getattr(criterion, "only_for_test", False) and self.training:
                continue

            # Select the (reference, estimate) pair this criterion compares.
            if getattr(criterion, "is_noise_loss", False):
                if noise_ref is None:
                    raise ValueError(
                        "No noise reference for training!\n"
                        'Please specify "--use_noise_ref true" in run.sh'
                    )
                signal_ref = noise_ref
                signal_pre = [
                    others["noise{}".format(n + 1)] for n in range(self.num_noise_type)
                ]
            elif getattr(criterion, "is_dereverb_loss", False):
                if dereverb_speech_ref is None:
                    raise ValueError(
                        "No dereverberated reference for training!\n"
                        'Please specify "--use_dereverb_ref true" in run.sh'
                    )
                signal_ref = dereverb_speech_ref
                signal_pre = [
                    others["dereverb{}".format(n + 1)]
                    for n in range(self.num_noise_type)
                    if "dereverb{}".format(n + 1) in others
                ]
                if len(signal_pre) == 0:
                    signal_pre = None
            else:
                signal_ref = speech_ref
                signal_pre = speech_pre

            # weight-0 wrappers contribute stats only; no gradients needed
            zero_weight = loss_wrapper.weight == 0.0
            if isinstance(criterion, TimeDomainLoss):
                assert signal_pre is not None
                sref, spre = self._align_ref_pre_channels(
                    signal_ref, signal_pre, ch_dim=2, force_1ch=True
                )
                # for the time domain criterions
                with torch.no_grad() if zero_weight else contextlib.ExitStack():
                    l, s, o = loss_wrapper(sref, spre, {**others, **o})
            elif isinstance(criterion, FrequencyDomainLoss):
                sref, spre = self._align_ref_pre_channels(
                    signal_ref, signal_pre, ch_dim=2, force_1ch=False
                )
                # for the time-frequency domain criterions
                if criterion.compute_on_mask:
                    # compute loss on masks
                    if getattr(criterion, "is_noise_loss", False):
                        tf_ref, tf_pre = self._get_noise_masks(
                            criterion,
                            feature_mix,
                            speech_ref,
                            signal_ref,
                            signal_pre,
                            speech_lengths,
                            others,
                        )
                    elif getattr(criterion, "is_dereverb_loss", False):
                        tf_ref, tf_pre = self._get_dereverb_masks(
                            criterion,
                            feature_mix,
                            noise_ref,
                            signal_ref,
                            signal_pre,
                            speech_lengths,
                            others,
                        )
                    else:
                        tf_ref, tf_pre = self._get_speech_masks(
                            criterion,
                            feature_mix,
                            noise_ref,
                            signal_ref,
                            signal_pre,
                            speech_lengths,
                            others,
                        )
                else:
                    # compute on spectrum
                    tf_ref = [self.encoder(sr, speech_lengths)[0] for sr in sref]
                    # for models like SVoice that output multiple lists of
                    # separated signals
                    pre_is_multi_list = isinstance(spre[0], (list, tuple))
                    if pre_is_multi_list:
                        tf_pre = [
                            [self.encoder(sp, speech_lengths)[0] for sp in ps]
                            for ps in spre
                        ]
                    else:
                        tf_pre = [self.encoder(sp, speech_lengths)[0] for sp in spre]
                with torch.no_grad() if zero_weight else contextlib.ExitStack():
                    l, s, o = loss_wrapper(tf_ref, tf_pre, {**others, **o})
            else:
                raise NotImplementedError("Unsupported loss type: %s" % str(criterion))

            loss += l * loss_wrapper.weight
            stats.update(s)

            # keep the first permutation produced by any wrapper
            if perm is None and "perm" in o:
                perm = o["perm"]

        if self.training and isinstance(loss, float):
            # loss stayed the initial 0.0 float -> no trainable criterion ran
            raise AttributeError(
                "At least one criterion must satisfy: only_for_test=False"
            )
        stats["loss"] = loss.detach()

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        batch_size = speech_ref[0].shape[0]
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight, perm
    def _align_ref_pre_channels(self, ref, pre, ch_dim=2, force_1ch=False):
        """Make reference and estimated signal lists channel-compatible.

        Whichever side has an extra channel dimension is reduced by selecting
        ``self.ref_channel``; with ``force_1ch`` both sides are reduced when
        both are multi-channel.

        Args:
            ref: list of reference time-domain signals (or None)
            pre: list (or list of lists, e.g. SVoice) of estimates (or None)
            ch_dim: index of the channel dimension
            force_1ch: also reduce when both sides are 3-D (multi-channel)

        Returns:
            (ref, pre) with matching dimensionality
        """
        if ref is None or pre is None:
            return ref, pre
        # NOTE: input must be a list of time-domain signals
        index = ref[0].new_tensor(self.ref_channel, dtype=torch.long)

        # for models like SVoice that output multiple lists of separated signals
        pre_is_multi_list = isinstance(pre[0], (list, tuple))
        pre_dim = pre[0][0].dim() if pre_is_multi_list else pre[0].dim()

        if ref[0].dim() > pre_dim:
            # multi-channel reference and single-channel output
            ref = [r.index_select(ch_dim, index).squeeze(ch_dim) for r in ref]
        elif ref[0].dim() < pre_dim:
            # single-channel reference and multi-channel output
            # NOTE(review): in the multi-list case the nested lists are
            # flattened into one flat list here — confirm downstream code
            # expects the flattened structure.
            if pre_is_multi_list:
                pre = [
                    p.index_select(ch_dim, index).squeeze(ch_dim)
                    for plist in pre
                    for p in plist
                ]
            else:
                pre = [p.index_select(ch_dim, index).squeeze(ch_dim) for p in pre]
        elif ref[0].dim() == pre_dim == 3 and force_1ch:
            # multi-channel reference and output
            ref = [r.index_select(ch_dim, index).squeeze(ch_dim) for r in ref]
            if pre_is_multi_list:
                pre = [
                    p.index_select(ch_dim, index).squeeze(ch_dim)
                    for plist in pre
                    for p in plist
                ]
            else:
                pre = [p.index_select(ch_dim, index).squeeze(ch_dim) for p in pre]
        return ref, pre
def _get_noise_masks(
self, criterion, feature_mix, speech_ref, noise_ref, noise_pre, ilens, others
):
speech_spec = self.encoder(sum(speech_ref), ilens)[0]
masks_ref = criterion.create_mask_label(
feature_mix,
[self.encoder(nr, ilens)[0] for nr in noise_ref],
noise_spec=speech_spec,
)
if "mask_noise1" in others:
masks_pre = [
others["mask_noise{}".format(n + 1)] for n in range(self.num_noise_type)
]
else:
assert len(noise_pre) == len(noise_ref), (len(noise_pre), len(noise_ref))
masks_pre = criterion.create_mask_label(
feature_mix,
[self.encoder(np, ilens)[0] for np in noise_pre],
noise_spec=speech_spec,
)
return masks_ref, masks_pre
def _get_dereverb_masks(
self, criterion, feat_mix, noise_ref, dereverb_ref, dereverb_pre, ilens, others
):
if noise_ref is not None:
noise_spec = self.encoder(sum(noise_ref), ilens)[0]
else:
noise_spec = None
masks_ref = criterion.create_mask_label(
feat_mix,
[self.encoder(dr, ilens)[0] for dr in dereverb_ref],
noise_spec=noise_spec,
)
if "mask_dereverb1" in others:
masks_pre = [
others["mask_dereverb{}".format(spk + 1)]
for spk in range(self.num_spk)
if "mask_dereverb{}".format(spk + 1) in others
]
assert len(masks_pre) == len(masks_ref), (len(masks_pre), len(masks_ref))
else:
assert len(dereverb_pre) == len(dereverb_ref), (
len(dereverb_pre),
len(dereverb_ref),
)
masks_pre = criterion.create_mask_label(
feat_mix,
[self.encoder(dp, ilens)[0] for dp in dereverb_pre],
noise_spec=noise_spec,
)
return masks_ref, masks_pre
def _get_speech_masks(
self, criterion, feature_mix, noise_ref, speech_ref, speech_pre, ilens, others
):
if noise_ref is not None:
noise_spec = self.encoder(sum(noise_ref), ilens)[0]
else:
noise_spec = None
masks_ref = criterion.create_mask_label(
feature_mix,
[self.encoder(sr, ilens)[0] for sr in speech_ref],
noise_spec=noise_spec,
)
if "mask_spk1" in others:
masks_pre = [
others["mask_spk{}".format(spk + 1)] for spk in range(self.num_spk)
]
else:
masks_pre = criterion.create_mask_label(
feature_mix,
[self.encoder(sp, ilens)[0] for sp in speech_pre],
noise_spec=noise_spec,
)
return masks_ref, masks_pre
@staticmethod
def sort_by_perm(nn_output, perm):
"""Sort the input list of tensors by the specified permutation.
Args:
nn_output: List[torch.Tensor(Batch, ...)], len(nn_output) == num_spk
perm: (Batch, num_spk) or List[torch.Tensor(num_spk)]
Returns:
nn_output_new: List[torch.Tensor(Batch, ...)]
"""
if len(nn_output) == 1:
return nn_output
# (Batch, num_spk, ...)
nn_output = torch.stack(nn_output, dim=1)
if not isinstance(perm, torch.Tensor):
# perm is a list or tuple
perm = torch.stack(perm, dim=0)
assert nn_output.size(1) == perm.size(1), (nn_output.shape, perm.shape)
diff_dim = nn_output.dim() - perm.dim()
if diff_dim > 0:
perm = perm.view(*perm.shape, *[1 for _ in range(diff_dim)]).expand_as(
nn_output
)
return torch.gather(nn_output, 1, perm).unbind(dim=1)
def collect_feats(
self, speech_mix: torch.Tensor, speech_mix_lengths: torch.Tensor, **kwargs
) -> Dict[str, torch.Tensor]:
# for data-parallel
speech_mix = speech_mix[:, : speech_mix_lengths.max()]
feats, feats_lengths = speech_mix, speech_mix_lengths
return {"feats": feats, "feats_lengths": feats_lengths}
| 21,569 | 40.401152 | 88 | py |
espnet | espnet-master/espnet2/enh/espnet_enh_s2t_model.py | import logging
import random
from contextlib import contextmanager
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from packaging.version import parse as V
from scipy.optimize import linear_sum_assignment
from typeguard import check_argument_types
from espnet2.asr.espnet_model import ESPnetASRModel
from espnet2.diar.espnet_model import ESPnetDiarizationModel
from espnet2.enh.espnet_model import ESPnetEnhancementModel
from espnet2.st.espnet_model import ESPnetSTModel
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
# torch>=1.6 provides automatic mixed precision; older versions get a no-op
# stand-in so call sites can use ``with autocast():`` unconditionally.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        yield
class ESPnetEnhS2TModel(AbsESPnetModel):
"""Joint model Enhancement and Speech to Text."""
def __init__(
self,
enh_model: ESPnetEnhancementModel,
s2t_model: Union[ESPnetASRModel, ESPnetSTModel, ESPnetDiarizationModel],
calc_enh_loss: bool = True,
bypass_enh_prob: float = 0, # 0 means do not bypass enhancement for all data
):
assert check_argument_types()
super().__init__()
self.enh_model = enh_model
self.s2t_model = s2t_model # ASR or ST or DIAR model
self.bypass_enh_prob = bypass_enh_prob
self.calc_enh_loss = calc_enh_loss
if isinstance(self.s2t_model, ESPnetDiarizationModel):
self.extract_feats_in_collect_stats = False
else:
self.extract_feats_in_collect_stats = (
self.s2t_model.extract_feats_in_collect_stats
)
if (
self.enh_model.num_spk is not None
and self.enh_model.num_spk > 1
and isinstance(self.s2t_model, ESPnetASRModel)
):
if self.calc_enh_loss:
logging.warning("The permutation issue will be handled by the Enh loss")
else:
logging.warning("The permutation issue will be handled by the CTC loss")
    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, ) default None for chunk interator,
                because the chunk-iterator does not
                have the speech_lengths returned.
                see in
                espnet2/iterators/chunk_iter_factory.py

            For Enh+ASR task:
                text_spk1: (Batch, Length)
                text_spk2: (Batch, Length)
                ...
                text_spk1_lengths: (Batch,)
                text_spk2_lengths: (Batch,)
                ...

            For other tasks:
                text: (Batch, Length) default None just to keep the argument order
                text_lengths: (Batch,)
                    default None for the same reason as speech_lengths
        """
        # --- Gather text references: either a single "text" tensor or one
        # "text_spk{n}" tensor per speaker (then stacked along dim=2). ---
        if "text" in kwargs:
            text = kwargs["text"]
            text_ref_lengths = [kwargs.get("text_lengths", None)]
            if text_ref_lengths[0] is not None:
                text_length_max = max(
                    ref_lengths.max() for ref_lengths in text_ref_lengths
                )
            else:
                text_length_max = text.shape[1]
        else:
            text_ref = [
                kwargs["text_spk{}".format(spk + 1)]
                for spk in range(self.enh_model.num_spk)
            ]
            text_ref_lengths = [
                kwargs.get("text_spk{}_lengths".format(spk + 1), None)
                for spk in range(self.enh_model.num_spk)
            ]
            # for data-parallel
            if text_ref_lengths[0] is not None:
                text_length_max = max(
                    ref_lengths.max() for ref_lengths in text_ref_lengths
                )
            else:
                text_length_max = max(text.shape[1] for text in text_ref)
            # pad text sequences of different speakers to the same length
            ignore_id = getattr(self.s2t_model, "ignore_id", -1)
            text = torch.stack(
                [
                    F.pad(ref, (0, text_length_max - ref.shape[1]), value=ignore_id)
                    for ref in text_ref
                ],
                dim=2,
            )
        if text_ref_lengths[0] is not None:
            assert all(ref_lengths.dim() == 1 for ref_lengths in text_ref_lengths), (
                ref_lengths.shape for ref_lengths in text_ref_lengths
            )

        if speech_lengths is not None and text_ref_lengths[0] is not None:
            # Check that batch_size is unified
            assert (
                speech.shape[0]
                == speech_lengths.shape[0]
                == text.shape[0]
                == text_ref_lengths[0].shape[0]
            ), (
                speech.shape,
                speech_lengths.shape,
                text.shape,
                text_ref_lengths[0].shape,
            )
        else:
            assert speech.shape[0] == text.shape[0], (speech.shape, text.shape)

        # additional checks with valid src_text
        if "src_text" in kwargs:
            src_text = kwargs["src_text"]
            src_text_lengths = kwargs["src_text_lengths"]
            if src_text is not None:
                assert src_text_lengths.dim() == 1, src_text_lengths.shape
                # NOTE(review): ``text_ref`` is only bound in the per-speaker
                # branch above; if "src_text" is supplied together with a
                # plain "text" input, this line would raise NameError —
                # confirm the intended usage.
                assert (
                    text_ref[0].shape[0]
                    == src_text.shape[0]
                    == src_text_lengths.shape[0]
                ), (
                    text_ref[0].shape,
                    src_text.shape,
                    src_text_lengths.shape,
                )
        else:
            src_text = None
            src_text_lengths = None

        batch_size = speech.shape[0]
        # Without explicit lengths (chunk iterator), assume full-length inputs.
        speech_lengths = (
            speech_lengths
            if speech_lengths is not None
            else torch.ones(batch_size).int() * speech.shape[1]
        )

        # number of speakers
        # Take the number of speakers from text
        # (= spk_label [Batch, length, num_spk] ) if it is 3-D.
        # This is to handle flexible number of speakers.
        # Used only in "enh + diar" task for now.
        num_spk = text.shape[2] if text.dim() == 3 else self.enh_model.num_spk
        if self.enh_model.num_spk is not None:
            # for compatibility with TCNSeparatorNomask in enh_diar
            assert num_spk == self.enh_model.num_spk, (num_spk, self.enh_model.num_spk)

        # clean speech signal of each speaker (only needed for the Enh loss)
        speech_ref = None
        if self.calc_enh_loss:
            assert "speech_ref1" in kwargs
            speech_ref = [
                kwargs["speech_ref{}".format(spk + 1)] for spk in range(num_spk)
            ]
            # (Batch, num_speaker, samples) or (Batch, num_speaker, samples, channels)
            speech_ref = torch.stack(speech_ref, dim=1)
            # for data-parallel
            speech_ref = speech_ref[..., : speech_lengths.max()]
            speech_ref = speech_ref.unbind(dim=1)

        # Calculating enhancement loss: decide from the utterance-id suffix
        # whether to run the frontend and whether to compute its loss.
        utt_id = kwargs.get("utt_id", None)
        bypass_enh_flag, skip_enhloss_flag = False, False
        if utt_id is not None and not isinstance(
            self.s2t_model, ESPnetDiarizationModel
        ):
            # TODO(xkc): to pass category info and use predefined category list
            if utt_id[0].endswith("CLEAN"):
                # For clean data
                # feed it to Enhancement, without calculating loss_enh
                bypass_enh_flag = True
                skip_enhloss_flag = True
            elif utt_id[0].endswith("REAL"):
                # For single-speaker real data
                # feed it to Enhancement but without calculating loss_enh
                bypass_enh_flag = False
                skip_enhloss_flag = True
            else:
                # For simulated single-/multi-speaker data
                # feed it to Enhancement and calculate loss_enh
                bypass_enh_flag = False
                skip_enhloss_flag = False
        if not self.calc_enh_loss:
            skip_enhloss_flag = True

        # Bypass the enhancement module
        if (
            self.training and skip_enhloss_flag and not bypass_enh_flag
        ):  # For single-speaker real data: possibility to bypass frontend
            if random.random() <= self.bypass_enh_prob:
                bypass_enh_flag = True

        # 1. Enhancement
        # model forward
        loss_enh = None
        perm = None
        if not bypass_enh_flag:
            ret = self.enh_model.forward_enhance(
                speech, speech_lengths, {"num_spk": num_spk}
            )
            speech_pre, feature_mix, feature_pre, others = ret
            # loss computation
            if not skip_enhloss_flag:
                loss_enh, _, _, perm = self.enh_model.forward_loss(
                    speech_pre,
                    speech_lengths,
                    feature_mix,
                    feature_pre,
                    others,
                    speech_ref,
                )
                loss_enh = loss_enh[0]
                # resort the prediction audios with the obtained permutation
                if perm is not None:
                    speech_pre = ESPnetEnhancementModel.sort_by_perm(speech_pre, perm)
        else:
            speech_pre = [speech]

        # for data-parallel
        if text_ref_lengths[0] is not None:
            text = text[:, :text_length_max]
        if src_text is not None:
            src_text = src_text[:, : src_text_lengths.max()]

        # 2. ASR or ST
        if isinstance(self.s2t_model, ESPnetASRModel):  # ASR
            if perm is None:
                # Enh loss did not fix a permutation -> resolve it via CTC-PIT.
                loss_s2t, stats, weight = self.asr_pit_loss(
                    speech_pre, speech_lengths, text.unbind(2), text_ref_lengths
                )
            else:
                # Permutation already known: stack speakers along the batch
                # dimension and run the ASR model once.
                loss_s2t, stats, weight = self.s2t_model(
                    torch.cat(speech_pre, dim=0),
                    speech_lengths.repeat(len(speech_pre)),
                    torch.cat(text.unbind(2), dim=0),
                    torch.cat(text_ref_lengths, dim=0),
                )
            stats["loss_asr"] = loss_s2t.detach()
        elif isinstance(self.s2t_model, ESPnetSTModel):  # ST
            loss_s2t, stats, weight = self.s2t_model(
                speech_pre[0],
                speech_lengths,
                text,
                text_ref_lengths[0],
                src_text,
                src_text_lengths,
            )
            stats["loss_st"] = loss_s2t.detach()
        elif isinstance(self.s2t_model, ESPnetDiarizationModel):  # DIAR
            loss_s2t, stats, weight = self.s2t_model(
                speech=speech.clone(),
                speech_lengths=speech_lengths,
                spk_labels=text,
                spk_labels_lengths=text_ref_lengths[0],
                bottleneck_feats=others.get("bottleneck_feats"),
                bottleneck_feats_lengths=others.get("bottleneck_feats_lengths"),
            )
            stats["loss_diar"] = loss_s2t.detach()
        else:
            raise NotImplementedError(f"{type(self.s2t_model)} is not supported yet.")

        # 3. Combine the (optional) enhancement loss with the S2T loss
        if loss_enh is not None:
            loss = loss_enh + loss_s2t
        else:
            loss = loss_s2t

        stats["loss"] = loss.detach() if loss is not None else None
        stats["loss_enh"] = loss_enh.detach() if loss_enh is not None else None

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
def collect_feats(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
**kwargs,
) -> Dict[str, torch.Tensor]:
if "text" in kwargs:
text = kwargs["text"]
text_lengths = kwargs.get("text_lengths", None)
else:
text = kwargs["text_spk1"]
text_lengths = kwargs.get("text_spk1_lengths", None)
if self.extract_feats_in_collect_stats:
ret = self.s2t_model.collect_feats(
speech,
speech_lengths,
text,
text_lengths,
**kwargs,
)
feats, feats_lengths = ret["feats"], ret["feats_lengths"]
else:
# Generate dummy stats if extract_feats_in_collect_stats is False
logging.warning(
"Generating dummy stats for feats and feats_lengths, "
"because encoder_conf.extract_feats_in_collect_stats is "
f"{self.extract_feats_in_collect_stats}"
)
feats, feats_lengths = speech, speech_lengths
return {"feats": feats, "feats_lengths": feats_lengths}
def encode(
self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Frontend + Encoder. Note that this method is used by asr_inference.py
Args:
speech: (Batch, Length, ...)
speech_lengths: (Batch, )
"""
(
speech_pre,
feature_mix,
feature_pre,
others,
) = self.enh_model.forward_enhance(speech, speech_lengths)
num_spk = len(speech_pre)
assert num_spk == self.enh_model.num_spk, (num_spk, self.enh_model.num_spk)
encoder_out, encoder_out_lens = zip(
*[self.s2t_model.encode(sp, speech_lengths) for sp in speech_pre]
)
return encoder_out, encoder_out_lens
def encode_diar(
self, speech: torch.Tensor, speech_lengths: torch.Tensor, num_spk: int
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Frontend + Encoder. Note that this method is used by diar_inference.py
Args:
speech: (Batch, Length, ...)
speech_lengths: (Batch, )
num_spk: int
"""
(
speech_pre,
_,
_,
others,
) = self.enh_model.forward_enhance(speech, speech_lengths, {"num_spk": num_spk})
encoder_out, encoder_out_lens = self.s2t_model.encode(
speech,
speech_lengths,
others.get("bottleneck_feats"),
others.get("bottleneck_feats_lengths"),
)
return encoder_out, encoder_out_lens, speech_pre
    def nll(
        self,
        encoder_out: torch.Tensor,
        encoder_out_lens: torch.Tensor,
        ys_pad: torch.Tensor,
        ys_pad_lens: torch.Tensor,
    ) -> torch.Tensor:
        """Compute negative log likelihood(nll) from transformer-decoder

        Normally, this function is called in batchify_nll.

        Args:
            encoder_out: (Batch, Length, Dim)
            encoder_out_lens: (Batch,)
            ys_pad: (Batch, Length)
            ys_pad_lens: (Batch,)
        """
        # Delegate directly to the wrapped S2T model.
        return self.s2t_model.nll(
            encoder_out,
            encoder_out_lens,
            ys_pad,
            ys_pad_lens,
        )

    # Reuse ESPnetASRModel's batched nll driver unchanged.
    batchify_nll = ESPnetASRModel.batchify_nll
    def asr_pit_loss(self, speech, speech_lengths, text, text_lengths):
        """ASR loss with permutation invariant training (PIT).

        The best speaker permutation is first determined from pairwise CTC
        losses (computed without gradients), the separated signals are
        re-ordered accordingly, and the final differentiable ASR loss is
        computed once on all speakers stacked along the batch dimension.

        Args:
            speech: list of separated signals, one tensor per speaker
            speech_lengths: (Batch,)
            text: list of reference token sequences, one per speaker
            text_lengths: list of (Batch,) length tensors, one per speaker

        Raises:
            ValueError: if the ASR model has no CTC branch (needed to score
                the permutations).
        """
        if self.s2t_model.ctc is None:
            raise ValueError("CTC must be used to determine the permutation")
        with torch.no_grad():
            # (B, n_ref, n_hyp): CTC loss for every (reference, hypothesis) pair
            loss0 = torch.stack(
                [
                    torch.stack(
                        [
                            self.s2t_model._calc_batch_ctc_loss(
                                speech[h],
                                speech_lengths,
                                text[r],
                                text_lengths[r],
                            )
                            for r in range(self.enh_model.num_spk)
                        ],
                        dim=1,
                    )
                    for h in range(self.enh_model.num_spk)
                ],
                dim=2,
            )
            perm_detail, min_loss = self.permutation_invariant_training(loss0)
        # Re-order the separated signals with the best permutation
        # (outside no_grad so gradients flow through the gather).
        speech = ESPnetEnhancementModel.sort_by_perm(speech, perm_detail)
        loss, stats, weight = self.s2t_model(
            torch.cat(speech, dim=0),
            speech_lengths.repeat(len(speech)),
            torch.cat(text, dim=0),
            torch.cat(text_lengths, dim=0),
        )
        return loss, stats, weight
def _permutation_loss(self, ref, inf, criterion, perm=None):
"""The basic permutation loss function.
Args:
ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk
inf (List[torch.Tensor]): [(batch, ...), ...]
criterion (function): Loss function
perm: (batch)
Returns:
loss: torch.Tensor: (batch)
perm: list[(num_spk)]
"""
num_spk = len(ref)
losses = torch.stack(
[
torch.stack([criterion(ref[r], inf[h]) for r in range(num_spk)], dim=1)
for h in range(num_spk)
],
dim=2,
) # (B, n_ref, n_hyp)
perm_detail, min_loss = self.permutation_invariant_training(losses)
return min_loss.mean(), perm_detail
def permutation_invariant_training(self, losses: torch.Tensor):
"""Compute PIT loss.
Args:
losses (torch.Tensor): (batch, nref, nhyp)
Returns:
perm: list: (batch, n_spk)
loss: torch.Tensor: (batch)
"""
hyp_perm, min_perm_loss = [], []
losses_cpu = losses.data.cpu()
for b, b_loss in enumerate(losses_cpu):
# hungarian algorithm
try:
row_ind, col_ind = linear_sum_assignment(b_loss)
except ValueError as err:
if str(err) == "cost matrix is infeasible":
# random assignment since the cost is always inf
col_ind = np.array([0, 1])
min_perm_loss.append(torch.mean(losses[b, col_ind, col_ind]))
hyp_perm.append(col_ind)
continue
else:
raise
min_perm_loss.append(torch.mean(losses[b, row_ind, col_ind]))
hyp_perm.append(
torch.as_tensor(col_ind, dtype=torch.long, device=losses.device)
)
return hyp_perm, torch.stack(min_perm_loss)
def inherite_attributes(
self,
inherite_enh_attrs: List[str] = [],
inherite_s2t_attrs: List[str] = [],
):
assert check_argument_types()
if len(inherite_enh_attrs) > 0:
for attr in inherite_enh_attrs:
setattr(self, attr, getattr(self.enh_model, attr, None))
if len(inherite_s2t_attrs) > 0:
for attr in inherite_s2t_attrs:
setattr(self, attr, getattr(self.s2t_model, attr, None))
| 19,330 | 35.820952 | 88 | py |
espnet | espnet-master/espnet2/enh/abs_enh.py | from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Tuple
import torch
class AbsEnhancement(torch.nn.Module, ABC):
    """Abstract interface for enhancement models.

    Subclasses implement both a feature-domain forward and a raw-waveform
    forward, each returning (output, output lengths, auxiliary OrderedDict).
    """

    # @abstractmethod
    # def output_size(self) -> int:
    #     raise NotImplementedError

    @abstractmethod
    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, OrderedDict]:
        """Process the input features; must be implemented by subclasses."""
        raise NotImplementedError

    @abstractmethod
    def forward_rawwav(
        self, input: torch.Tensor, ilens: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, OrderedDict]:
        """Process raw waveforms; must be implemented by subclasses."""
        raise NotImplementedError
| 643 | 23.769231 | 56 | py |
espnet | espnet-master/espnet2/enh/espnet_model_tse.py | """Enhancement model module."""
import contextlib
from typing import Dict, List, OrderedDict, Tuple
import torch
from typeguard import check_argument_types
from espnet2.enh.decoder.abs_decoder import AbsDecoder
from espnet2.enh.encoder.abs_encoder import AbsEncoder
from espnet2.enh.extractor.abs_extractor import AbsExtractor
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainLoss
from espnet2.enh.loss.criterions.time_domain import TimeDomainLoss
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
# Smallest positive increment of the default floating dtype (numerical floor).
EPS = torch.finfo(torch.get_default_dtype()).eps
class ESPnetExtractionModel(AbsESPnetModel):
    """Target Speaker Extraction Frontend model"""

    def __init__(
        self,
        encoder: AbsEncoder,
        extractor: AbsExtractor,
        decoder: AbsDecoder,
        loss_wrappers: List[AbsLossWrapper],
        num_spk: int = 1,
        share_encoder: bool = True,
        extract_feats_in_collect_stats: bool = False,
    ):
        """Initialize the extraction model.

        Args:
            encoder: encoder that maps waveforms to features.
            extractor: target-speaker extraction module.
            decoder: decoder that maps extracted features back to waveforms.
            loss_wrappers: loss wrappers; criterion names must be unique.
            num_spk: number of target speakers (one enrollment per speaker).
            share_encoder: whether the enrollment signal is encoded with the
                same encoder as the mixture.
            extract_feats_in_collect_stats: see note on the attribute below.
        """
        assert check_argument_types()
        super().__init__()

        self.encoder = encoder
        self.extractor = extractor
        self.decoder = decoder
        # Whether to share encoder for both mixture and enrollment
        self.share_encoder = share_encoder
        self.num_spk = num_spk

        self.loss_wrappers = loss_wrappers
        # Criterion names key the stats dict; duplicates would silently
        # overwrite each other's logged statistics.
        names = [w.criterion.name for w in self.loss_wrappers]
        if len(set(names)) != len(names):
            raise ValueError("Duplicated loss names are not allowed: {}".format(names))
        # Noise / dereverberation criteria are rejected for this model.
        for w in self.loss_wrappers:
            if getattr(w.criterion, "is_noise_loss", False):
                raise ValueError("is_noise_loss=True is not supported")
            elif getattr(w.criterion, "is_dereverb_loss", False):
                raise ValueError("is_dereverb_loss=True is not supported")

        # for multi-channel signal
        self.ref_channel = getattr(self.extractor, "ref_channel", -1)

        # Used in espnet2/tasks/abs_task.py for determining whether or not to do
        # collect_feats during collect stats (stage 5).
        self.extract_feats_in_collect_stats = extract_feats_in_collect_stats

    def forward(
        self,
        speech_mix: torch.Tensor,
        speech_mix_lengths: torch.Tensor = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            speech_mix: (Batch, samples) or (Batch, samples, channels)
            speech_ref1: (Batch, samples)
                         or (Batch, samples, channels)
            speech_ref2: (Batch, samples)
                         or (Batch, samples, channels)
            ...
            speech_mix_lengths: (Batch,), default None for chunk interator,
                            because the chunk-iterator does not have the
                            speech_lengths returned. see in
                            espnet2/iterators/chunk_iter_factory.py
            enroll_ref1: (Batch, samples_aux)
                         enrollment (raw audio or embedding) for speaker 1
            enroll_ref2: (Batch, samples_aux)
                         enrollment (raw audio or embedding) for speaker 2
            ...
            kwargs: "utt_id" is among the input.

        Returns:
            loss: scalar training loss.
            stats: statistics dict for logging.
            weight: batch-size weight for DataParallel gathering.
        """
        # reference speech signal of each speaker
        assert "speech_ref1" in kwargs, "At least 1 reference signal input is required."
        speech_ref = [
            kwargs.get(
                f"speech_ref{spk + 1}",
                torch.zeros_like(kwargs["speech_ref1"]),
            )
            for spk in range(self.num_spk)
            if "speech_ref{}".format(spk + 1) in kwargs
        ]
        # (Batch, num_speaker, samples) or (Batch, num_speaker, samples, channels)
        speech_ref = torch.stack(speech_ref, dim=1)
        batch_size = speech_mix.shape[0]

        assert "enroll_ref1" in kwargs, "At least 1 enrollment signal is required."
        # enrollment signal for each speaker (as the target)
        enroll_ref = [
            # (Batch, samples_aux)
            kwargs["enroll_ref{}".format(spk + 1)]
            for spk in range(self.num_spk)
            if "enroll_ref{}".format(spk + 1) in kwargs
        ]
        # Missing per-enrollment lengths default to the full enrollment size.
        enroll_ref_lengths = [
            # (Batch,)
            kwargs.get(
                "enroll_ref{}_lengths".format(spk + 1),
                torch.ones(batch_size).int().fill_(enroll_ref[spk].size(1)),
            )
            for spk in range(self.num_spk)
            if "enroll_ref{}".format(spk + 1) in kwargs
        ]

        # Fall back to full-length utterances when the chunk iterator does
        # not provide explicit lengths.
        speech_lengths = (
            speech_mix_lengths
            if speech_mix_lengths is not None
            else torch.ones(batch_size).int().fill_(speech_mix.shape[1])
        )
        assert speech_lengths.dim() == 1, speech_lengths.shape
        # Check that batch_size is unified
        assert speech_mix.shape[0] == speech_ref.shape[0] == speech_lengths.shape[0], (
            speech_mix.shape,
            speech_ref.shape,
            speech_lengths.shape,
        )
        for aux in enroll_ref:
            assert aux.shape[0] == speech_mix.shape[0], (aux.shape, speech_mix.shape)

        # for data-parallel
        speech_ref = speech_ref[..., : speech_lengths.max()].unbind(dim=1)
        speech_mix = speech_mix[:, : speech_lengths.max()]
        enroll_ref = [
            enroll_ref[spk][:, : enroll_ref_lengths[spk].max()]
            for spk in range(len(enroll_ref))
        ]
        assert len(speech_ref) == len(enroll_ref), (len(speech_ref), len(enroll_ref))

        # model forward
        speech_pre, feature_mix, feature_pre, others = self.forward_enhance(
            speech_mix, speech_lengths, enroll_ref, enroll_ref_lengths
        )

        # loss computation
        loss, stats, weight, perm = self.forward_loss(
            speech_pre,
            speech_lengths,
            feature_mix,
            feature_pre,
            others,
            speech_ref,
        )
        return loss, stats, weight

    def forward_enhance(
        self,
        speech_mix: torch.Tensor,
        speech_lengths: torch.Tensor,
        enroll_ref: torch.Tensor,
        enroll_ref_lengths: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Run encoder -> extractor -> decoder for each target speaker.

        Returns:
            speech_pre: list of time-domain estimates, or None when the
                extractor does not produce time-domain features.
            feature_mix: encoded mixture feature.
            feature_pre: tuple of extracted features, one per speaker.
            others: merged auxiliary extractor outputs (e.g. masks).
        """
        feature_mix, flens = self.encoder(speech_mix, speech_lengths)
        if self.share_encoder:
            feature_aux, flens_aux = zip(
                *[
                    self.encoder(enroll_ref[spk], enroll_ref_lengths[spk])
                    for spk in range(len(enroll_ref))
                ]
            )
        else:
            # Enrollment is passed through as-is (e.g. a precomputed
            # speaker embedding instead of raw audio).
            feature_aux = enroll_ref
            flens_aux = enroll_ref_lengths

        # One extraction pass per enrollment; suffix_tag keeps the auxiliary
        # outputs of different speakers distinguishable after merging.
        feature_pre, _, others = zip(
            *[
                self.extractor(
                    feature_mix,
                    flens,
                    feature_aux[spk],
                    flens_aux[spk],
                    suffix_tag=f"_spk{spk + 1}",
                )
                for spk in range(len(enroll_ref))
            ]
        )
        others = {k: v for dic in others for k, v in dic.items()}
        if feature_pre[0] is not None:
            speech_pre = [self.decoder(ps, speech_lengths)[0] for ps in feature_pre]
        else:
            # some models (e.g. neural beamformer trained with mask loss)
            # do not predict time-domain signal in the training stage
            speech_pre = None
        return speech_pre, feature_mix, feature_pre, others

    def forward_loss(
        self,
        speech_pre: torch.Tensor,
        speech_lengths: torch.Tensor,
        feature_mix: torch.Tensor,
        feature_pre: torch.Tensor,
        others: OrderedDict,
        speech_ref: torch.Tensor,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Accumulate all configured criteria into a single training loss.

        Time-domain criteria are computed on decoded signals;
        time-frequency criteria on (re-)encoded spectra or on masks.
        """
        loss = 0.0
        stats = {}
        o = {}
        perm = None
        for loss_wrapper in self.loss_wrappers:
            criterion = loss_wrapper.criterion
            if getattr(criterion, "only_for_test", False) and self.training:
                continue

            # Zero-weight criteria are still evaluated (for logging) but
            # under no_grad so they contribute no gradients.
            zero_weight = loss_wrapper.weight == 0.0
            if isinstance(criterion, TimeDomainLoss):
                assert speech_pre is not None
                sref, spre = self._align_ref_pre_channels(
                    speech_ref, speech_pre, ch_dim=2, force_1ch=True
                )
                # for the time domain criterions
                with torch.no_grad() if zero_weight else contextlib.ExitStack():
                    l, s, o = loss_wrapper(sref, spre, {**others, **o})
            elif isinstance(criterion, FrequencyDomainLoss):
                sref, spre = self._align_ref_pre_channels(
                    speech_ref, speech_pre, ch_dim=2, force_1ch=False
                )
                # for the time-frequency domain criterions
                if criterion.compute_on_mask:
                    # compute loss on masks
                    tf_ref, tf_pre = self._get_speech_masks(
                        criterion,
                        feature_mix,
                        None,
                        speech_ref,
                        speech_pre,
                        speech_lengths,
                        others,
                    )
                else:
                    # compute on spectrum
                    tf_ref = [self.encoder(sr, speech_lengths)[0] for sr in sref]
                    tf_pre = [self.encoder(sp, speech_lengths)[0] for sp in spre]

                with torch.no_grad() if zero_weight else contextlib.ExitStack():
                    l, s, o = loss_wrapper(tf_ref, tf_pre, {**others, **o})
            else:
                raise NotImplementedError("Unsupported loss type: %s" % str(criterion))

            loss += l * loss_wrapper.weight
            stats.update(s)

            # Remember the first permutation estimate; later criteria can
            # reuse it via the shared dict `o`.
            if perm is None and "perm" in o:
                perm = o["perm"]

        if self.training and isinstance(loss, float):
            raise AttributeError(
                "At least one criterion must satisfy: only_for_test=False"
            )
        stats["loss"] = loss.detach()

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        batch_size = speech_ref[0].shape[0]
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight, perm

    def _align_ref_pre_channels(self, ref, pre, ch_dim=2, force_1ch=False):
        """Make the channel layouts of references and estimates compatible.

        Whenever one side is multi-channel and the other is not (or
        ``force_1ch`` requests mono on both sides), channel
        ``self.ref_channel`` is selected along ``ch_dim``.
        """
        if ref is None or pre is None:
            return ref, pre
        # NOTE: input must be a list of time-domain signals
        index = ref[0].new_tensor(self.ref_channel, dtype=torch.long)

        # for models like SVoice that output multiple lists of separated signals
        pre_is_multi_list = isinstance(pre[0], (list, tuple))
        pre_dim = pre[0][0].dim() if pre_is_multi_list else pre[0].dim()

        if ref[0].dim() > pre_dim:
            # multi-channel reference and single-channel output
            ref = [r.index_select(ch_dim, index).squeeze(ch_dim) for r in ref]
        elif ref[0].dim() < pre_dim:
            # single-channel reference and multi-channel output
            # NOTE(review): this branch flattens the nested list structure of
            # multi-list outputs into a single flat list — confirm downstream
            # consumers expect a flat list here.
            if pre_is_multi_list:
                pre = [
                    p.index_select(ch_dim, index).squeeze(ch_dim)
                    for plist in pre
                    for p in plist
                ]
            else:
                pre = [p.index_select(ch_dim, index).squeeze(ch_dim) for p in pre]
        elif ref[0].dim() == pre_dim == 3 and force_1ch:
            # multi-channel reference and output
            ref = [r.index_select(ch_dim, index).squeeze(ch_dim) for r in ref]
            if pre_is_multi_list:
                pre = [
                    p.index_select(ch_dim, index).squeeze(ch_dim)
                    for plist in pre
                    for p in plist
                ]
            else:
                pre = [p.index_select(ch_dim, index).squeeze(ch_dim) for p in pre]
        return ref, pre

    def _get_speech_masks(
        self, criterion, feature_mix, noise_ref, speech_ref, speech_pre, ilens, others
    ):
        """Build reference and predicted T-F masks for mask-based criteria."""
        if noise_ref is not None:
            noise_spec = self.encoder(sum(noise_ref), ilens)[0]
        else:
            noise_spec = None
        masks_ref = criterion.create_mask_label(
            feature_mix,
            [self.encoder(sr, ilens)[0] for sr in speech_ref],
            noise_spec=noise_spec,
        )
        if "mask_spk1" in others:
            # Prefer masks predicted directly by the extractor.
            masks_pre = [
                others["mask_spk{}".format(spk + 1)] for spk in range(self.num_spk)
            ]
        else:
            # Otherwise derive masks from the re-encoded estimates.
            masks_pre = criterion.create_mask_label(
                feature_mix,
                [self.encoder(sp, ilens)[0] for sp in speech_pre],
                noise_spec=noise_spec,
            )
        return masks_ref, masks_pre

    def collect_feats(
        self, speech_mix: torch.Tensor, speech_mix_lengths: torch.Tensor, **kwargs
    ) -> Dict[str, torch.Tensor]:
        """Return the raw waveforms as "features" for statistics collection."""
        # for data-parallel
        speech_mix = speech_mix[:, : speech_mix_lengths.max()]
        feats, feats_lengths = speech_mix, speech_mix_lengths
        return {"feats": feats, "feats_lengths": feats_lengths}
| 13,324 | 38.423077 | 88 | py |
espnet | espnet-master/espnet2/enh/separator/dan_separator.py | from collections import OrderedDict
from functools import reduce
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as Fun
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet.nets.pytorch_backend.rnn.encoders import RNN
class DANSeparator(AbsSeparator):
    def __init__(
        self,
        input_dim: int,
        rnn_type: str = "blstm",
        num_spk: int = 2,
        nonlinear: str = "tanh",
        layer: int = 2,
        unit: int = 512,
        emb_D: int = 40,
        dropout: float = 0.0,
    ):
        """Deep Attractor Network Separator

        Reference:
            DEEP ATTRACTOR NETWORK FOR SINGLE-MICROPHONE SPEAKER SEPARATION;
            Zhuo Chen. et al., 2017;
            https://pubmed.ncbi.nlm.nih.gov/29430212/

        Args:
            input_dim: input feature dimension
            rnn_type: string, select from 'blstm', 'lstm' etc.
            bidirectional: bool, whether the inter-chunk RNN layers are bidirectional.
            num_spk: number of speakers
            nonlinear: the nonlinear function for mask estimation,
                       select from 'relu', 'tanh', 'sigmoid'
            layer: int, number of stacked RNN layers. Default is 3.
            unit: int, dimension of the hidden state.
            emb_D: int, dimension of the attribute vector for one tf-bin.
            dropout: float, dropout ratio. Default is 0.
        """
        super().__init__()

        self._num_spk = num_spk

        self.blstm = RNN(
            idim=input_dim,
            elayers=layer,
            cdim=unit,
            hdim=unit,
            dropout=dropout,
            typ=rnn_type,
        )

        # Projects every frame to a D-dim embedding per frequency bin.
        self.linear = torch.nn.Linear(unit, input_dim * emb_D)

        if nonlinear not in ("sigmoid", "relu", "tanh"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))

        self.nonlinear = {
            "sigmoid": torch.nn.Sigmoid(),
            "relu": torch.nn.ReLU(),
            "tanh": torch.nn.Tanh(),
        }[nonlinear]

        self.D = emb_D

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.

        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, F]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                e.g. "feature_ref": list of reference spectra List[(B, T, F)]

        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        # if complex spectrum,
        if is_complex(input):
            feature = abs(input)
        else:
            feature = input

        B, T, F = input.shape

        # x:(B, T, F)
        x, ilens, _ = self.blstm(feature, ilens)
        # x:(B, T, F*D)
        x = self.linear(x)
        # x:(B, T, F*D)
        x = self.nonlinear(x)
        # tf_embedding:(B, T*F, D)
        tf_embedding = x.contiguous().view(B, T * F, -1)

        # Compute the attractors
        if self.training:
            # Training: attractors come from oracle labels derived from the
            # reference spectra (ideal-binary-mask-style assignment).
            assert additional is not None and "feature_ref" in additional
            origin = additional["feature_ref"]
            abs_origin = [abs(o) for o in origin]
            # Y_t[b, t, f] ends up holding the index of the speaker whose
            # magnitude dominates that T-F bin.
            Y_t = torch.zeros(B, T, F, device=origin[0].device)
            for i in range(self._num_spk):
                flags = [abs_origin[i] >= o for o in abs_origin]
                Y = reduce(lambda x, y: x * y, flags)
                Y = Y.int() * i
                Y_t += Y
            Y_t = Y_t.contiguous().flatten().long()
            Y = Fun.one_hot(Y_t, num_classes=self._num_spk)
            Y = Y.contiguous().view(B, -1, self._num_spk).float()
            # v_y:(B, D, spks)
            v_y = torch.bmm(torch.transpose(tf_embedding, 1, 2), Y)
            # sum_y:(B, D, spks)
            sum_y = torch.sum(Y, 1, keepdim=True).expand_as(v_y)
            # attractor:(B, D, spks)  (mean embedding per assigned speaker)
            attractor = v_y / (sum_y + 1e-8)
        else:
            # K-means for batch
            # Inference: no oracle labels, so cluster the embeddings with
            # K-means; the first num_spk embedding vectors seed the centers.
            # NOTE(review): if a cluster becomes empty, mean(dim=0) yields
            # NaN, and the loop has no iteration cap — confirm inputs make
            # this safe in practice.
            centers = tf_embedding[:, : self._num_spk, :].detach()
            dist = torch.empty(B, T * F, self._num_spk, device=tf_embedding.device)
            last_label = torch.zeros(B, T * F, device=tf_embedding.device)
            while True:
                for i in range(self._num_spk):
                    dist[:, :, i] = torch.sum(
                        (tf_embedding - centers[:, i, :].unsqueeze(1)) ** 2, dim=2
                    )
                label = dist.argmin(dim=2)
                # Converged once no T-F bin changes cluster assignment.
                if torch.sum(label != last_label) == 0:
                    break
                last_label = label
                for b in range(B):
                    for i in range(self._num_spk):
                        centers[b, i] = tf_embedding[b, label[b] == i].mean(dim=0)
            attractor = centers.permute(0, 2, 1)

        # calculate the distance between embeddings and attractors
        # dist:(B, T*F, spks)
        dist = torch.bmm(tf_embedding, attractor)
        # Softmax over speakers turns similarities into soft masks.
        masks = torch.softmax(dist, dim=2)
        masks = masks.contiguous().view(B, T, F, self._num_spk).unbind(dim=3)

        masked = [input * m for m in masks]

        others = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
        )

        return masked, ilens, others

    @property
    def num_spk(self):
        """Number of speakers this separator outputs."""
        return self._num_spk
| 6,012 | 35.005988 | 86 | py |
espnet | espnet-master/espnet2/enh/separator/tfgridnet_separator.py | import math
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from espnet2.enh.decoder.stft_decoder import STFTDecoder
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.enh.layers.complex_utils import new_complex_like
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet2.torch_utils.get_layer_from_string import get_layer
class TFGridNet(AbsSeparator):
    """Offline TFGridNet

    Reference:
    [1] Z.-Q. Wang, S. Cornell, S. Choi, Y. Lee, B.-Y. Kim, and S. Watanabe,
    "TF-GridNet: Integrating Full- and Sub-Band Modeling for Speech Separation",
    in arXiv preprint arXiv:2211.12433, 2022.
    [2] Z.-Q. Wang, S. Cornell, S. Choi, Y. Lee, B.-Y. Kim, and S. Watanabe,
    "TF-GridNet: Making Time-Frequency Domain Models Great Again for Monaural
    Speaker Separation", in arXiv preprint arXiv:2209.03952, 2022.

    NOTES:
    As outlined in the Reference, this model works best when trained with variance
    normalized mixture input and target, e.g., with mixture of shape [batch, samples,
    microphones], you normalize it by dividing with torch.std(mixture, (1, 2)). You
    must do the same for the target signals. It is encouraged to do so when not using
    scale-invariant loss functions such as SI-SDR.

    Args:
        input_dim: placeholder, not used
        n_srcs: number of output sources/speakers.
        n_fft: stft window size.
        stride: stft stride.
        window: stft window type choose between 'hamming', 'hanning' or None.
        n_imics: number of microphones channels (only fixed-array geometry supported).
        n_layers: number of TFGridNet blocks.
        lstm_hidden_units: number of hidden units in LSTM.
        attn_n_head: number of heads in self-attention
        attn_approx_qk_dim: approximate dimention of frame-level key and value tensors
        emb_dim: embedding dimension
        emb_ks: kernel size for unfolding and deconv1D
        emb_hs: hop size for unfolding and deconv1D
        activation: activation function to use in the whole TFGridNet model,
            you can use any torch supported activation e.g. 'relu' or 'elu'.
        eps: small epsilon for normalization layers.
        use_builtin_complex: whether to use builtin complex type or not.
    """

    def __init__(
        self,
        input_dim,
        n_srcs=2,
        n_fft=128,
        stride=64,
        window="hann",
        n_imics=1,
        n_layers=6,
        lstm_hidden_units=192,
        attn_n_head=4,
        attn_approx_qk_dim=512,
        emb_dim=48,
        emb_ks=4,
        emb_hs=1,
        activation="prelu",
        eps=1.0e-5,
        use_builtin_complex=False,
    ):
        super().__init__()
        self.n_srcs = n_srcs
        self.n_layers = n_layers
        self.n_imics = n_imics
        # Only even FFT sizes are supported; n_freqs is the one-sided bin count.
        assert n_fft % 2 == 0
        n_freqs = n_fft // 2 + 1

        self.enc = STFTEncoder(
            n_fft, n_fft, stride, window=window, use_builtin_complex=use_builtin_complex
        )
        self.dec = STFTDecoder(n_fft, n_fft, stride, window=window)

        # Stem conv over the stacked real/imag channels of all microphones.
        t_ksize = 3
        ks, padding = (t_ksize, 3), (t_ksize // 2, 1)
        self.conv = nn.Sequential(
            nn.Conv2d(2 * n_imics, emb_dim, ks, padding=padding),
            nn.GroupNorm(1, emb_dim, eps=eps),
        )

        self.blocks = nn.ModuleList([])
        for _ in range(n_layers):
            self.blocks.append(
                GridNetBlock(
                    emb_dim,
                    emb_ks,
                    emb_hs,
                    n_freqs,
                    lstm_hidden_units,
                    n_head=attn_n_head,
                    approx_qk_dim=attn_approx_qk_dim,
                    activation=activation,
                    eps=eps,
                )
            )

        # Maps block output to real/imag spectra of every source.
        self.deconv = nn.ConvTranspose2d(emb_dim, n_srcs * 2, ks, padding=padding)

    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[torch.Tensor], torch.Tensor, OrderedDict]:
        """Forward.

        Args:
            input (torch.Tensor): batched multi-channel audio tensor with
                M audio channels and N samples [B, N, M]
            ilens (torch.Tensor): input lengths [B]
            additional (Dict or None): other data, currently unused in this model.

        Returns:
            enhanced (List[Union(torch.Tensor)]):
                [(B, T), ...] list of len n_srcs
                of mono audio tensors with T samples.
            ilens (torch.Tensor): (B,)
            additional (Dict or None): other data, currently unused in this model,
                we return it also in output.
        """
        n_samples = input.shape[1]
        if self.n_imics == 1:
            assert len(input.shape) == 2
            input = input[..., None]  # [B, N, M]

        mix_std_ = torch.std(input, dim=(1, 2), keepdim=True)  # [B, 1, 1]
        input = input / mix_std_  # RMS normalization

        batch = self.enc(input, ilens)[0]  # [B, T, M, F]
        batch0 = batch.transpose(1, 2)  # [B, M, T, F]
        # Complex spectra are split into real/imag channels for the CNN stem.
        batch = torch.cat((batch0.real, batch0.imag), dim=1)  # [B, 2*M, T, F]
        n_batch, _, n_frames, n_freqs = batch.shape

        batch = self.conv(batch)  # [B, -1, T, F]

        for ii in range(self.n_layers):
            batch = self.blocks[ii](batch)  # [B, -1, T, F]

        batch = self.deconv(batch)  # [B, n_srcs*2, T, F]

        # Reassemble per-source complex spectra from the real/imag planes.
        batch = batch.view([n_batch, self.n_srcs, 2, n_frames, n_freqs])
        batch = new_complex_like(batch0, (batch[:, :, 0], batch[:, :, 1]))

        batch = self.dec(batch.view(-1, n_frames, n_freqs), ilens)[0]  # [B, n_srcs, -1]

        # iSTFT may come back shorter than the input; pad to n_samples.
        batch = self.pad2(batch.view([n_batch, self.num_spk, -1]), n_samples)

        batch = batch * mix_std_  # reverse the RMS normalization

        batch = [batch[:, src] for src in range(self.num_spk)]

        return batch, ilens, OrderedDict()

    @property
    def num_spk(self):
        """Number of output sources/speakers."""
        return self.n_srcs

    @staticmethod
    def pad2(input_tensor, target_len):
        """Zero-pad the last dimension of ``input_tensor`` up to ``target_len``."""
        input_tensor = torch.nn.functional.pad(
            input_tensor, (0, target_len - input_tensor.shape[-1])
        )
        return input_tensor
class GridNetBlock(nn.Module):
    """One TF-GridNet block: intra-frame RNN over frequency, inter-frame RNN
    over time, then full-band frame-level self-attention, each path with a
    residual connection.
    """

    def __getitem__(self, key):
        # Allows submodule lookup by name, e.g. self["attn_conv_Q_0"].
        return getattr(self, key)

    def __init__(
        self,
        emb_dim,
        emb_ks,
        emb_hs,
        n_freqs,
        hidden_channels,
        n_head=4,
        approx_qk_dim=512,
        activation="prelu",
        eps=1e-5,
    ):
        """Build the intra/inter RNN paths and the attention heads.

        Args:
            emb_dim: channel dimension of the block input/output.
            emb_ks: kernel size used for unfolding and the transposed conv.
            emb_hs: hop size used for unfolding and the transposed conv.
            n_freqs: number of frequency bins.
            hidden_channels: hidden size of the bidirectional LSTMs.
            n_head: number of self-attention heads.
            approx_qk_dim: approximate total frame-level Q/K dimension.
            activation: activation used inside the attention convs.
            eps: epsilon for the normalization layers.
        """
        super().__init__()

        in_channels = emb_dim * emb_ks

        # Intra path: dependencies across frequency within each frame.
        self.intra_norm = LayerNormalization4D(emb_dim, eps=eps)
        self.intra_rnn = nn.LSTM(
            in_channels, hidden_channels, 1, batch_first=True, bidirectional=True
        )
        self.intra_linear = nn.ConvTranspose1d(
            hidden_channels * 2, emb_dim, emb_ks, stride=emb_hs
        )

        # Inter path: dependencies across frames for each frequency.
        self.inter_norm = LayerNormalization4D(emb_dim, eps=eps)
        self.inter_rnn = nn.LSTM(
            in_channels, hidden_channels, 1, batch_first=True, bidirectional=True
        )
        self.inter_linear = nn.ConvTranspose1d(
            hidden_channels * 2, emb_dim, emb_ks, stride=emb_hs
        )

        # Per-head Q/K channel count chosen so E * n_freqs ~= approx_qk_dim.
        E = math.ceil(
            approx_qk_dim * 1.0 / n_freqs
        )  # approx_qk_dim is only approximate
        assert emb_dim % n_head == 0
        for ii in range(n_head):
            self.add_module(
                "attn_conv_Q_%d" % ii,
                nn.Sequential(
                    nn.Conv2d(emb_dim, E, 1),
                    get_layer(activation)(),
                    LayerNormalization4DCF((E, n_freqs), eps=eps),
                ),
            )
            self.add_module(
                "attn_conv_K_%d" % ii,
                nn.Sequential(
                    nn.Conv2d(emb_dim, E, 1),
                    get_layer(activation)(),
                    LayerNormalization4DCF((E, n_freqs), eps=eps),
                ),
            )
            self.add_module(
                "attn_conv_V_%d" % ii,
                nn.Sequential(
                    nn.Conv2d(emb_dim, emb_dim // n_head, 1),
                    get_layer(activation)(),
                    LayerNormalization4DCF((emb_dim // n_head, n_freqs), eps=eps),
                ),
            )
        self.add_module(
            "attn_concat_proj",
            nn.Sequential(
                nn.Conv2d(emb_dim, emb_dim, 1),
                get_layer(activation)(),
                LayerNormalization4DCF((emb_dim, n_freqs), eps=eps),
            ),
        )

        self.emb_dim = emb_dim
        self.emb_ks = emb_ks
        self.emb_hs = emb_hs
        self.n_head = n_head

    def forward(self, x):
        """GridNetBlock Forward.

        Args:
            x: [B, C, T, Q]
            out: [B, C, T, Q]
        """
        B, C, old_T, old_Q = x.shape
        # Pad T and Q so the unfold windows tile the tensor exactly.
        T = math.ceil((old_T - self.emb_ks) / self.emb_hs) * self.emb_hs + self.emb_ks
        Q = math.ceil((old_Q - self.emb_ks) / self.emb_hs) * self.emb_hs + self.emb_ks
        x = F.pad(x, (0, Q - old_Q, 0, T - old_T))

        # intra RNN
        input_ = x
        intra_rnn = self.intra_norm(input_)  # [B, C, T, Q]
        intra_rnn = (
            intra_rnn.transpose(1, 2).contiguous().view(B * T, C, Q)
        )  # [BT, C, Q]
        intra_rnn = F.unfold(
            intra_rnn[..., None], (self.emb_ks, 1), stride=(self.emb_hs, 1)
        )  # [BT, C*emb_ks, -1]
        intra_rnn = intra_rnn.transpose(1, 2)  # [BT, -1, C*emb_ks]
        intra_rnn, _ = self.intra_rnn(intra_rnn)  # [BT, -1, H]
        intra_rnn = intra_rnn.transpose(1, 2)  # [BT, H, -1]
        intra_rnn = self.intra_linear(intra_rnn)  # [BT, C, Q]
        intra_rnn = intra_rnn.view([B, T, C, Q])
        intra_rnn = intra_rnn.transpose(1, 2).contiguous()  # [B, C, T, Q]
        intra_rnn = intra_rnn + input_  # [B, C, T, Q]

        # inter RNN
        input_ = intra_rnn
        inter_rnn = self.inter_norm(input_)  # [B, C, T, F]
        inter_rnn = (
            inter_rnn.permute(0, 3, 1, 2).contiguous().view(B * Q, C, T)
        )  # [BF, C, T]
        inter_rnn = F.unfold(
            inter_rnn[..., None], (self.emb_ks, 1), stride=(self.emb_hs, 1)
        )  # [BF, C*emb_ks, -1]
        inter_rnn = inter_rnn.transpose(1, 2)  # [BF, -1, C*emb_ks]
        inter_rnn, _ = self.inter_rnn(inter_rnn)  # [BF, -1, H]
        inter_rnn = inter_rnn.transpose(1, 2)  # [BF, H, -1]
        inter_rnn = self.inter_linear(inter_rnn)  # [BF, C, T]
        inter_rnn = inter_rnn.view([B, Q, C, T])
        inter_rnn = inter_rnn.permute(0, 2, 3, 1).contiguous()  # [B, C, T, Q]
        inter_rnn = inter_rnn + input_  # [B, C, T, Q]

        # attention
        # Crop back to the unpadded size before attention.
        inter_rnn = inter_rnn[..., :old_T, :old_Q]
        batch = inter_rnn

        all_Q, all_K, all_V = [], [], []
        for ii in range(self.n_head):
            all_Q.append(self["attn_conv_Q_%d" % ii](batch))  # [B, C, T, Q]
            all_K.append(self["attn_conv_K_%d" % ii](batch))  # [B, C, T, Q]
            all_V.append(self["attn_conv_V_%d" % ii](batch))  # [B, C, T, Q]

        # Heads are stacked along the batch axis: B' = n_head * B.
        Q = torch.cat(all_Q, dim=0)  # [B', C, T, Q]
        K = torch.cat(all_K, dim=0)  # [B', C, T, Q]
        V = torch.cat(all_V, dim=0)  # [B', C, T, Q]

        Q = Q.transpose(1, 2)
        Q = Q.flatten(start_dim=2)  # [B', T, C*Q]
        K = K.transpose(1, 2)
        K = K.flatten(start_dim=2)  # [B', T, C*Q]
        V = V.transpose(1, 2)  # [B', T, C, Q]
        old_shape = V.shape
        V = V.flatten(start_dim=2)  # [B', T, C*Q]
        emb_dim = Q.shape[-1]

        # Scaled dot-product attention over frames.
        attn_mat = torch.matmul(Q, K.transpose(1, 2)) / (emb_dim**0.5)  # [B', T, T]
        attn_mat = F.softmax(attn_mat, dim=2)  # [B', T, T]
        V = torch.matmul(attn_mat, V)  # [B', T, C*Q]

        V = V.reshape(old_shape)  # [B', T, C, Q]
        V = V.transpose(1, 2)  # [B', C, T, Q]
        emb_dim = V.shape[1]

        # Undo the head stacking and concatenate heads along channels.
        batch = V.view([self.n_head, B, emb_dim, old_T, -1])  # [n_head, B, C, T, Q])
        batch = batch.transpose(0, 1)  # [B, n_head, C, T, Q])
        batch = batch.contiguous().view(
            [B, self.n_head * emb_dim, old_T, -1]
        )  # [B, C, T, Q])
        batch = self["attn_concat_proj"](batch)  # [B, C, T, Q])

        out = batch + inter_rnn
        return out
class LayerNormalization4D(nn.Module):
    """Layer normalization over the channel axis of a [B, C, T, F] tensor.

    Mean and (biased) variance are computed per (batch, time, frequency)
    position across the C channels, followed by a learnable per-channel
    affine transform (gamma, beta).
    """

    def __init__(self, input_dimension, eps=1e-5):
        super().__init__()
        shape = [1, input_dimension, 1, 1]
        # gamma starts at 1 and beta at 0, i.e. an identity affine transform.
        self.gamma = Parameter(torch.ones(shape, dtype=torch.float32))
        self.beta = Parameter(torch.zeros(shape, dtype=torch.float32))
        self.eps = eps

    def forward(self, x):
        if x.ndim != 4:
            raise ValueError("Expect x to have 4 dimensions, but got {}".format(x.ndim))
        # Per-position statistics across the channel dimension.
        mean = x.mean(dim=1, keepdim=True)  # [B,1,T,F]
        var = x.var(dim=1, unbiased=False, keepdim=True)  # [B,1,T,F]
        normalized = (x - mean) / torch.sqrt(var + self.eps)
        return normalized * self.gamma + self.beta
class LayerNormalization4DCF(nn.Module):
    """Layer normalization over the channel AND frequency axes of [B, C, T, F].

    Mean and (biased) variance are computed jointly over (C, F) for every
    (batch, time) position; the learnable affine parameters have shape
    [1, C, 1, F].
    """

    def __init__(self, input_dimension, eps=1e-5):
        super().__init__()
        assert len(input_dimension) == 2
        shape = [1, input_dimension[0], 1, input_dimension[1]]
        # gamma starts at 1 and beta at 0, i.e. an identity affine transform.
        self.gamma = Parameter(torch.ones(shape, dtype=torch.float32))
        self.beta = Parameter(torch.zeros(shape, dtype=torch.float32))
        self.eps = eps

    def forward(self, x):
        if x.ndim != 4:
            raise ValueError("Expect x to have 4 dimensions, but got {}".format(x.ndim))
        reduce_dims = (1, 3)
        mean = x.mean(dim=reduce_dims, keepdim=True)  # [B,1,T,1]
        var = x.var(dim=reduce_dims, unbiased=False, keepdim=True)  # [B,1,T,1]
        return ((x - mean) / torch.sqrt(var + self.eps)) * self.gamma + self.beta
| 14,294 | 35.466837 | 88 | py |
espnet | espnet-master/espnet2/enh/separator/conformer_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet.nets.pytorch_backend.conformer.encoder import Encoder as ConformerEncoder
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
# True when the installed torch is >= 1.9.0; version feature gate
# (not referenced in the code visible in this module).
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class ConformerSeparator(AbsSeparator):
    """Conformer-based separator that estimates one T-F mask per output stream."""

    def __init__(
        self,
        input_dim: int,
        num_spk: int = 2,
        predict_noise: bool = False,
        adim: int = 384,
        aheads: int = 4,
        layers: int = 6,
        linear_units: int = 1536,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 1,
        normalize_before: bool = False,
        concat_after: bool = False,
        dropout_rate: float = 0.1,
        input_layer: str = "linear",
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.1,
        nonlinear: str = "relu",
        conformer_pos_enc_layer_type: str = "rel_pos",
        conformer_self_attn_layer_type: str = "rel_selfattn",
        conformer_activation_type: str = "swish",
        use_macaron_style_in_conformer: bool = True,
        use_cnn_in_conformer: bool = True,
        conformer_enc_kernel_size: int = 7,
        padding_idx: int = -1,
    ):
        """Conformer separator.

        Args:
            input_dim: input feature dimension
            num_spk: number of speakers
            predict_noise: whether to output the estimated noise signal
            adim (int): Dimension of attention.
            aheads (int): The number of heads of multi head attention.
            linear_units (int): The number of units of position-wise feed forward.
            layers (int): The number of transformer blocks.
            dropout_rate (float): Dropout rate.
            input_layer (Union[str, torch.nn.Module]): Input layer type.
            attention_dropout_rate (float): Dropout rate in attention.
            positional_dropout_rate (float): Dropout rate after adding
                positional encoding.
            normalize_before (bool): Whether to use layer_norm before the first block.
            concat_after (bool): Whether to concat attention layer's input and output.
                if True, additional linear will be applied.
                i.e. x -> x + linear(concat(x, att(x)))
                if False, no additional linear will be applied. i.e. x -> x + att(x)
            conformer_pos_enc_layer_type(str): Encoder positional encoding layer type.
            conformer_self_attn_layer_type (str): Encoder attention layer type.
            conformer_activation_type(str): Encoder activation function type.
            positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
            positionwise_conv_kernel_size (int): Kernel size of
                positionwise conv1d layer.
            use_macaron_style_in_conformer (bool): Whether to use macaron style for
                positionwise layer.
            use_cnn_in_conformer (bool): Whether to use convolution module.
            conformer_enc_kernel_size(int): Kernerl size of convolution module.
            padding_idx (int): Padding idx for input_layer=embed.
            nonlinear: the nonlinear function for mask estimation,
                       select from 'relu', 'tanh', 'sigmoid'
        """
        super().__init__()

        self._num_spk = num_spk
        self.predict_noise = predict_noise

        self.conformer = ConformerEncoder(
            idim=input_dim,
            attention_dim=adim,
            attention_heads=aheads,
            linear_units=linear_units,
            num_blocks=layers,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            attention_dropout_rate=attention_dropout_rate,
            input_layer=input_layer,
            normalize_before=normalize_before,
            concat_after=concat_after,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            macaron_style=use_macaron_style_in_conformer,
            pos_enc_layer_type=conformer_pos_enc_layer_type,
            selfattention_layer_type=conformer_self_attn_layer_type,
            activation_type=conformer_activation_type,
            use_cnn_module=use_cnn_in_conformer,
            cnn_module_kernel=conformer_enc_kernel_size,
            padding_idx=padding_idx,
        )

        # One mask-estimation head per output stream (speakers [+ noise]).
        num_outputs = self.num_spk + 1 if self.predict_noise else self.num_spk
        self.linear = torch.nn.ModuleList(
            [torch.nn.Linear(adim, input_dim) for _ in range(num_outputs)]
        )

        if nonlinear not in ("sigmoid", "relu", "tanh"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))

        self.nonlinear = {
            "sigmoid": torch.nn.Sigmoid(),
            "relu": torch.nn.ReLU(),
            "tanh": torch.nn.Tanh(),
        }[nonlinear]

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.

        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model

        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        # if complex spectrum,
        if is_complex(input):
            feature = abs(input)
        else:
            feature = input

        # prepare pad_mask for transformer
        pad_mask = make_non_pad_mask(ilens).unsqueeze(1).to(feature.device)

        # NOTE(review): the encoder's second return value appears to be its
        # pad mask, which overwrites `ilens` here and is returned below —
        # confirm callers expect this (same pattern as sibling separators).
        x, ilens = self.conformer(feature, pad_mask)

        masks = []
        for linear in self.linear:
            y = linear(x)
            y = self.nonlinear(y)
            masks.append(y)

        # The last head (if any) is the noise mask, not a speaker mask.
        if self.predict_noise:
            *masks, mask_noise = masks
        masked = [input * m for m in masks]

        others = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
        )
        if self.predict_noise:
            others["noise1"] = input * mask_noise

        return masked, ilens, others

    @property
    def num_spk(self):
        """Number of speakers this separator outputs."""
        return self._num_spk
| 7,130 | 39.288136 | 86 | py |
espnet | espnet-master/espnet2/enh/separator/dprnn_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.layers.dprnn import DPRNN, merge_feature, split_feature
from espnet2.enh.separator.abs_separator import AbsSeparator
# True when the installed torch is >= 1.9.0; version feature gate
# (not referenced in the code visible in this module).
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class DPRNNSeparator(AbsSeparator):
    def __init__(
        self,
        input_dim: int,
        rnn_type: str = "lstm",
        bidirectional: bool = True,
        num_spk: int = 2,
        predict_noise: bool = False,
        nonlinear: str = "relu",
        layer: int = 3,
        unit: int = 512,
        segment_size: int = 20,
        dropout: float = 0.0,
    ):
        """Dual-Path RNN (DPRNN) Separator

        Args:
            input_dim: input feature dimension
            rnn_type: string, select from 'RNN', 'LSTM' and 'GRU'.
            bidirectional: bool, whether the inter-chunk RNN layers are bidirectional.
            num_spk: number of speakers
            predict_noise: whether to output the estimated noise signal
            nonlinear: the nonlinear function for mask estimation,
                       select from 'relu', 'tanh', 'sigmoid'
            layer: int, number of stacked RNN layers. Default is 3.
            unit: int, dimension of the hidden state.
            segment_size: dual-path segment size
            dropout: float, dropout ratio. Default is 0.
        """
        super().__init__()

        self._num_spk = num_spk
        self.predict_noise = predict_noise
        self.segment_size = segment_size

        # One extra output stream is reserved for the noise estimate.
        self.num_outputs = self.num_spk + 1 if self.predict_noise else self.num_spk
        self.dprnn = DPRNN(
            rnn_type=rnn_type,
            input_size=input_dim,
            hidden_size=unit,
            output_size=input_dim * self.num_outputs,
            dropout=dropout,
            num_layers=layer,
            bidirectional=bidirectional,
        )

        activations = {
            "sigmoid": torch.nn.Sigmoid,
            "relu": torch.nn.ReLU,
            "tanh": torch.nn.Tanh,
        }
        if nonlinear not in activations:
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))
        self.nonlinear = activations[nonlinear]()

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Estimate per-speaker masks and apply them to the input feature.

        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model

        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        # Complex spectra are reduced to magnitudes for mask estimation.
        feature = abs(input) if is_complex(input) else input

        n_batch, n_frames, n_freq = feature.shape

        # Dual-path processing runs on (B, N, T) chunked into segments.
        segmented, rest = split_feature(
            feature.transpose(1, 2), segment_size=self.segment_size
        )  # B, N, L, K
        processed = merge_feature(self.dprnn(segmented), rest)  # B, N*num_outputs, T
        processed = processed.transpose(1, 2).view(
            n_batch, n_frames, n_freq, self.num_outputs
        )

        masks = self.nonlinear(processed).unbind(dim=3)
        mask_noise = None
        if self.predict_noise:
            # The last output stream carries the noise mask.
            *masks, mask_noise = masks

        masked = [input * m for m in masks]

        others = OrderedDict(
            ("mask_spk{}".format(idx + 1), mask) for idx, mask in enumerate(masks)
        )
        if self.predict_noise:
            others["noise1"] = input * mask_noise

        return masked, ilens, others

    @property
    def num_spk(self):
        """Number of speakers this separator outputs."""
        return self._num_spk
| 4,412 | 32.431818 | 86 | py |
espnet | espnet-master/espnet2/enh/separator/fasnet_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple
import torch
from packaging.version import parse as V
from espnet2.enh.layers.fasnet import FaSNet_TAC
from espnet2.enh.layers.ifasnet import iFaSNet
from espnet2.enh.separator.abs_separator import AbsSeparator
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class FaSNetSeparator(AbsSeparator):
    # Time-domain multi-channel separator wrapping either the original
    # FaSNet-TAC or the implicit iFaSNet network.
    def __init__(
        self,
        input_dim: int,
        enc_dim: int,
        feature_dim: int,
        hidden_dim: int,
        layer: int,
        segment_size: int,
        num_spk: int,
        win_len: int,
        context_len: int,
        fasnet_type: str,
        dropout: float = 0.0,
        sr: int = 16000,
        predict_noise: bool = False,
    ):
        """Filter-and-sum Network (FaSNet) Separator
        Args:
            input_dim: required by AbsSeparator. Not used in this model.
            enc_dim: encoder dimension
            feature_dim: feature dimension
            hidden_dim: hidden dimension in DPRNN
            layer: number of DPRNN blocks in iFaSNet
            segment_size: dual-path segment size
            num_spk: number of speakers
            win_len: window length in millisecond
            context_len: context length in millisecond
            fasnet_type: 'fasnet' or 'ifasnet'.
                Select from origin fasnet or Implicit fasnet
            dropout: dropout rate. Default is 0.
            sr: samplerate of input audio
            predict_noise: whether to output the estimated noise signal
        """
        super().__init__()
        self._num_spk = num_spk
        self.predict_noise = predict_noise
        assert fasnet_type in ["fasnet", "ifasnet"], "only support fasnet and ifasnet"
        FASNET = FaSNet_TAC if fasnet_type == "fasnet" else iFaSNet
        # When predicting noise, the network is built with one extra output
        # stream; forward() strips it off as the noise estimate.
        self.fasnet = FASNET(
            enc_dim=enc_dim,
            feature_dim=feature_dim,
            hidden_dim=hidden_dim,
            layer=layer,
            segment_size=segment_size,
            nspk=num_spk + 1 if predict_noise else num_spk,
            win_len=win_len,
            context_len=context_len,
            sr=sr,
            dropout=dropout,
        )
    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[torch.Tensor], torch.Tensor, OrderedDict]:
        """Forward.
        Args:
            input (torch.Tensor): (Batch, samples, channels)
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model
        Returns:
            separated (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        assert input.dim() == 3, "only support input shape: (Batch, samples, channels)"
        # currently only support for fixed-array
        # FaSNet expects channels-first input: (Batch, channels, samples).
        input = input.permute(0, 2, 1)
        # NOTE(review): this flag tensor presumably tells FaSNet_TAC that no
        # microphones are masked out (fixed array); it is created on CPU even
        # for GPU inputs — confirm FaSNet handles the device mismatch.
        none_mic = torch.zeros(1, dtype=input.dtype)
        separated = self.fasnet(input, none_mic)
        separated = list(separated.unbind(dim=1))
        # NOTE(review): `others` is a plain dict here, while sibling
        # separators return an OrderedDict — apparently interchangeable.
        others = {}
        if self.predict_noise:
            # Last output stream is the noise estimate.
            *separated, noise = separated
            others["noise1"] = noise
        return separated, ilens, others
    @property
    def num_spk(self):
        # Number of speaker outputs (excludes the optional noise stream).
        return self._num_spk
| 3,677 | 30.435897 | 87 | py |
espnet | espnet-master/espnet2/enh/separator/ineube_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.decoder.stft_decoder import STFTDecoder
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.enh.layers.beamformer import tik_reg, to_double
from espnet2.enh.layers.tcndenseunet import TCNDenseUNet
from espnet2.enh.separator.abs_separator import AbsSeparator
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class iNeuBe(AbsSeparator):
    """iNeuBe, iterative neural/beamforming enhancement
    Reference:
    Lu, Y. J., Cornell, S., Chang, X., Zhang, W., Li, C., Ni, Z., ... & Watanabe, S.
    Towards Low-Distortion Multi-Channel Speech Enhancement:
    The ESPNET-Se Submission to the L3DAS22 Challenge. ICASSP 2022 p. 9201-9205.
    NOTES:
    As outlined in the Reference, this model works best when coupled with
    the MultiResL1SpecLoss defined in criterions/time_domain.py.
    The model is trained with variance normalized mixture input and target.
    e.g. with mixture of shape [batch, microphones, samples] you normalize
    it by dividing with torch.std(mixture, (1, 2)). You must do the same
    for the target signal.
    In the Reference, the variance normalization was performed offline
    (we normalized by the std computed on the entire training set and not
    for each input separately). However we found out that also normalizing
    each input and target separately works well.
    Args:
        n_spk: number of output sources/speakers.
        n_fft: stft window size.
        stride: stft stride.
        window: stft window type choose between 'hamming', 'hanning' or None.
        mic_channels: number of microphones channels
            (only fixed-array geometry supported).
        hid_chans: number of channels in the subsampling/upsampling conv layers.
        hid_chans_dense: number of channels in the densenet layers
            (reduce this to reduce VRAM requirements).
        ksz_dense: kernel size in the densenet layers thorough iNeuBe.
        ksz_tcn: kernel size in the TCN submodule.
        tcn_repeats: number of repetitions of blocks in the TCN submodule.
        tcn_blocks: number of blocks in the TCN submodule.
        tcn_channels: number of channels in the TCN submodule.
        activation: activation function to use in the whole iNeuBe model,
            you can use any torch supported activation e.g. 'relu' or 'elu'.
        output_from: output the estimate from 'dnn1', 'mfmcwf' or 'dnn2'.
        n_chunks: number of future and past frames to consider for mfMCWF computation.
        freeze_dnn1: whether or not freezing dnn1 parameters during training of dnn2.
        tik_eps: diagonal loading in the mfMCWF computation.
    """
    def __init__(
        self,
        n_spk=1,
        n_fft=512,
        stride=128,
        window="hann",
        mic_channels=1,
        hid_chans=32,
        hid_chans_dense=32,
        ksz_dense=(3, 3),
        ksz_tcn=3,
        tcn_repeats=4,
        tcn_blocks=7,
        tcn_channels=384,
        activation="elu",
        output_from="dnn1",
        n_chunks=3,
        freeze_dnn1=False,
        tik_eps=1e-8,
    ):
        super().__init__()
        self.n_spk = n_spk
        self.output_from = output_from
        self.n_chunks = n_chunks
        self.freeze_dnn1 = freeze_dnn1
        self.tik_eps = tik_eps
        # Number of (onesided) STFT frequency bins.
        fft_c_channels = n_fft // 2 + 1
        assert is_torch_1_9_plus, (
            "iNeuBe model requires torch>=1.9.0, "
            "please install latest torch version."
        )
        self.enc = STFTEncoder(n_fft, n_fft, stride, window=window)
        self.dec = STFTDecoder(n_fft, n_fft, stride, window=window)
        # Stage 1: maps the multi-channel mixture STFT to n_spk estimates.
        self.dnn1 = TCNDenseUNet(
            n_spk,
            fft_c_channels,
            mic_channels,
            hid_chans,
            hid_chans_dense,
            ksz_dense,
            ksz_tcn,
            tcn_repeats,
            tcn_blocks,
            tcn_channels,
            activation=activation,
        )
        # Stage 2: refines one source given the mixture plus the dnn1 and
        # mfMCWF estimates, hence the two extra input channels.
        self.dnn2 = TCNDenseUNet(
            1,
            fft_c_channels,
            mic_channels + 2,
            hid_chans,
            hid_chans_dense,
            ksz_dense,
            ksz_tcn,
            tcn_repeats,
            tcn_blocks,
            tcn_channels,
            activation=activation,
        )
    @staticmethod
    def unfold(tf_rep, chunk_size):
        """unfolding STFT representation to add context in the mics channel.
        Args:
            tf_rep (torch.Tensor): 3D tensor (monaural complex STFT)
                of shape [B, F, T] batch, frequencies, frames.
            chunk_size (int): number of past and future frames to consider.
        Returns:
            est_unfolded (torch.Tensor): complex 4D tensor STFT with context channel.
                shape now is [B, C, F, T] batch, context, frequencies, frames.
                Basically same shape as a multi-channel STFT with C microphones.
        """
        bsz, freq, _ = tf_rep.shape
        if chunk_size == 0:
            # No context requested: return the input unchanged.
            return tf_rep
        # Pad chunk_size frames on both sides, then gather sliding windows
        # of 2*chunk_size+1 frames via unfold.
        est_unfolded = torch.nn.functional.unfold(
            torch.nn.functional.pad(
                tf_rep, (chunk_size, chunk_size), mode="constant"
            ).unsqueeze(-1),
            kernel_size=(2 * chunk_size + 1, 1),
            padding=(0, 0),
            stride=(1, 1),
        )
        n_chunks = est_unfolded.shape[-1]
        est_unfolded = est_unfolded.reshape(bsz, freq, 2 * chunk_size + 1, n_chunks)
        est_unfolded = est_unfolded.transpose(1, 2)
        return est_unfolded
    @staticmethod
    def mfmcwf(mixture, estimate, n_chunks, tik_eps):
        """multi-frame multi-channel wiener filter.
        Args:
            mixture (torch.Tensor): multi-channel STFT complex mixture tensor,
                of shape [B, T, C, F] batch, frames, microphones, frequencies.
            estimate (torch.Tensor): monaural STFT complex estimate
                of target source [B, T, F] batch, frames, frequencies.
            n_chunks (int): number of past and future mfMCWF frames.
                If 0 then standard MCWF.
            tik_eps (float): diagonal loading for matrix inversion in MCWF computation.
        Returns:
            beamformed (torch.Tensor): monaural STFT complex estimate
                of target source after MFMCWF [B, T, F] batch, frames, frequencies.
        """
        # Rearrange to channels-first layouts: [B, C, F, T] and [B, F, T].
        mixture = mixture.permute(0, 2, 3, 1)
        estimate = estimate.transpose(-1, -2)
        bsz, mics, _, frames = mixture.shape
        # Stack temporal context as virtual microphones:
        # C mics become C*(2*n_chunks+1) filter taps.
        mix_unfolded = iNeuBe.unfold(
            mixture.reshape(bsz * mics, -1, frames), n_chunks
        ).reshape(bsz, mics * (2 * n_chunks + 1), -1, frames)
        # Double precision for numerically stable covariance inversion.
        mix_unfolded = to_double(mix_unfolded)
        estimate1 = to_double(estimate)
        # Cross-covariance between mixture taps and the target estimate.
        zeta = torch.einsum("bmft, bft->bmf", mix_unfolded, estimate1.conj())
        # Spatial(-temporal) covariance of the mixture.
        scm_mix = torch.einsum("bmft, bnft->bmnf", mix_unfolded, mix_unfolded.conj())
        # Tikhonov-regularized inversion (diagonal loading with tik_eps).
        inv_scm_mix = torch.inverse(
            tik_reg(scm_mix.permute(0, 3, 1, 2), tik_eps)
        ).permute(0, 2, 3, 1)
        bf_vector = torch.einsum("bmnf, bnf->bmf", inv_scm_mix, zeta)
        # Apply the filter and cast back to the mixture's original dtype.
        beamformed = torch.einsum("...mf,...mft->...ft", bf_vector.conj(), mix_unfolded)
        beamformed = beamformed.to(mixture)
        return beamformed.transpose(-1, -2)
    @staticmethod
    def pad2(input_tensor, target_len):
        # Right-pad the last dimension with zeros up to target_len.
        input_tensor = torch.nn.functional.pad(
            input_tensor, (0, target_len - input_tensor.shape[-1])
        )
        return input_tensor
    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.
        Args:
            input (torch.Tensor/ComplexTensor): batched multi-channel audio tensor with
                C audio channels and T samples [B, T, C]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data, currently unused in this model.
        Returns:
            enhanced (List[Union[torch.Tensor, ComplexTensor]]):
                [(B, T), ...] list of len n_spk
                of mono audio tensors with T samples.
            ilens (torch.Tensor): (B,)
            additional (Dict or None): other data, currently unused in this model,
                we return it also in output.
        """
        # B, T, C
        bsz, mixture_len, mics = input.shape
        mix_stft = self.enc(input, ilens)[0]
        # B, T, C, F
        est_dnn1 = self.dnn1(mix_stft)
        if self.freeze_dnn1:
            # Stop gradients so dnn2 training leaves dnn1 untouched.
            est_dnn1 = est_dnn1.detach()
        _, _, frames, freq = est_dnn1.shape
        # Decode each speaker stream back to time domain (batch and speaker
        # dims are flattened together for the decoder).
        output_dnn1 = self.dec(
            est_dnn1.reshape(bsz * self.num_spk, frames, freq), ilens
        )[0]
        output_dnn1 = self.pad2(output_dnn1.reshape(bsz, self.num_spk, -1), mixture_len)
        output_dnn1 = [output_dnn1[:, src] for src in range(output_dnn1.shape[1])]
        others = OrderedDict()
        if self.output_from == "dnn1":
            return output_dnn1, ilens, others
        elif self.output_from in ["mfmcwf", "dnn2"]:
            others["dnn1"] = output_dnn1
            # Beamform each speaker estimate against the mixture.
            est_mfmcwf = iNeuBe.mfmcwf(
                mix_stft,
                est_dnn1.reshape(bsz * self.n_spk, frames, freq),
                self.n_chunks,
                self.tik_eps,
            ).reshape(bsz, self.n_spk, frames, freq)
            output_mfmcwf = self.dec(
                est_mfmcwf.reshape(bsz * self.num_spk, frames, freq), ilens
            )[0]
            output_mfmcwf = self.pad2(
                output_mfmcwf.reshape(bsz, self.num_spk, -1), mixture_len
            )
            if self.output_from == "mfmcwf":
                return (
                    [output_mfmcwf[:, src] for src in range(output_mfmcwf.shape[1])],
                    ilens,
                    others,
                )
            elif self.output_from == "dnn2":
                others["dnn1"] = output_dnn1
                others["beam"] = output_mfmcwf
                # dnn2 input: mixture channels + dnn1 estimate + mfMCWF
                # estimate, stacked along the channel axis.
                # NOTE(review): for bsz > 1, repeat() orders the batch axis as
                # [b0, b1, b0, b1, ...] while reshape(bsz * num_spk, ...)
                # yields [b0s0, b0s1, b1s0, ...] — the pairing of mixture and
                # estimates looks inconsistent; confirm intended behavior.
                est_dnn2 = self.dnn2(
                    torch.cat(
                        (
                            mix_stft.repeat(self.num_spk, 1, 1, 1),
                            est_dnn1.reshape(
                                bsz * self.num_spk, frames, freq
                            ).unsqueeze(2),
                            est_mfmcwf.reshape(
                                bsz * self.num_spk, frames, freq
                            ).unsqueeze(2),
                        ),
                        2,
                    )
                )
                output_dnn2 = self.dec(est_dnn2[:, 0], ilens)[0]
                output_dnn2 = self.pad2(
                    output_dnn2.reshape(bsz, self.num_spk, -1), mixture_len
                )
                return (
                    [output_dnn2[:, src] for src in range(output_dnn2.shape[1])],
                    ilens,
                    others,
                )
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError
    @property
    def num_spk(self):
        # Alias for n_spk, required by the AbsSeparator interface.
        return self.n_spk
| 11,343 | 37.195286 | 88 | py |
espnet | espnet-master/espnet2/enh/separator/dpcl_e2e_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet.nets.pytorch_backend.rnn.encoders import RNN
class DPCLE2ESeparator(AbsSeparator):
    def __init__(
        self,
        input_dim: int,
        rnn_type: str = "blstm",
        num_spk: int = 2,
        predict_noise: bool = False,
        nonlinear: str = "tanh",
        layer: int = 2,
        unit: int = 512,
        emb_D: int = 40,
        dropout: float = 0.0,
        alpha: float = 5.0,
        max_iteration: int = 500,
        threshold: float = 1.0e-05,
    ):
        """Deep Clustering End-to-End Separator
        References:
            Single-Channel Multi-Speaker Separation using Deep Clustering;
            Yusuf Isik. et al., 2016;
            https://www.isca-speech.org/archive/interspeech_2016/isik16_interspeech.html
        Args:
            input_dim: input feature dimension
            rnn_type: string, select from 'blstm', 'lstm' etc.
            num_spk: number of speakers
            predict_noise: whether to output the estimated noise signal
            nonlinear: the nonlinear function for mask estimation,
                       select from 'relu', 'tanh', 'sigmoid'
            layer: int, number of stacked RNN layers. Default is 2.
            unit: int, dimension of the hidden state.
            emb_D: int, dimension of the feature vector for a tf-bin.
            dropout: float, dropout ratio. Default is 0.
            alpha: float, the clustering hardness parameter.
            max_iteration: int, the max iterations of soft kmeans.
            threshold: float, the threshold to end the soft k-means process.
        """
        super().__init__()
        self._num_spk = num_spk
        self.predict_noise = predict_noise
        # 1st stage: embeds every T-F bin into a D-dimensional space.
        self.blstm = RNN(
            idim=input_dim,
            elayers=layer,
            cdim=unit,
            hdim=unit,
            dropout=dropout,
            typ=rnn_type,
        )
        self.linear = torch.nn.Linear(unit, input_dim * emb_D)
        if nonlinear not in ("sigmoid", "relu", "tanh"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))
        self.nonlinear = {
            "sigmoid": torch.nn.Sigmoid(),
            "relu": torch.nn.ReLU(),
            "tanh": torch.nn.Tanh(),
        }[nonlinear]
        # One extra output stream is reserved for the noise estimate.
        self.num_outputs = self.num_spk + 1 if self.predict_noise else self.num_spk
        # 2nd stage: enhancement network refining the soft k-means masks;
        # its input is the masked streams plus the mixture itself.
        self.enh_blstm = RNN(
            idim=input_dim * (self.num_outputs + 1),
            elayers=1,
            cdim=unit,
            hdim=unit,
            dropout=dropout,
            typ=rnn_type,
        )
        self.enh_linear = torch.nn.Linear(unit, input_dim * self.num_outputs)
        self.D = emb_D
        self.alpha = alpha
        self.max_iteration = max_iteration
        self.threshold = threshold
    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.
        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, F]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model
        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        # If the input is a complex spectrum, embed its magnitude.
        if is_complex(input):
            feature = abs(input)
        else:
            feature = input
        B, T, F = input.shape
        # 1st Stage: one D-dim embedding per T-F bin.
        # x:(B, T, F)
        x, ilens, _ = self.blstm(feature, ilens)
        # x:(B, T, F*D)
        x = self.linear(x)
        x = self.nonlinear(x)
        V = x.view(B, -1, self.D)
        # Soft k-means over the embeddings; the first num_outputs embeddings
        # seed the centroids.
        centers = V[:, : self.num_outputs, :]
        gamma = torch.zeros(B, T * F, self.num_outputs, device=input.device)
        count = 0
        while True:
            gamma_exp = torch.empty(B, T * F, self.num_outputs, device=input.device)
            new_centers = torch.empty(B, self.num_outputs, self.D, device=input.device)
            for i in range(self.num_outputs):
                # Responsibility of centroid i for each bin:
                # gamma_i ∝ exp(-alpha * ||v - mu_i||^2).
                # BUGFIX: the difference must be parenthesized before
                # squaring; `V - mu ** 2` (the previous form) squared only
                # the centroid because `**` binds tighter than `-`, so the
                # exponent was not a distance at all.
                gamma_exp[:, :, i] = torch.exp(
                    -self.alpha
                    * torch.sum((V - centers[:, i, :].unsqueeze(1)) ** 2, dim=2)
                )
            # To avoid grad becomes nan, we add a small constant in denominator
            gamma = gamma_exp / (torch.sum(gamma_exp, dim=2, keepdim=True) + 1.0e-8)
            # Update centroids as the responsibility-weighted mean.
            for i in range(self.num_outputs):
                new_centers[:, i, :] = torch.sum(
                    V * gamma[:, :, i].unsqueeze(2), dim=1
                ) / (torch.sum(gamma[:, :, i].unsqueeze(2), dim=1) + 1.0e-8)
            # Stop on convergence or when the iteration budget is spent.
            if (
                torch.pow(new_centers - centers, 2).sum() < self.threshold
                or count > self.max_iteration
            ):
                break
            count += 1
            centers = new_centers
        masks = gamma.contiguous().view(B, T, F, self.num_outputs).unbind(dim=3)
        masked = [feature * m for m in masks]
        # Append the mixture itself as an extra input stream for stage 2.
        masked.append(feature)
        # 2nd Stage: refine the masks with the enhancement BLSTM.
        # cat_source:(B, T, (num_outputs+1)*F)
        cat_source = torch.cat(masked, dim=2)
        cat_x, ilens, _ = self.enh_blstm(cat_source, ilens)
        # z:(B, T, num_outputs*F)
        z = self.enh_linear(cat_x)
        z = z.contiguous().view(B, T, F, self.num_outputs)
        # Output masks compete via a softmax across the streams.
        enh_masks = torch.softmax(z, dim=3).unbind(dim=3)
        if self.predict_noise:
            *enh_masks, mask_noise = enh_masks
        enh_masked = [input * m for m in enh_masks]
        others = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(len(enh_masks))], enh_masks)
        )
        if self.predict_noise:
            others["noise1"] = input * mask_noise
        return enh_masked, ilens, others
    @property
    def num_spk(self):
        # Number of speaker outputs (excludes the optional noise stream).
        return self._num_spk
| 6,677 | 33.78125 | 88 | py |
espnet | espnet-master/espnet2/enh/separator/tcn_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.layers.tcn import TemporalConvNet
from espnet2.enh.separator.abs_separator import AbsSeparator
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class TCNSeparator(AbsSeparator):
    """Temporal-convolution (Conv-TasNet style) mask-estimation separator."""

    def __init__(
        self,
        input_dim: int,
        num_spk: int = 2,
        predict_noise: bool = False,
        layer: int = 8,
        stack: int = 3,
        bottleneck_dim: int = 128,
        hidden_dim: int = 512,
        kernel: int = 3,
        causal: bool = False,
        norm_type: str = "gLN",
        nonlinear: str = "relu",
    ):
        """Temporal Convolution Separator
        Args:
            input_dim: input feature dimension
            num_spk: number of speakers
            predict_noise: whether to output the estimated noise signal
            layer: int, number of layers in each stack.
            stack: int, number of stacks
            bottleneck_dim: bottleneck dimension
            hidden_dim: number of convolution channel
            kernel: int, kernel size.
            causal: bool, defalut False.
            norm_type: str, choose from 'BN', 'gLN', 'cLN'
            nonlinear: the nonlinear function for mask estimation,
                       select from 'relu', 'tanh', 'sigmoid'
        """
        super().__init__()
        self._num_spk = num_spk
        self.predict_noise = predict_noise
        if nonlinear not in ("sigmoid", "relu", "tanh"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))
        # One extra mask channel is allocated when a noise estimate is wanted.
        n_streams = num_spk + 1 if predict_noise else num_spk
        self.tcn = TemporalConvNet(
            N=input_dim,
            B=bottleneck_dim,
            H=hidden_dim,
            P=kernel,
            X=layer,
            R=stack,
            C=n_streams,
            norm_type=norm_type,
            causal=causal,
            mask_nonlinear=nonlinear,
        )

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.
        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model
        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        # Masks are estimated on magnitudes when the input is complex, but
        # applied to the original (possibly complex) input.
        feature = abs(input) if is_complex(input) else input
        # TemporalConvNet consumes (B, N, L); the encoded feature is (B, L, N).
        stream_masks = self.tcn(feature.transpose(1, 2))  # B, n_streams, N, L
        stream_masks = stream_masks.transpose(2, 3)  # B, n_streams, L, N
        mask_list = list(stream_masks.unbind(dim=1))  # each (B, L, N)
        mask_noise = mask_list.pop() if self.predict_noise else None
        masked = [input * m for m in mask_list]
        others = OrderedDict(
            ("mask_spk{}".format(idx + 1), m) for idx, m in enumerate(mask_list)
        )
        if mask_noise is not None:
            others["noise1"] = input * mask_noise
        return masked, ilens, others

    def forward_streaming(self, input_frame: torch.Tensor, buffer=None):
        """Process one frame (B, 1, N) at a time with a rolling context buffer."""
        B, _, N = input_frame.shape
        receptive_field = self.tcn.receptive_field
        if buffer is None:
            # Zero-initialized context covering exactly the TCN receptive field.
            buffer = torch.zeros((B, receptive_field, N), device=input_frame.device)
        # Shift the context left by one frame and append the newest frame.
        buffer = torch.roll(buffer, shifts=-1, dims=1)
        buffer[:, -1, :] = input_frame[:, 0, :]
        masked, ilens, others = self.forward(buffer, None)
        # Emit only the newest frame of every estimate.
        masked = [m[:, -1:, :] for m in masked]
        return masked, buffer, others

    @property
    def num_spk(self):
        # Number of speaker outputs (excludes the optional noise stream).
        return self._num_spk
| 4,478 | 31.223022 | 84 | py |
espnet | espnet-master/espnet2/enh/separator/svoice_separator.py | import math
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet2.enh.layers.dpmulcat import DPMulCat
from espnet2.enh.layers.dprnn import merge_feature, split_feature
from espnet2.enh.separator.abs_separator import AbsSeparator
def overlap_and_add(signal, frame_step):
    """Fold overlapping frames back into a contiguous signal.

    Frames of shape ``[..., frames, frame_length]`` are summed into an output
    of shape ``[..., output_size]``, where subsequent frames are offset by
    ``frame_step`` samples and
    ``output_size = (frames - 1) * frame_step + frame_length``.

    Args:
        signal: tensor of shape [..., frames, frame_length]; rank must be
            at least 2 (leading dimensions may be anything).
        frame_step: hop between consecutive frames; must be less than or
            equal to frame_length.

    Returns:
        Tensor of shape [..., (frames - 1) * frame_step + frame_length]
        containing the overlap-added frames.

    Based on
    https://github.com/tensorflow/tensorflow/blob/r1.12/tensorflow/contrib/signal/python/ops/reconstruction_ops.py
    """
    batch_shape = signal.size()[:-2]
    n_frames, frame_len = signal.size()[-2:]
    # Split every frame into sub-frames of the greatest common length, so
    # that each sub-frame lands on exactly one output slot.
    sub_len = math.gcd(frame_len, frame_step)
    step_subs = frame_step // sub_len
    subs_per_frame = frame_len // sub_len
    out_len = frame_step * (n_frames - 1) + frame_len
    out_subs = out_len // sub_len
    sub_signal = signal.reshape(*batch_shape, -1, sub_len)
    # For each frame, the indices of the output sub-frames it contributes to.
    idx = (
        torch.arange(0, out_subs)
        .unfold(0, subs_per_frame, step_subs)
        .to(device=signal.device, dtype=torch.long)
        .reshape(-1)
    )
    out = signal.new_zeros(*batch_shape, out_subs, sub_len)
    # Scatter-add every sub-frame into its output slot (overlaps sum up).
    out.index_add_(-2, idx, sub_signal)
    return out.reshape(*batch_shape, -1)
class Encoder(nn.Module):
    """Waveform encoder: strided 1-D convolution (50% overlap) plus ReLU."""

    def __init__(self, enc_kernel_size: int, enc_feat_dim: int):
        super().__init__()
        # A stride of half the kernel size yields 50% frame overlap.
        self.conv = nn.Conv1d(
            1,
            enc_feat_dim,
            kernel_size=enc_kernel_size,
            stride=enc_kernel_size // 2,
            bias=False,
        )
        self.nonlinear = nn.ReLU()

    def forward(self, mixture):
        """Encode waveforms [B, T] into non-negative features [B, N, L]."""
        framed = self.conv(mixture.unsqueeze(1))
        return self.nonlinear(framed)
class Decoder(nn.Module):
    """Feature decoder: average over the feature axis, then overlap-add."""

    def __init__(self, kernel_size):
        super().__init__()
        self.kernel_size = kernel_size

    def forward(self, est_source):
        # [..., N, L] -> [..., L, N]; average pooling collapses the feature
        # axis, then 50%-hop overlap-add undoes the encoder framing.
        swapped = torch.transpose(est_source, 2, 3)
        pooled = nn.AvgPool2d((1, self.kernel_size))(swapped)
        return overlap_and_add(pooled, self.kernel_size // 2)
class SVoiceSeparator(AbsSeparator):
    """SVoice model for speech separation.
    Reference:
        Voice Separation with an Unknown Number of Multiple Speakers;
        E. Nachmani et al., 2020;
        https://arxiv.org/abs/2003.01531
    Args:
        enc_dim: int, dimension of the encoder module's output. (Default: 128)
        kernel_size: int, the kernel size of Conv1D layer in both encoder and
            decoder modules. (Default: 8)
        hidden_size: int, dimension of the hidden state in RNN layers. (Default: 128)
        num_spk: int, the number of speakers in the output. (Default: 2)
        num_layers: int, number of stacked MulCat blocks. (Default: 4)
        segment_size: dual-path segment size. (Default: 20)
        bidirectional: bool, whether the RNN layers are bidirectional. (Default: True)
        input_normalize: bool, whether to apply GroupNorm on the input Tensor.
            (Default: False)
    """
    def __init__(
        self,
        input_dim: int,
        enc_dim: int,
        kernel_size: int,
        hidden_size: int,
        num_spk: int = 2,
        num_layers: int = 4,
        segment_size: int = 20,
        bidirectional: bool = True,
        input_normalize: bool = False,
    ):
        super().__init__()
        self._num_spk = num_spk
        self.enc_dim = enc_dim
        self.segment_size = segment_size
        # model sub-networks: waveform encoder/decoder plus the dual-path
        # multiply-and-concat (MulCat) RNN separator.
        self.encoder = Encoder(kernel_size, enc_dim)
        self.decoder = Decoder(kernel_size)
        self.rnn_model = DPMulCat(
            input_size=enc_dim,
            hidden_size=hidden_size,
            output_size=enc_dim,
            num_spk=num_spk,
            num_layers=num_layers,
            bidirectional=bidirectional,
            input_normalize=input_normalize,
        )
    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[torch.Tensor], torch.Tensor, OrderedDict]:
        """Forward.
        Args:
            input (torch.Tensor): raw waveforms [B, T]
                (the Encoder applies its own Conv1d framing)
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model
        Returns:
            separated:
                in training: List of per-block outputs, each a list of
                    num_spk waveform tensors (B, T) — one entry per MulCat
                    block for multi-scale loss computation;
                in inference: the list of num_spk waveform tensors (B, T)
                    from the last block only.
            ilens (torch.Tensor): (B,)
            others: empty dict (no auxiliary outputs for this model)
        """
        # fix time dimension, might change due to convolution operations
        T_mix = input.size(-1)
        mixture_w = self.encoder(input)
        # Chunk features into overlapping segments for dual-path processing.
        enc_segments, enc_rest = split_feature(mixture_w, self.segment_size)
        # separate
        output_all = self.rnn_model(enc_segments)
        # generate wav after each RNN block and optimize the loss
        outputs = []
        for ii in range(len(output_all)):
            output_ii = merge_feature(output_all[ii], enc_rest)
            output_ii = output_ii.view(
                input.shape[0], self._num_spk, self.enc_dim, mixture_w.shape[2]
            )
            output_ii = self.decoder(output_ii)
            # Pad back to the mixture length lost to strided convolutions.
            T_est = output_ii.size(-1)
            output_ii = F.pad(output_ii, (0, T_mix - T_est))
            output_ii = list(output_ii.unbind(dim=1))
            if self.training:
                # Keep every block's estimate for multi-scale training loss.
                outputs.append(output_ii)
            else:
                # At inference only the final block's estimate is returned
                # (each iteration overwrites the previous one).
                outputs = output_ii
        others = {}
        return outputs, ilens, others
    @property
    def num_spk(self):
        # Number of separated speaker outputs.
        return self._num_spk
| 7,042 | 34.215 | 114 | py |
espnet | espnet-master/espnet2/enh/separator/transformer_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.transformer.embedding import ( # noqa: H301
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import (
Encoder as TransformerEncoder,
)
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class TransformerSeparator(AbsSeparator):
def __init__(
self,
input_dim: int,
num_spk: int = 2,
predict_noise: bool = False,
adim: int = 384,
aheads: int = 4,
layers: int = 6,
linear_units: int = 1536,
positionwise_layer_type: str = "linear",
positionwise_conv_kernel_size: int = 1,
normalize_before: bool = False,
concat_after: bool = False,
dropout_rate: float = 0.1,
positional_dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.1,
use_scaled_pos_enc: bool = True,
nonlinear: str = "relu",
):
"""Transformer separator.
Args:
input_dim: input feature dimension
num_spk: number of speakers
predict_noise: whether to output the estimated noise signal
adim (int): Dimension of attention.
aheads (int): The number of heads of multi head attention.
linear_units (int): The number of units of position-wise feed forward.
layers (int): The number of transformer blocks.
dropout_rate (float): Dropout rate.
attention_dropout_rate (float): Dropout rate in attention.
positional_dropout_rate (float): Dropout rate after adding
positional encoding.
normalize_before (bool): Whether to use layer_norm before the first block.
concat_after (bool): Whether to concat attention layer's input and output.
if True, additional linear will be applied.
i.e. x -> x + linear(concat(x, att(x)))
if False, no additional linear will be applied. i.e. x -> x + att(x)
positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
positionwise_conv_kernel_size (int): Kernel size of
positionwise conv1d layer.
use_scaled_pos_enc (bool) : use scaled positional encoding or not
nonlinear: the nonlinear function for mask estimation,
select from 'relu', 'tanh', 'sigmoid'
"""
super().__init__()
self._num_spk = num_spk
self.predict_noise = predict_noise
pos_enc_class = (
ScaledPositionalEncoding if use_scaled_pos_enc else PositionalEncoding
)
self.transformer = TransformerEncoder(
idim=input_dim,
attention_dim=adim,
attention_heads=aheads,
linear_units=linear_units,
num_blocks=layers,
input_layer="linear",
dropout_rate=dropout_rate,
positional_dropout_rate=positional_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
pos_enc_class=pos_enc_class,
normalize_before=normalize_before,
concat_after=concat_after,
positionwise_layer_type=positionwise_layer_type,
positionwise_conv_kernel_size=positionwise_conv_kernel_size,
)
num_outputs = self.num_spk + 1 if self.predict_noise else self.num_spk
self.linear = torch.nn.ModuleList(
[torch.nn.Linear(adim, input_dim) for _ in range(num_outputs)]
)
if nonlinear not in ("sigmoid", "relu", "tanh"):
raise ValueError("Not supporting nonlinear={}".format(nonlinear))
self.nonlinear = {
"sigmoid": torch.nn.Sigmoid(),
"relu": torch.nn.ReLU(),
"tanh": torch.nn.Tanh(),
}[nonlinear]
def forward(
self,
input: Union[torch.Tensor, ComplexTensor],
ilens: torch.Tensor,
additional: Optional[Dict] = None,
) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
"""Forward.
Args:
input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
ilens (torch.Tensor): input lengths [Batch]
additional (Dict or None): other data included in model
NOTE: not used in this model
Returns:
masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
ilens (torch.Tensor): (B,)
others predicted data, e.g. masks: OrderedDict[
'mask_spk1': torch.Tensor(Batch, Frames, Freq),
'mask_spk2': torch.Tensor(Batch, Frames, Freq),
...
'mask_spkn': torch.Tensor(Batch, Frames, Freq),
]
"""
# if complex spectrum,
if is_complex(input):
feature = abs(input)
else:
feature = input
# prepare pad_mask for transformer
pad_mask = make_non_pad_mask(ilens).unsqueeze(1).to(feature.device)
x, ilens = self.transformer(feature, pad_mask)
masks = []
for linear in self.linear:
y = linear(x)
y = self.nonlinear(y)
masks.append(y)
if self.predict_noise:
*masks, mask_noise = masks
masked = [input * m for m in masks]
others = OrderedDict(
zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
)
if self.predict_noise:
others["noise1"] = input * mask_noise
return masked, ilens, others
    @property
    def num_spk(self):
        """Number of output speakers this separator was configured with."""
        return self._num_spk
| 6,107 | 36.018182 | 86 | py |
espnet | espnet-master/espnet2/enh/separator/neural_beamformer.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.dnn_beamformer import DNN_Beamformer
from espnet2.enh.layers.dnn_wpe import DNN_WPE
from espnet2.enh.separator.abs_separator import AbsSeparator
class NeuralBeamformer(AbsSeparator):
    """Mask-based neural beamformer separator.

    Optionally applies DNN-supported WPE dereverberation followed by a
    mask-based beamformer (MVDR/wMPDR/WPD, ...).  When training with a
    mask-based loss, only the masks are estimated to save memory.
    """

    def __init__(
        self,
        input_dim: int,
        num_spk: int = 1,
        loss_type: str = "mask_mse",
        # Dereverberation options
        use_wpe: bool = False,
        wnet_type: str = "blstmp",
        wlayers: int = 3,
        wunits: int = 300,
        wprojs: int = 320,
        wdropout_rate: float = 0.0,
        taps: int = 5,
        delay: int = 3,
        use_dnn_mask_for_wpe: bool = True,
        wnonlinear: str = "crelu",
        multi_source_wpe: bool = True,
        wnormalization: bool = False,
        # Beamformer options
        use_beamformer: bool = True,
        bnet_type: str = "blstmp",
        blayers: int = 3,
        bunits: int = 300,
        bprojs: int = 320,
        badim: int = 320,
        ref_channel: int = -1,
        use_noise_mask: bool = True,
        bnonlinear: str = "sigmoid",
        beamformer_type: str = "mvdr_souden",
        rtf_iterations: int = 2,
        bdropout_rate: float = 0.0,
        shared_power: bool = True,
        use_torchaudio_api: bool = False,
        # For numerical stability
        diagonal_loading: bool = True,
        diag_eps_wpe: float = 1e-7,
        diag_eps_bf: float = 1e-7,
        mask_flooring: bool = False,
        flooring_thres_wpe: float = 1e-6,
        flooring_thres_bf: float = 1e-6,
        use_torch_solver: bool = True,
    ):
        """Initialize NeuralBeamformer.

        Args:
            input_dim: number of frequency bins of the input spectrum.
            num_spk: number of speakers.
            loss_type: one of ("mask_mse", "spectrum", "spectrum_log", "magnitude").
            use_wpe: whether to apply DNN-WPE dereverberation.
            use_dnn_mask_for_wpe: if True, a DNN estimates the speech power and a
                single WPE iteration is run; otherwise two plain iterations.
            multi_source_wpe: if True, one shared WPE filter is estimated for all
                sources; otherwise one filter per speaker.
            use_beamformer: whether to apply the mask-based beamformer.
            beamformer_type: beamformer variant passed to DNN_Beamformer
                (e.g. "mvdr_souden", "wmpdr...", "wpd...").
            shared_power: share speech powers between WPE and beamforming;
                only effective for wMPDR/WPD beamformer types.

            The remaining arguments are forwarded to DNN_WPE / DNN_Beamformer
            (network sizes, numerical-stability options, etc.).
        """
        super().__init__()

        self._num_spk = num_spk
        self.loss_type = loss_type
        if loss_type not in ("mask_mse", "spectrum", "spectrum_log", "magnitude"):
            raise ValueError("Unsupported loss type: %s" % loss_type)

        self.use_beamformer = use_beamformer
        self.use_wpe = use_wpe

        if self.use_wpe:
            if use_dnn_mask_for_wpe:
                # Use DNN for power estimation
                iterations = 1
            else:
                # Performing as conventional WPE, without DNN Estimator
                iterations = 2

            self.wpe = DNN_WPE(
                wtype=wnet_type,
                widim=input_dim,
                wlayers=wlayers,
                wunits=wunits,
                wprojs=wprojs,
                dropout_rate=wdropout_rate,
                taps=taps,
                delay=delay,
                use_dnn_mask=use_dnn_mask_for_wpe,
                nmask=1 if multi_source_wpe else num_spk,
                nonlinear=wnonlinear,
                iterations=iterations,
                normalization=wnormalization,
                diagonal_loading=diagonal_loading,
                diag_eps=diag_eps_wpe,
                mask_flooring=mask_flooring,
                flooring_thres=flooring_thres_wpe,
                use_torch_solver=use_torch_solver,
            )
        else:
            self.wpe = None

        self.ref_channel = ref_channel
        if self.use_beamformer:
            self.beamformer = DNN_Beamformer(
                bidim=input_dim,
                btype=bnet_type,
                blayers=blayers,
                bunits=bunits,
                bprojs=bprojs,
                num_spk=num_spk,
                use_noise_mask=use_noise_mask,
                nonlinear=bnonlinear,
                dropout_rate=bdropout_rate,
                badim=badim,
                ref_channel=ref_channel,
                beamformer_type=beamformer_type,
                rtf_iterations=rtf_iterations,
                btaps=taps,
                bdelay=delay,
                diagonal_loading=diagonal_loading,
                diag_eps=diag_eps_bf,
                mask_flooring=mask_flooring,
                flooring_thres=flooring_thres_bf,
                use_torch_solver=use_torch_solver,
                use_torchaudio_api=use_torchaudio_api,
            )
        else:
            self.beamformer = None

        # share speech powers between WPE and beamforming (wMPDR/WPD)
        self.shared_power = shared_power and use_wpe

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.

        Args:
            input (torch.complex64/ComplexTensor):
                mixed speech [Batch, Frames, Channel, Freq]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model

        Returns:
            enhanced speech (single-channel): List[torch.complex64/ComplexTensor]
            output lengths
            other predcited data: OrderedDict[
                'dereverb1': ComplexTensor(Batch, Frames, Channel, Freq),
                'mask_dereverb1': torch.Tensor(Batch, Frames, Channel, Freq),
                'mask_noise1': torch.Tensor(Batch, Frames, Channel, Freq),
                'mask_spk1': torch.Tensor(Batch, Frames, Channel, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Channel, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Channel, Freq),
            ]
        """
        # Shape of input spectrum must be (B, T, F) or (B, T, C, F)
        assert input.dim() in (3, 4), input.dim()
        enhanced = input
        others = OrderedDict()

        if (
            self.training
            and self.loss_type is not None
            and self.loss_type.startswith("mask")
        ):
            # Only estimating masks during training for saving memory
            if self.use_wpe:
                if input.dim() == 3:
                    # Treat single-channel input as one-channel multi-channel.
                    mask_w, ilens = self.wpe.predict_mask(input.unsqueeze(-2), ilens)
                    mask_w = mask_w.squeeze(-2)
                elif input.dim() == 4:
                    mask_w, ilens = self.wpe.predict_mask(input, ilens)

                if mask_w is not None:
                    if isinstance(enhanced, list):
                        # single-source WPE
                        for spk in range(self.num_spk):
                            others["mask_dereverb{}".format(spk + 1)] = mask_w[spk]
                    else:
                        # multi-source WPE
                        others["mask_dereverb1"] = mask_w

            # Beamformer masks can only be estimated on multi-channel input.
            if self.use_beamformer and input.dim() == 4:
                others_b, ilens = self.beamformer.predict_mask(input, ilens)
                for spk in range(self.num_spk):
                    others["mask_spk{}".format(spk + 1)] = others_b[spk]
                if len(others_b) > self.num_spk:
                    others["mask_noise1"] = others_b[self.num_spk]

            # No enhanced signal in mask-only mode.
            return None, ilens, others

        else:
            powers = None
            # Performing both mask estimation and enhancement
            if input.dim() == 3:
                # single-channel input (B, T, F)
                if self.use_wpe:
                    enhanced, ilens, mask_w, powers = self.wpe(
                        input.unsqueeze(-2), ilens
                    )
                    if isinstance(enhanced, list):
                        # single-source WPE
                        enhanced = [enh.squeeze(-2) for enh in enhanced]
                        if mask_w is not None:
                            for spk in range(self.num_spk):
                                key = "dereverb{}".format(spk + 1)
                                others[key] = enhanced[spk]
                                others["mask_" + key] = mask_w[spk].squeeze(-2)
                    else:
                        # multi-source WPE
                        enhanced = enhanced.squeeze(-2)
                        if mask_w is not None:
                            others["dereverb1"] = enhanced
                            others["mask_dereverb1"] = mask_w.squeeze(-2)
            else:
                # multi-channel input (B, T, C, F)
                # 1. WPE
                if self.use_wpe:
                    enhanced, ilens, mask_w, powers = self.wpe(input, ilens)
                    if mask_w is not None:
                        if isinstance(enhanced, list):
                            # single-source WPE
                            for spk in range(self.num_spk):
                                key = "dereverb{}".format(spk + 1)
                                others[key] = enhanced[spk]
                                others["mask_" + key] = mask_w[spk]
                        else:
                            # multi-source WPE
                            others["dereverb1"] = enhanced
                            others["mask_dereverb1"] = mask_w.squeeze(-2)

                # 2. Beamformer
                if self.use_beamformer:
                    # WPE speech powers can only be reused by wMPDR/WPD
                    # beamformers, and only when power sharing is enabled and
                    # WPE estimated per-speaker powers.  Otherwise discard them.
                    # (BUGFIX: the previous `not A or not B` condition was
                    # always True, so powers were never shared.)
                    # NOTE: `not self.shared_power` must be evaluated before
                    # `self.wpe.nmask` — when WPE is disabled, self.wpe is None
                    # but shared_power is also forced False in __init__.
                    if (
                        not (
                            self.beamformer.beamformer_type.startswith("wmpdr")
                            or self.beamformer.beamformer_type.startswith("wpd")
                        )
                        or not self.shared_power
                        or (self.wpe.nmask == 1 and self.num_spk > 1)
                    ):
                        powers = None

                    # enhanced: (B, T, C, F) -> (B, T, F)
                    if isinstance(enhanced, list):
                        # outputs of single-source WPE
                        raise NotImplementedError(
                            "Single-source WPE is not supported with beamformer "
                            "in multi-speaker cases."
                        )
                    else:
                        # output of multi-source WPE
                        enhanced, ilens, others_b = self.beamformer(
                            enhanced, ilens, powers=powers
                        )
                    for spk in range(self.num_spk):
                        others["mask_spk{}".format(spk + 1)] = others_b[spk]
                    if len(others_b) > self.num_spk:
                        others["mask_noise1"] = others_b[self.num_spk]

        if not isinstance(enhanced, list):
            enhanced = [enhanced]

        return enhanced, ilens, others

    @property
    def num_spk(self):
        """Number of output speakers."""
        return self._num_spk
| 10,439 | 38.24812 | 85 | py |
espnet | espnet-master/espnet2/enh/separator/abs_separator.py | from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Dict, Optional, Tuple
import torch
class AbsSeparator(torch.nn.Module, ABC):
    """Abstract base class for all separator modules.

    A separator consumes an encoded feature and its lengths and returns the
    separated features, the (possibly updated) lengths, and an OrderedDict of
    auxiliary predictions (e.g. per-speaker masks).
    """

    @abstractmethod
    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[Tuple[torch.Tensor], torch.Tensor, OrderedDict]:
        """Separate `input`; must be implemented by subclasses."""
        raise NotImplementedError

    def forward_streaming(
        self,
        input_frame: torch.Tensor,
        buffer=None,
    ):
        """Process a single frame for streaming inference.

        Optional: subclasses that support streaming override this; the default
        raises NotImplementedError.
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def num_spk(self):
        """Number of output speakers; must be implemented by subclasses."""
        raise NotImplementedError
| 652 | 21.517241 | 63 | py |
espnet | espnet-master/espnet2/enh/separator/dptnet_separator.py | from collections import OrderedDict
from distutils.version import LooseVersion
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.layers.dptnet import DPTNet
from espnet2.enh.layers.tcn import choose_norm
from espnet2.enh.separator.abs_separator import AbsSeparator
is_torch_1_9_plus = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
class DPTNetSeparator(AbsSeparator):
    def __init__(
        self,
        input_dim: int,
        post_enc_relu: bool = True,
        rnn_type: str = "lstm",
        bidirectional: bool = True,
        num_spk: int = 2,
        predict_noise: bool = False,
        unit: int = 256,
        att_heads: int = 4,
        dropout: float = 0.0,
        activation: str = "relu",
        norm_type: str = "gLN",
        layer: int = 6,
        segment_size: int = 20,
        nonlinear: str = "relu",
    ):
        """Dual-Path Transformer Network (DPTNet) Separator

        Args:
            input_dim: input feature dimension
            rnn_type: string, select from 'RNN', 'LSTM' and 'GRU'.
            bidirectional: bool, whether the inter-chunk RNN layers are bidirectional.
            num_spk: number of speakers
            predict_noise: whether to output the estimated noise signal
            unit: int, dimension of the hidden state.
            att_heads: number of attention heads.
            dropout: float, dropout ratio. Default is 0.
            activation: activation function applied at the output of RNN.
            norm_type: type of normalization to use after each inter- or
                intra-chunk Transformer block.
            nonlinear: the nonlinear function for mask estimation,
                select from 'relu', 'tanh', 'sigmoid'
            layer: int, number of stacked RNN layers. Default is 3.
            segment_size: dual-path segment size
        """
        super().__init__()
        self._num_spk = num_spk
        self.predict_noise = predict_noise
        self.segment_size = segment_size
        self.post_enc_relu = post_enc_relu
        self.enc_LN = choose_norm(norm_type, input_dim)
        # One extra output head for the noise estimate when enabled.
        self.num_outputs = self.num_spk + 1 if self.predict_noise else self.num_spk
        # DPTNet emits all heads stacked along the channel axis.
        self.dptnet = DPTNet(
            rnn_type=rnn_type,
            input_size=input_dim,
            hidden_size=unit,
            output_size=input_dim * self.num_outputs,
            att_heads=att_heads,
            dropout=dropout,
            activation=activation,
            num_layers=layer,
            bidirectional=bidirectional,
            norm_type=norm_type,
        )
        # gated output layer: tanh(conv(x)) * sigmoid(conv(x))
        self.output = torch.nn.Sequential(
            torch.nn.Conv1d(input_dim, input_dim, 1), torch.nn.Tanh()
        )
        self.output_gate = torch.nn.Sequential(
            torch.nn.Conv1d(input_dim, input_dim, 1), torch.nn.Sigmoid()
        )
        if nonlinear not in ("sigmoid", "relu", "tanh"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))
        self.nonlinear = {
            "sigmoid": torch.nn.Sigmoid(),
            "relu": torch.nn.ReLU(),
            "tanh": torch.nn.Tanh(),
        }[nonlinear]

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.

        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model

        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        # if complex spectrum,
        if is_complex(input):
            feature = abs(input)
        elif self.post_enc_relu:
            feature = torch.nn.functional.relu(input)
        else:
            feature = input
        B, T, N = feature.shape
        feature = feature.transpose(1, 2)  # B, N, T
        feature = self.enc_LN(feature)
        # Dual-path processing: segment time axis, process, then overlap-add.
        segmented = self.split_feature(feature)  # B, N, L, K
        processed = self.dptnet(segmented)  # B, N*num_spk, L, K
        processed = processed.reshape(
            B * self.num_outputs, -1, processed.size(-2), processed.size(-1)
        )  # B*num_spk, N, L, K
        processed = self.merge_feature(processed, length=T)  # B*num_spk, N, T
        # gated output layer for filter generation (B*num_spk, N, T)
        processed = self.output(processed) * self.output_gate(processed)
        masks = processed.reshape(B, self.num_outputs, N, T)
        # list[(B, T, N)]
        masks = self.nonlinear(masks.transpose(-1, -2)).unbind(dim=1)
        if self.predict_noise:
            # Last head is the noise mask.
            *masks, mask_noise = masks
        masked = [input * m for m in masks]
        others = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
        )
        if self.predict_noise:
            others["noise1"] = input * mask_noise
        return masked, ilens, others

    def split_feature(self, x):
        """Segment (B, N, T) features into half-overlapping chunks (B, N, L, K).

        The time axis is padded by one full segment on each side and windows of
        length `segment_size` are extracted with 50% overlap
        (stride = segment_size // 2).
        """
        B, N, T = x.size()
        unfolded = torch.nn.functional.unfold(
            x.unsqueeze(-1),
            kernel_size=(self.segment_size, 1),
            padding=(self.segment_size, 0),
            stride=(self.segment_size // 2, 1),
        )
        return unfolded.reshape(B, N, self.segment_size, -1)

    def merge_feature(self, x, length=None):
        """Overlap-add (B, N, L, K) chunks back into (B, N, length) features.

        Inverse of `split_feature`.  When `length` is given, fold is padded to
        undo the segment padding added during splitting; overlapping regions
        are averaged via the `norm_mat` count of contributions per frame.
        """
        B, N, L, n_chunks = x.size()
        hop_size = self.segment_size // 2
        if length is None:
            # No padding case: infer the output length from the chunk layout.
            length = (n_chunks - 1) * hop_size + L
            padding = 0
        else:
            padding = (0, L)
        seq = x.reshape(B, N * L, n_chunks)
        x = torch.nn.functional.fold(
            seq,
            output_size=(1, length),
            kernel_size=(1, L),
            padding=padding,
            stride=(1, hop_size),
        )
        # Count how many chunks contributed to each output frame, to normalize.
        norm_mat = torch.nn.functional.fold(
            input=torch.ones_like(seq),
            output_size=(1, length),
            kernel_size=(1, L),
            padding=padding,
            stride=(1, hop_size),
        )
        x /= norm_mat
        return x.reshape(B, N, length)

    @property
    def num_spk(self):
        """Number of output speakers."""
        return self._num_spk
| 6,828 | 33.145 | 86 | py |
espnet | espnet-master/espnet2/enh/separator/rnn_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet.nets.pytorch_backend.rnn.encoders import RNN
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class RNNSeparator(AbsSeparator):
    def __init__(
        self,
        input_dim: int,
        rnn_type: str = "blstm",
        num_spk: int = 2,
        predict_noise: bool = False,
        nonlinear: str = "sigmoid",
        layer: int = 3,
        unit: int = 512,
        dropout: float = 0.0,
    ):
        """RNN Separator

        Args:
            input_dim: input feature dimension
            rnn_type: string, select from 'blstm', 'lstm' etc.
            num_spk: number of speakers
            predict_noise: whether to output the estimated noise signal
            nonlinear: the nonlinear function for mask estimation,
                select from 'relu', 'tanh', 'sigmoid'
            layer: int, number of stacked RNN layers. Default is 3.
            unit: int, dimension of the hidden state.
            dropout: float, dropout ratio. Default is 0.
        """
        super().__init__()

        self._num_spk = num_spk
        self.predict_noise = predict_noise

        self.rnn = RNN(
            idim=input_dim,
            elayers=layer,
            cdim=unit,
            hdim=unit,
            dropout=dropout,
            typ=rnn_type,
        )

        # One linear head per speaker, plus one for noise when enabled.
        num_outputs = self.num_spk + 1 if self.predict_noise else self.num_spk
        self.linear = torch.nn.ModuleList(
            [torch.nn.Linear(unit, input_dim) for _ in range(num_outputs)]
        )

        if nonlinear not in ("sigmoid", "relu", "tanh"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))

        self.nonlinear = {
            "sigmoid": torch.nn.Sigmoid(),
            "relu": torch.nn.ReLU(),
            "tanh": torch.nn.Tanh(),
        }[nonlinear]

    def _estimate_masks(self, x):
        """Apply each linear head followed by the nonlinearity to `x`.

        Returns one mask per output head (num_spk speaker masks, plus a final
        noise mask when `predict_noise` is True).
        """
        return [self.nonlinear(linear(x)) for linear in self.linear]

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.

        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model

        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        # If the input is a complex spectrum, estimate masks on its magnitude.
        if is_complex(input):
            feature = abs(input)
        else:
            feature = input

        x, ilens, _ = self.rnn(feature, ilens)

        masks = self._estimate_masks(x)
        if self.predict_noise:
            *masks, mask_noise = masks

        masked = [input * m for m in masks]

        others = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
        )
        if self.predict_noise:
            others["noise1"] = input * mask_noise
        return masked, ilens, others

    @property
    def num_spk(self):
        """Number of output speakers."""
        return self._num_spk

    def forward_streaming(self, input_frame: torch.Tensor, states=None):
        """Streaming forward, processing one frame at a time.

        Args:
            input_frame (torch.Tensor or ComplexTensor): one frame of the
                encoded feature [B, 1, N]
            states: recurrent states of the underlying RNN (None at the start)

        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, 1, N), ...]
            states: updated recurrent states to pass to the next call
            others (OrderedDict): estimated masks (and "noise1" when
                `predict_noise` is True)
        """
        # If the input is a complex spectrum, estimate masks on its magnitude.
        if is_complex(input_frame):
            feature = abs(input_frame)
        else:
            feature = input_frame

        ilens = torch.ones(feature.shape[0], device=feature.device)
        x, _, states = self.rnn(feature, ilens, states)

        masks = self._estimate_masks(x)
        if self.predict_noise:
            *masks, mask_noise = masks

        masked = [input_frame * m for m in masks]

        others = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
        )
        if self.predict_noise:
            # BUGFIX: this previously multiplied the Python builtin `input`
            # (the parameter here is `input_frame`), which raised at runtime.
            others["noise1"] = input_frame * mask_noise
        return masked, states, others
| 4,807 | 29.43038 | 86 | py |
espnet | espnet-master/espnet2/enh/separator/asteroid_models.py | import warnings
from collections import OrderedDict
from typing import Dict, Optional, Tuple
import torch
from espnet2.enh.separator.abs_separator import AbsSeparator
class AsteroidModel_Converter(AbsSeparator):
    def __init__(
        self,
        encoder_output_dim: int,
        model_name: str,
        num_spk: int,
        pretrained_path: str = "",
        loss_type: str = "si_snr",
        **model_related_kwargs,
    ):
        """The class to convert the models from asteroid to AbsSeparator.

        Args:
            encoder_output_dim: input feature dimension, default=1 after the
                NullEncoder
            num_spk: number of speakers
            loss_type: loss type of enhancement
            model_name: Asteroid model names, e.g. ConvTasNet, DPTNet. Refers to
                https://github.com/asteroid-team/asteroid/
                blob/master/asteroid/models/__init__.py
            pretrained_path: the name of pretrained model from Asteroid in HF hub.
                Refers to: https://github.com/asteroid-team/asteroid/
                blob/master/docs/source/readmes/pretrained_models.md and
                https://huggingface.co/models?filter=asteroid
            model_related_kwargs: more args towards each specific asteroid model.
        """
        super(AsteroidModel_Converter, self).__init__()
        # The input should be in the raw-waveform domain (no STFT encoder).
        assert encoder_output_dim == 1, encoder_output_dim

        # Lazy import so asteroid is only required when this class is used.
        # https://github.com/asteroid-team/asteroid
        from asteroid import models

        # Config files pass "None" as a string; normalize it to a real None.
        model_related_kwargs = {
            k: None if v == "None" else v for k, v in model_related_kwargs.items()
        }

        if pretrained_path:
            model = getattr(models, model_name).from_pretrained(pretrained_path)
            if model_related_kwargs:
                warnings.warn(
                    "Pretrained model should get no args with %s"
                    % model_related_kwargs
                )
        else:
            model_class = getattr(models, model_name)
            model = model_class(**model_related_kwargs)
        self.model = model
        self._num_spk = num_spk

        self.loss_type = loss_type
        if loss_type != "si_snr":
            raise ValueError("Unsupported loss type: %s" % loss_type)

    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor = None,
        additional: Optional[Dict] = None,
    ):
        """Whole forward of asteroid models.

        Args:
            input (torch.Tensor): Raw Waveforms [B, T]
            ilens (torch.Tensor): input lengths [B]
            additional (Dict or None): other data included in model

        Returns:
            estimated Waveforms(List[Union(torch.Tensor]): [(B, T), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, T),
                'mask_spk2': torch.Tensor(Batch, T),
                ...
                'mask_spkn': torch.Tensor(Batch, T),
            ]
        """
        if hasattr(self.model, "forward_wav"):
            est_source = self.model.forward_wav(input)  # B,nspk,T or nspk,T
        else:
            est_source = self.model(input)  # B,nspk,T or nspk,T

        if input.dim() == 1:
            # Unbatched waveform: est_source is (nspk, T).
            assert est_source.size(0) == self.num_spk, est_source.size(0)
        else:
            assert est_source.size(1) == self.num_spk, est_source.size(1)
            est_source = [es for es in est_source.transpose(0, 1)]  # List(M,T)

        masks = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(self.num_spk)], est_source)
        )
        return est_source, ilens, masks

    def forward_rawwav(
        self, input: torch.Tensor, ilens: torch.Tensor = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Output with waveforms."""
        return self.forward(input, ilens)

    @property
    def num_spk(self):
        """Number of output speakers."""
        return self._num_spk
if __name__ == "__main__":
    # Manual smoke test / usage example: run ConvTasNet on random "audio".
    # NOTE: requires the `asteroid` package; the first example also needs
    # network access to download a pretrained checkpoint from the HF hub.
    mixture = torch.randn(3, 16000)
    print("mixture shape", mixture.shape)
    # Example 1: load a pretrained checkpoint from the Hugging Face hub.
    net = AsteroidModel_Converter(
        model_name="ConvTasNet",
        encoder_output_dim=1,
        num_spk=2,
        loss_type="si_snr",
        pretrained_path="mpariente/ConvTasNet_WHAM!_sepclean",
    )
    print("model", net)
    output, *__ = net(mixture)
    output, *__ = net.forward_rawwav(mixture, 111)
    print("output spk1 shape", output[0].shape)
    # Example 2: build the same architecture from scratch with explicit
    # model-specific hyperparameters forwarded as **model_related_kwargs.
    net = AsteroidModel_Converter(
        encoder_output_dim=1,
        num_spk=2,
        model_name="ConvTasNet",
        n_src=2,
        loss_type="si_snr",
        out_chan=None,
        n_blocks=2,
        n_repeats=2,
        bn_chan=128,
        hid_chan=512,
        skip_chan=128,
        conv_kernel_size=3,
        norm_type="gLN",
        mask_act="sigmoid",
        in_chan=None,
        fb_name="free",
        kernel_size=16,
        n_filters=512,
        stride=8,
        encoder_activation=None,
        sample_rate=8000,
    )
    print("\n\nmodel", net)
    output, *__ = net(mixture)
    print("output spk1 shape", output[0].shape)
    print("Finished", output[0].shape)
| 5,364 | 31.91411 | 88 | py |
espnet | espnet-master/espnet2/enh/separator/dc_crn_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex, new_complex_like
from espnet2.enh.layers.dc_crn import DC_CRN
from espnet2.enh.separator.abs_separator import AbsSeparator
EPS = torch.finfo(torch.get_default_dtype()).eps
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class DC_CRNSeparator(AbsSeparator):
    """Densely-Connected Convolutional Recurrent Network (DC-CRN) separator."""

    def __init__(
        self,
        input_dim: int,
        num_spk: int = 2,
        predict_noise: bool = False,
        input_channels: List = [2, 16, 32, 64, 128, 256],
        enc_hid_channels: int = 8,
        enc_kernel_size: Tuple = (1, 3),
        enc_padding: Tuple = (0, 1),
        enc_last_kernel_size: Tuple = (1, 4),
        enc_last_stride: Tuple = (1, 2),
        enc_last_padding: Tuple = (0, 1),
        enc_layers: int = 5,
        skip_last_kernel_size: Tuple = (1, 3),
        skip_last_stride: Tuple = (1, 1),
        skip_last_padding: Tuple = (0, 1),
        glstm_groups: int = 2,
        glstm_layers: int = 2,
        glstm_bidirectional: bool = False,
        glstm_rearrange: bool = False,
        mode: str = "masking",
        ref_channel: int = 0,
    ):
        """Densely-Connected Convolutional Recurrent Network (DC-CRN) Separator

        Reference:
            Deep Learning Based Real-Time Speech Enhancement for Dual-Microphone
            Mobile Phones; Tan et al., 2020
            https://web.cse.ohio-state.edu/~wang.77/papers/TZW.taslp21.pdf

        Args:
            input_dim: input feature dimension
            num_spk: number of speakers
            predict_noise: whether to output the estimated noise signal
            input_channels (list): number of input channels for the stacked
                DenselyConnectedBlock layers
                Its length should be (`number of DenselyConnectedBlock layers`).
            enc_hid_channels (int): common number of intermediate channels for all
                DenselyConnectedBlock of the encoder
            enc_kernel_size (tuple): common kernel size for all DenselyConnectedBlock
                of the encoder
            enc_padding (tuple): common padding for all DenselyConnectedBlock
                of the encoder
            enc_last_kernel_size (tuple): common kernel size for the last Conv layer
                in all DenselyConnectedBlock of the encoder
            enc_last_stride (tuple): common stride for the last Conv layer in all
                DenselyConnectedBlock of the encoder
            enc_last_padding (tuple): common padding for the last Conv layer in all
                DenselyConnectedBlock of the encoder
            enc_layers (int): common total number of Conv layers for all
                DenselyConnectedBlock layers of the encoder
            skip_last_kernel_size (tuple): common kernel size for the last Conv layer
                in all DenselyConnectedBlock of the skip pathways
            skip_last_stride (tuple): common stride for the last Conv layer in all
                DenselyConnectedBlock of the skip pathways
            skip_last_padding (tuple): common padding for the last Conv layer in all
                DenselyConnectedBlock of the skip pathways
            glstm_groups (int): number of groups in each Grouped LSTM layer
            glstm_layers (int): number of Grouped LSTM layers
            glstm_bidirectional (bool): whether to use BLSTM or unidirectional LSTM
                in Grouped LSTM layers
            glstm_rearrange (bool): whether to apply the rearrange operation after each
                grouped LSTM layer
            output_channels (int): number of output channels (even number)
            mode (str): one of ("mapping", "masking")
                "mapping": complex spectral mapping
                "masking": complex masking
            ref_channel (int): index of the reference microphone
        """
        super().__init__()
        self._num_spk = num_spk
        self.predict_noise = predict_noise
        self.mode = mode
        if mode not in ("mapping", "masking"):
            raise ValueError("mode=%s is not supported" % mode)
        self.ref_channel = ref_channel

        # One extra output head for the noise estimate when enabled; each head
        # produces 2 channels (real and imaginary parts).
        num_outputs = self.num_spk + 1 if self.predict_noise else self.num_spk
        self.dc_crn = DC_CRN(
            input_dim=input_dim,
            input_channels=input_channels,
            enc_hid_channels=enc_hid_channels,
            enc_kernel_size=enc_kernel_size,
            enc_padding=enc_padding,
            enc_last_kernel_size=enc_last_kernel_size,
            enc_last_stride=enc_last_stride,
            enc_last_padding=enc_last_padding,
            enc_layers=enc_layers,
            skip_last_kernel_size=skip_last_kernel_size,
            skip_last_stride=skip_last_stride,
            skip_last_padding=skip_last_padding,
            glstm_groups=glstm_groups,
            glstm_layers=glstm_layers,
            glstm_bidirectional=glstm_bidirectional,
            glstm_rearrange=glstm_rearrange,
            output_channels=num_outputs * 2,
        )

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """DC-CRN Separator Forward.

        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [Batch, T, F]
                or [Batch, T, C, F]
            ilens (torch.Tensor): input lengths [Batch,]

        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(Batch, T, F), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        assert is_complex(input)
        is_multichannel = input.ndim == 4
        # Stack real/imaginary parts along the channel axis for the DC-CRN.
        if is_multichannel:
            feature = torch.cat([input.real, input.imag], dim=2).permute(0, 2, 1, 3)
        else:
            feature = torch.stack([input.real, input.imag], dim=1)

        masks = self.dc_crn(feature)
        # Re-assemble each (real, imag) channel pair into a complex mask.
        masks = [new_complex_like(input, m.unbind(dim=1)) for m in masks.unbind(dim=2)]
        if self.predict_noise:
            *masks, mask_noise = masks

        if self.mode == "masking":
            if is_multichannel:
                masked = [input * m.unsqueeze(2) for m in masks]
            else:
                masked = [input * m for m in masks]
        else:
            # Mapping mode: the network output is the estimate itself; derive
            # pseudo-masks from it for logging/consistency (EPS avoids /0).
            masked = masks
            if is_multichannel:
                masks = [m.unsqueeze(2) / (input + EPS) for m in masked]
            else:
                masks = [m / (input + EPS) for m in masked]

        others = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
        )
        if self.predict_noise:
            mask_noise = mask_noise.unsqueeze(2) if is_multichannel else mask_noise
            if self.mode == "masking":
                others["noise1"] = input * mask_noise
            else:
                others["noise1"] = mask_noise
        return masked, ilens, others

    @property
    def num_spk(self):
        """Number of output speakers."""
        return self._num_spk
| 7,510 | 40.960894 | 87 | py |
espnet | espnet-master/espnet2/enh/separator/dccrn_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complexnn import (
ComplexBatchNorm,
ComplexConv2d,
ComplexConvTranspose2d,
NavieComplexLSTM,
complex_cat,
)
from espnet2.enh.separator.abs_separator import AbsSeparator
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
EPS = torch.finfo(torch.double).eps
class DCCRNSeparator(AbsSeparator):
    def __init__(
        self,
        input_dim: int,
        num_spk: int = 1,
        rnn_layer: int = 2,
        rnn_units: int = 256,
        masking_mode: str = "E",
        use_clstm: bool = True,
        bidirectional: bool = False,
        use_cbn: bool = False,
        kernel_size: int = 5,
        kernel_num: List[int] = [32, 64, 128, 256, 256, 256],
        use_builtin_complex: bool = True,
        use_noise_mask: bool = False,
    ):
        """DCCRN separator.

        Args:
            input_dim (int): input dimension.
            num_spk (int, optional): number of speakers. Defaults to 1.
            rnn_layer (int, optional): number of lstm layers in the crn. Defaults to 2.
            rnn_units (int, optional): rnn units. Defaults to 128.
            masking_mode (str, optional): usage of the estimated mask. Defaults to "E".
            use_clstm (bool, optional): whether use complex LSTM. Defaults to False.
            bidirectional (bool, optional): whether use BLSTM. Defaults to False.
            use_cbn (bool, optional): whether use complex BN. Defaults to False.
            kernel_size (int, optional): convolution kernel size. Defaults to 5.
            kernel_num (list, optional): output dimension of each layer of the encoder.
            use_builtin_complex (bool, optional): torch.complex if True,
                else ComplexTensor.
            use_noise_mask (bool, optional): whether to estimate the mask of noise.
        """
        super().__init__()
        self.use_builtin_complex = use_builtin_complex
        self._num_spk = num_spk
        self.use_noise_mask = use_noise_mask
        self.predict_noise = use_noise_mask
        if masking_mode not in ["C", "E", "R"]:
            raise ValueError("Unsupported masking mode: %s" % masking_mode)
        # Network config
        self.rnn_units = rnn_units
        self.hidden_layers = rnn_layer
        self.kernel_size = kernel_size
        # First entry is 2: the input has real and imaginary channels.
        self.kernel_num = [2] + kernel_num
        self.masking_mode = masking_mode
        self.use_clstm = use_clstm
        # Bidirectional RNNs double the feature size fed to the next layer.
        fac = 2 if bidirectional else 1
        self.encoder = nn.ModuleList()
        self.decoder = nn.ModuleList()
        # Encoder: each stage halves the frequency axis (stride (2, 1)).
        for idx in range(len(self.kernel_num) - 1):
            self.encoder.append(
                nn.Sequential(
                    ComplexConv2d(
                        self.kernel_num[idx],
                        self.kernel_num[idx + 1],
                        kernel_size=(self.kernel_size, 2),
                        stride=(2, 1),
                        padding=(2, 1),
                    ),
                    nn.BatchNorm2d(self.kernel_num[idx + 1])
                    if not use_cbn
                    else ComplexBatchNorm(self.kernel_num[idx + 1]),
                    nn.PReLU(),
                )
            )
        # Frequency bins remaining after all stride-2 encoder stages
        # (ceil division of input_dim - 1; clamped to at least 1).
        hidden_dim = (input_dim - 1 + 2 ** (len(self.kernel_num) - 1) - 1) // (
            2 ** (len(self.kernel_num) - 1)
        )
        hidden_dim = hidden_dim if hidden_dim > 0 else 1
        if self.use_clstm:
            rnns = []
            for idx in range(rnn_layer):
                rnns.append(
                    NavieComplexLSTM(
                        input_size=hidden_dim * self.kernel_num[-1]
                        if idx == 0
                        else self.rnn_units * fac,
                        hidden_size=self.rnn_units,
                        bidirectional=bidirectional,
                        batch_first=False,
                        # Only the last layer projects back to the encoder size.
                        projection_dim=hidden_dim * self.kernel_num[-1]
                        if idx == rnn_layer - 1
                        else None,
                    )
                )
            self.enhance = nn.Sequential(*rnns)
        else:
            self.enhance = nn.LSTM(
                input_size=hidden_dim * self.kernel_num[-1],
                hidden_size=self.rnn_units,
                num_layers=2,
                dropout=0.0,
                bidirectional=bidirectional,
                batch_first=False,
            )
            self.tranform = nn.Linear(
                self.rnn_units * fac, hidden_dim * self.kernel_num[-1]
            )
        # Decoder mirrors the encoder; input channels are doubled, presumably
        # for skip-connection concatenation in forward() — TODO confirm.
        for idx in range(len(self.kernel_num) - 1, 0, -1):
            if idx != 1:
                self.decoder.append(
                    nn.Sequential(
                        ComplexConvTranspose2d(
                            self.kernel_num[idx] * 2,
                            self.kernel_num[idx - 1],
                            kernel_size=(self.kernel_size, 2),
                            stride=(2, 1),
                            padding=(2, 0),
                            output_padding=(1, 0),
                        ),
                        nn.BatchNorm2d(self.kernel_num[idx - 1])
                        if not use_cbn
                        else ComplexBatchNorm(self.kernel_num[idx - 1]),
                        nn.PReLU(),
                    )
                )
            else:
                # Last decoder stage: no norm/activation; emits one output per
                # speaker (plus one for noise when use_noise_mask is True).
                self.decoder.append(
                    nn.Sequential(
                        ComplexConvTranspose2d(
                            self.kernel_num[idx] * 2,
                            self.kernel_num[idx - 1] * (self._num_spk + 1)
                            if self.use_noise_mask
                            else self.kernel_num[idx - 1] * self._num_spk,
                            kernel_size=(self.kernel_size, 2),
                            stride=(2, 1),
                            padding=(2, 0),
                            output_padding=(1, 0),
                        ),
                    )
                )
        self.flatten_parameters()
    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.
        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, F]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model
        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, F), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        # shape (B, T, F) --> (B, F, T)
        specs = input.permute(0, 2, 1)
        real, imag = specs.real, specs.imag
        # # shape (B, F, T)
        # spec_mags = torch.sqrt(real**2 + imag**2 + 1e-8)
        # # shape (B, F, T)
        # spec_phase = torch.atan2(imag, real)
        # stack real/imag parts as two channels: shape (B, 2, F, T)
        cspecs = torch.stack([real, imag], 1)
        # drop the DC (0-th frequency) bin; create_masks() pads it back later
        # shape (B, 2, F-1, T)
        cspecs = cspecs[:, :, 1:]
        out = cspecs
        # run the encoder, keeping every layer output for the skip connections
        encoder_out = []
        for idx, layer in enumerate(self.encoder):
            out = layer(out)
            encoder_out.append(out)
        # shape (B, C, F, T)
        batch_size, channels, dims, lengths = out.size()
        # time-major layout for the recurrent block: shape (T, B, C, F)
        out = out.permute(3, 0, 1, 2)
        if self.use_clstm:
            # complex-LSTM path: the channel axis is split into real/imag halves
            # shape (T, B, C // 2, F)
            r_rnn_in = out[:, :, : channels // 2]
            # shape (T, B, C // 2, F)
            i_rnn_in = out[:, :, channels // 2 :]
            # shape (T, B, C // 2 * F)
            r_rnn_in = torch.reshape(
                r_rnn_in, [lengths, batch_size, channels // 2 * dims]
            )
            # shape (T, B, C // 2 * F)
            i_rnn_in = torch.reshape(
                i_rnn_in, [lengths, batch_size, channels // 2 * dims]
            )
            r_rnn_in, i_rnn_in = self.enhance([r_rnn_in, i_rnn_in])
            # shape (T, B, C // 2, F)
            r_rnn_in = torch.reshape(
                r_rnn_in, [lengths, batch_size, channels // 2, dims]
            )
            # shape (T, B, C // 2, F)
            i_rnn_in = torch.reshape(
                i_rnn_in, [lengths, batch_size, channels // 2, dims]
            )
            # shape (T, B, C, F)
            out = torch.cat([r_rnn_in, i_rnn_in], 2)
        else:
            # real-LSTM path: flatten channel/frequency axes for the LSTM
            # shape (T, B, C*F)
            out = torch.reshape(out, [lengths, batch_size, channels * dims])
            out, _ = self.enhance(out)
            out = self.tranform(out)
            # shape (T, B, C, F)
            out = torch.reshape(out, [lengths, batch_size, channels, dims])
        # shape (B, C, F, T)
        out = out.permute(1, 2, 3, 0)
        for idx in range(len(self.decoder)):
            # skip connection
            out = complex_cat([out, encoder_out[-1 - idx]], 1)
            out = self.decoder[idx](out)
            # drop the first index of the last (time) axis
            # NOTE(review): presumably compensates the transposed conv's
            # padding/output_padding settings -- confirm against the decoder config.
            out = out[..., 1:]
        # out shape = (B, 2*num_spk, F-1, T) if self.use_noise_mask == False
        # else (B, 2*(num_spk+1), F-1, T)
        masks = self.create_masks(out)
        masked = self.apply_masks(masks, real, imag)
        others = OrderedDict(
            zip(
                ["mask_spk{}".format(i + 1) for i in range(self.num_spk)],
                masks,
            )
        )
        if self.use_noise_mask:
            # the last stream estimates the noise, not a speaker
            others["mask_noise1"] = masks[-1]
            others["noise1"] = masked.pop(-1)
        return (masked, ilens, others)
def flatten_parameters(self):
if isinstance(self.enhance, nn.LSTM):
self.enhance.flatten_parameters()
def create_masks(self, mask_tensor: torch.Tensor):
"""create estimated mask for each speaker
Args:
mask_tensor (torch.Tensor): output of decoder, shape(B, 2*num_spk, F-1, T)
"""
if self.use_noise_mask:
assert mask_tensor.shape[1] == 2 * (self._num_spk + 1), mask_tensor.shape[1]
else:
assert mask_tensor.shape[1] == 2 * self._num_spk, mask_tensor.shape[1]
masks = []
for idx in range(mask_tensor.shape[1] // 2):
# shape (B, F-1, T)
mask_real = mask_tensor[:, idx * 2]
# shape (B, F-1, T)
mask_imag = mask_tensor[:, idx * 2 + 1]
# shape (B, F, T)
mask_real = F.pad(mask_real, [0, 0, 1, 0])
# shape (B, F, T)
mask_imag = F.pad(mask_imag, [0, 0, 1, 0])
# mask shape (B, T, F)
if is_torch_1_9_plus and self.use_builtin_complex:
complex_mask = torch.complex(
mask_real.permute(0, 2, 1), mask_imag.permute(0, 2, 1)
)
else:
complex_mask = ComplexTensor(
mask_real.permute(0, 2, 1), mask_imag.permute(0, 2, 1)
)
masks.append(complex_mask)
return masks
    def apply_masks(
        self,
        masks: List[Union[torch.Tensor, ComplexTensor]],
        real: torch.Tensor,
        imag: torch.Tensor,
    ):
        """apply masks
        Args:
            masks : est_masks, [(B, T, F), ...]
            real (torch.Tensor): real part of the noisy spectrum, (B, F, T)
            imag (torch.Tensor): imag part of the noisy spectrum, (B, F, T)
        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, F), ...]
        """
        # NOTE(review): `real`/`imag` are reassigned inside the loop, so from the
        # second mask onward each mask is applied to the already-masked spectrum,
        # not the original mixture. Harmless for one output stream; verify this
        # is intended when num_spk > 1 or use_noise_mask is True.
        masked = []
        for i in range(len(masks)):
            # shape (B, T, F) --> (B, F, T)
            mask_real = masks[i].real.permute(0, 2, 1)
            mask_imag = masks[i].imag.permute(0, 2, 1)
            if self.masking_mode == "E":
                # magnitude/phase masking: tanh-bounded magnitude mask plus an
                # additive phase correction
                # shape (B, F, T)
                spec_mags = torch.sqrt(real**2 + imag**2 + 1e-8)
                # shape (B, F, T)
                spec_phase = torch.atan2(imag, real)
                mask_mags = (mask_real**2 + mask_imag**2) ** 0.5
                # mask_mags = (mask_real ** 2 + mask_imag ** 2 + EPS) ** 0.5
                real_phase = mask_real / (mask_mags + EPS)
                imag_phase = mask_imag / (mask_mags + EPS)
                # mask_phase = torch.atan2(imag_phase + EPS, real_phase + EPS)
                mask_phase = torch.atan2(imag_phase, real_phase)
                mask_mags = torch.tanh(mask_mags)
                est_mags = mask_mags * spec_mags
                est_phase = spec_phase + mask_phase
                real = est_mags * torch.cos(est_phase)
                imag = est_mags * torch.sin(est_phase)
            elif self.masking_mode == "C":
                # complex multiplication (real + j*imag) * (mask_real + j*mask_imag)
                real, imag = (
                    real * mask_real - imag * mask_imag,
                    real * mask_imag + imag * mask_real,
                )
            elif self.masking_mode == "R":
                # independent real/imaginary masking
                real, imag = real * mask_real, imag * mask_imag
            # shape (B, F, T) --> (B, T, F)
            if is_torch_1_9_plus and self.use_builtin_complex:
                masked.append(
                    torch.complex(real.permute(0, 2, 1), imag.permute(0, 2, 1))
                )
            else:
                masked.append(
                    ComplexTensor(real.permute(0, 2, 1), imag.permute(0, 2, 1))
                )
        return masked
    @property
    def num_spk(self):
        # number of separated speaker streams produced by this separator
        return self._num_spk
| 13,913 | 37.016393 | 88 | py |
espnet | espnet-master/espnet2/enh/separator/dpcl_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet.nets.pytorch_backend.rnn.encoders import RNN
class DPCLSeparator(AbsSeparator):
    """Deep Clustering (DPCL) separator.

    Learns a D-dimensional embedding for every T-F bin; at inference time the
    embeddings are clustered with K-means to form binary separation masks.
    """

    def __init__(
        self,
        input_dim: int,
        rnn_type: str = "blstm",
        num_spk: int = 2,
        nonlinear: str = "tanh",
        layer: int = 2,
        unit: int = 512,
        emb_D: int = 40,
        dropout: float = 0.0,
    ):
        """Deep Clustering Separator.
        References:
            [1] Deep clustering: Discriminative embeddings for segmentation and
                separation; John R. Hershey. et al., 2016;
                https://ieeexplore.ieee.org/document/7471631
            [2] Manifold-Aware Deep Clustering: Maximizing Angles Between Embedding
                Vectors Based on Regular Simplex; Tanaka, K. et al., 2021;
                https://www.isca-speech.org/archive/interspeech_2021/tanaka21_interspeech.html
        Args:
            input_dim: input feature dimension
            rnn_type: string, select from 'blstm', 'lstm' etc.
            bidirectional: bool, whether the inter-chunk RNN layers are bidirectional.
            num_spk: number of speakers
            nonlinear: the nonlinear function for mask estimation,
                select from 'relu', 'tanh', 'sigmoid'
            layer: int, number of stacked RNN layers. Default is 3.
            unit: int, dimension of the hidden state.
            emb_D: int, dimension of the feature vector for a tf-bin.
            dropout: float, dropout ratio. Default is 0.
        """  # noqa: E501
        super().__init__()
        self._num_spk = num_spk
        self.blstm = RNN(
            idim=input_dim,
            elayers=layer,
            cdim=unit,
            hdim=unit,
            dropout=dropout,
            typ=rnn_type,
        )
        # projects each frame's hidden state to one D-dim embedding per freq bin
        self.linear = torch.nn.Linear(unit, input_dim * emb_D)
        if nonlinear not in ("sigmoid", "relu", "tanh"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))
        self.nonlinear = {
            "sigmoid": torch.nn.Sigmoid(),
            "relu": torch.nn.ReLU(),
            "tanh": torch.nn.Tanh(),
        }[nonlinear]
        self.D = emb_D

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.
        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, F]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model
        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. tf_embedding: OrderedDict[
                'tf_embedding': learned embedding of all T-F bins (B, T * F, D),
            ]
        """
        # if complex spectrum, use the magnitude as the network input
        if is_complex(input):
            feature = abs(input)
        else:
            feature = input
        B, T, F = input.shape
        # x:(B, T, F)
        x, ilens, _ = self.blstm(feature, ilens)
        # x:(B, T, F*D)
        x = self.linear(x)
        # x:(B, T, F*D)
        x = self.nonlinear(x)
        # flatten time/frequency: one D-dim embedding per T-F bin
        tf_embedding = x.view(B, -1, self.D)

        if self.training:
            # the DPCL loss is computed on the embeddings directly,
            # so no masks/sources are produced during training
            masked = None
        else:
            # Inference: naive batched K-means over the T-F embeddings,
            # initialized from the first `num_spk` embedding vectors.
            centers = tf_embedding[:, : self._num_spk, :].detach()
            dist = torch.empty(B, T * F, self._num_spk, device=tf_embedding.device)
            last_label = torch.zeros(B, T * F, device=tf_embedding.device)
            while True:
                # squared Euclidean distance of each embedding to each center
                for i in range(self._num_spk):
                    dist[:, :, i] = torch.sum(
                        (tf_embedding - centers[:, i, :].unsqueeze(1)) ** 2, dim=2
                    )
                label = dist.argmin(dim=2)
                # converged once the hard assignments stop changing
                # NOTE(review): no iteration cap here, and an empty cluster
                # would produce NaN centers via `.mean` -- verify robustness.
                if torch.sum(label != last_label) == 0:
                    break
                last_label = label
                for b in range(B):
                    for i in range(self._num_spk):
                        centers[b, i] = tf_embedding[b, label[b] == i].mean(dim=0)
            label = label.view(B, T, F)
            # binary masks: each T-F bin belongs to exactly one speaker
            masked = []
            for i in range(self._num_spk):
                masked.append(input * (label == i))

        others = OrderedDict(
            {"tf_embedding": tf_embedding},
        )

        return masked, ilens, others

    @property
    def num_spk(self):
        # number of separated speaker streams produced by this separator
        return self._num_spk
| 4,878 | 33.85 | 94 | py |
espnet | espnet-master/espnet2/enh/separator/skim_separator.py | from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.layers.skim import SkiM
from espnet2.enh.separator.abs_separator import AbsSeparator
class SkiMSeparator(AbsSeparator):
    """Skipping Memory (SkiM) Separator
    Args:
        input_dim: input feature dimension
        causal: bool, whether the system is causal.
        num_spk: number of target speakers.
        nonlinear: the nonlinear function for mask estimation,
            select from 'relu', 'tanh', 'sigmoid'
        layer: int, number of SkiM blocks. Default is 3.
        unit: int, dimension of the hidden state.
        segment_size: segmentation size for splitting long features
        dropout: float, dropout ratio. Default is 0.
        mem_type: 'hc', 'h', 'c', 'id' or None.
            It controls whether the hidden (or cell) state of
            SegLSTM will be processed by MemLSTM.
            In 'id' mode, both the hidden and cell states
            will be identically returned.
            When mem_type is None, the MemLSTM will be removed.
        seg_overlap: Bool, whether the segmentation will reserve 50%
            overlap for adjacent segments. Default is False.
    """

    def __init__(
        self,
        input_dim: int,
        causal: bool = True,
        num_spk: int = 2,
        predict_noise: bool = False,
        nonlinear: str = "relu",
        layer: int = 3,
        unit: int = 512,
        segment_size: int = 20,
        dropout: float = 0.0,
        mem_type: str = "hc",
        seg_overlap: bool = False,
    ):
        super().__init__()
        self._num_spk = num_spk
        self.predict_noise = predict_noise
        self.segment_size = segment_size
        if mem_type not in ("hc", "h", "c", "id", None):
            raise ValueError("Not supporting mem_type={}".format(mem_type))
        # one extra output stream carries the noise mask when requested
        self.num_outputs = self.num_spk + 1 if self.predict_noise else self.num_spk
        # causal mode implies unidirectional RNNs and cumulative layer norm
        self.skim = SkiM(
            input_size=input_dim,
            hidden_size=unit,
            output_size=input_dim * self.num_outputs,
            dropout=dropout,
            num_blocks=layer,
            bidirectional=(not causal),
            norm_type="cLN" if causal else "gLN",
            segment_size=segment_size,
            seg_overlap=seg_overlap,
            mem_type=mem_type,
        )
        if nonlinear not in ("sigmoid", "relu", "tanh"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))
        self.nonlinear = {
            "sigmoid": torch.nn.Sigmoid(),
            "relu": torch.nn.ReLU(),
            "tanh": torch.nn.Tanh(),
        }[nonlinear]

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        additional: Optional[Dict] = None,
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """Forward.
        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
            ilens (torch.Tensor): input lengths [Batch]
            additional (Dict or None): other data included in model
                NOTE: not used in this model
        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                'mask_spk1': torch.Tensor(Batch, Frames, Freq),
                'mask_spk2': torch.Tensor(Batch, Frames, Freq),
                ...
                'mask_spkn': torch.Tensor(Batch, Frames, Freq),
            ]
        """
        # if complex spectrum, use the magnitude as the mask-estimation input
        if is_complex(input):
            feature = abs(input)
        else:
            feature = input
        B, T, N = feature.shape
        processed = self.skim(feature)  # B,T, N
        # split the stacked SkiM output into one mask per stream
        processed = processed.view(B, T, N, self.num_outputs)
        masks = self.nonlinear(processed).unbind(dim=3)
        if self.predict_noise:
            # the last stream is the noise mask, not a speaker mask
            *masks, mask_noise = masks
        masked = [input * m for m in masks]
        others = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
        )
        if self.predict_noise:
            others["noise1"] = input * mask_noise
        return masked, ilens, others

    def forward_streaming(self, input_frame: torch.Tensor, states=None):
        """Frame-by-frame (causal) forward; `states` carries the SkiM memory."""
        if is_complex(input_frame):
            feature = abs(input_frame)
        else:
            feature = input_frame
        B, _, N = feature.shape
        processed, states = self.skim.forward_stream(feature, states=states)
        processed = processed.view(B, 1, N, self.num_outputs)
        masks = self.nonlinear(processed).unbind(dim=3)
        if self.predict_noise:
            *masks, mask_noise = masks
        masked = [input_frame * m for m in masks]
        others = OrderedDict(
            zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
        )
        if self.predict_noise:
            others["noise1"] = input_frame * mask_noise
        return masked, states, others

    @property
    def num_spk(self):
        # number of separated speaker streams produced by this separator
        return self._num_spk
| 5,293 | 32.0875 | 84 | py |
espnet | espnet-master/espnet2/enh/layers/skim.py | # An implementation of SkiM model described in
# "SkiM: Skipping Memory LSTM for Low-Latency Real-Time Continuous Speech Separation"
# (https://arxiv.org/abs/2201.10800)
#
import torch
import torch.nn as nn
from espnet2.enh.layers.dprnn import SingleRNN, merge_feature, split_feature
from espnet2.enh.layers.tcn import choose_norm
class MemLSTM(nn.Module):
    """the Mem-LSTM of SkiM
    args:
        hidden_size: int, dimension of the hidden state.
        dropout: float, dropout ratio. Default is 0.
        bidirectional: bool, whether the LSTM layers are bidirectional.
            Default is False.
        mem_type: 'hc', 'h', 'c' or 'id'.
            It controls whether the hidden (or cell) state of
            SegLSTM will be processed by MemLSTM.
            In 'id' mode, both the hidden and cell states will
            be identically returned.
        norm_type: gLN, cLN. cLN is for causal implementation.
    """

    def __init__(
        self,
        hidden_size,
        dropout=0.0,
        bidirectional=False,
        mem_type="hc",
        norm_type="cLN",
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        # bidirectional SegLSTM states stack 2 directions on the feature axis
        self.input_size = (int(bidirectional) + 1) * hidden_size
        self.mem_type = mem_type
        assert mem_type in [
            "hc",
            "h",
            "c",
            "id",
        ], f"only support 'hc', 'h', 'c' and 'id', current type: {mem_type}"
        # separate LSTM+norm branches for the hidden and the cell trajectories
        if mem_type in ["hc", "h"]:
            self.h_net = SingleRNN(
                "LSTM",
                input_size=self.input_size,
                hidden_size=self.hidden_size,
                dropout=dropout,
                bidirectional=bidirectional,
            )
            self.h_norm = choose_norm(
                norm_type=norm_type, channel_size=self.input_size, shape="BTD"
            )
        if mem_type in ["hc", "c"]:
            self.c_net = SingleRNN(
                "LSTM",
                input_size=self.input_size,
                hidden_size=self.hidden_size,
                dropout=dropout,
                bidirectional=bidirectional,
            )
            self.c_norm = choose_norm(
                norm_type=norm_type, channel_size=self.input_size, shape="BTD"
            )

    def extra_repr(self) -> str:
        return f"Mem_type: {self.mem_type}, bidirectional: {self.bidirectional}"

    def forward(self, hc, S):
        # hc = (h, c), tuple of hidden and cell states from SegLSTM
        # shape of h and c: (d, B*S, H)
        # S: number of segments in SegLSTM
        if self.mem_type == "id":
            # identity mode: states pass through unchanged
            # (shapes are still read for the causal shift below)
            ret_val = hc
            h, c = hc
            d, BS, H = h.shape
            B = BS // S
        else:
            h, c = hc
            d, BS, H = h.shape
            B = BS // S
            # fold the segment axis into a sequence for the memory LSTMs
            h = h.transpose(1, 0).contiguous().view(B, S, d * H)  # B, S, dH
            c = c.transpose(1, 0).contiguous().view(B, S, d * H)  # B, S, dH
            if self.mem_type == "hc":
                h = h + self.h_norm(self.h_net(h)[0])
                c = c + self.c_norm(self.c_net(c)[0])
            elif self.mem_type == "h":
                h = h + self.h_norm(self.h_net(h)[0])
                c = torch.zeros_like(c)
            elif self.mem_type == "c":
                h = torch.zeros_like(h)
                c = c + self.c_norm(self.c_net(c)[0])
            h = h.view(B * S, d, H).transpose(1, 0).contiguous()
            c = c.view(B * S, d, H).transpose(1, 0).contiguous()
            ret_val = (h, c)
        if not self.bidirectional:
            # for causal setup: shift the states one segment to the right so
            # that segment s only receives memory from segments < s
            causal_ret_val = []
            for x in ret_val:
                x = x.transpose(1, 0).contiguous().view(B, S, d * H)
                x_ = torch.zeros_like(x)
                x_[:, 1:, :] = x[:, :-1, :]
                x_ = x_.view(B * S, d, H).transpose(1, 0).contiguous()
                causal_ret_val.append(x_)
            ret_val = tuple(causal_ret_val)
        return ret_val

    def forward_one_step(self, hc, state):
        # streaming variant of forward(): update the memory for one segment;
        # `state` holds the internal states of h_net/c_net across calls
        if self.mem_type == "id":
            # identity mode: states pass through unchanged
            pass
        else:
            h, c = hc
            d, B, H = h.shape
            h = h.transpose(1, 0).contiguous().view(B, 1, d * H)  # B, 1, dH
            c = c.transpose(1, 0).contiguous().view(B, 1, d * H)  # B, 1, dH
            if self.mem_type == "hc":
                h_tmp, state[0] = self.h_net(h, state[0])
                h = h + self.h_norm(h_tmp)
                c_tmp, state[1] = self.c_net(c, state[1])
                c = c + self.c_norm(c_tmp)
            elif self.mem_type == "h":
                h_tmp, state[0] = self.h_net(h, state[0])
                h = h + self.h_norm(h_tmp)
                c = torch.zeros_like(c)
            elif self.mem_type == "c":
                h = torch.zeros_like(h)
                c_tmp, state[1] = self.c_net(c, state[1])
                c = c + self.c_norm(c_tmp)
            h = h.transpose(1, 0).contiguous()
            c = c.transpose(1, 0).contiguous()
            hc = (h, c)
        return hc, state
class SegLSTM(nn.Module):
    """Segment-level LSTM (Seg-LSTM) of SkiM.

    One (optionally bidirectional) LSTM layer applied within a segment,
    followed by dropout, a linear projection back to the input size, and a
    normalized residual connection.

    args:
        input_size: int, dimension of the input feature.
            The input should have shape (batch, seq_len, input_size).
        hidden_size: int, dimension of the hidden state.
        dropout: float, dropout ratio. Default is 0.
        bidirectional: bool, whether the LSTM layers are bidirectional.
            Default is False.
        norm_type: gLN, cLN. cLN is for causal implementation.
    """

    def __init__(
        self, input_size, hidden_size, dropout=0.0, bidirectional=False, norm_type="cLN"
    ):
        super().__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_direction = int(bidirectional) + 1

        # single LSTM layer; batch_first so inputs are (B, T, D)
        self.lstm = nn.LSTM(
            input_size,
            hidden_size,
            1,
            batch_first=True,
            bidirectional=bidirectional,
        )
        self.dropout = nn.Dropout(p=dropout)
        # project the (possibly doubled) hidden size back to input_size
        self.proj = nn.Linear(hidden_size * self.num_direction, input_size)
        self.norm = choose_norm(
            norm_type=norm_type, channel_size=input_size, shape="BTD"
        )

    def forward(self, input, hc):
        """Process one segment.

        Args:
            input (torch.Tensor): (B, T, input_size)
            hc: tuple (h, c) of LSTM states, or None in the first SkiM block
        Returns:
            output (torch.Tensor): (B, T, input_size)
            (h, c): LSTM states after this segment
        """
        batch = input.size(0)
        if hc is None:
            # first SkiM block: no carried-over memory, start from zero states
            h = torch.zeros(
                self.num_direction, batch, self.hidden_size, dtype=input.dtype
            ).to(input.device)
            c = torch.zeros(
                self.num_direction, batch, self.hidden_size, dtype=input.dtype
            ).to(input.device)
        else:
            h, c = hc

        seq, (h, c) = self.lstm(input, (h, c))
        seq = self.dropout(seq)
        projected = self.proj(seq.contiguous().view(-1, seq.shape[2]))
        projected = projected.view(input.shape)
        # residual connection with normalization
        output = input + self.norm(projected)
        return output, (h, c)
class SkiM(nn.Module):
    """Skipping Memory Net (SkiM).

    Stacks `num_blocks` SegLSTM layers that process fixed-size segments, with
    MemLSTM layers in between that carry information across segments.

    args:
        input_size: int, dimension of the input feature.
            Input shape should be (batch, length, input_size)
        hidden_size: int, dimension of the hidden state.
        output_size: int, dimension of the output size.
        dropout: float, dropout ratio. Default is 0.
        num_blocks: number of basic SkiM blocks
        segment_size: segmentation size for splitting long features
        bidirectional: bool, whether the RNN layers are bidirectional.
        mem_type: 'hc', 'h', 'c', 'id' or None.
            It controls whether the hidden (or cell) state of SegLSTM
            will be processed by MemLSTM.
            In 'id' mode, both the hidden and cell states will
            be identically returned.
            When mem_type is None, the MemLSTM will be removed.
        norm_type: gLN, cLN. cLN is for causal implementation.
        seg_overlap: Bool, whether the segmentation will reserve 50%
            overlap for adjacent segments. Default is False.
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        output_size,
        dropout=0.0,
        num_blocks=2,
        segment_size=20,
        bidirectional=True,
        mem_type="hc",
        norm_type="gLN",
        seg_overlap=False,
    ):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.segment_size = segment_size
        self.dropout = dropout
        self.num_blocks = num_blocks
        self.mem_type = mem_type
        self.norm_type = norm_type
        self.seg_overlap = seg_overlap

        assert mem_type in [
            "hc",
            "h",
            "c",
            "id",
            None,
        ], f"only support 'hc', 'h', 'c', 'id', and None, current type: {mem_type}"

        self.seg_lstms = nn.ModuleList([])
        for i in range(num_blocks):
            self.seg_lstms.append(
                SegLSTM(
                    input_size=input_size,
                    hidden_size=hidden_size,
                    dropout=dropout,
                    bidirectional=bidirectional,
                    norm_type=norm_type,
                )
            )
        if self.mem_type is not None:
            # one MemLSTM between each pair of consecutive SegLSTM blocks
            self.mem_lstms = nn.ModuleList([])
            for i in range(num_blocks - 1):
                self.mem_lstms.append(
                    MemLSTM(
                        hidden_size,
                        dropout=dropout,
                        bidirectional=bidirectional,
                        mem_type=mem_type,
                        norm_type=norm_type,
                    )
                )
        self.output_fc = nn.Sequential(
            nn.PReLU(), nn.Conv1d(input_size, output_size, 1)
        )

    def forward(self, input):
        """SkiM forward (segment-based, offline).

        Args:
            input (torch.Tensor): (B, T, input_size)
        Returns:
            output (torch.Tensor): (B, T, output_size)
        """
        # input shape: B, T (S*K), D
        B, T, D = input.shape

        if self.seg_overlap:
            # 50%-overlap segmentation: (B, D, K, S) -> (B, S, K, D)
            input, rest = split_feature(
                input.transpose(1, 2), segment_size=self.segment_size
            )  # B, D, K, S
            input = input.permute(0, 3, 2, 1).contiguous()  # B, S, K, D
        else:
            # non-overlapping segmentation via zero-padding and reshape
            input, rest = self._padfeature(input=input)
            input = input.view(B, -1, self.segment_size, D)  # B, S, K, D
        B, S, K, D = input.shape

        assert K == self.segment_size

        output = input.view(B * S, K, D).contiguous()  # BS, K, D
        hc = None
        for i in range(self.num_blocks):
            output, hc = self.seg_lstms[i](output, hc)  # BS, K, D
            if self.mem_type and i < self.num_blocks - 1:
                # exchange information across segments before the next block
                hc = self.mem_lstms[i](hc, S)

        if self.seg_overlap:
            output = output.view(B, S, K, D).permute(0, 3, 2, 1)  # B, D, K, S
            output = merge_feature(output, rest)  # B, D, T
            output = self.output_fc(output).transpose(1, 2)
        else:
            # drop the zero-padded tail, then project to output_size
            output = output.view(B, S * K, D)[:, :T, :]  # B, T, D
            output = self.output_fc(output.transpose(1, 2)).transpose(1, 2)

        return output

    def _padfeature(self, input):
        """Zero-pad the time axis so its length is a multiple of segment_size.

        Returns:
            input (torch.Tensor): the (possibly padded) feature (B, T', D)
            rest (int): number of padded frames
        """
        B, T, D = input.shape
        # Fix: pad only the remainder. The previous expression
        # `segment_size - T % segment_size` added one full extra all-zero
        # segment whenever T was already divisible by segment_size, which
        # wastes computation and, in bidirectional mode, can leak padding
        # effects into real segments through the backward MemLSTM pass.
        rest = -T % self.segment_size
        if rest > 0:
            input = torch.nn.functional.pad(input, (0, 0, 0, rest))
        return input, rest

    def forward_stream(self, input_frame, states):
        """Streaming (frame-by-frame) forward for the causal setup.

        Args:
            input_frame (torch.Tensor): (B, 1, input_size), one time frame
            states: None on the first call; afterwards the dict returned by
                the previous call (step counter, SegLSTM and MemLSTM states)
        Returns:
            output (torch.Tensor): (B, 1, output_size)
            states (dict): updated streaming states
        """
        # input_frame # B, 1, N
        B, _, N = input_frame.shape

        def empty_seg_states():
            # fresh zero (h, c) states for one SegLSTM
            shp = (1, B, self.hidden_size)
            return (
                torch.zeros(*shp, device=input_frame.device, dtype=input_frame.dtype),
                torch.zeros(*shp, device=input_frame.device, dtype=input_frame.dtype),
            )

        if not states:
            states = {
                "current_step": 0,
                "seg_state": [empty_seg_states() for i in range(self.num_blocks)],
                "mem_state": [[None, None] for i in range(self.num_blocks - 1)],
            }

        output = input_frame

        if states["current_step"] and (states["current_step"]) % self.segment_size == 0:
            # segment boundary: refresh the SegLSTM states through the MemLSTMs
            tmp_states = [empty_seg_states() for i in range(self.num_blocks)]
            for i in range(self.num_blocks - 1):
                tmp_states[i + 1], states["mem_state"][i] = self.mem_lstms[
                    i
                ].forward_one_step(states["seg_state"][i], states["mem_state"][i])
            states["seg_state"] = tmp_states

        for i in range(self.num_blocks):
            output, states["seg_state"][i] = self.seg_lstms[i](
                output, states["seg_state"][i]
            )

        states["current_step"] += 1

        output = self.output_fc(output.transpose(1, 2)).transpose(1, 2)

        return output, states
if __name__ == "__main__":
    # Self-test: verify that frame-by-frame streaming inference reproduces
    # the segment-based (offline) output for a causal SkiM configuration.
    torch.manual_seed(111)
    seq_len = 100
    model = SkiM(
        16,
        11,
        16,
        dropout=0.0,
        num_blocks=4,
        segment_size=20,
        bidirectional=False,
        mem_type="hc",
        norm_type="cLN",
        seg_overlap=False,
    )
    model.eval()
    input = torch.randn(3, seq_len, 16)
    seg_output = model(input)
    state = None
    for i in range(seq_len):
        input_frame = input[:, i : i + 1, :]
        output, state = model.forward_stream(input_frame=input_frame, states=state)
        # NOTE: torch.testing.assert_allclose is deprecated in newer PyTorch
        # (torch.testing.assert_close is its replacement)
        torch.testing.assert_allclose(output, seg_output[:, i : i + 1, :])
    print("streaming ok")
espnet | espnet-master/espnet2/enh/layers/dc_crn.py | # Implementation of Densely-connected convolutional recurrent network (DC-CRN)
# [1] Tan et al. "Deep Learning Based Real-Time Speech Enhancement for Dual-Microphone
# Mobile Phones"
# https://web.cse.ohio-state.edu/~wang.77/papers/TZW.taslp21.pdf
from typing import List
import torch
import torch.nn as nn
from espnet2.enh.layers.conv_utils import conv2d_output_shape, convtransp2d_output_shape
class GLSTM(nn.Module):
    def __init__(
        self, hidden_size=1024, groups=2, layers=2, bidirectional=False, rearrange=False
    ):
        """Grouped LSTM.
        Reference:
            Efficient Sequence Learning with Group Recurrent Networks; Gao et al., 2018
        Args:
            hidden_size (int): total hidden size of all LSTMs in each grouped LSTM layer
                i.e., hidden size of each LSTM is `hidden_size // groups`
            groups (int): number of LSTMs in each grouped LSTM layer
            layers (int): number of grouped LSTM layers
            bidirectional (bool): whether to use BLSTM or unidirectional LSTM
            rearrange (bool): whether to apply the rearrange operation after each
                grouped LSTM layer
        """
        super().__init__()
        assert hidden_size % groups == 0, (hidden_size, groups)
        hidden_size_t = hidden_size // groups
        if bidirectional:
            # each BLSTM outputs 2 * (hidden_size_t // 2) = hidden_size_t features
            assert hidden_size_t % 2 == 0, hidden_size_t
        self.groups = groups
        self.layers = layers
        self.rearrange = rearrange
        self.lstm_list = nn.ModuleList()
        # one LayerNorm over the full hidden size after each grouped layer
        self.ln = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(layers)])
        for layer in range(layers):
            self.lstm_list.append(
                nn.ModuleList(
                    [
                        nn.LSTM(
                            hidden_size_t,
                            hidden_size_t // 2 if bidirectional else hidden_size_t,
                            1,
                            batch_first=True,
                            bidirectional=bidirectional,
                        )
                        for _ in range(groups)
                    ]
                )
            )

    def forward(self, x):
        """Grouped LSTM forward.
        Args:
            x (torch.Tensor): (B, C, T, D)
        Returns:
            out (torch.Tensor): (B, C, T, D)
        """
        out = x
        # (B, C, T, D) -> (B, T, C, D) -> (B, T, C*D)
        out = out.transpose(1, 2).contiguous()
        B, T = out.size(0), out.size(1)
        out = out.view(B, T, -1).contiguous()
        # split features into `groups` chunks, one per LSTM in the first layer
        out = torch.chunk(out, self.groups, dim=-1)
        # NOTE(review): the first layer recombines group outputs with
        # stack(dim=-1) + flatten (interleaving them across groups), while
        # later layers use cat (plain concatenation). Presumably an intentional
        # implicit rearrange -- confirm against the reference implementation.
        out = torch.stack(
            [self.lstm_list[0][i](out[i])[0] for i in range(self.groups)], dim=-1
        )
        out = torch.flatten(out, start_dim=-2, end_dim=-1)
        out = self.ln[0](out)
        for layer in range(1, self.layers):
            if self.rearrange:
                # shuffle features across groups so information can mix
                out = (
                    out.reshape(B, T, self.groups, -1)
                    .transpose(-1, -2)
                    .contiguous()
                    .view(B, T, -1)
                )
            out = torch.chunk(out, self.groups, dim=-1)
            out = torch.cat(
                [self.lstm_list[layer][i](out[i])[0] for i in range(self.groups)],
                dim=-1,
            )
            out = self.ln[layer](out)
        # restore the (B, C, T, D) layout
        out = out.view(out.size(0), out.size(1), x.size(1), -1).contiguous()
        out = out.transpose(1, 2).contiguous()
        return out
class GluConv2d(nn.Module):
    """Gated 2-D convolution: ``conv1(x) * sigmoid(conv2(x))``.

    Input and output shapes match a plain ``nn.Conv2d`` with the same
    arguments. Reference: Section III-B in [1].

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int/tuple): kernel size in Conv2d
        stride (int/tuple): stride size in Conv2d
        padding (int/tuple): padding size in Conv2d
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=0):
        super().__init__()
        conv_kwargs = dict(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
        )
        # one branch carries the features, the other computes the gate
        self.conv1 = nn.Conv2d(**conv_kwargs)
        self.conv2 = nn.Conv2d(**conv_kwargs)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Apply the gated convolution.

        Args:
            x (torch.Tensor): (B, C_in, H_in, W_in)
        Returns:
            out (torch.Tensor): (B, C_out, H_out, W_out)
        """
        return self.conv1(x) * self.sigmoid(self.conv2(x))
class GluConvTranspose2d(nn.Module):
    """Gated transposed 2-D convolution: ``deconv1(x) * sigmoid(deconv2(x))``.

    Input and output shapes match a plain ``nn.ConvTranspose2d`` with the
    same arguments. Reference: Section III-B in [1].

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int/tuple): kernel size in ConvTranspose2d
        stride (int/tuple): stride size in ConvTranspose2d
        padding (int/tuple): padding size in ConvTranspose2d
        output_padding (int/tuple): Additional size added to one side of each
            dimension in the output shape
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding=0,
        output_padding=(0, 0),
    ):
        super().__init__()
        deconv_kwargs = dict(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            output_padding=output_padding,
        )
        # one branch carries the features, the other computes the gate
        self.deconv1 = nn.ConvTranspose2d(**deconv_kwargs)
        self.deconv2 = nn.ConvTranspose2d(**deconv_kwargs)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Apply the gated transposed convolution.

        Args:
            x (torch.Tensor): (B, C_in, H_in, W_in)
        Returns:
            out (torch.Tensor): (B, C_out, H_out, W_out)
        """
        return self.deconv1(x) * self.sigmoid(self.deconv2(x))
class DenselyConnectedBlock(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        hid_channels=8,
        kernel_size=(1, 3),
        padding=(0, 1),
        last_kernel_size=(1, 4),  # use (1, 4) to alleviate the checkerboard artifacts
        last_stride=(1, 2),
        last_padding=(0, 1),
        last_output_padding=(0, 0),
        layers=5,
        transposed=False,
    ):
        """Densely-Connected Convolutional Block.
        Args:
            in_channels (int): number of input channels
            out_channels (int): number of output channels
            hid_channels (int): number of output channels in intermediate Conv layers
            kernel_size (tuple): kernel size for all but the last Conv layers
            padding (tuple): padding for all but the last Conv layers
            last_kernel_size (tuple): kernel size for the last GluConv layer
            last_stride (tuple): stride for the last GluConv layer
            last_padding (tuple): padding for the last GluConv layer
            last_output_padding (tuple): output padding for the last GluConvTranspose2d
                (only used when `transposed=True`)
            layers (int): total number of Conv layers
            transposed (bool): True to use GluConvTranspose2d in the last layer
                False to use GluConv2d in the last layer
        """
        super().__init__()
        assert layers > 1, layers
        self.conv = nn.ModuleList()
        in_channel = in_channels
        # here T=42 and D=127 are random integers that should not be changed after Conv
        # (used purely as probe sizes to assert the layers are shape-preserving)
        T, D = 42, 127
        hidden_sizes = [127]
        for _ in range(layers - 1):
            self.conv.append(
                nn.Sequential(
                    nn.Conv2d(
                        in_channel,
                        hid_channels,
                        kernel_size=kernel_size,
                        stride=(1, 1),
                        padding=padding,
                    ),
                    nn.BatchNorm2d(hid_channels),
                    nn.ELU(inplace=True),
                )
            )
            # dense connectivity: the next layer sees all previous outputs
            in_channel = in_channel + hid_channels
            # make sure the last two dimensions will not be changed after this layer
            tdim, hdim = conv2d_output_shape(
                (T, D),
                kernel_size=kernel_size,
                stride=(1, 1),
                pad=padding,
            )
            # NOTE(review): `hidden_sizes` is collected but never used afterwards;
            # it appears vestigial.
            hidden_sizes.append(hdim)
            assert tdim == T and hdim == D, (tdim, hdim, T, D)
        if transposed:
            # decoder side: last layer upsamples with a gated transposed conv
            self.conv.append(
                GluConvTranspose2d(
                    in_channel,
                    out_channels,
                    kernel_size=last_kernel_size,
                    stride=last_stride,
                    padding=last_padding,
                    output_padding=last_output_padding,
                )
            )
        else:
            # encoder side: last layer downsamples with a gated conv
            self.conv.append(
                GluConv2d(
                    in_channel,
                    out_channels,
                    kernel_size=last_kernel_size,
                    stride=last_stride,
                    padding=last_padding,
                )
            )

    def forward(self, input):
        """DenselyConnectedBlock forward.
        Args:
            input (torch.Tensor): (B, C, T_in, F_in)
        Returns:
            out (torch.Tensor): (B, C, T_out, F_out)
        """
        out = self.conv[0](input)
        outputs = [input, out]
        num_layers = len(self.conv)
        for idx, layer in enumerate(self.conv[1:]):
            # dense connection: concatenate all earlier outputs on channels
            out = layer(torch.cat(outputs, dim=1))
            # NOTE(review): idx tops out at num_layers - 2 here, so this guard
            # is always true; the final append is unused but harmless.
            if idx < num_layers - 1:
                outputs.append(out)
        return out
class DC_CRN(nn.Module):
    """Densely-Connected Convolutional Recurrent Network (DC-CRN).

    Encoder/decoder architecture built from DenselyConnectedBlock layers
    with per-stage skip pathways and a grouped-LSTM (GLSTM) bottleneck.
    The final output separates real and imaginary spectrum estimates along
    a dedicated dimension (see ``forward``). Reference and full argument
    documentation are in ``__init__``.
    """

    def __init__(
        self,
        input_dim,
        # NOTE(review): mutable default argument; harmless here because the
        # list is only read (indexed/len), never mutated, but a tuple would
        # be safer.
        input_channels: List = [2, 16, 32, 64, 128, 256],
        enc_hid_channels=8,
        enc_kernel_size=(1, 3),
        enc_padding=(0, 1),
        enc_last_kernel_size=(1, 4),
        enc_last_stride=(1, 2),
        enc_last_padding=(0, 1),
        enc_layers=5,
        skip_last_kernel_size=(1, 3),
        skip_last_stride=(1, 1),
        skip_last_padding=(0, 1),
        glstm_groups=2,
        glstm_layers=2,
        glstm_bidirectional=False,
        glstm_rearrange=False,
        output_channels=2,
    ):
        """Densely-Connected Convolutional Recurrent Network (DC-CRN).

        Reference: Fig. 3 and Section III-B in [1]

        Args:
            input_dim (int): input feature dimension
            input_channels (list): number of input channels for the stacked
                DenselyConnectedBlock layers
                Its length should be (`number of DenselyConnectedBlock layers`).
                It is recommended to use even number of channels to avoid AssertError
                when `glstm_bidirectional=True`.
            enc_hid_channels (int): common number of intermediate channels for all
                DenselyConnectedBlock of the encoder
            enc_kernel_size (tuple): common kernel size for all DenselyConnectedBlock
                of the encoder
            enc_padding (tuple): common padding for all DenselyConnectedBlock
                of the encoder
            enc_last_kernel_size (tuple): common kernel size for the last Conv layer
                in all DenselyConnectedBlock of the encoder
            enc_last_stride (tuple): common stride for the last Conv layer in all
                DenselyConnectedBlock of the encoder
            enc_last_padding (tuple): common padding for the last Conv layer in all
                DenselyConnectedBlock of the encoder
            enc_layers (int): common total number of Conv layers for all
                DenselyConnectedBlock layers of the encoder
            skip_last_kernel_size (tuple): common kernel size for the last Conv layer
                in all DenselyConnectedBlock of the skip pathways
            skip_last_stride (tuple): common stride for the last Conv layer in all
                DenselyConnectedBlock of the skip pathways
            skip_last_padding (tuple): common padding for the last Conv layer in all
                DenselyConnectedBlock of the skip pathways
            glstm_groups (int): number of groups in each Grouped LSTM layer
            glstm_layers (int): number of Grouped LSTM layers
            glstm_bidirectional (bool): whether to use BLSTM or unidirectional LSTM
                in Grouped LSTM layers
            glstm_rearrange (bool): whether to apply the rearrange operation after each
                grouped LSTM layer
            output_channels (int): number of output channels (must be an even number to
                recover both real and imaginary parts)
        """
        super().__init__()
        assert output_channels % 2 == 0, output_channels
        self.conv_enc = nn.ModuleList()
        # here T=42 is a random integer that should not be changed after Conv
        T = 42
        # hidden_sizes[k] tracks the frequency dimension at the output of the
        # k-th encoder stage; the decoder uses it to size its output padding.
        hidden_sizes = [input_dim]
        hdim = input_dim
        for i in range(1, len(input_channels)):
            self.conv_enc.append(
                DenselyConnectedBlock(
                    in_channels=input_channels[i - 1],
                    out_channels=input_channels[i],
                    hid_channels=enc_hid_channels,
                    kernel_size=enc_kernel_size,
                    padding=enc_padding,
                    last_kernel_size=enc_last_kernel_size,
                    last_stride=enc_last_stride,
                    last_padding=enc_last_padding,
                    layers=enc_layers,
                    transposed=False,
                )
            )
            # Track how the last Conv of each stage changes the freq size;
            # the time size must stay fixed (checked below with T=42).
            tdim, hdim = conv2d_output_shape(
                (T, hdim),
                kernel_size=enc_last_kernel_size,
                stride=enc_last_stride,
                pad=enc_last_padding,
            )
            hidden_sizes.append(hdim)
            assert tdim == T, (tdim, hdim)
        hs = hdim * input_channels[-1]
        assert hs >= glstm_groups, (hs, glstm_groups)
        self.glstm = GLSTM(
            hidden_size=hs,
            groups=glstm_groups,
            layers=glstm_layers,
            bidirectional=glstm_bidirectional,
            rearrange=glstm_rearrange,
        )
        # Decoder and skip pathways mirror the encoder in reverse order.
        self.skip_pathway = nn.ModuleList()
        self.deconv_dec = nn.ModuleList()
        for i in range(len(input_channels) - 1, 0, -1):
            self.skip_pathway.append(
                DenselyConnectedBlock(
                    in_channels=input_channels[i],
                    out_channels=input_channels[i],
                    hid_channels=enc_hid_channels,
                    kernel_size=enc_kernel_size,
                    padding=enc_padding,
                    last_kernel_size=skip_last_kernel_size,
                    last_stride=skip_last_stride,
                    last_padding=skip_last_padding,
                    layers=enc_layers,
                    transposed=False,
                )
            )
            # make sure the last two dimensions will not be changed after this layer
            enc_hdim = hidden_sizes[i]
            tdim, hdim = conv2d_output_shape(
                (T, enc_hdim),
                kernel_size=skip_last_kernel_size,
                stride=skip_last_stride,
                pad=skip_last_padding,
            )
            assert tdim == T and hdim == enc_hdim, (tdim, hdim, T, enc_hdim)
            # The innermost decoder stage (i == 1) emits the final channels.
            if i != 1:
                out_ch = input_channels[i - 1]
            else:
                out_ch = output_channels
            # make sure the last but one dimension will not be changed after this layer
            tdim, hdim = convtransp2d_output_shape(
                (T, enc_hdim),
                kernel_size=enc_last_kernel_size,
                stride=enc_last_stride,
                pad=enc_last_padding,
            )
            assert tdim == T, (tdim, hdim)
            # Output padding compensates the freq-size mismatch introduced by
            # the transposed convolution, so the skip connection shapes match.
            hpadding = hidden_sizes[i - 1] - hdim
            assert hpadding >= 0, (hidden_sizes[i - 1], hdim)
            self.deconv_dec.append(
                DenselyConnectedBlock(
                    in_channels=input_channels[i] * 2,
                    out_channels=out_ch,
                    hid_channels=enc_hid_channels,
                    kernel_size=enc_kernel_size,
                    padding=enc_padding,
                    last_kernel_size=enc_last_kernel_size,
                    last_stride=enc_last_stride,
                    last_padding=enc_last_padding,
                    last_output_padding=(0, hpadding),
                    layers=enc_layers,
                    transposed=True,
                )
            )
        self.fc_real = nn.Linear(in_features=input_dim, out_features=input_dim)
        self.fc_imag = nn.Linear(in_features=input_dim, out_features=input_dim)

    def forward(self, x):
        """DC-CRN forward.

        Args:
            x (torch.Tensor): Concatenated real and imaginary spectrum features
                (B, input_channels[0], T, F)
        Returns:
            out (torch.Tensor): (B, 2, output_channels // 2, T, F)
                Dim 1 separates the real and imaginary estimates, obtained by
                chunking the last deconv output in half along its channel axis.
        """
        # Encoder: keep each stage's output for the skip pathways.
        out = x
        conv_out = []
        for idx, layer in enumerate(self.conv_enc):
            out = layer(out)
            conv_out.append(out)
        num_out = len(conv_out)
        # Bottleneck: grouped LSTM on the deepest encoder output, combined
        # with the corresponding skip pathway.
        out = self.glstm(conv_out[-1])
        res = self.skip_pathway[0](conv_out[-1])
        out = torch.cat((out, res), dim=1)
        # Decoder: each stage consumes the previous stage's output stacked
        # with the matching (reversed-order) skip-pathway output.
        for idx in range(len(self.deconv_dec) - 1):
            deconv_out = self.deconv_dec[idx](out)
            res = self.skip_pathway[idx + 1](conv_out[num_out - idx - 2])
            out = torch.cat((deconv_out, res), dim=1)
        out = self.deconv_dec[-1](out)
        # Split channels into real/imag halves and refine each with its own
        # linear layer over the frequency dimension.
        dout_real, dout_imag = torch.chunk(out, 2, dim=1)
        out_real = self.fc_real(dout_real)
        out_imag = self.fc_imag(dout_imag)
        out = torch.stack([out_real, out_imag], dim=1)
        return out
| 18,544 | 35.505906 | 88 | py |
espnet | espnet-master/espnet2/enh/layers/wpe.py | from typing import Tuple, Union
import torch
import torch.nn.functional as F
import torch_complex.functional as FC
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import einsum, matmul, reverse
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
""" WPE pytorch version: Ported from https://github.com/fgnt/nara_wpe
Many functions aren't enough tested"""
def signal_framing(
    signal: Union[torch.Tensor, ComplexTensor],
    frame_length: int,
    frame_step: int,
    pad_value=0,
) -> Union[torch.Tensor, ComplexTensor]:
    """Expand ``signal`` into overlapping frames of length ``frame_length``.

    Args:
        signal : (B * F, D, T)
    Returns:
        torch.Tensor: (B * F, D, T, W)
    """
    # Complex inputs are framed component-wise and re-assembled.
    if isinstance(signal, ComplexTensor):
        return ComplexTensor(
            signal_framing(signal.real, frame_length, frame_step, pad_value),
            signal_framing(signal.imag, frame_length, frame_step, pad_value),
        )
    if is_torch_1_9_plus and torch.is_complex(signal):
        return torch.complex(
            signal_framing(signal.real, frame_length, frame_step, pad_value),
            signal_framing(signal.imag, frame_length, frame_step, pad_value),
        )
    # Right-pad so every frame start yields a complete window.
    padded = F.pad(signal, (0, frame_length - 1), "constant", pad_value)
    gather_idx = []
    for start in range(0, padded.size(-1) - frame_length + 1, frame_step):
        gather_idx.extend(range(start, start + frame_length))
    framed = padded[..., gather_idx].view(
        *padded.size()[:-1], -1, frame_length
    )
    return framed
"""Calculates power for `signal`
Args:
signal : Single frequency signal
with shape (F, C, T).
axis: reduce_mean axis
Returns:
Power with shape (F, T)
"""
power = signal.real**2 + signal.imag**2
power = power.mean(dim=dim)
return power
def get_correlations(
Y: Union[torch.Tensor, ComplexTensor], inverse_power: torch.Tensor, taps, delay
) -> Tuple[Union[torch.Tensor, ComplexTensor], Union[torch.Tensor, ComplexTensor]]:
"""Calculates weighted correlations of a window of length taps
Args:
Y : Complex-valued STFT signal with shape (F, C, T)
inverse_power : Weighting factor with shape (F, T)
taps (int): Lenghts of correlation window
delay (int): Delay for the weighting factor
Returns:
Correlation matrix of shape (F, taps*C, taps*C)
Correlation vector of shape (F, taps, C, C)
"""
assert inverse_power.dim() == 2, inverse_power.dim()
assert inverse_power.size(0) == Y.size(0), (inverse_power.size(0), Y.size(0))
F, C, T = Y.size()
# Y: (F, C, T) -> Psi: (F, C, T, taps)
Psi = signal_framing(Y, frame_length=taps, frame_step=1)[
..., : T - delay - taps + 1, :
]
# Reverse along taps-axis
Psi = reverse(Psi, dim=-1)
Psi_conj_norm = Psi.conj() * inverse_power[..., None, delay + taps - 1 :, None]
# (F, C, T, taps) x (F, C, T, taps) -> (F, taps, C, taps, C)
correlation_matrix = einsum("fdtk,fetl->fkdle", Psi_conj_norm, Psi)
# (F, taps, C, taps, C) -> (F, taps * C, taps * C)
correlation_matrix = correlation_matrix.reshape(F, taps * C, taps * C)
# (F, C, T, taps) x (F, C, T) -> (F, taps, C, C)
correlation_vector = einsum(
"fdtk,fet->fked", Psi_conj_norm, Y[..., delay + taps - 1 :]
)
return correlation_matrix, correlation_vector
def get_filter_matrix_conj(
    correlation_matrix: Union[torch.Tensor, ComplexTensor],
    correlation_vector: Union[torch.Tensor, ComplexTensor],
    eps: float = 1e-10,
) -> Union[torch.Tensor, ComplexTensor]:
    """Calculate (conjugate) filter matrix based on correlations for one freq.

    Args:
        correlation_matrix : Correlation matrix (F, taps * C, taps * C)
        correlation_vector : Correlation vector (F, taps, C, C)
        eps: diagonal-loading constant added before matrix inversion
    Returns:
        filter_matrix_conj (torch.complex/ComplexTensor): (F, taps, C, C)
    """
    n_freq, taps, C, _ = correlation_vector.size()
    # (F, taps, C1, C2) -> (F, C1, taps, C2) -> (F, C1, taps * C2)
    correlation_vector = (
        correlation_vector.permute(0, 2, 1, 3).contiguous().view(n_freq, C, taps * C)
    )
    eye = torch.eye(
        correlation_matrix.size(-1),
        dtype=correlation_matrix.dtype,
        device=correlation_matrix.device,
    )
    shape = (
        tuple(1 for _ in range(correlation_matrix.dim() - 2))
        + correlation_matrix.shape[-2:]
    )
    eye = eye.view(*shape)
    # Diagonal loading for numerical stability of the inverse.
    # NOTE: computed out-of-place (was `+=`) so the caller's correlation
    # matrix is not silently mutated, and so autograd does not fail on leaf
    # tensors that require grad.
    correlation_matrix = correlation_matrix + eps * eye
    inv_correlation_matrix = correlation_matrix.inverse()
    # (F, C, taps * C) x (F, taps * C, taps * C) -> (F, C, taps * C)
    stacked_filter_conj = matmul(
        correlation_vector, inv_correlation_matrix.transpose(-1, -2)
    )
    # (F, C1, taps * C2) -> (F, C1, taps, C2) -> (F, taps, C2, C1)
    filter_matrix_conj = stacked_filter_conj.view(n_freq, C, taps, C).permute(
        0, 2, 3, 1
    )
    return filter_matrix_conj
def perform_filter_operation(
    Y: Union[torch.Tensor, ComplexTensor],
    filter_matrix_conj: Union[torch.Tensor, ComplexTensor],
    taps,
    delay,
) -> Union[torch.Tensor, ComplexTensor]:
    """Apply the dereverberation filter and subtract the estimated tail.

    Args:
        Y : Complex-valued STFT signal of shape (F, C, T)
        filter_matrix_conj: filter matrix (F, taps, C, C)
    """
    # Select the backend matching Y's complex representation.
    if isinstance(Y, ComplexTensor):
        complex_module, pad_func = FC, FC.pad
    else:
        if not (is_torch_1_9_plus and torch.is_complex(Y)):
            raise ValueError(
                "Please update your PyTorch version to 1.9+ for complex support."
            )
        complex_module, pad_func = torch, F.pad
    n_frame = Y.size(-1)
    # Y_tilde: (taps, F, C, T); entry i is Y delayed by (delay + i) frames,
    # zero-padded at the front so all entries keep length T.
    delayed = [
        pad_func(
            Y[:, :, : n_frame - delay - i], (delay + i, 0), mode="constant", value=0
        )
        for i in range(taps)
    ]
    Y_tilde = complex_module.stack(delayed, dim=0)
    reverb_tail = complex_module.einsum("fpde,pfdt->fet", (filter_matrix_conj, Y_tilde))
    return Y - reverb_tail
def wpe_one_iteration(
    Y: Union[torch.Tensor, ComplexTensor],
    power: torch.Tensor,
    taps: int = 10,
    delay: int = 3,
    eps: float = 1e-10,
    inverse_power: bool = True,
) -> Union[torch.Tensor, ComplexTensor]:
    """Run a single WPE iteration.

    Args:
        Y: Complex valued STFT signal with shape (..., C, T)
        power: (..., T)
        taps: Number of filter taps
        delay: Delay as a guard interval, such that X does not become zero.
        eps: flooring constant applied before inverting the power
        inverse_power (bool): whether ``power`` must be inverted first
    Returns:
        enhanced: (..., C, T)
    """
    assert Y.size()[:-2] == power.size()[:-1]
    leading_dims = Y.size()[:-2]
    # Collapse all leading (batch/frequency) dimensions into one.
    Y = Y.view(-1, *Y.size()[-2:])
    power = power.view(-1, power.size()[-1])
    if inverse_power:
        weight = 1 / torch.clamp(power, min=eps)
    else:
        weight = power
    corr_mat, corr_vec = get_correlations(Y, weight, taps, delay)
    filter_matrix_conj = get_filter_matrix_conj(corr_mat, corr_vec)
    enhanced = perform_filter_operation(Y, filter_matrix_conj, taps, delay)
    return enhanced.view(*leading_dims, *Y.size()[-2:])
def wpe(
    Y: Union[torch.Tensor, ComplexTensor], taps=10, delay=3, iterations=3
) -> Union[torch.Tensor, ComplexTensor]:
    """Iterative WPE dereverberation.

    Args:
        Y: Complex valued STFT signal with shape (F, C, T)
        taps: Number of filter taps
        delay: Delay as a guard interval, such that X does not become zero.
        iterations: number of power/filter refinement rounds
    Returns:
        enhanced: (F, C, T)
    """
    enhanced = Y
    for _ in range(iterations):
        # Re-estimate the power from the current estimate, then filter the
        # *original* observation with it.
        enhanced = wpe_one_iteration(Y, get_power(enhanced), taps=taps, delay=delay)
    return enhanced
| 7,858 | 30.310757 | 88 | py |
espnet | espnet-master/espnet2/enh/layers/dnn_wpe.py | from typing import Tuple, Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import to_double, to_float
from espnet2.enh.layers.mask_estimator import MaskEstimator
from espnet2.enh.layers.wpe import wpe_one_iteration
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class DNN_WPE(torch.nn.Module):
    """DNN-supported WPE dereverberation module.

    Runs iterative WPE (``wpe_one_iteration``) on a multi-channel STFT
    signal; when ``use_dnn_mask`` is True, a neural ``MaskEstimator``
    predicts masks that weight the power estimate in the first iteration.
    """

    def __init__(
        self,
        wtype: str = "blstmp",
        widim: int = 257,
        wlayers: int = 3,
        wunits: int = 300,
        wprojs: int = 320,
        dropout_rate: float = 0.0,
        taps: int = 5,
        delay: int = 3,
        use_dnn_mask: bool = True,
        nmask: int = 1,
        nonlinear: str = "sigmoid",
        iterations: int = 1,
        normalization: bool = False,
        eps: float = 1e-6,
        diagonal_loading: bool = True,
        diag_eps: float = 1e-7,
        mask_flooring: bool = False,
        flooring_thres: float = 1e-6,
        use_torch_solver: bool = True,
    ):
        """Initialize DNN_WPE.

        Args:
            wtype/widim/wlayers/wunits/wprojs/dropout_rate/nonlinear:
                forwarded to ``MaskEstimator`` (mask network configuration).
            taps: number of WPE filter taps.
            delay: WPE prediction delay (guard interval).
            use_dnn_mask: whether to weight the power with DNN-estimated masks.
            nmask: number of masks (i.e. sources) estimated by the DNN.
            iterations: number of WPE iterations in ``forward``.
            normalization: normalize each mask along the time axis.
            eps: flooring constant for the averaged power.
            mask_flooring / flooring_thres: clamp masks at a minimum value.
            diagonal_loading / diag_eps / use_torch_solver: stored but not
                used inside this class — presumably consumed by callers or
                kept for interface symmetry; TODO(review) confirm.
        """
        super().__init__()
        self.iterations = iterations
        self.taps = taps
        self.delay = delay
        self.eps = eps
        self.normalization = normalization
        self.use_dnn_mask = use_dnn_mask
        # Always invert the (floored) power inside wpe_one_iteration.
        self.inverse_power = True
        self.diagonal_loading = diagonal_loading
        self.diag_eps = diag_eps
        self.mask_flooring = mask_flooring
        self.flooring_thres = flooring_thres
        self.use_torch_solver = use_torch_solver
        if self.use_dnn_mask:
            self.nmask = nmask
            self.mask_est = MaskEstimator(
                wtype,
                widim,
                wlayers,
                wunits,
                wprojs,
                dropout_rate,
                nmask=nmask,
                nonlinear=nonlinear,
            )
        else:
            # Without a DNN mask there is a single (unmasked) power estimate.
            self.nmask = 1

    def forward(
        self, data: Union[torch.Tensor, ComplexTensor], ilens: torch.LongTensor
    ) -> Tuple[
        Union[torch.Tensor, ComplexTensor],
        torch.LongTensor,
        Union[torch.Tensor, ComplexTensor],
    ]:
        """DNN_WPE forward function.

        Notation:
            B: Batch
            C: Channel
            T: Time or Sequence length
            F: Freq or Some dimension of the feature vector

        Args:
            data: (B, T, C, F)
            ilens: (B,)
        Returns:
            enhanced (torch.Tensor or List[torch.Tensor]): (B, T, C, F)
            ilens: (B,)
            masks (torch.Tensor or List[torch.Tensor]): (B, T, C, F)
            power (List[torch.Tensor]): (B, F, T)
        """
        # (B, T, C, F) -> (B, F, C, T)
        data = data.permute(0, 3, 2, 1)
        # One running estimate per mask/source; all start from the mixture.
        enhanced = [data for i in range(self.nmask)]
        masks = None
        power = None
        for i in range(self.iterations):
            # Calculate power: (..., C, T)
            power = [enh.real**2 + enh.imag**2 for enh in enhanced]
            if i == 0 and self.use_dnn_mask:
                # mask: (B, F, C, T); estimated once, on the first iteration.
                masks, _ = self.mask_est(data, ilens)
                # floor masks to increase numerical stability
                if self.mask_flooring:
                    masks = [m.clamp(min=self.flooring_thres) for m in masks]
                if self.normalization:
                    # Normalize along T
                    masks = [m / m.sum(dim=-1, keepdim=True) for m in masks]
                # (..., C, T) * (..., C, T) -> (..., C, T)
                power = [p * masks[i] for i, p in enumerate(power)]
            # Averaging along the channel axis: (..., C, T) -> (..., T)
            power = [p.mean(dim=-2).clamp(min=self.eps) for p in power]
            # enhanced: (..., C, T) -> (..., C, T)
            # NOTE(kamo): Calculate in double precision
            enhanced = [
                wpe_one_iteration(
                    to_double(data.contiguous()),
                    to_double(p),
                    taps=self.taps,
                    delay=self.delay,
                    inverse_power=self.inverse_power,
                )
                for p in power
            ]
            # Cast back to the input dtype and zero out padded frames.
            enhanced = [
                enh.to(dtype=data.dtype).masked_fill(make_pad_mask(ilens, enh.real), 0)
                for enh in enhanced
            ]
        # (B, F, C, T) -> (B, T, C, F)
        enhanced = [enh.permute(0, 3, 2, 1) for enh in enhanced]
        if masks is not None:
            masks = (
                [m.transpose(-1, -3) for m in masks]
                if self.nmask > 1
                else masks[0].transpose(-1, -3)
            )
        if self.nmask == 1:
            enhanced = enhanced[0]
        return enhanced, ilens, masks, power

    def predict_mask(
        self, data: Union[torch.Tensor, ComplexTensor], ilens: torch.LongTensor
    ) -> Tuple[torch.Tensor, torch.LongTensor]:
        """Predict mask for WPE dereverberation.

        Args:
            data (torch.complex64/ComplexTensor): (B, T, C, F), double precision
            ilens (torch.Tensor): (B,)
        Returns:
            masks (torch.Tensor or List[torch.Tensor]): (B, T, C, F)
            ilens (torch.Tensor): (B,)
        """
        if self.use_dnn_mask:
            # MaskEstimator consumes (B, F, C, T) in single precision.
            masks, ilens = self.mask_est(to_float(data.permute(0, 3, 2, 1)), ilens)
            # (B, F, C, T) -> (B, T, C, F)
            masks = [m.transpose(-1, -3) for m in masks]
            if self.nmask == 1:
                masks = masks[0]
        else:
            masks = None
        return masks, ilens
| 5,512 | 32.412121 | 87 | py |
espnet | espnet-master/espnet2/enh/layers/dpmulcat.py | import torch
import torch.nn as nn
class MulCatBlock(nn.Module):
    """The MulCat (multiply-and-concatenate) block.

    Two parallel LSTM branches are projected back to the input size and
    multiplied element-wise (gating); the gated features are concatenated
    with the input and projected back to the input dimension.

    Args:
        input_size: int, dimension of the input feature.
            The input should have shape (batch, seq_len, input_size).
        hidden_size: int, dimension of the hidden state.
        dropout: float, the dropout rate in the LSTM layer. (Default: 0.0)
        bidirectional: bool, whether the RNN layers are bidirectional.
            (Default: True)
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        dropout: float = 0.0,
        bidirectional: bool = True,
    ):
        super().__init__()
        rnn_out_size = hidden_size * (2 if bidirectional else 1)
        # Main branch: LSTM + linear projection back to `input_size`.
        self.rnn = nn.LSTM(
            input_size,
            hidden_size,
            1,
            dropout=dropout,
            batch_first=True,
            bidirectional=bidirectional,
        )
        self.rnn_proj = nn.Linear(rnn_out_size, input_size)
        # Gate branch: identical topology.
        self.gate_rnn = nn.LSTM(
            input_size,
            hidden_size,
            num_layers=1,
            batch_first=True,
            dropout=dropout,
            bidirectional=bidirectional,
        )
        self.gate_rnn_proj = nn.Linear(rnn_out_size, input_size)
        # Projects [gated, input] concatenation back to `input_size`.
        self.block_projection = nn.Linear(input_size * 2, input_size)

    def forward(self, input):
        """Compute output after MulCatBlock.

        Args:
            input (torch.Tensor): The input feature.
                Tensor of shape (batch, time, feature_dim)
        Returns:
            (torch.Tensor): The output feature after MulCatBlock.
                Tensor of shape (batch, time, feature_dim)
        """
        shape = input.shape
        # Main RNN branch, projected back to the input dimension.
        main_out, _ = self.rnn(input)
        main_out = (
            self.rnn_proj(main_out.contiguous().view(-1, main_out.shape[2]))
            .view(shape)
            .contiguous()
        )
        # Gate RNN branch, same projection scheme.
        gate_out, _ = self.gate_rnn(input)
        gate_out = (
            self.gate_rnn_proj(gate_out.contiguous().view(-1, gate_out.shape[2]))
            .view(shape)
            .contiguous()
        )
        # Element-wise gating, then concatenate with the input ("MulCat").
        fused = main_out * gate_out
        fused = torch.cat([fused, input], 2)
        # Final linear projection back to the input feature size.
        return self.block_projection(
            fused.contiguous().view(-1, fused.shape[2])
        ).view(shape)
class DPMulCat(nn.Module):
    """Dual-path RNN module with MulCat blocks.

    Args:
        input_size: int, dimension of the input feature.
            The input should have shape (batch, seq_len, input_size).
        hidden_size: int, dimension of the hidden state.
        output_size: int, dimension of the output size.
        num_spk: int, the number of speakers in the output.
        dropout: float, the dropout rate in the LSTM layer. (Default: 0.0)
        bidirectional: bool, whether the RNN layers are bidirectional.
            (Default: True)
        num_layers: int, number of stacked MulCat blocks. (Default: 4)
        input_normalize: bool, whether to apply GroupNorm on the input Tensor.
            (Default: False)
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        output_size: int,
        num_spk: int,
        dropout: float = 0.0,
        num_layers: int = 4,
        bidirectional: bool = True,
        input_normalize: bool = False,
    ):
        super().__init__()
        self.rows_grnn = nn.ModuleList()
        self.cols_grnn = nn.ModuleList()
        self.rows_normalization = nn.ModuleList()
        self.cols_normalization = nn.ModuleList()
        # create the dual path pipeline: each layer holds one intra-path
        # (rows) and one inter-path (cols) MulCat block plus normalization.
        for i in range(num_layers):
            self.rows_grnn.append(
                MulCatBlock(
                    input_size, hidden_size, dropout, bidirectional=bidirectional
                )
            )
            self.cols_grnn.append(
                MulCatBlock(
                    input_size, hidden_size, dropout, bidirectional=bidirectional
                )
            )
            if input_normalize:
                self.rows_normalization.append(nn.GroupNorm(1, input_size, eps=1e-8))
                self.cols_normalization.append(nn.GroupNorm(1, input_size, eps=1e-8))
            else:
                # used to disable normalization
                self.rows_normalization.append(nn.Identity())
                self.cols_normalization.append(nn.Identity())
        # Per-layer output head: expands channels to one set per speaker.
        self.output = nn.Sequential(
            nn.PReLU(), nn.Conv2d(input_size, output_size * num_spk, 1)
        )

    def forward(self, input):
        """Compute output after DPMulCat module.

        Args:
            input (torch.Tensor): The input feature.
                Tensor of shape (batch, N, dim1, dim2)
                Apply RNN on dim1 first and then dim2
        Returns:
            (list(torch.Tensor) or list(list(torch.Tensor))
                In training mode, the module returns output of each DPMulCat block.
                In eval mode, the module only returns output in the last block.
        """
        batch_size, _, d1, d2 = input.shape
        output = input
        output_all = []
        for i in range(len(self.rows_grnn)):
            # Intra-path pass: fold dim2 into the batch, run RNN along dim1.
            row_input = (
                output.permute(0, 3, 2, 1).contiguous().view(batch_size * d2, d1, -1)
            )
            row_output = self.rows_grnn[i](row_input)
            row_output = (
                row_output.view(batch_size, d2, d1, -1).permute(0, 3, 2, 1).contiguous()
            )
            row_output = self.rows_normalization[i](row_output)
            # apply a skip connection
            output = output + row_output
            # Inter-path pass: fold dim1 into the batch, run RNN along dim2.
            col_input = (
                output.permute(0, 2, 3, 1).contiguous().view(batch_size * d1, d2, -1)
            )
            col_output = self.cols_grnn[i](col_input)
            col_output = (
                col_output.view(batch_size, d1, d2, -1).permute(0, 3, 1, 2).contiguous()
            )
            col_output = self.cols_normalization[i](col_output).contiguous()
            # apply a skip connection
            output = output + col_output
            # if training mode, it returns the output Tensor from all layers.
            # Otherwise, it only returns the one from the last layer.
            if self.training or i == (len(self.rows_grnn) - 1):
                output_i = self.output(output)
                output_all.append(output_i)
        return output_all
| 6,771 | 34.642105 | 88 | py |
espnet | espnet-master/espnet2/enh/layers/ifasnet.py | # The implementation of iFaSNet in
# Luo. et al. "Implicit Filter-and-sum Network for
# Multi-channel Speech Separation"
#
# The implementation is based on:
# https://github.com/yluo42/TAC
# Licensed under CC BY-NC-SA 3.0 US.
#
import torch
import torch.nn as nn
from espnet2.enh.layers import dprnn
from espnet2.enh.layers.fasnet import BF_module, FaSNet_base
# implicit FaSNet (iFaSNet)
class iFaSNet(FaSNet_base):
    """Implicit filter-and-sum network (iFaSNet).

    Multi-channel separation model from Luo et al., "Implicit
    Filter-and-sum Network for Multi-channel Speech Separation".
    Encodes each channel's waveform, summarizes encoder context and
    cross-channel NCC features, separates with a DPRNN-TAC module
    (``BF_module``), and decodes per-speaker waveforms for the reference
    (first) microphone.
    """

    def __init__(self, *args, **kwargs):
        """Initialize iFaSNet; all hyperparameters go to ``FaSNet_base``."""
        super(iFaSNet, self).__init__(*args, **kwargs)
        # Number of encoder frames on each side used as context.
        self.context = self.context_len * 2 // self.win_len
        # context compression
        self.summ_BN = nn.Linear(self.enc_dim, self.feature_dim)
        self.summ_RNN = dprnn.SingleRNN(
            "LSTM", self.feature_dim, self.hidden_dim, bidirectional=True
        )
        self.summ_LN = nn.GroupNorm(1, self.feature_dim, eps=self.eps)
        self.summ_output = nn.Linear(self.feature_dim, self.enc_dim)
        # Separator input = compressed context + flattened NCC feature,
        # hence the (2 * context + 1) ** 2 extra channels.
        self.separator = BF_module(
            self.enc_dim + (self.context * 2 + 1) ** 2,
            self.feature_dim,
            self.hidden_dim,
            self.enc_dim,
            self.num_spk,
            self.layer,
            self.segment_size,
            dropout=self.dropout,
            fasnet_type="ifasnet",
        )
        # waveform encoder/decoder
        self.encoder = nn.Conv1d(
            1, self.enc_dim, self.window, stride=self.stride, bias=False
        )
        self.decoder = nn.ConvTranspose1d(
            self.enc_dim, 1, self.window, stride=self.stride, bias=False
        )
        self.enc_LN = nn.GroupNorm(1, self.enc_dim, eps=self.eps)
        # context decompression
        self.gen_BN = nn.Conv1d(self.enc_dim * 2, self.feature_dim, 1)
        self.gen_RNN = dprnn.SingleRNN(
            "LSTM", self.feature_dim, self.hidden_dim, bidirectional=True
        )
        self.gen_LN = nn.GroupNorm(1, self.feature_dim, eps=self.eps)
        self.gen_output = nn.Conv1d(self.feature_dim, self.enc_dim, 1)

    def forward(self, input, num_mic):
        """Separate a multi-channel mixture into per-speaker waveforms.

        Args:
            input (torch.Tensor): (batch, nmic, length) multi-channel
                time-domain mixture.
            num_mic: per-utterance microphone counts; a zero entry
                presumably selects the fixed-array mode (see ``test_model``)
                — TODO(review) confirm against ``BF_module``.
        Returns:
            bf_signal (torch.Tensor): (batch, num_spk, length)
                separated waveforms for the reference (first) microphone.
        """
        batch_size = input.size(0)
        nmic = input.size(1)
        # pad input accordingly
        input, rest = self.pad_input(input, self.window)
        # encoder on all channels
        enc_output = self.encoder(input.view(batch_size * nmic, 1, -1))  # B*nmic, N, L
        seq_length = enc_output.shape[-1]
        # calculate the context of the encoder output
        # consider both past and future
        enc_context = self.signal_context(
            enc_output, self.context
        )  # B*nmic, N, 2C+1, L
        enc_context = enc_context.view(
            batch_size, nmic, self.enc_dim, -1, seq_length
        )  # B, nmic, N, 2C+1, L
        # NCC feature: normalized cross-correlation of every channel's
        # context against the reference (first) channel's context.
        ref_enc = enc_context[:, 0].contiguous()  # B, N, 2C+1, L
        ref_enc = (
            ref_enc.permute(0, 3, 1, 2)
            .contiguous()
            .view(batch_size * seq_length, self.enc_dim, -1)
        )  # B*L, N, 2C+1
        enc_context_copy = (
            enc_context.permute(0, 4, 1, 3, 2)
            .contiguous()
            .view(batch_size * seq_length, nmic, -1, self.enc_dim)
        )  # B*L, nmic, 2C+1, N
        NCC = torch.cat(
            [enc_context_copy[:, i].bmm(ref_enc).unsqueeze(1) for i in range(nmic)], 1
        )  # B*L, nmic, 2C+1, 2C+1
        ref_norm = (
            ref_enc.pow(2).sum(1).unsqueeze(1) + self.eps
        ).sqrt()  # B*L, 1, 2C+1
        enc_norm = (
            enc_context_copy.pow(2).sum(3).unsqueeze(3) + self.eps
        ).sqrt()  # B*L, nmic, 2C+1, 1
        NCC = NCC / (ref_norm.unsqueeze(1) * enc_norm)  # B*L, nmic, 2C+1, 2C+1
        NCC = torch.cat(
            [NCC[:, :, i] for i in range(NCC.shape[2])], 2
        )  # B*L, nmic, (2C+1)^2
        NCC = (
            NCC.view(batch_size, seq_length, nmic, -1).permute(0, 2, 3, 1).contiguous()
        )  # B, nmic, (2C+1)^2, L
        # context compression: summarize each context window into a single
        # encoder-dim vector via BN -> BLSTM (+ residual) -> mean -> linear.
        norm_output = self.enc_LN(enc_output)  # B*nmic, N, L
        norm_context = self.signal_context(
            norm_output, self.context
        )  # B*nmic, N, 2C+1, L
        norm_context = (
            norm_context.permute(0, 3, 2, 1)
            .contiguous()
            .view(-1, self.context * 2 + 1, self.enc_dim)
        )
        norm_context_BN = self.summ_BN(norm_context.view(-1, self.enc_dim)).view(
            -1, self.context * 2 + 1, self.feature_dim
        )
        embedding = (
            self.summ_RNN(norm_context_BN)[0].transpose(1, 2).contiguous()
        )  # B*nmic*L, N, 2C+1
        embedding = norm_context_BN.transpose(1, 2).contiguous() + self.summ_LN(
            embedding
        )  # B*nmic*L, N, 2C+1
        embedding = self.summ_output(embedding.mean(2)).view(
            batch_size, nmic, seq_length, self.enc_dim
        )  # B, nmic, L, N
        embedding = embedding.transpose(2, 3).contiguous()  # B, nmic, N, L
        input_feature = torch.cat([embedding, NCC], 2)  # B, nmic, N+(2C+1)^2, L
        # pass to DPRNN-TAC
        embedding = self.separator(input_feature, num_mic)[
            :, 0
        ].contiguous()  # B, nspk, N, L
        # concatenate with encoder outputs and generate masks
        # context decompression
        norm_context = norm_context.view(
            batch_size, nmic, seq_length, -1, self.enc_dim
        )  # B, nmic, L, 2C+1, N
        norm_context = norm_context.permute(0, 1, 4, 3, 2)[
            :, :1
        ].contiguous()  # B, 1, N, 2C+1, L
        embedding = torch.cat(
            [embedding.unsqueeze(3)] * (self.context * 2 + 1), 3
        )  # B, nspk, N, 2C+1, L
        norm_context = torch.cat(
            [norm_context] * self.num_spk, 1
        )  # B, nspk, N, 2C+1, L
        embedding = (
            torch.cat([norm_context, embedding], 2).permute(0, 1, 4, 2, 3).contiguous()
        )  # B, nspk, L, 2N, 2C+1
        all_filter = self.gen_BN(
            embedding.view(-1, self.enc_dim * 2, self.context * 2 + 1)
        )  # B*nspk*L, N, 2C+1
        all_filter = all_filter + self.gen_LN(
            self.gen_RNN(all_filter.transpose(1, 2))[0].transpose(1, 2)
        )  # B*nspk*L, N, 2C+1
        all_filter = self.gen_output(all_filter)  # B*nspk*L, N, 2C+1
        all_filter = all_filter.view(
            batch_size, self.num_spk, seq_length, self.enc_dim, -1
        )  # B, nspk, L, N+1, 2C+1
        all_filter = all_filter.permute(
            0, 1, 3, 4, 2
        ).contiguous()  # B, nspk, N, 2C+1, L
        # apply to with ref mic's encoder context
        output = (enc_context[:, :1] * all_filter).mean(3)  # B, nspk, N, L
        # decode
        bf_signal = self.decoder(
            output.view(batch_size * self.num_spk, self.enc_dim, -1)
        )  # B*nspk, 1, T
        # Strip the stride offset and the padding added by pad_input.
        if rest > 0:
            bf_signal = bf_signal[:, :, self.stride : -rest - self.stride]
        bf_signal = bf_signal.view(batch_size, self.num_spk, -1)  # B, nspk, T
        return bf_signal
def test_model(model):
import numpy as np
x = torch.rand(3, 4, 32000) # (batch, num_mic, length)
num_mic = (
torch.from_numpy(np.array([3, 3, 2]))
.view(
-1,
)
.type(x.type())
) # ad-hoc array
none_mic = torch.zeros(1).type(x.type()) # fixed-array
y1 = model(x, num_mic.long())
y2 = model(x, none_mic.long())
print(y1.shape, y2.shape) # (batch, nspk, length)
if __name__ == "__main__":
model_iFaSNet = iFaSNet(
enc_dim=64,
feature_dim=64,
hidden_dim=128,
layer=6,
segment_size=24,
nspk=2,
win_len=16,
context_len=16,
sr=16000,
)
test_model(model_iFaSNet)
| 7,647 | 33.922374 | 87 | py |
espnet | espnet-master/espnet2/enh/layers/beamformer_th.py | """Beamformer module."""
from typing import List, Union
import torch
import torch_complex.functional as FC
import torchaudio
def prepare_beamformer_stats(
    signal,
    masks_speech,
    mask_noise,
    powers=None,
    beamformer_type="mvdr",
    bdelay=3,
    btaps=5,
    eps=1e-6,
):
    """Prepare necessary statistics for constructing the specified beamformer.

    Args:
        signal (torch.complex64): (..., F, C, T)
        masks_speech (List[torch.Tensor]): (..., F, C, T) masks for all speech sources
        mask_noise (torch.Tensor): (..., F, C, T) noise mask
        powers (List[torch.Tensor]): powers for all speech sources (..., F, T)
            used for wMPDR or WPD beamformers
        beamformer_type (str): one of the pre-defined beamformer types
        bdelay (int): delay factor, used for WPD beamformser
        btaps (int): number of filter taps, used for WPD beamformser
        eps (torch.Tensor): tiny constant
    Returns:
        beamformer_stats (dict): a dictionary containing all necessary statistics
            e.g. "psd_n", "psd_speech", "psd_distortion"
    Note:
        * When `masks_speech` is a tensor or a single-element list, all returned
            statistics are tensors;
        * When `masks_speech` is a multi-element list, some returned statistics
            can be a list, e.g., "psd_n" for MVDR, "psd_speech" and "psd_distortion".
    """
    # Local import to avoid a circular dependency with dnn_beamformer.
    from espnet2.enh.layers.dnn_beamformer import BEAMFORMER_TYPES

    # FIX: the message was previously the literal "%s is not supported yet"
    # (never %-formatted), so the offending type was not reported.
    assert (
        beamformer_type in BEAMFORMER_TYPES
    ), "%s is not supported yet" % beamformer_type
    if isinstance(masks_speech, (list, tuple)):
        masks_speech = [m.cdouble() for m in masks_speech]
    else:
        masks_speech = [masks_speech.cdouble()]
    num_spk = len(masks_speech)
    # Weighted beamformers (wMPDR/WPD/wLCMP/wMWF) need per-source inverse
    # power weights; compute them from masked signal power if not given.
    if (
        beamformer_type.startswith("wmpdr")
        or beamformer_type.startswith("wpd")
        or beamformer_type == "wlcmp"
        or beamformer_type == "wmwf"
    ):
        if powers is None:
            power_input = signal.real**2 + signal.imag**2
            # Averaging along the channel axis: (..., C, T) -> (..., T)
            powers = [(power_input * m.abs()).mean(dim=-2) for m in masks_speech]
        else:
            assert len(powers) == num_spk, (len(powers), num_spk)
        inverse_powers = [1 / torch.clamp(p, min=eps) for p in powers]
    # Mask-weighted PSD matrices, one per speech source.
    psd_transform = torchaudio.transforms.PSD(multi_mask=True, normalize=True)
    psd_speeches = [
        psd_transform(signal.transpose(-2, -3), m.transpose(-2, -3))
        for m in masks_speech
    ]
    if (
        beamformer_type == "mvdr_souden"
        or beamformer_type == "sdw_mwf"
        or beamformer_type == "r1mwf"
        or beamformer_type.startswith("mvdr_tfs")
        or not beamformer_type.endswith("_souden")
    ):
        # MVDR or other RTF-based formulas
        if mask_noise is not None:
            psd_bg = psd_transform(
                signal.transpose(-2, -3), mask_noise.cdouble().transpose(-2, -3)
            )
        if num_spk == 1:
            assert mask_noise is not None
            psd_noise = psd_bg
        else:
            # Noise for source i = background + all other sources.
            psd_noise = []
            for i in range(num_spk):
                if beamformer_type.startswith("mvdr_tfs"):
                    # NOTE: psd_noise is a list only for this beamformer
                    psd_noise_i = [psd for j, psd in enumerate(psd_speeches) if j != i]
                else:
                    psd_sum = sum(psd for j, psd in enumerate(psd_speeches) if j != i)
                    psd_noise_i = (
                        psd_bg + psd_sum if mask_noise is not None else psd_sum
                    )
                psd_noise.append(psd_noise_i)
    # Select the "denominator" PSD (psd_n) used by each beamformer family.
    if beamformer_type in (
        "mvdr",
        "mvdr_souden",
        "mvdr_tfs_souden",
        "sdw_mwf",
        "r1mwf",
        "lcmv",
        "gev",
        "gev_ban",
    ):
        psd_n = psd_noise
    elif beamformer_type == "mvdr_tfs":
        psd_n = psd_noise
        psd_noise = [sum(psd_noise_i) for psd_noise_i in psd_noise]
    elif beamformer_type in ("mpdr", "mpdr_souden", "lcmp", "mwf"):
        # Observation covariance: (..., C, T) x (..., C, T) -> (..., C, C)
        psd_n = torch.einsum("...ct,...et->...ce", signal, signal.conj())
    elif beamformer_type in ("wmpdr", "wmpdr_souden", "wlcmp", "wmwf"):
        # Power-weighted observation covariance, one per source.
        psd_n = [
            torch.einsum(
                "...ct,...et->...ce",
                signal * inv_p[..., None, :],
                signal.conj(),
            )
            for inv_p in inverse_powers
        ]
    elif beamformer_type in ("wpd", "wpd_souden"):
        psd_n = [
            get_covariances(signal, inv_p, bdelay, btaps, get_vector=False)
            for inv_p in inverse_powers
        ]
    # Single-speaker case: unwrap the one-element lists into plain tensors.
    if num_spk == 1:
        psd_speeches = psd_speeches[0]
        if isinstance(psd_n, (list, tuple)):
            psd_n = psd_n[0]
    if beamformer_type in (
        "mvdr",
        "mpdr",
        "wmpdr",
        "wpd",
        "lcmp",
        "wlcmp",
        "lcmv",
        "mvdr_tfs",
    ):
        return {"psd_n": psd_n, "psd_speech": psd_speeches, "psd_distortion": psd_noise}
    elif (
        beamformer_type.endswith("_souden")
        or beamformer_type.startswith("gev")
        or beamformer_type == "mwf"
        or beamformer_type == "wmwf"
        or beamformer_type == "sdw_mwf"
        or beamformer_type == "r1mwf"
    ):
        return {"psd_n": psd_n, "psd_speech": psd_speeches}
def get_rtf(
    psd_speech,
    psd_noise,
    mode="power",
    reference_vector: Union[int, torch.Tensor] = 0,
    iterations: int = 3,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
):
    """Calculate the relative transfer function (RTF).

    Args:
        psd_speech (torch.complex64):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64):
            noise covariance matrix (..., F, C, C)
        mode (str): one of ("power", "evd")
            "power": power method
            "evd": eigenvalue decomposition
        reference_vector (torch.Tensor or int): (..., C) or scalar
        iterations (int): number of iterations in power method
            (only used when mode == "power")
        diagonal_loading (bool): regularize psd_noise before inversion
            (only used when mode == "power")
        diag_eps (float): regularization factor
    Returns:
        rtf (torch.complex64): (..., F, C)
    """
    if mode == "evd":
        # principal eigenvector of the speech covariance matrix
        return torchaudio.functional.rtf_evd(psd_speech)
    if mode == "power":
        return torchaudio.functional.rtf_power(
            psd_speech,
            psd_noise,
            reference_vector,
            n_iter=iterations,
            diagonal_loading=diagonal_loading,
            diag_eps=diag_eps,
        )
    raise ValueError("Unknown mode: %s" % mode)
def get_mvdr_vector(
    psd_s,
    psd_n,
    reference_vector: Union[torch.Tensor, int],
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Return the MVDR (Minimum Variance Distortionless Response) vector:

        h = (Npsd^-1 @ Spsd) / (Tr(Npsd^-1 @ Spsd)) @ u

    Reference:
        On optimal frequency-domain multichannel linear filtering
        for noise reduction; M. Souden et al., 2010;
        https://ieeexplore.ieee.org/document/5089420

    Args:
        psd_s (torch.complex64):
            speech covariance matrix (..., F, C, C)
        psd_n (torch.complex64):
            observation/noise covariance matrix (..., F, C, C)
        reference_vector (torch.Tensor): (..., C) or an integer
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float): regularization factor for diagonal loading
        eps (float): small constant to avoid division by zero
    Returns:
        beamform_vector (torch.complex64): (..., F, C)
    """  # noqa: D400
    # delegate the Souden-formula MVDR computation to torchaudio
    beamform_vector = torchaudio.functional.mvdr_weights_souden(
        psd_s,
        psd_n,
        reference_vector,
        diagonal_loading=diagonal_loading,
        diag_eps=diag_eps,
        eps=eps,
    )
    return beamform_vector
def get_mvdr_vector_with_rtf(
    psd_n: torch.Tensor,
    psd_speech: torch.Tensor,
    psd_noise: torch.Tensor,
    iterations: int = 3,
    reference_vector: Union[int, torch.Tensor, None] = None,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> torch.Tensor:
    """Return the MVDR (Minimum Variance Distortionless Response) vector
        calculated with RTF:

        h = (Npsd^-1 @ rtf) / (rtf^H @ Npsd^-1 @ rtf)

    Reference:
        On optimal frequency-domain multichannel linear filtering
        for noise reduction; M. Souden et al., 2010;
        https://ieeexplore.ieee.org/document/5089420

    Args:
        psd_n (torch.complex64):
            observation/noise covariance matrix (..., F, C, C)
        psd_speech (torch.complex64):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64):
            noise covariance matrix (..., F, C, C)
        iterations (int): number of iterations in power method
        reference_vector (torch.Tensor or int): (..., C) or scalar
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float): regularization factor for diagonal loading
        eps (float): small constant to avoid division by zero
    Returns:
        beamform_vector (torch.complex64): (..., F, C)
    """  # noqa: H405, D205, D400
    # first estimate the steering (relative transfer function) vector, (B, F, C)
    steering = get_rtf(
        psd_speech,
        psd_noise,
        reference_vector=reference_vector,
        iterations=iterations,
        diagonal_loading=diagonal_loading,
        diag_eps=diag_eps,
    )
    # then plug it into the RTF-based MVDR formula from torchaudio
    return torchaudio.functional.mvdr_weights_rtf(
        steering,
        psd_n,
        reference_vector,
        diagonal_loading=diagonal_loading,
        diag_eps=diag_eps,
        eps=eps,
    )
def apply_beamforming_vector(
beamform_vector: torch.Tensor, mix: torch.Tensor
) -> torch.Tensor:
# (..., C) x (..., C, T) -> (..., T)
es = torch.einsum("...c,...ct->...t", beamform_vector.conj(), mix)
return es
def get_mwf_vector(
    psd_s,
    psd_n,
    reference_vector: Union[torch.Tensor, int],
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Return the MWF (Minimum Multi-channel Wiener Filter) vector:

        h = (Npsd^-1 @ Spsd) @ u

    Args:
        psd_s (torch.complex64):
            speech covariance matrix (..., F, C, C)
        psd_n (torch.complex64):
            power-normalized observation covariance matrix (..., F, C, C)
        reference_vector (torch.Tensor or int): (..., C) or scalar
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float): regularization factor for diagonal loading
        eps (float): small constant used by the regularization
    Returns:
        beamform_vector (torch.complex64): (..., F, C)
    """  # noqa: D400
    if diagonal_loading:
        psd_n = tik_reg(psd_n, reg=diag_eps, eps=eps)
    # W = Npsd^-1 @ Spsd, computed via a linear solve instead of inversion
    filter_mat = torch.linalg.solve(psd_n, psd_s)
    if isinstance(reference_vector, int):
        # pick the column of the reference channel
        return filter_mat[..., reference_vector]
    # project onto the reference: (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
    return torch.einsum(
        "...fec,...c->...fe", filter_mat, reference_vector.to(dtype=filter_mat.dtype)
    )
def get_sdw_mwf_vector(
    psd_speech,
    psd_noise,
    reference_vector: Union[torch.Tensor, int],
    denoising_weight: float = 1.0,
    approx_low_rank_psd_speech: bool = False,
    iterations: int = 3,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Return the SDW-MWF (Speech Distortion Weighted Multi-channel Wiener Filter) vector

        h = (Spsd + mu * Npsd)^-1 @ Spsd @ u

    Reference:
        [1] Spatially pre-processed speech distortion weighted multi-channel Wiener
        filtering for noise reduction; A. Spriet et al, 2004
        https://dl.acm.org/doi/abs/10.1016/j.sigpro.2004.07.028
        [2] Rank-1 constrained multichannel Wiener filter for speech recognition in
        noisy environments; Z. Wang et al, 2018
        https://hal.inria.fr/hal-01634449/document
        [3] Low-rank approximation based multichannel Wiener filter algorithms for
        noise reduction with application in cochlear implants; R. Serizel, 2014
        https://ieeexplore.ieee.org/document/6730918

    Args:
        psd_speech (torch.complex64):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64):
            noise covariance matrix (..., F, C, C)
        reference_vector (torch.Tensor or int): (..., C) or scalar
        denoising_weight (float): a trade-off parameter between noise reduction and
            speech distortion.
            A larger value leads to more noise reduction at the expense of more speech
            distortion.
            The plain MWF is obtained with `denoising_weight = 1` (by default).
        approx_low_rank_psd_speech (bool): whether to replace original input psd_speech
            with its low-rank approximation as in [2]
        iterations (int): number of iterations in power method, only used when
            `approx_low_rank_psd_speech = True`
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64): (..., F, C)
    """  # noqa: H405, D205, D400, E501
    if approx_low_rank_psd_speech:
        if diagonal_loading:
            # regularize here so get_rtf below can skip its own loading
            psd_noise = tik_reg(psd_noise, reg=diag_eps, eps=eps)
        # steering vector estimated via the power method, (B, F, C)
        recon_vec = get_rtf(
            psd_speech,
            psd_noise,
            mode="power",
            iterations=iterations,
            reference_vector=reference_vector,
            diagonal_loading=False,
        )
        # rank-1 outer-product reconstruction of the speech PSD, Eq. (25) in Ref[2]
        psd_speech_r1 = torch.einsum("...c,...e->...ce", recon_vec, recon_vec.conj())
        # rescale so the rank-1 approximation keeps the original speech power
        sigma_speech = FC.trace(psd_speech) / (FC.trace(psd_speech_r1) + eps)
        psd_speech_r1 = psd_speech_r1 * sigma_speech[..., None, None]
        # c.f. Eq. (62) in Ref[3]
        psd_speech = psd_speech_r1
    # (Spsd + mu * Npsd), the matrix to invert in the SDW-MWF formula
    psd_n = psd_speech + denoising_weight * psd_noise
    if diagonal_loading:
        psd_n = tik_reg(psd_n, reg=diag_eps, eps=eps)
    # ws = (Spsd + mu * Npsd)^-1 @ Spsd, via a linear solve
    ws = torch.linalg.solve(psd_n, psd_speech)
    if isinstance(reference_vector, int):
        # pick the column of the reference channel
        beamform_vector = ws[..., reference_vector]
    else:
        # project onto the reference: (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
        beamform_vector = torch.einsum(
            "...fec,...c->...fe", ws, reference_vector.to(dtype=ws.dtype)
        )
    return beamform_vector
def get_rank1_mwf_vector(
    psd_speech,
    psd_noise,
    reference_vector: Union[torch.Tensor, int],
    denoising_weight: float = 1.0,
    approx_low_rank_psd_speech: bool = False,
    iterations: int = 3,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Return the R1-MWF (Rank-1 Multi-channel Wiener Filter) vector

        h = (Npsd^-1 @ Spsd) / (mu + Tr(Npsd^-1 @ Spsd)) @ u

    Reference:
        [1] Rank-1 constrained multichannel Wiener filter for speech recognition in
        noisy environments; Z. Wang et al, 2018
        https://hal.inria.fr/hal-01634449/document
        [2] Low-rank approximation based multichannel Wiener filter algorithms for
        noise reduction with application in cochlear implants; R. Serizel, 2014
        https://ieeexplore.ieee.org/document/6730918

    Args:
        psd_speech (torch.complex64):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64):
            noise covariance matrix (..., F, C, C)
        reference_vector (torch.Tensor or int): (..., C) or scalar
        denoising_weight (float): a trade-off parameter between noise reduction and
            speech distortion.
            A larger value leads to more noise reduction at the expense of more speech
            distortion.
            When `denoising_weight = 0`, it corresponds to MVDR beamformer.
        approx_low_rank_psd_speech (bool): whether to replace original input psd_speech
            with its low-rank approximation as in [1]
        iterations (int): number of iterations in power method, only used when
            `approx_low_rank_psd_speech = True`
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64): (..., F, C)
    """  # noqa: H405, D205, D400
    if diagonal_loading:
        # regularize once up-front; both the RTF estimate and the solve use it
        psd_noise = tik_reg(psd_noise, reg=diag_eps, eps=eps)
    if approx_low_rank_psd_speech:
        # steering vector estimated via the power method, (B, F, C)
        recon_vec = get_rtf(
            psd_speech,
            psd_noise,
            mode="power",
            iterations=iterations,
            reference_vector=reference_vector,
            diagonal_loading=False,
        )
        # rank-1 outer-product reconstruction of the speech PSD, Eq. (25) in Ref[1]
        psd_speech_r1 = torch.einsum("...c,...e->...ce", recon_vec, recon_vec.conj())
        # rescale so the rank-1 approximation keeps the original speech power
        sigma_speech = FC.trace(psd_speech) / (FC.trace(psd_speech_r1) + eps)
        psd_speech_r1 = psd_speech_r1 * sigma_speech[..., None, None]
        # c.f. Eq. (62) in Ref[2]
        psd_speech = psd_speech_r1
    # Npsd^-1 @ Spsd, via a linear solve
    numerator = torch.linalg.solve(psd_noise, psd_speech)
    # NOTE (wangyou): until PyTorch 1.9.0, torch.trace does not
    # support batch processing. Use FC.trace() as fallback.
    # ws: (..., C, C) / (...,) -> (..., C, C)
    ws = numerator / (denoising_weight + FC.trace(numerator)[..., None, None] + eps)
    # h: (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
    if isinstance(reference_vector, int):
        beamform_vector = ws[..., reference_vector]
    else:
        beamform_vector = torch.einsum(
            "...fec,...c->...fe", ws, reference_vector.to(dtype=ws.dtype)
        )
    return beamform_vector
def get_rtf_matrix(
    psd_speeches,
    psd_noises,
    diagonal_loading: bool = True,
    ref_channel: int = 0,
    rtf_iterations: int = 3,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Calculate the RTF matrix with each column the relative transfer function
    of the corresponding source.
    """  # noqa: H405
    assert isinstance(psd_speeches, list) and isinstance(psd_noises, list)
    # one RTF per speaker, each shaped (..., F, C)
    rtfs = []
    for spk, psd_n in enumerate(psd_noises):
        rtfs.append(
            get_rtf(
                psd_speeches[spk],
                psd_n,
                reference_vector=ref_channel,
                iterations=rtf_iterations,
                diagonal_loading=diagonal_loading,
                diag_eps=diag_eps,
            )
        )
    # stack speakers as the last axis: (..., F, C, num_spk)
    rtf_mat = torch.stack(rtfs, dim=-1)
    # normalize at the reference channel
    return rtf_mat / rtf_mat[..., ref_channel, None, :]
def get_lcmv_vector_with_rtf(
    psd_n: torch.Tensor,
    rtf_mat: torch.Tensor,
    reference_vector: Union[int, torch.Tensor, None] = None,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> torch.Tensor:
    """Return the LCMV (Linearly Constrained Minimum Variance) vector
        calculated with RTF:

        h = (Npsd^-1 @ rtf_mat) @ (rtf_mat^H @ Npsd^-1 @ rtf_mat)^-1 @ p

    Reference:
        H. L. Van Trees, “Optimum array processing: Part IV of detection, estimation,
        and modulation theory,” John Wiley & Sons, 2004. (Chapter 6.7)

    Args:
        psd_n (torch.complex64):
            observation/noise covariance matrix (..., F, C, C)
        rtf_mat (torch.complex64):
            RTF matrix (..., F, C, num_spk)
        reference_vector (torch.Tensor or int): (..., num_spk) or scalar
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float): regularization factor for diagonal loading
        eps (float): small constant used by the regularization
    Returns:
        beamform_vector (torch.complex64): (..., F, C)
    """  # noqa: H405, D205, D400
    if diagonal_loading:
        psd_n = tik_reg(psd_n, reg=diag_eps, eps=eps)
    # Npsd^-1 @ rtf_mat: (..., C_1, C_2) x (..., C_2, num_spk) -> (..., C_1, num_spk)
    num = torch.linalg.solve(psd_n, rtf_mat)
    # rtf_mat^H @ Npsd^-1 @ rtf_mat: (..., num_spk, num_spk)
    denom = torch.matmul(rtf_mat.conj().transpose(-1, -2), num)
    if isinstance(reference_vector, int):
        # select the column of the desired source from the inverse
        weights = denom.inverse()[..., reference_vector, None]
    else:
        weights = torch.linalg.solve(denom, reference_vector)
    return torch.matmul(num, weights).squeeze(-1)
def generalized_eigenvalue_decomposition(a: torch.Tensor, b: torch.Tensor, eps=1e-6):
    """Solves the generalized eigenvalue decomposition through Cholesky decomposition.

    ported from https://github.com/asteroid-team/asteroid/blob/master/asteroid/dsp/beamforming.py#L464

    a @ e_vec = e_val * b @ e_vec
    |
    |   Cholesky decomposition on `b`:
    |       b = L @ L^H, where `L` is a lower triangular matrix
    |
    |   Let C = L^-1 @ a @ L^-H, it is Hermitian.
    |
    => C @ y = lambda * y
    => e_vec = L^-H @ y

    Reference: https://www.netlib.org/lapack/lug/node54.html

    Args:
        a: A complex Hermitian or real symmetric matrix whose eigenvalues and
            eigenvectors will be computed. (..., C, C)
        b: A complex Hermitian or real symmetric definite positive matrix. (..., C, C)
    Returns:
        e_val: generalized eigenvalues (ascending order)
        e_vec: generalized eigenvectors
    """  # noqa: H405, E501
    try:
        L = torch.linalg.cholesky(b)
    except RuntimeError:
        # b is numerically not positive definite: regularize and retry
        b = tik_reg(b, reg=eps, eps=eps)
        L = torch.linalg.cholesky(b)
    L_inv = L.inverse()
    L_inv_H = L_inv.conj().transpose(-1, -2)
    # C = L^-1 @ a @ L^-H is Hermitian, so the standard Hermitian EVD applies
    e_val, y = torch.linalg.eigh(L_inv @ a @ L_inv_H)
    # map the eigenvectors of C back to generalized eigenvectors: e_vec = L^-H @ y
    e_vec = torch.matmul(L_inv_H, y)
    return e_val, e_vec
def gev_phase_correction(vector):
    """Phase correction to reduce distortions due to phase inconsistencies.

    ported from https://github.com/fgnt/nn-gev/blob/master/fgnt/beamforming.py#L169

    Args:
        vector: Beamforming vector with shape (..., F, C)
            (the unpacking below assumes exactly 3 dimensions, i.e. (B, F, C))
    Returns:
        w: Phase corrected beamforming vectors
    """
    B, F, C = vector.shape
    # per-bin correlation between adjacent frequencies; for f == 0 the index
    # f - 1 == -1 wraps around to the last frequency bin
    correction = torch.empty_like(vector.real)
    for f in range(F):
        correction[:, f, :] = torch.exp(
            (vector[:, f, :] * vector[:, f - 1, :].conj())
            .sum(dim=-1, keepdim=True)
            .angle()
        )
    # NOTE(review): the cited fgnt reference applies exp(-1j * angle(...))
    # directly, while here the angle is first passed through a real-valued
    # exp() and that result is used as the phase below -- confirm the double
    # exponentiation is intentional before changing it.
    correction = torch.exp(-1j * correction)
    return vector * correction
def blind_analytic_normalization(ws, psd_noise, eps=1e-8):
    """Blind analytic normalization (BAN) for post-filtering

    Args:
        ws (torch.complex64): beamformer vector (..., F, C)
        psd_noise (torch.complex64): noise PSD matrix (..., F, C, C)
        eps (float)
    Returns:
        ws_ban (torch.complex64): normalized beamformer vector (..., F)
    """
    # squared channel count used in the denominator scaling
    num_channels_sq = psd_noise.size(-1) ** 2
    # w^H @ Npsd @ Npsd @ w
    numerator = torch.einsum(
        "...c,...ce,...eo,...o->...", ws.conj(), psd_noise, psd_noise, ws
    )
    # w^H @ Npsd @ w
    denominator = torch.einsum("...c,...ce,...e->...", ws.conj(), psd_noise, ws)
    gain = (numerator + eps).sqrt() / (denominator * num_channels_sq + eps)
    return gain
def get_gev_vector(
    psd_noise: torch.Tensor,
    psd_speech: torch.Tensor,
    mode="power",
    reference_vector: Union[int, torch.Tensor] = 0,
    iterations: int = 3,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> torch.Tensor:
    """Return the generalized eigenvalue (GEV) beamformer vector:

        psd_speech @ h = lambda * psd_noise @ h

    Reference:
        Blind acoustic beamforming based on generalized eigenvalue decomposition;
        E. Warsitz and R. Haeb-Umbach, 2007.

    Args:
        psd_noise (torch.complex64):
            noise covariance matrix (..., F, C, C)
        psd_speech (torch.complex64):
            speech covariance matrix (..., F, C, C)
        mode (str): one of ("power", "evd")
            "power": power method
            "evd": eigenvalue decomposition
        reference_vector (torch.Tensor or int): (..., C) or scalar
        iterations (int): number of iterations in power method
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64): (..., F, C)
    """  # noqa: H405, D205, D400
    if diagonal_loading:
        psd_noise = tik_reg(psd_noise, reg=diag_eps, eps=eps)
    if mode == "power":
        # power iteration on phi = Npsd^-1 @ Spsd converges to its
        # principal eigenvector, i.e. the GEV solution
        phi = torch.linalg.solve(psd_noise, psd_speech)
        # initialize with the reference column (or projection onto the
        # given reference vector)
        e_vec = (
            phi[..., reference_vector, None]
            if isinstance(reference_vector, int)
            else torch.matmul(phi, reference_vector[..., None, :, None])
        )
        for _ in range(iterations - 1):
            e_vec = torch.matmul(phi, e_vec)
            # e_vec = e_vec / complex_norm(e_vec, dim=-1, keepdim=True)
        e_vec = e_vec.squeeze(-1)
    elif mode == "evd":
        # e_vec = generalized_eigenvalue_decomposition(psd_speech, psd_noise)[1][...,-1]
        # solve the GEVD frequency by frequency so a failure in one bin can
        # be caught and patched without discarding the whole batch
        e_vec = psd_noise.new_zeros(psd_noise.shape[:-1])
        for f in range(psd_noise.shape[-3]):
            try:
                # last eigenvector corresponds to the largest eigenvalue
                e_vec[..., f, :] = generalized_eigenvalue_decomposition(
                    psd_speech[..., f, :, :], psd_noise[..., f, :, :]
                )[1][..., -1]
            except RuntimeError:
                # port from github.com/fgnt/nn-gev/blob/master/fgnt/beamforming.py#L106
                print(
                    "GEV beamformer: LinAlg error for frequency {}".format(f),
                    flush=True,
                )
                C = psd_noise.size(-1)
                # fall back to a flat vector scaled by the inverse mean
                # noise power for this frequency bin
                e_vec[..., f, :] = (
                    psd_noise.new_ones(e_vec[..., f, :].shape)
                    / FC.trace(psd_noise[..., f, :, :])
                    * C
                )
    else:
        raise ValueError("Unknown mode: %s" % mode)
    # unit-normalize per frequency, then align phases across frequencies
    beamforming_vector = e_vec / torch.norm(e_vec, dim=-1, keepdim=True)
    beamforming_vector = gev_phase_correction(beamforming_vector)
    return beamforming_vector
def signal_framing(
signal: torch.Tensor,
frame_length: int,
frame_step: int,
bdelay: int,
do_padding: bool = False,
pad_value: int = 0,
indices: List = None,
) -> torch.Tensor:
"""Expand `signal` into several frames, with each frame of length `frame_length`.
Args:
signal : (..., T)
frame_length: length of each segment
frame_step: step for selecting frames
bdelay: delay for WPD
do_padding: whether or not to pad the input signal at the beginning
of the time dimension
pad_value: value to fill in the padding
Returns:
torch.Tensor:
if do_padding: (..., T, frame_length)
else: (..., T - bdelay - frame_length + 2, frame_length)
"""
frame_length2 = frame_length - 1
# pad to the right at the last dimension of `signal` (time dimension)
if do_padding:
# (..., T) --> (..., T + bdelay + frame_length - 2)
signal = torch.nn.functional.pad(
signal, (bdelay + frame_length2 - 1, 0), "constant", pad_value
)
do_padding = False
if indices is None:
# [[ 0, 1, ..., frame_length2 - 1, frame_length2 - 1 + bdelay ],
# [ 1, 2, ..., frame_length2, frame_length2 + bdelay ],
# [ 2, 3, ..., frame_length2 + 1, frame_length2 + 1 + bdelay ],
# ...
# [ T-bdelay-frame_length2, ..., T-1-bdelay, T-1 ]]
indices = [
[*range(i, i + frame_length2), i + frame_length2 + bdelay - 1]
for i in range(0, signal.shape[-1] - frame_length2 - bdelay + 1, frame_step)
]
if torch.is_complex(signal):
real = signal_framing(
signal.real,
frame_length,
frame_step,
bdelay,
do_padding,
pad_value,
indices,
)
imag = signal_framing(
signal.imag,
frame_length,
frame_step,
bdelay,
do_padding,
pad_value,
indices,
)
return torch.complex(real, imag)
else:
# (..., T - bdelay - frame_length + 2, frame_length)
signal = signal[..., indices]
return signal
def get_covariances(
    Y: torch.Tensor,
    inverse_power: torch.Tensor,
    bdelay: int,
    btaps: int,
    get_vector: bool = False,
) -> torch.Tensor:
    """Calculates the power normalized spatio-temporal covariance
        matrix of the framed signal.

    Args:
        Y : Complex STFT signal with shape (B, F, C, T)
        inverse_power : Weighting factor with shape (B, F, T)
        bdelay (int): prediction delay for the framing
        btaps (int): number of filter taps
        get_vector (bool): also return the covariance vector if True
    Returns:
        Correlation matrix: (B, F, (btaps+1) * C, (btaps+1) * C)
        Correlation vector: (B, F, btaps + 1, C, C)
            (only returned when ``get_vector`` is True)
    """  # noqa: H405, D205, D400, D401
    assert inverse_power.dim() == 3, inverse_power.dim()
    assert inverse_power.size(0) == Y.size(0), (inverse_power.size(0), Y.size(0))

    Bs, Fdim, C, T = Y.shape

    # (B, F, C, T - bdelay - btaps + 1, btaps + 1)
    Psi = signal_framing(Y, btaps + 1, 1, bdelay, do_padding=False)[
        ..., : T - bdelay - btaps + 1, :
    ]
    # Reverse along btaps-axis:
    # [tau, tau-bdelay, tau-bdelay-1, ..., tau-bdelay-frame_length+1]
    Psi = torch.flip(Psi, dims=(-1,))
    # weight each frame by the corresponding inverse power
    Psi_norm = Psi * inverse_power[..., None, bdelay + btaps - 1 :, None]

    # let T' = T - bdelay - btaps + 1
    # (B, F, C, T', btaps + 1) x (B, F, C, T', btaps + 1)
    #  -> (B, F, btaps + 1, C, btaps + 1, C)
    covariance_matrix = torch.einsum("bfdtk,bfetl->bfkdle", Psi, Psi_norm.conj())

    # (B, F, btaps + 1, C, btaps + 1, C)
    #   -> (B, F, (btaps + 1) * C, (btaps + 1) * C)
    covariance_matrix = covariance_matrix.view(
        Bs, Fdim, (btaps + 1) * C, (btaps + 1) * C
    )

    if get_vector:
        # correlation between the framed signal and the (delayed) raw signal
        # (B, F, C, T', btaps + 1) x (B, F, C, T')
        #    --> (B, F, btaps +1, C, C)
        covariance_vector = torch.einsum(
            "bfdtk,bfet->bfked", Psi_norm, Y[..., bdelay + btaps - 1 :].conj()
        )
        return covariance_matrix, covariance_vector
    else:
        return covariance_matrix
def get_WPD_filter(
    Phi: torch.Tensor,
    Rf: torch.Tensor,
    reference_vector: torch.Tensor,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> torch.Tensor:
    """Return the WPD vector.

        WPD is the Weighted Power minimization Distortionless response
        convolutional beamformer. As follows:

        h = (Rf^-1 @ Phi_{xx}) / tr[(Rf^-1) @ Phi_{xx}] @ u

    Reference:
        T. Nakatani and K. Kinoshita, "A Unified Convolutional Beamformer
        for Simultaneous Denoising and Dereverberation," in IEEE Signal
        Processing Letters, vol. 26, no. 6, pp. 903-907, June 2019, doi:
        10.1109/LSP.2019.2911179.
        https://ieeexplore.ieee.org/document/8691481

    Args:
        Phi (torch.complex64): (B, F, (btaps+1) * C, (btaps+1) * C)
            is the PSD of zero-padded speech [x^T(t,f) 0 ... 0]^T.
        Rf (torch.complex64): (B, F, (btaps+1) * C, (btaps+1) * C)
            is the power normalized spatio-temporal covariance matrix.
        reference_vector (torch.Tensor): (B, (btaps+1) * C)
            is the reference_vector.
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of Rf
        diag_eps (float): regularization factor for diagonal loading
        eps (float): small constant to avoid division by zero
    Returns:
        filter_matrix (torch.complex64): (B, F, (btaps + 1) * C)
    """
    if diagonal_loading:
        Rf = tik_reg(Rf, reg=diag_eps, eps=eps)
    # numerator: (..., C_1, C_2) x (..., C_2, C_3) -> (..., C_1, C_3)
    numerator = torch.linalg.solve(Rf, Phi)
    # NOTE (wangyou): until PyTorch 1.9.0, torch.trace does not
    # support batch processing. Use FC.trace() as fallback.
    # ws: (..., C, C) / (...,) -> (..., C, C)
    ws = numerator / (FC.trace(numerator)[..., None, None] + eps)
    # h: (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
    beamform_vector = torch.einsum(
        "...fec,...c->...fe", ws, reference_vector.to(dtype=ws.dtype)
    )
    # (B, F, (btaps + 1) * C)
    return beamform_vector
def get_WPD_filter_v2(
    Phi: torch.Tensor,
    Rf: torch.Tensor,
    reference_vector: torch.Tensor,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> torch.Tensor:
    """Return the WPD vector (v2).

    This implementation is more efficient than `get_WPD_filter` as
    it skips unnecessary computation with zeros.

    Args:
        Phi (torch.complex64): (B, F, C, C)
            is speech PSD.
        Rf (torch.complex64): (B, F, (btaps+1) * C, (btaps+1) * C)
            is the power normalized spatio-temporal covariance matrix.
        reference_vector (torch.Tensor): (B, C)
            is the reference_vector.
        diagonal_loading (bool):
            Whether to add a tiny term to the diagonal of Rf
        diag_eps (float): regularization factor for diagonal loading
        eps (float): small constant to avoid division by zero
    Returns:
        filter_matrix (torch.complex64): (B, F, (btaps+1) * C)
    """
    C = reference_vector.shape[-1]
    if diagonal_loading:
        Rf = tik_reg(Rf, reg=diag_eps, eps=eps)
    inv_Rf = Rf.inverse()
    # only the first C columns matter since Phi is zero-padded in theory
    # (B, F, (btaps+1) * C, C)
    inv_Rf_pruned = inv_Rf[..., :C]
    # numerator: (..., C_1, C_2) x (..., C_2, C_3) -> (..., C_1, C_3)
    numerator = torch.matmul(inv_Rf_pruned, Phi)
    # NOTE (wangyou): until PyTorch 1.9.0, torch.trace does not
    # support batch processing. Use FC.trace() as fallback.
    # ws: (..., (btaps+1) * C, C) / (...,) -> (..., (btaps+1) * C, C)
    ws = numerator / (FC.trace(numerator[..., :C, :])[..., None, None] + eps)
    # h: (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
    beamform_vector = torch.einsum(
        "...fec,...c->...fe", ws, reference_vector.to(dtype=ws.dtype)
    )
    # (B, F, (btaps+1) * C)
    return beamform_vector
def get_WPD_filter_with_rtf(
    psd_observed_bar: torch.Tensor,
    psd_speech: torch.Tensor,
    psd_noise: torch.Tensor,
    iterations: int = 3,
    reference_vector: Union[int, torch.Tensor] = 0,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-15,
) -> torch.Tensor:
    """Return the WPD vector calculated with RTF.

        WPD is the Weighted Power minimization Distortionless response
        convolutional beamformer. As follows:

        h = (Rf^-1 @ vbar) / (vbar^H @ R^-1 @ vbar)

    Reference:
        T. Nakatani and K. Kinoshita, "A Unified Convolutional Beamformer
        for Simultaneous Denoising and Dereverberation," in IEEE Signal
        Processing Letters, vol. 26, no. 6, pp. 903-907, June 2019, doi:
        10.1109/LSP.2019.2911179.
        https://ieeexplore.ieee.org/document/8691481

    Args:
        psd_observed_bar (torch.complex64):
            stacked observation covariance matrix
        psd_speech (torch.complex64):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64):
            noise covariance matrix (..., F, C, C)
        iterations (int): number of iterations in power method
        reference_vector (torch.Tensor or int): (..., C) or scalar
        diagonal_loading (bool):
            Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64): (..., F, C)
    """
    C = psd_noise.size(-1)
    # estimate the steering vector first, (B, F, C)
    rtf = get_rtf(
        psd_speech,
        psd_noise,
        mode="power",
        reference_vector=reference_vector,
        iterations=iterations,
        diagonal_loading=diagonal_loading,
        diag_eps=diag_eps,
    )

    # zero-pad the RTF to match the stacked covariance size, (B, F, (K+1)*C)
    rtf = torch.nn.functional.pad(
        rtf, (0, psd_observed_bar.shape[-1] - C), "constant", 0
    )
    # numerator: (..., C_1, C_2) x (..., C_2) -> (..., C_1)
    numerator = torch.linalg.solve(psd_observed_bar, rtf)
    denominator = torch.einsum("...d,...d->...", rtf.conj(), numerator)
    if isinstance(reference_vector, int):
        scale = rtf[..., reference_vector, None].conj()
    else:
        # NOTE(review): passing the operands as a list to torch.einsum is a
        # deprecated calling convention -- confirm against the supported
        # torch versions before modernizing.
        scale = torch.einsum(
            "...c,...c->...",
            [rtf[:, :, :C].conj(), reference_vector[..., None, :].to(dtype=rtf.dtype)],
        ).unsqueeze(-1)
    beamforming_vector = numerator * scale / (denominator.real.unsqueeze(-1) + eps)
    return beamforming_vector
def perform_WPD_filtering(
    filter_matrix: torch.Tensor,
    Y: torch.Tensor,
    bdelay: int,
    btaps: int,
) -> torch.Tensor:
    """Perform WPD filtering.

    Args:
        filter_matrix: Filter matrix (B, F, (btaps + 1) * C)
        Y : Complex STFT signal with shape (B, F, C, T)
        bdelay (int): prediction delay used when framing
        btaps (int): number of filter taps
    Returns:
        enhanced (torch.complex64): (B, F, T)
    """
    # frame the signal so each time step carries its btaps context
    # (B, F, C, T) --> (B, F, C, T, btaps + 1)
    Ytilde = signal_framing(Y, btaps + 1, 1, bdelay, do_padding=True, pad_value=0)
    # reverse the tap axis so the current frame comes first
    Ytilde = torch.flip(Ytilde, dims=(-1,))

    Bs, Fdim, C, T = Y.shape
    # --> (B, F, T, btaps + 1, C) --> (B, F, T, (btaps + 1) * C)
    Ytilde = Ytilde.permute(0, 1, 3, 4, 2).contiguous().view(Bs, Fdim, T, -1)
    # apply the convolutional beamformer: (B, F, T)
    enhanced = torch.einsum("...tc,...c->...t", Ytilde, filter_matrix.conj())
    return enhanced
def tik_reg(mat, reg: float = 1e-8, eps: float = 1e-8):
    """Perform Tikhonov regularization (only modifying real part).

    Adds ``(reg * trace(mat).real + eps)`` to the diagonal of each matrix,
    keeping the regularization magnitude relative to the matrix's scale.

    Args:
        mat (torch.complex64): input matrix (..., C, C)
        reg (float): regularization factor (scales the trace)
        eps (float): absolute floor so an all-zero matrix still gets
            a non-zero regularization
    Returns:
        ret (torch.complex64): regularized matrix (..., C, C)
    """
    # Add eps
    C = mat.size(-1)
    eye = torch.eye(C, dtype=mat.dtype, device=mat.device)
    shape = [1 for _ in range(mat.dim() - 2)] + [C, C]
    eye = eye.view(*shape).repeat(*mat.shape[:-2], 1, 1)
    with torch.no_grad():
        # batched trace via the matrix diagonal; replaces the former
        # third-party FC.trace() fallback that was only needed because
        # torch.trace was not batched before PyTorch 1.9
        epsilon = mat.diagonal(dim1=-2, dim2=-1).sum(-1).real[..., None, None] * reg
        # in case that correlation_matrix is all-zero
        epsilon = epsilon + eps
    mat = mat + epsilon * eye
    return mat
| 37,803 | 34.166512 | 102 | py |
espnet | espnet-master/espnet2/enh/layers/tcndenseunet.py | import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.torch_utils.get_layer_from_string import get_layer
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class Conv2DActNorm(torch.nn.Module):
    """Basic Conv2D + activation + instance norm building block."""

    def __init__(
        self,
        in_channels,
        out_channels,
        ksz=(3, 3),
        stride=(1, 2),
        padding=(1, 0),
        upsample=False,
        activation=torch.nn.ELU,
    ):
        super(Conv2DActNorm, self).__init__()
        if upsample:
            # transposed convolution for the upsampling/decoder path
            conv = torch.nn.ConvTranspose2d(
                in_channels, out_channels, ksz, stride, padding
            )
        else:
            conv = torch.nn.Conv2d(
                in_channels, out_channels, ksz, stride, padding, padding_mode="reflect"
            )
        # conv -> activation -> per-channel GroupNorm (one group per channel)
        self.layer = torch.nn.Sequential(
            conv,
            get_layer(activation)(),
            torch.nn.GroupNorm(out_channels, out_channels, eps=1e-8),
        )

    def forward(self, inp):
        """Apply the conv/activation/norm stack to a (B, C, H, W) tensor."""
        return self.layer(inp)
class FreqWiseBlock(torch.nn.Module):
    """FreqWiseBlock, see iNeuBe paper.

    Block that applies pointwise 2D convolution over
    STFT-like image tensor on frequency axis.
    The input is assumed to be [batch, image_channels, frames, freq].
    """

    def __init__(self, in_channels, num_freqs, out_channels, activation=torch.nn.ELU):
        super(FreqWiseBlock, self).__init__()
        # 1x1 conv reducing the image channels
        self.bottleneck = Conv2DActNorm(
            in_channels, out_channels, (1, 1), (1, 1), (0, 0), activation=activation
        )
        # 1x1 conv mixing information along the frequency axis
        self.freq_proc = Conv2DActNorm(
            num_freqs, num_freqs, (1, 1), (1, 1), (0, 0), activation=activation
        )

    def forward(self, inp):
        """Process a (batch, channels, frames, freqs) tensor."""
        # move freqs into the channel position, mix them, then move back
        bottlenecked = self.bottleneck(inp)
        mixed = self.freq_proc(bottlenecked.permute(0, 3, 2, 1))
        return mixed.permute(0, 3, 2, 1)
class DenseBlock(torch.nn.Module):
    """single DenseNet block as used in iNeuBe model.

    Args:
        in_channels: number of input channels (image axis).
        out_channels: number of output channels (image axis).
        num_freqs: number of complex frequencies in the
            input STFT complex image-like tensor.
            The input is batch, image_channels, frames, freqs.
        pre_blocks: dense block before point-wise convolution block over frequency axis.
        freq_proc_blocks: number of frequency axis processing blocks.
        post_blocks: dense block after point-wise convolution block over frequency axis.
        ksz: kernel size used in densenet Conv2D layers.
        activation: activation function to use in the whole iNeuBe model,
            you can use any torch supported activation e.g. 'relu' or 'elu'.
        hid_chans: number of hidden channels in densenet Conv2D.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        num_freqs,
        pre_blocks=2,
        freq_proc_blocks=1,
        post_blocks=2,
        ksz=(3, 3),
        activation=torch.nn.ELU,
        hid_chans=32,
    ):
        super(DenseBlock, self).__init__()
        assert post_blocks >= 1
        assert pre_blocks >= 1

        # dense connectivity: every layer consumes the concatenation of the
        # block input and all previous layer outputs, so the input channel
        # count grows by hid_chans per layer (tracked by tot_layers)
        self.pre_blocks = torch.nn.ModuleList([])
        tot_layers = 0
        for indx in range(pre_blocks):
            c_layer = Conv2DActNorm(
                in_channels + hid_chans * tot_layers,
                hid_chans,
                ksz,
                (1, 1),
                (1, 1),
                activation=activation,
            )
            self.pre_blocks.append(c_layer)
            tot_layers += 1

        # point-wise processing along the frequency axis
        self.freq_proc_blocks = torch.nn.ModuleList([])
        for indx in range(freq_proc_blocks):
            c_layer = FreqWiseBlock(
                in_channels + hid_chans * tot_layers,
                num_freqs,
                hid_chans,
                activation=activation,
            )
            self.freq_proc_blocks.append(c_layer)
            tot_layers += 1

        self.post_blocks = torch.nn.ModuleList([])
        for indx in range(post_blocks - 1):
            c_layer = Conv2DActNorm(
                in_channels + hid_chans * tot_layers,
                hid_chans,
                ksz,
                (1, 1),
                (1, 1),
                activation=activation,
            )
            self.post_blocks.append(c_layer)
            tot_layers += 1

        # final layer maps to the requested number of output channels
        last = Conv2DActNorm(
            in_channels + hid_chans * tot_layers,
            out_channels,
            ksz,
            (1, 1),
            (1, 1),
            activation=activation,
        )
        self.post_blocks.append(last)

    def forward(self, input):
        """Run the dense block on a (batch, channels, frames, freq) tensor."""
        # `out` accumulates the input plus every intermediate output;
        # each layer sees their channel-wise concatenation
        out = [input]
        for pre_block in self.pre_blocks:
            c_out = pre_block(torch.cat(out, 1))
            out.append(c_out)
        for freq_block in self.freq_proc_blocks:
            c_out = freq_block(torch.cat(out, 1))
            out.append(c_out)
        for post_block in self.post_blocks:
            c_out = post_block(torch.cat(out, 1))
            out.append(c_out)
        # only the last layer's output is returned
        return c_out
class TCNResBlock(torch.nn.Module):
    """single depth-wise separable TCN block as used in iNeuBe TCN.

    Args:
        in_chan: number of input feature channels.
        out_chan: number of output feature channels.
        ksz: kernel size.
        stride: stride in depth-wise convolution.
        dilation: dilation in depth-wise convolution.
        activation: activation function to use in the whole iNeuBe model,
            you can use any torch supported activation e.g. 'relu' or 'elu'.
    """

    def __init__(
        self, in_chan, out_chan, ksz=3, stride=1, dilation=1, activation=torch.nn.ELU
    ):
        super(TCNResBlock, self).__init__()
        # depth-wise conv: one dilated filter per input channel
        depthwise = torch.nn.Conv1d(
            in_chan,
            in_chan,
            ksz,
            stride,
            padding=dilation,
            dilation=dilation,
            padding_mode="reflect",
            groups=in_chan,
        )
        # point-wise conv mixes information across channels
        pointwise = torch.nn.Conv1d(in_chan, out_chan, 1)
        # pre-activation ordering: norm -> activation -> convolutions
        self.layer = torch.nn.Sequential(
            torch.nn.GroupNorm(in_chan, in_chan, eps=1e-8),
            get_layer(activation)(),
            depthwise,
            pointwise,
        )

    def forward(self, inp):
        """[B, C, F] batch, channels, frames; residual connection around the block."""
        return self.layer(inp) + inp
class TCNDenseUNet(torch.nn.Module):
    """TCNDenseNet block from iNeuBe

    Reference:
        Lu, Y. J., Cornell, S., Chang, X., Zhang, W., Li, C., Ni, Z., ... & Watanabe, S.
        Towards Low-Distortion Multi-Channel Speech Enhancement:
        The ESPNET-Se Submission to the L3DAS22 Challenge. ICASSP 2022 p. 9201-9205.

    Args:
        n_spk: number of output sources/speakers.
        in_freqs: number of complex STFT frequencies.
        mic_channels: number of microphones channels
            (only fixed-array geometry supported).
        hid_chans: number of channels in the subsampling/upsampling conv layers.
        hid_chans_dense: number of channels in the densenet layers
            (reduce this to reduce VRAM requirements).
        ksz_dense: kernel size in the densenet layers thorough iNeuBe.
        ksz_tcn: kernel size in the TCN submodule.
        tcn_repeats: number of repetitions of blocks in the TCN submodule.
        tcn_blocks: number of blocks in the TCN submodule.
        tcn_channels: number of channels in the TCN submodule.
        activation: activation function to use in the whole iNeuBe model,
            you can use any torch supported activation e.g. 'relu' or 'elu'.
    """

    def __init__(
        self,
        n_spk=1,
        in_freqs=257,
        mic_channels=1,
        hid_chans=32,
        hid_chans_dense=32,
        ksz_dense=(3, 3),
        ksz_tcn=3,
        tcn_repeats=4,
        tcn_blocks=7,
        tcn_channels=384,
        activation=torch.nn.ELU,
    ):
        super(TCNDenseUNet, self).__init__()
        self.n_spk = n_spk
        self.in_channels = in_freqs
        self.mic_channels = mic_channels
        # the (1, 0) frequency padding of the first conv drops 2 frequency bins
        num_freqs = in_freqs - 2
        # encoder entry: fuse stacked real/imag mic channels, then a dense block
        first = torch.nn.Sequential(
            torch.nn.Conv2d(
                self.mic_channels * 2,
                hid_chans,
                (3, 3),
                (1, 1),
                (1, 0),
                padding_mode="reflect",
            ),
            DenseBlock(
                hid_chans,
                hid_chans,
                num_freqs,
                ksz=ksz_dense,
                activation=activation,
                hid_chans=hid_chans_dense,
            ),
        )
        # frequency-bin counts after each successive 2x frequency downsampling
        freq_axis_dims = self._get_depth(num_freqs)
        self.encoder = torch.nn.ModuleList([])
        self.encoder.append(first)
        # one (downsample conv + dense block) stage per frequency resolution
        for layer_indx in range(len(freq_axis_dims)):
            downsample = Conv2DActNorm(
                hid_chans, hid_chans, (3, 3), (1, 2), (1, 0), activation=activation
            )
            denseblocks = DenseBlock(
                hid_chans,
                hid_chans,
                freq_axis_dims[layer_indx],
                ksz=ksz_dense,
                activation=activation,
                hid_chans=hid_chans_dense,
            )
            c_layer = torch.nn.Sequential(downsample, denseblocks)
            self.encoder.append(c_layer)
        # three final conv stages squeeze the freq axis down to a single bin
        self.encoder.append(
            Conv2DActNorm(
                hid_chans, hid_chans * 2, (3, 3), (1, 2), (1, 0), activation=activation
            )
        )
        self.encoder.append(
            Conv2DActNorm(
                hid_chans * 2,
                hid_chans * 4,
                (3, 3),
                (1, 2),
                (1, 0),
                activation=activation,
            )
        )
        self.encoder.append(
            Conv2DActNorm(
                hid_chans * 4,
                tcn_channels,
                (3, 3),
                (1, 1),
                (1, 0),
                activation=activation,
            )
        )
        # temporal modelling at the bottleneck: stacked dilated TCN blocks,
        # dilation doubling within each repeat (1, 2, 4, ...)
        self.tcn = []
        for r in range(tcn_repeats):
            for x in range(tcn_blocks):
                self.tcn.append(
                    TCNResBlock(
                        tcn_channels,
                        tcn_channels,
                        ksz_tcn,
                        dilation=2**x,
                        activation=activation,
                    )
                )
        self.tcn = torch.nn.Sequential(*self.tcn)
        # decoder mirrors the encoder; each stage consumes the concatenation
        # of the previous decoder output and the matching encoder output,
        # hence the doubled input channel counts
        self.decoder = torch.nn.ModuleList([])
        self.decoder.append(
            Conv2DActNorm(
                tcn_channels * 2,
                hid_chans * 4,
                (3, 3),
                (1, 1),
                (1, 0),
                activation=activation,
                upsample=True,
            )
        )
        self.decoder.append(
            Conv2DActNorm(
                hid_chans * 8,
                hid_chans * 2,
                (3, 3),
                (1, 2),
                (1, 0),
                activation=activation,
                upsample=True,
            )
        )
        self.decoder.append(
            Conv2DActNorm(
                hid_chans * 4,
                hid_chans,
                (3, 3),
                (1, 2),
                (1, 0),
                activation=activation,
                upsample=True,
            )
        )
        # per-resolution (dense block + upsample) stages, in reverse order
        for dec_indx in range(len(freq_axis_dims)):
            c_num_freqs = freq_axis_dims[len(freq_axis_dims) - dec_indx - 1]
            denseblocks = DenseBlock(
                hid_chans * 2,
                hid_chans * 2,
                c_num_freqs,
                ksz=ksz_dense,
                activation=activation,
                hid_chans=hid_chans_dense,
            )
            upsample = Conv2DActNorm(
                hid_chans * 2,
                hid_chans,
                (3, 3),
                (1, 2),
                (1, 0),
                activation=activation,
                upsample=True,
            )
            c_layer = torch.nn.Sequential(denseblocks, upsample)
            self.decoder.append(c_layer)
        # final stage restores the full frequency axis and emits
        # 2 * n_spk channels (real and imaginary part per speaker)
        last = torch.nn.Sequential(
            DenseBlock(
                hid_chans * 2,
                hid_chans * 2,
                self.in_channels - 2,
                ksz=ksz_dense,
                activation=activation,
                hid_chans=hid_chans_dense,
            ),
            torch.nn.ConvTranspose2d(
                hid_chans * 2, 2 * self.n_spk, (3, 3), (1, 1), (1, 0)
            ),
        )
        self.decoder.append(last)

    def _get_depth(self, num_freq):
        """Halve ``num_freq`` until it is <= 15; return the size after each halving."""
        n_layers = 0
        freqs = []
        while num_freq > 15:
            num_freq = int(num_freq / 2)
            freqs.append(num_freq)
            n_layers += 1
        return freqs

    def forward(self, tf_rep):
        """forward.

        Args:
            tf_rep (torch.Tensor): 4D tensor (multi-channel complex STFT of mixture)
                of shape [B, T, C, F] batch, frames, microphones, frequencies.

        Returns:
            out (torch.Tensor): complex 3D tensor monaural STFT of the targets
                shape is [B, T, F] batch, frames, frequencies.
        """
        # B, T, C, F -> B, C, F, T
        tf_rep = tf_rep.permute(0, 2, 3, 1)
        bsz, mics, _, frames = tf_rep.shape
        assert mics == self.mic_channels
        # stack real and imaginary parts as separate channels: B, 2*C, F, T
        inp_feats = torch.cat((tf_rep.real, tf_rep.imag), 1)
        inp_feats = inp_feats.transpose(-1, -2)
        inp_feats = inp_feats.reshape(
            bsz, self.mic_channels * 2, frames, self.in_channels
        )
        # run the encoder, keeping every stage's output for U-Net skips
        enc_out = []
        buffer = inp_feats
        for enc_layer in self.encoder:
            buffer = enc_layer(buffer)
            enc_out.append(buffer)
        # frequency axis must be fully collapsed before the TCN bottleneck
        assert buffer.shape[-1] == 1
        tcn_out = self.tcn(buffer.squeeze(-1)).unsqueeze(-1)
        buffer = tcn_out
        # decoder: concatenate with the mirrored encoder output at each stage
        for indx, dec_layer in enumerate(self.decoder):
            c_input = torch.cat((buffer, enc_out[-(indx + 1)]), 1)
            buffer = dec_layer(c_input)
        # split the 2*n_spk output channels back into real/imag per speaker
        buffer = buffer.reshape(bsz, 2, self.n_spk, -1, self.in_channels)
        if is_torch_1_9_plus:
            out = torch.complex(buffer[:, 0], buffer[:, 1])
        else:
            out = ComplexTensor(buffer[:, 0], buffer[:, 1])
        # bsz, complex_chans, frames or bsz, spk, complex_chans, frames
        return out  # bsz, spk, time, freq -> bsz, time, spk, freq
| 14,465 | 30.311688 | 88 | py |
espnet | espnet-master/espnet2/enh/layers/fasnet.py | # The implementation of FaSNet in
# Y. Luo, et al. “FaSNet: Low-Latency Adaptive Beamforming
# for Multi-Microphone Audio Processing”
# The implementation is based on:
# https://github.com/yluo42/TAC
# Licensed under CC BY-NC-SA 3.0 US.
#
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet2.enh.layers import dprnn
# DPRNN for beamforming filter estimation
class BF_module(nn.Module):
    """DPRNN-TAC beamforming-filter estimator shared by FaSNet and iFaSNet.

    Splits per-channel features into overlapped segments, runs DPRNN with
    transform-average-concatenate (TAC) across channels, and maps the result
    to ``num_spk`` beamforming filters per channel.  The output layer differs
    between the two FaSNet variants (see ``fasnet_type``).
    """

    def __init__(
        self,
        input_dim,
        feature_dim,
        hidden_dim,
        output_dim,
        num_spk=2,
        layer=4,
        segment_size=100,
        bidirectional=True,
        dropout=0.0,
        fasnet_type="ifasnet",
    ):
        super().__init__()
        assert fasnet_type in [
            "fasnet",
            "ifasnet",
        ], "fasnet_type should be fasnet or ifasnet"
        self.input_dim = input_dim
        self.feature_dim = feature_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.layer = layer
        self.segment_size = segment_size
        self.num_spk = num_spk
        # DPRNN with TAC: emits num_spk filter sets per channel
        self.dprnn_model = dprnn.DPRNN_TAC(
            "lstm",
            self.feature_dim,
            self.hidden_dim,
            self.feature_dim * self.num_spk,
            num_layers=layer,
            bidirectional=bidirectional,
            dropout=dropout,
        )
        self.eps = 1e-8
        self.fasnet_type = fasnet_type
        if fasnet_type == "ifasnet":
            # output layer in ifasnet
            self.output = nn.Conv1d(self.feature_dim, self.output_dim, 1)
        elif fasnet_type == "fasnet":
            # gated output layer in ifasnet
            self.output = nn.Sequential(
                nn.Conv1d(self.feature_dim, self.output_dim, 1), nn.Tanh()
            )
            self.output_gate = nn.Sequential(
                nn.Conv1d(self.feature_dim, self.output_dim, 1), nn.Sigmoid()
            )
        self.num_spk = num_spk
        # bottleneck projection applied before segmentation
        self.BN = nn.Conv1d(self.input_dim, self.feature_dim, 1, bias=False)

    def forward(self, input, num_mic):
        """Estimate beamforming filters.

        Args:
            input (torch.Tensor): per-channel features, shape (B, ch, N, T).
            num_mic (torch.Tensor): shape (B,), number of valid channels per
                utterance; all-zero signals a fixed-geometry array.

        Returns:
            torch.Tensor: filters of shape (B, ch, nspk, L, N) for "fasnet"
                or (B, ch, nspk, K, L) for "ifasnet".
        """
        # input: (B, ch, N, T)
        batch_size, ch, N, seq_length = input.shape
        input = input.view(batch_size * ch, N, seq_length)  # B*ch, N, T
        enc_feature = self.BN(input)
        # split the encoder output into overlapped, longer segments
        enc_segments, enc_rest = dprnn.split_feature(
            enc_feature, self.segment_size
        )  # B*ch, N, L, K
        enc_segments = enc_segments.view(
            batch_size, ch, -1, enc_segments.shape[2], enc_segments.shape[3]
        )  # B, ch, N, L, K
        output = self.dprnn_model(enc_segments, num_mic).view(
            batch_size * ch * self.num_spk,
            self.feature_dim,
            self.segment_size,
            -1,
        )  # B*ch*nspk, N, L, K
        # overlap-and-add of the outputs
        output = dprnn.merge_feature(output, enc_rest)  # B*ch*nspk, N, T
        if self.fasnet_type == "fasnet":
            # gated output layer for filter generation
            bf_filter = self.output(output) * self.output_gate(
                output
            )  # B*ch*nspk, K, T
            bf_filter = (
                bf_filter.transpose(1, 2)
                .contiguous()
                .view(batch_size, ch, self.num_spk, -1, self.output_dim)
            )  # B, ch, nspk, L, N
        elif self.fasnet_type == "ifasnet":
            # output layer
            bf_filter = self.output(output)  # B*ch*nspk, K, T
            bf_filter = bf_filter.view(
                batch_size, ch, self.num_spk, self.output_dim, -1
            )  # B, ch, nspk, K, L
        return bf_filter
# base module for FaSNet
class FaSNet_base(nn.Module):
    """Base class for FaSNet variants.

    Provides shared utilities: input padding, chunking with context,
    per-frame context expansion and batched cosine similarity.  Subclasses
    implement :meth:`forward`.

    Args:
        enc_dim: dimension of the waveform encoder output.
        feature_dim: DPRNN feature dimension.
        hidden_dim: DPRNN hidden dimension.
        layer: number of DPRNN layers.
        segment_size: DPRNN segment size.
        nspk: number of output speakers/sources.
        win_len: window length in ms.
        context_len: context length in ms.
        dropout: DPRNN dropout.
        sr: sampling rate in Hz.
    """

    def __init__(
        self,
        enc_dim,
        feature_dim,
        hidden_dim,
        layer,
        segment_size=24,
        nspk=2,
        win_len=16,
        context_len=16,
        dropout=0.0,
        sr=16000,
    ):
        super(FaSNet_base, self).__init__()
        # parameters
        self.win_len = win_len
        # window in samples; at least 2 so that stride (window // 2) is >= 1
        self.window = max(int(sr * win_len / 1000), 2)
        self.stride = self.window // 2
        self.sr = sr
        self.context_len = context_len
        self.dropout = dropout
        self.enc_dim = enc_dim
        self.feature_dim = feature_dim
        self.hidden_dim = hidden_dim
        self.segment_size = segment_size
        self.layer = layer
        self.num_spk = nspk
        self.eps = 1e-8

    def pad_input(self, input, window):
        """Zero-padding input according to window/stride size."""
        batch_size, nmic, nsample = input.shape
        stride = self.stride
        # pad the signals at the end for matching the window/stride size
        rest = window - (stride + nsample % window) % window
        if rest > 0:
            pad = torch.zeros(batch_size, nmic, rest).type(input.type())
            input = torch.cat([input, pad], 2)
        # one extra stride of zeros on both sides for the 50%-overlap framing
        pad_aux = torch.zeros(batch_size, nmic, stride).type(input.type())
        input = torch.cat([pad_aux, input, pad_aux], 2)
        return input, rest

    def seg_signal_context(self, x, window, context):
        """Segmenting the signal into chunks with specific context.

        input:
            x: size (B, ch, T)
            window: int
            context: int

        Returns:
            center_frame: (B, ch, nchunk, window) the window-sized centers.
            chunks: (B, ch, nchunk, 2*context + window) centers plus context.
            rest: number of padded samples to strip after overlap-add.
        """
        # pad input accordingly
        # first pad according to window size
        input, rest = self.pad_input(x, window)
        batch_size, nmic, nsample = input.shape
        stride = window // 2
        # pad another context size
        pad_context = torch.zeros(batch_size, nmic, context).type(input.type())
        input = torch.cat([pad_context, input, pad_context], 2)  # B, ch, L
        # calculate index for each chunk
        nchunk = 2 * nsample // window - 1
        begin_idx = np.arange(nchunk) * stride
        begin_idx = (
            torch.from_numpy(begin_idx).type(input.type()).long().view(1, 1, -1)
        )  # 1, 1, nchunk
        begin_idx = begin_idx.expand(batch_size, nmic, nchunk)  # B, ch, nchunk
        # select entries from index
        chunks = [
            torch.gather(input, 2, begin_idx + i).unsqueeze(3)
            for i in range(2 * context + window)
        ]  # B, ch, nchunk, 1
        chunks = torch.cat(chunks, 3)  # B, ch, nchunk, chunk_size
        # center frame
        center_frame = chunks[:, :, :, context : context + window]
        return center_frame, chunks, rest

    def signal_context(self, x, context):
        """signal context function

        Segmenting the signal into chunks with specific context.

        input:
            x: size (B, dim, nframe)
            context: int
        """
        batch_size, dim, nframe = x.shape
        zero_pad = torch.zeros(batch_size, dim, context).type(x.type())
        pad_past = []
        pad_future = []
        # build shifted copies of x: `context` frames into the past and future,
        # zero-padded at the sequence boundaries
        for i in range(context):
            pad_past.append(
                torch.cat([zero_pad[:, :, i:], x[:, :, : -context + i]], 2).unsqueeze(2)
            )
            pad_future.append(
                torch.cat([x[:, :, i + 1 :], zero_pad[:, :, : i + 1]], 2).unsqueeze(2)
            )
        pad_past = torch.cat(pad_past, 2)  # B, D, C, L
        pad_future = torch.cat(pad_future, 2)  # B, D, C, L
        all_context = torch.cat(
            [pad_past, x.unsqueeze(2), pad_future], 2
        )  # B, D, 2*C+1, L
        return all_context

    def seq_cos_sim(self, ref, target):
        """Cosine similarity between some reference mics and some target mics

        ref: shape (nmic1, L, seg1)
        target: shape (nmic2, L, seg2)
        """
        assert ref.size(1) == target.size(1), "Inputs should have same length."
        assert ref.size(2) >= target.size(
            2
        ), "Reference input should be no smaller than the target input."
        seq_length = ref.size(1)
        larger_ch = ref.size(0)
        # broadcast the smaller channel dimension up to the larger one
        if target.size(0) > ref.size(0):
            ref = ref.expand(
                target.size(0), ref.size(1), ref.size(2)
            ).contiguous()  # nmic2, L, seg1
            larger_ch = target.size(0)
        elif target.size(0) < ref.size(0):
            target = target.expand(
                ref.size(0), target.size(1), target.size(2)
            ).contiguous()  # nmic1, L, seg2
        # L2 norms
        # grouped conv1d with an all-ones kernel computes the sliding sum of
        # squares of `ref` over each target-length segment
        ref_norm = F.conv1d(
            ref.view(1, -1, ref.size(2)).pow(2),
            torch.ones(ref.size(0) * ref.size(1), 1, target.size(2)).type(ref.type()),
            groups=larger_ch * seq_length,
        )  # 1, larger_ch*L, seg1-seg2+1
        ref_norm = ref_norm.sqrt() + self.eps
        target_norm = (
            target.norm(2, dim=2).view(1, -1, 1) + self.eps
        )  # 1, larger_ch*L, 1
        # cosine similarity
        # grouped conv1d computes the sliding inner product ref . target
        cos_sim = F.conv1d(
            ref.view(1, -1, ref.size(2)),
            target.view(-1, 1, target.size(2)),
            groups=larger_ch * seq_length,
        )  # 1, larger_ch*L, seg1-seg2+1
        cos_sim = cos_sim / (ref_norm * target_norm)
        return cos_sim.view(larger_ch, seq_length, -1)

    def forward(self, input, num_mic):
        """abstract forward function

        input: shape (batch, max_num_ch, T)
        num_mic: shape (batch, ), the number of channels for each input.
            Zero for fixed geometry configuration.
        """
        pass
# single-stage FaSNet + TAC
class FaSNet_TAC(FaSNet_base):
    """Single-stage FaSNet with transform-average-concatenate (TAC).

    Estimates a time-domain beamforming filter per chunk and speaker from
    cross-channel cosine-similarity features plus encoder embeddings, applies
    it to each microphone's context, and overlap-adds back to a waveform.
    """

    def __init__(self, *args, **kwargs):
        super(FaSNet_TAC, self).__init__(*args, **kwargs)
        # context in samples; the estimated filter spans 2*context + 1 taps
        self.context = int(self.sr * self.context_len / 1000)
        self.filter_dim = self.context * 2 + 1
        # DPRNN + TAC for estimation
        self.all_BF = BF_module(
            self.filter_dim + self.enc_dim,
            self.feature_dim,
            self.hidden_dim,
            self.filter_dim,
            self.num_spk,
            self.layer,
            self.segment_size,
            dropout=self.dropout,
            fasnet_type="fasnet",
        )
        # waveform encoder
        self.encoder = nn.Conv1d(
            1, self.enc_dim, self.context * 2 + self.window, bias=False
        )
        self.enc_LN = nn.GroupNorm(1, self.enc_dim, eps=1e-8)

    def forward(self, input, num_mic):
        """Separate speakers from a multi-channel mixture.

        Args:
            input (torch.Tensor): shape (batch, max_num_ch, T).
            num_mic (torch.Tensor): shape (batch,), valid channels per input;
                all-zero means fixed geometry (average over all channels).

        Returns:
            torch.Tensor: separated waveforms, shape (batch, nspk, T).
        """
        batch_size = input.size(0)
        nmic = input.size(1)
        # split input into chunks
        all_seg, all_mic_context, rest = self.seg_signal_context(
            input, self.window, self.context
        )  # B, nmic, L, win/chunk
        seq_length = all_seg.size(2)
        # embeddings for all channels
        enc_output = (
            self.encoder(all_mic_context.view(-1, 1, self.context * 2 + self.window))
            .view(batch_size * nmic, seq_length, self.enc_dim)
            .transpose(1, 2)
            .contiguous()
        )  # B*nmic, N, L
        enc_output = self.enc_LN(enc_output).view(
            batch_size, nmic, self.enc_dim, seq_length
        )  # B, nmic, N, L
        # calculate the cosine similarities for ref channel's center
        # frame with all channels' context
        ref_seg = all_seg[:, 0].contiguous().view(1, -1, self.window)  # 1, B*L, win
        all_context = (
            all_mic_context.transpose(0, 1)
            .contiguous()
            .view(nmic, -1, self.context * 2 + self.window)
        )  # 1, B*L, 3*win
        all_cos_sim = self.seq_cos_sim(all_context, ref_seg)  # nmic, B*L, 2*win+1
        all_cos_sim = (
            all_cos_sim.view(nmic, batch_size, seq_length, self.filter_dim)
            .permute(1, 0, 3, 2)
            .contiguous()
        )  # B, nmic, 2*win+1, L
        input_feature = torch.cat([enc_output, all_cos_sim], 2)  # B, nmic, N+2*win+1, L
        # pass to DPRNN
        all_filter = self.all_BF(input_feature, num_mic)  # B, ch, nspk, L, 2*win+1
        # convolve with all mic's context
        mic_context = torch.cat(
            [
                all_mic_context.view(
                    batch_size * nmic, 1, seq_length, self.context * 2 + self.window
                )
            ]
            * self.num_spk,
            1,
        )  # B*nmic, nspk, L, 3*win
        # grouped conv1d applies each chunk's filter to its own context
        all_bf_output = F.conv1d(
            mic_context.view(1, -1, self.context * 2 + self.window),
            all_filter.view(-1, 1, self.filter_dim),
            groups=batch_size * nmic * self.num_spk * seq_length,
        )  # 1, B*nmic*nspk*L, win
        all_bf_output = all_bf_output.view(
            batch_size, nmic, self.num_spk, seq_length, self.window
        )  # B, nmic, nspk, L, win
        # reshape to utterance
        # overlap-and-add of 50%-overlapped windows
        bf_signal = all_bf_output.view(
            batch_size * nmic * self.num_spk, -1, self.window * 2
        )
        bf_signal1 = (
            bf_signal[:, :, : self.window]
            .contiguous()
            .view(batch_size * nmic * self.num_spk, 1, -1)[:, :, self.stride :]
        )
        bf_signal2 = (
            bf_signal[:, :, self.window :]
            .contiguous()
            .view(batch_size * nmic * self.num_spk, 1, -1)[:, :, : -self.stride]
        )
        bf_signal = bf_signal1 + bf_signal2  # B*nmic*nspk, 1, T
        if rest > 0:
            bf_signal = bf_signal[:, :, :-rest]
        bf_signal = bf_signal.view(
            batch_size, nmic, self.num_spk, -1
        )  # B, nmic, nspk, T
        # consider only the valid channels
        if num_mic.max() == 0:
            bf_signal = bf_signal.mean(1)  # B, nspk, T
        else:
            bf_signal = [
                bf_signal[b, : num_mic[b]].mean(0).unsqueeze(0)
                for b in range(batch_size)
            ]  # nspk, T
            bf_signal = torch.cat(bf_signal, 0)  # B, nspk, T
        return bf_signal
def test_model(model):
x = torch.rand(2, 4, 32000) # (batch, num_mic, length)
num_mic = (
torch.from_numpy(np.array([3, 2]))
.view(
-1,
)
.type(x.type())
) # ad-hoc array
none_mic = torch.zeros(1).type(x.type()) # fixed-array
y1 = model(x, num_mic.long())
y2 = model(x, none_mic.long())
print(y1.shape, y2.shape) # (batch, nspk, length)
if __name__ == "__main__":
    # quick smoke test: build a small FaSNet+TAC model and run it on random
    # input in both ad-hoc and fixed-geometry modes (see test_model)
    model_TAC = FaSNet_TAC(
        enc_dim=64,
        feature_dim=64,
        hidden_dim=128,
        layer=4,
        segment_size=50,
        nspk=2,
        win_len=4,
        context_len=16,
        sr=16000,
    )
    test_model(model_TAC)
| 14,366 | 31.213004 | 88 | py |
espnet | espnet-master/espnet2/enh/layers/adapt_layers.py | # noqa E501: Ported from https://github.com/BUTSpeechFIT/speakerbeam/blob/main/src/models/adapt_layers.py
# Copyright (c) 2021 Brno University of Technology
# Copyright (c) 2021 Nippon Telegraph and Telephone corporation (NTT).
# All rights reserved
# By Katerina Zmolikova, August 2021.
from functools import partial
import torch
import torch.nn as nn
def make_adapt_layer(type, indim, enrolldim, ninputs=1):
    """Instantiate an adaptation layer registered under alias ``type``.

    ``type`` is one of the keys of ``adaptation_layer_types``
    ("concat", "muladd", "mul").
    """
    layer_cls = adaptation_layer_types.get(type)
    return layer_cls(indim, enrolldim, ninputs)
def into_tuple(x):
    """Transform a tensor/list/tuple into a tuple.

    Args:
        x: a ``torch.Tensor``, ``list`` or ``tuple``.

    Returns:
        tuple: ``x`` itself if already a tuple, ``tuple(x)`` for a list,
            or a 1-tuple wrapping a tensor.

    Raises:
        ValueError: if ``x`` is none of the supported types.
    """
    if isinstance(x, tuple):
        return x
    if isinstance(x, list):
        return tuple(x)
    if isinstance(x, torch.Tensor):
        return (x,)
    # original message read "list of tuple"; fixed wording
    raise ValueError("x should be a tensor, a list, or a tuple")
def into_orig_type(x, orig_type):
    """Invert :func:`into_tuple`: convert tuple ``x`` back to ``orig_type``."""
    if orig_type is torch.Tensor:
        return x[0]
    if orig_type is list:
        return list(x)
    if orig_type is tuple:
        return x
    # unreachable for types produced by into_tuple
    assert False
class ConcatAdaptLayer(nn.Module):
    """Speaker adaptation by concatenating the enrollment embedding.

    The enrollment vector is tiled over time, concatenated to the main
    activations along the channel axis, and projected back to ``indim``
    with a per-input linear layer.
    """

    def __init__(self, indim, enrolldim, ninputs=1):
        super().__init__()
        self.ninputs = ninputs
        projections = [nn.Linear(indim + enrolldim, indim) for _ in range(ninputs)]
        self.transform = nn.ModuleList(projections)

    def forward(self, main, enroll):
        """ConcatAdaptLayer forward.

        Args:
            main: tensor or tuple or list
                activations in the main neural network, which are adapted
                tuple/list may be useful when we want to apply the adaptation
                to both normal and skip connection at once
            enroll: tensor or tuple or list
                embedding extracted from enrollment
                tuple/list may be useful when we want to apply the adaptation
                to both normal and skip connection at once
        """
        assert type(main) == type(enroll)
        orig_type = type(main)
        main = into_tuple(main)
        enroll = into_tuple(enroll)
        assert len(main) == len(enroll) == self.ninputs
        adapted = []
        for proj, act, emb in zip(self.transform, main, enroll):
            # tile the embedding over the time axis, fuse along channels
            tiled = emb[:, :, None].expand(act.shape)
            fused = torch.cat((act, tiled), dim=1).permute(0, 2, 1)
            adapted.append(proj(fused).permute(0, 2, 1))
        return into_orig_type(tuple(adapted), orig_type)
class MulAddAdaptLayer(nn.Module):
    """Speaker adaptation by element-wise scaling (and optional shift).

    With ``do_addition`` the enrollment embedding packs a multiplicative and
    an additive half (so ``enrolldim == 2 * indim``); otherwise it is used
    purely multiplicatively (``enrolldim == indim``).
    """

    def __init__(self, indim, enrolldim, ninputs=1, do_addition=True):
        super().__init__()
        self.ninputs = ninputs
        self.do_addition = do_addition
        if do_addition:
            assert enrolldim == 2 * indim, (enrolldim, indim)
        else:
            assert enrolldim == indim, (enrolldim, indim)

    def forward(self, main, enroll):
        """MulAddAdaptLayer Forward.

        Args:
            main: tensor or tuple or list
                activations in the main neural network, which are adapted
                tuple/list may be useful when we want to apply the adaptation
                to both normal and skip connection at once
            enroll: tensor or tuple or list
                embedding extracted from enrollment
                tuple/list may be useful when we want to apply the adaptation
                to both normal and skip connection at once
        """
        assert type(main) == type(enroll)
        orig_type = type(main)
        main = into_tuple(main)
        enroll = into_tuple(enroll)
        assert len(main) == len(enroll) == self.ninputs, (
            len(main),
            len(enroll),
            self.ninputs,
        )
        adapted = []
        for act, emb in zip(main, enroll):
            if self.do_addition:
                scale, shift = torch.chunk(emb, 2, dim=1)
                adapted.append(scale[:, :, None] * act + shift[:, :, None])
            else:
                adapted.append(emb[:, :, None] * act)
        return into_orig_type(tuple(adapted), orig_type)
# aliases for possible adaptation layer types, used by make_adapt_layer;
# "mul" is MulAddAdaptLayer without the additive term
adaptation_layer_types = {
    "concat": ConcatAdaptLayer,
    "muladd": MulAddAdaptLayer,
    "mul": partial(MulAddAdaptLayer, do_addition=False),
}
| 4,373 | 32.906977 | 105 | py |
espnet | espnet-master/espnet2/enh/layers/conv_utils.py | # noqa: E501 ported from https://discuss.pytorch.org/t/utility-function-for-calculating-the-shape-of-a-conv-output/11173/7
import math
def num2tuple(num):
    """Return ``num`` unchanged if it is a tuple, else duplicate it into a pair."""
    if isinstance(num, tuple):
        return num
    return (num, num)
def conv2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):
    """Compute the (height, width) produced by a Conv2d.

    Each argument may be a scalar (applied to both axes) or a tuple.
    ``pad`` additionally accepts per-side values, e.g. ``((top, bottom),
    (left, right))``.
    """

    def _pair(v):
        return v if isinstance(v, tuple) else (v, v)

    h_w = _pair(h_w)
    kernel_size = _pair(kernel_size)
    stride = _pair(stride)
    dilation = _pair(dilation)
    # normalize padding to ((before, after), (before, after))
    pad = tuple(_pair(p) for p in _pair(pad))
    return tuple(
        math.floor(
            (h_w[i] + sum(pad[i]) - dilation[i] * (kernel_size[i] - 1) - 1) / stride[i]
            + 1
        )
        for i in range(2)
    )
def convtransp2d_output_shape(
    h_w, kernel_size=1, stride=1, pad=0, dilation=1, out_pad=0
):
    """Compute the (height, width) produced by a ConvTranspose2d.

    Each argument may be a scalar (applied to both axes) or a tuple.
    ``pad`` additionally accepts per-side values, e.g. ``((top, bottom),
    (left, right))``.
    """

    def _pair(v):
        return v if isinstance(v, tuple) else (v, v)

    h_w = _pair(h_w)
    kernel_size = _pair(kernel_size)
    stride = _pair(stride)
    dilation = _pair(dilation)
    out_pad = _pair(out_pad)
    # normalize padding to ((before, after), (before, after))
    pad = tuple(_pair(p) for p in _pair(pad))
    return tuple(
        (h_w[i] - 1) * stride[i]
        - sum(pad[i])
        + dilation[i] * (kernel_size[i] - 1)
        + out_pad[i]
        + 1
        for i in range(2)
    )
| 1,462 | 24.224138 | 122 | py |
espnet | espnet-master/espnet2/enh/layers/complexnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class NavieComplexLSTM(nn.Module):
    """Naive complex-valued LSTM built from two real LSTMs.

    The real and imaginary streams are processed by two shared real LSTMs and
    recombined following complex multiplication rules:
    ``real_out = LSTM_r(re) - LSTM_i(im)``, ``imag_out = LSTM_r(im) + LSTM_i(re)``.
    (The class name keeps the original "Navie" spelling for compatibility.)

    Args:
        input_size: total (real + imag) input feature size; each LSTM sees half.
        hidden_size: total (real + imag) hidden size; each LSTM uses half.
        projection_dim: if given, total output size of an extra linear
            projection applied to each stream (half per stream).
        bidirectional: whether the internal LSTMs are bidirectional.
        batch_first: accepted for interface compatibility but NOT forwarded to
            the internal LSTMs (they are always created with
            batch_first=False) — preserved from the original implementation.
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        projection_dim=None,
        bidirectional=False,
        batch_first=False,
    ):
        super(NavieComplexLSTM, self).__init__()
        self.bidirectional = bidirectional
        self.input_dim = input_size // 2
        self.rnn_units = hidden_size // 2
        self.real_lstm = nn.LSTM(
            self.input_dim,
            self.rnn_units,
            num_layers=1,
            bidirectional=bidirectional,
            batch_first=False,
        )
        self.imag_lstm = nn.LSTM(
            self.input_dim,
            self.rnn_units,
            num_layers=1,
            bidirectional=bidirectional,
            batch_first=False,
        )
        # bidirectional LSTMs double the output feature size
        bidirectional = 2 if bidirectional else 1
        if projection_dim is not None:
            self.projection_dim = projection_dim // 2
            self.r_trans = nn.Linear(
                self.rnn_units * bidirectional, self.projection_dim
            )
            self.i_trans = nn.Linear(
                self.rnn_units * bidirectional, self.projection_dim
            )
        else:
            self.projection_dim = None

    def forward(self, inputs):
        """Run the complex LSTM.

        Args:
            inputs: either a [real, imag] list of tensors, or a single tensor
                whose last dimension stacks real then imaginary features.

        Returns:
            list: [real_out, imag_out].
        """
        if isinstance(inputs, list):
            real, imag = inputs
        elif isinstance(inputs, torch.Tensor):
            # BUGFIX: was torch.chunk(inputs, -1), which passes -1 as the
            # number of chunks; split the last dim into real/imag halves.
            real, imag = torch.chunk(inputs, 2, -1)
        r2r_out = self.real_lstm(real)[0]
        r2i_out = self.imag_lstm(real)[0]
        i2r_out = self.real_lstm(imag)[0]
        i2i_out = self.imag_lstm(imag)[0]
        # complex multiplication structure: (a+ib)(W_r + iW_i)
        real_out = r2r_out - i2i_out
        imag_out = i2r_out + r2i_out
        if self.projection_dim is not None:
            real_out = self.r_trans(real_out)
            imag_out = self.i_trans(imag_out)
        return [real_out, imag_out]

    def flatten_parameters(self):
        self.imag_lstm.flatten_parameters()
        self.real_lstm.flatten_parameters()
def complex_cat(inputs, axis):
    """Concatenate complex-packed tensors along ``axis``.

    Each input stacks its real half followed by its imaginary half along
    ``axis``; the output keeps that layout: all real halves first, then all
    imaginary halves.
    """
    reals, imags = [], []
    for tensor in inputs:
        re, im = torch.chunk(tensor, 2, axis)
        reals.append(re)
        imags.append(im)
    return torch.cat([torch.cat(reals, axis), torch.cat(imags, axis)], axis)
class ComplexConv2d(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=(1, 1),
        stride=(1, 1),
        padding=(0, 0),
        dilation=1,
        groups=1,
        causal=True,
        complex_axis=1,
    ):
        """ComplexConv2d.

        Implements a complex-valued convolution with two real Conv2d layers
        (real and imaginary kernels) combined per complex multiplication:
        out_re = conv_r(re) - conv_i(im), out_im = conv_i(re) + conv_r(im).

        in_channels: real+imag
        out_channels: real+imag
        kernel_size : input [B,C,D,T] kernel size in [D,T]
        padding : input [B,C,D,T] padding in [D,T]
        causal: if causal, will padding time dimension's left side,
            otherwise both
        complex_axis: dimension along which real/imag halves are stacked
        """
        super(ComplexConv2d, self).__init__()
        # each real conv operates on one half of the complex channels
        self.in_channels = in_channels // 2
        self.out_channels = out_channels // 2
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.causal = causal
        self.groups = groups
        self.dilation = dilation
        self.complex_axis = complex_axis
        # D-axis padding is handled by the conv; T-axis padding is applied
        # manually in forward() so it can be made causal
        self.real_conv = nn.Conv2d(
            self.in_channels,
            self.out_channels,
            kernel_size,
            self.stride,
            padding=[self.padding[0], 0],
            dilation=self.dilation,
            groups=self.groups,
        )
        self.imag_conv = nn.Conv2d(
            self.in_channels,
            self.out_channels,
            kernel_size,
            self.stride,
            padding=[self.padding[0], 0],
            dilation=self.dilation,
            groups=self.groups,
        )
        nn.init.normal_(self.real_conv.weight.data, std=0.05)
        nn.init.normal_(self.imag_conv.weight.data, std=0.05)
        nn.init.constant_(self.real_conv.bias, 0.0)
        nn.init.constant_(self.imag_conv.bias, 0.0)

    def forward(self, inputs):
        """Apply the complex convolution.

        inputs: tensor with real/imag halves stacked along complex_axis.
        Returns a tensor with the same complex packing.
        """
        # causal: pad only the past (left) side of the time axis
        if self.padding[1] != 0 and self.causal:
            inputs = F.pad(inputs, [self.padding[1], 0, 0, 0])
        else:
            inputs = F.pad(inputs, [self.padding[1], self.padding[1], 0, 0])
        if self.complex_axis == 0:
            # NOTE(review): this branch runs both convs on the full input and
            # splits the results along axis 0 — presumably relies on the
            # conv treating the stacked halves independently; confirm usage
            real = self.real_conv(inputs)
            imag = self.imag_conv(inputs)
            real2real, imag2real = torch.chunk(real, 2, self.complex_axis)
            real2imag, imag2imag = torch.chunk(imag, 2, self.complex_axis)
        else:
            if isinstance(inputs, torch.Tensor):
                real, imag = torch.chunk(inputs, 2, self.complex_axis)
            real2real = self.real_conv(
                real,
            )
            imag2imag = self.imag_conv(
                imag,
            )
            real2imag = self.imag_conv(real)
            imag2real = self.real_conv(imag)
        # complex multiplication: (a+ib)(Wr+iWi)
        real = real2real - imag2imag
        imag = real2imag + imag2real
        out = torch.cat([real, imag], self.complex_axis)
        return out
class ComplexConvTranspose2d(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=(1, 1),
        stride=(1, 1),
        padding=(0, 0),
        output_padding=(0, 0),
        causal=False,
        complex_axis=1,
        groups=1,
    ):
        """ComplexConvTranspose2d.

        Complex-valued transposed convolution built from two real
        ConvTranspose2d layers, combined per complex multiplication rules.

        in_channels: real+imag
        out_channels: real+imag
        complex_axis: dimension along which real/imag halves are stacked
        """
        super(ComplexConvTranspose2d, self).__init__()
        # each real conv operates on one half of the complex channels
        self.in_channels = in_channels // 2
        self.out_channels = out_channels // 2
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.output_padding = output_padding
        self.groups = groups
        self.real_conv = nn.ConvTranspose2d(
            self.in_channels,
            self.out_channels,
            kernel_size,
            self.stride,
            padding=self.padding,
            output_padding=output_padding,
            groups=self.groups,
        )
        self.imag_conv = nn.ConvTranspose2d(
            self.in_channels,
            self.out_channels,
            kernel_size,
            self.stride,
            padding=self.padding,
            output_padding=output_padding,
            groups=self.groups,
        )
        self.complex_axis = complex_axis
        nn.init.normal_(self.real_conv.weight, std=0.05)
        nn.init.normal_(self.imag_conv.weight, std=0.05)
        nn.init.constant_(self.real_conv.bias, 0.0)
        nn.init.constant_(self.imag_conv.bias, 0.0)

    def forward(self, inputs):
        """Apply the complex transposed convolution.

        inputs: a tensor with real/imag halves stacked along complex_axis,
        or a [real, imag] list/tuple.
        """
        if isinstance(inputs, torch.Tensor):
            real, imag = torch.chunk(inputs, 2, self.complex_axis)
        elif isinstance(inputs, tuple) or isinstance(inputs, list):
            real = inputs[0]
            imag = inputs[1]
        if self.complex_axis == 0:
            # NOTE(review): this branch convolves the full `inputs` (not the
            # chunks above) and would fail for list inputs — presumably only
            # ever reached with tensor inputs; confirm against callers
            real = self.real_conv(inputs)
            imag = self.imag_conv(inputs)
            real2real, imag2real = torch.chunk(real, 2, self.complex_axis)
            real2imag, imag2imag = torch.chunk(imag, 2, self.complex_axis)
        else:
            if isinstance(inputs, torch.Tensor):
                real, imag = torch.chunk(inputs, 2, self.complex_axis)
            real2real = self.real_conv(
                real,
            )
            imag2imag = self.imag_conv(
                imag,
            )
            real2imag = self.imag_conv(real)
            imag2real = self.real_conv(imag)
        # complex multiplication: (a+ib)(Wr+iWi)
        real = real2real - imag2imag
        imag = real2imag + imag2real
        out = torch.cat([real, imag], self.complex_axis)
        return out
class ComplexBatchNorm(torch.nn.Module):
    """Complex-valued batch normalization.

    Whitens the 2D (real, imag) distribution per channel using the inverse
    square root of the 2x2 covariance matrix, with an optional complex affine
    transform (Wrr/Wri/Wii, Br/Bi). Inputs stack the real and imaginary
    halves along ``complex_axis``.

    Args:
        num_features: total (real + imag) channel count; each half has
            ``num_features // 2`` channels.
        eps: Tikhonov regularizer added to the covariance diagonal.
        momentum: running-stats momentum (None = cumulative moving average).
        affine: learn the complex affine parameters.
        track_running_stats: keep running mean/covariance for eval mode.
        complex_axis: dimension along which real/imag halves are stacked.
    """

    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.1,
        affine=True,
        track_running_stats=True,
        complex_axis=1,
    ):
        super(ComplexBatchNorm, self).__init__()
        self.num_features = num_features // 2
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        self.complex_axis = complex_axis
        if self.affine:
            # complex affine: 2x2 symmetric weight matrix per channel + bias
            self.Wrr = torch.nn.Parameter(torch.Tensor(self.num_features))
            self.Wri = torch.nn.Parameter(torch.Tensor(self.num_features))
            self.Wii = torch.nn.Parameter(torch.Tensor(self.num_features))
            self.Br = torch.nn.Parameter(torch.Tensor(self.num_features))
            self.Bi = torch.nn.Parameter(torch.Tensor(self.num_features))
        else:
            self.register_parameter("Wrr", None)
            self.register_parameter("Wri", None)
            self.register_parameter("Wii", None)
            self.register_parameter("Br", None)
            self.register_parameter("Bi", None)
        if self.track_running_stats:
            # running mean (RMr/RMi) and covariance entries (RVrr/RVri/RVii)
            self.register_buffer("RMr", torch.zeros(self.num_features))
            self.register_buffer("RMi", torch.zeros(self.num_features))
            self.register_buffer("RVrr", torch.ones(self.num_features))
            self.register_buffer("RVri", torch.zeros(self.num_features))
            self.register_buffer("RVii", torch.ones(self.num_features))
            self.register_buffer(
                "num_batches_tracked", torch.tensor(0, dtype=torch.long)
            )
        else:
            self.register_parameter("RMr", None)
            self.register_parameter("RMi", None)
            self.register_parameter("RVrr", None)
            self.register_parameter("RVri", None)
            self.register_parameter("RVii", None)
            self.register_parameter("num_batches_tracked", None)
        self.reset_parameters()

    def reset_running_stats(self):
        """Reset running mean/covariance to the identity-whitening state."""
        if self.track_running_stats:
            self.RMr.zero_()
            self.RMi.zero_()
            self.RVrr.fill_(1)
            self.RVri.zero_()
            self.RVii.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        """Reset running stats and (if affine) the complex affine parameters."""
        self.reset_running_stats()
        if self.affine:
            self.Br.data.zero_()
            self.Bi.data.zero_()
            self.Wrr.data.fill_(1)
            self.Wri.data.uniform_(-0.9, +0.9)  # W will be positive-definite
            self.Wii.data.fill_(1)

    def _check_input_dim(self, xr, xi):
        assert xr.shape == xi.shape
        assert xr.size(1) == self.num_features

    def forward(self, inputs):
        """Whiten (and optionally affine-transform) the complex input.

        Args:
            inputs: tensor with real/imag halves stacked along complex_axis.

        Returns:
            torch.Tensor: normalized tensor with the same complex packing.
        """
        xr, xi = torch.chunk(inputs, 2, self.complex_axis)
        exponential_average_factor = 0.0
        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            if self.momentum is None:  # use cumulative moving average
                exponential_average_factor = 1.0 / self.num_batches_tracked.item()
            else:  # use exponential moving average
                exponential_average_factor = self.momentum
        # NOTE: The precise meaning of the "training flag" is:
        #       True:  Normalize using batch statistics, update running statistics
        #              if they are being collected.
        #       False: Normalize using running statistics, ignore batch statistics.
        training = self.training or not self.track_running_stats
        # reduce over every dim except channels (dim 1)
        redux = [i for i in reversed(range(xr.dim())) if i != 1]
        vdim = [1] * xr.dim()
        vdim[1] = xr.size(1)
        # Mean M Computation and Centering
        # Includes running mean update if training and running.
        if training:
            Mr, Mi = xr, xi
            for d in redux:
                Mr = Mr.mean(d, keepdim=True)
                Mi = Mi.mean(d, keepdim=True)
            if self.track_running_stats:
                self.RMr.lerp_(Mr.squeeze(), exponential_average_factor)
                self.RMi.lerp_(Mi.squeeze(), exponential_average_factor)
        else:
            Mr = self.RMr.view(vdim)
            Mi = self.RMi.view(vdim)
        xr, xi = xr - Mr, xi - Mi
        # Variance Matrix V Computation
        # Includes epsilon numerical stabilizer/Tikhonov regularizer.
        # Includes running variance update if training and running.
        if training:
            Vrr = xr * xr
            Vri = xr * xi
            Vii = xi * xi
            for d in redux:
                Vrr = Vrr.mean(d, keepdim=True)
                Vri = Vri.mean(d, keepdim=True)
                Vii = Vii.mean(d, keepdim=True)
            if self.track_running_stats:
                self.RVrr.lerp_(Vrr.squeeze(), exponential_average_factor)
                self.RVri.lerp_(Vri.squeeze(), exponential_average_factor)
                self.RVii.lerp_(Vii.squeeze(), exponential_average_factor)
        else:
            Vrr = self.RVrr.view(vdim)
            Vri = self.RVri.view(vdim)
            Vii = self.RVii.view(vdim)
        Vrr = Vrr + self.eps
        Vri = Vri
        Vii = Vii + self.eps
        # Matrix Inverse Square Root U = V^-0.5
        # sqrt of a 2x2 matrix,
        # - https://en.wikipedia.org/wiki/Square_root_of_a_2_by_2_matrix
        tau = Vrr + Vii
        # BUGFIX: was torch.addcmul(Vrr * Vii, -1, Vri, Vri); that positional
        # `value` overload is deprecated/removed in modern torch. delta = det(V).
        delta = Vrr * Vii - Vri * Vri
        s = delta.sqrt()
        t = (tau + 2 * s).sqrt()
        # matrix inverse, http://mathworld.wolfram.com/MatrixInverse.html
        rst = (s * t).reciprocal()
        Urr = (s + Vii) * rst
        Uii = (s + Vrr) * rst
        Uri = (-Vri) * rst
        # Optionally left-multiply U by affine weights W to produce combined
        # weights Z, left-multiply the inputs by Z, then optionally bias them.
        #
        # y = Zx + B
        # y = WUx + B
        # y = [Wrr Wri][Urr Uri] [xr] + [Br]
        #     [Wir Wii][Uir Uii] [xi]   [Bi]
        if self.affine:
            Wrr, Wri, Wii = (
                self.Wrr.view(vdim),
                self.Wri.view(vdim),
                self.Wii.view(vdim),
            )
            Zrr = (Wrr * Urr) + (Wri * Uri)
            Zri = (Wrr * Uri) + (Wri * Uii)
            Zir = (Wri * Urr) + (Wii * Uri)
            Zii = (Wri * Uri) + (Wii * Uii)
        else:
            Zrr, Zri, Zir, Zii = Urr, Uri, Uri, Uii
        yr = (Zrr * xr) + (Zri * xi)
        yi = (Zir * xr) + (Zii * xi)
        if self.affine:
            yr = yr + self.Br.view(vdim)
            yi = yi + self.Bi.view(vdim)
        outputs = torch.cat([yr, yi], self.complex_axis)
        return outputs

    def extra_repr(self):
        return (
            "{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
            "track_running_stats={track_running_stats}".format(**self.__dict__)
        )
| 14,634 | 32.643678 | 85 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.