repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
espnet | espnet-master/espnet2/layers/utterance_mvn.py | from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet2.layers.abs_normalize import AbsNormalize
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class UtteranceMVN(AbsNormalize):
    """Utterance-level mean/variance normalization layer.

    The statistics are computed per utterance by :func:`utterance_mvn`;
    this module only stores the configuration flags.
    """

    def __init__(
        self,
        norm_means: bool = True,
        norm_vars: bool = False,
        eps: float = 1.0e-20,
    ):
        assert check_argument_types()
        super().__init__()
        # Keep the configuration; the actual computation happens in forward()
        self.norm_means = norm_means
        self.norm_vars = norm_vars
        self.eps = eps

    def extra_repr(self):
        return "norm_means={}, norm_vars={}".format(self.norm_means, self.norm_vars)

    def forward(
        self, x: torch.Tensor, ilens: torch.Tensor = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Normalize ``x`` per utterance.

        Args:
            x: (B, L, ...)
            ilens: (B,)
        """
        options = dict(
            norm_means=self.norm_means,
            norm_vars=self.norm_vars,
            eps=self.eps,
        )
        return utterance_mvn(x, ilens, **options)
def utterance_mvn(
    x: torch.Tensor,
    ilens: torch.Tensor = None,
    norm_means: bool = True,
    norm_vars: bool = False,
    eps: float = 1.0e-20,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Apply utterance mean and variance normalization

    Args:
        x: (B, T, D), assumed zero padded
        ilens: (B,) valid length of each utterance; if None, every frame
            of every utterance is considered valid
        norm_means: subtract the per-utterance mean
        norm_vars: divide by the per-utterance standard deviation
        eps: lower bound for the standard deviation
    """
    if ilens is None:
        ilens = x.new_full([x.size(0)], x.size(1))
    # ilens_: (B, 1, ..., 1) so it broadcasts over the time/feature dims
    ilens_ = ilens.to(x.device, x.dtype).view(-1, *[1 for _ in range(x.dim() - 1)])
    # Zero padding: out-of-place when autograd tracks x, because
    # masked_fill_ on a leaf tensor that requires grad raises an error
    if x.requires_grad:
        x = x.masked_fill(make_pad_mask(ilens, x, 1), 0.0)
    else:
        x.masked_fill_(make_pad_mask(ilens, x, 1), 0.0)
    # mean: (B, 1, D)
    mean = x.sum(dim=1, keepdim=True) / ilens_
    if norm_means:
        x -= mean
        if norm_vars:
            var = x.pow(2).sum(dim=1, keepdim=True) / ilens_
            std = torch.clamp(var.sqrt(), min=eps)
            x = x / std
        return x, ilens
    else:
        if norm_vars:
            # Center a copy only to estimate the variance; x itself keeps
            # its mean because norm_means is False
            y = x - mean
            y.masked_fill_(make_pad_mask(ilens, y, 1), 0.0)
            var = y.pow(2).sum(dim=1, keepdim=True) / ilens_
            std = torch.clamp(var.sqrt(), min=eps)
            # BUGFIX: guard the in-place division like the masking above.
            # `x /= std` raises on a leaf tensor that requires grad and
            # silently mutates the caller's input tensor.
            if x.requires_grad:
                x = x / std
            else:
                x /= std
        return x, ilens
| 2,316 | 25.033708 | 83 | py |
espnet | espnet-master/espnet2/layers/mask_along_axis.py | import math
from typing import Sequence, Union
import torch
from typeguard import check_argument_types
def mask_along_axis(
    spec: torch.Tensor,
    spec_lengths: torch.Tensor,
    mask_width_range: Sequence[int] = (0, 30),
    dim: int = 1,
    num_mask: int = 2,
    replace_with_zero: bool = True,
):
    """Randomly mask bands of a spectrogram along one axis (SpecAugment).

    Args:
        spec: (Batch, Length, Freq) or (Batch, Channel, Length, Freq)
        spec_lengths: (Batch,): returned unchanged; not used for masking
        mask_width_range: each mask width is drawn uniformly from this range
        dim: 1 masks along time, 2 masks along frequency
        num_mask: number of masks per example
        replace_with_zero: fill masked bins with 0.0 instead of the mean
    """
    original_shape = spec.size()
    if spec.dim() == 4:
        # Fold channels into the batch axis: (B, C, L, F) -> (B*C, L, F)
        spec = spec.view(-1, spec.size(2), spec.size(3))
    batch = spec.shape[0]
    # axis_size = Length or Freq, depending on dim
    axis_size = spec.shape[dim]
    # widths: (B, num_mask, 1)
    widths = torch.randint(
        mask_width_range[0],
        mask_width_range[1],
        (batch, num_mask),
        device=spec.device,
    ).unsqueeze(2)
    # starts: (B, num_mask, 1), chosen so every mask fits inside the axis
    starts = torch.randint(
        0, max(1, axis_size - widths.max()), (batch, num_mask), device=spec.device
    ).unsqueeze(2)
    # positions: (1, 1, D)
    positions = torch.arange(axis_size, device=spec.device)[None, None, :]
    # Per-mask interval membership, then OR over masks: (B, num_mask, D) -> (B, D)
    interval = (starts <= positions) & (positions < (starts + widths))
    combined = interval.any(dim=1)
    if dim == 1:
        # combined: (Batch, Length, 1)
        combined = combined.unsqueeze(2)
    elif dim == 2:
        # combined: (Batch, 1, Freq)
        combined = combined.unsqueeze(1)
    fill_value = 0.0 if replace_with_zero else spec.mean()
    if spec.requires_grad:
        spec = spec.masked_fill(combined, fill_value)
    else:
        spec = spec.masked_fill_(combined, fill_value)
    return spec.view(*original_shape), spec_lengths
class MaskAlongAxis(torch.nn.Module):
    """SpecAugment-style random masking along the time or frequency axis."""

    def __init__(
        self,
        mask_width_range: Union[int, Sequence[int]] = (0, 30),
        num_mask: int = 2,
        dim: Union[int, str] = "time",
        replace_with_zero: bool = True,
    ):
        assert check_argument_types()
        if isinstance(mask_width_range, int):
            mask_width_range = (0, mask_width_range)
        if len(mask_width_range) != 2:
            raise TypeError(
                f"mask_width_range must be a tuple of int and int values: "
                f"{mask_width_range}",
            )
        assert mask_width_range[1] > mask_width_range[0]
        if isinstance(dim, str):
            name_to_axis = {"time": 1, "freq": 2}
            if dim not in name_to_axis:
                raise ValueError("dim must be int, 'time' or 'freq'")
            dim = name_to_axis[dim]
        # Human-readable axis name, used only by extra_repr()
        self.mask_axis = {1: "time", 2: "freq"}.get(dim, "unknown")
        super().__init__()
        self.mask_width_range = mask_width_range
        self.num_mask = num_mask
        self.dim = dim
        self.replace_with_zero = replace_with_zero

    def extra_repr(self):
        return "mask_width_range={}, num_mask={}, axis={}".format(
            self.mask_width_range, self.num_mask, self.mask_axis
        )

    def forward(self, spec: torch.Tensor, spec_lengths: torch.Tensor = None):
        """Apply the configured random masking.

        Args:
            spec: (Batch, Length, Freq)
        """
        return mask_along_axis(
            spec,
            spec_lengths,
            mask_width_range=self.mask_width_range,
            dim=self.dim,
            num_mask=self.num_mask,
            replace_with_zero=self.replace_with_zero,
        )
class MaskAlongAxisVariableMaxWidth(torch.nn.Module):
    """Mask input spec along a specified axis with variable maximum width.

    The maximum mask width scales with the sequence length:
        max_width = max_width_ratio * seq_len
    """

    def __init__(
        self,
        mask_width_ratio_range: Union[float, Sequence[float]] = (0.0, 0.05),
        num_mask: int = 2,
        dim: Union[int, str] = "time",
        replace_with_zero: bool = True,
    ):
        assert check_argument_types()
        if isinstance(mask_width_ratio_range, float):
            mask_width_ratio_range = (0.0, mask_width_ratio_range)
        if len(mask_width_ratio_range) != 2:
            raise TypeError(
                f"mask_width_ratio_range must be a tuple of float and float values: "
                f"{mask_width_ratio_range}",
            )
        assert mask_width_ratio_range[1] > mask_width_ratio_range[0]
        if isinstance(dim, str):
            name_to_axis = {"time": 1, "freq": 2}
            if dim not in name_to_axis:
                raise ValueError("dim must be int, 'time' or 'freq'")
            dim = name_to_axis[dim]
        # Human-readable axis name, used only by extra_repr()
        self.mask_axis = {1: "time", 2: "freq"}.get(dim, "unknown")
        super().__init__()
        self.mask_width_ratio_range = mask_width_ratio_range
        self.num_mask = num_mask
        self.dim = dim
        self.replace_with_zero = replace_with_zero

    def extra_repr(self):
        return "mask_width_ratio_range={}, num_mask={}, axis={}".format(
            self.mask_width_ratio_range, self.num_mask, self.mask_axis
        )

    def forward(self, spec: torch.Tensor, spec_lengths: torch.Tensor = None):
        """Apply masking whose width range is derived from the input length.

        Args:
            spec: (Batch, Length, Freq)
        """
        seq_len = spec.shape[self.dim]
        # Clamp the derived width range to [0, seq_len]
        lower = max(0, math.floor(seq_len * self.mask_width_ratio_range[0]))
        upper = min(seq_len, math.floor(seq_len * self.mask_width_ratio_range[1]))
        if upper > lower:
            return mask_along_axis(
                spec,
                spec_lengths,
                mask_width_range=(lower, upper),
                dim=self.dim,
                num_mask=self.num_mask,
                replace_with_zero=self.replace_with_zero,
            )
        # Degenerate width range: nothing to mask
        return spec, spec_lengths
| 6,242 | 29.453659 | 85 | py |
espnet | espnet-master/espnet2/layers/label_aggregation.py | from typing import Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class LabelAggregate(torch.nn.Module):
    """Aggregate sample-wise labels into frame-wise labels.

    Framing and padding follow the conventions of ``torch.stft`` so the
    aggregated labels line up with spectral features.
    """

    def __init__(
        self,
        win_length: int = 512,
        hop_length: int = 128,
        center: bool = True,
    ):
        assert check_argument_types()
        super().__init__()
        # Framing parameters, mirroring torch.stft's win_length/hop_length/center
        self.win_length = win_length
        self.hop_length = hop_length
        self.center = center

    def extra_repr(self):
        return (
            f"win_length={self.win_length}, "
            f"hop_length={self.hop_length}, "
            f"center={self.center}, "
        )

    def forward(
        self, input: torch.Tensor, ilens: torch.Tensor = None
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """LabelAggregate forward function.

        Args:
            input: (Batch, Nsamples, Label_dim)
            ilens: (Batch)
        Returns:
            output: (Batch, Frames, Label_dim)
        """
        bs = input.size(0)
        max_length = input.size(1)
        label_dim = input.size(2)
        # NOTE(jiatong):
        # The default behaviour of label aggregation is compatible with
        # torch.stft about framing and padding.
        # Step1: center padding — pad both ends, then copy the edge regions
        # into the padding (mirrors the original boundary labels)
        if self.center:
            pad = self.win_length // 2
            max_length = max_length + 2 * pad
            input = torch.nn.functional.pad(input, (0, 0, pad, pad), "constant", 0)
            input[:, :pad, :] = input[:, pad : (2 * pad), :]
            input[:, (max_length - pad) : max_length, :] = input[
                :, (max_length - 2 * pad) : (max_length - pad), :
            ]
        nframe = (max_length - self.win_length) // self.hop_length + 1
        # Step2: framing via a zero-copy strided view
        # output: (Batch, Frames, win_length, Label_dim)
        output = input.as_strided(
            (bs, nframe, self.win_length, label_dim),
            (max_length * label_dim, self.hop_length * label_dim, label_dim, 1),
        )
        # Step3: a label is active in a frame when it is set in more than
        # half of the window (assumes binary labels — TODO confirm)
        output = torch.gt(output.sum(dim=2, keepdim=False), self.win_length // 2)
        output = output.float()
        # Step4: process lengths; zero out frames beyond each utterance
        if ilens is not None:
            if self.center:
                pad = self.win_length // 2
                ilens = ilens + 2 * pad
            olens = (ilens - self.win_length) // self.hop_length + 1
            output.masked_fill_(make_pad_mask(olens, output, 1), 0.0)
        else:
            olens = None
        return output, olens
| 2,519 | 29.361446 | 83 | py |
espnet | espnet-master/espnet2/layers/log_mel.py | from typing import Tuple
import librosa
import torch
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class LogMel(torch.nn.Module):
    """Convert STFT to fbank feats

    The arguments is same as librosa.filters.mel

    Args:
        fs: number > 0 [scalar] sampling rate of the incoming signal
        n_fft: int > 0 [scalar] number of FFT components
        n_mels: int > 0 [scalar] number of Mel bands to generate
        fmin: float >= 0 [scalar] lowest frequency (in Hz)
        fmax: float >= 0 [scalar] highest frequency (in Hz).
            If `None`, use `fmax = fs / 2.0`
        htk: use HTK formula instead of Slaney
        log_base: base of the logarithm; natural log when None
    """

    def __init__(
        self,
        fs: int = 16000,
        n_fft: int = 512,
        n_mels: int = 80,
        fmin: float = None,
        fmax: float = None,
        htk: bool = False,
        log_base: float = None,
    ):
        super().__init__()
        fmin = 0 if fmin is None else fmin
        fmax = fs / 2 if fmax is None else fmax
        _mel_options = dict(
            sr=fs,
            n_fft=n_fft,
            n_mels=n_mels,
            fmin=fmin,
            fmax=fmax,
            htk=htk,
        )
        self.mel_options = _mel_options
        self.log_base = log_base
        # Note(kamo): The mel matrix of librosa is different from kaldi.
        melmat = librosa.filters.mel(**_mel_options)
        # melmat: (D2, D1) -> (D1, D2)
        self.register_buffer("melmat", torch.from_numpy(melmat.T).float())

    def extra_repr(self):
        return ", ".join(f"{k}={v}" for k, v in self.mel_options.items())

    def forward(
        self,
        feat: torch.Tensor,
        ilens: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Project STFT power features onto the mel basis and take the log.

        Args:
            feat: (B, T, D1) input features
            ilens: (B,) valid lengths; if None, all frames are valid

        Returns:
            (B, T, D2) log-mel features and the (possibly created) ilens.
        """
        # feat: (B, T, D1) x melmat: (D1, D2) -> mel_feat: (B, T, D2)
        mel_feat = torch.matmul(feat, self.melmat)
        # Clamp before log to avoid -inf on silent bins
        mel_feat = torch.clamp(mel_feat, min=1e-10)
        if self.log_base is None:
            logmel_feat = mel_feat.log()
        elif self.log_base == 2.0:
            logmel_feat = mel_feat.log2()
        elif self.log_base == 10.0:
            logmel_feat = mel_feat.log10()
        else:
            # BUGFIX: torch.log() requires a Tensor input; passing the raw
            # float ``self.log_base`` raised a TypeError. Wrap it in a tensor
            # to apply the change-of-base formula.
            logmel_feat = mel_feat.log() / torch.log(
                torch.tensor(self.log_base, dtype=mel_feat.dtype)
            )
        # Zero padding
        if ilens is not None:
            logmel_feat = logmel_feat.masked_fill(
                make_pad_mask(ilens, logmel_feat, 1), 0.0
            )
        else:
            ilens = feat.new_full(
                [feat.size(0)], fill_value=feat.size(1), dtype=torch.long
            )
        return logmel_feat, ilens
| 2,578 | 29.341176 | 74 | py |
espnet | espnet-master/espnet2/layers/abs_normalize.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class AbsNormalize(torch.nn.Module, ABC):
    """Abstract base class for feature-normalization layers."""

    @abstractmethod
    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # return output, output_lengths
        raise NotImplementedError
| 344 | 23.642857 | 69 | py |
espnet | espnet-master/espnet2/layers/sinc_conv.py | #!/usr/bin/env python3
# 2020, Technische Universität München; Ludwig Kürzinger
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Sinc convolutions."""
import math
from typing import Union
import torch
from typeguard import check_argument_types
class LogCompression(torch.nn.Module):
    """Log Compression Activation.

    Applies ``log(abs(x) + 1)`` element-wise.
    """

    def __init__(self):
        """Initialize."""
        super().__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the log-compression nonlinearity element-wise to ``x``."""
        return x.abs().add(1).log()
class SincConv(torch.nn.Module):
    """Sinc Convolution.

    This module performs a convolution using Sinc filters in time domain as kernel.
    Sinc filters function as band passes in spectral domain.
    The filtering is done as a convolution in time domain, and no transformation
    to spectral domain is necessary.

    This implementation of the Sinc convolution is heavily inspired
    by Ravanelli et al. https://github.com/mravanelli/SincNet,
    and adapted for the ESpnet toolkit.
    Combine Sinc convolutions with a log compression activation function, as in:
    https://arxiv.org/abs/2010.07597

    Notes:
        Currently, the same filters are applied to all input channels.
        The windowing function is applied on the kernel to obtained a smoother filter,
        and not on the input values, which is different to traditional ASR.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: int = 0,
        dilation: int = 1,
        window_func: str = "hamming",
        scale_type: str = "mel",
        fs: Union[int, float] = 16000,
    ):
        """Initialize Sinc convolutions.

        Args:
            in_channels: Number of input channels.
            out_channels: Number of output channels.
            kernel_size: Sinc filter kernel size (needs to be an odd number).
            stride: See torch.nn.functional.conv1d.
            padding: See torch.nn.functional.conv1d.
            dilation: See torch.nn.functional.conv1d.
            window_func: Window function on the filter, one of ["hamming", "none"].
            scale_type: Filterbank initialization scale, one of ["mel", "bark"].
            fs (str, int, float): Sample rate of the input data

        Raises:
            NotImplementedError: For an unknown window function or scale type.
            ValueError: If kernel_size is even.
        """
        assert check_argument_types()
        super().__init__()
        window_funcs = {
            "none": self.none_window,
            "hamming": self.hamming_window,
        }
        if window_func not in window_funcs:
            raise NotImplementedError(
                f"Window function has to be one of {list(window_funcs.keys())}",
            )
        self.window_func = window_funcs[window_func]
        scale_choices = {
            "mel": MelScale,
            "bark": BarkScale,
        }
        if scale_type not in scale_choices:
            raise NotImplementedError(
                f"Scale has to be one of {list(scale_choices.keys())}",
            )
        self.scale = scale_choices[scale_type]
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.padding = padding
        self.dilation = dilation
        self.stride = stride
        self.fs = float(fs)
        # An odd kernel size keeps the filter symmetric around a center tap
        if self.kernel_size % 2 == 0:
            raise ValueError("SincConv: Kernel size must be odd.")
        # f holds the learnable (start, stop) band edges; set in init_filters()
        self.f = None
        # Precompute the half-kernel sample grid and the window on it; the
        # full symmetric kernel is assembled in _create_filters()
        N = self.kernel_size // 2
        self._x = 2 * math.pi * torch.linspace(1, N, N)
        self._window = self.window_func(torch.linspace(1, N, N))
        # init may get overwritten by E2E network,
        # but is still required to calculate output dim
        self.init_filters()

    @staticmethod
    def sinc(x: torch.Tensor) -> torch.Tensor:
        """Sinc function."""
        # Small offset avoids division by zero at x == 0
        x2 = x + 1e-6
        return torch.sin(x2) / x2

    @staticmethod
    def none_window(x: torch.Tensor) -> torch.Tensor:
        """Identity-like windowing function."""
        return torch.ones_like(x)

    @staticmethod
    def hamming_window(x: torch.Tensor) -> torch.Tensor:
        """Hamming Windowing function."""
        L = 2 * x.size(0) + 1
        x = x.flip(0)
        return 0.54 - 0.46 * torch.cos(2.0 * math.pi * x / L)

    def init_filters(self):
        """Initialize filters with filterbank values."""
        f = self.scale.bank(self.out_channels, self.fs)
        # Band edges are stored normalized by the sample rate
        f = torch.div(f, self.fs)
        self.f = torch.nn.Parameter(f, requires_grad=True)

    def _create_filters(self, device: str):
        """Calculate coefficients.

        This function (re-)calculates the filter convolutions coefficients.
        """
        # abs() keeps the band edges valid even if training drives f negative
        f_mins = torch.abs(self.f[:, 0])
        f_maxs = torch.abs(self.f[:, 0]) + torch.abs(self.f[:, 1] - self.f[:, 0])
        self._x = self._x.to(device)
        self._window = self._window.to(device)
        f_mins_x = torch.matmul(f_mins.view(-1, 1), self._x.view(1, -1))
        f_maxs_x = torch.matmul(f_maxs.view(-1, 1), self._x.view(1, -1))
        # Band-pass kernel as the difference of two sinc low-passes, windowed
        kernel = (torch.sin(f_maxs_x) - torch.sin(f_mins_x)) / (0.5 * self._x)
        kernel = kernel * self._window
        # Mirror the half kernel around the center tap -> symmetric filter
        kernel_left = kernel.flip(1)
        kernel_center = (2 * f_maxs - 2 * f_mins).unsqueeze(1)
        filters = torch.cat([kernel_left, kernel_center, kernel], dim=1)
        # filters: (out_channels, 1, kernel_size), the layout conv1d expects
        filters = filters.view(filters.size(0), 1, filters.size(1))
        self.sinc_filters = filters

    def forward(self, xs: torch.Tensor) -> torch.Tensor:
        """Sinc convolution forward function.

        Args:
            xs: Batch in form of torch.Tensor (B, C_in, D_in).

        Returns:
            xs: Batch in form of torch.Tensor (B, C_out, D_out).
        """
        # Filters depend on the learnable band edges, so rebuild each call
        self._create_filters(xs.device)
        xs = torch.nn.functional.conv1d(
            xs,
            self.sinc_filters,
            padding=self.padding,
            stride=self.stride,
            dilation=self.dilation,
            groups=self.in_channels,
        )
        return xs

    def get_odim(self, idim: int) -> int:
        """Obtain the output dimension of the filter."""
        # Standard conv1d output-length formula
        D_out = idim + 2 * self.padding - self.dilation * (self.kernel_size - 1) - 1
        D_out = (D_out // self.stride) + 1
        return D_out
class MelScale:
    """Mel frequency scale."""

    @staticmethod
    def convert(f):
        """Convert Hz to mel."""
        return 1125.0 * torch.log(f / 700.0 + 1.0)

    @staticmethod
    def invert(x):
        """Convert mel to Hz."""
        return 700.0 * (torch.exp(x / 1125.0) - 1.0)

    @classmethod
    def bank(cls, channels: int, fs: float) -> torch.Tensor:
        """Obtain initialization values for the mel scale.

        Args:
            channels: Number of channels.
            fs: Sample rate.

        Returns:
            torch.Tensor: (channels, 2) tensor holding the start and stop
            frequency of every filter.
        """
        assert check_argument_types()
        # Band-pass edges span 30 Hz .. Nyquist, spaced uniformly in mel
        lowest = torch.tensor(30.0)
        highest = torch.tensor(fs * 0.5)
        mel_points = torch.linspace(
            cls.convert(lowest), cls.convert(highest), channels + 2
        )
        hz_points = cls.invert(mel_points)
        # Channel i spans [hz_points[i], hz_points[i + 2]]
        starts, stops = hz_points[:-2], hz_points[2:]
        return torch.stack([starts, stops], dim=1)
class BarkScale:
    """Bark frequency scale.

    Has wider bandwidths at lower frequencies, see:
    Critical bandwidth: BARK
    Zwicker and Terhardt, 1980
    """

    @staticmethod
    def convert(f):
        """Convert Hz to Bark."""
        scaled = 1.4 * torch.pow(f / 1000.0, 2.0)
        return 75.0 * torch.pow(scaled + 1.0, 0.69) + 25.0

    @staticmethod
    def invert(x):
        """Convert Bark to Hz."""
        base = torch.pow((x - 25.0) / 75.0, 1.0 / 0.69)
        return 1000.0 * torch.sqrt((base - 1.0) / 1.4)

    @classmethod
    def bank(cls, channels: int, fs: float) -> torch.Tensor:
        """Obtain initialization values for the Bark scale.

        Args:
            channels: Number of channels.
            fs: Sample rate.

        Returns:
            torch.Tensor: (channels, 2) tensor holding the start and stop
            frequency of every filter.
        """
        assert check_argument_types()
        # min and max BARK center frequencies by approximation
        lowest_center = torch.tensor(70.0)
        highest_center = torch.tensor(fs * 0.45)
        centers = cls.invert(
            torch.linspace(cls.convert(lowest_center), cls.convert(highest_center), channels)
        )
        # Each filter extends half its critical bandwidth on both sides
        half_width = torch.div(cls.convert(centers), 2)
        return torch.stack([centers - half_width, centers + half_width], dim=1)
| 9,028 | 31.832727 | 84 | py |
espnet | espnet-master/espnet2/layers/time_warp.py | """Time warp module."""
import torch
from espnet.nets.pytorch_backend.nets_utils import pad_list
DEFAULT_TIME_WARP_MODE = "bicubic"
def time_warp(x: torch.Tensor, window: int = 80, mode: str = DEFAULT_TIME_WARP_MODE):
    """Time warping using torch.interpolate.

    Args:
        x: (Batch, Time, Freq)
        window: time warp parameter
        mode: Interpolate mode
    """
    original_shape = x.size()
    # bicubic interpolation needs a 4D (or larger) tensor: add a channel axis
    if x.dim() == 3:
        x = x[:, None]
    t = x.shape[2]
    if t - window <= window:
        # Sequence too short to pick a warp center; return the input as-is
        return x.view(*original_shape)
    center = torch.randint(window, t - window, (1,))[0]
    warped = torch.randint(center - window, center + window, (1,))[0] + 1
    # Stretch/squeeze the two halves so they meet at the warped boundary:
    # left: (Batch, Channel, warped, Freq), right: (Batch, Channel, t - warped, Freq)
    left = torch.nn.functional.interpolate(
        x[:, :, :center], (warped, x.shape[3]), mode=mode, align_corners=False
    )
    right = torch.nn.functional.interpolate(
        x[:, :, center:], (t - warped, x.shape[3]), mode=mode, align_corners=False
    )
    if x.requires_grad:
        x = torch.cat([left, right], dim=-2)
    else:
        # In-place writes avoid an extra allocation when autograd is off
        x[:, :, :warped] = left
        x[:, :, warped:] = right
    return x.view(*original_shape)
class TimeWarp(torch.nn.Module):
    """Time warping using torch.interpolate.

    Args:
        window: time warp parameter
        mode: Interpolate mode
    """

    def __init__(self, window: int = 80, mode: str = DEFAULT_TIME_WARP_MODE):
        super().__init__()
        self.window = window
        self.mode = mode

    def extra_repr(self):
        return "window={}, mode={}".format(self.window, self.mode)

    def forward(self, x: torch.Tensor, x_lengths: torch.Tensor = None):
        """Forward function.

        Args:
            x: (Batch, Time, Freq)
            x_lengths: (Batch,)
        """
        if x_lengths is None or all(le == x_lengths[0] for le in x_lengths):
            # Uniform lengths: a single warp can be applied batch-wide
            return time_warp(x, window=self.window, mode=self.mode), x_lengths
        # FIXME(kamo): I have no idea to batchify Timewarp
        warped = []
        for i in range(x.size(0)):
            segment = x[i][None, : x_lengths[i]]
            warped.append(
                time_warp(segment, window=self.window, mode=self.mode)[0]
            )
        return pad_list(warped, 0.0), x_lengths
| 2,526 | 27.393258 | 85 | py |
espnet | espnet-master/espnet2/fst/lm_rescore.py | import math
from typing import List, Tuple
import torch
try:
    import k2
except (ImportError, ModuleNotFoundError):
    # k2 is optional; fall back to None so importing this module succeeds
    # and callers can raise a helpful error lazily.
    # BUGFIX: the original `except ImportError or ModuleNotFoundError:`
    # short-circuits to `except ImportError:` — the `or` of two classes is
    # just the first class, not a union of handlers.
    k2 = None
def remove_repeated_and_leq(tokens: List[int], blank_id: int = 0):
    """Collapse repeats and drop blanks from a token alignment.

    First merges consecutive duplicate tokens (CTC-style collapse), then
    removes every token whose id is <= ``blank_id``.  The result may be used
    as input of a transformer decoder or a neural language model.

    This could equivalently be done by tokenizing word_seqs with a tokenizer,
    or by composing word_seqs_fsas with L_inv.fst, or composing token_seqs
    with ctc_topo; the direct implementation here avoids needing any such
    extra object (tokenizer, L.fst or ctc_topo).
    """
    result = []
    previous = None
    for token in tokens:
        if token == previous:
            continue
        previous = token
        if token > blank_id:
            result.append(token)
    return result
def _intersect_device(
    a_fsas: k2.Fsa,
    b_fsas: k2.Fsa,
    b_to_a_map: torch.Tensor,
    sorted_match_a: bool,
    batch_size: int = 500,
):
    """Wrap k2.intersect_device, batching b_fsas to avoid CUDA OOM.

    The arguments and return value of this function are the same as
    ``k2.intersect_device``; ``b_fsas`` is split into chunks of at most
    ``batch_size`` FSAs, each chunk is intersected separately, and the
    per-chunk lattices are concatenated.

    NOTE: You can decrease batch_size in case of CUDA out of memory error.
    """
    assert k2 is not None, "please follow 'tools/installers' to install"
    num_fsas = b_fsas.shape[0]
    if num_fsas <= batch_size:
        # Small enough to intersect in a single call
        return k2.intersect_device(
            a_fsas, b_fsas, b_to_a_map=b_to_a_map, sorted_match_a=sorted_match_a
        )
    chunks = []
    for start in range(0, num_fsas, batch_size):
        end = min(start + batch_size, num_fsas)
        indexes = torch.arange(start, end).to(b_to_a_map)
        fsas = k2.index_fsa(b_fsas, indexes)
        b_to_a = k2.index_select(b_to_a_map, indexes)
        chunks.append(
            k2.intersect_device(
                a_fsas, fsas, b_to_a_map=b_to_a, sorted_match_a=sorted_match_a
            )
        )
    return k2.cat(chunks)
def compute_am_scores_and_lm_scores(
    lats: k2.Fsa,
    word_fsas_with_epsilon_loops: k2.Fsa,
    path_to_seq_map: torch.Tensor,
    device: str = "cuda",
    batch_size: int = 500,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Compute AM and LM scores of n-best lists (represented as word_fsas).

    Args:
        lats:
            An FsaVec, which is the output of `k2.intersect_dense_pruned`.
            It must have the attribute `lm_scores`.
        word_fsas_with_epsilon_loops:
            An FsaVec representing a n-best list. Note that it has been processed
            by `k2.add_epsilon_self_loops`.
        path_to_seq_map:
            A 1-D torch.Tensor with dtype torch.int32. path_to_seq_map[i] indicates
            which sequence the i-th Fsa in word_fsas_with_epsilon_loops belongs to.
            path_to_seq_map.numel() == word_fsas_with_epsilon_loops.arcs.dim0().
        batch_size:
            Batchify the n-best list when intersecting with inverted_lats.
            You could tune this to avoid GPU OOM issue or increase the GPU usage.
    Returns:
        Return a tuple of (1-D torch.Tensor, 1-D torch.Tensor) containing
        the AM and LM scores of each path.
        `am_scores.numel() == word_fsas_with_epsilon_loops.shape[0]`
        `lm_scores.numel() == word_fsas_with_epsilon_loops.shape[0]`
    """
    assert (
        k2 is not None
    ), "k2 is not installed, please follow 'tools/installers' to install"
    assert len(lats.shape) == 3
    # k2.compose() currently does not support b_to_a_map. To void
    # replicating `lats`, we use k2.intersect_device here.
    #
    # lats has phone IDs as `labels` and word IDs as aux_labels, so we
    # need to invert it here.
    inverted_lats = k2.invert(lats)
    # Now the `labels` of inverted_lats are word IDs (a 1-D torch.Tensor)
    # and its `aux_labels` are phone IDs ( a k2.RaggedInt with 2 axes)
    # Remove its `aux_labels` since it is not needed in the
    # following computation
    del inverted_lats.aux_labels
    inverted_lats = k2.arc_sort(inverted_lats)
    # Intersect each n-best word FSA with the lattice of its own utterance
    am_path_lats = _intersect_device(
        inverted_lats,
        word_fsas_with_epsilon_loops,
        b_to_a_map=path_to_seq_map,
        sorted_match_a=True,
        batch_size=batch_size,
    )
    am_path_lats = k2.top_sort(k2.connect(am_path_lats))
    # The `scores` of every arc consists of `am_scores` and `lm_scores`
    # NOTE(review): total scores are computed on CPU and moved back — presumably
    # a workaround; confirm before changing tot_score_device.
    tot_score_device = "cpu"
    if hasattr(lats, "lm_scores"):
        # Subtract the LM part to leave the pure AM contribution on each arc
        am_path_lats.scores = am_path_lats.scores - am_path_lats.lm_scores
        am_scores = (
            am_path_lats.to(tot_score_device)
            .get_tot_scores(use_double_scores=True, log_semiring=False)
            .to(device)
        )
        # Start to compute lm_scores
        am_path_lats.scores = am_path_lats.lm_scores
        lm_scores = (
            am_path_lats.to(tot_score_device)
            .get_tot_scores(use_double_scores=True, log_semiring=False)
            .to(device)
        )
    else:
        # Lattice carries no separate LM scores: report totals as AM only
        am_scores = (
            am_path_lats.to(tot_score_device)
            .get_tot_scores(use_double_scores=True, log_semiring=False)
            .to(device)
        )
        lm_scores = None
    return am_scores, lm_scores
def nbest_am_lm_scores(
    lats: k2.Fsa,
    num_paths: int,
    device: str = "cuda",
    batch_size: int = 500,
):
    """Compute am scores with word_seqs

    Compatible with both ctc_decoding or TLG decoding.
    """
    assert (
        k2 is not None
    ), "k2 is not installed, please follow 'tools/installers' to install"
    # Sample an n-best list of paths from the lattice
    paths = k2.random_paths(lats, num_paths=num_paths, use_double_scores=True)
    if isinstance(lats.aux_labels, torch.Tensor):
        word_seqs = k2.ragged.index(lats.aux_labels.contiguous(), paths)
    else:
        # '_k2.RaggedInt' object has no attribute 'contiguous'
        word_seqs = lats.aux_labels.index(paths)
        word_seqs = word_seqs.remove_axis(word_seqs.num_axes - 2)
    # With ctc_decoding, word_seqs stores token_ids.
    # With TLG decoding, word_seqs stores word_ids.
    word_seqs = word_seqs.remove_values_leq(0)
    # Deduplicate so each unique word sequence is scored only once
    unique_word_seqs, num_repeats, new2old = word_seqs.unique(
        need_num_repeats=True, need_new2old_indexes=True
    )
    seq_to_path_shape = unique_word_seqs.shape.get_layer(0)
    path_to_seq_map = seq_to_path_shape.row_ids(1)
    # used to split final computed tot_scores
    seq_to_path_splits = seq_to_path_shape.row_splits(1)
    unique_word_seqs = unique_word_seqs.remove_axis(0)
    # Turn each unique word sequence into a linear FSA for intersection
    word_fsas = k2.linear_fsa(unique_word_seqs)
    word_fsas_with_epsilon_loops = k2.add_epsilon_self_loops(word_fsas)
    am_scores, lm_scores = compute_am_scores_and_lm_scores(
        lats, word_fsas_with_epsilon_loops, path_to_seq_map, device, batch_size
    )
    # Recover the token-level sequences for the selected paths
    token_seqs = k2.ragged.index(lats.labels.contiguous(), paths)
    token_seqs = token_seqs.remove_axis(0)
    token_ids, _ = token_seqs.index(new2old, axis=0)
    token_ids = token_ids.tolist()
    # Now remove repeated tokens and 0s and -1s.
    token_ids = [remove_repeated_and_leq(tokens) for tokens in token_ids]
    return am_scores, lm_scores, token_ids, new2old, path_to_seq_map, seq_to_path_splits
| 7,517 | 33.486239 | 88 | py |
espnet | espnet-master/espnet2/train/abs_gan_espnet_model.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""ESPnetModel abstract class for GAN-based training."""
from abc import ABC, abstractmethod
from typing import Dict, Union
import torch
from espnet2.train.abs_espnet_model import AbsESPnetModel
class AbsGANESPnetModel(AbsESPnetModel, torch.nn.Module, ABC):
    """The common abstract class among each GAN-based task.

    "ESPnetModel" is referred to a class which inherits torch.nn.Module,
    and makes the dnn-models "forward" as its member field, a.k.a. delegate
    pattern. And "forward" must accept the argument "forward_generator" and
    return the dict of "loss", "stats", "weight", and "optim_idx".
    "optim_idx" for generator must be 0 and that for discriminator must be 1.

    Example:
        >>> from espnet2.tasks.abs_task import AbsTask
        >>> class YourESPnetModel(AbsGANESPnetModel):
        ...     def forward(self, input, input_lengths, forward_generator=True):
        ...         ...
        ...         if forward_generator:
        ...             # return loss for the generator
        ...             # optim idx 0 indicates generator optimizer
        ...             return dict(loss=loss, stats=stats, weight=weight, optim_idx=0)
        ...         else:
        ...             # return loss for the discriminator
        ...             # optim idx 1 indicates discriminator optimizer
        ...             return dict(loss=loss, stats=stats, weight=weight, optim_idx=1)
        >>> class YourTask(AbsTask):
        ...     @classmethod
        ...     def build_model(cls, args: argparse.Namespace) -> YourESPnetModel:
    """

    @abstractmethod
    def forward(
        self,
        forward_generator: bool = True,
        **batch: torch.Tensor,
    ) -> Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor], int]]:
        """Return the generator loss or the discriminator loss.

        This method must have an argument "forward_generator" to switch the generator
        loss calculation and the discriminator loss calculation. If forward_generator
        is true, return the generator loss with optim_idx 0. If forward_generator is
        false, return the discriminator loss with optim_idx 1.

        Args:
            forward_generator (bool): Whether to return the generator loss or the
                discriminator loss. This must have the default value.

        Returns:
            Dict[str, Any]:
                * loss (Tensor): Loss scalar tensor.
                * stats (Dict[str, float]): Statistics to be monitored.
                * weight (Tensor): Weight tensor to summarize losses.
                * optim_idx (int): Optimizer index (0 for G and 1 for D).
        """
        raise NotImplementedError

    @abstractmethod
    def collect_feats(self, **batch: torch.Tensor) -> Dict[str, torch.Tensor]:
        raise NotImplementedError
| 2,913 | 40.042254 | 87 | py |
espnet | espnet-master/espnet2/train/reporter.py | """Reporter module."""
import dataclasses
import datetime
import logging
import time
import warnings
from collections import defaultdict
from contextlib import contextmanager
from pathlib import Path
from typing import ContextManager, Dict, List, Optional, Sequence, Tuple, Union
import humanfriendly
import numpy as np
import torch
from packaging.version import parse as V
from typeguard import check_argument_types, check_return_type
Num = Union[float, int, complex, torch.Tensor, np.ndarray]
_reserved = {"time", "total_count"}
def to_reported_value(v: Num, weight: Num = None) -> "ReportedValue":
    """Wrap a scalar (and optional weight) into a ReportedValue.

    Tensor/ndarray inputs must contain exactly one element; they are
    converted to plain Python scalars before wrapping.
    """
    assert check_argument_types()
    if isinstance(v, (torch.Tensor, np.ndarray)):
        if np.prod(v.shape) != 1:
            raise ValueError(f"v must be 0 or 1 dimension: {len(v.shape)}")
        v = v.item()
    if isinstance(weight, (torch.Tensor, np.ndarray)):
        if np.prod(weight.shape) != 1:
            raise ValueError(f"weight must be 0 or 1 dimension: {len(weight.shape)}")
        weight = weight.item()
    reported = Average(v) if weight is None else WeightedAverage(v, weight)
    assert check_return_type(reported)
    return reported
def aggregate(values: Sequence["ReportedValue"]) -> Num:
    """Aggregate a homogeneous sequence of ReportedValue into one number.

    ``Average`` values are combined with a nan-ignoring mean;
    ``WeightedAverage`` values are combined with a weighted mean after
    discarding non-finite entries.  Returns nan when nothing valid is left.

    Raises:
        ValueError: if the sequence mixes different ReportedValue types.
    """
    assert check_argument_types()
    for v in values:
        if not isinstance(v, type(values[0])):
            raise ValueError(
                f"Can't use different Reported type together: "
                f"{type(v)} != {type(values[0])}"
            )
    if len(values) == 0:
        warnings.warn("No stats found")
        retval = np.nan
    elif isinstance(values[0], Average):
        retval = np.nanmean([v.value for v in values])
    elif isinstance(values[0], WeightedAverage):
        # Excludes non finite values
        invalid_indices = set()
        for i, v in enumerate(values):
            if not np.isfinite(v.value) or not np.isfinite(v.weight):
                invalid_indices.add(i)
        values = [v for i, v in enumerate(values) if i not in invalid_indices]
        if len(values) != 0:
            # Calc weighed average. Weights are changed to sum-to-1.
            # (idiom fix: the original iterated enumerate() in these two
            # sums without ever using the index)
            sum_weights = sum(v.weight for v in values)
            sum_value = sum(v.value * v.weight for v in values)
            if sum_weights == 0:
                warnings.warn("weight is zero")
                retval = np.nan
            else:
                retval = sum_value / sum_weights
        else:
            warnings.warn("No valid stats found")
            retval = np.nan
    else:
        raise NotImplementedError(f"type={type(values[0])}")
    assert check_return_type(retval)
    return retval
def wandb_get_prefix(key: str):
    """Return the wandb section prefix ("valid/", "train/", ...) for a key."""
    # First matching prefix wins; anything else lands under "metrics/".
    for head, prefix in (("valid", "valid/"), ("train", "train/"), ("attn", "attn/")):
        if key.startswith(head):
            return prefix
    return "metrics/"
class ReportedValue:
    """Marker base class for values registered to a SubReporter."""

    pass
@dataclasses.dataclass(frozen=True)
class Average(ReportedValue):
    """A value aggregated with the simple (nan-aware) mean."""

    value: Num
@dataclasses.dataclass(frozen=True)
class WeightedAverage(ReportedValue):
    """A value aggregated with a weighted mean.

    to_reported_value() always stores a scalar here, so the field is a
    plain Num (the previous Tuple[Num, Num] annotation was incorrect).
    """

    value: Num
    weight: Num
class SubReporter:
    """This class is used in Reporter.

    See the docstring of Reporter for the usage.
    """

    def __init__(self, key: str, epoch: int, total_count: int):
        assert check_argument_types()
        # key: observation name, e.g. "train" or "valid"
        self.key = key
        self.epoch = epoch
        self.start_time = time.perf_counter()
        # stats: Dict[str, List[ReportedValue]]; one list entry per step
        self.stats = defaultdict(list)
        self._finished = False
        # Number of iterations accumulated over all epochs
        self.total_count = total_count
        # Number of steps registered within this epoch
        self.count = 0
        # Stat names already registered in the current step
        self._seen_keys_in_the_step = set()

    def get_total_count(self) -> int:
        """Returns the number of iterations over all epochs."""
        return self.total_count

    def get_epoch(self) -> int:
        """Return the epoch this sub-reporter belongs to."""
        return self.epoch

    def next(self):
        """Close up this step and reset state for the next step"""
        for key, stats_list in self.stats.items():
            if key not in self._seen_keys_in_the_step:
                # Fill nan value if the key is not registered in this step
                if isinstance(stats_list[0], WeightedAverage):
                    stats_list.append(to_reported_value(np.nan, 0))
                elif isinstance(stats_list[0], Average):
                    stats_list.append(to_reported_value(np.nan))
                else:
                    raise NotImplementedError(f"type={type(stats_list[0])}")
            # Every stat list must stay aligned with the step counter
            assert len(stats_list) == self.count, (len(stats_list), self.count)
        self._seen_keys_in_the_step = set()

    def register(
        self,
        stats: Dict[str, Optional[Union[Num, Dict[str, Num]]]],
        weight: Num = None,
    ) -> None:
        """Register stats of the current step.

        Args:
            stats: Mapping of stat name to value; a None value becomes NaN.
            weight: Optional weight shared by all stats in this call.
        """
        assert check_argument_types()
        if self._finished:
            raise RuntimeError("Already finished")
        if len(self._seen_keys_in_the_step) == 0:
            # Increment count as the first register in this step
            self.total_count += 1
            self.count += 1

        for key2, v in stats.items():
            if key2 in _reserved:
                raise RuntimeError(f"{key2} is reserved.")
            if key2 in self._seen_keys_in_the_step:
                raise RuntimeError(f"{key2} is registered twice.")
            if v is None:
                v = np.nan
            r = to_reported_value(v, weight)
            if key2 not in self.stats:
                # If it's the first time to register the key,
                # append nan values in front of the the value
                # to make it same length to the other stats
                # e.g.
                # stat A: [0.4, 0.3, 0.5]
                # stat B: [nan, nan, 0.2]
                nan = to_reported_value(np.nan, None if weight is None else 0)
                self.stats[key2].extend(
                    r if i == self.count - 1 else nan for i in range(self.count)
                )
            else:
                self.stats[key2].append(r)
            self._seen_keys_in_the_step.add(key2)

    def log_message(self, start: int = None, end: int = None) -> str:
        """Format the aggregated stats of steps [start, end) as one line."""
        if self._finished:
            raise RuntimeError("Already finished")
        if start is None:
            start = 0
        if start < 0:
            # Negative start counts back from the current step
            start = self.count + start
        if end is None:
            end = self.count

        if self.count == 0 or start == end:
            return ""

        message = f"{self.epoch}epoch:{self.key}:" f"{start + 1}-{end}batch: "

        for idx, (key2, stats_list) in enumerate(self.stats.items()):
            assert len(stats_list) == self.count, (len(stats_list), self.count)
            # values: List[ReportValue]
            values = stats_list[start:end]
            # NOTE(review): the second condition compares idx against
            # len(stats_list) (the step count), not the number of stat keys;
            # this looks like a separator guard with the wrong length — confirm.
            if idx != 0 and idx != len(stats_list):
                message += ", "
            v = aggregate(values)
            # Scientific notation for very large or very small magnitudes
            if abs(v) > 1.0e3:
                message += f"{key2}={v:.3e}"
            elif abs(v) > 1.0e-3:
                message += f"{key2}={v:.3f}"
            else:
                message += f"{key2}={v:.3e}"
        return message

    def tensorboard_add_scalar(self, summary_writer, start: int = None):
        """Write aggregated stats of steps [start, count) to tensorboard."""
        if start is None:
            start = 0
        if start < 0:
            start = self.count + start

        for key2, stats_list in self.stats.items():
            assert len(stats_list) == self.count, (len(stats_list), self.count)
            # values: List[ReportValue]
            values = stats_list[start:]
            v = aggregate(values)
            summary_writer.add_scalar(f"{key2}", v, self.total_count)

    def wandb_log(self, start: int = None):
        """Log aggregated stats of steps [start, count) to Weights & Biases."""
        import wandb

        if start is None:
            start = 0
        if start < 0:
            start = self.count + start

        d = {}
        for key2, stats_list in self.stats.items():
            assert len(stats_list) == self.count, (len(stats_list), self.count)
            # values: List[ReportValue]
            values = stats_list[start:]
            v = aggregate(values)
            d[wandb_get_prefix(key2) + key2] = v
        d["iteration"] = self.total_count
        wandb.log(d)

    def finished(self) -> None:
        """Mark this sub-reporter closed; further register() calls raise."""
        self._finished = True

    @contextmanager
    def measure_time(self, name: str):
        """Context manager registering the elapsed wall time as stat `name`."""
        start = time.perf_counter()
        yield start
        t = time.perf_counter() - start
        self.register({name: t})

    def measure_iter_time(self, iterable, name: str):
        """Yield from `iterable`, registering per-item fetch time as `name`."""
        iterator = iter(iterable)
        while True:
            try:
                start = time.perf_counter()
                retval = next(iterator)
                t = time.perf_counter() - start
                self.register({name: t})
                yield retval
            except StopIteration:
                break
class Reporter:
    """Reporter class.

    Examples:
        >>> reporter = Reporter()
        >>> with reporter.observe('train') as sub_reporter:
        ...     for batch in iterator:
        ...         stats = dict(loss=0.2)
        ...         sub_reporter.register(stats)
    """

    def __init__(self, epoch: int = 0):
        assert check_argument_types()
        if epoch < 0:
            raise ValueError(f"epoch must be 0 or more: {epoch}")
        self.epoch = epoch
        # stats: Dict[int, Dict[str, Dict[str, float]]]
        # e.g. self.stats[epoch]['train']['loss']
        self.stats = {}

    def get_epoch(self) -> int:
        """Return the current epoch number."""
        return self.epoch

    def set_epoch(self, epoch: int) -> None:
        """Set the current epoch number (must be non-negative)."""
        if epoch < 0:
            raise ValueError(f"epoch must be 0 or more: {epoch}")
        self.epoch = epoch

    @contextmanager
    def observe(self, key: str, epoch: int = None) -> ContextManager[SubReporter]:
        """Yield a SubReporter for one observation (e.g. one training epoch).

        NOTE(review): finish_epoch() is skipped if the body raises, since
        there is no try/finally here — confirm this is intended.
        """
        sub_reporter = self.start_epoch(key, epoch)
        yield sub_reporter
        # Receive the stats from sub_reporter
        self.finish_epoch(sub_reporter)

    def start_epoch(self, key: str, epoch: int = None) -> SubReporter:
        """Create a SubReporter for `key`, carrying over total_count."""
        if epoch is not None:
            if epoch < 0:
                raise ValueError(f"epoch must be 0 or more: {epoch}")
            self.epoch = epoch

        if self.epoch - 1 not in self.stats or key not in self.stats[self.epoch - 1]:
            # If the previous epoch doesn't exist for some reason,
            # maybe due to bug, this case also indicates 0-count.
            if self.epoch - 1 != 0:
                warnings.warn(
                    f"The stats of the previous epoch={self.epoch - 1}"
                    f"doesn't exist."
                )
            total_count = 0
        else:
            total_count = self.stats[self.epoch - 1][key]["total_count"]

        sub_reporter = SubReporter(key, self.epoch, total_count)
        # Clear the stats for the next epoch if it exists
        self.stats.pop(epoch, None)
        return sub_reporter

    def finish_epoch(self, sub_reporter: SubReporter) -> None:
        """Aggregate the SubReporter's stats and store them for this epoch."""
        if self.epoch != sub_reporter.epoch:
            raise RuntimeError(
                f"Don't change epoch during observation: "
                f"{self.epoch} != {sub_reporter.epoch}"
            )

        # Calc mean of current stats and set it as previous epochs stats
        stats = {}
        for key2, values in sub_reporter.stats.items():
            v = aggregate(values)
            stats[key2] = v

        stats["time"] = datetime.timedelta(
            seconds=time.perf_counter() - sub_reporter.start_time
        )
        stats["total_count"] = sub_reporter.total_count
        # GPU memory stats are only reported once CUDA has actually been used
        if V(torch.__version__) >= V("1.4.0"):
            if torch.cuda.is_initialized():
                stats["gpu_max_cached_mem_GB"] = (
                    torch.cuda.max_memory_reserved() / 2**30
                )
        else:
            if torch.cuda.is_available() and torch.cuda.max_memory_cached() > 0:
                stats["gpu_cached_mem_GB"] = torch.cuda.max_memory_cached() / 2**30

        self.stats.setdefault(self.epoch, {})[sub_reporter.key] = stats
        sub_reporter.finished()

    def sort_epochs_and_values(
        self, key: str, key2: str, mode: str
    ) -> List[Tuple[int, float]]:
        """Return the epoch which resulted the best value.

        Example:
            >>> val = reporter.sort_epochs_and_values('eval', 'loss', 'min')
            >>> e_1best, v_1best = val[0]
            >>> e_2best, v_2best = val[1]
        """
        if mode not in ("min", "max"):
            raise ValueError(f"mode must min or max: {mode}")
        if not self.has(key, key2):
            raise KeyError(f"{key}.{key2} is not found: {self.get_all_keys()}")

        # iterate from the last epoch
        values = [(e, self.stats[e][key][key2]) for e in self.stats]

        if mode == "min":
            values = sorted(values, key=lambda x: x[1])
        else:
            values = sorted(values, key=lambda x: -x[1])
        return values

    def sort_epochs(self, key: str, key2: str, mode: str) -> List[int]:
        """Return epochs sorted from best to worst value of `key.key2`."""
        return [e for e, v in self.sort_epochs_and_values(key, key2, mode)]

    def sort_values(self, key: str, key2: str, mode: str) -> List[float]:
        """Return values of `key.key2` sorted from best to worst."""
        return [v for e, v in self.sort_epochs_and_values(key, key2, mode)]

    def get_best_epoch(self, key: str, key2: str, mode: str, nbest: int = 0) -> int:
        """Return the epoch holding the nbest-th best value of `key.key2`."""
        return self.sort_epochs(key, key2, mode)[nbest]

    def check_early_stopping(
        self,
        patience: int,
        key1: str,
        key2: str,
        mode: str,
        epoch: int = None,
        logger=None,
    ) -> bool:
        """Return True if `key1.key2` has not improved for `patience` epochs."""
        if logger is None:
            logger = logging
        if epoch is None:
            epoch = self.get_epoch()

        best_epoch = self.get_best_epoch(key1, key2, mode)
        if epoch - best_epoch > patience:
            logger.info(
                f"[Early stopping] {key1}.{key2} has not been "
                f"improved {epoch - best_epoch} epochs continuously. "
                f"The training was stopped at {epoch}epoch"
            )
            return True
        else:
            return False

    def has(self, key: str, key2: str, epoch: int = None) -> bool:
        """Return whether the stat `key.key2` exists for the epoch."""
        if epoch is None:
            epoch = self.get_epoch()
        return (
            epoch in self.stats
            and key in self.stats[epoch]
            and key2 in self.stats[epoch][key]
        )

    def log_message(self, epoch: int = None) -> str:
        """Format all stats of the epoch as a single human-readable line."""
        if epoch is None:
            epoch = self.get_epoch()

        message = ""
        for key, d in self.stats[epoch].items():
            _message = ""
            for key2, v in d.items():
                if v is not None:
                    if len(_message) != 0:
                        _message += ", "
                    if isinstance(v, float):
                        # Scientific notation for extreme magnitudes
                        if abs(v) > 1.0e3:
                            _message += f"{key2}={v:.3e}"
                        elif abs(v) > 1.0e-3:
                            _message += f"{key2}={v:.3f}"
                        else:
                            _message += f"{key2}={v:.3e}"
                    elif isinstance(v, datetime.timedelta):
                        _v = humanfriendly.format_timespan(v)
                        _message += f"{key2}={_v}"
                    else:
                        _message += f"{key2}={v}"
            if len(_message) != 0:
                if len(message) == 0:
                    message += f"{epoch}epoch results: "
                else:
                    message += ", "
                message += f"[{key}] {_message}"
        return message

    def get_value(self, key: str, key2: str, epoch: int = None):
        """Return the stored value of `key.key2` for the epoch."""
        if not self.has(key, key2):
            raise KeyError(f"{key}.{key2} is not found in stats: {self.get_all_keys()}")
        if epoch is None:
            epoch = self.get_epoch()
        return self.stats[epoch][key][key2]

    def get_keys(self, epoch: int = None) -> Tuple[str, ...]:
        """Returns keys1 e.g. train,eval."""
        if epoch is None:
            epoch = self.get_epoch()
        return tuple(self.stats[epoch])

    def get_keys2(self, key: str, epoch: int = None) -> Tuple[str, ...]:
        """Returns keys2 e.g. loss,acc."""
        if epoch is None:
            epoch = self.get_epoch()
        d = self.stats[epoch][key]
        # "time" and "total_count" are bookkeeping entries, not metrics
        keys2 = tuple(k for k in d if k not in ("time", "total_count"))
        return keys2

    def get_all_keys(self, epoch: int = None) -> Tuple[Tuple[str, str], ...]:
        """Return all (key, key2) pairs stored for the epoch."""
        if epoch is None:
            epoch = self.get_epoch()
        all_keys = []
        for key in self.stats[epoch]:
            for key2 in self.stats[epoch][key]:
                all_keys.append((key, key2))
        return tuple(all_keys)

    def matplotlib_plot(self, output_dir: Union[str, Path]):
        """Plot stats using Matplotlib and save images."""
        # Union of all stat names over all observation keys
        keys2 = set.union(*[set(self.get_keys2(k)) for k in self.get_keys()])
        for key2 in keys2:
            keys = [k for k in self.get_keys() if key2 in self.get_keys2(k)]
            plt = self._plot_stats(keys, key2)
            p = output_dir / f"{key2}.png"
            p.parent.mkdir(parents=True, exist_ok=True)
            plt.savefig(p)

    def _plot_stats(self, keys: Sequence[str], key2: str):
        """Plot the per-epoch history of `key2` for each observation key."""
        assert check_argument_types()
        # str is also Sequence[str]
        if isinstance(keys, str):
            raise TypeError(f"Input as [{keys}]")

        import matplotlib

        matplotlib.use("agg")
        import matplotlib.pyplot as plt
        import matplotlib.ticker as ticker

        plt.clf()

        epochs = np.arange(1, self.get_epoch() + 1)
        for key in keys:
            # Epochs lacking this stat are drawn as gaps (NaN)
            y = [
                self.stats[e][key][key2]
                if e in self.stats
                and key in self.stats[e]
                and key2 in self.stats[e][key]
                else np.nan
                for e in epochs
            ]
            assert len(epochs) == len(y), "Bug?"

            plt.plot(epochs, y, label=key, marker="x")
        plt.legend()
        plt.title(f"{key2} vs epoch")
        # Force integer tick for x-axis
        plt.gca().get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
        plt.xlabel("epoch")
        plt.ylabel(key2)
        plt.grid()

        return plt

    def tensorboard_add_scalar(
        self, summary_writer, epoch: int = None, key1: str = None
    ):
        """Write the epoch's stats to a tensorboard SummaryWriter."""
        if epoch is None:
            epoch = self.get_epoch()

        # NOTE(review): assumes a "train" entry exists for this epoch — confirm
        total_count = self.stats[epoch]["train"]["total_count"]
        if key1 == "train":
            summary_writer.add_scalar("iter_epoch", epoch, total_count)
        if key1 is not None:
            key1_iterator = tuple([key1])
        else:
            key1_iterator = self.get_keys(epoch)
        for key1 in key1_iterator:
            for key2 in self.get_keys2(key1):
                summary_writer.add_scalar(
                    f"{key2}", self.stats[epoch][key1][key2], total_count
                )

    def wandb_log(self, epoch: int = None):
        """Log the epoch's stats to Weights & Biases."""
        import wandb

        if epoch is None:
            epoch = self.get_epoch()

        d = {}
        for key1 in self.get_keys(epoch):
            for key2 in self.stats[epoch][key1]:
                if key2 in ("time", "total_count"):
                    continue
                key = f"{key1}_{key2}_epoch"
                d[wandb_get_prefix(key) + key] = self.stats[epoch][key1][key2]
        d["epoch"] = epoch
        wandb.log(d)

    def state_dict(self):
        """Return a picklable snapshot of the reporter state."""
        return {"stats": self.stats, "epoch": self.epoch}

    def load_state_dict(self, state_dict: dict):
        """Restore the state produced by state_dict()."""
        self.epoch = state_dict["epoch"]
        self.stats = state_dict["stats"]
| 19,602 | 32.740103 | 88 | py |
espnet | espnet-master/espnet2/train/dataset.py | import collections
import copy
import functools
import logging
import numbers
import re
from abc import ABC, abstractmethod
from typing import Any, Callable, Collection, Dict, Mapping, Tuple, Union
import h5py
import humanfriendly
import kaldiio
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from typeguard import check_argument_types, check_return_type
from espnet2.fileio.npy_scp import NpyScpReader
from espnet2.fileio.rand_gen_dataset import (
FloatRandomGenerateDataset,
IntRandomGenerateDataset,
)
from espnet2.fileio.read_text import (
RandomTextReader,
load_num_sequence_text,
read_2columns_text,
read_label,
)
from espnet2.fileio.rttm import RttmReader
from espnet2.fileio.score_scp import SingingScoreReader
from espnet2.fileio.sound_scp import SoundScpReader
from espnet2.utils.sized_dict import SizedDict
class AdapterForSoundScpReader(collections.abc.Mapping):
    """Mapping adapter that always yields a plain ndarray per utterance.

    The wrapped loader may return either an ndarray directly, or a
    (rate, array) / (array, rate) pair; the sampling rate is checked for
    consistency across utterances and stripped off.
    """

    def __init__(self, loader, dtype=None):
        assert check_argument_types()
        self.loader = loader
        self.dtype = dtype
        # Sampling rate of the first tuple-style entry; later entries
        # must agree with it.
        self.rate = None

    def keys(self):
        return self.loader.keys()

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        return iter(self.loader)

    def __getitem__(self, key: str) -> np.ndarray:
        retval = self.loader[key]

        if not isinstance(retval, tuple):
            # Normal ark case: the loader returns the array directly
            assert isinstance(retval, np.ndarray), type(retval)
            array = retval
        else:
            assert len(retval) == 2, len(retval)
            first, second = retval
            if isinstance(first, int) and isinstance(second, np.ndarray):
                # sound scp case: (rate, samples)
                rate, array = first, second
            elif isinstance(second, int) and isinstance(first, np.ndarray):
                # Extended ark format case: (samples, rate)
                array, rate = first, second
            else:
                raise RuntimeError(
                    f"Unexpected type: {type(retval[0])}, {type(retval[1])}"
                )

            if self.rate is not None and self.rate != rate:
                raise RuntimeError(
                    f"Sampling rates are mismatched: {self.rate} != {rate}"
                )
            self.rate = rate

        # array: (NSample, Channel) or (Nsample,)
        if self.dtype is not None:
            array = array.astype(self.dtype)

        assert isinstance(array, np.ndarray), type(array)
        return array
class H5FileWrapper:
    """Read-only mapping-like view over a single HDF5 file."""

    def __init__(self, path: str):
        self.path = path
        # Opened once, read-only, for the lifetime of this wrapper
        self.h5_file = h5py.File(path, "r")

    def __repr__(self) -> str:
        return str(self.h5_file)

    def __len__(self) -> int:
        return len(self.h5_file)

    def __iter__(self):
        return iter(self.h5_file)

    def __getitem__(self, key) -> np.ndarray:
        # `[()]` materializes the h5py dataset into an in-memory value
        return self.h5_file[key][()]
class AdapterForSingingScoreScpReader(collections.abc.Mapping):
    """Mapping adapter that unpacks a singing-score entry to (tempo, notes)."""

    def __init__(self, loader):
        assert check_argument_types()
        self.loader = loader

    def keys(self):
        return self.loader.keys()

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        return iter(self.loader)

    def __getitem__(self, key: str) -> np.ndarray:
        score = self.loader[key]
        # Expect exactly three fields with an int tempo and a list of notes
        assert len(score) == 3
        assert isinstance(score["tempo"], int)
        assert isinstance(score["note"], list)
        return score["tempo"], score["note"]
class AdapterForLabelScpReader(collections.abc.Mapping):
    """Mapping adapter converting label entries to (times, labels).

    Each raw entry is a list of [start, end, label]; the times become an
    (N, 2) float array and the labels a list.
    """

    def __init__(self, loader):
        assert check_argument_types()
        self.loader = loader

    def keys(self):
        return self.loader.keys()

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        return iter(self.loader)

    def __getitem__(self, key: str) -> np.ndarray:
        entries = self.loader[key]
        assert isinstance(entries, list)

        # The times array stays float64; values are first rounded through
        # float32 (as the original parser did) before being stored.
        sample_time = np.zeros((len(entries), 2))
        sample_label = []
        for i, entry in enumerate(entries):
            sample_time[i, 0] = np.float32(entry[0])
            sample_time[i, 1] = np.float32(entry[1])
            sample_label.append(entry[2])

        assert isinstance(sample_time, np.ndarray) and isinstance(sample_label, list)
        return sample_time, sample_label
def sound_loader(path, float_dtype=None, multi_columns=False):
    """Create a loader for a wav.scp-style file yielding ndarrays.

    The file is as follows:
        utterance_id_A /some/where/a.wav
        utterance_id_B /some/where/a.flac

    NOTE(kamo): SoundScpReader doesn't support pipe-fashion
    like Kaldi e.g. "cat a.wav |".
    NOTE(kamo): The audio signal is normalized to [-1,1] range.
    """
    reader = SoundScpReader(
        path,
        always_2d=False,
        dtype=float_dtype,
        multi_columns=multi_columns,
    )
    # SoundScpReader.__getitem__() returns Tuple[int, ndarray]; the adapter
    # strips the sampling rate so only the ndarray is exposed.
    return AdapterForSoundScpReader(reader)
def multi_columns_sound_loader(path, float_dtype=None):
    """sound_loader() variant where each scp line lists one wav per channel."""
    return sound_loader(path, float_dtype=float_dtype, multi_columns=True)
def score_loader(path):
    """Create a loader for singing-score files yielding (tempo, notes)."""
    return AdapterForSingingScoreScpReader(SingingScoreReader(fname=path))
def label_loader(path):
    """Create a loader for label files yielding (times, labels)."""
    return AdapterForLabelScpReader(read_label(path))
def kaldi_loader(path, float_dtype=None, max_cache_fd: int = 0):
    """Create a loader for Kaldi scp/ark files yielding ndarrays."""
    scp_reader = kaldiio.load_scp(path, max_cache_fd=max_cache_fd)
    return AdapterForSoundScpReader(scp_reader, float_dtype)
def rand_int_loader(filepath, loader_type):
    """Create a random-int dataset from a type tag like "rand_int_3_10".

    The inclusive bounds are encoded in the loader_type suffix.
    """
    try:
        low, high = (int(x) for x in loader_type[len("rand_int_") :].split("_"))
    except ValueError:
        raise RuntimeError(f"e.g rand_int_3_10: but got {loader_type}")
    return IntRandomGenerateDataset(filepath, low, high)
# Registry of the loader types supported by ESPnetDataset.
# Each key is a regex matched against the configured type string; each value:
#   func:   factory returning a Mapping[utterance_id -> data]
#   kwargs: names of ESPnetDataset attributes forwarded to func
#   help:   description shown in command-line help
DATA_TYPES = {
    "sound": dict(
        func=sound_loader,
        kwargs=["float_dtype"],
        help="Audio format types which supported by sndfile wav, flac, etc."
        "\n\n"
        " utterance_id_a a.wav\n"
        " utterance_id_b b.wav\n"
        " ...",
    ),
    "multi_columns_sound": dict(
        func=multi_columns_sound_loader,
        kwargs=["float_dtype"],
        help="Enable multi columns wav.scp. "
        "The following text file can be loaded as multi channels audio data"
        "\n\n"
        " utterance_id_a a.wav a2.wav\n"
        " utterance_id_b b.wav b2.wav\n"
        " ...",
    ),
    "score": dict(
        func=score_loader,
        kwargs=[],
        help="Return text as is. The text contains tempo and note info.\n"
        "For each note, 'start' 'end' 'syllabel' 'midi' and 'phones' are included. "
        "\n\n"
        " utterance_id_A tempo_a start_1 end_1 syllable_1 midi_1 phones_1 ...\n"
        " utterance_id_B tempo_b start_1 end_1 syllable_1 midi_1 phones_1 ...\n"
        " ...",
    ),
    "duration": dict(
        func=label_loader,
        kwargs=[],
        help="Return text as is. The text must be converted to ndarray "
        "by 'preprocess'."
        "\n\n"
        " utterance_id_A start_1 end_1 phone_1 start_2 end_2 phone_2 ...\n"
        " utterance_id_B start_1 end_1 phone_1 start_2 end_2 phone_2 ...\n"
        " ...",
    ),
    "kaldi_ark": dict(
        func=kaldi_loader,
        kwargs=["max_cache_fd"],
        help="Kaldi-ark file type."
        "\n\n"
        " utterance_id_A /some/where/a.ark:123\n"
        " utterance_id_B /some/where/a.ark:456\n"
        " ...",
    ),
    "npy": dict(
        func=NpyScpReader,
        kwargs=[],
        help="Npy file format."
        "\n\n"
        " utterance_id_A /some/where/a.npy\n"
        " utterance_id_B /some/where/b.npy\n"
        " ...",
    ),
    "text_int": dict(
        func=functools.partial(load_num_sequence_text, loader_type="text_int"),
        kwargs=[],
        help="A text file in which is written a sequence of interger numbers "
        "separated by space."
        "\n\n"
        " utterance_id_A 12 0 1 3\n"
        " utterance_id_B 3 3 1\n"
        " ...",
    ),
    "csv_int": dict(
        func=functools.partial(load_num_sequence_text, loader_type="csv_int"),
        kwargs=[],
        help="A text file in which is written a sequence of interger numbers "
        "separated by comma."
        "\n\n"
        " utterance_id_A 100,80\n"
        " utterance_id_B 143,80\n"
        " ...",
    ),
    "text_float": dict(
        func=functools.partial(load_num_sequence_text, loader_type="text_float"),
        kwargs=[],
        help="A text file in which is written a sequence of float numbers "
        "separated by space."
        "\n\n"
        " utterance_id_A 12. 3.1 3.4 4.4\n"
        " utterance_id_B 3. 3.12 1.1\n"
        " ...",
    ),
    "csv_float": dict(
        func=functools.partial(load_num_sequence_text, loader_type="csv_float"),
        kwargs=[],
        help="A text file in which is written a sequence of float numbers "
        "separated by comma."
        "\n\n"
        " utterance_id_A 12.,3.1,3.4,4.4\n"
        " utterance_id_B 3.,3.12,1.1\n"
        " ...",
    ),
    "text": dict(
        func=read_2columns_text,
        kwargs=[],
        help="Return text as is. The text must be converted to ndarray "
        "by 'preprocess'."
        "\n\n"
        " utterance_id_A hello world\n"
        " utterance_id_B foo bar\n"
        " ...",
    ),
    "random_text": dict(
        func=RandomTextReader,
        kwargs=[],
        help="Return text as is. The text must be converted to ndarray "
        "by 'preprocess'."
        "\n\n"
        " hello world\n"
        " foo bar\n"
        " ...",
    ),
    "hdf5": dict(
        func=H5FileWrapper,
        kwargs=[],
        help="A HDF5 file which contains arrays at the first level or the second level."
        " >>> f = h5py.File('file.h5')\n"
        " >>> array1 = f['utterance_id_A']\n"
        " >>> array2 = f['utterance_id_B']\n",
    ),
    "rand_float": dict(
        func=FloatRandomGenerateDataset,
        kwargs=[],
        help="Generate random float-ndarray which has the given shapes "
        "in the file."
        "\n\n"
        " utterance_id_A 3,4\n"
        " utterance_id_B 10,4\n"
        " ...",
    ),
    "rand_int_\\d+_\\d+": dict(
        func=rand_int_loader,
        kwargs=["loader_type"],
        help="e.g. 'rand_int_0_10'. Generate random int-ndarray which has the given "
        "shapes in the path. "
        "Give the lower and upper value by the file type. e.g. "
        "rand_int_0_10 -> Generate integers from 0 to 10."
        "\n\n"
        " utterance_id_A 3,4\n"
        " utterance_id_B 10,4\n"
        " ...",
    ),
    "rttm": dict(
        func=RttmReader,
        kwargs=[],
        help="rttm file loader, currently support for speaker diarization"
        "\n\n"
        " SPEAKER file1 1 0 1023 <NA> <NA> spk1 <NA>"
        " SPEAKER file1 2 4000 3023 <NA> <NA> spk2 <NA>"
        " SPEAKER file1 3 500 4023 <NA> <NA> spk1 <NA>"
        " END file1 <NA> 4023 <NA> <NA> <NA> <NA>"
        " ...",
    ),
}
class AbsDataset(Dataset, ABC):
    """Abstract interface of datasets returning (uid, {name: array})."""

    @abstractmethod
    def has_name(self, name) -> bool:
        """Return whether this dataset provides the given data name."""
        raise NotImplementedError

    @abstractmethod
    def names(self) -> Tuple[str, ...]:
        """Return all data names, e.g. ("speech", "text")."""
        raise NotImplementedError

    @abstractmethod
    def __getitem__(self, uid) -> Tuple[Any, Dict[str, np.ndarray]]:
        """Return one sample keyed by utterance id."""
        raise NotImplementedError
class ESPnetDataset(AbsDataset):
    """Pytorch Dataset class for ESPNet.

    Examples:
        >>> dataset = ESPnetDataset([('wav.scp', 'input', 'sound'),
        ...                          ('token_int', 'output', 'text_int')],
        ...                         )
        ... uttid, data = dataset['uttid']
        {'input': per_utt_array, 'output': per_utt_array}
    """

    def __init__(
        self,
        path_name_type_list: Collection[Tuple[str, str, str]],
        preprocess: Callable[
            [str, Dict[str, np.ndarray]], Dict[str, np.ndarray]
        ] = None,
        float_dtype: str = "float32",
        int_dtype: str = "long",
        max_cache_size: Union[float, int, str] = 0.0,
        max_cache_fd: int = 0,
    ):
        """Initialize the dataset.

        Args:
            path_name_type_list: Triplets of (file path, data name, loader type).
            preprocess: Optional callable applied as preprocess(uid, data).
            float_dtype: dtype float arrays are cast to in __getitem__.
            int_dtype: dtype integer arrays are cast to in __getitem__.
            max_cache_size: In-memory cache limit; 0 disables caching.
                A humanfriendly size string (e.g. "1GB") is also accepted.
            max_cache_fd: Max file descriptors kept open by the kaldi_ark
                loader.
        """
        assert check_argument_types()
        if len(path_name_type_list) == 0:
            raise ValueError(
                '1 or more elements are required for "path_name_type_list"'
            )

        # Deep-copy so later mutation by the caller cannot affect us
        path_name_type_list = copy.deepcopy(path_name_type_list)
        self.preprocess = preprocess

        self.float_dtype = float_dtype
        self.int_dtype = int_dtype
        self.max_cache_fd = max_cache_fd

        self.loader_dict = {}
        self.debug_info = {}
        for path, name, _type in path_name_type_list:
            if name in self.loader_dict:
                raise RuntimeError(f'"{name}" is duplicated for data-key')
            loader = self._build_loader(path, _type)
            self.loader_dict[name] = loader
            self.debug_info[name] = path, _type
            if len(self.loader_dict[name]) == 0:
                raise RuntimeError(f"{path} has no samples")

            # TODO(kamo): Should check consistency of each utt-keys?

        if isinstance(max_cache_size, str):
            max_cache_size = humanfriendly.parse_size(max_cache_size)
        self.max_cache_size = max_cache_size
        if max_cache_size > 0:
            # Shared dict so DataLoader worker processes reuse cached items
            self.cache = SizedDict(shared=True)
        else:
            self.cache = None

    def _build_loader(
        self, path: str, loader_type: str
    ) -> Mapping[str, Union[np.ndarray, torch.Tensor, str, numbers.Number]]:
        """Helper function to instantiate Loader.

        Args:
            path: The file path
            loader_type: loader_type. sound, npy, text_int, text_float, etc
        """
        for key, dic in DATA_TYPES.items():
            # e.g. loader_type="sound"
            # -> return DATA_TYPES["sound"]["func"](path)
            if re.match(key, loader_type):
                # Forward only the keyword arguments this loader declares
                kwargs = {}
                for key2 in dic["kwargs"]:
                    if key2 == "loader_type":
                        kwargs["loader_type"] = loader_type
                    elif key2 == "float_dtype":
                        kwargs["float_dtype"] = self.float_dtype
                    elif key2 == "int_dtype":
                        kwargs["int_dtype"] = self.int_dtype
                    elif key2 == "max_cache_fd":
                        kwargs["max_cache_fd"] = self.max_cache_fd
                    else:
                        raise RuntimeError(f"Not implemented keyword argument: {key2}")

                func = dic["func"]
                try:
                    return func(path, **kwargs)
                except Exception:
                    if hasattr(func, "__name__"):
                        name = func.__name__
                    else:
                        name = str(func)
                    logging.error(f"An error happened with {name}({path})")
                    raise
        else:
            raise RuntimeError(f"Not supported: loader_type={loader_type}")

    def has_name(self, name) -> bool:
        """Return whether the given data name is available."""
        return name in self.loader_dict

    def names(self) -> Tuple[str, ...]:
        """Return all registered data names."""
        return tuple(self.loader_dict)

    def __iter__(self):
        # Iterate over the utterance ids of the first registered loader
        return iter(next(iter(self.loader_dict.values())))

    def __repr__(self):
        _mes = self.__class__.__name__
        _mes += "("
        for name, (path, _type) in self.debug_info.items():
            _mes += f'\n {name}: {{"path": "{path}", "type": "{_type}"}}'
        _mes += f"\n preprocess: {self.preprocess})"
        return _mes

    def __getitem__(self, uid: Union[str, int]) -> Tuple[str, Dict[str, np.ndarray]]:
        """Return (uid, {name: ndarray}) for one sample."""
        assert check_argument_types()

        # Change integer-id to string-id
        if isinstance(uid, int):
            d = next(iter(self.loader_dict.values()))
            uid = list(d)[uid]

        if self.cache is not None and uid in self.cache:
            data = self.cache[uid]
            return uid, data

        data = {}
        # 1. Load data from each loaders
        for name, loader in self.loader_dict.items():
            try:
                value = loader[uid]
                if isinstance(value, (list)):
                    value = np.array(value)
                if not isinstance(
                    value, (np.ndarray, torch.Tensor, str, numbers.Number, tuple)
                ):
                    raise TypeError(
                        (
                            "Must be ndarray, torch.Tensor, "
                            "str, Number or tuple: {}".format(type(value))
                        )
                    )
            except Exception:
                # Attach the offending file/type/id before re-raising
                path, _type = self.debug_info[name]
                logging.error(
                    f"Error happened with path={path}, type={_type}, id={uid}"
                )
                raise

            # torch.Tensor is converted to ndarray
            if isinstance(value, torch.Tensor):
                value = value.numpy()
            elif isinstance(value, numbers.Number):
                value = np.array([value])
            data[name] = value

        # 2. [Option] Apply preprocessing
        #   e.g. espnet2.train.preprocessor:CommonPreprocessor
        if self.preprocess is not None:
            data = self.preprocess(uid, data)

        # 3. Force data-precision
        for name in data:
            value = data[name]
            if not isinstance(value, np.ndarray):
                raise RuntimeError(
                    f"All values must be converted to np.ndarray object "
                    f'by preprocessing, but "{name}" is still {type(value)}.'
                )

            # Cast to desired type
            if value.dtype.kind == "f":
                value = value.astype(self.float_dtype)
            elif value.dtype.kind == "i":
                value = value.astype(self.int_dtype)
            else:
                raise NotImplementedError(f"Not supported dtype: {value.dtype}")
            data[name] = value

        if self.cache is not None and self.cache.size < self.max_cache_size:
            self.cache[uid] = data

        retval = uid, data
        assert check_return_type(retval)
        return retval
| 18,511 | 31.591549 | 88 | py |
espnet | espnet-master/espnet2/train/gan_trainer.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Trainer module for GAN-based training."""
import argparse
import dataclasses
import logging
import time
from contextlib import contextmanager
from typing import Dict, Iterable, List, Optional, Sequence, Tuple
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler, AbsScheduler
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.recursive_op import recursive_average
from espnet2.train.distributed_utils import DistributedOption
from espnet2.train.reporter import SubReporter
from espnet2.train.trainer import Trainer, TrainerOptions
from espnet2.utils.build_dataclass import build_dataclass
from espnet2.utils.types import str2bool
# ReduceOp is only importable when torch was built with distributed support.
if torch.distributed.is_available():
    from torch.distributed import ReduceOp

# AMP (autocast/GradScaler) exists from torch>=1.6; provide no-op fallbacks
# so the rest of the module can reference the same names unconditionally.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import GradScaler, autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):  # NOQA
        yield

    GradScaler = None

# fairscale (sharded training) is optional; None signals "not installed".
try:
    import fairscale
except ImportError:
    fairscale = None
@dataclasses.dataclass
class GANTrainerOptions(TrainerOptions):
    """Trainer option dataclass for GANTrainer."""

    # Whether the generator is updated before the discriminator in each step
    generator_first: bool
class GANTrainer(Trainer):
"""Trainer for GAN-based training.
If you'd like to use this trainer, the model must inherit
espnet.train.abs_gan_espnet_model.AbsGANESPnetModel.
"""
    @classmethod
    def build_options(cls, args: argparse.Namespace) -> TrainerOptions:
        """Build options consumed by train(), eval(), and plot_attention()."""
        assert check_argument_types()
        # Populate GANTrainerOptions fields from matching argparse attributes
        return build_dataclass(GANTrainerOptions, args)
    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser):
        """Add additional arguments for GAN-trainer."""
        # GAN training alternates generator/discriminator updates; this flag
        # selects which sub-model is updated first within each step.
        parser.add_argument(
            "--generator_first",
            type=str2bool,
            default=False,
            help="Whether to update generator first.",
        )
@classmethod
def train_one_epoch(
cls,
model: torch.nn.Module,
iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],
optimizers: Sequence[torch.optim.Optimizer],
schedulers: Sequence[Optional[AbsScheduler]],
scaler: Optional[GradScaler],
reporter: SubReporter,
summary_writer,
options: GANTrainerOptions,
distributed_option: DistributedOption,
) -> bool:
"""Train one epoch."""
assert check_argument_types()
grad_noise = options.grad_noise
accum_grad = options.accum_grad
grad_clip = options.grad_clip
grad_clip_type = options.grad_clip_type
log_interval = options.log_interval
no_forward_run = options.no_forward_run
ngpu = options.ngpu
use_wandb = options.use_wandb
generator_first = options.generator_first
distributed = distributed_option.distributed
# Check unavailable options
# TODO(kan-bayashi): Support the use of these options
if accum_grad > 1:
raise NotImplementedError(
"accum_grad > 1 is not supported in GAN-based training."
)
if grad_noise:
raise NotImplementedError(
"grad_noise is not supported in GAN-based training."
)
if log_interval is None:
try:
log_interval = max(len(iterator) // 20, 10)
except TypeError:
log_interval = 100
model.train()
all_steps_are_invalid = True
# [For distributed] Because iteration counts are not always equals between
# processes, send stop-flag to the other processes if iterator is finished
iterator_stop = torch.tensor(0).to("cuda" if ngpu > 0 else "cpu")
start_time = time.perf_counter()
for iiter, (_, batch) in enumerate(
reporter.measure_iter_time(iterator, "iter_time"), 1
):
assert isinstance(batch, dict), type(batch)
if distributed:
torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
if iterator_stop > 0:
break
batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
if no_forward_run:
all_steps_are_invalid = False
continue
turn_start_time = time.perf_counter()
if generator_first:
turns = ["generator", "discriminator"]
else:
turns = ["discriminator", "generator"]
for turn in turns:
with autocast(scaler is not None):
with reporter.measure_time(f"{turn}_forward_time"):
retval = model(forward_generator=turn == "generator", **batch)
# Note(kamo):
# Supporting two patterns for the returned value from the model
# a. dict type
if isinstance(retval, dict):
loss = retval["loss"]
stats = retval["stats"]
weight = retval["weight"]
optim_idx = retval.get("optim_idx")
if optim_idx is not None and not isinstance(optim_idx, int):
if not isinstance(optim_idx, torch.Tensor):
raise RuntimeError(
"optim_idx must be int or 1dim torch.Tensor, "
f"but got {type(optim_idx)}"
)
if optim_idx.dim() >= 2:
raise RuntimeError(
"optim_idx must be int or 1dim torch.Tensor, "
f"but got {optim_idx.dim()}dim tensor"
)
if optim_idx.dim() == 1:
for v in optim_idx:
if v != optim_idx[0]:
raise RuntimeError(
"optim_idx must be 1dim tensor "
"having same values for all entries"
)
optim_idx = optim_idx[0].item()
else:
optim_idx = optim_idx.item()
# b. tuple or list type
else:
raise RuntimeError("model output must be dict.")
stats = {k: v for k, v in stats.items() if v is not None}
if ngpu > 1 or distributed:
# Apply weighted averaging for loss and stats
loss = (loss * weight.type(loss.dtype)).sum()
# if distributed, this method can also apply all_reduce()
stats, weight = recursive_average(stats, weight, distributed)
# Now weight is summation over all workers
loss /= weight
if distributed:
# NOTE(kamo): Multiply world_size since DistributedDataParallel
# automatically normalizes the gradient by world_size.
loss *= torch.distributed.get_world_size()
reporter.register(stats, weight)
with reporter.measure_time(f"{turn}_backward_time"):
if scaler is not None:
# Scales loss. Calls backward() on scaled loss
# to create scaled gradients.
# Backward passes under autocast are not recommended.
# Backward ops run in the same dtype autocast chose
# for corresponding forward ops.
scaler.scale(loss).backward()
else:
loss.backward()
if scaler is not None:
# Unscales the gradients of optimizer's assigned params in-place
for iopt, optimizer in enumerate(optimizers):
if optim_idx is not None and iopt != optim_idx:
continue
scaler.unscale_(optimizer)
# TODO(kan-bayashi): Compute grad norm without clipping
grad_norm = None
if grad_clip > 0.0:
# compute the gradient norm to check if it is normal or not
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(),
max_norm=grad_clip,
norm_type=grad_clip_type,
)
# PyTorch<=1.4, clip_grad_norm_ returns float value
if not isinstance(grad_norm, torch.Tensor):
grad_norm = torch.tensor(grad_norm)
if grad_norm is None or torch.isfinite(grad_norm):
all_steps_are_invalid = False
with reporter.measure_time(f"{turn}_optim_step_time"):
for iopt, (optimizer, scheduler) in enumerate(
zip(optimizers, schedulers)
):
if optim_idx is not None and iopt != optim_idx:
continue
if scaler is not None:
# scaler.step() first unscales the gradients of
# the optimizer's assigned params.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
else:
optimizer.step()
if isinstance(scheduler, AbsBatchStepScheduler):
scheduler.step()
else:
logging.warning(
f"The grad norm is {grad_norm}. " "Skipping updating the model."
)
# Must invoke scaler.update() if unscale_() is used in the
# iteration to avoid the following error:
# RuntimeError: unscale_() has already been called
# on this optimizer since the last update().
# Note that if the gradient has inf/nan values,
# scaler.step skips optimizer.step().
if scaler is not None:
for iopt, optimizer in enumerate(optimizers):
if optim_idx is not None and iopt != optim_idx:
continue
scaler.step(optimizer)
scaler.update()
for iopt, optimizer in enumerate(optimizers):
# NOTE(kan-bayashi): In the case of GAN, we need to clear
# the gradient of both optimizers after every update.
optimizer.zero_grad()
# Register lr and train/load time[sec/step],
# where step refers to accum_grad * mini-batch
reporter.register(
{
f"optim{optim_idx}_lr{i}": pg["lr"]
for i, pg in enumerate(optimizers[optim_idx].param_groups)
if "lr" in pg
},
)
reporter.register(
{f"{turn}_train_time": time.perf_counter() - turn_start_time}
)
turn_start_time = time.perf_counter()
reporter.register({"train_time": time.perf_counter() - start_time})
start_time = time.perf_counter()
# NOTE(kamo): Call log_message() after next()
reporter.next()
if iiter % log_interval == 0:
logging.info(reporter.log_message(-log_interval))
if summary_writer is not None:
reporter.tensorboard_add_scalar(summary_writer, -log_interval)
if use_wandb:
reporter.wandb_log()
else:
if distributed:
iterator_stop.fill_(1)
torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
return all_steps_are_invalid
@classmethod
@torch.no_grad()
def validate_one_epoch(
cls,
model: torch.nn.Module,
iterator: Iterable[Dict[str, torch.Tensor]],
reporter: SubReporter,
options: GANTrainerOptions,
distributed_option: DistributedOption,
) -> None:
"""Validate one epoch."""
assert check_argument_types()
ngpu = options.ngpu
no_forward_run = options.no_forward_run
distributed = distributed_option.distributed
generator_first = options.generator_first
model.eval()
# [For distributed] Because iteration counts are not always equals between
# processes, send stop-flag to the other processes if iterator is finished
iterator_stop = torch.tensor(0).to("cuda" if ngpu > 0 else "cpu")
for _, batch in iterator:
assert isinstance(batch, dict), type(batch)
if distributed:
torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
if iterator_stop > 0:
break
batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
if no_forward_run:
continue
if generator_first:
turns = ["generator", "discriminator"]
else:
turns = ["discriminator", "generator"]
for turn in turns:
retval = model(forward_generator=turn == "generator", **batch)
if isinstance(retval, dict):
stats = retval["stats"]
weight = retval["weight"]
else:
_, stats, weight = retval
if ngpu > 1 or distributed:
# Apply weighted averaging for stats.
# if distributed, this method can also apply all_reduce()
stats, weight = recursive_average(stats, weight, distributed)
reporter.register(stats, weight)
reporter.next()
else:
if distributed:
iterator_stop.fill_(1)
torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
| 15,057 | 40.368132 | 88 | py |
espnet | espnet-master/espnet2/train/distributed_utils.py | import dataclasses
import os
import socket
from typing import Optional
import torch
import torch.distributed
@dataclasses.dataclass
class DistributedOption:
    """Options controlling torch.distributed initialization.

    Resolution priority for rank/world-size/local-rank: explicit ``--dist_*``
    values first, then launcher-specific environment variables, then the
    generic env values (RANK, WORLD_SIZE, LOCAL_RANK).
    """

    # Enable distributed Training
    distributed: bool = False
    # torch.distributed.Backend: "nccl", "mpi", "gloo", or "tcp"
    dist_backend: str = "nccl"
    # if init_method="env://",
    # env values of "MASTER_PORT", "MASTER_ADDR", "WORLD_SIZE", and "RANK" are referred.
    dist_init_method: str = "env://"
    dist_world_size: Optional[int] = None
    dist_rank: Optional[int] = None
    local_rank: Optional[int] = None
    ngpu: int = 0
    dist_master_addr: Optional[str] = None
    dist_master_port: Optional[int] = None
    dist_launcher: Optional[str] = None
    multiprocessing_distributed: bool = True

    def init_options(self):
        """Resolve and validate distributed options (no torch.distributed calls)."""
        if self.distributed:
            if self.dist_init_method == "env://":
                if get_master_addr(self.dist_master_addr, self.dist_launcher) is None:
                    raise RuntimeError(
                        "--dist_master_addr or MASTER_ADDR must be set "
                        "if --dist_init_method == 'env://'"
                    )
                if get_master_port(self.dist_master_port) is None:
                    raise RuntimeError(
                        "--dist_master_port or MASTER_PORT must be set "
                        "if --dist_init_port == 'env://'"
                    )
            # About priority order:
            # If --dist_* is specified:
            #    Use the value of --dist_rank and overwrite it environ just in case.
            # elif environ is set:
            #    Use the value of environ and set it to self
            self.dist_rank = get_rank(self.dist_rank, self.dist_launcher)
            self.dist_world_size = get_world_size(
                self.dist_world_size, self.dist_launcher
            )
            self.local_rank = get_local_rank(self.local_rank, self.dist_launcher)

            if self.local_rank is not None:
                if self.ngpu > 1:
                    raise RuntimeError(f"Assuming 1GPU in this case: ngpu={self.ngpu}")
                if "CUDA_VISIBLE_DEVICES" in os.environ:
                    cvd = os.environ["CUDA_VISIBLE_DEVICES"]
                    if self.local_rank >= len(cvd.split(",")):
                        raise RuntimeError(
                            f"LOCAL_RANK={self.local_rank} is bigger "
                            f"than the number of visible devices: {cvd}"
                        )

            if (
                self.dist_rank is not None
                and self.dist_world_size is not None
                and self.dist_rank >= self.dist_world_size
            ):
                raise RuntimeError(
                    f"RANK >= WORLD_SIZE: {self.dist_rank} >= {self.dist_world_size}"
                )

            if self.dist_init_method == "env://":
                self.dist_master_addr = get_master_addr(
                    self.dist_master_addr, self.dist_launcher
                )
                self.dist_master_port = get_master_port(self.dist_master_port)
                if (
                    self.dist_master_addr is not None
                    and self.dist_master_port is not None
                ):
                    # Rewrite "env://" into an explicit tcp:// endpoint so the
                    # resolved address/port are used directly.
                    self.dist_init_method = (
                        f"tcp://{self.dist_master_addr}:{self.dist_master_port}"
                    )

    def init_torch_distributed(self):
        """Initialize the torch.distributed process group from these options."""
        if self.distributed:
            # See:
            # https://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/docs/env.html
            os.environ.setdefault("NCCL_DEBUG", "INFO")

            # See:
            # https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group
            os.environ.setdefault("NCCL_BLOCKING_WAIT", "1")

            torch.distributed.init_process_group(
                backend=self.dist_backend,
                init_method=self.dist_init_method,
                world_size=self.dist_world_size,
                rank=self.dist_rank,
            )

            # About distributed model:
            # if self.local_rank is not None and ngpu == 1
            #    => Distributed with n-Process and n-GPU
            # if self.local_rank is None and ngpu >= 1
            #    => Distributed with 1-Process and n-GPU
            if self.local_rank is not None and self.ngpu > 0:
                torch.cuda.set_device(self.local_rank)
def resolve_distributed_mode(args):
    """Decide and set ``args.distributed`` (and related fields) in place.

    Mutates ``args.distributed``, and possibly
    ``args.multiprocessing_distributed`` and ``args.local_rank``,
    based on node count, GPU count, and the launcher.

    Raises:
        RuntimeError: If required rank/local-rank information is missing, or
            if --dist_launcher='slurm' is used outside an srun step.
    """
    # Note that args.distributed is set by only this function.
    # and ArgumentParser doesn't have such option

    if args.multiprocessing_distributed:
        num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher)
        # a. multi-node
        if num_nodes > 1:
            args.distributed = True
        # b. single-node and multi-gpu with multiprocessing_distributed mode
        elif args.ngpu > 1:
            args.distributed = True
        # c. single-node and single-gpu
        else:
            args.distributed = False

        if args.ngpu <= 1:
            # Disable multiprocessing_distributed mode if 1process per node or cpu mode
            args.multiprocessing_distributed = False
        if args.ngpu == 1:
            # If the number of GPUs equals to 1 with multiprocessing_distributed mode,
            # LOCAL_RANK is always 0
            args.local_rank = 0

        if num_nodes > 1 and get_node_rank(args.dist_rank, args.dist_launcher) is None:
            raise RuntimeError(
                "--dist_rank or RANK must be set "
                "if --multiprocessing_distributed == true"
            )
        # Note that RANK, LOCAL_RANK, and WORLD_SIZE is automatically set,
        # so we don't need to check here
    else:
        # d. multiprocess and multi-gpu with external launcher
        # e.g. torch.distributed.launch
        if get_world_size(args.dist_world_size, args.dist_launcher) > 1:
            args.distributed = True
        # e. single-process
        else:
            args.distributed = False

        if args.distributed and args.ngpu > 0:
            if get_local_rank(args.local_rank, args.dist_launcher) is None:
                raise RuntimeError(
                    "--local_rank or LOCAL_RANK must be set "
                    "if --multiprocessing_distributed == false"
                )
        if args.distributed:
            if get_node_rank(args.dist_rank, args.dist_launcher) is None:
                raise RuntimeError(
                    "--dist_rank or RANK must be set "
                    "if --multiprocessing_distributed == false"
                )
    if args.distributed and args.dist_launcher == "slurm" and not is_in_slurm_step():
        raise RuntimeError("Launch by 'srun' command if --dist_launcher='slurm'")
def is_in_slurm_job() -> bool:
    """Check whether this process appears to run inside a Slurm job."""
    required = ("SLURM_PROCID", "SLURM_NTASKS")
    return all(name in os.environ for name in required)
def is_in_slurm_step() -> bool:
    """Check whether this process runs inside a Slurm job *step* (srun)."""
    if not is_in_slurm_job():
        return False
    step_vars = ("SLURM_STEP_NUM_NODES", "SLURM_STEP_NODELIST")
    return all(name in os.environ for name in step_vars)
def _int_or_none(x: Optional[str]) -> Optional[int]:
    """Convert ``x`` to int, passing None through unchanged."""
    return None if x is None else int(x)
def free_port():
    """Find free port using bind().

    There are some interval between finding this port and using it
    and the other process might catch the port by that time.
    Thus it is not guaranteed that the port is really empty.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("", 0))
        _addr, port = sock.getsockname()
        return port
def get_rank(prior=None, launcher: str = None) -> Optional[int]:
    """Resolve the distributed rank.

    Priority: explicit ``prior`` > launcher-specific env > $RANK (may be None).
    """
    if prior is not None:
        return int(prior)
    if launcher == "slurm":
        if not is_in_slurm_step():
            raise RuntimeError("This process seems not to be launched by 'srun'")
        return int(os.environ["SLURM_PROCID"])
    if launcher == "mpi":
        raise RuntimeError(
            "launcher=mpi is used for 'multiprocessing-distributed' mode"
        )
    if launcher is not None:
        raise RuntimeError(f"launcher='{launcher}' is not supported")
    # prior is None and RANK is None -> RANK = None
    return _int_or_none(os.environ.get("RANK"))
def get_world_size(prior=None, launcher: str = None) -> int:
    """Resolve the distributed world size.

    Priority: explicit ``prior`` > launcher-specific env > $WORLD_SIZE > 1.
    """
    if prior is not None:
        return int(prior)
    if launcher == "slurm":
        if not is_in_slurm_step():
            raise RuntimeError("This process seems not to be launched by 'srun'")
        return int(os.environ["SLURM_NTASKS"])
    if launcher == "mpi":
        raise RuntimeError(
            "launcher=mpi is used for 'multiprocessing-distributed' mode"
        )
    if launcher is not None:
        raise RuntimeError(f"launcher='{launcher}' is not supported")
    # prior is None and WORLD_SIZE is None -> WORLD_SIZE = 1
    return int(os.environ.get("WORLD_SIZE", "1"))
def get_local_rank(prior=None, launcher: str = None) -> Optional[int]:
    """Resolve the local rank (same value as the GPU device id), or None."""
    # LOCAL_RANK is same as GPU device id
    if prior is None:
        if launcher == "slurm":
            if not is_in_slurm_step():
                raise RuntimeError("This process seems not to be launched by 'srun'")
            prior = int(os.environ["SLURM_LOCALID"])
        elif launcher == "mpi":
            raise RuntimeError(
                "launcher=mpi is used for 'multiprocessing-distributed' mode"
            )
        elif launcher is not None:
            raise RuntimeError(f"launcher='{launcher}' is not supported")

    if prior is not None:
        return int(prior)
    if "LOCAL_RANK" in os.environ:
        return int(os.environ["LOCAL_RANK"])
    cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
    if cvd is not None and len(cvd.split(",")) == 1:
        # There are two possibility:
        # - "CUDA_VISIBLE_DEVICES" holds multiple GPU ids, e.g. "0,1,2":
        #   multiple devices are intended and local_rank is undeterminable.
        # - "CUDA_VISIBLE_DEVICES" holds a single id, e.g. "1":
        #   that id can be used as LOCAL_RANK.  Unset CUDA_VISIBLE_DEVICES
        #   because the other devices must stay visible to communicate.
        return int(os.environ.pop("CUDA_VISIBLE_DEVICES"))
    return None
def get_master_addr(prior=None, launcher: str = None) -> Optional[str]:
    """Resolve the master address for distributed initialization."""
    if prior is None and launcher == "slurm":
        if not is_in_slurm_step():
            raise RuntimeError("This process seems not to be launched by 'srun'")
        # e.g nodelist = foo[1-10],bar[3-8] or foo4,bar[2-10]
        nodelist = os.environ["SLURM_STEP_NODELIST"]
        prior = nodelist.split(",")[0].split("-")[0].replace("[", "")
    if prior is None:
        return os.environ.get("MASTER_ADDR")
    return str(prior)
def get_master_port(prior=None) -> Optional[int]:
    """Resolve the master port from ``prior`` or $MASTER_PORT."""
    if prior is None:
        return _int_or_none(os.environ.get("MASTER_PORT"))
    return prior
def get_node_rank(prior=None, launcher: str = None) -> Optional[int]:
    """Get Node Rank.

    Used for the "multiprocessing distributed" mode.  The initial RANK equals
    the node id in this case and the real rank is set as
    (nGPU * NodeID) + LOCAL_RANK inside torch.distributed.
    """
    if prior is not None:
        return prior
    if launcher == "slurm":
        if not is_in_slurm_step():
            raise RuntimeError("This process seems not to be launched by 'srun'")
        # Assume ntasks_per_node == 1
        if os.environ["SLURM_STEP_NUM_NODES"] != os.environ["SLURM_NTASKS"]:
            raise RuntimeError(
                "Run with --ntasks_per_node=1 if mutliprocessing_distributed=true"
            )
        return int(os.environ["SLURM_NODEID"])
    if launcher == "mpi":
        # Use mpi4py only for initialization and not using for communication
        from mpi4py import MPI

        comm = MPI.COMM_WORLD
        # Assume ntasks_per_node == 1 (We can't check whether it is or not)
        return comm.Get_rank()
    if launcher is not None:
        raise RuntimeError(f"launcher='{launcher}' is not supported")
    return _int_or_none(os.environ.get("RANK"))
def get_num_nodes(prior=None, launcher: str = None) -> Optional[int]:
    """Get the number of nodes.

    Used for the "multiprocessing distributed" mode.  RANK equals the node id
    in this case and the real rank is set as
    (nGPU * NodeID) + LOCAL_RANK inside torch.distributed.
    """
    if prior is not None:
        return prior
    if launcher == "slurm":
        if not is_in_slurm_step():
            raise RuntimeError("This process seems not to be launched by 'srun'")
        # Assume ntasks_per_node == 1
        if os.environ["SLURM_STEP_NUM_NODES"] != os.environ["SLURM_NTASKS"]:
            raise RuntimeError(
                "Run with --ntasks_per_node=1 if mutliprocessing_distributed=true"
            )
        return int(os.environ["SLURM_STEP_NUM_NODES"])
    if launcher == "mpi":
        # Use mpi4py only for initialization and not using for communication
        from mpi4py import MPI

        comm = MPI.COMM_WORLD
        # Assume ntasks_per_node == 1 (We can't check whether it is or not)
        return comm.Get_size()
    if launcher is not None:
        raise RuntimeError(f"launcher='{launcher}' is not supported")
    # prior is None -> NUM_NODES = 1
    return int(os.environ.get("WORLD_SIZE", 1))
| 13,914 | 36.506739 | 99 | py |
espnet | espnet-master/espnet2/train/iterable_dataset.py | """Iterable dataset module."""
import copy
from io import StringIO
from pathlib import Path
from typing import Callable, Collection, Dict, Iterator, Tuple, Union
import kaldiio
import numpy as np
import soundfile
import torch
from torch.utils.data.dataset import IterableDataset
from typeguard import check_argument_types
from espnet2.train.dataset import ESPnetDataset
def load_kaldi(input):
    """Load an array via kaldiio, accepting sound-scp, extended-ark, and ark data."""
    retval = kaldiio.load_mat(input)
    if not isinstance(retval, tuple):
        # Normal ark case
        assert isinstance(retval, np.ndarray), type(retval)
        return retval

    assert len(retval) == 2, len(retval)
    first, second = retval
    if isinstance(first, int) and isinstance(second, np.ndarray):
        # sound scp case: (rate, array)
        array = second
    elif isinstance(second, int) and isinstance(first, np.ndarray):
        # Extended ark format case: (array, rate)
        array = first
    else:
        raise RuntimeError(f"Unexpected type: {type(retval[0])}, {type(retval[1])}")
    # Multichannel wave fie
    # array: (NSample, Channel) or (Nsample)
    return array
# Mapping from data-type name (as written in the data spec) to a loader
# callable taking the scp "value" string and returning the loaded object.
DATA_TYPES = {
    "sound": lambda x: soundfile.read(x)[0],
    # Late-bound so the table entry behaves exactly like calling load_kaldi.
    "kaldi_ark": lambda x: load_kaldi(x),
    "npy": np.load,
    # NOTE: np.long was removed in NumPy>=1.24 (deprecated since 1.20);
    # np.int64 keeps the same 64-bit integer behavior on supported platforms.
    "text_int": lambda x: np.loadtxt(
        StringIO(x), ndmin=1, dtype=np.int64, delimiter=" "
    ),
    "csv_int": lambda x: np.loadtxt(
        StringIO(x), ndmin=1, dtype=np.int64, delimiter=","
    ),
    "text_float": lambda x: np.loadtxt(
        StringIO(x), ndmin=1, dtype=np.float32, delimiter=" "
    ),
    "csv_float": lambda x: np.loadtxt(
        StringIO(x), ndmin=1, dtype=np.float32, delimiter=","
    ),
    "text": lambda x: x,
}
class IterableESPnetDataset(IterableDataset):
    """Pytorch Dataset class for ESPNet.

    Streams entries line-by-line from the given scp-style files instead of
    random access; types not supported in streaming mode are delegated to a
    regular ESPnetDataset.

    Examples:
        >>> dataset = IterableESPnetDataset([('wav.scp', 'input', 'sound'),
        ...                                  ('token_int', 'output', 'text_int')],
        ...                                )
        >>> for uid, data in dataset:
        ...     data
        {'input': per_utt_array, 'output': per_utt_array}
    """

    def __init__(
        self,
        path_name_type_list: Collection[Tuple[str, str, str]],
        preprocess: Callable[
            [str, Dict[str, np.ndarray]], Dict[str, np.ndarray]
        ] = None,
        float_dtype: str = "float32",
        int_dtype: str = "long",
        key_file: str = None,
    ):
        """Initialize the dataset.

        Args:
            path_name_type_list: Triples of (file path, data-key name, type).
            preprocess: Optional callable applied to each (uid, data) pair.
            float_dtype: dtype name forced onto float arrays.
            int_dtype: dtype name forced onto integer arrays.
            key_file: Optional file whose first column defines the uid order.
        """
        assert check_argument_types()
        if len(path_name_type_list) == 0:
            raise ValueError(
                '1 or more elements are required for "path_name_type_list"'
            )

        path_name_type_list = copy.deepcopy(path_name_type_list)
        self.preprocess = preprocess

        self.float_dtype = float_dtype
        self.int_dtype = int_dtype
        self.key_file = key_file

        # name -> (path, type), kept for error messages and repr
        self.debug_info = {}
        non_iterable_list = []
        self.path_name_type_list = []

        for path, name, _type in path_name_type_list:
            if name in self.debug_info:
                raise RuntimeError(f'"{name}" is duplicated for data-key')
            self.debug_info[name] = path, _type
            if _type not in DATA_TYPES:
                # This type cannot be streamed; handle it via ESPnetDataset
                non_iterable_list.append((path, name, _type))
            else:
                self.path_name_type_list.append((path, name, _type))

        if len(non_iterable_list) != 0:
            # Some types doesn't support iterable mode
            self.non_iterable_dataset = ESPnetDataset(
                path_name_type_list=non_iterable_list,
                preprocess=preprocess,
                float_dtype=float_dtype,
                int_dtype=int_dtype,
            )
        else:
            self.non_iterable_dataset = None

        if Path(Path(path_name_type_list[0][0]).parent, "utt2category").exists():
            self.apply_utt2category = True
        else:
            self.apply_utt2category = False

    def has_name(self, name) -> bool:
        """Return True if *name* is one of this dataset's data-keys."""
        return name in self.debug_info

    def names(self) -> Tuple[str, ...]:
        """Return all data-key names."""
        return tuple(self.debug_info)

    def __repr__(self):
        _mes = self.__class__.__name__
        _mes += "("
        for name, (path, _type) in self.debug_info.items():
            _mes += f'\n  {name}: {{"path": "{path}", "type": "{_type}"}}'
        _mes += f"\n  preprocess: {self.preprocess})"
        return _mes

    def __iter__(self) -> Iterator[Tuple[Union[str, int], Dict[str, np.ndarray]]]:
        """Yield (uid, {name: array}) pairs in scp-file order.

        All scp files must be sorted and share the same keys; a key_file, if
        given, drives the uid order instead of the first scp file.
        """
        if self.key_file is not None:
            uid_iter = (
                line.rstrip().split(maxsplit=1)[0]
                for line in open(self.key_file, encoding="utf-8")
            )
        elif len(self.path_name_type_list) != 0:
            uid_iter = (
                line.rstrip().split(maxsplit=1)[0]
                for line in open(self.path_name_type_list[0][0], encoding="utf-8")
            )
        else:
            uid_iter = iter(self.non_iterable_dataset)

        files = [open(lis[0], encoding="utf-8") for lis in self.path_name_type_list]

        worker_info = torch.utils.data.get_worker_info()

        linenum = 0
        count = 0
        for count, uid in enumerate(uid_iter, 1):
            # If num_workers>=1, split keys
            if worker_info is not None:
                if (count - 1) % worker_info.num_workers != worker_info.id:
                    continue

            # 1. Read a line from each file
            while True:
                keys = []
                values = []
                for f in files:
                    linenum += 1
                    try:
                        line = next(f)
                    except StopIteration:
                        raise RuntimeError(f"{uid} is not found in the files")
                    sps = line.rstrip().split(maxsplit=1)
                    if len(sps) != 2:
                        raise RuntimeError(
                            f"This line doesn't include a space:"
                            f" {f}:L{linenum}: {line})"
                        )
                    key, value = sps
                    keys.append(key)
                    values.append(value)

                for k_idx, k in enumerate(keys):
                    if k != keys[0]:
                        raise RuntimeError(
                            f"Keys are mismatched. Text files (idx={k_idx}) is "
                            f"not sorted or not having same keys at L{linenum}"
                        )

                # If the key is matched, break the loop
                if len(keys) == 0 or keys[0] == uid:
                    break

            # 2. Load the entry from each line and create a dict
            data = {}
            # 2.a. Load data streamingly
            for value, (path, name, _type) in zip(values, self.path_name_type_list):
                func = DATA_TYPES[_type]
                # Load entry
                array = func(value)
                data[name] = array
            if self.non_iterable_dataset is not None:
                # 2.b. Load data from non-iterable dataset
                _, from_non_iterable = self.non_iterable_dataset[uid]
                data.update(from_non_iterable)

            # 3. [Option] Apply preprocessing
            #   e.g. espnet2.train.preprocessor:CommonPreprocessor
            if self.preprocess is not None:
                data = self.preprocess(uid, data)

            # 4. Force data-precision
            for name in data:
                value = data[name]
                if not isinstance(value, np.ndarray):
                    raise RuntimeError(
                        f"All values must be converted to np.ndarray object "
                        f'by preprocessing, but "{name}" is still {type(value)}.'
                    )

                # Cast to desired type
                if value.dtype.kind == "f":
                    value = value.astype(self.float_dtype)
                elif value.dtype.kind == "i":
                    value = value.astype(self.int_dtype)
                else:
                    raise NotImplementedError(f"Not supported dtype: {value.dtype}")
                data[name] = value

            yield uid, data

        if count == 0:
            raise RuntimeError("No iteration")
| 8,251 | 34.416309 | 88 | py |
espnet | espnet-master/espnet2/train/uasr_trainer.py | # Copyright 2022 Tomoki Hayashi
# 2022 Dongji Gao
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Trainer module for GAN-based UASR training."""
import argparse
import dataclasses
import logging
import math
import time
from contextlib import contextmanager
from typing import Dict, Iterable, List, Optional, Sequence, Tuple
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler, AbsScheduler
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.recursive_op import recursive_average
from espnet2.train.distributed_utils import DistributedOption
from espnet2.train.reporter import SubReporter
from espnet2.train.trainer import Trainer, TrainerOptions
from espnet2.utils.build_dataclass import build_dataclass
from espnet2.utils.types import str2bool
if torch.distributed.is_available():
from torch.distributed import ReduceOp
if V(torch.__version__) >= V("1.6.0"):
from torch.cuda.amp import GradScaler, autocast
else:
# Nothing to do if torch<1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
GradScaler = None
try:
import fairscale
except ImportError:
fairscale = None
@dataclasses.dataclass
class UASRTrainerOptions(TrainerOptions):
    """Trainer option dataclass for UASRTrainer."""

    # Whether to update the generator before the discriminator.
    generator_first: bool
    # Maximum number of grad-norm warnings shown before folding further ones.
    max_num_warning: int
class UASRTrainer(Trainer):
"""Trainer for GAN-based UASR training.
If you'd like to use this trainer, the model must inherit
espnet.train.abs_gan_espnet_model.AbsGANESPnetModel.
"""
    @classmethod
    def build_options(cls, args: argparse.Namespace) -> TrainerOptions:
        """Build options consumed by train(), eval(), and plot_attention().

        Args:
            args: Parsed command-line arguments.

        Returns:
            UASRTrainerOptions built from the argument namespace.
        """
        assert check_argument_types()
        return build_dataclass(UASRTrainerOptions, args)
    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser):
        """Add additional arguments for GAN-trainer.

        Args:
            parser: Argument parser extended in place.
        """
        parser.add_argument(
            "--generator_first",
            type=str2bool,
            default=False,
            help="Whether to update generator first.",
        )
        parser.add_argument(
            "--max_num_warning",
            type=int,
            default=10,
            help="Maximum number of warning shown",
        )
    @classmethod
    def train_one_epoch(
        cls,
        model: torch.nn.Module,
        iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],
        optimizers: Sequence[torch.optim.Optimizer],
        schedulers: Sequence[Optional[AbsScheduler]],
        scaler: Optional[GradScaler],
        reporter: SubReporter,
        summary_writer,
        options: UASRTrainerOptions,
        distributed_option: DistributedOption,
    ) -> bool:
        """Train one epoch for UASR.

        Unlike GANTrainer, the model itself decides per iteration whether
        this is a generator or a discriminator step
        (via model.is_discriminative_step() / model.get_optim_index()),
        so exactly one turn runs per mini-batch.

        Returns:
            True if every optimizer step in the epoch was skipped.
        """
        assert check_argument_types()

        grad_noise = options.grad_noise
        accum_grad = options.accum_grad
        grad_clip = options.grad_clip
        grad_clip_type = options.grad_clip_type
        log_interval = options.log_interval
        no_forward_run = options.no_forward_run
        ngpu = options.ngpu
        use_wandb = options.use_wandb
        distributed = distributed_option.distributed
        max_num_warning = options.max_num_warning
        cur_num_warning = 0
        # Once too many inf/nan grad-norm warnings occurred, stop printing them
        hide_warning = False

        # Check unavailable options
        # TODO(kan-bayashi): Support the use of these options
        if accum_grad > 1:
            raise NotImplementedError(
                "accum_grad > 1 is not supported in GAN-based training."
            )
        if grad_noise:
            raise NotImplementedError(
                "grad_noise is not supported in GAN-based training."
            )

        if log_interval is None:
            try:
                log_interval = max(len(iterator) // 20, 10)
            except TypeError:
                # Iterator without __len__ (e.g. pure streaming iterator)
                log_interval = 100

        model.train()
        all_steps_are_invalid = True
        # Expose the current epoch to the model (used by its own schedules)
        model.number_epochs = reporter.epoch
        # [For distributed] Because iteration counts are not always equals between
        # processes, send stop-flag to the other processes if iterator is finished
        iterator_stop = torch.tensor(0).to("cuda" if ngpu > 0 else "cpu")

        start_time = time.perf_counter()
        for iiter, (_, batch) in enumerate(
            reporter.measure_iter_time(iterator, "iter_time"), 1
        ):
            assert isinstance(batch, dict), type(batch)

            if distributed:
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
                if iterator_stop > 0:
                    break

            batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
            if no_forward_run:
                all_steps_are_invalid = False
                continue

            turn_start_time = time.perf_counter()
            # The model decides which sub-network is trained this iteration
            model.number_updates = iiter - 1
            is_discriminative_step = model.is_discriminative_step()
            optim_idx = model.get_optim_index()
            if is_discriminative_step:
                turns = ["discriminator"]
            else:
                turns = ["generator"]
            for turn in turns:
                with autocast(scaler is not None):
                    with reporter.measure_time(f"{turn}_forward_time"):
                        retval = model(**batch)

                    # Note(jiatong):
                    # Supporting only one patterns
                    # for the returned value from the model
                    # must be tuple or list type
                    if not (isinstance(retval, list) or isinstance(retval, tuple)):
                        raise RuntimeError("model output must be tuple or list.")
                    else:
                        loss, stats, weight, _ = retval

                    stats = {k: v for k, v in stats.items() if v is not None}
                    if ngpu > 1 or distributed:
                        # Apply weighted averaging for loss and stats
                        loss = (loss * weight.type(loss.dtype)).sum()

                        # if distributed, this method can also apply all_reduce()
                        stats, weight = recursive_average(stats, weight, distributed)

                        # Now weight is summation over all workers
                        loss /= weight
                        if distributed:
                            # NOTE(kamo): Multiply world_size since
                            # DistributedDataParallel automatically normalizes
                            # the gradient by world_size.
                            loss *= torch.distributed.get_world_size()

                reporter.register(stats, weight)

                with reporter.measure_time(f"{turn}_backward_time"):
                    if scaler is not None:
                        # Scales loss. Calls backward() on scaled loss
                        # to create scaled gradients.
                        # Backward passes under autocast are not recommended.
                        # Backward ops run in the same dtype autocast chose
                        # for corresponding forward ops.
                        scaler.scale(loss).backward()
                    else:
                        loss.backward()

                if scaler is not None:
                    # Unscales the gradients of optimizer's assigned params in-place
                    for iopt, optimizer in enumerate(optimizers):
                        if optim_idx is not None and iopt != optim_idx:
                            continue
                        scaler.unscale_(optimizer)

                # TODO(kan-bayashi): Compute grad norm without clipping
                grad_norm = None
                if grad_clip > 0.0:
                    # compute the gradient norm to check if it is normal or not
                    grad_norm = torch.nn.utils.clip_grad_norm_(
                        model.parameters(),
                        max_norm=grad_clip,
                        norm_type=grad_clip_type,
                    )
                    # PyTorch<=1.4, clip_grad_norm_ returns float value
                    if not isinstance(grad_norm, torch.Tensor):
                        grad_norm = torch.tensor(grad_norm)

                if grad_norm is None or torch.isfinite(grad_norm):
                    all_steps_are_invalid = False
                    with reporter.measure_time(f"{turn}_optim_step_time"):
                        for iopt, (optimizer, scheduler) in enumerate(
                            zip(optimizers, schedulers)
                        ):
                            if optim_idx is not None and iopt != optim_idx:
                                continue
                            if scaler is not None:
                                # scaler.step() first unscales the gradients of
                                # the optimizer's assigned params.
                                scaler.step(optimizer)
                                # Updates the scale for next iteration.
                                scaler.update()
                            else:
                                optimizer.step()
                            if isinstance(scheduler, AbsBatchStepScheduler):
                                scheduler.step()
                else:
                    cur_num_warning += 1
                    if cur_num_warning >= max_num_warning:
                        # Fold further warnings, logging the fold only once
                        if not hide_warning:
                            logging.info("Warning info folded...")
                        hide_warning = True
                    if not hide_warning:
                        logging.warning(
                            f"The grad norm is {grad_norm}. "
                            "Skipping updating the model."
                        )
                    # Must invoke scaler.update() if unscale_() is used in the
                    # iteration to avoid the following error:
                    #   RuntimeError: unscale_() has already been called
                    #   on this optimizer since the last update().
                    # Note that if the gradient has inf/nan values,
                    # scaler.step skips optimizer.step().
                    if scaler is not None:
                        for iopt, optimizer in enumerate(optimizers):
                            if optim_idx is not None and iopt != optim_idx:
                                continue
                            scaler.step(optimizer)
                            scaler.update()

                for iopt, optimizer in enumerate(optimizers):
                    # NOTE(kan-bayashi): In the case of GAN, we need to clear
                    # the gradient of both optimizers after every update.
                    optimizer.zero_grad()

                # Register lr and train/load time[sec/step],
                # where step refers to accum_grad * mini-batch
                reporter.register(
                    {
                        f"optim{optim_idx}_lr{i}": pg["lr"]
                        for i, pg in enumerate(optimizers[optim_idx].param_groups)
                        if "lr" in pg
                    },
                )
                reporter.register(
                    {f"{turn}_train_time": time.perf_counter() - turn_start_time}
                )
                turn_start_time = time.perf_counter()

            reporter.register({"train_time": time.perf_counter() - start_time})
            start_time = time.perf_counter()

            # NOTE(kamo): Call log_message() after next()
            reporter.next()
            if iiter % log_interval == 0:
                logging.info(reporter.log_message(-log_interval))
                if summary_writer is not None:
                    reporter.tensorboard_add_scalar(summary_writer, -log_interval)
                if use_wandb:
                    reporter.wandb_log()

        else:
            # Loop exhausted normally: tell the other workers to stop as well.
            if distributed:
                iterator_stop.fill_(1)
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)

        if hide_warning:
            # NOTE(review): iiter is only bound if the loop ran at least once;
            # hide_warning cannot be True otherwise, so this is safe here.
            logging.warning(
                f"{cur_num_warning}/{iiter} iterations skipped due to inf/nan grad norm"
            )

        return all_steps_are_invalid
    @classmethod
    @torch.no_grad()
    def validate_one_epoch(
        cls,
        model: torch.nn.Module,
        iterator: Iterable[Dict[str, torch.Tensor]],
        reporter: SubReporter,
        options: UASRTrainerOptions,
        distributed_option: DistributedOption,
    ) -> None:
        """Validate one epoch.

        Runs the model over the validation iterator, registers the per-batch
        stats, and then derives epoch-level UASR metrics from the accumulated
        reporter stats: phone error rate and, when the model carries a kenlm
        LM, LM perplexity plus a vocabulary-coverage-weighted perplexity.
        """
        assert check_argument_types()
        ngpu = options.ngpu
        no_forward_run = options.no_forward_run
        distributed = distributed_option.distributed
        # Per-batch vocabulary-usage masks, stacked later for coverage rate.
        vocab_seen_list = []
        model.eval()
        logging.info("Doing validation")
        # [For distributed] Because iteration counts are not always equals between
        # processes, send stop-flag to the other processes if iterator is finished
        iterator_stop = torch.tensor(0).to("cuda" if ngpu > 0 else "cpu")
        # Only the first batch asks the model to print hypotheses (debug aid).
        print_hyp = True
        for _, batch in iterator:
            assert isinstance(batch, dict), type(batch)
            if distributed:
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
                if iterator_stop > 0:
                    break
            batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
            if no_forward_run:
                continue
            retval = model(**batch, do_validation=True, print_hyp=print_hyp)
            print_hyp = False
            if not (isinstance(retval, list) or isinstance(retval, tuple)):
                raise RuntimeError("model output must be tuple or list.")
            else:
                loss, stats, weight, vocab_seen = retval
                vocab_seen_list.append(vocab_seen)
            stats = {k: v for k, v in stats.items() if v is not None}
            if ngpu > 1 or distributed:
                # Apply weighted averaging for stats.
                # if distributed, this method can also apply all_reduce()
                stats, weight = recursive_average(stats, weight, distributed)
            reporter.register(stats)
            reporter.next()
        else:
            # for/else: only reached when the iterator was exhausted normally;
            # tell the other ranks to stop as well.
            if distributed:
                iterator_stop.fill_(1)
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
        # compute phone error rate
        total_num_errors = 0
        total_num_ref_tokens = 0
        assert (
            "batch_num_errors" in reporter.stats
            and "batch_num_ref_tokens" in reporter.stats
        )
        for batch_num_errors, batch_num_ref_tokens in zip(
            reporter.stats["batch_num_errors"], reporter.stats["batch_num_ref_tokens"]
        ):
            total_num_errors += batch_num_errors.value
            total_num_ref_tokens += batch_num_ref_tokens.value
        # NOTE(review): raises ZeroDivisionError if no batch contributed
        # reference tokens (e.g. no_forward_run) — confirm that is acceptable.
        phone_error_rate = total_num_errors / total_num_ref_tokens
        reporter.register({"phone_error_rate": phone_error_rate})
        # compute lm_ppl
        if model.kenlm:
            assert (
                "batch_lm_log_prob" in reporter.stats
                and "batch_num_hyp_tokens" in reporter.stats
                and "batch_size" in reporter.stats
            )
            assert (
                len(reporter.stats["batch_lm_log_prob"])
                == len(reporter.stats["batch_num_hyp_tokens"])
                == len(reporter.stats["batch_size"])
            )
            total_lm_log_prob = 0
            total_num_tokens = 0
            total_num_sentences = 0
            for log_prob, num_tokens, batch_size in zip(
                reporter.stats["batch_lm_log_prob"],
                reporter.stats["batch_num_hyp_tokens"],
                reporter.stats["batch_size"],
            ):
                total_lm_log_prob += log_prob.value
                total_num_tokens += num_tokens.value
                total_num_sentences += batch_size.value
            # Base-10 perplexity; +num_sentences presumably accounts for an
            # end-of-sentence token per hypothesis — TODO confirm.
            lm_ppl = math.pow(
                10, -total_lm_log_prob / (total_num_tokens + total_num_sentences)
            )
            # Fraction of the vocabulary produced at least once this epoch.
            # NOTE(review): torch.stack fails if vocab_seen_list is empty.
            vocab_seen = torch.stack(vocab_seen_list).sum(dim=0).bool().sum()
            vocab_seen_rate = vocab_seen / model.vocab_size
            assert vocab_seen_rate <= 1.0
            # Penalize low vocabulary coverage quadratically.
            weighted_lm_ppl = lm_ppl / vocab_seen_rate**2
            reporter.register({"lm_ppl": lm_ppl})
            reporter.register({"weighted_lm_ppl": weighted_lm_ppl})
        reporter.next()
| 16,645 | 38.445498 | 88 | py |
espnet | espnet-master/espnet2/train/collate_fn.py | import math
from typing import Collection, Dict, List, Tuple, Union
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet.nets.pytorch_backend.nets_utils import pad_list
class CommonCollateFn:
    """Functor class of common_collate_fn().

    Stores the padding values and the set of non-sequence keys so the
    callable can be handed to a DataLoader as ``collate_fn``.
    """

    def __init__(
        self,
        float_pad_value: Union[float, int] = 0.0,
        int_pad_value: int = -32768,
        not_sequence: Collection[str] = (),
    ):
        assert check_argument_types()
        # Pad value used for float arrays (features, waveforms, ...).
        self.float_pad_value = float_pad_value
        # Pad value used for integer arrays (token ids, labels, ...).
        self.int_pad_value = int_pad_value
        # Keys that are not length-variable sequences: no "*_lengths" emitted.
        self.not_sequence = set(not_sequence)

    def __repr__(self):
        # Fix: previously this printed float_pad_value twice; now it reports
        # the actual int_pad_value.
        return (
            f"{self.__class__}(float_pad_value={self.float_pad_value}, "
            f"int_pad_value={self.int_pad_value})"
        )

    def __call__(
        self, data: Collection[Tuple[str, Dict[str, np.ndarray]]]
    ) -> Tuple[List[str], Dict[str, torch.Tensor]]:
        """Collate a list of (uttid, sample-dict) pairs into padded tensors."""
        return common_collate_fn(
            data,
            float_pad_value=self.float_pad_value,
            int_pad_value=self.int_pad_value,
            not_sequence=self.not_sequence,
        )
class HuBERTCollateFn(CommonCollateFn):
    """Collate functor for HuBERT pretraining.

    In addition to the common padding behavior, it optionally downsamples the
    KMeans label sequence and crops waveform/label pairs to a common length
    before delegating to common_collate_fn().
    """

    def __init__(
        self,
        float_pad_value: Union[float, int] = 0.0,
        int_pad_value: int = -32768,
        label_downsampling: int = 1,
        pad: bool = False,
        rand_crop: bool = True,
        crop_audio: bool = True,
        not_sequence: Collection[str] = (),
    ):
        assert check_argument_types()
        super().__init__(
            float_pad_value=float_pad_value,
            int_pad_value=int_pad_value,
            not_sequence=not_sequence,
        )
        self.float_pad_value = float_pad_value
        self.int_pad_value = int_pad_value
        # Take every n-th label (e.g. 2 when labels come from 10ms MFCC
        # frames but the model outputs 20ms frames).
        self.label_downsampling = label_downsampling
        # True: pad all utterances to the max length; False: crop to the min.
        self.pad = pad
        # Randomize the crop start when an utterance is longer than the target.
        self.rand_crop = rand_crop
        self.crop_audio = crop_audio
        self.not_sequence = set(not_sequence)

    def __repr__(self):
        # Fix: previously the second field printed float_pad_value again;
        # now it reports the actual int_pad_value.
        return (
            f"{self.__class__}(float_pad_value={self.float_pad_value}, "
            f"int_pad_value={self.int_pad_value}, "
            f"label_downsampling={self.label_downsampling}, "
            f"pad_value={self.pad}, rand_crop={self.rand_crop}) "
        )

    def __call__(
        self, data: Collection[Tuple[str, Dict[str, np.ndarray]]]
    ) -> Tuple[List[str], Dict[str, torch.Tensor]]:
        """Crop/downsample each (speech, text) pair, then collate as usual."""
        assert "speech" in data[0][1]
        assert "text" in data[0][1]
        # Target length: longest utterance when padding, shortest when cropping.
        if self.pad:
            num_frames = max([sample["speech"].shape[0] for uid, sample in data])
        else:
            num_frames = min([sample["speech"].shape[0] for uid, sample in data])
        new_data = []
        for uid, sample in data:
            waveform, label = sample["speech"], sample["text"]
            assert waveform.ndim == 1
            length = waveform.size
            # The MFCC feature is 10ms per frame, while the HuBERT's transformer output
            # is 20ms per frame. Downsample the KMeans label if it's generated by MFCC
            # features.
            if self.label_downsampling > 1:
                label = label[:: self.label_downsampling]
            if self.crop_audio:
                waveform, label, length = _crop_audio_label(
                    waveform, label, length, num_frames, self.rand_crop
                )
            new_data.append((uid, dict(speech=waveform, text=label)))
        return common_collate_fn(
            new_data,
            float_pad_value=self.float_pad_value,
            int_pad_value=self.int_pad_value,
            not_sequence=self.not_sequence,
        )
def _crop_audio_label(
waveform: torch.Tensor,
label: torch.Tensor,
length: torch.Tensor,
num_frames: int,
rand_crop: bool,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Collate the audio and label at the same time.
Args:
waveform (Tensor): The waveform Tensor with dimensions `(time)`.
label (Tensor): The label Tensor with dimensions `(seq)`.
length (Tensor): The length Tensor with dimension `(1,)`.
num_frames (int): The final length of the waveform.
rand_crop (bool): if ``rand_crop`` is True, the starting index of the
waveform and label is random if the length is longer than the minimum
length in the mini-batch.
Returns:
(Tuple(Tensor, Tensor, Tensor)): Returns the Tensors for the waveform,
label, and the waveform length.
"""
kernel_size = 25
stride = 20
sample_rate = 16 # 16 per millisecond
frame_offset = 0
if waveform.size > num_frames and rand_crop:
diff = waveform.size - num_frames
frame_offset = torch.randint(diff, size=(1,))
elif waveform.size < num_frames:
num_frames = waveform.size
label_offset = max(
math.floor((frame_offset - kernel_size * sample_rate) / (stride * sample_rate))
+ 1,
0,
)
num_label = (
math.floor((num_frames - kernel_size * sample_rate) / (stride * sample_rate))
+ 1
)
waveform = waveform[frame_offset : frame_offset + num_frames]
label = label[label_offset : label_offset + num_label]
length = num_frames
return waveform, label, length
def common_collate_fn(
    data: Collection[Tuple[str, Dict[str, np.ndarray]]],
    float_pad_value: Union[float, int] = 0.0,
    int_pad_value: int = -32768,
    not_sequence: Collection[str] = (),
) -> Tuple[List[str], Dict[str, torch.Tensor]]:
    """Concatenate ndarray-list to an array and convert to torch.Tensor.

    Every sample dict must share the same keys; for each key the arrays are
    padded along their first axis and stacked into one tensor. For every key
    not listed in ``not_sequence``, an extra ``{key}_lengths`` LongTensor with
    the original first-axis sizes is added.

    Examples:
        >>> from espnet2.samplers.constant_batch_sampler import ConstantBatchSampler
        >>> from espnet2.train.dataset import ESPnetDataset
        >>> sampler = ConstantBatchSampler(...)
        >>> dataset = ESPnetDataset(...)
        >>> keys = next(iter(sampler))
        >>> batch = [dataset[key] for key in keys]
        >>> batch = common_collate_fn(batch)
        >>> model(**batch)

    Note that the dict-keys of batch are propagated from
    that of the dataset as they are.
    """
    assert check_argument_types()
    uttids = [uid for uid, _ in data]
    samples = [sample for _, sample in data]
    reference_keys = set(samples[0])
    assert all(
        reference_keys == set(sample) for sample in samples
    ), "dict-keys mismatching"
    assert all(
        not k.endswith("_lengths") for k in samples[0]
    ), f"*_lengths is reserved: {list(samples[0])}"
    output = {}
    for key in samples[0]:
        # NOTE(kamo):
        # Each models, which accepts these values finally, are responsible
        # to repaint the pad_value to the desired value for each tasks.
        if samples[0][key].dtype.kind == "i":
            pad_value = int_pad_value
        else:
            pad_value = float_pad_value
        # Assume the first axis is length: Batch x (Length, ...)
        tensor_list = [torch.from_numpy(sample[key]) for sample in samples]
        # (Batch, Length, ...)
        output[key] = pad_list(tensor_list, pad_value)
        if key not in not_sequence:
            # (Batch,) — original (unpadded) first-axis sizes.
            output[key + "_lengths"] = torch.tensor(
                [sample[key].shape[0] for sample in samples], dtype=torch.long
            )
    result = (uttids, output)
    assert check_return_type(result)
    return result
| 7,460 | 33.068493 | 87 | py |
espnet | espnet-master/espnet2/train/trainer.py | """Trainer module."""
import argparse
import dataclasses
import logging
import time
from contextlib import contextmanager
from dataclasses import is_dataclass
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import humanfriendly
import numpy as np
import torch
import torch.nn
import torch.optim
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.main_funcs.average_nbest_models import average_nbest_models
from espnet2.main_funcs.calculate_all_attentions import calculate_all_attentions
from espnet2.schedulers.abs_scheduler import (
AbsBatchStepScheduler,
AbsEpochStepScheduler,
AbsScheduler,
AbsValEpochStepScheduler,
)
from espnet2.torch_utils.add_gradient_noise import add_gradient_noise
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.recursive_op import recursive_average
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.distributed_utils import DistributedOption
from espnet2.train.reporter import Reporter, SubReporter
from espnet2.utils.build_dataclass import build_dataclass
from espnet2.utils.kwargs2args import kwargs2args
if torch.distributed.is_available():
from torch.distributed import ReduceOp
# Extra keyword arguments forwarded to every autocast() call below.
autocast_args = dict()
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import GradScaler, autocast
    # Prefer bfloat16 autocast when both torch and the GPU support it.
    if (
        V(torch.__version__) >= V("1.10.0")
        and torch.cuda.is_available()
        and torch.cuda.is_bf16_supported()
    ):
        autocast_args = dict(dtype=torch.bfloat16)
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        # No-op stand-in so call sites work unchanged without AMP support.
        yield
    GradScaler = None
# fairscale is optional; sharded DDP features are gated on it being importable.
try:
    import fairscale
except ImportError:
    fairscale = None
@dataclasses.dataclass
class TrainerOptions:
    """Options consumed by Trainer.run()/train_one_epoch()/validate_one_epoch().

    Built from the parsed command-line namespace by Trainer.build_options().
    """

    # Device / precision
    ngpu: int
    resume: bool
    use_amp: bool
    train_dtype: str
    # Gradient handling
    grad_noise: bool
    accum_grad: int
    grad_clip: float
    grad_clip_type: float
    # Logging / debugging
    log_interval: Optional[int]
    no_forward_run: bool
    use_matplotlib: bool
    use_tensorboard: bool
    use_wandb: bool
    # Run control
    output_dir: Union[Path, str]
    max_epoch: int
    seed: int
    sharded_ddp: bool
    patience: Optional[int]
    # Model selection / averaging
    keep_nbest_models: Union[int, List[int]]
    nbest_averaging_interval: int
    early_stopping_criterion: Sequence[str]
    best_model_criterion: Sequence[Sequence[str]]
    val_scheduler_criterion: Sequence[str]
    # Misc
    unused_parameters: bool
    wandb_model_log_interval: int
    create_graph_in_tensorboard: bool
class Trainer:
    """Trainer having an optimizer.

    If you'd like to use multiple optimizers, then inherit this class
    and override the methods if necessary - at least "train_one_epoch()"

    >>> class TwoOptimizerTrainer(Trainer):
    ...     @classmethod
    ...     def add_arguments(cls, parser):
    ...         ...
    ...
    ...     @classmethod
    ...     def train_one_epoch(cls, model, optimizers, ...):
    ...         loss1 = model.model1(...)
    ...         loss1.backward()
    ...         optimizers[0].step()
    ...
    ...         loss2 = model.model2(...)
    ...         loss2.backward()
    ...         optimizers[1].step()
    """

    def __init__(self):
        # All functionality is exposed through classmethods/staticmethods;
        # instantiating this class is a usage error.
        raise RuntimeError("This class can't be instantiated.")
    @classmethod
    def build_options(cls, args: argparse.Namespace) -> TrainerOptions:
        """Build options consumed by train(), eval(), and plot_attention()"""
        assert check_argument_types()
        # Copies the matching attributes from the parsed namespace into the
        # TrainerOptions dataclass (project helper).
        return build_dataclass(TrainerOptions, args)
    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser):
        """Reserved for future development of another Trainer"""
        # Intentionally a no-op: subclasses may register trainer-specific
        # command-line options here.
        pass
@staticmethod
def resume(
checkpoint: Union[str, Path],
model: torch.nn.Module,
reporter: Reporter,
optimizers: Sequence[torch.optim.Optimizer],
schedulers: Sequence[Optional[AbsScheduler]],
scaler: Optional[GradScaler],
ngpu: int = 0,
):
states = torch.load(
checkpoint,
map_location=f"cuda:{torch.cuda.current_device()}" if ngpu > 0 else "cpu",
)
model.load_state_dict(states["model"])
reporter.load_state_dict(states["reporter"])
for optimizer, state in zip(optimizers, states["optimizers"]):
optimizer.load_state_dict(state)
for scheduler, state in zip(schedulers, states["schedulers"]):
if scheduler is not None:
scheduler.load_state_dict(state)
if scaler is not None:
if states["scaler"] is None:
logging.warning("scaler state is not found")
else:
scaler.load_state_dict(states["scaler"])
logging.info(f"The training was resumed using {checkpoint}")
@classmethod
def run(
cls,
model: AbsESPnetModel,
optimizers: Sequence[torch.optim.Optimizer],
schedulers: Sequence[Optional[AbsScheduler]],
train_iter_factory: AbsIterFactory,
valid_iter_factory: AbsIterFactory,
plot_attention_iter_factory: Optional[AbsIterFactory],
trainer_options,
distributed_option: DistributedOption,
) -> None:
"""Perform training. This method performs the main process of training."""
assert check_argument_types()
# NOTE(kamo): Don't check the type more strictly as far trainer_options
assert is_dataclass(trainer_options), type(trainer_options)
assert len(optimizers) == len(schedulers), (len(optimizers), len(schedulers))
if isinstance(trainer_options.keep_nbest_models, int):
keep_nbest_models = [trainer_options.keep_nbest_models]
else:
if len(trainer_options.keep_nbest_models) == 0:
logging.warning("No keep_nbest_models is given. Change to [1]")
trainer_options.keep_nbest_models = [1]
keep_nbest_models = trainer_options.keep_nbest_models
output_dir = Path(trainer_options.output_dir)
reporter = Reporter()
if trainer_options.use_amp:
if V(torch.__version__) < V("1.6.0"):
raise RuntimeError(
"Require torch>=1.6.0 for Automatic Mixed Precision"
)
if trainer_options.sharded_ddp:
if fairscale is None:
raise RuntimeError(
"Requiring fairscale. Do 'pip install fairscale'"
)
scaler = fairscale.optim.grad_scaler.ShardedGradScaler()
else:
scaler = GradScaler()
else:
scaler = None
if trainer_options.resume and (output_dir / "checkpoint.pth").exists():
cls.resume(
checkpoint=output_dir / "checkpoint.pth",
model=model,
optimizers=optimizers,
schedulers=schedulers,
reporter=reporter,
scaler=scaler,
ngpu=trainer_options.ngpu,
)
start_epoch = reporter.get_epoch() + 1
if start_epoch == trainer_options.max_epoch + 1:
logging.warning(
f"The training has already reached at max_epoch: {start_epoch}"
)
if distributed_option.distributed:
if trainer_options.sharded_ddp:
dp_model = fairscale.nn.data_parallel.ShardedDataParallel(
module=model,
sharded_optimizer=optimizers,
)
else:
dp_model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=(
# Perform multi-Process with multi-GPUs
[torch.cuda.current_device()]
if distributed_option.ngpu == 1
# Perform single-Process with multi-GPUs
else None
),
output_device=(
torch.cuda.current_device()
if distributed_option.ngpu == 1
else None
),
find_unused_parameters=trainer_options.unused_parameters,
)
elif distributed_option.ngpu > 1:
dp_model = torch.nn.parallel.DataParallel(
model,
device_ids=list(range(distributed_option.ngpu)),
)
else:
# NOTE(kamo): DataParallel also should work with ngpu=1,
# but for debuggability it's better to keep this block.
dp_model = model
if trainer_options.use_tensorboard and (
not distributed_option.distributed or distributed_option.dist_rank == 0
):
from torch.utils.tensorboard import SummaryWriter
train_summary_writer = SummaryWriter(
str(output_dir / "tensorboard" / "train")
)
valid_summary_writer = SummaryWriter(
str(output_dir / "tensorboard" / "valid")
)
else:
train_summary_writer = None
start_time = time.perf_counter()
for iepoch in range(start_epoch, trainer_options.max_epoch + 1):
if iepoch != start_epoch:
logging.info(
"{}/{}epoch started. Estimated time to finish: {}".format(
iepoch,
trainer_options.max_epoch,
humanfriendly.format_timespan(
(time.perf_counter() - start_time)
/ (iepoch - start_epoch)
* (trainer_options.max_epoch - iepoch + 1)
),
)
)
else:
logging.info(f"{iepoch}/{trainer_options.max_epoch}epoch started")
set_all_random_seed(trainer_options.seed + iepoch)
reporter.set_epoch(iepoch)
# 1. Train and validation for one-epoch
with reporter.observe("train") as sub_reporter:
all_steps_are_invalid = cls.train_one_epoch(
model=dp_model,
optimizers=optimizers,
schedulers=schedulers,
iterator=train_iter_factory.build_iter(iepoch),
reporter=sub_reporter,
scaler=scaler,
summary_writer=train_summary_writer,
options=trainer_options,
distributed_option=distributed_option,
)
with reporter.observe("valid") as sub_reporter:
cls.validate_one_epoch(
model=dp_model,
iterator=valid_iter_factory.build_iter(iepoch),
reporter=sub_reporter,
options=trainer_options,
distributed_option=distributed_option,
)
if not distributed_option.distributed or distributed_option.dist_rank == 0:
# att_plot doesn't support distributed
if plot_attention_iter_factory is not None:
with reporter.observe("att_plot") as sub_reporter:
cls.plot_attention(
model=model,
output_dir=output_dir / "att_ws",
summary_writer=train_summary_writer,
iterator=plot_attention_iter_factory.build_iter(iepoch),
reporter=sub_reporter,
options=trainer_options,
)
# 2. LR Scheduler step
for scheduler in schedulers:
if isinstance(scheduler, AbsValEpochStepScheduler):
scheduler.step(
reporter.get_value(*trainer_options.val_scheduler_criterion)
)
elif isinstance(scheduler, AbsEpochStepScheduler):
scheduler.step()
if trainer_options.sharded_ddp:
for optimizer in optimizers:
if isinstance(optimizer, fairscale.optim.oss.OSS):
optimizer.consolidate_state_dict()
if not distributed_option.distributed or distributed_option.dist_rank == 0:
# 3. Report the results
logging.info(reporter.log_message())
if trainer_options.use_matplotlib:
reporter.matplotlib_plot(output_dir / "images")
if train_summary_writer is not None:
reporter.tensorboard_add_scalar(train_summary_writer, key1="train")
reporter.tensorboard_add_scalar(valid_summary_writer, key1="valid")
if trainer_options.use_wandb:
reporter.wandb_log()
# 4. Save/Update the checkpoint
torch.save(
{
"model": model.state_dict(),
"reporter": reporter.state_dict(),
"optimizers": [o.state_dict() for o in optimizers],
"schedulers": [
s.state_dict() if s is not None else None
for s in schedulers
],
"scaler": scaler.state_dict() if scaler is not None else None,
},
output_dir / "checkpoint.pth",
)
# 5. Save and log the model and update the link to the best model
torch.save(model.state_dict(), output_dir / f"{iepoch}epoch.pth")
# Creates a sym link latest.pth -> {iepoch}epoch.pth
p = output_dir / "latest.pth"
if p.is_symlink() or p.exists():
p.unlink()
p.symlink_to(f"{iepoch}epoch.pth")
_improved = []
for _phase, k, _mode in trainer_options.best_model_criterion:
# e.g. _phase, k, _mode = "train", "loss", "min"
if reporter.has(_phase, k):
best_epoch = reporter.get_best_epoch(_phase, k, _mode)
# Creates sym links if it's the best result
if best_epoch == iepoch:
p = output_dir / f"{_phase}.{k}.best.pth"
if p.is_symlink() or p.exists():
p.unlink()
p.symlink_to(f"{iepoch}epoch.pth")
_improved.append(f"{_phase}.{k}")
if len(_improved) == 0:
logging.info("There are no improvements in this epoch")
else:
logging.info(
"The best model has been updated: " + ", ".join(_improved)
)
log_model = (
trainer_options.wandb_model_log_interval > 0
and iepoch % trainer_options.wandb_model_log_interval == 0
)
if log_model and trainer_options.use_wandb:
import wandb
logging.info("Logging Model on this epoch :::::")
artifact = wandb.Artifact(
name=f"model_{wandb.run.id}",
type="model",
metadata={"improved": _improved},
)
artifact.add_file(str(output_dir / f"{iepoch}epoch.pth"))
aliases = [
f"epoch-{iepoch}",
"best" if best_epoch == iepoch else "",
]
wandb.log_artifact(artifact, aliases=aliases)
# 6. Remove the model files excluding n-best epoch and latest epoch
_removed = []
# Get the union set of the n-best among multiple criterion
nbests = set().union(
*[
set(reporter.sort_epochs(ph, k, m)[: max(keep_nbest_models)])
for ph, k, m in trainer_options.best_model_criterion
if reporter.has(ph, k)
]
)
# Generated n-best averaged model
if (
trainer_options.nbest_averaging_interval > 0
and iepoch % trainer_options.nbest_averaging_interval == 0
):
average_nbest_models(
reporter=reporter,
output_dir=output_dir,
best_model_criterion=trainer_options.best_model_criterion,
nbest=keep_nbest_models,
suffix=f"till{iepoch}epoch",
)
for e in range(1, iepoch):
p = output_dir / f"{e}epoch.pth"
if p.exists() and e not in nbests:
p.unlink()
_removed.append(str(p))
if len(_removed) != 0:
logging.info("The model files were removed: " + ", ".join(_removed))
# 7. If any updating haven't happened, stops the training
if all_steps_are_invalid:
logging.warning(
"The gradients at all steps are invalid in this epoch. "
f"Something seems wrong. This training was stopped at {iepoch}epoch"
)
break
# 8. Check early stopping
if trainer_options.patience is not None:
if reporter.check_early_stopping(
trainer_options.patience, *trainer_options.early_stopping_criterion
):
break
else:
logging.info(
f"The training was finished at {trainer_options.max_epoch} epochs "
)
# Generated n-best averaged model
if not distributed_option.distributed or distributed_option.dist_rank == 0:
average_nbest_models(
reporter=reporter,
output_dir=output_dir,
best_model_criterion=trainer_options.best_model_criterion,
nbest=keep_nbest_models,
)
    @classmethod
    def train_one_epoch(
        cls,
        model: torch.nn.Module,
        iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],
        optimizers: Sequence[torch.optim.Optimizer],
        schedulers: Sequence[Optional[AbsScheduler]],
        scaler: Optional[GradScaler],
        reporter: SubReporter,
        summary_writer,
        options: TrainerOptions,
        distributed_option: DistributedOption,
    ) -> bool:
        """Train the model for one epoch.

        Returns:
            bool: True when every optimizer step in the epoch was skipped
            (e.g. all gradient norms were non-finite), signalling the caller
            to abort training.
        """
        assert check_argument_types()
        grad_noise = options.grad_noise
        accum_grad = options.accum_grad
        grad_clip = options.grad_clip
        grad_clip_type = options.grad_clip_type
        log_interval = options.log_interval
        no_forward_run = options.no_forward_run
        ngpu = options.ngpu
        use_wandb = options.use_wandb
        create_graph_in_tensorboard = options.create_graph_in_tensorboard
        distributed = distributed_option.distributed
        if log_interval is None:
            # Log roughly 20 times per epoch; 100 if the iterator has no len().
            try:
                log_interval = max(len(iterator) // 20, 10)
            except TypeError:
                log_interval = 100
        model.train()
        all_steps_are_invalid = True
        # [For distributed] Because iteration counts are not always equals between
        # processes, send stop-flag to the other processes if iterator is finished
        iterator_stop = torch.tensor(0).to("cuda" if ngpu > 0 else "cpu")
        start_time = time.perf_counter()
        for iiter, (utt_id, batch) in enumerate(
            reporter.measure_iter_time(iterator, "iter_time"), 1
        ):
            assert isinstance(batch, dict), type(batch)
            if distributed:
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
                if iterator_stop > 0:
                    break
            batch["utt_id"] = utt_id
            batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
            if no_forward_run:
                all_steps_are_invalid = False
                continue
            # Optionally trace the model graph into tensorboard once per epoch.
            if (
                create_graph_in_tensorboard
                and iiter == 1
                and summary_writer is not None
            ):
                if distributed:
                    _model = getattr(model, "module")
                else:
                    _model = model
                if _model is not None:
                    try:
                        _args = kwargs2args(_model.forward, batch)
                    except (ValueError, TypeError):
                        logging.warning(
                            "inpect.signature() is failed for the model. "
                            "The graph can't be added for tensorboard."
                        )
                    else:
                        try:
                            summary_writer.add_graph(
                                _model, _args, use_strict_trace=False
                            )
                        except Exception:
                            logging.warning(
                                "summary_writer.add_graph() "
                                "is failed for the model. "
                                "The graph can't be added for tensorboard."
                            )
                        del _args
                else:
                    logging.warning(
                        "model.module is not found (This should be a bug.)"
                    )
                del _model
            with autocast(
                scaler is not None,
                **autocast_args,
            ):
                with reporter.measure_time("forward_time"):
                    retval = model(**batch)
                    # Note(kamo):
                    # Supporting two patterns for the returned value from the model
                    # a. dict type
                    if isinstance(retval, dict):
                        loss = retval["loss"]
                        stats = retval["stats"]
                        weight = retval["weight"]
                        optim_idx = retval.get("optim_idx")
                        if optim_idx is not None and not isinstance(optim_idx, int):
                            if not isinstance(optim_idx, torch.Tensor):
                                raise RuntimeError(
                                    "optim_idx must be int or 1dim torch.Tensor, "
                                    f"but got {type(optim_idx)}"
                                )
                            if optim_idx.dim() >= 2:
                                raise RuntimeError(
                                    "optim_idx must be int or 1dim torch.Tensor, "
                                    f"but got {optim_idx.dim()}dim tensor"
                                )
                            if optim_idx.dim() == 1:
                                for v in optim_idx:
                                    if v != optim_idx[0]:
                                        raise RuntimeError(
                                            "optim_idx must be 1dim tensor "
                                            "having same values for all entries"
                                        )
                                optim_idx = optim_idx[0].item()
                            else:
                                optim_idx = optim_idx.item()
                    # b. tuple or list type
                    else:
                        loss, stats, weight = retval
                        optim_idx = None
                stats = {k: v for k, v in stats.items() if v is not None}
                if ngpu > 1 or distributed:
                    # Apply weighted averaging for loss and stats
                    loss = (loss * weight.type(loss.dtype)).sum()
                    # if distributed, this method can also apply all_reduce()
                    stats, weight = recursive_average(stats, weight, distributed)
                    # Now weight is summation over all workers
                    loss /= weight
                if distributed:
                    # NOTE(kamo): Multiply world_size because DistributedDataParallel
                    # automatically normalizes the gradient by world_size.
                    loss *= torch.distributed.get_world_size()
                # Scale for gradient accumulation so the effective step matches.
                loss /= accum_grad
            reporter.register(stats, weight)
            with reporter.measure_time("backward_time"):
                if scaler is not None:
                    # Scales loss. Calls backward() on scaled loss
                    # to create scaled gradients.
                    # Backward passes under autocast are not recommended.
                    # Backward ops run in the same dtype autocast chose
                    # for corresponding forward ops.
                    scaler.scale(loss).backward()
                else:
                    loss.backward()
            # Only every accum_grad-th mini-batch performs an optimizer step.
            if iiter % accum_grad == 0:
                if scaler is not None:
                    # Unscales the gradients of optimizer's assigned params in-place
                    for iopt, optimizer in enumerate(optimizers):
                        if optim_idx is not None and iopt != optim_idx:
                            continue
                        scaler.unscale_(optimizer)
                # gradient noise injection
                if grad_noise:
                    add_gradient_noise(
                        model,
                        reporter.get_total_count(),
                        duration=100,
                        eta=1.0,
                        scale_factor=0.55,
                    )
                # compute the gradient norm to check if it is normal or not
                grad_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(),
                    max_norm=grad_clip,
                    norm_type=grad_clip_type,
                )
                # PyTorch<=1.4, clip_grad_norm_ returns float value
                if not isinstance(grad_norm, torch.Tensor):
                    grad_norm = torch.tensor(grad_norm)
                if not torch.isfinite(grad_norm):
                    logging.warning(
                        f"The grad norm is {grad_norm}. Skipping updating the model."
                    )
                    # Must invoke scaler.update() if unscale_() is used in the iteration
                    # to avoid the following error:
                    #   RuntimeError: unscale_() has already been called
                    #   on this optimizer since the last update().
                    # Note that if the gradient has inf/nan values,
                    # scaler.step skips optimizer.step().
                    if scaler is not None:
                        for iopt, optimizer in enumerate(optimizers):
                            if optim_idx is not None and iopt != optim_idx:
                                continue
                            scaler.step(optimizer)
                            scaler.update()
                else:
                    reporter.register(
                        {
                            "grad_norm": grad_norm,
                            "clip": torch.where(
                                grad_norm > grad_clip,
                                grad_norm.new_tensor(100),
                                grad_norm.new_tensor(0),
                            ),
                            "loss_scale": scaler.get_scale() if scaler else 1.0,
                        }
                    )
                    all_steps_are_invalid = False
                    with reporter.measure_time("optim_step_time"):
                        for iopt, (optimizer, scheduler) in enumerate(
                            zip(optimizers, schedulers)
                        ):
                            if optim_idx is not None and iopt != optim_idx:
                                continue
                            if scaler is not None:
                                # scaler.step() first unscales the gradients of
                                # the optimizer's assigned params.
                                scaler.step(optimizer)
                                # Updates the scale for next iteration.
                                scaler.update()
                            else:
                                optimizer.step()
                            if isinstance(scheduler, AbsBatchStepScheduler):
                                scheduler.step()
                for iopt, optimizer in enumerate(optimizers):
                    if optim_idx is not None and iopt != optim_idx:
                        continue
                    optimizer.zero_grad()
            # Register lr and train/load time[sec/step],
            # where step refers to accum_grad * mini-batch
            reporter.register(
                dict(
                    {
                        f"optim{i}_lr{j}": pg["lr"]
                        for i, optimizer in enumerate(optimizers)
                        for j, pg in enumerate(optimizer.param_groups)
                        if "lr" in pg
                    },
                    train_time=time.perf_counter() - start_time,
                ),
            )
            start_time = time.perf_counter()
            # NOTE(kamo): Call log_message() after next()
            reporter.next()
            if iiter % log_interval == 0:
                logging.info(reporter.log_message(-log_interval))
                if summary_writer is not None:
                    reporter.tensorboard_add_scalar(summary_writer, -log_interval)
                if use_wandb:
                    reporter.wandb_log()
        else:
            # for/else: the iterator was exhausted normally — notify the other
            # ranks so they also stop waiting for further batches.
            if distributed:
                iterator_stop.fill_(1)
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
        return all_steps_are_invalid
    @classmethod
    @torch.no_grad()
    def validate_one_epoch(
        cls,
        model: torch.nn.Module,
        iterator: Iterable[Dict[str, torch.Tensor]],
        reporter: SubReporter,
        options: TrainerOptions,
        distributed_option: DistributedOption,
    ) -> None:
        """Evaluate the model on the validation set for one epoch.

        Only registers (weighted-averaged) stats into the reporter; no
        gradients are computed.
        """
        assert check_argument_types()
        ngpu = options.ngpu
        no_forward_run = options.no_forward_run
        distributed = distributed_option.distributed
        model.eval()
        # [For distributed] Because iteration counts are not always equals between
        # processes, send stop-flag to the other processes if iterator is finished
        iterator_stop = torch.tensor(0).to("cuda" if ngpu > 0 else "cpu")
        for utt_id, batch in iterator:
            assert isinstance(batch, dict), type(batch)
            if distributed:
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
                if iterator_stop > 0:
                    break
            batch["utt_id"] = utt_id
            batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
            if no_forward_run:
                continue
            retval = model(**batch)
            # The model may return either a dict or a (loss, stats, weight) tuple.
            if isinstance(retval, dict):
                stats = retval["stats"]
                weight = retval["weight"]
            else:
                _, stats, weight = retval
            if ngpu > 1 or distributed:
                # Apply weighted averaging for stats.
                # if distributed, this method can also apply all_reduce()
                stats, weight = recursive_average(stats, weight, distributed)
            reporter.register(stats, weight)
            reporter.next()
        else:
            # for/else: iterator exhausted normally — signal the other ranks.
            if distributed:
                iterator_stop.fill_(1)
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
    @classmethod
    @torch.no_grad()
    def plot_attention(
        cls,
        model: torch.nn.Module,
        output_dir: Optional[Path],
        summary_writer,
        iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],
        reporter: SubReporter,
        options: TrainerOptions,
    ) -> None:
        """Render attention weight plots for a few batches.

        For each attention dict returned by calculate_all_attentions(), a
        matplotlib figure per utterance is saved under ``output_dir`` and/or
        logged to tensorboard and wandb. Single-GPU / single-process only.
        """
        assert check_argument_types()
        import matplotlib
        ngpu = options.ngpu
        no_forward_run = options.no_forward_run
        # Non-interactive backend: figures are only written to files/loggers.
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
        from matplotlib.ticker import MaxNLocator
        model.eval()
        for ids, batch in iterator:
            assert isinstance(batch, dict), type(batch)
            assert len(next(iter(batch.values()))) == len(ids), (
                len(next(iter(batch.values()))),
                len(ids),
            )
            batch["utt_id"] = ids
            batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
            if no_forward_run:
                continue
            # 1. Forwarding model and gathering all attentions
            # calculate_all_attentions() uses single gpu only.
            att_dict = calculate_all_attentions(model, batch)
            # 2. Plot attentions: This part is slow due to matplotlib
            for k, att_list in att_dict.items():
                assert len(att_list) == len(ids), (len(att_list), len(ids))
                for id_, att_w in zip(ids, att_list):
                    if isinstance(att_w, torch.Tensor):
                        att_w = att_w.detach().cpu().numpy()
                    # Normalize to 3-D (heads/blocks, out_len, in_len).
                    if att_w.ndim == 2:
                        att_w = att_w[None]
                    elif att_w.ndim == 4:
                        # In multispkr_asr model case, the dimension could be 4.
                        att_w = np.concatenate(
                            [att_w[i] for i in range(att_w.shape[0])], axis=0
                        )
                    elif att_w.ndim > 4 or att_w.ndim == 1:
                        raise RuntimeError(f"Must be 2, 3 or 4 dimension: {att_w.ndim}")
                    w, h = plt.figaspect(1.0 / len(att_w))
                    fig = plt.Figure(figsize=(w * 1.3, h * 1.3))
                    axes = fig.subplots(1, len(att_w))
                    if len(att_w) == 1:
                        axes = [axes]
                    for ax, aw in zip(axes, att_w):
                        ax.imshow(aw.astype(np.float32), aspect="auto")
                        ax.set_title(f"{k}_{id_}")
                        ax.set_xlabel("Input")
                        ax.set_ylabel("Output")
                        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
                        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
                    if output_dir is not None:
                        p = output_dir / id_ / f"{k}.{reporter.get_epoch()}ep.png"
                        p.parent.mkdir(parents=True, exist_ok=True)
                        fig.savefig(p)
                    if summary_writer is not None:
                        summary_writer.add_figure(
                            f"{k}_{id_}", fig, reporter.get_epoch()
                        )
                    if options.use_wandb:
                        import wandb
                        wandb.log({f"attention plot/{k}_{id_}": wandb.Image(fig)})
            reporter.next()
| 35,902 | 39.752554 | 88 | py |
espnet | espnet-master/espnet2/train/abs_espnet_model.py | from abc import ABC, abstractmethod
from typing import Dict, Tuple
import torch
class AbsESPnetModel(torch.nn.Module, ABC):
    """The common abstract class among each task

    "ESPnetModel" refers to a class which inherits torch.nn.Module,
    makes the dnn-models forward as its member field,
    a.k.a delegate pattern,
    and defines "loss", "stats", and "weight" for the task.

    If you intend to implement a new task in ESPnet,
    the model must inherit this class.
    In other words, the "mediator" objects between
    our training system and your task class are
    just only these three values: loss, stats, and weight.

    Example:
        >>> from espnet2.tasks.abs_task import AbsTask
        >>> class YourESPnetModel(AbsESPnetModel):
        ...     def forward(self, input, input_lengths):
        ...         ...
        ...         return loss, stats, weight
        >>> class YourTask(AbsTask):
        ...     @classmethod
        ...     def build_model(cls, args: argparse.Namespace) -> YourESPnetModel:
    """
    @abstractmethod
    def forward(
        self, **batch: torch.Tensor
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Return (loss, stats, weight) computed from one mini-batch."""
        raise NotImplementedError
    @abstractmethod
    def collect_feats(self, **batch: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Return extracted features (for statistics collection), keyed by name."""
        raise NotImplementedError
| 1,366 | 32.341463 | 82 | py |
espnet | espnet-master/espnet2/samplers/abs_sampler.py | from abc import ABC, abstractmethod
from typing import Iterator, Tuple
from torch.utils.data import Sampler
class AbsSampler(Sampler, ABC):
    """Base class of batch samplers yielding tuples of utterance keys."""
    @abstractmethod
    def __len__(self) -> int:
        """Return the number of mini-batches."""
        raise NotImplementedError
    @abstractmethod
    def __iter__(self) -> Iterator[Tuple[str, ...]]:
        """Iterate over mini-batches, each a tuple of utterance keys."""
        raise NotImplementedError
    def generate(self, seed):
        # NOTE(review): `seed` is unused here; subclasses that shuffle are
        # presumably expected to override this and honor it — confirm.
        return list(self)
| 392 | 20.833333 | 52 | py |
espnet | espnet-master/espnet2/asr/espnet_model.py | import logging
from contextlib import contextmanager
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.transducer.error_calculator import ErrorCalculatorTransducer
from espnet2.asr_transducer.utils import get_transducer_task_io
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.e2e_asr_common import ErrorCalculator
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
# torch.cuda.amp.autocast exists only from torch 1.6; provide a no-op
# stand-in on older versions so call sites can use it unconditionally.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        """No-op fallback for torch.cuda.amp.autocast (torch < 1.6)."""
        yield
class ESPnetASRModel(AbsESPnetModel):
"""CTC-attention hybrid Encoder-Decoder model"""
    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        preencoder: Optional[AbsPreEncoder],
        encoder: AbsEncoder,
        postencoder: Optional[AbsPostEncoder],
        decoder: Optional[AbsDecoder],
        ctc: CTC,
        joint_network: Optional[torch.nn.Module],
        aux_ctc: dict = None,
        ctc_weight: float = 0.5,
        interctc_weight: float = 0.0,
        ignore_id: int = -1,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        report_cer: bool = True,
        report_wer: bool = True,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        transducer_multi_blank_durations: List = [],  # NOTE(review): mutable default; never mutated here, but `None` would be safer
        transducer_multi_blank_sigma: float = 0.05,
        # In a regular ESPnet recipe, <sos> and <eos> are both "<sos/eos>"
        # Pretrained HF Tokenizer needs custom sym_sos and sym_eos
        sym_sos: str = "<sos/eos>",
        sym_eos: str = "<sos/eos>",
        extract_feats_in_collect_stats: bool = True,
        lang_token_id: int = -1,
    ):
        """Build the hybrid CTC/attention (or transducer) ASR model.

        Args:
            vocab_size: Output vocabulary size.
            token_list: Token inventory; used to resolve special-symbol ids
                and by the error calculators.
            frontend, specaug, normalize, preencoder, encoder, postencoder,
                decoder: pluggable sub-modules; ``None`` disables a stage.
            ctc: CTC module (dropped when ``ctc_weight == 0.0``).
            joint_network: if not None, the model is trained as a transducer.
            ctc_weight: CTC loss weight in [0, 1]; 1.0 disables the attention
                decoder, 0.0 disables CTC.
            interctc_weight: intermediate-layer CTC weight in [0, 1).
            lang_token_id: if != -1, a language token id prepended to targets
                in the attention branch.
        """
        assert check_argument_types()
        assert 0.0 <= ctc_weight <= 1.0, ctc_weight
        assert 0.0 <= interctc_weight < 1.0, interctc_weight
        super().__init__()
        # NOTE (Shih-Lun): else case is for OpenAI Whisper ASR model,
        # which doesn't use <blank> token
        if sym_blank in token_list:
            self.blank_id = token_list.index(sym_blank)
        else:
            self.blank_id = 0
        if sym_sos in token_list:
            self.sos = token_list.index(sym_sos)
        else:
            self.sos = vocab_size - 1
        if sym_eos in token_list:
            self.eos = token_list.index(sym_eos)
        else:
            self.eos = vocab_size - 1
        self.vocab_size = vocab_size
        self.ignore_id = ignore_id
        self.ctc_weight = ctc_weight
        self.interctc_weight = interctc_weight
        self.aux_ctc = aux_ctc
        # Copy so later mutation of the caller's list cannot affect the model.
        self.token_list = token_list.copy()
        self.frontend = frontend
        self.specaug = specaug
        self.normalize = normalize
        self.preencoder = preencoder
        self.postencoder = postencoder
        self.encoder = encoder
        if not hasattr(self.encoder, "interctc_use_conditioning"):
            self.encoder.interctc_use_conditioning = False
        if self.encoder.interctc_use_conditioning:
            # Projects intermediate CTC posteriors back to the encoder width
            # ("self-conditioned CTC").
            self.encoder.conditioning_layer = torch.nn.Linear(
                vocab_size, self.encoder.output_size()
            )
        self.use_transducer_decoder = joint_network is not None
        self.error_calculator = None
        if self.use_transducer_decoder:
            self.decoder = decoder
            self.joint_network = joint_network
            if not transducer_multi_blank_durations:
                from warprnnt_pytorch import RNNTLoss
                self.criterion_transducer = RNNTLoss(
                    blank=self.blank_id,
                    fastemit_lambda=0.0,
                )
            else:
                from espnet2.asr.transducer.rnnt_multi_blank.rnnt_multi_blank import (
                    MultiblankRNNTLossNumba,
                )
                self.criterion_transducer = MultiblankRNNTLossNumba(
                    blank=self.blank_id,
                    big_blank_durations=transducer_multi_blank_durations,
                    sigma=transducer_multi_blank_sigma,
                    reduction="mean",
                    fastemit_lambda=0.0,
                )
                self.transducer_multi_blank_durations = transducer_multi_blank_durations
            if report_cer or report_wer:
                self.error_calculator_trans = ErrorCalculatorTransducer(
                    decoder,
                    joint_network,
                    token_list,
                    sym_space,
                    sym_blank,
                    report_cer=report_cer,
                    report_wer=report_wer,
                )
            else:
                self.error_calculator_trans = None
                if self.ctc_weight != 0:
                    self.error_calculator = ErrorCalculator(
                        token_list, sym_space, sym_blank, report_cer, report_wer
                    )
        else:
            # we set self.decoder = None in the CTC mode since
            # self.decoder parameters were never used and PyTorch complained
            # and threw an Exception in the multi-GPU experiment.
            # thanks Jeff Farris for pointing out the issue.
            if ctc_weight < 1.0:
                assert (
                    decoder is not None
                ), "decoder should not be None when attention is used"
            else:
                decoder = None
                logging.warning("Set decoder to none as ctc_weight==1.0")
            self.decoder = decoder
            self.criterion_att = LabelSmoothingLoss(
                size=vocab_size,
                padding_idx=ignore_id,
                smoothing=lsm_weight,
                normalize_length=length_normalized_loss,
            )
            if report_cer or report_wer:
                self.error_calculator = ErrorCalculator(
                    token_list, sym_space, sym_blank, report_cer, report_wer
                )
        if ctc_weight == 0.0:
            self.ctc = None
        else:
            self.ctc = ctc
        self.extract_feats_in_collect_stats = extract_feats_in_collect_stats
        self.is_encoder_whisper = "Whisper" in type(self.encoder).__name__
        if self.is_encoder_whisper:
            assert (
                self.frontend is None
            ), "frontend should be None when using full Whisper model"
        if lang_token_id != -1:
            # Stored as shape (1, 1) so it can be repeated per batch later.
            self.lang_token_id = torch.tensor([[lang_token_id]])
        else:
            self.lang_token_id = None
    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )
            text: (Batch, Length)
            text_lengths: (Batch,)
            kwargs: "utt_id" is among the input.

        Returns:
            (loss, stats, weight) made DataParallel-safe by force_gatherable.
        """
        assert text_lengths.dim() == 1, text_lengths.shape
        # Check that batch_size is unified
        assert (
            speech.shape[0]
            == speech_lengths.shape[0]
            == text.shape[0]
            == text_lengths.shape[0]
        ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
        batch_size = speech.shape[0]
        # Map the padding value -1 to ignore_id (a no-op with the default
        # ignore_id == -1).
        text[text == -1] = self.ignore_id
        # for data-parallel
        text = text[:, : text_lengths.max()]
        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        intermediate_outs = None
        if isinstance(encoder_out, tuple):
            # encode() returns (encoder_out, intermediate_outs) when
            # intermediate CTC layers are configured.
            intermediate_outs = encoder_out[1]
            encoder_out = encoder_out[0]
        loss_att, acc_att, cer_att, wer_att = None, None, None, None
        loss_ctc, cer_ctc = None, None
        loss_transducer, cer_transducer, wer_transducer = None, None, None
        stats = dict()
        # 1. CTC branch
        if self.ctc_weight != 0.0:
            loss_ctc, cer_ctc = self._calc_ctc_loss(
                encoder_out, encoder_out_lens, text, text_lengths
            )
            # Collect CTC branch stats
            stats["loss_ctc"] = loss_ctc.detach() if loss_ctc is not None else None
            stats["cer_ctc"] = cer_ctc
        # Intermediate CTC (optional)
        loss_interctc = 0.0
        if self.interctc_weight != 0.0 and intermediate_outs is not None:
            for layer_idx, intermediate_out in intermediate_outs:
                # we assume intermediate_out has the same length & padding
                # as those of encoder_out
                # use auxillary ctc data if specified
                loss_ic = None
                if self.aux_ctc is not None:
                    idx_key = str(layer_idx)
                    if idx_key in self.aux_ctc:
                        aux_data_key = self.aux_ctc[idx_key]
                        aux_data_tensor = kwargs.get(aux_data_key, None)
                        aux_data_lengths = kwargs.get(aux_data_key + "_lengths", None)
                        if aux_data_tensor is not None and aux_data_lengths is not None:
                            loss_ic, cer_ic = self._calc_ctc_loss(
                                intermediate_out,
                                encoder_out_lens,
                                aux_data_tensor,
                                aux_data_lengths,
                            )
                        else:
                            raise Exception(
                                "Aux. CTC tasks were specified but no data was found"
                            )
                if loss_ic is None:
                    # No auxiliary target for this layer: fall back to the
                    # primary transcription target.
                    loss_ic, cer_ic = self._calc_ctc_loss(
                        intermediate_out, encoder_out_lens, text, text_lengths
                    )
                loss_interctc = loss_interctc + loss_ic
                # Collect Intermedaite CTC stats
                stats["loss_interctc_layer{}".format(layer_idx)] = (
                    loss_ic.detach() if loss_ic is not None else None
                )
                stats["cer_interctc_layer{}".format(layer_idx)] = cer_ic
            loss_interctc = loss_interctc / len(intermediate_outs)
            # calculate whole encoder loss
            loss_ctc = (
                1 - self.interctc_weight
            ) * loss_ctc + self.interctc_weight * loss_interctc
        if self.use_transducer_decoder:
            # 2a. Transducer decoder branch
            (
                loss_transducer,
                cer_transducer,
                wer_transducer,
            ) = self._calc_transducer_loss(
                encoder_out,
                encoder_out_lens,
                text,
            )
            if loss_ctc is not None:
                loss = loss_transducer + (self.ctc_weight * loss_ctc)
            else:
                loss = loss_transducer
            # Collect Transducer branch stats
            stats["loss_transducer"] = (
                loss_transducer.detach() if loss_transducer is not None else None
            )
            stats["cer_transducer"] = cer_transducer
            stats["wer_transducer"] = wer_transducer
        else:
            # 2b. Attention decoder branch
            if self.ctc_weight != 1.0:
                loss_att, acc_att, cer_att, wer_att = self._calc_att_loss(
                    encoder_out, encoder_out_lens, text, text_lengths
                )
            # 3. CTC-Att loss definition
            if self.ctc_weight == 0.0:
                loss = loss_att
            elif self.ctc_weight == 1.0:
                loss = loss_ctc
            else:
                loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att
            # Collect Attn branch stats
            stats["loss_att"] = loss_att.detach() if loss_att is not None else None
            stats["acc"] = acc_att
            stats["cer"] = cer_att
            stats["wer"] = wer_att
        # Collect total loss stats
        stats["loss"] = loss.detach()
        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
def collect_feats(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: torch.Tensor,
text_lengths: torch.Tensor,
**kwargs,
) -> Dict[str, torch.Tensor]:
feats, feats_lengths = self._extract_feats(speech, speech_lengths)
return {"feats": feats, "feats_lengths": feats_lengths}
    def encode(
        self, speech: torch.Tensor, speech_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Frontend + Encoder. Note that this method is used by asr_inference.py

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )

        Returns:
            (encoder_out, encoder_out_lens); encoder_out becomes a tuple
            (encoder_out, intermediate_outs) when intermediate CTC is active.
        """
        # Feature extraction / augmentation / CMVN stay in fp32 even under AMP.
        with autocast(False):
            # 1. Extract feats
            feats, feats_lengths = self._extract_feats(speech, speech_lengths)
            # 2. Data augmentation
            if self.specaug is not None and self.training:
                feats, feats_lengths = self.specaug(feats, feats_lengths)
            # 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)
        # Pre-encoder, e.g. used for raw input data
        if self.preencoder is not None:
            feats, feats_lengths = self.preencoder(feats, feats_lengths)
        # 4. Forward encoder
        # feats: (Batch, Length, Dim)
        # -> encoder_out: (Batch, Length2, Dim2)
        if self.encoder.interctc_use_conditioning:
            # Self-conditioned CTC: the encoder needs the CTC module to
            # compute intermediate posteriors.
            encoder_out, encoder_out_lens, _ = self.encoder(
                feats, feats_lengths, ctc=self.ctc
            )
        else:
            encoder_out, encoder_out_lens, _ = self.encoder(feats, feats_lengths)
        intermediate_outs = None
        if isinstance(encoder_out, tuple):
            intermediate_outs = encoder_out[1]
            encoder_out = encoder_out[0]
        # Post-encoder, e.g. NLU
        if self.postencoder is not None:
            encoder_out, encoder_out_lens = self.postencoder(
                encoder_out, encoder_out_lens
            )
        assert encoder_out.size(0) == speech.size(0), (
            encoder_out.size(),
            speech.size(0),
        )
        if (
            getattr(self.encoder, "selfattention_layer_type", None) != "lf_selfattn"
            and not self.is_encoder_whisper
        ):
            assert encoder_out.size(-2) <= encoder_out_lens.max(), (
                encoder_out.size(),
                encoder_out_lens.max(),
            )
        if intermediate_outs is not None:
            return (encoder_out, intermediate_outs), encoder_out_lens
        return encoder_out, encoder_out_lens
def _extract_feats(
self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
assert speech_lengths.dim() == 1, speech_lengths.shape
# for data-parallel
speech = speech[:, : speech_lengths.max()]
if self.frontend is not None:
# Frontend
# e.g. STFT and Feature extract
# data_loader may send time-domain signal in this case
# speech (Batch, NSamples) -> feats: (Batch, NFrames, Dim)
feats, feats_lengths = self.frontend(speech, speech_lengths)
else:
# No frontend and no feature extract
feats, feats_lengths = speech, speech_lengths
return feats, feats_lengths
def nll(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
) -> torch.Tensor:
"""Compute negative log likelihood(nll) from transformer-decoder
Normally, this function is called in batchify_nll.
Args:
encoder_out: (Batch, Length, Dim)
encoder_out_lens: (Batch,)
ys_pad: (Batch, Length)
ys_pad_lens: (Batch,)
"""
ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
ys_in_lens = ys_pad_lens + 1
# 1. Forward decoder
decoder_out, _ = self.decoder(
encoder_out, encoder_out_lens, ys_in_pad, ys_in_lens
) # [batch, seqlen, dim]
batch_size = decoder_out.size(0)
decoder_num_class = decoder_out.size(2)
# nll: negative log-likelihood
nll = torch.nn.functional.cross_entropy(
decoder_out.view(-1, decoder_num_class),
ys_out_pad.view(-1),
ignore_index=self.ignore_id,
reduction="none",
)
nll = nll.view(batch_size, -1)
nll = nll.sum(dim=1)
assert nll.size(0) == batch_size
return nll
def batchify_nll(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
batch_size: int = 100,
):
"""Compute negative log likelihood(nll) from transformer-decoder
To avoid OOM, this fuction seperate the input into batches.
Then call nll for each batch and combine and return results.
Args:
encoder_out: (Batch, Length, Dim)
encoder_out_lens: (Batch,)
ys_pad: (Batch, Length)
ys_pad_lens: (Batch,)
batch_size: int, samples each batch contain when computing nll,
you may change this to avoid OOM or increase
GPU memory usage
"""
total_num = encoder_out.size(0)
if total_num <= batch_size:
nll = self.nll(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)
else:
nll = []
start_idx = 0
while True:
end_idx = min(start_idx + batch_size, total_num)
batch_encoder_out = encoder_out[start_idx:end_idx, :, :]
batch_encoder_out_lens = encoder_out_lens[start_idx:end_idx]
batch_ys_pad = ys_pad[start_idx:end_idx, :]
batch_ys_pad_lens = ys_pad_lens[start_idx:end_idx]
batch_nll = self.nll(
batch_encoder_out,
batch_encoder_out_lens,
batch_ys_pad,
batch_ys_pad_lens,
)
nll.append(batch_nll)
start_idx = end_idx
if start_idx == total_num:
break
nll = torch.cat(nll)
assert nll.size(0) == total_num
return nll
def _calc_att_loss(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
):
if hasattr(self, "lang_token_id") and self.lang_token_id is not None:
ys_pad = torch.cat(
[
self.lang_token_id.repeat(ys_pad.size(0), 1).to(ys_pad.device),
ys_pad,
],
dim=1,
)
ys_pad_lens += 1
ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
ys_in_lens = ys_pad_lens + 1
# 1. Forward decoder
decoder_out, _ = self.decoder(
encoder_out, encoder_out_lens, ys_in_pad, ys_in_lens
)
# 2. Compute attention loss
loss_att = self.criterion_att(decoder_out, ys_out_pad)
acc_att = th_accuracy(
decoder_out.view(-1, self.vocab_size),
ys_out_pad,
ignore_label=self.ignore_id,
)
# Compute cer/wer using attention-decoder
if self.training or self.error_calculator is None:
cer_att, wer_att = None, None
else:
ys_hat = decoder_out.argmax(dim=-1)
cer_att, wer_att = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
return loss_att, acc_att, cer_att, wer_att
def _calc_ctc_loss(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
):
# Calc CTC loss
loss_ctc = self.ctc(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)
# Calc CER using CTC
cer_ctc = None
if not self.training and self.error_calculator is not None:
ys_hat = self.ctc.argmax(encoder_out).data
cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
return loss_ctc, cer_ctc
    def _calc_transducer_loss(
        self,
        encoder_out: torch.Tensor,
        encoder_out_lens: torch.Tensor,
        labels: torch.Tensor,
    ):
        """Compute Transducer loss.

        Args:
            encoder_out: Encoder output sequences. (B, T, D_enc)
            encoder_out_lens: Encoder output sequences lengths. (B,)
            labels: Label ID sequences. (B, L)

        Return:
            loss_transducer: Transducer loss value.
            cer_transducer: Character error rate for Transducer.
            wer_transducer: Word Error Rate for Transducer.
        """
        # Build the prediction-network input/targets from padded labels.
        decoder_in, target, t_len, u_len = get_transducer_task_io(
            labels,
            encoder_out_lens,
            ignore_id=self.ignore_id,
            blank_id=self.blank_id,
        )
        self.decoder.set_device(encoder_out.device)
        decoder_out = self.decoder(decoder_in)
        # Joint network over all (time, label) pairs via broadcasting:
        # (B, T, 1, D) x (B, 1, U, D) -> (B, T, U, vocab) lattice.
        joint_out = self.joint_network(
            encoder_out.unsqueeze(2), decoder_out.unsqueeze(1)
        )
        loss_transducer = self.criterion_transducer(
            joint_out,
            target,
            t_len,
            u_len,
        )
        cer_transducer, wer_transducer = None, None
        if not self.training and self.error_calculator_trans is not None:
            # Error rates are computed only at evaluation time.
            cer_transducer, wer_transducer = self.error_calculator_trans(
                encoder_out, target
            )
        return loss_transducer, cer_transducer, wer_transducer
def _calc_batch_ctc_loss(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: torch.Tensor,
text_lengths: torch.Tensor,
):
if self.ctc is None:
return
assert text_lengths.dim() == 1, text_lengths.shape
# Check that batch_size is unified
assert (
speech.shape[0]
== speech_lengths.shape[0]
== text.shape[0]
== text_lengths.shape[0]
), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
# for data-parallel
text = text[:, : text_lengths.max()]
# 1. Encoder
encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
if isinstance(encoder_out, tuple):
encoder_out = encoder_out[0]
# Calc CTC loss
do_reduce = self.ctc.reduce
self.ctc.reduce = False
loss_ctc = self.ctc(encoder_out, encoder_out_lens, text, text_lengths)
self.ctc.reduce = do_reduce
return loss_ctc
| 24,373 | 35.216939 | 88 | py |
espnet | espnet-master/espnet2/asr/discrete_asr_espnet_model.py | from contextlib import contextmanager
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.mt.espnet_model import ESPnetMTModel
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet.nets.e2e_asr_common import ErrorCalculator as ASRErrorCalculator
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
# torch.cuda.amp.autocast exists only from torch 1.6; provide a no-op
# stand-in on older versions so call sites can use it unconditionally.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        """No-op fallback for torch.cuda.amp.autocast (torch < 1.6)."""
        yield
class ESPnetDiscreteASRModel(ESPnetMTModel):
"""Encoder-Decoder model"""
    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        preencoder: Optional[AbsPreEncoder],
        encoder: AbsEncoder,
        postencoder: Optional[AbsPostEncoder],
        decoder: AbsDecoder,
        ctc: Optional[CTC],
        ctc_weight: float = 0.5,
        interctc_weight: float = 0.0,
        src_vocab_size: int = 0,
        src_token_list: Union[Tuple[str, ...], List[str]] = [],  # NOTE(review): mutable default; passed through unmutated, but `()` would be safer
        ignore_id: int = -1,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        report_bleu: bool = True,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        extract_feats_in_collect_stats: bool = True,
        share_decoder_input_output_embed: bool = False,
        share_encoder_decoder_input_embed: bool = False,
    ):
        """Build the discrete-unit ASR model on top of the MT encoder-decoder.

        Most construction is delegated to ESPnetMTModel.__init__; this class
        adds SpecAug, a CTC branch weighted by ``ctc_weight`` (dropped when
        0.0), optional intermediate CTC, and an ASR error calculator.
        """
        assert check_argument_types()
        assert 0.0 <= ctc_weight <= 1.0, ctc_weight
        super().__init__(
            vocab_size=vocab_size,
            token_list=token_list,
            frontend=frontend,
            preencoder=preencoder,
            encoder=encoder,
            postencoder=postencoder,
            decoder=decoder,
            src_vocab_size=src_vocab_size,
            src_token_list=src_token_list,
            ignore_id=ignore_id,
            lsm_weight=lsm_weight,
            length_normalized_loss=length_normalized_loss,
            report_bleu=report_bleu,
            sym_space=sym_space,
            sym_blank=sym_blank,
            extract_feats_in_collect_stats=extract_feats_in_collect_stats,
            share_decoder_input_output_embed=share_decoder_input_output_embed,
            share_encoder_decoder_input_embed=share_encoder_decoder_input_embed,
        )
        self.specaug = specaug
        # note that eos is the same as sos (equivalent ID)
        self.blank_id = 0
        self.ctc_weight = ctc_weight
        self.interctc_weight = interctc_weight
        if ctc_weight == 0.0:
            self.ctc = None
        else:
            self.ctc = ctc
        if report_bleu:
            self.error_calculator = ASRErrorCalculator(
                token_list, sym_space, sym_blank, True, True
            )
        if not hasattr(self.encoder, "interctc_use_conditioning"):
            self.encoder.interctc_use_conditioning = False
        if self.encoder.interctc_use_conditioning:
            # Projects intermediate CTC posteriors back to the encoder width
            # ("self-conditioned CTC").
            self.encoder.conditioning_layer = torch.nn.Linear(
                vocab_size, self.encoder.output_size()
            )
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        src_text: torch.Tensor,
        src_text_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            text: (Batch, Length)
            text_lengths: (Batch,)
            src_text: (Batch, length)
            src_text_lengths: (Batch,)
            kwargs: "utt_id" is among the input.

        Returns:
            (loss, stats, weight) made DataParallel-safe by force_gatherable.
        """
        assert text_lengths.dim() == 1, text_lengths.shape
        # Check that batch_size is unified
        assert (
            text.shape[0]
            == text_lengths.shape[0]
            == src_text.shape[0]
            == src_text_lengths.shape[0]
        ), (text.shape, text_lengths.shape, src_text.shape, src_text_lengths.shape)
        batch_size = src_text.shape[0]
        # for data-parallel
        text = text[:, : text_lengths.max()]
        src_text = src_text[:, : src_text_lengths.max()]
        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(src_text, src_text_lengths)
        intermediate_outs = None
        if isinstance(encoder_out, tuple):
            # encode() returns (encoder_out, intermediate_outs) when
            # intermediate CTC layers are configured.
            intermediate_outs = encoder_out[1]
            encoder_out = encoder_out[0]
        loss_ctc, cer_ctc = None, None
        stats = dict()
        # 1. CTC branch
        if self.ctc_weight != 0.0:
            loss_ctc, cer_ctc = self._calc_ctc_loss(
                encoder_out, encoder_out_lens, text, text_lengths
            )
            # Collect CTC branch stats
            stats["loss_ctc"] = loss_ctc.detach() if loss_ctc is not None else None
            stats["cer_ctc"] = cer_ctc
        # Intermediate CTC (optional)
        loss_interctc = 0.0
        if self.interctc_weight != 0.0 and intermediate_outs is not None:
            for layer_idx, intermediate_out in intermediate_outs:
                # we assume intermediate_out has the same length & padding
                # as those of encoder_out
                loss_ic, cer_ic = self._calc_ctc_loss(
                    intermediate_out, encoder_out_lens, text, text_lengths
                )
                loss_interctc = loss_interctc + loss_ic
                # Collect Intermedaite CTC stats
                stats["loss_interctc_layer{}".format(layer_idx)] = (
                    loss_ic.detach() if loss_ic is not None else None
                )
                stats["cer_interctc_layer{}".format(layer_idx)] = cer_ic
            loss_interctc = loss_interctc / len(intermediate_outs)
            # calculate whole encoder loss
            loss_ctc = (
                1 - self.interctc_weight
            ) * loss_ctc + self.interctc_weight * loss_interctc
        # 2a. Attention-decoder branch (MT)
        loss_att, acc_att, cer_att, wer_att = self._calc_att_loss(
            encoder_out, encoder_out_lens, text, text_lengths
        )
        # 3. Loss computation
        if self.ctc_weight > 0.0:
            loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att
        else:
            loss = loss_att
        stats["loss_att"] = loss_att.detach() if loss_att is not None else None
        stats["acc"] = acc_att
        stats["cer"] = cer_att
        stats["wer"] = wer_att
        stats["loss"] = loss.detach()
        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
    def encode(
        self, src_text: torch.Tensor, src_text_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Frontend + Encoder. Note that this method is used by mt_inference.py

        Args:
            src_text: (Batch, Length, ...)
            src_text_lengths: (Batch, )

        Returns:
            (encoder_out, encoder_out_lens); encoder_out becomes a tuple
            (encoder_out, intermediate_outs) when intermediate CTC is active.
        """
        # Feature extraction / augmentation stay in fp32 even under AMP.
        with autocast(False):
            # 1. Extract feats
            feats, feats_lengths = self._extract_feats(src_text, src_text_lengths)
            # 2. Data augmentation
            if self.specaug is not None and self.training:
                feats, feats_lengths = self.specaug(feats, feats_lengths)
        # Pre-encoder, e.g. used for raw input data
        if self.preencoder is not None:
            feats, feats_lengths = self.preencoder(feats, feats_lengths)
        # 4. Forward encoder
        # feats: (Batch, Length, Dim)
        # -> encoder_out: (Batch, Length2, Dim2)
        if self.encoder.interctc_use_conditioning:
            # Self-conditioned CTC: the encoder needs the CTC module to
            # compute intermediate posteriors.
            encoder_out, encoder_out_lens, _ = self.encoder(
                feats, feats_lengths, ctc=self.ctc
            )
        else:
            encoder_out, encoder_out_lens, _ = self.encoder(feats, feats_lengths)
        intermediate_outs = None
        if isinstance(encoder_out, tuple):
            intermediate_outs = encoder_out[1]
            encoder_out = encoder_out[0]
        # Post-encoder, e.g. NLU
        if self.postencoder is not None:
            encoder_out, encoder_out_lens = self.postencoder(
                encoder_out, encoder_out_lens
            )
        assert encoder_out.size(0) == src_text.size(0), (
            encoder_out.size(),
            src_text.size(0),
        )
        assert encoder_out.size(1) <= encoder_out_lens.max(), (
            encoder_out.size(),
            encoder_out_lens.max(),
        )
        if intermediate_outs is not None:
            return (encoder_out, intermediate_outs), encoder_out_lens
        return encoder_out, encoder_out_lens
def _calc_att_loss(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
):
ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
ys_in_lens = ys_pad_lens + 1
# 1. Forward decoder
decoder_out, _ = self.decoder(
encoder_out, encoder_out_lens, ys_in_pad, ys_in_lens
)
# 2. Compute attention loss
loss_att = self.criterion_mt(decoder_out, ys_out_pad)
acc_att = th_accuracy(
decoder_out.view(-1, self.vocab_size),
ys_out_pad,
ignore_label=self.ignore_id,
)
# Compute cer/wer using attention-decoder
if self.training or self.error_calculator is None:
cer_att, wer_att = None, None
else:
ys_hat = decoder_out.argmax(dim=-1)
cer_att, wer_att = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
return loss_att, acc_att, cer_att, wer_att
def _calc_ctc_loss(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
):
# Calc CTC loss
loss_ctc = self.ctc(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)
# Calc CER using CTC
cer_ctc = None
if not self.training and self.error_calculator is not None:
ys_hat = self.ctc.argmax(encoder_out).data
cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
return loss_ctc, cer_ctc
| 11,037 | 35.190164 | 87 | py |
espnet | espnet-master/espnet2/asr/pit_espnet_model.py | import itertools
from collections import defaultdict
from contextlib import contextmanager
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.espnet_model import ESPnetASRModel as SingleESPnetASRModel
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.torch_utils.device_funcs import force_gatherable
# torch.cuda.amp.autocast exists only from torch 1.6; provide a no-op
# stand-in on older versions so call sites can use it unconditionally.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        """No-op fallback for torch.cuda.amp.autocast (torch < 1.6)."""
        yield
class PITLossWrapper(AbsLossWrapper):
    """Permutation Invariant Training (PIT) wrapper around a pairwise loss.

    Evaluates ``criterion_fn`` for every permutation of the reference
    streams against the inference streams and, per sample, keeps the
    permutation yielding the smallest loss.
    """

    def __init__(self, criterion_fn: Callable, num_ref: int):
        """Initialize the wrapper.

        Args:
            criterion_fn: Pairwise loss ``f(inf, inf_lens, ref, ref_lens)``
                returning a per-sample loss tensor of shape (batch,).
            num_ref: Number of reference (and inference) streams.
        """
        super().__init__()
        self.criterion_fn = criterion_fn
        self.num_ref = num_ref

    def forward(
        self,
        inf: torch.Tensor,
        inf_lens: torch.Tensor,
        ref: torch.Tensor,
        ref_lens: torch.Tensor,
        others: Dict = None,
    ):
        """PITLoss Wrapper function. Similar to espnet2/enh/loss/wrapper/pit_solver.py

        Args:
            inf: torch.Tensor, (batch, num_inf, ...)
            inf_lens: torch.Tensor, (batch, num_inf, ...)
            ref: torch.Tensor, (batch, num_ref, ...)
            ref_lens: torch.Tensor, (batch, num_ref, ...)
            others: Unused; kept for interface compatibility.

        Returns:
            Tuple of the batch mean of the per-sample minimum losses and the
            optimal permutation indices of shape (batch, num_ref).
        """
        assert (
            self.num_ref
            == inf.shape[1]
            == inf_lens.shape[1]
            == ref.shape[1]
            == ref_lens.shape[1]
        ), (self.num_ref, inf.shape, inf_lens.shape, ref.shape, ref_lens.shape)
        all_permutations = torch.as_tensor(
            list(itertools.permutations(range(self.num_ref), r=self.num_ref))
        )
        # Stats published by criterion_fn are accumulated here as a side
        # effect of pre_hook (currently collected but not returned).
        stats = defaultdict(list)

        def pre_hook(func, *args, **kwargs):
            ret = func(*args, **kwargs)
            for k, v in getattr(self.criterion_fn, "stats", {}).items():
                stats[k].append(v)
            return ret

        def pair_loss(permutation):
            # Average pairwise losses for one candidate permutation,
            # matching reference stream i with inference stream j.
            return sum(
                [
                    pre_hook(
                        self.criterion_fn,
                        inf[:, j],
                        inf_lens[:, j],
                        ref[:, i],
                        ref_lens[:, i],
                    )
                    for i, j in enumerate(permutation)
                ]
            ) / len(permutation)

        losses = torch.stack(
            [pair_loss(p) for p in all_permutations], dim=1
        )  # (batch_size, num_perm)
        min_losses, min_ids = torch.min(losses, dim=1)
        min_ids = min_ids.cpu()  # because all_permutations is a cpu tensor.
        opt_perm = all_permutations[min_ids]  # (batch_size, num_ref)
        return min_losses.mean(), opt_perm

    @classmethod
    def permutate(cls, perm, *args):
        """Reorder each tensor in ``args`` along dim 1 according to ``perm``.

        Args:
            perm: Optimal permutation indices of shape (batch, num_ref).
            *args: Tensors of shape (batch, num_inf, ...), all sharing the
                same leading two dimensions.

        Returns:
            List of tensors with the second dimension permuted per sample.
        """
        ret = []
        batch_size = None
        num_ref = None
        for arg in args:  # (batch, num_inf, ...)
            if batch_size is None:
                batch_size, num_ref = arg.shape[:2]
            else:
                assert torch.Size([batch_size, num_ref]) == arg.shape[:2]
            ret.append(
                torch.stack(
                    [arg[torch.arange(batch_size), perm[:, i]] for i in range(num_ref)],
                    dim=1,
                )
            )
        return ret
class ESPnetASRModel(SingleESPnetASRModel):
    """CTC-attention hybrid Encoder-Decoder model for multi-speaker ASR.

    Extends the single-speaker model with Permutation Invariant Training
    (PIT) on the CTC branch: the best speaker permutation is decided by a
    first CTC pass, then the losses are computed on the permuted outputs.
    """
    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        preencoder: Optional[AbsPreEncoder],
        encoder: AbsEncoder,
        postencoder: Optional[AbsPostEncoder],
        decoder: Optional[AbsDecoder],
        ctc: CTC,
        joint_network: Optional[torch.nn.Module],
        ctc_weight: float = 0.5,
        interctc_weight: float = 0.0,
        ignore_id: int = -1,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        report_cer: bool = True,
        report_wer: bool = True,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        # In a regular ESPnet recipe, <sos> and <eos> are both "<sos/eos>"
        # Pretrained HF Tokenizer needs custom sym_sos and sym_eos
        sym_sos: str = "<sos/eos>",
        sym_eos: str = "<sos/eos>",
        extract_feats_in_collect_stats: bool = True,
        lang_token_id: int = -1,
        # num_inf: the number of inferences (= number of outputs of the model)
        # num_ref: the number of references (= number of groundtruth seqs)
        num_inf: int = 1,
        num_ref: int = 1,
    ):
        assert check_argument_types()
        # CTC must carry nonzero weight: the PIT permutation is CTC-based.
        assert 0.0 < ctc_weight <= 1.0, ctc_weight
        assert interctc_weight == 0.0, "interctc is not supported for multispeaker ASR"
        super(ESPnetASRModel, self).__init__(
            vocab_size=vocab_size,
            token_list=token_list,
            frontend=frontend,
            specaug=specaug,
            normalize=normalize,
            preencoder=preencoder,
            encoder=encoder,
            postencoder=postencoder,
            decoder=decoder,
            ctc=ctc,
            joint_network=joint_network,
            ctc_weight=ctc_weight,
            interctc_weight=interctc_weight,
            ignore_id=ignore_id,
            lsm_weight=lsm_weight,
            length_normalized_loss=length_normalized_loss,
            report_cer=report_cer,
            report_wer=report_wer,
            sym_space=sym_space,
            sym_blank=sym_blank,
            sym_sos=sym_sos,
            sym_eos=sym_eos,
            extract_feats_in_collect_stats=extract_feats_in_collect_stats,
            lang_token_id=lang_token_id,
        )
        assert num_inf == num_ref, "Current PIT loss wrapper requires num_inf=num_ref"
        self.num_inf = num_inf
        self.num_ref = num_ref
        # First-pass CTC wrapped with PIT; only used to pick the permutation.
        self.pit_ctc = PITLossWrapper(criterion_fn=self.ctc, num_ref=num_ref)
    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )
            text: (Batch, Length) transcription of the first speaker
            text_lengths: (Batch,)
            kwargs: "utt_id" is among the input; transcriptions of further
                speakers arrive as "text_spk{n}" / "text_spk{n}_lengths".
        """
        assert text_lengths.dim() == 1, text_lengths.shape
        # Check that batch_size is unified
        assert (
            speech.shape[0]
            == speech_lengths.shape[0]
            == text.shape[0]
            == text_lengths.shape[0]
        ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
        batch_size = speech.shape[0]
        # for data-parallel
        # `text` holds speaker 1; text_spk2..text_spkN come from kwargs.
        text_ref = [text] + [
            kwargs["text_spk{}".format(spk + 1)] for spk in range(1, self.num_ref)
        ]
        text_ref_lengths = [text_lengths] + [
            kwargs.get("text_spk{}_lengths".format(spk + 1), None)
            for spk in range(1, self.num_ref)
        ]
        # NOTE(review): the assert message below is a generator expression,
        # so the shapes will not actually be rendered if the assert fires.
        assert all(ref_lengths.dim() == 1 for ref_lengths in text_ref_lengths), (
            ref_lengths.shape for ref_lengths in text_ref_lengths
        )
        text_lengths = torch.stack(text_ref_lengths, dim=1)  # (batch, num_ref)
        text_length_max = text_lengths.max()
        # pad text sequences of different speakers to the same length
        text = torch.stack(
            [
                torch.nn.functional.pad(
                    ref, (0, text_length_max - ref.shape[1]), value=self.ignore_id
                )
                for ref in text_ref
            ],
            dim=1,
        )  # (batch, num_ref, seq_len)
        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        loss_att, acc_att, cer_att, wer_att = None, None, None, None
        loss_ctc, cer_ctc = None, None
        loss_transducer, cer_transducer, wer_transducer = None, None, None
        stats = dict()
        # 1. CTC branch
        if self.ctc_weight != 0.0:
            # CTC is computed twice
            # This 1st ctc calculation is only used to decide permutation
            _, perm = self.pit_ctc(encoder_out, encoder_out_lens, text, text_lengths)
            encoder_out, encoder_out_lens = PITLossWrapper.permutate(
                perm, encoder_out, encoder_out_lens
            )
            if text.dim() == 3:  # combine all speakers hidden vectors and labels.
                # Fold the speaker dim into the batch dim: (B*num_ref, ...).
                encoder_out = encoder_out.reshape(-1, *encoder_out.shape[2:])
                encoder_out_lens = encoder_out_lens.reshape(-1)
                text = text.reshape(-1, text.shape[-1])
                text_lengths = text_lengths.reshape(-1)
            # This 2nd ctc calculation is to compute the loss
            loss_ctc, cer_ctc = self._calc_ctc_loss(
                encoder_out, encoder_out_lens, text, text_lengths
            )
            loss_ctc = loss_ctc.sum()
            # Collect CTC branch stats
            stats["loss_ctc"] = loss_ctc.detach() if loss_ctc is not None else None
            stats["cer_ctc"] = cer_ctc
        if self.use_transducer_decoder:
            # 2a. Transducer decoder branch
            (
                loss_transducer,
                cer_transducer,
                wer_transducer,
            ) = self._calc_transducer_loss(
                encoder_out,
                encoder_out_lens,
                text,
            )
            if loss_ctc is not None:
                loss = loss_transducer + (self.ctc_weight * loss_ctc)
            else:
                loss = loss_transducer
            # Collect Transducer branch stats
            stats["loss_transducer"] = (
                loss_transducer.detach() if loss_transducer is not None else None
            )
            stats["cer_transducer"] = cer_transducer
            stats["wer_transducer"] = wer_transducer
        else:
            # 2b. Attention decoder branch
            if self.ctc_weight != 1.0:
                loss_att, acc_att, cer_att, wer_att = self._calc_att_loss(
                    encoder_out, encoder_out_lens, text, text_lengths
                )
            # 3. CTC-Att loss definition
            if self.ctc_weight == 0.0:
                loss = loss_att
            elif self.ctc_weight == 1.0:
                loss = loss_ctc
            else:
                loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att
            # Collect Attn branch stats
            stats["loss_att"] = loss_att.detach() if loss_att is not None else None
            stats["acc"] = acc_att
            stats["cer"] = cer_att
            stats["wer"] = wer_att
        # Collect total loss stats
        stats["loss"] = loss.detach()
        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
| 11,924 | 35.136364 | 88 | py |
espnet | espnet-master/espnet2/asr/ctc.py | import torch
import torch.nn.functional as F
from typeguard import check_argument_types
class CTC(torch.nn.Module):
    """CTC loss module with a linear projection from encoder features.

    Args:
        odim: Output (vocabulary) dimension.
        encoder_output_size: Number of encoder projection units.
        dropout_rate: Dropout rate applied to the encoder output (0.0 ~ 1.0).
        ctc_type: Loss implementation, either "builtin" or "gtnctc".
        reduce: If True, reduce the per-utterance CTC losses to a scalar.
        ignore_nan_grad: Same as zero_infinity (kept for backward compatibility).
        zero_infinity: Whether to zero infinite losses and the associated gradients.
    """

    def __init__(
        self,
        odim: int,
        encoder_output_size: int,
        dropout_rate: float = 0.0,
        ctc_type: str = "builtin",
        reduce: bool = True,
        ignore_nan_grad: bool = None,
        zero_infinity: bool = True,
    ):
        assert check_argument_types()
        super().__init__()
        self.dropout_rate = dropout_rate
        # Linear projection from encoder features onto the vocabulary.
        self.ctc_lo = torch.nn.Linear(encoder_output_size, odim)
        self.ctc_type = ctc_type
        # The legacy option name, when explicitly given, overrides zero_infinity.
        if ignore_nan_grad is not None:
            zero_infinity = ignore_nan_grad
        if self.ctc_type == "builtin":
            self.ctc_loss = torch.nn.CTCLoss(
                reduction="none", zero_infinity=zero_infinity
            )
        elif self.ctc_type == "gtnctc":
            from espnet.nets.pytorch_backend.gtn_ctc import GTNCTCLossFunction

            self.ctc_loss = GTNCTCLossFunction.apply
        else:
            raise ValueError(f'ctc_type must be "builtin" or "gtnctc": {self.ctc_type}')
        self.reduce = reduce

    def loss_fn(self, th_pred, th_target, th_ilen, th_olen) -> torch.Tensor:
        """Evaluate the configured CTC loss on already-projected logits."""
        if self.ctc_type == "gtnctc":
            log_probs = torch.nn.functional.log_softmax(th_pred, dim=2)
            return self.ctc_loss(log_probs, th_target, th_ilen, 0, "none")
        if self.ctc_type == "builtin":
            loss = self.ctc_loss(th_pred.log_softmax(2), th_target, th_ilen, th_olen)
            n_batch = th_pred.size(1)
            # Divide by the batch size whether or not we reduce to a scalar.
            return loss.sum() / n_batch if self.reduce else loss / n_batch
        raise NotImplementedError

    def forward(self, hs_pad, hlens, ys_pad, ys_lens):
        """Calculate CTC loss.

        Args:
            hs_pad: Batch of padded hidden state sequences (B, Tmax, D).
            hlens: Batch of lengths of hidden state sequences (B).
            ys_pad: Batch of padded character id sequence tensor (B, Lmax).
            ys_lens: Batch of lengths of character sequence (B).
        """
        # Project: (B, L, NProj) -> (B, L, Nvocab), with dropout on the input.
        ys_hat = self.ctc_lo(F.dropout(hs_pad, p=self.dropout_rate))
        if self.ctc_type == "gtnctc":
            # gtn expects list form for ys; strip the -1 padding.
            ys_true = [y[y != -1] for y in ys_pad]
        else:
            # Builtin CTCLoss wants time-major logits and flattened targets.
            ys_hat = ys_hat.transpose(0, 1)  # (B, L, D) -> (L, B, D)
            ys_true = torch.cat([ys_pad[i, :l] for i, l in enumerate(ys_lens)])
        return self.loss_fn(ys_hat, ys_true, hlens, ys_lens).to(
            device=hs_pad.device, dtype=hs_pad.dtype
        )

    def softmax(self, hs_pad):
        """Return per-frame softmax posteriors.

        Args:
            hs_pad: 3d tensor (B, Tmax, eprojs).
        Returns:
            torch.Tensor: softmax applied 3d tensor (B, Tmax, odim).
        """
        return F.softmax(self.ctc_lo(hs_pad), dim=2)

    def log_softmax(self, hs_pad):
        """Return per-frame log-softmax posteriors.

        Args:
            hs_pad: 3d tensor (B, Tmax, eprojs).
        Returns:
            torch.Tensor: log softmax applied 3d tensor (B, Tmax, odim).
        """
        return F.log_softmax(self.ctc_lo(hs_pad), dim=2)

    def argmax(self, hs_pad):
        """Return per-frame argmax label ids.

        Args:
            hs_pad: 3d tensor (B, Tmax, eprojs).
        Returns:
            torch.Tensor: argmax applied 2d tensor (B, Tmax).
        """
        return torch.argmax(self.ctc_lo(hs_pad), dim=2)
| 4,223 | 32 | 88 | py |
espnet | espnet-master/espnet2/asr/maskctc_model.py | import logging
from contextlib import contextmanager
from itertools import groupby
from typing import Dict, List, Optional, Tuple, Union
import numpy
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.mlm_decoder import MLMDecoder
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.espnet_model import ESPnetASRModel
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet.nets.beam_search import Hypothesis
from espnet.nets.e2e_asr_common import ErrorCalculator
from espnet.nets.pytorch_backend.maskctc.add_mask_token import mask_uniform
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
if V(torch.__version__) >= V("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch<1.6.0
@contextmanager
def autocast(enabled=True):
yield
class MaskCTCModel(ESPnetASRModel):
    """Hybrid CTC/Masked LM Encoder-Decoder model (Mask-CTC).

    Replaces the attention decoder loss of the parent model with a masked
    language model (MLM) loss and appends a dedicated <mask> token to the
    vocabulary.
    """
    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        preencoder: Optional[AbsPreEncoder],
        encoder: AbsEncoder,
        postencoder: Optional[AbsPostEncoder],
        decoder: MLMDecoder,
        ctc: CTC,
        joint_network: Optional[torch.nn.Module] = None,
        ctc_weight: float = 0.5,
        interctc_weight: float = 0.0,
        ignore_id: int = -1,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        report_cer: bool = True,
        report_wer: bool = True,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        sym_mask: str = "<mask>",
        extract_feats_in_collect_stats: bool = True,
    ):
        assert check_argument_types()
        super().__init__(
            vocab_size=vocab_size,
            token_list=token_list,
            frontend=frontend,
            specaug=specaug,
            normalize=normalize,
            preencoder=preencoder,
            encoder=encoder,
            postencoder=postencoder,
            decoder=decoder,
            ctc=ctc,
            joint_network=joint_network,
            ctc_weight=ctc_weight,
            interctc_weight=interctc_weight,
            ignore_id=ignore_id,
            lsm_weight=lsm_weight,
            length_normalized_loss=length_normalized_loss,
            report_cer=report_cer,
            report_wer=report_wer,
            sym_space=sym_space,
            sym_blank=sym_blank,
            extract_feats_in_collect_stats=extract_feats_in_collect_stats,
        )
        # Add <mask> and override inherited fields
        # NOTE(review): this appends to the caller's token_list in place
        # before copying — confirm callers do not rely on the original list.
        token_list.append(sym_mask)
        vocab_size += 1
        self.vocab_size = vocab_size
        # <mask> was appended last, so its id is the new final index.
        self.mask_token = vocab_size - 1
        self.token_list = token_list.copy()
        # MLM loss
        # Drop the inherited attention criterion; use label-smoothed MLM loss.
        del self.criterion_att
        self.criterion_mlm = LabelSmoothingLoss(
            size=vocab_size,
            padding_idx=ignore_id,
            smoothing=lsm_weight,
            normalize_length=length_normalized_loss,
        )
        # Rebuild the error calculator against the extended token list.
        self.error_calculator = None
        if report_cer or report_wer:
            self.error_calculator = ErrorCalculator(
                token_list, sym_space, sym_blank, report_cer, report_wer
            )
    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )
            text: (Batch, Length)
            text_lengths: (Batch,)

        Returns:
            Tuple of (loss, stats dict, batch weight) for the trainer.
        """
        assert text_lengths.dim() == 1, text_lengths.shape
        # Check that batch_size is unified
        assert (
            speech.shape[0]
            == speech_lengths.shape[0]
            == text.shape[0]
            == text_lengths.shape[0]
        ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
        batch_size = speech.shape[0]
        # For data-parallel
        text = text[:, : text_lengths.max()]
        # Define stats to report
        loss_mlm, acc_mlm = None, None
        loss_ctc, cer_ctc = None, None
        stats = dict()
        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        # A tuple result carries intermediate (InterCTC) encoder outputs.
        intermediate_outs = None
        if isinstance(encoder_out, tuple):
            intermediate_outs = encoder_out[1]
            encoder_out = encoder_out[0]
        # 2. CTC branch
        if self.ctc_weight != 0.0:
            loss_ctc, cer_ctc = self._calc_ctc_loss(
                encoder_out, encoder_out_lens, text, text_lengths
            )
            # Collect CTC branch stats
            stats["loss_ctc"] = loss_ctc.detach() if loss_ctc is not None else None
            stats["cer_ctc"] = cer_ctc
        # 2a. Intermediate CTC (optional)
        loss_interctc = 0.0
        if self.interctc_weight != 0.0 and intermediate_outs is not None:
            for layer_idx, intermediate_out in intermediate_outs:
                # we assume intermediate_out has the same length & padding
                # as those of encoder_out
                loss_ic, cer_ic = self._calc_ctc_loss(
                    intermediate_out, encoder_out_lens, text, text_lengths
                )
                loss_interctc = loss_interctc + loss_ic
                # Collect Intermedaite CTC stats
                stats["loss_interctc_layer{}".format(layer_idx)] = (
                    loss_ic.detach() if loss_ic is not None else None
                )
                stats["cer_interctc_layer{}".format(layer_idx)] = cer_ic
            loss_interctc = loss_interctc / len(intermediate_outs)
            # calculate whole encoder loss
            loss_ctc = (
                1 - self.interctc_weight
            ) * loss_ctc + self.interctc_weight * loss_interctc
        # 3. MLM decoder branch
        if self.ctc_weight != 1.0:
            loss_mlm, acc_mlm = self._calc_mlm_loss(
                encoder_out, encoder_out_lens, text, text_lengths
            )
        # 4. CTC/MLM loss definition
        if self.ctc_weight == 0.0:
            loss = loss_mlm
        elif self.ctc_weight == 1.0:
            loss = loss_ctc
        else:
            loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_mlm
        # Collect MLM branch stats
        stats["loss_mlm"] = loss_mlm.detach() if loss_mlm is not None else None
        stats["acc_mlm"] = acc_mlm
        # Collect total loss stats
        stats["loss"] = loss.detach()
        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
    def _calc_mlm_loss(
        self,
        encoder_out: torch.Tensor,
        encoder_out_lens: torch.Tensor,
        ys_pad: torch.Tensor,
        ys_pad_lens: torch.Tensor,
    ):
        """Compute the masked-LM loss and token accuracy on masked positions."""
        # 1. Apply masks
        ys_in_pad, ys_out_pad = mask_uniform(
            ys_pad, self.mask_token, self.eos, self.ignore_id
        )
        # 2. Forward decoder
        decoder_out, _ = self.decoder(
            encoder_out, encoder_out_lens, ys_in_pad, ys_pad_lens
        )
        # 3. Compute mlm loss
        loss_mlm = self.criterion_mlm(decoder_out, ys_out_pad)
        acc_mlm = th_accuracy(
            decoder_out.view(-1, self.vocab_size),
            ys_out_pad,
            ignore_label=self.ignore_id,
        )
        return loss_mlm, acc_mlm
    def nll(
        self,
        encoder_out: torch.Tensor,
        encoder_out_lens: torch.Tensor,
        ys_pad: torch.Tensor,
        ys_pad_lens: torch.Tensor,
    ) -> torch.Tensor:
        """Not supported for Mask-CTC (non-autoregressive decoder)."""
        raise NotImplementedError
    def batchify_nll(
        self,
        encoder_out: torch.Tensor,
        encoder_out_lens: torch.Tensor,
        ys_pad: torch.Tensor,
        ys_pad_lens: torch.Tensor,
        batch_size: int = 100,
    ):
        """Not supported for Mask-CTC (non-autoregressive decoder)."""
        raise NotImplementedError
class MaskCTCInference(torch.nn.Module):
    """Mask-CTC-based non-autoregressive inference.

    Greedy CTC decoding produces an initial label sequence; low-confidence
    positions are replaced with <mask> and iteratively filled in by the
    MLM decoder.
    """
    def __init__(
        self,
        asr_model: MaskCTCModel,
        n_iterations: int,
        threshold_probability: float,
    ):
        """Initialize Mask-CTC inference

        Args:
            asr_model: Trained MaskCTCModel providing ctc, decoder, and
                the <mask> token id.
            n_iterations: Number of MLM refinement iterations (K).
            threshold_probability: CTC posterior threshold below which a
                token is masked.
        """
        super().__init__()
        self.ctc = asr_model.ctc
        self.mlm = asr_model.decoder
        self.mask_token = asr_model.mask_token
        self.n_iterations = n_iterations
        self.threshold_probability = threshold_probability
        self.converter = TokenIDConverter(token_list=asr_model.token_list)
    def ids2text(self, ids: List[int]):
        """Render token ids as text for logging; masks appear as '_'."""
        text = "".join(self.converter.ids2tokens(ids))
        return text.replace("<mask>", "_").replace("<space>", " ")
    def forward(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Perform Mask-CTC inference"""
        # greedy ctc outputs
        enc_out = enc_out.unsqueeze(0)
        ctc_probs, ctc_ids = torch.exp(self.ctc.log_softmax(enc_out)).max(dim=-1)
        # Collapse consecutive repeats, then drop blanks (id 0).
        y_hat = torch.stack([x[0] for x in groupby(ctc_ids[0])])
        y_idx = torch.nonzero(y_hat != 0).squeeze(-1)
        logging.info("ctc:{}".format(self.ids2text(y_hat[y_idx].tolist())))
        # calculate token-level ctc probabilities by taking
        # the maximum probability of consecutive frames with
        # the same ctc symbols
        probs_hat = []
        cnt = 0
        for i, y in enumerate(y_hat.tolist()):
            probs_hat.append(-1)
            while cnt < ctc_ids.shape[1] and y == ctc_ids[0][cnt]:
                if probs_hat[i] < ctc_probs[0][cnt]:
                    probs_hat[i] = ctc_probs[0][cnt].item()
                cnt += 1
        probs_hat = torch.from_numpy(numpy.array(probs_hat)).to(enc_out.device)
        # mask ctc outputs based on ctc probabilities
        p_thres = self.threshold_probability
        mask_idx = torch.nonzero(probs_hat[y_idx] < p_thres).squeeze(-1)
        confident_idx = torch.nonzero(probs_hat[y_idx] >= p_thres).squeeze(-1)
        mask_num = len(mask_idx)
        # Start from an all-<mask> sequence, then copy confident tokens in.
        y_in = (
            torch.zeros(1, len(y_idx), dtype=torch.long).to(enc_out.device)
            + self.mask_token
        )
        y_in[0][confident_idx] = y_hat[y_idx][confident_idx]
        logging.info("msk:{}".format(self.ids2text(y_in[0].tolist())))
        # iterative decoding
        if not mask_num == 0:
            K = self.n_iterations
            num_iter = K if mask_num >= K and K > 0 else mask_num
            # Each iteration commits the top mask_num // num_iter predictions.
            for t in range(num_iter - 1):
                pred, _ = self.mlm(enc_out, [enc_out.size(1)], y_in, [y_in.size(1)])
                pred_score, pred_id = pred[0][mask_idx].max(dim=-1)
                cand = torch.topk(pred_score, mask_num // num_iter, -1)[1]
                y_in[0][mask_idx[cand]] = pred_id[cand]
                mask_idx = torch.nonzero(y_in[0] == self.mask_token).squeeze(-1)
                logging.info("msk:{}".format(self.ids2text(y_in[0].tolist())))
            # predict leftover masks (|masks| < mask_num // num_iter)
            pred, _ = self.mlm(enc_out, [enc_out.size(1)], y_in, [y_in.size(1)])
            y_in[0][mask_idx] = pred[0][mask_idx].argmax(dim=-1)
            logging.info("msk:{}".format(self.ids2text(y_in[0].tolist())))
        # pad with mask tokens to ensure compatibility with sos/eos tokens
        yseq = torch.tensor(
            [self.mask_token] + y_in.tolist()[0] + [self.mask_token], device=y_in.device
        )
        return Hypothesis(yseq=yseq)
| 12,296 | 34.43804 | 88 | py |
espnet | espnet-master/espnet2/asr/postencoder/abs_postencoder.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class AbsPostEncoder(torch.nn.Module, ABC):
    """Abstract interface for ESPnet post-encoder modules.

    A post-encoder transforms the encoder output sequence (with its
    lengths) into a new feature sequence. Concrete subclasses must
    implement both abstract methods below.
    """

    @abstractmethod
    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Map padded input features and lengths to output features and lengths."""
        raise NotImplementedError

    @abstractmethod
    def output_size(self) -> int:
        """Return the feature dimension of the post-encoder output."""
        raise NotImplementedError
| 388 | 21.882353 | 62 | py |
espnet | espnet-master/espnet2/asr/postencoder/hugging_face_transformers_postencoder.py | #!/usr/bin/env python3
# 2021, University of Stuttgart; Pavel Denisov
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Hugging Face Transformers PostEncoder."""
import copy
import logging
from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
try:
from transformers import AutoModel
is_transformers_available = True
except ImportError:
is_transformers_available = False
class HuggingFaceTransformersPostEncoder(AbsPostEncoder):
    """Hugging Face Transformers PostEncoder.

    Projects encoder features (optionally downsampled by a convolutional
    length adaptor) into a pretrained Transformers model and returns its
    last hidden states.
    """
    def __init__(
        self,
        input_size: int,
        model_name_or_path: str,
        length_adaptor_n_layers: int = 0,
        lang_token_id: int = -1,
    ):
        """Initialize the module.

        Args:
            input_size: Feature dimension of the incoming encoder output.
            model_name_or_path: Hugging Face model id or local checkpoint path.
            length_adaptor_n_layers: Number of stride-2 Conv1d layers; each
                halves the time dimension.
            lang_token_id: If not -1, the embedding of this token is prepended
                to every sequence (the token embedding table itself is removed).
        """
        assert check_argument_types()
        super().__init__()
        if not is_transformers_available:
            raise ImportError(
                "`transformers` is not available. Please install it via `pip install"
                " transformers` or `cd /path/to/espnet/tools && . ./activate_python.sh"
                " && ./installers/install_transformers.sh`."
            )
        model = AutoModel.from_pretrained(model_name_or_path)
        # For encoder-decoder checkpoints, keep only the encoder stack.
        if hasattr(model, "encoder"):
            self.transformer = model.encoder
        else:
            self.transformer = model
        # The token-embedding table is dropped (inputs are continuous
        # features); only the optional language-token embedding is kept.
        # The attribute name differs across model families, hence the
        # hasattr cascade below.
        self.lang_token_embed = None
        if hasattr(self.transformer, "embed_tokens"):
            if lang_token_id != -1:
                self.lang_token_embed = (
                    self.transformer.embed_tokens(torch.tensor(lang_token_id))
                    .detach()
                    .cpu()
                )
            del self.transformer.embed_tokens
        if hasattr(self.transformer, "wte"):
            if lang_token_id != -1:
                self.lang_token_embed = (
                    self.transformer.wte(torch.tensor(lang_token_id)).detach().cpu()
                )
            del self.transformer.wte
        if hasattr(self.transformer, "word_embedding"):
            if lang_token_id != -1:
                self.lang_token_embed = (
                    self.transformer.word_embedding(torch.tensor(lang_token_id))
                    .detach()
                    .cpu()
                )
            del self.transformer.word_embedding
        if hasattr(model, "embeddings") and hasattr(
            model.embeddings, "word_embeddings"
        ):
            if lang_token_id != -1:
                self.lang_token_embed = (
                    model.embeddings.word_embeddings(torch.tensor(lang_token_id))
                    .detach()
                    .cpu()
                )
        # Some models scale token embeddings; mirror that for the language token.
        if self.lang_token_embed is not None and hasattr(
            self.transformer, "embed_scale"
        ):
            self.lang_token_embed *= self.transformer.embed_scale
        # Snapshot of the pretrained weights for reload_pretrained_parameters().
        self.pretrained_params = copy.deepcopy(self.transformer.state_dict())
        # Model families differ in how they accept continuous inputs and masks.
        if (
            self.transformer.config.is_encoder_decoder
            or self.transformer.config.model_type in ["xlnet", "t5"]
        ):
            self.use_inputs_embeds = True
            self.extend_attention_mask = False
        elif self.transformer.config.model_type == "gpt2":
            self.use_inputs_embeds = True
            self.extend_attention_mask = True
        else:
            self.use_inputs_embeds = False
            self.extend_attention_mask = True
        self.linear_in = torch.nn.Linear(
            input_size, self.transformer.config.hidden_size
        )
        # Length Adaptor as in https://aclanthology.org/2021.acl-long.68.pdf
        if length_adaptor_n_layers > 0:
            length_adaptor_layers = []
            for _ in range(length_adaptor_n_layers):
                length_adaptor_layers.append(
                    torch.nn.Conv1d(input_size, input_size, 2, 2)
                )
                length_adaptor_layers.append(torch.nn.ReLU())
        else:
            length_adaptor_layers = [torch.nn.Identity()]
        self.length_adaptor = torch.nn.Sequential(*length_adaptor_layers)
        # Total temporal downsampling factor of the adaptor stack.
        self.length_adaptor_ratio = 2**length_adaptor_n_layers
    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward.

        Args:
            input: Padded feature sequences (B, T, input_size).
            input_lengths: Valid lengths per sequence (B,).

        Returns:
            Tuple of last hidden states and the (downsampled) lengths.

        Raises:
            TooShortUttError: If the utterance is shorter than the adaptor's
                downsampling factor.
        """
        if input.size(1) < self.length_adaptor_ratio:
            raise TooShortUttError(
                f"has {input.size(1)} frames and is too short for subsampling "
                + f"(it needs at least {self.length_adaptor_ratio} frames), "
                + "return empty results",
                input.size(1),
                self.length_adaptor_ratio,
            )
        # Conv1d operates on (B, C, T); permute around the adaptor.
        input = input.permute(0, 2, 1)
        input = self.length_adaptor(input)
        input = input.permute(0, 2, 1)
        input_lengths = (
            input_lengths.float().div(self.length_adaptor_ratio).floor().long()
        )
        input = self.linear_in(input)
        if self.lang_token_embed is not None:
            # Prepend the language-token embedding to every sequence.
            lang_token_embed = (
                self.lang_token_embed.unsqueeze(0)
                .unsqueeze(0)
                .repeat(input.size(0), 1, 1)
            )
            input = torch.cat([lang_token_embed.to(input.device), input], dim=1)
            input_lengths = input_lengths + 1
        args = {"return_dict": True}
        # 1.0 marks valid positions, 0.0 marks padding.
        mask = (~make_pad_mask(input_lengths)).to(input.device).float()
        if self.extend_attention_mask:
            args["attention_mask"] = _extend_attention_mask(mask)
        else:
            args["attention_mask"] = mask
        if self.use_inputs_embeds:
            args["inputs_embeds"] = input
        else:
            args["hidden_states"] = input
        if self.transformer.config.model_type == "mpnet":
            args["head_mask"] = [None for _ in self.transformer.layer]
        output = self.transformer(**args).last_hidden_state
        return output, input_lengths
    def reload_pretrained_parameters(self):
        """Restore the Transformer weights captured at construction time."""
        self.transformer.load_state_dict(self.pretrained_params)
        logging.info("Pretrained Transformers model parameters reloaded!")
    def output_size(self) -> int:
        """Get the output size."""
        return self.transformer.config.hidden_size
def _extend_attention_mask(mask: torch.Tensor) -> torch.Tensor:
mask = mask[:, None, None, :]
mask = (1.0 - mask) * -10000.0
return mask
| 6,584 | 33.119171 | 87 | py |
espnet | espnet-master/espnet2/asr/transducer/beam_search_transducer.py | """Search algorithms for Transducer models."""
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr_transducer.joint_network import JointNetwork
from espnet2.lm.transformer_lm import TransformerLM
from espnet.nets.pytorch_backend.transducer.utils import (
is_prefix,
recombine_hyps,
select_k_expansions,
subtract,
)
@dataclass
class Hypothesis:
    """Default hypothesis definition for Transducer search algorithms."""
    # Cumulative log-probability score of the hypothesis.
    score: float
    # Emitted label id sequence; search initializes it with the blank id.
    yseq: List[int]
    # Decoder hidden state; concrete type depends on the decoder
    # implementation (RNN (h, c) tuple, per-layer list, or single tensor).
    dec_state: Union[
        Tuple[torch.Tensor, Optional[torch.Tensor]],
        List[Optional[torch.Tensor]],
        torch.Tensor,
    ]
    # Optional language-model state for fusion (None when no LM is used).
    lm_state: Union[Dict[str, Any], List[Any]] = None
@dataclass
class ExtendedHypothesis(Hypothesis):
    """Extended hypothesis definition for NSC beam search and mAES."""
    # Cached decoder outputs for the emitted labels (used by prefix search).
    dec_out: List[torch.Tensor] = None
    # Cached LM scores for candidate expansion — presumably per-vocab
    # log-probabilities; confirm against the NSC/mAES search code.
    lm_scores: torch.Tensor = None
class BeamSearchTransducer:
"""Beam search implementation for Transducer."""
def __init__(
self,
decoder: AbsDecoder,
joint_network: JointNetwork,
beam_size: int,
lm: torch.nn.Module = None,
lm_weight: float = 0.1,
search_type: str = "default",
max_sym_exp: int = 2,
u_max: int = 50,
nstep: int = 1,
prefix_alpha: int = 1,
expansion_gamma: int = 2.3,
expansion_beta: int = 2,
multi_blank_durations: List[int] = [],
multi_blank_indices: List[int] = [],
score_norm: bool = True,
nbest: int = 1,
token_list: Optional[List[str]] = None,
):
"""Initialize Transducer search module.
Args:
decoder: Decoder module.
joint_network: Joint network module.
beam_size: Beam size.
lm: LM class.
lm_weight: LM weight for soft fusion.
search_type: Search algorithm to use during inference.
max_sym_exp: Number of maximum symbol expansions at each time step. (TSD)
u_max: Maximum output sequence length. (ALSD)
nstep: Number of maximum expansion steps at each time step. (NSC/mAES)
prefix_alpha: Maximum prefix length in prefix search. (NSC/mAES)
expansion_beta:
Number of additional candidates for expanded hypotheses selection. (mAES)
expansion_gamma: Allowed logp difference for prune-by-value method. (mAES)
multi_blank_durations: The duration of each blank token. (MBG)
multi_blank_indices: The index of each blank token in token_list. (MBG)
score_norm: Normalize final scores by length. ("default")
nbest: Number of final hypothesis.
"""
self.decoder = decoder
self.joint_network = joint_network
self.beam_size = beam_size
self.hidden_size = decoder.dunits
self.vocab_size = decoder.odim
self.sos = self.vocab_size - 1
self.token_list = token_list
self.blank_id = decoder.blank_id
if search_type == "mbg":
self.beam_size = 1
self.multi_blank_durations = multi_blank_durations
self.multi_blank_indices = multi_blank_indices
self.search_algorithm = self.multi_blank_greedy_search
elif self.beam_size <= 1:
self.search_algorithm = self.greedy_search
elif search_type == "default":
self.search_algorithm = self.default_beam_search
elif search_type == "tsd":
if isinstance(lm, TransformerLM):
raise NotImplementedError
self.max_sym_exp = max_sym_exp
self.search_algorithm = self.time_sync_decoding
elif search_type == "alsd":
if isinstance(lm, TransformerLM):
raise NotImplementedError
self.u_max = u_max
self.search_algorithm = self.align_length_sync_decoding
elif search_type == "nsc":
if isinstance(lm, TransformerLM):
raise NotImplementedError
self.nstep = nstep
self.prefix_alpha = prefix_alpha
self.search_algorithm = self.nsc_beam_search
elif search_type == "maes":
if isinstance(lm, TransformerLM):
raise NotImplementedError
self.nstep = nstep if nstep > 1 else 2
self.prefix_alpha = prefix_alpha
self.expansion_gamma = expansion_gamma
assert self.vocab_size >= beam_size + expansion_beta, (
"beam_size (%d) + expansion_beta (%d) "
"should be smaller or equal to vocabulary size (%d)."
% (beam_size, expansion_beta, self.vocab_size)
)
self.max_candidates = beam_size + expansion_beta
self.search_algorithm = self.modified_adaptive_expansion_search
else:
raise NotImplementedError
self.use_lm = lm is not None
self.lm = lm
self.lm_weight = lm_weight
if self.use_lm and self.beam_size == 1:
logging.warning("LM is provided but not used, since this is greedy search.")
self.score_norm = score_norm
self.nbest = nbest
def __call__(
self, enc_out: torch.Tensor
) -> Union[List[Hypothesis], List[ExtendedHypothesis]]:
"""Perform beam search.
Args:
enc_out: Encoder output sequence. (T, D_enc)
Returns:
nbest_hyps: N-best decoding results
"""
self.decoder.set_device(enc_out.device)
nbest_hyps = self.search_algorithm(enc_out)
return nbest_hyps
def sort_nbest(
self, hyps: Union[List[Hypothesis], List[ExtendedHypothesis]]
) -> Union[List[Hypothesis], List[ExtendedHypothesis]]:
"""Sort hypotheses by score or score given sequence length.
Args:
hyps: Hypothesis.
Return:
hyps: Sorted hypothesis.
"""
if self.score_norm:
hyps.sort(key=lambda x: x.score / len(x.yseq), reverse=True)
else:
hyps.sort(key=lambda x: x.score, reverse=True)
return hyps[: self.nbest]
    def prefix_search(
        self, hyps: List[ExtendedHypothesis], enc_out_t: torch.Tensor
    ) -> List[ExtendedHypothesis]:
        """Prefix search for NSC and mAES strategies.

        Merges the probability mass of hypotheses that are strict prefixes of
        longer hypotheses in the beam, so a longer hypothesis' score also
        accounts for reaching it through its prefix.

        Based on https://arxiv.org/pdf/1211.3711.pdf

        Args:
            hyps: Current hypotheses; callers pass them sorted by decreasing
                label length so prefixes appear after the sequences they prefix.
            enc_out_t: Encoder output frame used to score label emissions.

        Returns:
            hyps: Same list, with scores of extended hypotheses updated in place.
        """
        for j, hyp_j in enumerate(hyps[:-1]):
            for hyp_i in hyps[(j + 1) :]:
                curr_id = len(hyp_j.yseq)
                pref_id = len(hyp_i.yseq)
                # Merge only when hyp_i is a prefix of hyp_j and the length
                # gap is within the pruning threshold prefix_alpha.
                if (
                    is_prefix(hyp_j.yseq, hyp_i.yseq)
                    and (curr_id - pref_id) <= self.prefix_alpha
                ):
                    # Log-probability of emitting the first extra label from
                    # the prefix hypothesis' last decoder output.
                    logp = torch.log_softmax(
                        self.joint_network(enc_out_t, hyp_i.dec_out[-1]),
                        dim=-1,
                    )
                    curr_score = hyp_i.score + float(logp[hyp_j.yseq[pref_id]])
                    # Accumulate the remaining labels of hyp_j, each scored
                    # with the decoder output at the matching position.
                    for k in range(pref_id, (curr_id - 1)):
                        logp = torch.log_softmax(
                            self.joint_network(enc_out_t, hyp_j.dec_out[k]),
                            dim=-1,
                        )
                        curr_score += float(logp[hyp_j.yseq[k + 1]])
                    # Log-domain addition of the two paths into hyp_j.
                    hyp_j.score = np.logaddexp(hyp_j.score, curr_score)
        return hyps
    def greedy_search(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Greedy search implementation.

        Emits at most one non-blank symbol per encoder frame: the decoder
        state is advanced only when the argmax of the joint output is not
        blank.

        Args:
            enc_out: Encoder output sequence. (T, D_enc)

        Returns:
            hyp: 1-best hypotheses.
        """
        dec_state = self.decoder.init_state(1)
        # Start from a blank-only hypothesis with zero score.
        hyp = Hypothesis(score=0.0, yseq=[self.blank_id], dec_state=dec_state)
        cache = {}
        dec_out, state, _ = self.decoder.score(hyp, cache)
        for enc_out_t in enc_out:
            logp = torch.log_softmax(
                self.joint_network(enc_out_t, dec_out),
                dim=-1,
            )
            top_logp, pred = torch.max(logp, dim=-1)
            # Only a non-blank prediction extends the hypothesis and advances
            # the decoder; blank just moves on to the next frame.
            if pred != self.blank_id:
                hyp.yseq.append(int(pred))
                hyp.score += float(top_logp)
                hyp.dec_state = state
                dec_out, state, _ = self.decoder.score(hyp, cache)
        return [hyp]
def default_beam_search(self, enc_out: torch.Tensor) -> List[Hypothesis]:
"""Beam search implementation.
Modified from https://arxiv.org/pdf/1211.3711.pdf
Args:
enc_out: Encoder output sequence. (T, D)
Returns:
nbest_hyps: N-best hypothesis.
"""
beam = min(self.beam_size, self.vocab_size)
beam_k = min(beam, (self.vocab_size - 1))
dec_state = self.decoder.init_state(1)
kept_hyps = [Hypothesis(score=0.0, yseq=[self.blank_id], dec_state=dec_state)]
cache = {}
cache_lm = {}
for enc_out_t in enc_out:
hyps = kept_hyps
kept_hyps = []
if self.token_list is not None:
logging.debug(
"\n"
+ "\n".join(
[
"hypo: "
+ "".join([self.token_list[x] for x in hyp.yseq[1:]])
+ f", score: {round(float(hyp.score), 2)}"
for hyp in sorted(hyps, key=lambda x: x.score, reverse=True)
]
)
)
while True:
max_hyp = max(hyps, key=lambda x: x.score)
hyps.remove(max_hyp)
dec_out, state, lm_tokens = self.decoder.score(max_hyp, cache)
logp = torch.log_softmax(
self.joint_network(enc_out_t, dec_out),
dim=-1,
)
top_k = logp[1:].topk(beam_k, dim=-1)
kept_hyps.append(
Hypothesis(
score=(max_hyp.score + float(logp[0:1])),
yseq=max_hyp.yseq[:],
dec_state=max_hyp.dec_state,
lm_state=max_hyp.lm_state,
)
)
if self.use_lm:
if tuple(max_hyp.yseq) not in cache_lm:
lm_scores, lm_state = self.lm.score(
torch.LongTensor(
[self.sos] + max_hyp.yseq[1:],
device=self.decoder.device,
),
max_hyp.lm_state,
None,
)
cache_lm[tuple(max_hyp.yseq)] = (lm_scores, lm_state)
else:
lm_scores, lm_state = cache_lm[tuple(max_hyp.yseq)]
else:
lm_state = max_hyp.lm_state
for logp, k in zip(*top_k):
score = max_hyp.score + float(logp)
if self.use_lm:
score += self.lm_weight * lm_scores[k + 1]
hyps.append(
Hypothesis(
score=score,
yseq=max_hyp.yseq[:] + [int(k + 1)],
dec_state=state,
lm_state=lm_state,
)
)
hyps_max = float(max(hyps, key=lambda x: x.score).score)
kept_most_prob = sorted(
[hyp for hyp in kept_hyps if hyp.score > hyps_max],
key=lambda x: x.score,
)
if len(kept_most_prob) >= beam:
kept_hyps = kept_most_prob
break
return self.sort_nbest(kept_hyps)
    def time_sync_decoding(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Time synchronous beam search implementation.

        Based on https://ieeexplore.ieee.org/document/9053040

        Args:
            enc_out: Encoder output sequence. (T, D)

        Returns:
            nbest_hyps: N-best hypothesis.
        """
        beam = min(self.beam_size, self.vocab_size)
        beam_state = self.decoder.init_state(beam)
        # B: hypotheses kept after processing each encoder frame.
        B = [
            Hypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=self.decoder.select_state(beam_state, 0),
            )
        ]
        cache = {}
        if self.use_lm:
            B[0].lm_state = self.lm.zero_state()
        for enc_out_t in enc_out:
            # A: hypotheses that took the blank transition at this frame.
            A = []
            # C: candidates to expand at the current expansion step.
            C = B
            enc_out_t = enc_out_t.unsqueeze(0)
            # Allow at most max_sym_exp label emissions per time frame.
            for v in range(self.max_sym_exp):
                D = []
                beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
                    C,
                    beam_state,
                    cache,
                    self.use_lm,
                )
                beam_logp = torch.log_softmax(
                    self.joint_network(enc_out_t, beam_dec_out),
                    dim=-1,
                )
                beam_topk = beam_logp[:, 1:].topk(beam, dim=-1)
                seq_A = [h.yseq for h in A]
                for i, hyp in enumerate(C):
                    if hyp.yseq not in seq_A:
                        # Blank transition: label sequence unchanged.
                        A.append(
                            Hypothesis(
                                score=(hyp.score + float(beam_logp[i, 0])),
                                yseq=hyp.yseq[:],
                                dec_state=hyp.dec_state,
                                lm_state=hyp.lm_state,
                            )
                        )
                    else:
                        # Same label sequence already present: merge the two
                        # path scores in the log domain instead of duplicating.
                        dict_pos = seq_A.index(hyp.yseq)
                        A[dict_pos].score = np.logaddexp(
                            A[dict_pos].score, (hyp.score + float(beam_logp[i, 0]))
                        )
                # Non-blank expansion is skipped on the last step (v == max-1)
                # since those hypotheses could not take a closing blank.
                if v < (self.max_sym_exp - 1):
                    if self.use_lm:
                        beam_lm_scores, beam_lm_states = self.lm.batch_score(
                            beam_lm_tokens, [c.lm_state for c in C], None
                        )
                    for i, hyp in enumerate(C):
                        # Top-k non-blank labels; indices are shifted by one
                        # because blank (index 0) was excluded from topk.
                        for logp, k in zip(beam_topk[0][i], beam_topk[1][i] + 1):
                            new_hyp = Hypothesis(
                                score=(hyp.score + float(logp)),
                                yseq=(hyp.yseq + [int(k)]),
                                dec_state=self.decoder.select_state(beam_state, i),
                                lm_state=hyp.lm_state,
                            )
                            if self.use_lm:
                                new_hyp.score += self.lm_weight * beam_lm_scores[i, k]
                                new_hyp.lm_state = beam_lm_states[i]
                            D.append(new_hyp)
                # Keep the best `beam` expansions for the next step.
                C = sorted(D, key=lambda x: x.score, reverse=True)[:beam]
            B = sorted(A, key=lambda x: x.score, reverse=True)[:beam]
        return self.sort_nbest(B)
    def align_length_sync_decoding(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Alignment-length synchronous beam search implementation.

        Hypotheses are grouped by alignment length n = t + u (frame index
        plus number of emitted labels) rather than by frame.

        Based on https://ieeexplore.ieee.org/document/9053040

        Args:
            h: Encoder output sequences. (T, D)

        Returns:
            nbest_hyps: N-best hypothesis.
        """
        beam = min(self.beam_size, self.vocab_size)
        t_max = int(enc_out.size(0))
        # Cap the number of emitted labels at u_max (and at T - 1).
        u_max = min(self.u_max, (t_max - 1))
        beam_state = self.decoder.init_state(beam)
        B = [
            Hypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=self.decoder.select_state(beam_state, 0),
            )
        ]
        final = []
        cache = {}
        if self.use_lm:
            B[0].lm_state = self.lm.zero_state()
        # i iterates over alignment lengths n = t + u.
        for i in range(t_max + u_max):
            A = []
            B_ = []
            B_enc_out = []
            for hyp in B:
                u = len(hyp.yseq) - 1
                # Frame index implied by this alignment length.
                t = i - u
                if t > (t_max - 1):
                    continue
                B_.append(hyp)
                B_enc_out.append((t, enc_out[t]))
            if B_:
                beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
                    B_,
                    beam_state,
                    cache,
                    self.use_lm,
                )
                beam_enc_out = torch.stack([x[1] for x in B_enc_out])
                beam_logp = torch.log_softmax(
                    self.joint_network(beam_enc_out, beam_dec_out),
                    dim=-1,
                )
                beam_topk = beam_logp[:, 1:].topk(beam, dim=-1)
                if self.use_lm:
                    beam_lm_scores, beam_lm_states = self.lm.batch_score(
                        beam_lm_tokens,
                        [b.lm_state for b in B_],
                        None,
                    )
                # NOTE: this enumerate rebinds the outer loop variable ``i``;
                # the outer value is not needed after this point.
                for i, hyp in enumerate(B_):
                    # Blank transition (label sequence unchanged).
                    new_hyp = Hypothesis(
                        score=(hyp.score + float(beam_logp[i, 0])),
                        yseq=hyp.yseq[:],
                        dec_state=hyp.dec_state,
                        lm_state=hyp.lm_state,
                    )
                    A.append(new_hyp)
                    # Hypotheses that consumed the last frame are complete.
                    if B_enc_out[i][0] == (t_max - 1):
                        final.append(new_hyp)
                    # Non-blank expansions with the top-k labels
                    # (indices shifted by 1 to skip blank at index 0).
                    for logp, k in zip(beam_topk[0][i], beam_topk[1][i] + 1):
                        new_hyp = Hypothesis(
                            score=(hyp.score + float(logp)),
                            yseq=(hyp.yseq[:] + [int(k)]),
                            dec_state=self.decoder.select_state(beam_state, i),
                            lm_state=hyp.lm_state,
                        )
                        if self.use_lm:
                            new_hyp.score += self.lm_weight * beam_lm_scores[i, k]
                            new_hyp.lm_state = beam_lm_states[i]
                        A.append(new_hyp)
                B = sorted(A, key=lambda x: x.score, reverse=True)[:beam]
                # Merge hypotheses that share the same label sequence.
                B = recombine_hyps(B)
        if final:
            return self.sort_nbest(final)
        else:
            return B
    def nsc_beam_search(self, enc_out: torch.Tensor) -> List[ExtendedHypothesis]:
        """N-step constrained beam search implementation.

        Based on/Modified from https://arxiv.org/pdf/2002.03577.pdf.
        Please reference ESPnet (b-flo, PR #2444) for any usage outside ESPnet
        until further modifications.

        Args:
            enc_out: Encoder output sequence. (T, D_enc)

        Returns:
            nbest_hyps: N-best hypothesis.
        """
        beam = min(self.beam_size, self.vocab_size)
        beam_k = min(beam, (self.vocab_size - 1))
        beam_state = self.decoder.init_state(beam)
        # Seed with a blank-only hypothesis and pre-compute its decoder
        # output (and LM state when a language model is attached).
        init_tokens = [
            ExtendedHypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=self.decoder.select_state(beam_state, 0),
            )
        ]
        cache = {}
        beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
            init_tokens,
            beam_state,
            cache,
            self.use_lm,
        )
        state = self.decoder.select_state(beam_state, 0)
        if self.use_lm:
            beam_lm_scores, beam_lm_states = self.lm.batch_score(
                beam_lm_tokens,
                [i.lm_state for i in init_tokens],
                None,
            )
            lm_state = beam_lm_states[0]
            lm_scores = beam_lm_scores[0]
        else:
            lm_state = None
            lm_scores = None
        kept_hyps = [
            ExtendedHypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=state,
                dec_out=[beam_dec_out[0]],
                lm_state=lm_state,
                lm_scores=lm_scores,
            )
        ]
        for enc_out_t in enc_out:
            # Merge prefix hypotheses before expansion; prefix_search expects
            # hypotheses sorted by decreasing label length.
            hyps = self.prefix_search(
                sorted(kept_hyps, key=lambda x: len(x.yseq), reverse=True),
                enc_out_t,
            )
            kept_hyps = []
            beam_enc_out = enc_out_t.unsqueeze(0)
            # S: hypotheses that emitted blank; V: non-blank expansions.
            S = []
            V = []
            # Up to nstep label expansions per encoder frame.
            for n in range(self.nstep):
                beam_dec_out = torch.stack([hyp.dec_out[-1] for hyp in hyps])
                beam_logp = torch.log_softmax(
                    self.joint_network(beam_enc_out, beam_dec_out),
                    dim=-1,
                )
                beam_topk = beam_logp[:, 1:].topk(beam_k, dim=-1)
                for i, hyp in enumerate(hyps):
                    # Blank transition: hypothesis stops expanding this frame.
                    S.append(
                        ExtendedHypothesis(
                            yseq=hyp.yseq[:],
                            score=hyp.score + float(beam_logp[i, 0:1]),
                            dec_out=hyp.dec_out[:],
                            dec_state=hyp.dec_state,
                            lm_state=hyp.lm_state,
                            lm_scores=hyp.lm_scores,
                        )
                    )
                    # Non-blank expansions (label indices shifted by 1 since
                    # blank at index 0 was excluded from topk).
                    for logp, k in zip(beam_topk[0][i], beam_topk[1][i] + 1):
                        score = hyp.score + float(logp)
                        if self.use_lm:
                            score += self.lm_weight * float(hyp.lm_scores[k])
                        V.append(
                            ExtendedHypothesis(
                                yseq=hyp.yseq[:] + [int(k)],
                                score=score,
                                dec_out=hyp.dec_out[:],
                                dec_state=hyp.dec_state,
                                lm_state=hyp.lm_state,
                                lm_scores=hyp.lm_scores,
                            )
                        )
                V.sort(key=lambda x: x.score, reverse=True)
                # Remove duplicates of current hypotheses and prune to beam.
                V = subtract(V, hyps)[:beam]
                beam_state = self.decoder.create_batch_states(
                    beam_state,
                    [v.dec_state for v in V],
                    [v.yseq for v in V],
                )
                beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
                    V,
                    beam_state,
                    cache,
                    self.use_lm,
                )
                if self.use_lm:
                    beam_lm_scores, beam_lm_states = self.lm.batch_score(
                        beam_lm_tokens, [v.lm_state for v in V], None
                    )
                if n < (self.nstep - 1):
                    # Intermediate step: carry the new states into the next
                    # expansion round.
                    for i, v in enumerate(V):
                        v.dec_out.append(beam_dec_out[i])
                        v.dec_state = self.decoder.select_state(beam_state, i)
                        if self.use_lm:
                            v.lm_state = beam_lm_states[i]
                            v.lm_scores = beam_lm_scores[i]
                    hyps = V[:]
                else:
                    # Last step: fold the blank probability of the refreshed
                    # decoder outputs into the expansion scores.
                    beam_logp = torch.log_softmax(
                        self.joint_network(beam_enc_out, beam_dec_out),
                        dim=-1,
                    )
                    for i, v in enumerate(V):
                        if self.nstep != 1:
                            v.score += float(beam_logp[i, 0])
                        v.dec_out.append(beam_dec_out[i])
                        v.dec_state = self.decoder.select_state(beam_state, i)
                        if self.use_lm:
                            v.lm_state = beam_lm_states[i]
                            v.lm_scores = beam_lm_scores[i]
            kept_hyps = sorted((S + V), key=lambda x: x.score, reverse=True)[:beam]
        return self.sort_nbest(kept_hyps)
def modified_adaptive_expansion_search(
self, enc_out: torch.Tensor
) -> List[ExtendedHypothesis]:
"""It's the modified Adaptive Expansion Search (mAES) implementation.
Based on/modified from https://ieeexplore.ieee.org/document/9250505 and NSC.
Args:
enc_out: Encoder output sequence. (T, D_enc)
Returns:
nbest_hyps: N-best hypothesis.
"""
beam = min(self.beam_size, self.vocab_size)
beam_state = self.decoder.init_state(beam)
init_tokens = [
ExtendedHypothesis(
yseq=[self.blank_id],
score=0.0,
dec_state=self.decoder.select_state(beam_state, 0),
)
]
cache = {}
beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
init_tokens,
beam_state,
cache,
self.use_lm,
)
state = self.decoder.select_state(beam_state, 0)
if self.use_lm:
beam_lm_scores, beam_lm_states = self.lm.batch_score(
beam_lm_tokens, [i.lm_state for i in init_tokens], None
)
lm_state = beam_lm_states[0]
lm_scores = beam_lm_scores[0]
else:
lm_state = None
lm_scores = None
kept_hyps = [
ExtendedHypothesis(
yseq=[self.blank_id],
score=0.0,
dec_state=state,
dec_out=[beam_dec_out[0]],
lm_state=lm_state,
lm_scores=lm_scores,
)
]
for enc_out_t in enc_out:
hyps = self.prefix_search(
sorted(kept_hyps, key=lambda x: len(x.yseq), reverse=True),
enc_out_t,
)
kept_hyps = []
beam_enc_out = enc_out_t.unsqueeze(0)
list_b = []
duplication_check = [hyp.yseq for hyp in hyps]
for n in range(self.nstep):
beam_dec_out = torch.stack([h.dec_out[-1] for h in hyps])
beam_logp, beam_idx = torch.log_softmax(
self.joint_network(beam_enc_out, beam_dec_out),
dim=-1,
).topk(self.max_candidates, dim=-1)
k_expansions = select_k_expansions(
hyps,
beam_idx,
beam_logp,
self.expansion_gamma,
)
list_exp = []
for i, hyp in enumerate(hyps):
for k, new_score in k_expansions[i]:
new_hyp = ExtendedHypothesis(
yseq=hyp.yseq[:],
score=new_score,
dec_out=hyp.dec_out[:],
dec_state=hyp.dec_state,
lm_state=hyp.lm_state,
lm_scores=hyp.lm_scores,
)
if k == 0:
list_b.append(new_hyp)
else:
if new_hyp.yseq + [int(k)] not in duplication_check:
new_hyp.yseq.append(int(k))
if self.use_lm:
new_hyp.score += self.lm_weight * float(
hyp.lm_scores[k]
)
list_exp.append(new_hyp)
if not list_exp:
kept_hyps = sorted(list_b, key=lambda x: x.score, reverse=True)[
:beam
]
break
else:
beam_state = self.decoder.create_batch_states(
beam_state,
[hyp.dec_state for hyp in list_exp],
[hyp.yseq for hyp in list_exp],
)
beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
list_exp,
beam_state,
cache,
self.use_lm,
)
if self.use_lm:
beam_lm_scores, beam_lm_states = self.lm.batch_score(
beam_lm_tokens, [k.lm_state for k in list_exp], None
)
if n < (self.nstep - 1):
for i, hyp in enumerate(list_exp):
hyp.dec_out.append(beam_dec_out[i])
hyp.dec_state = self.decoder.select_state(beam_state, i)
if self.use_lm:
hyp.lm_state = beam_lm_states[i]
hyp.lm_scores = beam_lm_scores[i]
hyps = list_exp[:]
else:
beam_logp = torch.log_softmax(
self.joint_network(beam_enc_out, beam_dec_out),
dim=-1,
)
for i, hyp in enumerate(list_exp):
hyp.score += float(beam_logp[i, 0])
hyp.dec_out.append(beam_dec_out[i])
hyp.dec_state = self.decoder.select_state(beam_state, i)
if self.use_lm:
hyp.lm_states = beam_lm_states[i]
hyp.lm_scores = beam_lm_scores[i]
kept_hyps = sorted(
list_b + list_exp, key=lambda x: x.score, reverse=True
)[:beam]
return self.sort_nbest(kept_hyps)
def multi_blank_greedy_search(self, enc_out: torch.Tensor) -> List[Hypothesis]:
"""Greedy Search for Multi-Blank Transducer (Multi-Blank Greedy, MBG).
In this implementation, we assume:
1. the index of standard blank is the last entry of self.multi_blank_indices
rather than self.blank_id (to avoid too much change on original transducer)
2. other entries in self.multi_blank_indices are big blanks that account for
multiple frames.
Based on https://arxiv.org/abs/2211.03541
Args:
enc_out: Encoder output sequence. (T, D_enc)
Returns:
hyp: 1-best hypothesis.
"""
big_blank_duration = 1
blank_start = self.multi_blank_indices[0]
blank_end = self.multi_blank_indices[-1]
dec_state = self.decoder.init_state(1)
hyp = Hypothesis(score=0.0, yseq=[blank_end], dec_state=dec_state)
cache = {}
for enc_out_t in enc_out:
# case 1: skip frames until big_blank_duration == 1
if big_blank_duration > 1:
big_blank_duration -= 1
continue
symbols_added = 0
while symbols_added <= 3:
dec_out, state, _ = self.decoder.score(hyp, cache)
logp = torch.log_softmax(self.joint_network(enc_out_t, dec_out), dim=-1)
top_logp, k = torch.max(logp, dim=-1)
# case 2: predict a blank token
if blank_start <= k <= blank_end:
big_blank_duration = self.multi_blank_durations[k - blank_start]
hyp.score += top_logp
break
# case 3: predict a non-blank token
else:
symbols_added += 1
hyp.yseq.append(int(k))
hyp.score += float(top_logp)
hyp.dec_state = state
return [hyp]
| 31,972 | 32.514675 | 88 | py |
espnet | espnet-master/espnet2/asr/transducer/error_calculator.py | """Error Calculator module for Transducer."""
from typing import List, Tuple
import torch
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.transducer.beam_search_transducer import BeamSearchTransducer
class ErrorCalculatorTransducer(object):
    """Compute sentence-level CER and WER for Transducer models.

    Args:
        decoder: Decoder module.
        joint_network: Joint network module.
        token_list: List of tokens.
        sym_space: Space symbol.
        sym_blank: Blank symbol.
        report_cer: Whether to compute CER.
        report_wer: Whether to compute WER.
    """

    def __init__(
        self,
        decoder: AbsDecoder,
        joint_network: torch.nn.Module,
        token_list: List[int],
        sym_space: str,
        sym_blank: str,
        report_cer: bool = False,
        report_wer: bool = False,
    ):
        """Construct an ErrorCalculatorTransducer."""
        super().__init__()

        # A small beam is sufficient for training-time error reporting.
        self.beam_search = BeamSearchTransducer(
            decoder=decoder,
            joint_network=joint_network,
            beam_size=2,
            search_type="default",
            score_norm=False,
        )

        self.decoder = decoder

        self.token_list = token_list
        self.space = sym_space
        self.blank = sym_blank

        self.report_cer = report_cer
        self.report_wer = report_wer

    def __call__(self, encoder_out: torch.Tensor, target: torch.Tensor):
        """Calculate sentence-level WER/CER score for Transducer model.

        Args:
            encoder_out: Encoder output sequences. (B, T, D_enc)
            target: Target label ID sequences. (B, L)

        Returns:
            : Sentence-level CER score (or None when not reported).
            : Sentence-level WER score (or None when not reported).
        """
        encoder_out = encoder_out.to(next(self.decoder.parameters()).device)

        # Decode every utterance and keep the 1-best label sequence,
        # dropping the leading start/blank token.
        batch_nbest = [
            self.beam_search(encoder_out[b]) for b in range(int(encoder_out.size(0)))
        ]
        pred = [nbest[0].yseq[1:] for nbest in batch_nbest]

        char_pred, char_target = self.convert_to_char(pred, target)

        cer = self.calculate_cer(char_pred, char_target) if self.report_cer else None
        wer = self.calculate_wer(char_pred, char_target) if self.report_wer else None

        return cer, wer

    def convert_to_char(
        self, pred: torch.Tensor, target: torch.Tensor
    ) -> Tuple[List, List]:
        """Convert label ID sequences to character sequences.

        Args:
            pred: Prediction label ID sequences. (B, U)
            target: Target label ID sequences. (B, L)

        Returns:
            char_pred: Prediction character sequences. (B, ?)
            char_target: Target character sequences. (B, ?)
        """
        char_pred, char_target = [], []

        for pred_i, target_i in zip(pred, target):
            pred_text = "".join(self.token_list[int(t)] for t in pred_i)
            target_text = "".join(self.token_list[int(t)] for t in target_i)

            # Map the space symbol to a real space, then strip blank symbols.
            pred_text = pred_text.replace(self.space, " ").replace(self.blank, "")
            target_text = target_text.replace(self.space, " ").replace(self.blank, "")

            char_pred.append(pred_text)
            char_target.append(target_text)

        return char_pred, char_target

    def calculate_cer(
        self, char_pred: torch.Tensor, char_target: torch.Tensor
    ) -> float:
        """Calculate sentence-level CER score.

        Args:
            char_pred: Prediction character sequences. (B, ?)
            char_target: Target character sequences. (B, ?)

        Returns:
            : Average sentence-level CER score.
        """
        import editdistance

        distances, lens = [], []

        for pred_i, target_i in zip(char_pred, char_target):
            # Character error rate ignores word boundaries.
            pred_chars = pred_i.replace(" ", "")
            target_chars = target_i.replace(" ", "")

            distances.append(editdistance.eval(pred_chars, target_chars))
            lens.append(len(target_chars))

        return float(sum(distances)) / sum(lens)

    def calculate_wer(
        self, char_pred: torch.Tensor, char_target: torch.Tensor
    ) -> float:
        """Calculate sentence-level WER score.

        Args:
            char_pred: Prediction character sequences. (B, ?)
            char_target: Target character sequences. (B, ?)

        Returns:
            : Average sentence-level WER score
        """
        import editdistance

        distances, lens = [], []

        for pred_i, target_i in zip(char_pred, char_target):
            pred_words = pred_i.split()
            target_words = target_i.split()

            distances.append(editdistance.eval(pred_words, target_words))
            lens.append(len(target_words))

        return float(sum(distances)) / sum(lens)
| 4,907 | 27.701754 | 78 | py |
espnet | espnet-master/espnet2/asr/transducer/rnnt_multi_blank/rnnt_multi_blank.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.autograd import Function
from torch.nn import Module
from espnet2.asr.transducer.rnnt_multi_blank import rnnt
from espnet2.asr.transducer.rnnt_multi_blank.utils.cpu_utils import cpu_rnnt
__all__ = ["rnnt_loss", "RNNTLossNumba", "MultiblankRNNTLossNumba"]
class _RNNTNumba(Function):
    """Autograd Function bridging the Numba RNN-T loss kernels (CPU/GPU)."""

    @staticmethod
    def forward(
        ctx,
        acts,
        labels,
        act_lens,
        label_lens,
        blank,
        reduction,
        fastemit_lambda,
        clamp,
    ):
        """
        log_probs: Tensor of (batch x seqLength x labelLength x outputDim)
            containing output from network
        labels: 2 dimensional Tensor containing all the targets of
            the batch with zero padded
        act_lens: Tensor of size (batch) containing size of each
            output sequence from the network
        label_lens: Tensor of (batch) containing label length of each example
        fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
            FastEmit: Low-latency Streaming ASR with Sequence-level
            Emission Regularization.
        """
        is_cuda = acts.is_cuda

        certify_inputs(acts, labels, act_lens, label_lens)
        if clamp < 0:
            raise ValueError("`clamp` must be 0.0 or positive float value.")

        # Dispatch to the matching kernel for the input's device.
        loss_func = rnnt.rnnt_loss_gpu if is_cuda else rnnt.rnnt_loss_cpu
        # Gradients are computed by the kernel during forward and stashed on
        # ctx; backward only rescales them.
        grads = torch.zeros_like(acts) if acts.requires_grad else None
        minibatch_size = acts.size(0)
        costs = torch.zeros(minibatch_size, device=acts.device, dtype=acts.dtype)

        loss_func(
            acts,
            labels=labels,
            input_lengths=act_lens,
            label_lengths=label_lens,
            costs=costs,
            grads=grads,
            blank_label=blank,
            fastemit_lambda=fastemit_lambda,
            clamp=clamp,
            num_threads=0,
        )

        if reduction in ["sum", "mean"]:
            costs = costs.sum().unsqueeze_(-1)
            if reduction == "mean":
                # Scale both the loss and the stored gradients by 1/B.
                costs /= minibatch_size

                if grads is not None:
                    grads /= minibatch_size

        ctx.grads = grads

        return costs

    @staticmethod
    def backward(ctx, grad_output):
        # Broadcast the incoming gradient over (B, T, U, V) and scale the
        # gradients pre-computed in forward.
        if grad_output is not None and ctx.grads is not None:
            grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.grads)
            return ctx.grads.mul_(grad_output), None, None, None, None, None, None, None
class _MultiblankRNNTNumba(Function):
    """
    Numba class for multi-blank transducer loss (https://arxiv.org/pdf/2211.03541.pdf)
    """

    @staticmethod
    def forward(
        ctx,
        acts,
        labels,
        act_lens,
        label_lens,
        blank,
        big_blank_durations,
        reduction,
        fastemit_lambda,
        clamp,
        sigma,
    ):
        """
        big_blank_durations: list of durations for multi-blank transducer, e.g.
            [2, 4, 8].
        sigma: hyper-parameter for logit under-normalization method for training
            multi-blank transducers. Recommended value 0.05.
        Refer to https://arxiv.org/pdf/2211.03541 for detailed explanations for
            the above parameters;
        For other parameters for this class, refer to comment for class _RNNTNumba
        """
        is_cuda = acts.is_cuda

        certify_inputs(acts, labels, act_lens, label_lens)
        if clamp < 0:
            raise ValueError("`clamp` must be 0.0 or positive float value.")

        # Only a GPU kernel exists for the multi-blank loss.
        if is_cuda:
            loss_func = rnnt.multiblank_rnnt_loss_gpu
        else:
            raise NotImplementedError()

        # Gradients are computed by the kernel during forward and stashed on
        # ctx; backward only rescales them.
        grads = torch.zeros_like(acts) if acts.requires_grad else None
        minibatch_size = acts.size(0)
        costs = torch.zeros(minibatch_size, device=acts.device, dtype=acts.dtype)

        loss_func(
            acts,
            labels=labels,
            input_lengths=act_lens,
            label_lengths=label_lens,
            costs=costs,
            grads=grads,
            blank_label=blank,
            big_blank_durations=big_blank_durations,
            fastemit_lambda=fastemit_lambda,
            clamp=clamp,
            sigma=sigma,
            num_threads=0,
        )

        if reduction in ["sum", "mean"]:
            costs = costs.sum().unsqueeze_(-1)
            if reduction == "mean":
                # Scale both the loss and the stored gradients by 1/B.
                costs /= minibatch_size

                if grads is not None:
                    grads /= minibatch_size

        ctx.grads = grads

        return costs

    @staticmethod
    def backward(ctx, grad_output):
        # Broadcast the incoming gradient over (B, T, U, V) and scale the
        # gradients pre-computed in forward; one None per non-tensor input.
        if grad_output is not None and ctx.grads is not None:
            grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.grads)
            return (
                ctx.grads.mul_(grad_output),
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
            )
def rnnt_loss(
    acts,
    labels,
    act_lens,
    label_lens,
    blank=0,
    reduction="mean",
    fastemit_lambda: float = 0.0,
    clamp: float = 0.0,
):
    """Functional interface to the RNN Transducer loss.

    Args:
        acts: Network output. (batch x seqLength x labelLength x outputDim)
        labels: 2 dimensional Tensor with the zero-padded targets of the batch.
        act_lens: Size of each output sequence from the network. (batch,)
        label_lens: Label length of each example. (batch,)
        blank (int, optional): blank label. Default: 0.
        reduction (string, optional): Specifies the reduction to apply to the output:
            'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
            'mean': the output losses will be divided by the target lengths and
            then the mean over the batch is taken. Default: 'mean'
        fastemit_lambda: Scaling factor for FastEmit regularization.
        clamp: When positive, clamp gradients to [-clamp, clamp].
    """
    on_gpu = acts.is_cuda

    if not on_gpu:
        # The CUDA kernel applies log_softmax (and gradient clamping) in
        # place, so only the CPU path needs this explicit preprocessing.
        # The clamping hook must be registered *before* log_softmax so its
        # backward pass sees the gradients of loss(logsoftmax()); this copies
        # the joint tensor, which is wasteful but unavoidable on CPU.
        if clamp > 0.0:
            acts = cpu_rnnt.LogSoftmaxGradModification.apply(acts, clamp)
        acts = torch.nn.functional.log_softmax(acts, dim=-1)

    return _RNNTNumba.apply(
        acts, labels, act_lens, label_lens, blank, reduction, fastemit_lambda, clamp
    )
def multiblank_rnnt_loss(
    acts,
    labels,
    act_lens,
    label_lens,
    blank,
    big_blank_durations=None,
    reduction="mean",
    fastemit_lambda: float = 0.0,
    clamp: float = 0.0,
):
    """
    Multi-blank RNN Transducer (https://arxiv.org/pdf/2211.03541.pdf)
    Loss (functional form)

    Args:
        acts: Tensor of (batch x seqLength x labelLength x outputDim) containing
            output from network
        labels: 2 dimensional Tensor containing all the targets of the batch with
            zero padded
        act_lens: Tensor of size (batch) containing size of each output
            sequence from the network
        label_lens: Tensor of (batch) containing label length of each example
        blank (int): standard blank label.
        big_blank_durations: list of durations for multi-blank transducer, e.g.
            [2, 4, 8]. Defaults to an empty list.
        sigma: hyper-parameter for logit under-normalization method for training
            multi-blank transducers. Recommended value 0.05.
        Refer to https://arxiv.org/pdf/2211.03541 for detailed explanations for
            the last two params.
        reduction (string, optional): Specifies the reduction to apply to the output:
            'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
            'mean': the output losses will be divided by the target lengths and
            then the mean over the batch is taken. Default: 'mean'
    """
    # FIX: avoid a mutable default argument; None is the sentinel for the
    # previous default of an empty list (backward compatible).
    if big_blank_durations is None:
        big_blank_durations = []

    if not acts.is_cuda:
        # The CUDA kernel applies log_softmax (and gradient clamping) in
        # place, so only the CPU path needs this explicit preprocessing.
        # The clamping hook must be registered *before* log_softmax so its
        # backward pass sees the gradients of loss(logsoftmax()); this copies
        # the joint tensor, which is wasteful but unavoidable on CPU.
        if clamp > 0.0:
            acts = cpu_rnnt.LogSoftmaxGradModification.apply(acts, clamp)

        acts = torch.nn.functional.log_softmax(acts, -1)

    return _MultiblankRNNTNumba.apply(
        acts,
        labels,
        act_lens,
        label_lens,
        blank,
        big_blank_durations,
        reduction,
        fastemit_lambda,
        clamp,
    )
class RNNTLossNumba(Module):
    """RNN Transducer loss module backed by the Numba kernels.

    Parameters:
        blank (int, optional): blank label. Default: 0.
        reduction (string, optional): Specifies the reduction to apply to the output:
            'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
            'mean': the output losses will be divided by the target lengths and
            then the mean over the batch is taken. Default: 'mean'
        fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
            FastEmit: Low-latency Streaming ASR with Sequence-level
            Emission Regularization.
        clamp: Float value. When set to value >= 0.0, will clamp the
            gradient to [-clamp, clamp].
    """

    def __init__(
        self, blank=0, reduction="mean", fastemit_lambda: float = 0.0, clamp: float = -1
    ):
        super(RNNTLossNumba, self).__init__()

        self.blank = blank
        self.reduction = reduction
        self.fastemit_lambda = fastemit_lambda
        # Negative clamp values disable clamping entirely.
        self.clamp = float(clamp) if clamp > 0 else 0.0
        self.loss = _RNNTNumba.apply

    def forward(self, acts, labels, act_lens, label_lens):
        """Compute the transducer loss for one batch.

        Args:
            acts: Network output. (batch x seqLength x labelLength x outputDim)
            labels: 2 dimensional Tensor with the zero-padded batch targets.
            act_lens: Size of each output sequence from the network. (batch,)
            label_lens: Label length of each example. (batch,)
        """
        if not acts.is_cuda:
            # The CUDA kernel applies log_softmax (and gradient clamping) in
            # place; the CPU path must do both explicitly, and the clamping
            # hook has to be registered before log_softmax so its backward
            # sees the gradients of loss(logsoftmax()).
            if self.clamp > 0.0:
                acts = cpu_rnnt.LogSoftmaxGradModification.apply(acts, self.clamp)
            acts = torch.nn.functional.log_softmax(acts, dim=-1)

        return self.loss(
            acts,
            labels,
            act_lens,
            label_lens,
            self.blank,
            self.reduction,
            self.fastemit_lambda,
            self.clamp,
        )
class MultiblankRNNTLossNumba(Module):
    """Multi-blank RNN Transducer loss module backed by the Numba kernels.

    Parameters:
        blank (int): standard blank label.
        big_blank_durations: list of durations for multi-blank transducer, e.g.
            [2, 4, 8].
        sigma: hyper-parameter for logit under-normalization method for training
            multi-blank transducers. Recommended value 0.05.
        Refer to https://arxiv.org/pdf/2211.03541 for detailed explanations for
            the above parameters;
        reduction (string, optional): Specifies the reduction to apply to the output:
            'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
            'mean': the output losses will be divided by the target lengths and
            then the mean over the batch is taken. Default: 'mean'
        fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
            FastEmit: Low-latency Streaming ASR with Sequence-level
            Emission Regularization.
        clamp: Float value. When set to value >= 0.0, will clamp the
            gradient to [-clamp, clamp].
    """

    def __init__(
        self,
        blank,
        big_blank_durations,
        reduction="mean",
        fastemit_lambda: float = 0.0,
        clamp: float = -1,
        sigma: float = 0.0,
    ):
        super(MultiblankRNNTLossNumba, self).__init__()

        self.blank = blank
        self.big_blank_durations = big_blank_durations
        self.sigma = sigma
        self.reduction = reduction
        self.fastemit_lambda = fastemit_lambda
        # Negative clamp values disable clamping entirely.
        self.clamp = float(clamp) if clamp > 0 else 0.0
        self.loss = _MultiblankRNNTNumba.apply

    def forward(self, acts, labels, act_lens, label_lens):
        """Compute the multi-blank transducer loss for one batch.

        Args:
            acts: Network output. (batch x seqLength x labelLength x outputDim)
            labels: 2 dimensional Tensor with the zero-padded batch targets.
            act_lens: Size of each output sequence from the network. (batch,)
            label_lens: Label length of each example. (batch,)
        """
        if not acts.is_cuda:
            # The CUDA kernel applies log_softmax (and gradient clamping) in
            # place; the CPU path must do both explicitly, and the clamping
            # hook has to be registered before log_softmax so its backward
            # sees the gradients of loss(logsoftmax()).
            if self.clamp > 0.0:
                acts = cpu_rnnt.LogSoftmaxGradModification.apply(acts, self.clamp)
            acts = torch.nn.functional.log_softmax(acts, dim=-1)

        return self.loss(
            acts,
            labels,
            act_lens,
            label_lens,
            self.blank,
            self.big_blank_durations,
            self.reduction,
            self.fastemit_lambda,
            self.clamp,
            self.sigma,
        )
def check_type(var, t, name):
    """Raise TypeError unless tensor ``var`` has exactly dtype ``t``."""
    if var.dtype is t:
        return
    raise TypeError(f"{name} must be {t}")
def check_contiguous(var, name):
    """Raise ValueError unless ``var`` is laid out contiguously in memory."""
    if var.is_contiguous():
        return
    raise ValueError(f"{name} must be contiguous")
def check_dim(var, dim, name):
    """Raise ValueError unless ``var`` has exactly ``dim`` dimensions."""
    if len(var.shape) == dim:
        return
    raise ValueError(f"{name} must be {dim}D")
def certify_inputs(log_probs, labels, lengths, label_lengths):
    """Validate dtypes, contiguity and shapes of the RNNT loss inputs.

    Args:
        log_probs: Activation tensor of shape [B, T, U, V+1].
        labels: Ground-truth label tensor of shape [B, U].
        lengths: Int32 vector [B] with the length of each acoustic sequence.
        label_lengths: Int32 vector [B] with the length of each target sequence.

    Raises:
        TypeError: If an integer tensor does not have dtype torch.int32.
        ValueError: If a tensor is non-contiguous or has an unexpected shape.
    """
    # NOTE: log_probs dtype is deliberately unchecked (may be fp16 or fp32).
    # check_type(log_probs, torch.float32, "log_probs")
    check_type(labels, torch.int32, "labels")
    check_type(label_lengths, torch.int32, "label_lengths")
    check_type(lengths, torch.int32, "lengths")
    check_contiguous(log_probs, "log_probs")
    check_contiguous(labels, "labels")
    check_contiguous(label_lengths, "label_lengths")
    check_contiguous(lengths, "lengths")
    if lengths.shape[0] != log_probs.shape[0]:
        raise ValueError(
            f"Must have a length per example. "
            f"Given lengths dim: {lengths.shape[0]}, "
            f"Log probs dim : {log_probs.shape[0]}"
        )
    if label_lengths.shape[0] != log_probs.shape[0]:
        raise ValueError(
            "Must have a label length per example. "
            f"Given label lengths dim : {label_lengths.shape[0]}, "
            f"Log probs dim : {log_probs.shape[0]}"
        )
    check_dim(log_probs, 4, "log_probs")
    check_dim(labels, 2, "labels")
    # Fixed: previously typo'd as "lenghts" / "label_lenghts" in error messages.
    check_dim(lengths, 1, "lengths")
    check_dim(label_lengths, 1, "label_lengths")
    max_T = torch.max(lengths)
    max_U = torch.max(label_lengths)
    T, U = log_probs.shape[1:3]
    # Fixed: the original used backslash continuations inside f-strings, which
    # embedded a long run of indentation spaces into the raised messages.
    if T != max_T:
        raise ValueError(
            f"Input length mismatch! Given T: {T}, "
            f"Expected max T from input lengths: {max_T}"
        )
    if U != max_U + 1:
        raise ValueError(
            f"Output length mismatch! Given U: {U}, "
            f"Expected max U from target lengths: {max_U} + 1"
        )
| 18,251 | 35.142574 | 88 | py |
espnet | espnet-master/espnet2/asr/transducer/rnnt_multi_blank/rnnt.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import torch
from numba import cuda
from espnet2.asr.transducer.rnnt_multi_blank.utils import global_constants, rnnt_helper
from espnet2.asr.transducer.rnnt_multi_blank.utils.cpu_utils import cpu_rnnt
from espnet2.asr.transducer.rnnt_multi_blank.utils.cuda_utils import gpu_rnnt
def rnnt_loss_cpu(
    acts: torch.Tensor,
    labels: torch.Tensor,
    input_lengths: torch.Tensor,
    label_lengths: torch.Tensor,
    costs: torch.Tensor,
    grads: torch.Tensor,
    blank_label: int,
    fastemit_lambda: float,
    clamp: float,
    num_threads: int,
):
    """
    Wrapper method for accessing CPU RNNT loss.
    CPU implementation ported from [HawkAaron/warp-transducer]
    (https://github.com/HawkAaron/warp-transducer).
    Args:
        acts: Activation tensor of shape [B, T, U, V+1].
        labels: Ground truth labels of shape [B, U].
        input_lengths: Lengths of the acoustic sequence as a vector of ints [B].
        label_lengths: Lengths of the target sequence as a vector of ints [B].
        costs: Zero vector of length [B] in which costs will be set (in place).
        grads: Zero tensor of shape [B, T, U, V+1] where the gradient will be
            set (in place). May be None, in which case only the forward
            scores are computed.
        blank_label: Index of the blank token in the vocabulary.
        fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
            FastEmit: Low-latency Streaming ASR with Sequence-level
            Emission Regularization.
        clamp: Float value. When set to value >= 0.0, will clamp the
            gradient to [-clamp, clamp].
        num_threads: Number of threads for OpenMP. Negative values mean
            "use all available cores".
    Returns:
        True on success; results are written into ``costs`` (and ``grads``).
    """
    # aliases
    log_probs = acts
    flat_labels = labels
    minibatch_size = log_probs.shape[0]
    maxT = log_probs.shape[1]
    maxU = log_probs.shape[2]
    alphabet_size = log_probs.shape[3]
    if num_threads < 0:
        num_threads = multiprocessing.cpu_count()
    num_threads = max(1, num_threads) # have to use at least 1 thread
    # Number of scratch elements the kernel needs (name kept for parity with
    # the GPU path; this is CPU memory here).
    gpu_size, status = rnnt_helper.get_workspace_size(
        maxT, maxU, minibatch_size, gpu=False
    )
    if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
        raise RuntimeError(
            "Invalid parameter passed when calculating working space memory"
        )
    # Flat scratch buffer; sliced internally by CPURNNT into alphas/betas etc.
    cpu_workspace = torch.zeros(
        gpu_size, device=log_probs.device, dtype=log_probs.dtype, requires_grad=False
    )
    # VIEW TENSORS AS VECTORS FOR POINTER INDEXING
    log_probs, acts_shape = rnnt_helper.flatten_tensor(log_probs)
    flat_labels, labels_shape = rnnt_helper.flatten_tensor(flat_labels)
    wrapper = cpu_rnnt.CPURNNT(
        minibatch=minibatch_size,
        maxT=maxT,
        maxU=maxU,
        alphabet_size=alphabet_size,
        workspace=cpu_workspace,
        blank=blank_label,
        fastemit_lambda=fastemit_lambda,
        clamp=clamp,
        num_threads=num_threads,
        batch_first=True,
    )
    if grads is None:
        # Inference path: only the forward log-likelihoods are needed.
        status = wrapper.score_forward(
            log_probs=log_probs.data,
            costs=costs,
            flat_labels=flat_labels.data,
            label_lengths=label_lengths.data,
            input_lengths=input_lengths.data,
        )
        if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
            raise RuntimeError("Could not calculate forward scores")
    else:
        # FLATTEN GRAD TENSOR
        # Training path: fills `grads` in place alongside the costs.
        grads, grads_shape = rnnt_helper.flatten_tensor(grads)
        status = wrapper.cost_and_grad(
            log_probs=log_probs.data,
            grads=grads.data,
            costs=costs,
            flat_labels=flat_labels.data,
            label_lengths=label_lengths.data,
            input_lengths=input_lengths.data,
        )
        if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
            raise RuntimeError("Could not calculate forward scores")
    del cpu_workspace, wrapper
    return True
def rnnt_loss_gpu(
    acts: torch.Tensor,
    labels: torch.Tensor,
    input_lengths: torch.Tensor,
    label_lengths: torch.Tensor,
    costs: torch.Tensor,
    grads: torch.Tensor,
    blank_label: int,
    fastemit_lambda: float,
    clamp: float,
    num_threads: int,
):
    """
    Wrapper method for accessing GPU RNNT loss.
    CUDA implementation ported from [HawkAaron/warp-transducer]
    (https://github.com/HawkAaron/warp-transducer).
    Args:
        acts: Activation tensor of shape [B, T, U, V+1].
        labels: Ground truth labels of shape [B, U].
        input_lengths: Lengths of the acoustic sequence as a vector of ints [B].
        label_lengths: Lengths of the target sequence as a vector of ints [B].
        costs: Zero vector of length [B] in which costs will be set (in place).
        grads: Zero tensor of shape [B, T, U, V+1] where the gradient will be
            set (in place). May be None, in which case only the forward
            scores are computed.
        blank_label: Index of the blank token in the vocabulary.
        fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
            FastEmit: Low-latency Streaming ASR with Sequence-level
            Emission Regularization.
        clamp: Float value. When set to value >= 0.0, will clamp the
            gradient to [-clamp, clamp].
        num_threads: Number of threads for OpenMP.
    Returns:
        True on success; results are written into ``costs`` (and ``grads``).
    """
    minibatch_size = acts.shape[0]
    maxT = acts.shape[1]
    maxU = acts.shape[2]
    alphabet_size = acts.shape[3]
    # Bind the numba kernels to torch's current CUDA stream when supported,
    # so kernel launches are ordered with surrounding torch ops.
    if hasattr(cuda, "external_stream"):
        stream = cuda.external_stream(
            torch.cuda.current_stream(acts.device).cuda_stream
        )
    else:
        stream = cuda.default_stream()
    if num_threads < 0:
        num_threads = multiprocessing.cpu_count()
    num_threads = max(1, num_threads) # have to use at least 1 thread
    gpu_size, status = rnnt_helper.get_workspace_size(
        maxT, maxU, minibatch_size, gpu=True
    )
    if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
        raise RuntimeError(
            "Invalid parameter passed when calculating working space memory"
        )
    # Select GPU index
    # NOTE: must happen before allocating the workspace / launching kernels.
    cuda.select_device(acts.device.index)
    gpu_workspace = torch.zeros(
        gpu_size, device=acts.device, dtype=acts.dtype, requires_grad=False
    )
    # VIEW TENSORS AS VECTORS FOR POINTER INDEXING
    acts, acts_shape = rnnt_helper.flatten_tensor(acts)
    wrapper = gpu_rnnt.GPURNNT(
        minibatch=minibatch_size,
        maxT=maxT,
        maxU=maxU,
        alphabet_size=alphabet_size,
        workspace=gpu_workspace,
        blank=blank_label,
        fastemit_lambda=fastemit_lambda,
        clamp=clamp,
        num_threads=num_threads,
        stream=stream,
    )
    if grads is None:
        # Inference path: only the forward log-likelihoods are needed.
        status = wrapper.score_forward(
            acts=acts.data,
            costs=costs.data,
            pad_labels=labels.data,
            label_lengths=label_lengths.data,
            input_lengths=input_lengths.data,
        )
        if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
            raise RuntimeError("Could not calculate forward scores")
    else:
        # FLATTEN GRAD TENSOR
        # Training path: fills `grads` in place alongside the costs.
        grads, grads_shape = rnnt_helper.flatten_tensor(grads)
        status = wrapper.cost_and_grad(
            acts=acts.data,
            grads=grads.data,
            costs=costs.data,
            pad_labels=labels.data,
            label_lengths=label_lengths.data,
            input_lengths=input_lengths.data,
        )
        if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
            raise RuntimeError("Could not calculate forward scores")
    del gpu_workspace, wrapper
    return True
def multiblank_rnnt_loss_gpu(
    acts: torch.Tensor,
    labels: torch.Tensor,
    input_lengths: torch.Tensor,
    label_lengths: torch.Tensor,
    costs: torch.Tensor,
    grads: torch.Tensor,
    blank_label: int,
    big_blank_durations: list,
    fastemit_lambda: float,
    clamp: float,
    num_threads: int,
    sigma: float,
):
    """
    Wrapper method for accessing GPU Multi-blank RNNT loss
    (https://arxiv.org/pdf/2211.03541.pdf).
    CUDA implementation ported from [HawkAaron/warp-transducer]
    (https://github.com/HawkAaron/warp-transducer).
    Args:
        acts: Activation tensor of shape [B, T, U, V + num_big_blanks + 1].
        labels: Ground truth labels of shape [B, U].
        input_lengths: Lengths of the acoustic sequence as a vector of ints [B].
        label_lengths: Lengths of the target sequence as a vector of ints [B].
        costs: Zero vector of length [B] in which costs will be set (in place).
        grads: Zero tensor of shape [B, T, U, V + num_big_blanks + 1]
            where the gradient will be set (in place). May be None, in which
            case only the forward scores are computed.
        blank_label: Index of the standard blank token in the vocabulary.
        big_blank_durations: A list of supported durations for big blank symbols
            in the model, e.g. [2, 4, 8]. Note we only include durations for ``big
            blanks'' here and it should not include 1 for the standard blank.
            Those big blanks have vocabulary indices after the standard blank index.
        fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
            FastEmit: Low-latency Streaming ASR with Sequence-level
            Emission Regularization.
        clamp: Float value. When set to value >= 0.0, will clamp the
            gradient to [-clamp, clamp].
        num_threads: Number of threads for OpenMP.
        sigma: logit-undernormalization weight used in the multi-blank model. Refer to
            the multi-blank paper https://arxiv.org/pdf/2211.03541
            for detailed explanations.
    Returns:
        True on success; results are written into ``costs`` (and ``grads``).
    """
    minibatch_size = acts.shape[0]
    maxT = acts.shape[1]
    maxU = acts.shape[2]
    alphabet_size = acts.shape[3]
    # Bind the numba kernels to torch's current CUDA stream when supported,
    # so kernel launches are ordered with surrounding torch ops.
    if hasattr(cuda, "external_stream"):
        stream = cuda.external_stream(
            torch.cuda.current_stream(acts.device).cuda_stream
        )
    else:
        stream = cuda.default_stream()
    if num_threads < 0:
        num_threads = multiprocessing.cpu_count()
    num_threads = max(1, num_threads) # have to use at least 1 thread
    gpu_size, status = rnnt_helper.get_workspace_size(
        maxT, maxU, minibatch_size, gpu=True
    )
    if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
        raise RuntimeError(
            "Invalid parameter passed when calculating working space memory"
        )
    # Select GPU index
    # NOTE: must happen before allocating the workspaces / launching kernels.
    cuda.select_device(acts.device.index)
    gpu_workspace = torch.zeros(
        gpu_size, device=acts.device, dtype=acts.dtype, requires_grad=False
    )
    # Device-side copy of the big-blank durations (one int64 per big blank).
    big_blank_workspace = torch.zeros(
        len(big_blank_durations),
        device=acts.device,
        dtype=torch.long,
        requires_grad=False,
    )
    for i in range(0, len(big_blank_durations)):
        big_blank_workspace[i] = big_blank_durations[i]
    # VIEW TENSORS AS VECTORS FOR POINTER INDEXING
    acts, acts_shape = rnnt_helper.flatten_tensor(acts)
    wrapper = gpu_rnnt.MultiblankGPURNNT(
        minibatch=minibatch_size,
        maxT=maxT,
        maxU=maxU,
        alphabet_size=alphabet_size,
        workspace=gpu_workspace,
        big_blank_workspace=big_blank_workspace,
        num_big_blanks=len(big_blank_durations),
        blank=blank_label,
        fastemit_lambda=fastemit_lambda,
        clamp=clamp,
        num_threads=num_threads,
        stream=stream,
        sigma=sigma,
    )
    if grads is None:
        # Inference path: only the forward log-likelihoods are needed.
        status = wrapper.score_forward(
            acts=acts.data,
            costs=costs.data,
            pad_labels=labels.data,
            label_lengths=label_lengths.data,
            input_lengths=input_lengths.data,
        )
        if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
            raise RuntimeError("Could not calculate forward scores")
    else:
        # FLATTEN GRAD TENSOR
        # Training path: fills `grads` in place alongside the costs.
        grads, grads_shape = rnnt_helper.flatten_tensor(grads)
        status = wrapper.cost_and_grad(
            acts=acts.data,
            grads=grads.data,
            costs=costs.data,
            pad_labels=labels.data,
            label_lengths=label_lengths.data,
            input_lengths=input_lengths.data,
        )
        if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
            raise RuntimeError("Could not calculate forward scores")
    del gpu_workspace, big_blank_workspace, wrapper
    return True
| 13,479 | 32.95466 | 87 | py |
espnet | espnet-master/espnet2/asr/transducer/rnnt_multi_blank/utils/rnnt_helper.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Tuple
import torch
from numba import cuda
from espnet2.asr.transducer.rnnt_multi_blank.utils import global_constants
threshold = global_constants.THRESHOLD
@cuda.jit(device=True, inline=True)
def log_sum_exp(a: float, b: float):
    """Numerically stable log(exp(a) + exp(b)); FP32_NEG_INF acts as log(0)."""
    if a == global_constants.FP32_NEG_INF:
        return b
    if b == global_constants.FP32_NEG_INF:
        return a
    # Factor out the larger operand to avoid overflow in exp().
    if a > b:
        hi, lo = a, b
    else:
        hi, lo = b, a
    return math.log1p(math.exp(lo - hi)) + hi
@cuda.jit(device=True, inline=True)
def div_up(x: int, y: int):
    """Ceiling integer division for positive ``y``: smallest int >= x / y."""
    numerator = x + y - 1
    return numerator // y
@cuda.jit(device=True)
def maximum(x, y):
    """Reduction op (R_Op.MAXIMUM): return the larger of ``x`` and ``y``."""
    return y if x < y else x
@cuda.jit(device=True)
def add(x, y):
    """Reduction op (R_Op.ADD): return the sum of ``x`` and ``y``."""
    return x + y
@cuda.jit(device=True)
def identity(x):
    """Input op (I_Op.IDENTITY): return ``x`` unchanged."""
    return x
@cuda.jit(device=True)
def negate(x):
    """Return ``-x``."""
    return -x
@cuda.jit(device=True)
def exponential(x):
    """Input op (I_Op.EXPONENTIAL): return ``exp(x)``."""
    return math.exp(x)
@cuda.jit(device=True)
def log_plus(p1: float, p2: float):
    """Stable log(exp(p1) + exp(p2)); FP32_NEG_INF is treated as log(0)."""
    if p1 == global_constants.FP32_NEG_INF:
        return p2
    if p2 == global_constants.FP32_NEG_INF:
        return p1
    # max + log1p(exp(-|p1 - p2|)) never overflows in the exp().
    return maximum(p1, p2) + math.log1p(math.exp(-math.fabs(p1 - p2)))
@cuda.jit(device=True, inline=True)
def copy_data_1d(source: torch.Tensor, dest: torch.Tensor, idx: int):
    """Copy the single element ``source[idx]`` into ``dest[idx]``."""
    dest[idx] = source[idx]
@cuda.jit()
def compute_costs_data(
    source: torch.Tensor, dest: torch.Tensor, fastemit_lambda: float
):
    """Kernel: set ``dest[i] = -source[i] * (1 + fastemit_lambda)`` elementwise.

    Copies the negated per-utterance log-likelihoods into the cost vector,
    scaled by the FastEmit regularization factor. One thread per element.

    Args:
        source: 1-D input tensor (per-utterance log-likelihoods).
        dest: 1-D output tensor of the same length; overwritten in place.
        fastemit_lambda: FastEmit regularization weight.
    """
    block = cuda.blockIdx.x
    tid = cuda.threadIdx.x
    # Global thread index across the grid.
    idx = block * cuda.blockDim.x + tid
    length = source.shape[0]
    # Guard: surplus threads beyond the vector length do nothing.
    if idx < length:
        copy_data_1d(source, dest, idx)
        dest[idx] *= -1.0
        dest[idx] *= 1.0 + fastemit_lambda
def get_workspace_size(
    maxT: int, maxU: int, minibatch: int, gpu: bool
) -> Tuple[Optional[int], global_constants.RNNTStatus]:
    """Compute how many scratch elements the RNNT kernels need.

    Args:
        maxT: Maximum acoustic sequence length.
        maxU: Maximum target sequence length.
        minibatch: Batch size.
        gpu: Whether sizing is for the GPU kernel (softmax denominator)
            or the CPU kernel (blank/label log-probability cache).

    Returns:
        (size, status) — size is None with RNNT_STATUS_INVALID_VALUE when any
        dimension is non-positive, otherwise the element count with
        RNNT_STATUS_SUCCESS.
    """
    if maxT <= 0 or maxU <= 0 or minibatch <= 0:
        return None, global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE
    # Per-minibatch requirement: alphas & betas ...
    per_batch = 2 * maxT * maxU
    if gpu:
        # ... plus the softmax denominator ...
        per_batch += maxT * maxU
    else:
        # ... plus the blank & label log-probability cache ...
        per_batch += 2 * maxT * maxU
    # ... plus forward/backward log-likelihood scalars.
    per_batch += 2
    return per_batch * minibatch, global_constants.RNNTStatus.RNNT_STATUS_SUCCESS
def flatten_tensor(x: torch.Tensor):
    """Return ``(flat_view, original_shape)`` for pointer-style indexing.

    The returned tensor is a 1-D view of ``x`` (no copy); the original shape
    is returned so the caller can restore it later.
    """
    original_shape = x.shape
    return x.view(-1), original_shape
| 3,802 | 24.52349 | 76 | py |
espnet | espnet-master/espnet2/asr/transducer/rnnt_multi_blank/utils/cuda_utils/reduce.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import math
import torch
from numba import cuda
from espnet2.asr.transducer.rnnt_multi_blank.utils import global_constants, rnnt_helper
warp_size = global_constants.warp_size()
dtype = global_constants.dtype()
CTA_REDUCE_SIZE = 128
class I_Op(enum.Enum):
    """
    Represents an operation that is performed on the input tensor
    """
    EXPONENTIAL = 0  # apply exp(x) to each input element
    IDENTITY = 1  # pass each input element through unchanged
class R_Op(enum.Enum):
    """
    Represents a reduction operation performed on the input tensor
    """
    ADD = 0  # sum-reduce the (transformed) inputs
    MAXIMUM = 1  # max-reduce the (transformed) inputs
@cuda.jit(device=True)
def CTAReduce(tid: int, x, storage, count: int, R_opid: int):
    """
    CUDA Warp reduction kernel.
    It is a device kernel to be called by other kernels.
    The data will be read from the right segement recursively, and reduced (ROP) onto
    the left half. Operation continues while warp size is larger than a given offset.
    Beyond this offset, warp reduction is performed via `shfl_down_sync`,
    which halves the reduction space and sums the two halves at each call.
    Note:
        Efficient warp occurs at input shapes of 2 ^ K.
    References:
        - Warp Primitives
        [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]
    Args:
        tid: CUDA thread index
        x: activation. Single float.
        storage: shared memory of size CTA_REDUCE_SIZE used for reduction
            in parallel threads.
        count: equivalent to num_rows, which is equivalent to alphabet_size (V+1)
        R_opid: Operator ID for reduction. See R_Op for more information.
            0 selects add, any other value selects maximum.
    Returns:
        The reduced value (meaningful in thread 0 of the block).
    """
    storage[tid] = x
    cuda.syncthreads()
    # Fold the data in half with each pass
    offset = CTA_REDUCE_SIZE // 2
    while offset >= warp_size:
        if (tid + offset) < count and tid < offset:
            # Read from the right half and store to the left half.
            if R_opid == 0:
                x = rnnt_helper.add(x, storage[offset + tid])
            else:
                x = rnnt_helper.maximum(x, storage[offset + tid])
            storage[tid] = x
        # Barrier before the next fold so all writes to `storage` are visible.
        cuda.syncthreads()
        offset = offset // 2
    offset = warp_size // 2
    while offset > 0:
        # warp reduction and sync
        # Within one warp: exchange registers directly, no shared memory needed.
        shuff = cuda.shfl_down_sync(0xFFFFFFFF, x, offset)
        if (tid + offset < count) and (tid < offset):
            if R_opid == 0:
                x = rnnt_helper.add(x, shuff)
            else:
                x = rnnt_helper.maximum(x, shuff)
        offset = offset // 2
    return x
@cuda.jit()
def _reduce_rows(I_opid: int, R_opid: int, acts, output, num_rows: int):
    """
    CUDA warp-reduction kernel; each block reduces one column of `acts`.

    The body is generic over I_opid/R_opid and identical to `_reduce_minus`
    except for the final store: here the reduced value is written back
    directly (``output[col] = curr``). In this file it is launched through
    `ReduceHelper` when ``minus=False`` — in particular by `reduce_max`
    (I_Op = Identity, R_Op = Maximum) to compute per-column maxima.
    Note:
        Efficient warp occurs at input shapes of 2 ^ K.
    References:
        - Warp Primitives
        [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]
    Args:
        I_opid: Operator ID for input. See I_Op for more information
            (0 -> exponential, 1 -> identity).
        R_opid: Operator ID for reduction. See R_Op for more information
            (0 -> add, 1 -> maximum).
        acts: Flatened activation matrix of shape [B * T * U * (V+1)].
        output: Flatened output matrix of shape [B * T * U]. On entry
            ``output[col]`` holds the value subtracted from each element
            (the column maximum when launched after `reduce_max`).
            Data will be overwritten.
        num_rows: Vocabulary size (including blank token) - V+1.
    """
    tid = cuda.threadIdx.x
    idx = tid
    col = cuda.blockIdx.x
    # allocate shared thread memory
    storage = cuda.shared.array(shape=(CTA_REDUCE_SIZE,), dtype=acts.dtype)
    # NOTE: shadows the builtin `max`; holds the pre-computed column offset.
    max = output[col]
    # // Each block works on a column
    if idx < num_rows:
        curr = acts[col * num_rows + idx] - max
        if I_opid == 0:
            curr = rnnt_helper.exponential(curr)
        else:
            curr = rnnt_helper.identity(curr)
    # Grid-stride over the column: each thread folds rows idx, idx+128, ...
    idx += CTA_REDUCE_SIZE
    while idx < num_rows:
        activation_ = acts[col * num_rows + idx] - max
        if I_opid == 0 and R_opid == 0:
            curr = rnnt_helper.add(curr, rnnt_helper.exponential(activation_))
        elif I_opid == 0 and R_opid == 1:
            curr = rnnt_helper.maximum(curr, rnnt_helper.exponential(activation_))
        elif I_opid == 1 and R_opid == 0:
            curr = rnnt_helper.add(curr, rnnt_helper.identity(activation_))
        else:
            curr = rnnt_helper.maximum(curr, rnnt_helper.identity(activation_))
        idx += CTA_REDUCE_SIZE
    # // Sum thread-totals over the CTA.
    curr = CTAReduce(tid, curr, storage, num_rows, R_opid)
    # // Store result in out (inplace, I_op: identity)
    if tid == 0:
        output[col] = curr
@cuda.jit()
def _reduce_minus(I_opid: int, R_opid: int, acts, output, num_rows: int):
    """
    CUDA warp-reduction kernel; each block reduces one column of `acts`.

    The body is generic over I_opid/R_opid and identical to `_reduce_rows`
    except for the final store: here the result is written as
    ``output[col] = -max - log(curr)``. In this file it is launched through
    `ReduceHelper` when ``minus=True`` — in particular by `reduce_exp`
    (I_Op = Exponential, R_Op = Add) to finish the log-softmax denominator
    after `reduce_max` has filled `output` with the column maxima.
    Note:
        Efficient warp occurs at input shapes of 2 ^ K.
    References:
        - Warp Primitives
        [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]
    Args:
        I_opid: Operator ID for input. See I_Op for more information
            (0 -> exponential, 1 -> identity).
        R_opid: Operator ID for reduction. See R_Op for more information
            (0 -> add, 1 -> maximum).
        acts: Flatened activation matrix of shape [B * T * U * (V+1)].
        output: Flatened output matrix of shape [B * T * U]. On entry
            ``output[col]`` must hold the column maximum (written by the
            preceding `reduce_max` pass). Data will be overwritten.
        num_rows: Vocabulary size (including blank token) - V+1.
    """
    tid = cuda.threadIdx.x
    idx = tid
    col = cuda.blockIdx.x
    # allocate shared thread memory
    storage = cuda.shared.array(shape=(CTA_REDUCE_SIZE,), dtype=acts.dtype)
    # NOTE: shadows the builtin `max`; holds the pre-computed column maximum.
    max = output[col]
    # // Each block works on a column
    if idx < num_rows:
        curr = acts[col * num_rows + idx] - max
        if I_opid == 0:
            curr = rnnt_helper.exponential(curr)
        else:
            curr = rnnt_helper.identity(curr)
    # Grid-stride over the column: each thread folds rows idx, idx+128, ...
    idx += CTA_REDUCE_SIZE
    while idx < num_rows:
        activation_ = acts[col * num_rows + idx] - max
        if I_opid == 0 and R_opid == 0:
            curr = rnnt_helper.add(curr, rnnt_helper.exponential(activation_))
        elif I_opid == 0 and R_opid == 1:
            curr = rnnt_helper.maximum(curr, rnnt_helper.exponential(activation_))
        elif I_opid == 1 and R_opid == 0:
            curr = rnnt_helper.add(curr, rnnt_helper.identity(activation_))
        else:
            curr = rnnt_helper.maximum(curr, rnnt_helper.identity(activation_))
        idx += CTA_REDUCE_SIZE
    # // Sum thread-totals over the CTA.
    curr = CTAReduce(tid, curr, storage, num_rows, R_opid)
    # // Store result in out (inplace, I_op: exponential)
    if tid == 0:
        output[col] = -max - math.log(curr)
def ReduceHelper(
    I_opid: int,
    R_opid: int,
    acts: torch.Tensor,
    output: torch.Tensor,
    num_rows: int,
    num_cols: int,
    minus: bool,
    stream,
):
    """Launch a warp-reduction kernel over ``num_cols`` CUDA blocks.

    Dispatches to ``_reduce_minus`` when ``minus`` is set (result stored as
    ``-max - log(sum)``) and to ``_reduce_rows`` otherwise (reduced value
    stored directly). Each block of ``CTA_REDUCE_SIZE`` threads reduces one
    column of ``num_rows`` entries.

    Note:
        Efficient warp occurs at input shapes of 2 ^ K.
    References:
        - Warp Primitives
        [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]

    Args:
        I_opid: Input-op id (see ``I_Op``).
        R_opid: Reduction-op id (see ``R_Op``).
        acts: Flattened activation matrix of shape [B * T * U * (V+1)].
        output: Flattened output buffer; overwritten in place.
        num_rows: Vocabulary size including blank (threads per column).
        num_cols: B * T * U (blocks per grid).
        minus: Selects the ``_reduce_minus`` kernel variant.
        stream: CUDA stream to launch on.

    Returns:
        True once the kernel has been launched.
    """
    kernel = _reduce_minus if minus else _reduce_rows
    kernel[num_cols, CTA_REDUCE_SIZE, stream, 0](
        I_opid, R_opid, acts, output, num_rows
    )
    return True
def reduce_exp(acts: torch.Tensor, denom, rows: int, cols: int, minus: bool, stream):
    """Warp-reduce with I_Op.EXPONENTIAL input op and R_Op.ADD reduction.

    Note:
        Efficient warp occurs at input shapes of 2 ^ K.
    References:
        - Warp Primitives
        [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]

    Args:
        acts: Flattened activation matrix of shape [B * T * U * (V+1)].
        denom: Flattened output buffer; overwritten in place.
        rows: Vocabulary size including blank (V+1).
        cols: B * T * U (blocks per grid).
        minus: If True, store ``-max - log(sum)`` (``_reduce_minus`` path).
        stream: CUDA stream.
    """
    return ReduceHelper(
        I_Op.EXPONENTIAL.value,
        R_Op.ADD.value,
        acts,
        denom,
        num_rows=rows,
        num_cols=cols,
        minus=minus,
        stream=stream,
    )
def reduce_max(acts: torch.Tensor, denom, rows: int, cols: int, minus: bool, stream):
    """Warp-reduce with I_Op.IDENTITY input op and R_Op.MAXIMUM reduction.

    Note:
        Efficient warp occurs at input shapes of 2 ^ K.
    References:
        - Warp Primitives
        [https://developer.nvidia.com/blog/using-cuda-warp-level-primitives/]

    Args:
        acts: Flattened activation matrix of shape [B * T * U * (V+1)].
        denom: Flattened output buffer; overwritten in place.
        rows: Vocabulary size including blank (V+1).
        cols: B * T * U (blocks per grid).
        minus: If True, use the ``_reduce_minus`` storage variant.
        stream: CUDA stream.
    """
    return ReduceHelper(
        I_Op.IDENTITY.value,
        R_Op.MAXIMUM.value,
        acts,
        denom,
        num_rows=rows,
        num_cols=cols,
        minus=minus,
        stream=stream,
    )
| 12,677 | 31.84456 | 87 | py |
espnet | espnet-master/espnet2/asr/transducer/rnnt_multi_blank/utils/cuda_utils/gpu_rnnt.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
from typing import Optional, Tuple
import numba
import torch
from numba import cuda
from espnet2.asr.transducer.rnnt_multi_blank.utils import global_constants, rnnt_helper
from espnet2.asr.transducer.rnnt_multi_blank.utils.cuda_utils import (
gpu_rnnt_kernel,
reduce,
)
class GPURNNT:
def __init__(
self,
minibatch: int,
maxT: int,
maxU: int,
alphabet_size: int,
workspace,
blank: int,
fastemit_lambda: float,
clamp: float,
num_threads: int,
stream,
):
"""
Helper class to launch the CUDA Kernels to compute the Transducer Loss.
Args:
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length.
Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length.
Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
workspace: An allocated chunk of memory that will be sliced off and
reshaped into required blocks used as working memory.
blank: Index of the RNNT blank token in the vocabulary.
Generally the first or last token in the vocab.
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level
Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp
the gradient to [-clamp, clamp].
num_threads: Number of OMP threads to launch.
stream: Numba Cuda Stream.
"""
self.minibatch_ = minibatch
self.maxT_ = maxT
self.maxU_ = maxU
self.alphabet_size_ = alphabet_size
self.gpu_workspace = cuda.as_cuda_array(
workspace
) # a flat vector of floatX numbers that represents allocated memory slices
self.blank_ = blank
self.fastemit_lambda_ = fastemit_lambda
self.clamp_ = abs(clamp)
self.num_threads_ = num_threads
self.stream_ = stream # type: cuda.cudadrv.driver.Stream
if num_threads > 0:
numba.set_num_threads(min(multiprocessing.cpu_count(), num_threads))
self.num_threads_ = numba.get_num_threads()
else:
self.num_threads_ = numba.get_num_threads()
def log_softmax(self, acts: torch.Tensor, denom: torch.Tensor):
"""
Computes the log softmax denominator of the input activation tensor
and stores the result in denom.
Args:
acts: Activation tensor of shape [B, T, U, V+1]. The input must be
represented as a flat tensor of shape [B * T * U * (V+1)] to
allow pointer indexing.
denom: A zero tensor of same shape as acts.
Updates:
This kernel inplace updates the `denom` tensor
"""
# // trans_acts + pred_acts -> log_softmax denominator
reduce.reduce_max(
acts,
denom,
rows=self.alphabet_size_,
cols=self.minibatch_ * self.maxT_ * self.maxU_,
minus=False,
stream=self.stream_,
)
reduce.reduce_exp(
acts,
denom,
rows=self.alphabet_size_,
cols=self.minibatch_ * self.maxT_ * self.maxU_,
minus=True,
stream=self.stream_,
)
    def compute_cost_and_score(
        self,
        acts: torch.Tensor,
        grads: Optional[torch.Tensor],
        costs: torch.Tensor,
        labels: torch.Tensor,
        label_lengths: torch.Tensor,
        input_lengths: torch.Tensor,
    ) -> global_constants.RNNTStatus:
        """
        Compute both the loss and the gradients.

        Args:
            acts: A flattened tensor of shape [B, T, U, V+1] representing the
                activation matrix.
            grads: A flattened zero tensor of same shape as acts. When None,
                only the scores are computed (inference mode).
            costs: A zero vector of length B which will be updated inplace
                with the log probability costs.
            labels: A padded matrix of labels of shape [B, U].
            label_lengths: A vector of length B that contains the original
                lengths of the target transcript sequence.
            input_lengths: A vector of length B that contains the original
                lengths of the acoustic sequence.

        Updates:
            This will launch kernels that will update inline the following variables:
            - grads: Gradients of the activation matrix wrt the costs vector.
            - costs: Negative log likelihood of the forward variable.

        Returns:
            An enum that either represents a successful RNNT operation or failure.
        """
        # Gradients (and hence betas) are only needed in training mode.
        training = grads is not None

        if training:
            grads *= 0.0  # zero grads

        # Slice the flat GPU workspace into the named buffers used below.
        used_offset, (
            denom,
            alphas,
            betas,
            llForward,
            llBackward,
        ) = self._prepare_workspace()

        # START EXECUTION
        self.log_softmax(acts, denom)

        # Compute alphas: one CUDA block per batch element, maxU threads each.
        gpu_rnnt_kernel.compute_alphas_kernel[
            self.minibatch_, self.maxU_, self.stream_, 0
        ](
            acts,
            denom,
            alphas,
            llForward,
            input_lengths,
            label_lengths,
            labels,
            self.minibatch_,
            self.maxT_,
            self.maxU_,
            self.alphabet_size_,
            self.blank_,
        )

        if training:
            # Compute betas (backward variables), same launch geometry as alphas.
            gpu_rnnt_kernel.compute_betas_kernel[
                self.minibatch_, self.maxU_, self.stream_, 0
            ](
                acts,
                denom,
                betas,
                llBackward,
                input_lengths,
                label_lengths,
                labels,
                self.minibatch_,
                self.maxT_,
                self.maxU_,
                self.alphabet_size_,
                self.blank_,
            )

            # Compute gradient: one block per (b, t, u) cell, threads stride
            # across the vocabulary dimension.
            grad_blocks_per_grid = self.minibatch_ * self.maxT_ * self.maxU_
            grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
            gpu_rnnt_kernel.compute_grad_kernel[
                grad_blocks_per_grid, grad_threads_per_block, self.stream_, 0
            ](
                grads,
                acts,
                denom,
                alphas,
                betas,
                llForward,
                input_lengths,
                label_lengths,
                labels,
                self.minibatch_,
                self.maxT_,
                self.maxU_,
                self.alphabet_size_,
                self.blank_,
                self.fastemit_lambda_,
                self.clamp_,
            )

        # // cost copy, negate (for log likelihood) and update with additional
        # regularizers This needs to be done via CUDA, because we used temporary
        # memory llForward passed to alpha, which was updated with log likelihoods.
        # But copying this data into a pytorch pointer is more difficult
        # (numba api is one way)
        # Therefore launch a pointwise CUDA kernel to update the costs inplace
        # from data of llForward then negate to compute the loglikelihood.
        threadsperblock = min(costs.shape[0], 32)
        blockspergrid = (costs.shape[0] + (threadsperblock - 1)) // threadsperblock
        rnnt_helper.compute_costs_data[blockspergrid, threadsperblock, self.stream_, 0](
            llForward, costs, self.fastemit_lambda_
        )
        self.stream_.synchronize()

        return global_constants.RNNTStatus.RNNT_STATUS_SUCCESS
def cost_and_grad(
self,
acts: torch.Tensor,
grads: torch.Tensor,
costs: torch.Tensor,
pad_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
):
if (
acts is None
or grads is None
or costs is None
or pad_labels is None
or label_lengths is None
or input_lengths is None
):
return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE
return self.compute_cost_and_score(
acts, grads, costs, pad_labels, label_lengths, input_lengths
)
def score_forward(
self,
acts: torch.Tensor,
costs: torch.Tensor,
pad_labels: torch.Tensor,
label_lengths: torch.Tensor,
input_lengths: torch.Tensor,
):
if (
acts is None
or costs is None
or pad_labels is None
or label_lengths is None
or input_lengths is None
):
return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE
return self.compute_cost_and_score(
acts, None, costs, pad_labels, label_lengths, input_lengths
)
def _prepare_workspace(self) -> Tuple[int, Tuple[torch.Tensor, ...]]:
"""
Helper method that uses the workspace and constructs slices of it
that can be used.
Returns:
An int, representing the offset of the used workspace (practically, the
slice of the workspace consumed) A tuple of tensors representing
the shared workspace.
"""
used_offset = 0
# // denom
denom = self.gpu_workspace[
used_offset : used_offset + self.maxT_ * self.maxU_ * self.minibatch_
]
used_offset += self.maxT_ * self.maxU_ * self.minibatch_
# // alphas & betas
alphas = self.gpu_workspace[
used_offset : used_offset + self.maxT_ * self.maxU_ * self.minibatch_
]
used_offset += self.maxT_ * self.maxU_ * self.minibatch_
betas = self.gpu_workspace[
used_offset : used_offset + self.maxT_ * self.maxU_ * self.minibatch_
]
used_offset += self.maxT_ * self.maxU_ * self.minibatch_
# // logllh
llForward = self.gpu_workspace[used_offset : used_offset + self.minibatch_]
used_offset += self.minibatch_
llBackward = self.gpu_workspace[used_offset : used_offset + self.minibatch_]
used_offset += self.minibatch_
return used_offset, (denom, alphas, betas, llForward, llBackward)
class MultiblankGPURNNT(GPURNNT):
    # Extends the standard GPURNNT launcher with "big blank" symbols (blanks
    # with duration > 1) and the sigma logit-undernormalization term.
    def __init__(
        self,
        sigma: float,
        num_big_blanks: int,
        minibatch: int,
        maxT: int,
        maxU: int,
        alphabet_size: int,
        workspace,
        big_blank_workspace,
        blank: int,
        fastemit_lambda: float,
        clamp: float,
        num_threads: int,
        stream,
    ):
        """
        Helper class to launch the CUDA Kernels to compute Multi-blank Transducer Loss
        (https://arxiv.org/pdf/2211.03541).

        Args:
            sigma: Hyper-parameter related to the logit-normalization method
                in training multi-blank transducers.
            num_big_blanks: Number of big blank symbols the model has. This should
                not include the standard blank symbol.
            minibatch: Int representing the batch size.
            maxT: The maximum possible acoustic sequence length.
                Represents T in the logprobs tensor.
            maxU: The maximum possible target sequence length.
                Represents U in the logprobs tensor.
            alphabet_size: The vocabulary dimension V + 1 + num-big-blanks
            workspace: An allocated chunk of memory that will be sliced off and
                reshaped into required blocks used as working memory.
            big_blank_workspace: An allocated chunk of memory that will be sliced
                off and reshaped into required blocks used as working memory
                specifically for the multi-blank related computations.
            blank: Index of the RNNT blank token in the vocabulary.
                Generally the first or last token in the vocab.
            fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
                FastEmit: Low-latency Streaming ASR with
                Sequence-level Emission Regularization.
            clamp: Float value. When set to value >= 0.0, will clamp the
                gradient to [-clamp, clamp].
            num_threads: Number of OMP threads to launch.
            stream: Numba Cuda Stream.
        """
        super().__init__(
            minibatch,
            maxT,
            maxU,
            alphabet_size,
            workspace,
            blank,
            fastemit_lambda,
            clamp,
            num_threads,
            stream,
        )
        self.big_blank_workspace = cuda.as_cuda_array(
            big_blank_workspace
        )  # a flat vector of integer numbers that represents allocated memory slices
        self.num_big_blanks = num_big_blanks
        self.sigma = sigma

    def compute_cost_and_score(
        self,
        acts: torch.Tensor,
        grads: Optional[torch.Tensor],
        costs: torch.Tensor,
        labels: torch.Tensor,
        label_lengths: torch.Tensor,
        input_lengths: torch.Tensor,
    ) -> global_constants.RNNTStatus:
        """
        Compute both the loss and the gradients.

        Args:
            acts: A flattened tensor of shape [B, T, U, V+1] representing
                the activation matrix.
            grads: A flattened zero tensor of same shape as acts. When None,
                only the scores are computed (inference mode).
            costs: A zero vector of length B which will be updated inplace
                with the log probability costs.
            labels: A padded matrix of labels of shape [B, U].
            label_lengths: A vector of length B that contains the original
                lengths of the target transcript sequence.
            input_lengths: A vector of length B that contains the original
                lengths of the acoustic sequence.

        Updates:
            This will launch kernels that will update inline the following variables:
            - grads: Gradients of the activation matrix wrt the costs vector.
            - costs: Negative log likelihood of the forward variable.

        Returns:
            An enum that either represents a successful RNNT operation or failure.
        """
        # Betas / gradients are only needed in training mode.
        training = grads is not None

        if training:
            grads *= 0.0  # zero grads

        # Slice the flat GPU workspaces into the named buffers used below.
        _, (
            denom,
            alphas,
            betas,
            llForward,
            llBackward,
            bigblank_durations,
        ) = self._prepare_workspace()

        # START EXECUTION
        self.log_softmax(acts, denom)

        # Compute alphas: one CUDA block per batch element, maxU threads each.
        gpu_rnnt_kernel.compute_multiblank_alphas_kernel[
            self.minibatch_, self.maxU_, self.stream_, 0
        ](
            acts,
            denom,
            self.sigma,
            alphas,
            llForward,
            input_lengths,
            label_lengths,
            labels,
            self.minibatch_,
            self.maxT_,
            self.maxU_,
            self.alphabet_size_,
            self.blank_,
            bigblank_durations,
            self.num_big_blanks,
        )

        if training:
            # Compute betas (backward variables), same launch geometry as alphas.
            gpu_rnnt_kernel.compute_multiblank_betas_kernel[
                self.minibatch_, self.maxU_, self.stream_, 0
            ](
                acts,
                denom,
                self.sigma,
                betas,
                llBackward,
                input_lengths,
                label_lengths,
                labels,
                self.minibatch_,
                self.maxT_,
                self.maxU_,
                self.alphabet_size_,
                self.blank_,
                bigblank_durations,
                self.num_big_blanks,
            )

            # Compute gradient: one block per (b, t, u) cell, threads stride
            # across the vocabulary dimension.
            grad_blocks_per_grid = self.minibatch_ * self.maxT_ * self.maxU_
            grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
            gpu_rnnt_kernel.compute_multiblank_grad_kernel[
                grad_blocks_per_grid, grad_threads_per_block, self.stream_, 0
            ](
                grads,
                acts,
                denom,
                self.sigma,
                alphas,
                betas,
                llForward,
                input_lengths,
                label_lengths,
                labels,
                self.minibatch_,
                self.maxT_,
                self.maxU_,
                self.alphabet_size_,
                self.blank_,
                bigblank_durations,
                self.num_big_blanks,
                self.fastemit_lambda_,
                self.clamp_,
            )

        # // cost copy, negate (for log likelihood) and update with additional
        # regularizers. This needs to be done via CUDA, because we used temporary
        # memory llForward passed to alpha, which was updated with log likelihoods.
        # But copying this data into a pytorch pointer is more difficult
        # (numba api is one way)
        # Therefore launch a pointwise CUDA kernel to update the costs inplace
        # from data of llForward. Then negate to compute the loglikelihood.
        threadsperblock = min(costs.shape[0], 32)
        blockspergrid = (costs.shape[0] + (threadsperblock - 1)) // threadsperblock
        rnnt_helper.compute_costs_data[blockspergrid, threadsperblock, self.stream_, 0](
            llForward, costs, self.fastemit_lambda_
        )
        self.stream_.synchronize()

        return global_constants.RNNTStatus.RNNT_STATUS_SUCCESS

    def cost_and_grad(
        self,
        acts: torch.Tensor,
        grads: torch.Tensor,
        costs: torch.Tensor,
        pad_labels: torch.Tensor,
        label_lengths: torch.Tensor,
        input_lengths: torch.Tensor,
    ):
        """Validate inputs, then compute the multi-blank loss and gradients.

        Returns RNNT_STATUS_INVALID_VALUE if any argument is None; otherwise
        delegates to `compute_cost_and_score` in training mode.
        """
        if (
            acts is None
            or grads is None
            or costs is None
            or pad_labels is None
            or label_lengths is None
            or input_lengths is None
        ):
            return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE

        return self.compute_cost_and_score(
            acts, grads, costs, pad_labels, label_lengths, input_lengths
        )

    def score_forward(
        self,
        acts: torch.Tensor,
        costs: torch.Tensor,
        pad_labels: torch.Tensor,
        label_lengths: torch.Tensor,
        input_lengths: torch.Tensor,
    ):
        """Validate inputs, then compute only the multi-blank loss (no gradients).

        Returns RNNT_STATUS_INVALID_VALUE if any argument is None; otherwise
        delegates to `compute_cost_and_score` with grads=None (inference mode).
        """
        if (
            acts is None
            or costs is None
            or pad_labels is None
            or label_lengths is None
            or input_lengths is None
        ):
            return global_constants.RNNTStatus.RNNT_STATUS_INVALID_VALUE

        return self.compute_cost_and_score(
            acts, None, costs, pad_labels, label_lengths, input_lengths
        )

    def _prepare_workspace(self) -> Tuple[int, Tuple[torch.Tensor, ...]]:
        """
        Helper method that uses the workspace and constructs slices of it that
        can be used.

        Returns:
            An int, representing the offset of the used workspace (practically,
            the slice of the workspace consumed) A tuple of tensors representing
            the shared workspace.
        """
        used_offset = 0

        # // denom
        denom = self.gpu_workspace[
            used_offset : used_offset + self.maxT_ * self.maxU_ * self.minibatch_
        ]
        used_offset += self.maxT_ * self.maxU_ * self.minibatch_

        # // alphas & betas
        alphas = self.gpu_workspace[
            used_offset : used_offset + self.maxT_ * self.maxU_ * self.minibatch_
        ]
        used_offset += self.maxT_ * self.maxU_ * self.minibatch_
        betas = self.gpu_workspace[
            used_offset : used_offset + self.maxT_ * self.maxU_ * self.minibatch_
        ]
        used_offset += self.maxT_ * self.maxU_ * self.minibatch_

        # // logllh
        llForward = self.gpu_workspace[used_offset : used_offset + self.minibatch_]
        used_offset += self.minibatch_
        llBackward = self.gpu_workspace[used_offset : used_offset + self.minibatch_]
        used_offset += self.minibatch_

        # Big blank durations live in the separate integer workspace.
        bigblank_durations = self.big_blank_workspace[: self.num_big_blanks]

        return used_offset, (
            denom,
            alphas,
            betas,
            llForward,
            llBackward,
            bigblank_durations,
        )
# File: espnet2/asr/transducer/rnnt_multi_blank/utils/cuda_utils/gpu_rnnt_kernel.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from numba import cuda
from espnet2.asr.transducer.rnnt_multi_blank.utils import rnnt_helper
GPU_RNNT_THREAD_SIZE = 256
@cuda.jit(device=True, inline=True)
def logp(
    denom: torch.Tensor,
    acts: torch.Tensor,
    maxT: int,
    maxU: int,
    alphabet_size: int,
    mb: int,
    t: int,
    u: int,
    v: int,
):
    """
    Compute the sum of log probability from the activation tensor and its denominator.

    Args:
        denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the
            logprobs activation tensor across entire vocabulary.
        acts: Tensor of shape [B, T, U, V+1] flattened.
            Represents the logprobs activation tensor.
        maxT: The maximum possible acoustic sequence length.
            Represents T in the logprobs tensor.
        maxU: The maximum possible target sequence length.
            Represents U in the logprobs tensor.
        alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
        mb: Batch indexer.
        t: Acoustic sequence timestep indexer.
        u: Target sequence timestep indexer.
        v: Vocabulary token indexer.

    Returns:
        The sum of logprobs[mb, t, u, v] + denom[mb, t, u]
    """
    # Flat index of the (mb, t, u) cell in the [B, T, U] layout.
    row = mb * maxT + t
    cell = row * maxU + u
    # acts is laid out as [B, T, U, V+1]; step into the vocabulary axis by v.
    return denom[cell] + acts[cell * alphabet_size + v]
@cuda.jit()
def compute_alphas_kernel(
    acts: torch.Tensor,
    denom: torch.Tensor,
    alphas: torch.Tensor,
    llForward: torch.Tensor,
    xlen: torch.Tensor,
    ylen: torch.Tensor,
    mlabels: torch.Tensor,  # [B]
    minibatch: int,
    maxT: int,
    maxU: int,
    alphabet_size: int,
    blank_: int,
):
    """
    Compute alpha (forward variable) probabilities over the transduction step.

    Args:
        acts: Tensor of shape [B, T, U, V+1] flattened.
            Represents the logprobs activation tensor.
        denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the
            logprobs activation tensor across entire vocabulary.
        alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel
            with the forward variable probabilities.
        llForward: Zero tensor of shape [B]. Represents the log-likelihood of the
            forward pass. Returned as the forward pass loss that is reduced by
            the optimizer.
        xlen: Vector of length B which contains the actual acoustic sequence
            lengths in the padded activation tensor.
        ylen: Vector of length B which contains the actual target sequence
            lengths in the padded activation tensor.
        mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token
            - usually the RNNT blank). The matrix contains the padded target
            transcription that must be predicted.
        minibatch: Int representing the batch size.
        maxT: The maximum possible acoustic sequence length.
            Represents T in the logprobs tensor.
        maxU: The maximum possible target sequence length.
            Represents U in the logprobs tensor.
        alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
        blank_: Index of the RNNT blank token in the vocabulary.
            Generally the first or last token in the vocab.

    Updates:
        Kernel inplace updates the following inputs:
        - alphas: forward variable scores.
        - llForward: log-likelihood of forward variable.
    """
    # // launch B blocks, each block has U threads
    b = cuda.blockIdx.x  # // batch id
    u = cuda.threadIdx.x  # label id, u
    T = xlen[b]  # select AM length of current sample
    U = ylen[b] + 1  # select target length of current sample, +1 for the blank token

    labels: torch.Tensor = mlabels[
        b
    ]  # mb label start point, equivalent to mlabels + b * (maxU - 1)
    offset = b * maxT * maxU  # pointer indexing offset

    # alphas += offset # pointer offset, ignored since we explicitly add offset

    # Initialize alpha[b, t=0, u=0] for all b in B
    if u == 0:
        alphas[offset] = 0

    # sync until all alphas are initialized
    cuda.syncthreads()

    # Ordinary alpha calculations, broadcast across B=b and U=u.
    # Threads sweep anti-diagonals of the (t, u) lattice: at step n,
    # thread u processes cell t = n - u, so each cell's dependencies
    # (t-1, u) and (t, u-1) were finished on earlier diagonals.
    # Look up forward variable calculation from rnnt_numpy.forward_pass()
    for n in range(1, T + U - 1):
        t = n - u

        if u == 0:
            # for t in range(1, T) step to initialize alphas[b, t, 0]
            if t > 0 and t < T:
                alphas[offset + t * maxU + u] = alphas[
                    offset + (t - 1) * maxU + u
                ] + logp(denom, acts, maxT, maxU, alphabet_size, b, t - 1, 0, blank_)
        elif u < U:
            # for u in range(1, U) step to initialize alphas[b, 0, u]
            if t == 0:
                alphas[offset + u] = alphas[offset + u - 1] + logp(
                    denom, acts, maxT, maxU, alphabet_size, b, 0, u - 1, labels[u - 1]
                )

            # for t in range(1, T) for u in range(1, U) step to compute alphas[b, t, u]
            elif t > 0 and t < T:
                # no_emit: arrive via blank from (t-1, u); emit: via label from (t, u-1)
                no_emit = alphas[offset + (t - 1) * maxU + u] + logp(
                    denom, acts, maxT, maxU, alphabet_size, b, t - 1, u, blank_
                )
                emit = alphas[offset + t * maxU + u - 1] + logp(
                    denom, acts, maxT, maxU, alphabet_size, b, t, u - 1, labels[u - 1]
                )

                alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)

        # sync across all B=b and U=u
        cuda.syncthreads()

    # After final sync, alphas[b, T-1, U - 1] + logprobs[b, T-1, U-1, blank]
    # + denom[b, T-1, U-1] gives log-likelihood of forward pass.
    if u == 0:
        loglike = alphas[offset + (T - 1) * maxU + U - 1] + logp(
            denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_
        )
        llForward[b] = loglike
@cuda.jit()
def compute_betas_kernel(
    acts: torch.Tensor,
    denom: torch.Tensor,
    betas: torch.Tensor,
    llBackward: torch.Tensor,
    xlen: torch.Tensor,
    ylen: torch.Tensor,
    mlabels: torch.Tensor,  # [B, U]
    minibatch: int,
    maxT: int,
    maxU: int,
    alphabet_size: int,
    blank_: int,
):
    """
    Compute beta (backward variable) probabilities over the transduction step.

    Args:
        acts: Tensor of shape [B, T, U, V+1] flattened.
            Represents the logprobs activation tensor.
        denom: Tensor of shape [B, T, U] flattened. Represents the denominator
            of the logprobs activation tensor across entire vocabulary.
        betas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel
            with the backward variable probabilities.
        llBackward: Zero tensor of shape [B]. Represents the log-likelihood
            of the backward pass. Returned as the backward pass loss that
            is reduced by the optimizer.
        xlen: Vector of length B which contains the actual acoustic
            sequence lengths in the padded activation tensor.
        ylen: Vector of length B which contains the actual target sequence
            lengths in the padded activation tensor.
        mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token
            - usually the RNNT blank). The matrix contains the padded target
            transcription that must be predicted.
        minibatch: Int representing the batch size.
        maxT: The maximum possible acoustic sequence length.
            Represents T in the logprobs tensor.
        maxU: The maximum possible target sequence length.
            Represents U in the logprobs tensor.
        alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
        blank_: Index of the RNNT blank token in the vocabulary.
            Generally the first or last token in the vocab.

    Updates:
        Kernel inplace updates the following inputs:
        - betas: backward variable scores.
        - llBackward: log-likelihood of backward variable.
    """
    # // launch B blocks, each block has U threads
    b = cuda.blockIdx.x  # // batch id
    u = cuda.threadIdx.x  # label id, u
    T = xlen[b]  # select AM length of current sample
    U = ylen[b] + 1  # select target length of current sample, +1 for the blank token

    labels: torch.Tensor = mlabels[
        b
    ]  # mb label start point, equivalent to mlabels + b * (maxU - 1)
    offset = b * maxT * maxU  # pointer indexing offset

    # betas += offset # pointer offset, ignored since we explicitly add offset

    # Initialize beta[b, t=T-1, u=U-1] for all b in B
    # with log_probs[b, t=T-1, u=U-1, blank]
    if u == 0:
        betas[offset + (T - 1) * maxU + U - 1] = logp(
            denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_
        )

    # sync until all betas are initialized
    cuda.syncthreads()

    # Ordinary beta calculations, broadcast across B=b and U=u.
    # Mirror image of the alpha kernel: threads sweep anti-diagonals in
    # reverse, so the dependencies (t+1, u) and (t, u+1) are ready.
    # Look up backward variable calculation from rnnt_numpy.backward_pass()
    for n in range(T + U - 2, -1, -1):
        t = n - u

        if u == (U - 1):
            # for t in reversed(range(T - 1)) step to initialize betas[b, t, U-1]
            if t >= 0 and t < (T - 1):
                betas[offset + t * maxU + U - 1] = betas[
                    offset + (t + 1) * maxU + U - 1
                ] + logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_)
        elif u < U:
            if t == T - 1:
                # for u in reversed(range(U - 1)) step to initialize betas[b, T-1, u]
                betas[offset + (T - 1) * maxU + u] = betas[
                    offset + (T - 1) * maxU + u + 1
                ] + logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, u, labels[u])
            elif (t >= 0) and (t < T - 1):
                # for t in reversed(range(T - 1)) for u in reversed(range(U - 1))
                # step to compute betas[b, t, u]
                no_emit = betas[offset + (t + 1) * maxU + u] + logp(
                    denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_
                )
                emit = betas[offset + t * maxU + u + 1] + logp(
                    denom, acts, maxT, maxU, alphabet_size, b, t, u, labels[u]
                )
                betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)

        # sync across all B=b and U=u
        cuda.syncthreads()

    # After final sync, betas[b, 0, 0] gives
    # log-likelihood of backward pass.
    if u == 0:
        llBackward[b] = betas[offset]
@cuda.jit()
def compute_grad_kernel(
    grads: torch.Tensor,
    acts: torch.Tensor,
    denom: torch.Tensor,
    alphas: torch.Tensor,
    betas: torch.Tensor,
    logll: torch.Tensor,
    xlen: torch.Tensor,
    ylen: torch.Tensor,
    mlabels: torch.Tensor,  # [B, U]
    minibatch: int,
    maxT: int,
    maxU: int,
    alphabet_size: int,
    blank_: int,
    fastemit_lambda: float,
    clamp: float,
):
    """
    Compute gradients over the transduction step.

    Args:
        grads: Zero Tensor of shape [B, T, U, V+1]. Is updated by this kernel to
            contain the gradients of this batch of samples.
        acts: Tensor of shape [B, T, U, V+1] flattened.
            Represents the logprobs activation tensor.
        denom: Tensor of shape [B, T, U] flattened. Represents the denominator
            of the logprobs activation tensor across entire vocabulary.
        alphas: Alpha variable, contains forward probabilities.
            A tensor of shape [B, T, U].
        betas: Beta variable, contains backward probabilities.
            A tensor of shape [B, T, U].
        logll: Log-likelihood of the forward variable, represented as a vector
            of shape [B]. Represents the log-likelihood of the forward pass.
        xlen: Vector of length B which contains the actual acoustic sequence
            lengths in the padded activation tensor.
        ylen: Vector of length B which contains the actual target sequence lengths
            in the padded activation tensor.
        mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token
            - usually the RNNT blank). The matrix contains the padded target
            transcription that must be predicted.
        minibatch: Int representing the batch size.
        maxT: The maximum possible acoustic sequence length.
            Represents T in the logprobs tensor.
        maxU: The maximum possible target sequence length.
            Represents U in the logprobs tensor.
        alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
        blank_: Index of the RNNT blank token in the vocabulary.
            Generally the first or last token in the vocab.
        fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
            FastEmit: Low-latency Streaming ASR with Sequence-level
            Emission Regularization.
        clamp: Float value. When set to value >= 0.0, will clamp the
            gradient to [-clamp, clamp].

    Updates:
        Kernel inplace updates the following inputs:
        - grads: Gradients with respect to the log likelihood (logll).
    """
    # Kernel call:
    # blocks_per_grid = minibatch (b) * maxT (t) * maxU (u)
    # threads_per_block = constant buffer size of parallel threads (v :: Constant)
    tid = cuda.threadIdx.x  # represents v, taking steps of some constant size

    idx = tid  # index of v < V+1; in steps of constant buffer size
    col = cuda.blockIdx.x  # represents a fused index of b * t * u

    # Decompose original indices from fused `col`
    u = col % maxU  # (b * t * u) % u = u
    bt = (col - u) // maxU  # (b * t * u - u) // U = b * t
    t = bt % maxT  # (b * t) % t = t
    mb = (bt - t) // maxT  # (b * t - t) // T = b

    # constants
    T = xlen[mb]  # select AM length of current sample
    U = ylen[mb] + 1  # select target length of current sample, +1 for the blank token
    labels: torch.Tensor = mlabels[mb]  # labels = mlabels + mb * (maxU - 1);

    # Buffered gradient calculations, broadcast across B=b, T=t and U=u,
    # looped over V with some constant stride.
    # Look up gradient calculation from rnnt_numpy.compute_gradient()
    if t < T and u < U:
        # For cuda kernels, maximum number of threads per block is limited to some value
        # However, it may be the case that vocabulary size is larger than this limit
        # To work around this, an arbitrary thread buffer size is chosen such that,
        # 1) each element within the thread pool operates independently of the other
        # 2) An inner while loop moves the index of each buffer element by the size
        #    of the buffer itself, such that all elements of the vocabulary size are
        #    covered in (V + 1 // thread_buffer) number of steps.
        # As such, each thread will perform the while loop at least
        # (V + 1 // thread_buffer) number of times
        while idx < alphabet_size:
            # remember, `col` represents the tri-index [b, t, u]
            # therefore; logpk = denom[b, t, u] + acts[b, t, u, v]
            logpk = denom[col] + acts[col * alphabet_size + idx]
            # initialize the grad of the sample acts[b, t, u, v]
            grad = math.exp(alphas[col] + betas[col] + logpk - logll[mb])

            # If FastEmit regularization is enabled, calculate the gradeint of
            # probability of predicting the next label at the current timestep.
            # The formula for this is Equation 9 in https://arxiv.org/abs/2010.11148,
            # multiplied by the log probability of the current step (t, u),
            # normalized by the total log likelihood. Once the gradient has been
            # calculated, scale it by `fastemit_lambda`, as in Equation 10.
            if fastemit_lambda > 0.0 and u < U - 1:
                fastemit_grad = fastemit_lambda * math.exp(
                    alphas[col]  # alphas(t, u)
                    + (
                        denom[col] + acts[col * alphabet_size + labels[u]]
                    )  # y_hat(t, u)
                    + betas[col + 1]  # betas(t, u+1)
                    + logpk  # log Pr(k|t, u)
                    - logll[mb]  # total log likelihood for normalization
                )
            else:
                fastemit_grad = 0.0

            # Update the gradient of act[b, t, u, v] with the gradient from
            # FastEmit regularization
            grad = grad + fastemit_grad

            # // grad to last blank transition
            # grad[b, T-1, U-1, v=blank] -= exp(alphas[b, t, u) + logpk - logll[b])
            if (idx == blank_) and (t == T - 1) and (u == U - 1):
                grad -= math.exp(alphas[col] + logpk - logll[mb])

            # grad of blank across t < T;
            # grad[b, t<T-1, u, v=blank] -= exp(alphas[b, t, u]
            #   + logpk - logll[b] betas[b, t + 1, u])
            if (idx == blank_) and (t < T - 1):
                grad -= math.exp(alphas[col] + logpk - logll[mb] + betas[col + maxU])

            # grad of correct token across u < U;
            # grad[b, t, u<U-1, v=label[u]] -= exp(alphas[b, t, u]
            #   + logpk - logll[b] + betas[b, t, u+1])
            # Scale the gradient by (1.0 + FastEmit_lambda) in log space,
            # then exponentiate
            if (u < U - 1) and (idx == labels[u]):
                # exp(log(1 + fastemit_lambda) + ...) is numerically more stable than
                # multiplying (1.0 + fastemit_lambda) with result.
                grad -= math.exp(
                    math.log1p(fastemit_lambda)
                    + alphas[col]
                    + logpk
                    - logll[mb]
                    + betas[col + 1]
                )

            # update grads[b, t, u, v] = grad
            grads[col * alphabet_size + idx] = grad

            # clamp gradient (if needed)
            if clamp > 0.0:
                g = grads[col * alphabet_size + idx]
                g = min(g, clamp)
                g = max(g, -clamp)
                grads[col * alphabet_size + idx] = g

            # update internal index through the thread_buffer;
            # until idx < V + 1, such that entire vocabulary has been updated.
            idx += GPU_RNNT_THREAD_SIZE
@cuda.jit()
def compute_multiblank_alphas_kernel(
    acts: torch.Tensor,
    denom: torch.Tensor,
    sigma: float,
    alphas: torch.Tensor,
    llForward: torch.Tensor,
    xlen: torch.Tensor,
    ylen: torch.Tensor,
    mlabels: torch.Tensor,
    minibatch: int,
    maxT: int,
    maxU: int,
    alphabet_size: int,
    blank_: int,
    big_blank_duration: torch.Tensor,
    num_big_blanks: int,
):
    """
    Compute alpha (forward variable) probabilities for multi-blank transducer loss
    (https://arxiv.org/pdf/2211.03541).

    Args:
        acts: Tensor of shape [B, T, U, V + 1 + num_big_blanks] flattened.
            Represents the logprobs activation tensor.
        denom: Tensor of shape [B, T, U] flattened. Represents the denominator of
            the logprobs activation tensor across entire vocabulary.
        sigma: Hyper-parameter for logit-undernormalization technique for training
            multi-blank transducers.
        alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel
            with the forward variable probabilities.
        llForward: Zero tensor of shape [B]. Represents the log-likelihood of the
            forward pass. Returned as the forward pass loss that is
            reduced by the optimizer.
        xlen: Vector of length B which contains the actual acoustic sequence
            lengths in the padded activation tensor.
        ylen: Vector of length B which contains the actual target sequence
            lengths in the padded activation tensor.
        mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token
            - usually the RNNT blank). The matrix contains the padded target
            transcription that must be predicted.
        minibatch: Int representing the batch size.
        maxT: The maximum possible acoustic sequence length.
            Represents T in the logprobs tensor.
        maxU: The maximum possible target sequence length.
            Represents U in the logprobs tensor.
        alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
        blank_: Index of the RNNT standard blank token in the vocabulary.
        big_blank_duration: Vector of supported big blank durations of the model.
        num_big_blanks: Number of big blanks of the model.

    Updates:
        Kernel inplace updates the following inputs:
        - alphas: forward variable scores.
        - llForward: log-likelihood of forward variable.
    """
    # // launch B blocks, each block has U threads
    b = cuda.blockIdx.x  # // batch id
    u = cuda.threadIdx.x  # label id, u
    T = xlen[b]  # select AM length of current sample
    U = ylen[b] + 1  # select target length of current sample, +1 for the blank token

    labels: torch.Tensor = mlabels[
        b
    ]  # mb label start point, equivalent to mlabels + b * (maxU - 1)
    offset = b * maxT * maxU  # pointer indexing offset

    # Initialize alpha[b, t=0, u=0] for all b in B
    if u == 0:
        alphas[offset] = 0

    # sync until all alphas are initialized
    cuda.syncthreads()

    # Ordinary alpha calculations, broadcast across B=b and U=u
    # Look up forward variable calculation from rnnt_numpy.forward_pass()
    # Note: because of the logit under-normalization, everytime logp() is called,
    # it is always followed by a `-sigma` term.
    # NOTE: the i-th big blank is addressed at vocabulary index blank_ - 1 - i,
    # i.e. big blanks occupy the slots immediately before the standard blank.
    for n in range(1, T + U - 1):
        t = n - u

        if u == 0:
            # for t in range(1, T) step to initialize alphas[b, t, 0]
            if t > 0 and t < T:
                alphas[offset + t * maxU + u] = (
                    alphas[offset + (t - 1) * maxU + u]
                    + logp(denom, acts, maxT, maxU, alphabet_size, b, t - 1, 0, blank_)
                    - sigma
                )

                # Now add the weights for big blanks.
                for i in range(num_big_blanks):
                    if t >= big_blank_duration[i]:
                        alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
                            alphas[offset + t * maxU + u],
                            alphas[offset + (t - big_blank_duration[i]) * maxU + u]
                            + logp(
                                denom,
                                acts,
                                maxT,
                                maxU,
                                alphabet_size,
                                b,
                                t - big_blank_duration[i],
                                0,
                                blank_ - 1 - i,
                            )
                            - sigma,
                        )
        elif u < U:
            # for u in range(1, U) step to initialize alphas[b, 0, u]
            if t == 0:
                alphas[offset + u] = (
                    alphas[offset + u - 1]
                    + logp(
                        denom,
                        acts,
                        maxT,
                        maxU,
                        alphabet_size,
                        b,
                        0,
                        u - 1,
                        labels[u - 1],
                    )
                    - sigma
                )

            # for t in range(1, T) for u in range(1, U) step to compute alphas[b, t, u]
            elif t > 0 and t < T:
                no_emit = (
                    alphas[offset + (t - 1) * maxU + u]
                    + logp(denom, acts, maxT, maxU, alphabet_size, b, t - 1, u, blank_)
                    - sigma
                )
                emit = (
                    alphas[offset + t * maxU + u - 1]
                    + logp(
                        denom,
                        acts,
                        maxT,
                        maxU,
                        alphabet_size,
                        b,
                        t,
                        u - 1,
                        labels[u - 1],
                    )
                    - sigma
                )

                alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)

                # Now add the weights for big blanks.
                for i in range(num_big_blanks):
                    if t >= big_blank_duration[i]:
                        # big-blank weight here is
                        # alpha(t - duration, u) * p(big-blank | t - duration, u)
                        # / exp(sigma), in log domain
                        # do this all all big-blanks if the above condition is met
                        big_blank_no_emit = (
                            alphas[offset + (t - big_blank_duration[i]) * maxU + u]
                            + logp(
                                denom,
                                acts,
                                maxT,
                                maxU,
                                alphabet_size,
                                b,
                                t - big_blank_duration[i],
                                u,
                                blank_ - 1 - i,
                            )
                            - sigma
                        )
                        alphas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
                            alphas[offset + t * maxU + u], big_blank_no_emit
                        )

        # sync across all B=b and U=u
        cuda.syncthreads()

    # After final sync, alphas[b, T-1, U - 1] + logprobs[b, T-1, U-1, blank]
    # + denom[b, T-1, U-1] gives log-likelihood of forward pass.
    if u == 0:
        loglike = (
            alphas[offset + (T - 1) * maxU + U - 1]
            + logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
            - sigma
        )

        # Now add the weights for big blanks for the final weight computation.
        for i in range(num_big_blanks):
            if T >= big_blank_duration[i]:
                big_blank_loglike = (
                    alphas[offset + (T - big_blank_duration[i]) * maxU + U - 1]
                    + logp(
                        denom,
                        acts,
                        maxT,
                        maxU,
                        alphabet_size,
                        b,
                        T - big_blank_duration[i],
                        U - 1,
                        blank_ - 1 - i,
                    )
                    - sigma
                )
                loglike = rnnt_helper.log_sum_exp(loglike, big_blank_loglike)

        llForward[b] = loglike
@cuda.jit()
def compute_multiblank_betas_kernel(
    acts: torch.Tensor,
    denom: torch.Tensor,
    sigma: float,
    betas: torch.Tensor,
    llBackward: torch.Tensor,
    xlen: torch.Tensor,
    ylen: torch.Tensor,
    mlabels: torch.Tensor,  # [B, U]
    minibatch: int,
    maxT: int,
    maxU: int,
    alphabet_size: int,
    blank_: int,
    big_blank_duration: torch.Tensor,
    num_big_blanks: int,
):
    """
    Compute beta (backward variable) probabilities for multi-blank transducer loss
    (https://arxiv.org/pdf/2211.03541).

    Args:
        acts: Tensor of shape [B, T, U, V + 1 + num-big-blanks] flattened.
            Represents the logprobs activation tensor.
        denom: Tensor of shape [B, T, U] flattened. Represents the denominator
            of the logprobs activation tensor across entire vocabulary.
        sigma: Hyper-parameter for logit-undernormalization technique for
            training multi-blank transducers.
        betas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel
            with the backward variable probabilities.
        llBackward: Zero tensor of shape [B]. Represents the log-likelihood
            of the backward pass. Returned as the backward pass loss
            that is reduced by the optimizer.
        xlen: Vector of length B which contains the actual acoustic sequence
            lengths in the padded activation tensor.
        ylen: Vector of length B which contains the actual target sequence
            lengths in the padded activation tensor.
        mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token
            - usually the RNNT blank). The matrix contains the padded target
            transcription that must be predicted.
        minibatch: Int representing the batch size.
        maxT: The maximum possible acoustic sequence length.
            Represents T in the logprobs tensor.
        maxU: The maximum possible target sequence length.
            Represents U in the logprobs tensor.
        alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
        blank_: Index of the RNNT standard blank token in the vocabulary.
            Big blanks are addressed as ``blank_ - 1 - i`` for duration index i.
        big_blank_durations: Vector of supported big blank durations of the model.
        num_big_blanks: Number of big blanks of the model.

    Updates:
        Kernel inplace updates the following inputs:
        -   betas: backward variable scores.
        -   llBackward: log-likelihood of backward variable.
    """
    # // launch B blocks, each block has U threads
    b = cuda.blockIdx.x  # // batch id
    u = cuda.threadIdx.x  # label id, u
    T = xlen[b]  # select AM length of current sample
    U = ylen[b] + 1  # select target length of current sample, +1 for the blank token
    labels: torch.Tensor = mlabels[
        b
    ]  # mb label start point, equivalent to mlabels + b * (maxU - 1)
    offset = b * maxT * maxU  # pointer indexing offset

    # Note: just like the alphas, because of the logit under-normalization, everytime
    # logp() is called, it is always followed by a `-sigma` term.

    # Initialize beta[b, t=T-1, u=U-1] for all b in B with
    # log_probs[b, t=T-1, u=U-1, blank]
    if u == 0:
        betas[offset + (T - 1) * maxU + U - 1] = (
            logp(denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_)
            - sigma
        )

    # sync until all betas are initialized
    cuda.syncthreads()

    # Ordinary beta calculations, broadcast across B=b and U=u
    # Look up backward variable calculation from rnnt_numpy.backward_pass()
    # Each thread owns one u; with t = n - u all threads of one iteration sit on
    # the same anti-diagonal, so betas[t + 1, u] and betas[t, u + 1] are already
    # final when read (the per-iteration syncthreads below enforces this).
    for n in range(T + U - 2, -1, -1):
        t = n - u

        if u == (U - 1):
            # for t in reversed(range(T - 1)) step to initialize betas[b, t, U-1]
            if t >= 0 and t < (T - 1):
                # beta[t, U - 1] = beta[t + 1, U - 1] * p(blank | t, U - 1) / exp(sigma)
                # this part is the same as regular RNN-T.
                betas[offset + t * maxU + U - 1] = (
                    betas[offset + (t + 1) * maxU + U - 1]
                    + logp(denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_)
                    - sigma
                )

                # now add the weights from big blanks
                for i in range(num_big_blanks):
                    if t + big_blank_duration[i] < T:
                        # adding to beta[t, U - 1] of weight (in log domain),
                        # beta[t + duration, U - 1] *
                        # p(big-blank | t, U - 1) / exp(sigma)
                        betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
                            betas[offset + t * maxU + U - 1],
                            betas[offset + (t + big_blank_duration[i]) * maxU + U - 1]
                            + logp(
                                denom,
                                acts,
                                maxT,
                                maxU,
                                alphabet_size,
                                b,
                                t,
                                U - 1,
                                blank_ - 1 - i,
                            )
                            - sigma,
                        )
                    elif t + big_blank_duration[i] == T and big_blank_duration[i] != 1:
                        # adding to beta[T - duration, U - 1] of weight (in log domain),
                        # p(big-blank | T - duration, U - 1) / exp(sigma)
                        # (duration == 1 is excluded: that path is already the
                        # standard blank initialization handled above)
                        betas[offset + t * maxU + U - 1] = rnnt_helper.log_sum_exp(
                            betas[offset + t * maxU + U - 1],
                            logp(
                                denom,
                                acts,
                                maxT,
                                maxU,
                                alphabet_size,
                                b,
                                t,
                                U - 1,
                                blank_ - 1 - i,
                            )
                            - sigma,
                        )
        elif u < U:
            if t == T - 1:
                # for u in reversed(range(U - 1)) step to initialize betas[b, T-1, u]
                betas[offset + (T - 1) * maxU + u] = (
                    betas[offset + (T - 1) * maxU + u + 1]
                    + logp(
                        denom, acts, maxT, maxU, alphabet_size, b, T - 1, u, labels[u]
                    )
                    - sigma
                )
            elif (t >= 0) and (t < T - 1):
                # for t in reversed(range(T - 1)) for u in reversed(range(U - 1))
                # step to compute betas[b, t, u]
                no_emit = (
                    betas[offset + (t + 1) * maxU + u]
                    + logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_)
                    - sigma
                )
                emit = (
                    betas[offset + t * maxU + u + 1]
                    + logp(denom, acts, maxT, maxU, alphabet_size, b, t, u, labels[u])
                    - sigma
                )
                betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)

                # now add the weights from big blanks
                for i in range(num_big_blanks):
                    if t < T - big_blank_duration[i]:
                        # added weight for the big-blank,
                        # beta[t + duration, u] * p(big-blank | t, u) / exp(sigma)
                        big_blank_no_emit = (
                            betas[offset + (t + big_blank_duration[i]) * maxU + u]
                            + logp(
                                denom,
                                acts,
                                maxT,
                                maxU,
                                alphabet_size,
                                b,
                                t,
                                u,
                                blank_ - 1 - i,
                            )
                            - sigma
                        )
                        betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(
                            betas[offset + t * maxU + u], big_blank_no_emit
                        )

        # sync across all B=b and U=u
        cuda.syncthreads()

    # After final sync, betas[b, 0, 0] gives
    # log-likelihood of backward pass.
    if u == 0:
        llBackward[b] = betas[offset]
@cuda.jit()
def compute_multiblank_grad_kernel(
    grads: torch.Tensor,
    acts: torch.Tensor,
    denom: torch.Tensor,
    sigma: float,
    alphas: torch.Tensor,
    betas: torch.Tensor,
    logll: torch.Tensor,
    xlen: torch.Tensor,
    ylen: torch.Tensor,
    mlabels: torch.Tensor,  # [B, U]
    minibatch: int,
    maxT: int,
    maxU: int,
    alphabet_size: int,
    blank_: int,
    big_blank_duration: torch.Tensor,
    num_big_blanks: int,
    fastemit_lambda: float,
    clamp: float,
):
    """
    Compute gradients for multi-blank transducer loss
    (https://arxiv.org/pdf/2211.03541).

    Args:
        grads: Zero Tensor of shape [B, T, U, V + 1 + num_big_blanks].
            Is updated by this kernel to contain the gradients of this batch of samples.
        acts: Tensor of shape [B, T, U, V + 1 + num_big_blanks] flattened.
            Represents the logprobs activation tensor.
        denom: Tensor of shape [B, T, U] flattened. Represents the denominator
            of the logprobs activation tensor across entire vocabulary.
        sigma: Hyper-parameter for logit-undernormalization technique
            for training multi-blank transducers.
        alphas: Alpha variable, contains forward probabilities.
            A tensor of shape [B, T, U].
        betas: Beta variable, contains backward probabilities.
            A tensor of shape [B, T, U].
        logll: Log-likelihood of the forward variable, represented as
            a vector of shape [B]. Represents the log-likelihood of the forward pass.
        xlen: Vector of length B which contains the actual acoustic
            sequence lengths in the padded activation tensor.
        ylen: Vector of length B which contains the actual target sequence
            lengths in the padded activation tensor.
        mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token
            - usually the RNNT blank). The matrix contains the padded target
            transcription that must be predicted.
        minibatch: Int representing the batch size.
        maxT: The maximum possible acoustic sequence length.
            Represents T in the logprobs tensor.
        maxU: The maximum possible target sequence length.
            Represents U in the logprobs tensor.
        alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
        blank_: Index of the RNNT blank token in the vocabulary.
            Generally the first or last token in the vocab.
        fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
            FastEmit: Low-latency Streaming ASR with Sequence-level
            Emission Regularization.
        clamp: Float value. When set to value >= 0.0, will clamp
            the gradient to [-clamp, clamp].
        big_blank_durations: Vector of supported big blank durations of the model.
        num_big_blanks: Number of big blanks of the model.

    Updates:
        Kernel inplace updates the following inputs:
        -   grads: Gradients with respect to the log likelihood (logll).
    """
    # Kernel call:
    # blocks_per_grid = minibatch (b) * maxT (t) * maxU (u)
    # threads_per_block = constant buffer size of parallel threads (v :: Constant)
    tid = cuda.threadIdx.x  # represents v, taking steps of some constant size

    idx = tid  # index of v < V+1; in steps of constant buffer size
    col = cuda.blockIdx.x  # represents a fused index of b * t * u

    # Decompose original indices from fused `col`
    u = col % maxU  # (b * t * u) % u = u
    bt = (col - u) // maxU  # (b * t * u - u) // U = b * t
    t = bt % maxT  # (b * t) % t = t
    mb = (bt - t) // maxT  # (b * t - t) // T = b

    # constants
    T = xlen[mb]  # select AM length of current sample
    U = ylen[mb] + 1  # select target length of current sample, +1 for the blank token
    labels: torch.Tensor = mlabels[mb]  # labels = mlabels + mb * (maxU - 1);

    # Buffered gradient calculations, broadcast across B=b, T=t and U=u, looped over
    # V with some constant stride. Look up gradient calculation from
    # rnnt_numpy.compute_gradient()
    if t < T and u < U:
        # For cuda kernels, maximum number of threads per block is limited to some value
        # However, it may be the case that vocabulary size is larger than this limit
        # To work around this, an arbitrary thread buffer size is chosen such that,
        # 1) each element within the thread pool operates independently of the other
        # 2) An inner while loop moves the index of each buffer element by the size
        #    of the buffer itself, such that all elements of the vocabulary size are
        #    covered in (V + 1 // thread_buffer) number of steps.
        # As such, each thread will perform the while loop at least
        # (V + 1 // thread_buffer) number of times
        while idx < alphabet_size:
            # remember, `col` represents the tri-index [b, t, u]
            # therefore; logpk = denom[b, t, u] + acts[b, t, u, v]
            logpk = denom[col] + acts[col * alphabet_size + idx]

            # initialize the grad of the sample acts[b, t, u, v]
            grad = math.exp(alphas[col] + betas[col] + logpk - logll[mb])

            # In all of the following computation, whenever logpk is used, we
            # need to subtract sigma based on our derivation of the gradient of
            # the logit under-normalization method.

            # If FastEmit regularization is enabled, calculate the gradient of
            # probability of predicting the next label at the current timestep.
            # The formula for this is Equation 9 in https://arxiv.org/abs/2010.11148,
            # multiplied by the log probability of the current step (t, u), normalized
            # by the total log likelihood. Once the gradient has been calculated,
            # scale it by `fastemit_lambda`, as in Equation 10.
            if fastemit_lambda > 0.0 and u < U - 1:
                fastemit_grad = fastemit_lambda * math.exp(
                    alphas[col]  # alphas(t, u)
                    + (denom[col] + acts[col * alphabet_size + labels[u]])
                    + betas[col + 1]  # betas(t, u+1)
                    + logpk  # log Pr(k|t, u)
                    - sigma
                    - logll[mb]  # total log likelihood for normalization
                )
            else:
                fastemit_grad = 0.0

            # Update the gradient of act[b, t, u, v] with the gradient
            # from FastEmit regularization
            grad = grad + fastemit_grad

            # grad to last blank transition
            # grad[b, T-1, U-1, v=blank] -= exp(alphas[b, t, u)
            #   + logpk - sigma - logll[b])
            if (idx == blank_) and (t == T - 1) and (u == U - 1):
                grad -= math.exp(alphas[col] + logpk - sigma - logll[mb])
            else:
                # this is one difference of the multi-blank gradient from standard RNN-T
                # gradient -- basically, wherever the blank_ symbol is addressed in the
                # original code, we need to do similar things to big blanks, and we need
                # to change the if conditions to match the duration of the big-blank.
                # grad[b, T-duration, U-1, v=big-blank] -=
                #   exp(alphas[b, t, u) + logpk - sigma - logll[b])
                for i in range(num_big_blanks):
                    if (
                        (idx == blank_ - 1 - i)
                        and (t == T - big_blank_duration[i])
                        and (u == U - 1)
                    ):
                        grad -= math.exp(alphas[col] + logpk - sigma - logll[mb])

            # grad of blank across t < T;
            # grad[b, t<T-1, u, v=blank] -= exp(alphas[b, t, u] +
            #   logpk - sigma - logll[b] betas[b, t + 1, u])
            if (idx == blank_) and (t < T - 1):
                grad -= math.exp(
                    alphas[col] + logpk - sigma - logll[mb] + betas[col + maxU]
                )
            else:
                # This is another difference between multi-blank and RNN-T gradients.
                # Now we consider gradients for big-blanks.
                # grad[b, t<T-duration, u, v=big-blank] -=
                #   exp(alphas[b, t, u] + logpk - sigma - logll[b]
                #       + betas[b, t + duration, u])
                for i in range(num_big_blanks):
                    if (idx == blank_ - 1 - i) and (t < T - big_blank_duration[i]):
                        grad -= math.exp(
                            alphas[col]
                            + logpk
                            - sigma
                            - logll[mb]
                            + betas[col + big_blank_duration[i] * maxU]
                        )

            # grad of correct token across u < U;
            # grad[b, t, u<U-1, v=label[u]] -=
            #   exp(alphas[b, t, u] + logpk - sigma - logll[b] + betas[b, t, u+1])
            # Scale the gradient by (1.0 + FastEmit_lambda) in log space,
            # then exponentiate
            if (u < U - 1) and (idx == labels[u]):
                # exp(log(1 + fastemit_lambda) + ...) is numerically more stable than
                # multiplying (1.0 + fastemit_lambda) with result.
                grad -= math.exp(
                    math.log1p(fastemit_lambda)
                    + alphas[col]
                    + logpk
                    - sigma
                    - logll[mb]
                    + betas[col + 1]
                )

            # update grads[b, t, u, v] = grad
            grads[col * alphabet_size + idx] = grad

            # clamp gradient (if needed)
            if clamp > 0.0:
                g = grads[col * alphabet_size + idx]
                g = min(g, clamp)
                g = max(g, -clamp)
                grads[col * alphabet_size + idx] = g

            # update internal index through the thread_buffer;
            # until idx < V + 1, such that entire vocabulary has been updated.
            idx += GPU_RNNT_THREAD_SIZE
| 46,645 | 42.150786 | 88 | py |
espnet | espnet-master/espnet2/asr/transducer/rnnt_multi_blank/utils/cpu_utils/cpu_rnnt.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import multiprocessing
from typing import Optional
import numba
import torch
from torch.autograd import Function
from espnet2.asr.transducer.rnnt_multi_blank.utils import global_constants
def log_sum_exp(a: torch.Tensor, b: torch.Tensor):
    """Numerically stable log(exp(a) + exp(b)) for scalar tensors.

    An infinite operand (either sign) short-circuits to the other operand;
    otherwise the computation is anchored on the larger value so the
    exponential never overflows.
    """
    if torch.isinf(a):
        return b
    if torch.isinf(b):
        return a
    hi, lo = (a, b) if a > b else (b, a)
    return hi + math.log1p(math.exp(lo - hi))
class CpuRNNT_index:
    """Maps (t, u[, v]) coordinates to offsets in a flattened tensor.

    Mimics the pointer arithmetic used by the CUDA kernels so the CPU path
    can index the same flat buffers.

    Args:
        U: Length of the current target sample (without padding).
        maxU: Max Length of the padded target samples.
        minibatch: Minibatch index
        alphabet_size: Size of the vocabulary including RNNT blank - V+1.
        batch_first: Bool flag determining if batch index is first or third.
    """

    def __init__(
        self, U: int, maxU: int, minibatch: int, alphabet_size: int, batch_first: bool
    ):
        super().__init__()
        self.U = U
        self.maxU = maxU
        self.minibatch = minibatch
        self.alphabet_size = alphabet_size
        self.batch_first = batch_first

    def __call__(self, t: int, u: int, v: Optional[int] = None):
        # Two-argument form addresses the compact [T, U] working buffers.
        if v is None:
            return t * self.U + u
        # Three-argument form addresses a single vocabulary entry; the stride
        # between (t, u) cells depends on the activation memory layout.
        if self.batch_first:
            stride = self.alphabet_size
        else:
            stride = self.minibatch * self.alphabet_size
        return (t * self.maxU + u) * stride + v
class CpuRNNT_metadata:
    """Working-memory views for one sample of the CPU RNNT loss.

    Slices `workspace` into three flat buffers (alphas, betas, and a compact
    blank/label log-prob cache) and pre-fills the cache from the full
    activation tensor.

    Args:
        T: Length of the acoustic sequence (without padding).
        U: Length of the target sequence (without padding).
        workspace: Working space memory for the CPU.
        bytes_used: Number of bytes currently used for indexing the working
            space memory. Generally 0.
        blank: Index of the blank token in the vocabulary.
        labels: Ground truth padded labels matrix of shape [B, U]
        log_probs: Log probs / activation matrix of flattened shape [B, T, U, V+1]
        idx: Flat-index resolver for the activation layout.
    """

    def __init__(
        self,
        T: int,
        U: int,
        workspace: torch.Tensor,
        bytes_used: int,
        blank: int,
        labels: torch.Tensor,
        log_probs: torch.Tensor,
        idx: CpuRNNT_index,
    ):
        super(CpuRNNT_metadata, self).__init__()

        cells = T * U
        self.alphas = workspace[bytes_used : bytes_used + cells]
        bytes_used += cells
        self.betas = workspace[bytes_used : bytes_used + cells]
        bytes_used += cells
        # Only the blank and label scores are ever needed per (t, u) cell,
        # so cache just those two values instead of the full vocabulary row.
        self.log_probs2 = workspace[bytes_used : bytes_used + cells * 2]
        bytes_used += cells * 2

        self.bytes_used = bytes_used

        self.setup_probs(T, U, labels, blank, log_probs, idx)

    def setup_probs(
        self,
        T: int,
        U: int,
        labels: torch.Tensor,
        blank: int,
        log_probs: torch.Tensor,
        idx: CpuRNNT_index,
    ):
        # Fill the two-entry cache: even slot holds the blank score,
        # odd slot holds the next-label score.
        for t in range(T):
            for u in range(U):
                base = (t * U + u) * 2
                self.log_probs2[base] = log_probs[idx(t, u, blank)]
                # The final row (u == U - 1) has no label transition.
                if u < U - 1:
                    self.log_probs2[base + 1] = log_probs[idx(t, u, labels[u])]
class LogSoftmaxGradModification(Function):
    """Identity in the forward pass; clamps gradients in the backward pass.

    The CPU path computes logsoftmax explicitly, so gradient clamping of
    loss(logsoftmax(...)) has to be injected here rather than inside the
    loss kernel.
    """

    @staticmethod
    def forward(ctx, acts, clamp):
        if clamp < 0:
            raise ValueError("`clamp` must be 0.0 or positive float.")
        ctx.clamp = clamp
        # An out-of-place copy is required for correctness (in-place would be
        # problematic), at the cost of extra memory.
        return acts.new(acts)

    @staticmethod
    def backward(ctx, grad_output):
        # Clamp the incoming gradients to [-clamp, clamp]; the second returned
        # slot corresponds to the non-differentiable `clamp` argument.
        clamped = torch.clamp(grad_output, -ctx.clamp, ctx.clamp)
        return clamped, None
class CPURNNT:
    def __init__(
        self,
        minibatch: int,
        maxT: int,
        maxU: int,
        alphabet_size: int,
        workspace: torch.Tensor,
        blank: int,
        fastemit_lambda: float,
        clamp: float,
        num_threads: int,
        batch_first: bool,
    ):
        """
        Helper class to compute the Transducer Loss on CPU.

        Args:
            minibatch: Size of the minibatch b.
            maxT: The maximum possible acoustic sequence length.
                Represents T in the logprobs tensor.
            maxU: The maximum possible target sequence length.
                Represents U in the logprobs tensor.
            alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
            workspace: An allocated chunk of memory that will be sliced off and
                reshaped into required blocks used as working memory.
            blank: Index of the RNNT blank token in the vocabulary.
                Generally the first or last token in the vocab.
            fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
                FastEmit: Low-latency Streaming ASR with Sequence-level
                Emission Regularization.
            clamp: Float value. When set to value >= 0.0, will clamp the
                gradient to [-clamp, clamp].
            num_threads: Number of OMP threads to launch.
            batch_first: Bool that decides if batch dimension is first or third.
        """
        self.minibatch_ = minibatch
        self.maxT_ = maxT
        self.maxU_ = maxU
        self.alphabet_size_ = alphabet_size
        # a flat vector of floatX numbers that represents allocated memory slices
        self.workspace = workspace
        self.blank_ = blank
        self.fastemit_lambda_ = fastemit_lambda
        self.clamp_ = abs(clamp)
        self.num_threads_ = num_threads
        self.batch_first = batch_first

        if num_threads > 0:
            # cap the requested thread count at the machine's CPU count
            numba.set_num_threads(min(multiprocessing.cpu_count(), num_threads))
        else:
            self.num_threads_ = numba.get_num_threads()

    def cost_and_grad_kernel(
        self,
        log_probs: torch.Tensor,
        grad: torch.Tensor,
        labels: torch.Tensor,
        mb: int,
        T: int,
        U: int,
        bytes_used: int,
    ):
        """Compute the negative log-likelihood and in-place gradients for one sample.

        Runs the forward (alpha) and backward (beta + grad) passes over the
        sample's slice of the activation tensor and sanity-checks that both
        passes agree on the likelihood.
        """
        idx = CpuRNNT_index(
            U, self.maxU_, self.minibatch_, self.alphabet_size_, self.batch_first
        )
        rnntm = CpuRNNT_metadata(
            T, U, self.workspace, bytes_used, self.blank_, labels, log_probs, idx
        )

        if self.batch_first:
            # zero grads
            grad *= 0.0

        llForward = self.compute_alphas(rnntm.log_probs2, T, U, rnntm.alphas)
        llBackward = self.compute_betas_and_grads(
            grad, rnntm.log_probs2, T, U, rnntm.alphas, rnntm.betas, labels, llForward
        )

        # Scale llForward by FastEmit lambda
        llForward *= 1.0 + self.fastemit_lambda_
        llBackward *= 1.0 + self.fastemit_lambda_

        # The two passes compute the same quantity from opposite directions;
        # a large discrepancy indicates a numerical problem.
        diff = (llForward - llBackward).abs()
        if diff > 0.1:
            print(f"WARNING: Forward backward likelihood mismatch : {diff}")

        return -llForward

    def compute_alphas(
        self, log_probs: torch.Tensor, T: int, U: int, alphas: torch.Tensor
    ):
        """
        Compute the probability of the forward variable alpha.

        Args:
            log_probs: Flattened blank/label log-prob cache
                (``CpuRNNT_metadata.log_probs2``); even slots hold the blank
                score, odd slots the label score per (t, u) cell.
            T: Length of the acoustic sequence T (not padded).
            U: Length of the target sequence U (not padded).
            alphas: Working space memory for alpha of shape [B, T, U].

        Returns:
            Loglikelihood of the forward variable alpha.
        """
        idx = CpuRNNT_index(
            U, self.maxU_, self.minibatch_, self.alphabet_size_, self.batch_first
        )

        alphas[0] = 0
        for t in range(T):
            for u in range(U):
                # first column: only blank transitions from the left
                if u == 0 and t > 0:
                    alphas[idx(t, 0)] = (
                        alphas[idx(t - 1, 0)] + log_probs[idx(t - 1, 0) * 2]
                    )
                # first row: only label emissions from below
                if t == 0 and u > 0:
                    alphas[idx(0, u)] = (
                        alphas[idx(0, u - 1)] + log_probs[idx(0, u - 1) * 2 + 1]
                    )
                # interior: combine blank (no_emit) and label (emit) paths
                if t > 0 and u > 0:
                    no_emit = alphas[idx(t - 1, u)] + log_probs[idx(t - 1, u) * 2]
                    emit = alphas[idx(t, u - 1)] + log_probs[idx(t, u - 1) * 2 + 1]
                    alphas[idx(t, u)] = log_sum_exp(emit, no_emit)

        # terminal blank transition closes the lattice
        loglike = alphas[idx(T - 1, U - 1)] + log_probs[idx(T - 1, U - 1) * 2]
        return loglike

    def compute_betas_and_grads(
        self,
        grad: torch.Tensor,
        log_probs: torch.Tensor,
        T: int,
        U: int,
        alphas: torch.Tensor,
        betas: torch.Tensor,
        labels: torch.Tensor,
        logll: torch.Tensor,
    ):
        """
        Compute backward variable beta as well as gradients of the activation
        matrix wrt loglikelihood of forward variable.

        Args:
            grad: Working space memory of flattened shape [B, T, U, V+1]
            log_probs: Flattened blank/label log-prob cache
                (``CpuRNNT_metadata.log_probs2``), as in ``compute_alphas``.
            T: Length of the acoustic sequence T (not padded).
            U: Length of the target sequence U (not padded).
            alphas: Working space memory for alpha of shape [B, T, U].
            betas: Working space memory for beta of shape [B, T, U].
            labels: Ground truth label of shape [B, U]
            logll: Loglikelihood of the forward variable.

        Returns:
            Loglikelihood of the forward variable and inplace updates the grad tensor.
        """
        idx = CpuRNNT_index(
            U, self.maxU_, self.minibatch_, self.alphabet_size_, self.batch_first
        )
        betas[idx(T - 1, U - 1)] = log_probs[idx(T - 1, U - 1) * 2]

        for t in range(T - 1, -1, -1):
            for u in range(U - 1, -1, -1):
                # last row: only blank transitions to the right remain
                if (u == U - 1) and (t < T - 1):
                    betas[idx(t, U - 1)] = (
                        betas[idx(t + 1, U - 1)] + log_probs[idx(t, U - 1) * 2]
                    )
                # last frame: only label emissions upward remain
                if (t == T - 1) and (u < U - 1):
                    betas[idx(T - 1, u)] = (
                        betas[idx(T - 1, u + 1)] + log_probs[idx(T - 1, u) * 2 + 1]
                    )
                # interior: combine blank (no_emit) and label (emit) paths
                if (t < T - 1) and (u < U - 1):
                    no_emit = betas[idx(t + 1, u)] + log_probs[idx(t, u) * 2]
                    emit = betas[idx(t, u + 1)] + log_probs[idx(t, u) * 2 + 1]
                    betas[idx(t, u)] = log_sum_exp(emit, no_emit)

        loglike = betas[0]

        # // Gradients w.r.t. log probabilities
        for t in range(T):
            for u in range(U):
                if t < T - 1:
                    g = alphas[idx(t, u)] + betas[idx(t + 1, u)]
                    grad[idx(t, u, self.blank_)] = -torch.exp(
                        log_probs[idx(t, u) * 2] + g - loglike
                    )

                if u < U - 1:
                    g = alphas[idx(t, u)] + betas[idx(t, u + 1)]
                    # label gradient is scaled by (1 + fastemit_lambda) in log space
                    grad[idx(t, u, labels[u])] = -torch.exp(
                        math.log1p(self.fastemit_lambda_)
                        + log_probs[idx(t, u) * 2 + 1]
                        + g
                        - loglike
                    )

        # // gradient to the last blank transition
        grad[idx(T - 1, U - 1, self.blank_)] = -torch.exp(
            log_probs[idx(T - 1, U - 1) * 2] + alphas[idx(T - 1, U - 1)] - loglike
        )

        return loglike

    def cost_and_grad(
        self,
        log_probs: torch.Tensor,
        grads: torch.Tensor,
        costs: torch.Tensor,
        flat_labels: torch.Tensor,
        label_lengths: torch.Tensor,
        input_lengths: torch.Tensor,
    ) -> global_constants.RNNTStatus:
        """Compute per-sample losses (into `costs`) and gradients (into `grads`)
        for the whole minibatch by invoking `cost_and_grad_kernel` per sample.
        """
        # // per minibatch memory
        per_minibatch_bytes = 0

        # // alphas & betas
        per_minibatch_bytes += self.maxT_ * self.maxU_ * 2

        # // blank & label log probability cache
        per_minibatch_bytes += self.maxT_ * self.maxU_ * 2

        for mb in range(self.minibatch_):
            T = input_lengths[mb]  # // Length of utterance (time)
            U = label_lengths[mb] + 1  # // Number of labels in transcription

            batch_size = self.alphabet_size_
            if self.batch_first:
                batch_size = self.maxT_ * self.maxU_ * self.alphabet_size_

            costs[mb] = self.cost_and_grad_kernel(
                log_probs[(mb * batch_size) :],
                grads[(mb * batch_size) :],
                flat_labels[(mb * (self.maxU_ - 1)) :],
                mb,
                T,
                U,
                mb * per_minibatch_bytes,
            )

        return global_constants.RNNTStatus.RNNT_STATUS_SUCCESS

    def score_forward(
        self,
        log_probs: torch.Tensor,
        costs: torch.Tensor,
        flat_labels: torch.Tensor,
        label_lengths: torch.Tensor,
        input_lengths: torch.Tensor,
    ):
        """Score-only path: fill `costs` with negative log-likelihoods via the
        forward (alpha) pass, computing no gradients.
        """
        # // per minibatch memory
        per_minibatch_bytes = 0

        # // alphas & betas
        per_minibatch_bytes += self.maxT_ * self.maxU_ * 2

        # // blank & label log probability cache
        per_minibatch_bytes += self.maxT_ * self.maxU_ * 2

        for mb in range(self.minibatch_):
            T = input_lengths[mb]  # // Length of utterance (time)
            U = label_lengths[mb] + 1  # // Number of labels in transcription

            batch_size = self.alphabet_size_
            if self.batch_first:
                batch_size = self.maxT_ * self.maxU_ * self.alphabet_size_

            idx = CpuRNNT_index(
                U, self.maxU_, self.minibatch_, self.alphabet_size_, self.batch_first
            )
            rnntm = CpuRNNT_metadata(
                T,
                U,
                self.workspace,
                mb * per_minibatch_bytes,
                self.blank_,
                flat_labels[(mb * (self.maxU_ - 1)) :],
                log_probs[(mb * batch_size) :],
                idx,
            )

            costs[mb] = -self.compute_alphas(rnntm.log_probs2, T, U, rnntm.alphas)

        return global_constants.RNNTStatus.RNNT_STATUS_SUCCESS
| 16,277 | 33.93133 | 87 | py |
espnet | espnet-master/espnet2/asr/specaug/specaug.py | """SpecAugment module."""
from typing import Optional, Sequence, Union
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.layers.mask_along_axis import MaskAlongAxis, MaskAlongAxisVariableMaxWidth
from espnet2.layers.time_warp import TimeWarp
class SpecAug(AbsSpecAug):
    """SpecAugment data-augmentation module.

    Reference:
        Daniel S. Park et al.
        "SpecAugment: A Simple Data
        Augmentation Method for Automatic Speech Recognition"

    .. warning::
        When using cuda mode, time_warp doesn't have reproducibility
        due to `torch.nn.functional.interpolate`.
    """

    def __init__(
        self,
        apply_time_warp: bool = True,
        time_warp_window: int = 5,
        time_warp_mode: str = "bicubic",
        apply_freq_mask: bool = True,
        freq_mask_width_range: Union[int, Sequence[int]] = (0, 20),
        num_freq_mask: int = 2,
        apply_time_mask: bool = True,
        time_mask_width_range: Optional[Union[int, Sequence[int]]] = None,
        time_mask_width_ratio_range: Optional[Union[float, Sequence[float]]] = None,
        num_time_mask: int = 2,
    ):
        # At least one of the three augmentations must be enabled.
        if not (apply_time_warp or apply_time_mask or apply_freq_mask):
            raise ValueError(
                "Either one of time_warp, time_mask, or freq_mask should be applied"
            )
        # The two time-mask width specifications are mutually exclusive.
        if (
            apply_time_mask
            and time_mask_width_range is not None
            and time_mask_width_ratio_range is not None
        ):
            raise ValueError(
                'Either one of "time_mask_width_range" or '
                '"time_mask_width_ratio_range" can be used'
            )
        super().__init__()
        self.apply_time_warp = apply_time_warp
        self.apply_freq_mask = apply_freq_mask
        self.apply_time_mask = apply_time_mask

        self.time_warp = (
            TimeWarp(window=time_warp_window, mode=time_warp_mode)
            if apply_time_warp
            else None
        )

        self.freq_mask = (
            MaskAlongAxis(
                dim="freq",
                mask_width_range=freq_mask_width_range,
                num_mask=num_freq_mask,
            )
            if apply_freq_mask
            else None
        )

        if not apply_time_mask:
            self.time_mask = None
        elif time_mask_width_range is not None:
            # Fixed-width time masking.
            self.time_mask = MaskAlongAxis(
                dim="time",
                mask_width_range=time_mask_width_range,
                num_mask=num_time_mask,
            )
        elif time_mask_width_ratio_range is not None:
            # Width chosen relative to the utterance length.
            self.time_mask = MaskAlongAxisVariableMaxWidth(
                dim="time",
                mask_width_ratio_range=time_mask_width_ratio_range,
                num_mask=num_time_mask,
            )
        else:
            raise ValueError(
                'Either one of "time_mask_width_range" or '
                '"time_mask_width_ratio_range" should be used.'
            )

    def forward(self, x, x_lengths=None):
        # Apply the enabled transforms in the fixed order:
        # time-warp -> freq-mask -> time-mask.
        for transform in (self.time_warp, self.freq_mask, self.time_mask):
            if transform is not None:
                x, x_lengths = transform(x, x_lengths)
        return x, x_lengths
| 3,435 | 34.42268 | 87 | py |
espnet | espnet-master/espnet2/asr/specaug/abs_specaug.py | from typing import Optional, Tuple
import torch
class AbsSpecAug(torch.nn.Module):
    """Base interface for spectrogram augmentation modules.

    Position in the processing pipeline:
        Frontend -> SpecAug -> Normalization -> Encoder -> Decoder
    """

    def forward(
        self, x: torch.Tensor, x_lengths: torch.Tensor = None
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Apply augmentation; concrete subclasses must override this."""
        raise NotImplementedError
| 408 | 21.722222 | 63 | py |
espnet | espnet-master/espnet2/asr/frontend/s3prl.py | import copy
import logging
from typing import Optional, Tuple, Union
import humanfriendly
import torch
from typeguard import check_argument_types
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet.nets.pytorch_backend.frontends.frontend import Frontend
class S3prlFrontend(AbsFrontend):
"""Speech Pretrained Representation frontend structure for ASR."""
def __init__(
self,
fs: Union[int, str] = 16000,
frontend_conf: Optional[dict] = get_default_kwargs(Frontend),
download_dir: str = None,
multilayer_feature: bool = False,
layer: int = -1,
):
try:
import s3prl
from s3prl.nn import Featurizer, S3PRLUpstream
except Exception as e:
print("Error: S3PRL is not properly installed.")
print("Please install S3PRL: cd ${MAIN_ROOT}/tools && make s3prl.done")
raise e
assert check_argument_types()
super().__init__()
if isinstance(fs, str):
fs = humanfriendly.parse_size(fs)
if fs != 16000:
logging.warning(
"All the upstream models in S3PRL now only support 16 kHz audio."
)
if download_dir is not None:
s3prl.util.download.set_dir(download_dir)
assert frontend_conf.get("upstream", None) in S3PRLUpstream.available_names()
upstream = S3PRLUpstream(
frontend_conf.get("upstream"),
path_or_url=frontend_conf.get("path_or_url", None),
normalize=frontend_conf.get("normalize", False),
extra_conf=frontend_conf.get("extra_conf", None),
)
if getattr(upstream.upstream, "model", None):
if getattr(upstream.upstream.model, "feature_grad_mult", None):
upstream.upstream.model.feature_grad_mult = 1.0
upstream.eval()
if layer != -1:
layer_selections = [layer]
assert (
not multilayer_feature
), "multilayer feature will be deactivated, when specific layer used"
else:
layer_selections = None
featurizer = Featurizer(upstream, layer_selections=layer_selections)
self.multilayer_feature = multilayer_feature
self.layer = layer
self.upstream, self.featurizer = upstream, featurizer
self.pretrained_params = copy.deepcopy(self.upstream.state_dict())
self.frontend_type = "s3prl"
self.hop_length = self.featurizer.downsample_rate
self.tile_factor = frontend_conf.get("tile_factor", 1)
def _tile_representations(self, feature):
"""Tile up the representations by `tile_factor`.
Input - sequence of representations
shape: (batch_size, seq_len, feature_dim)
Output - sequence of tiled representations
shape: (batch_size, seq_len * factor, feature_dim)
"""
assert (
len(feature.shape) == 3
), "Input argument `feature` has invalid shape: {}".format(feature.shape)
tiled_feature = feature.repeat(1, 1, self.tile_factor)
tiled_feature = tiled_feature.reshape(
feature.size(0), feature.size(1) * self.tile_factor, feature.size(2)
)
return tiled_feature
def output_size(self) -> int:
return self.featurizer.output_size
    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract features with the S3PRL upstream model.

        Args:
            input: Raw waveform batch. (presumably (B, T) samples -- TODO confirm)
            input_lengths: Valid sample counts per utterance, shape (B,).

        Returns:
            Tuple of features and their frame lengths.
        """
        feats, feats_lens = self.upstream(input, input_lengths)
        if self.layer != -1:
            layer = self.layer
            # A specific layer was requested: return it directly.
            # NOTE(review): this early return bypasses both the featurizer and
            # the tile_factor handling below -- confirm that is intended.
            feats, feats_lens = feats[layer], feats_lens[layer]
            return feats, feats_lens
        if self.multilayer_feature:
            # Learned weighted combination over all upstream layers.
            feats, feats_lens = self.featurizer(feats, feats_lens)
        else:
            # Use only the final hidden layer.
            feats, feats_lens = self.featurizer(feats[-1:], feats_lens[-1:])
        if self.tile_factor != 1:
            feats = self._tile_representations(feats)
        return feats, feats_lens
    def reload_pretrained_parameters(self):
        """Restore the upstream weights snapshotted at construction time."""
        self.upstream.load_state_dict(self.pretrained_params)
        logging.info("Pretrained S3PRL frontend model parameters reloaded!")
| 4,294 | 35.398305 | 85 | py |
espnet | espnet-master/espnet2/asr/frontend/windowing.py | #!/usr/bin/env python3
# 2020, Technische Universität München; Ludwig Kürzinger
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Sliding Window for raw audio input data."""
from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.frontend.abs_frontend import AbsFrontend
class SlidingWindow(AbsFrontend):
    """Sliding Window.

    Provides a sliding window over a batched continuous raw audio tensor.
    Optionally, provides padding (Currently not implemented).
    Combine this module with a pre-encoder compatible with raw audio data,
    for example Sinc convolutions.

    Known issues:
        WARNING: trailing values are discarded - padding not implemented yet.
        There is currently no additional window function applied to input values.
    """

    def __init__(
        self,
        win_length: int = 400,
        hop_length: int = 160,
        channels: int = 1,
        padding: int = None,
        fs=None,
    ):
        """Initialize.

        Args:
            win_length: Length of frame.
            hop_length: Relative starting point of next frame.
            channels: Number of input channels.
            padding: Padding (placeholder, currently not implemented).
            fs: Sampling rate (placeholder for compatibility, not used).
        """
        assert check_argument_types()
        super().__init__()
        self.fs = fs
        self.win_length = win_length
        self.hop_length = hop_length
        self.channels = channels
        self.padding = padding

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply a sliding window on the input.

        Args:
            input: Input (B, T, C*D) or (B, T*C*D), with D=C=1.
            input_lengths: Input lengths within batch.

        Returns:
            Tensor: Output with dimensions (B, T, C, D), with D=win_length.
            Tensor: Output lengths within batch.
        """
        input_size = input.size()
        B = input_size[0]
        T = input_size[1]
        C = self.channels
        D = self.win_length
        # (B, T, C) --> (T, B, C)
        continuous = input.view(B, T, C).permute(1, 0, 2)
        windowed = continuous.unfold(0, D, self.hop_length)
        # (T, B, C, D) --> (B, T, C, D)
        output = windowed.permute(1, 0, 2, 3).contiguous()
        # After unfold(), windowed lengths change. Clamp at zero so an
        # utterance shorter than win_length reports 0 frames instead of a
        # negative count (this was the previously-documented length bug).
        output_lengths = torch.clamp(
            (input_lengths - self.win_length) // self.hop_length + 1, min=0
        )
        return output, output_lengths

    def output_size(self) -> int:
        """Return output length of feature dimension D, i.e. the window length."""
        return self.win_length
| 2,814 | 32.511905 | 82 | py |
espnet | espnet-master/espnet2/asr/frontend/fused.py | from typing import Tuple
import numpy as np
import torch
from typeguard import check_argument_types
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.s3prl import S3prlFrontend
class FusedFrontends(AbsFrontend):
    """Fuse features from several sub-frontends via per-frontend linear projection.

    Each configured sub-frontend ("default" or "s3prl") is projected, reshaped
    to a common frame rate (derived from the GCD of the hop lengths), trimmed
    to a common number of frames, and concatenated along the feature axis.
    """

    def __init__(
        self, frontends=None, align_method="linear_projection", proj_dim=100, fs=16000
    ):
        assert check_argument_types()
        super().__init__()
        self.align_method = (
            align_method  # fusing method : linear_projection only for now
        )
        self.proj_dim = proj_dim  # dim of the projection done on each frontend
        self.frontends = []  # list of the frontends to combine

        for i, frontend in enumerate(frontends):
            frontend_type = frontend["frontend_type"]
            if frontend_type == "default":
                # Unpack the "default" frontend options with its defaults.
                n_mels, fs, n_fft, win_length, hop_length = (
                    frontend.get("n_mels", 80),
                    fs,
                    frontend.get("n_fft", 512),
                    frontend.get("win_length"),
                    frontend.get("hop_length", 128),
                )
                window, center, normalized, onesided = (
                    frontend.get("window", "hann"),
                    frontend.get("center", True),
                    frontend.get("normalized", False),
                    frontend.get("onesided", True),
                )
                fmin, fmax, htk, apply_stft = (
                    frontend.get("fmin", None),
                    frontend.get("fmax", None),
                    frontend.get("htk", False),
                    frontend.get("apply_stft", True),
                )

                self.frontends.append(
                    DefaultFrontend(
                        n_mels=n_mels,
                        n_fft=n_fft,
                        fs=fs,
                        win_length=win_length,
                        hop_length=hop_length,
                        window=window,
                        center=center,
                        normalized=normalized,
                        onesided=onesided,
                        fmin=fmin,
                        fmax=fmax,
                        htk=htk,
                        apply_stft=apply_stft,
                    )
                )
            elif frontend_type == "s3prl":
                frontend_conf, download_dir, multilayer_feature = (
                    frontend.get("frontend_conf"),
                    frontend.get("download_dir"),
                    frontend.get("multilayer_feature"),
                )
                self.frontends.append(
                    S3prlFrontend(
                        fs=fs,
                        frontend_conf=frontend_conf,
                        download_dir=download_dir,
                        multilayer_feature=multilayer_feature,
                    )
                )
            else:
                raise NotImplementedError  # frontends are only default or s3prl

        self.frontends = torch.nn.ModuleList(self.frontends)
        # Common frame rate: hop GCD; each frontend produces `factor` sub-frames
        # per original frame after reshaping.
        self.gcd = np.gcd.reduce([frontend.hop_length for frontend in self.frontends])
        self.factors = [frontend.hop_length // self.gcd for frontend in self.frontends]
        if torch.cuda.is_available():
            dev = "cuda"
        else:
            dev = "cpu"
        if self.align_method == "linear_projection":
            # One projection per frontend; output dim scaled by its factor so
            # reshaping yields proj_dim features at the common frame rate.
            self.projection_layers = [
                torch.nn.Linear(
                    in_features=frontend.output_size(),
                    out_features=self.factors[i] * self.proj_dim,
                )
                for i, frontend in enumerate(self.frontends)
            ]
            self.projection_layers = torch.nn.ModuleList(self.projection_layers)
            self.projection_layers = self.projection_layers.to(torch.device(dev))

    def output_size(self) -> int:
        """Return fused feature dimension: proj_dim per fused frontend."""
        return len(self.frontends) * self.proj_dim

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute, align, and concatenate all sub-frontend features.

        NOTE(review): intermediate results are stored on `self` (self.feats,
        self.feats_proj, ...), so forward() is stateful and not re-entrant.
        Sub-frontend features are computed under no_grad, so the sub-frontends
        themselves receive no gradient through this path.
        """
        # step 0 : get all frontends features
        self.feats = []
        for frontend in self.frontends:
            with torch.no_grad():
                input_feats, feats_lens = frontend.forward(input, input_lengths)
            self.feats.append([input_feats, feats_lens])
        if (
            self.align_method == "linear_projection"
        ):  # TODO(Dan): to add other align methods
            # first step : projections
            self.feats_proj = []
            for i, frontend in enumerate(self.frontends):
                input_feats = self.feats[i][0]
                self.feats_proj.append(self.projection_layers[i](input_feats))
            # 2nd step : reshape to the common frame rate
            self.feats_reshaped = []
            for i, frontend in enumerate(self.frontends):
                input_feats_proj = self.feats_proj[i]
                bs, nf, dim = input_feats_proj.shape
                input_feats_reshaped = torch.reshape(
                    input_feats_proj, (bs, nf * self.factors[i], dim // self.factors[i])
                )
                self.feats_reshaped.append(input_feats_reshaped)
            # 3rd step : drop the few last frames so all frontends agree
            m = min([x.shape[1] for x in self.feats_reshaped])
            self.feats_final = [x[:, :m, :] for x in self.feats_reshaped]
            input_feats = torch.cat(
                self.feats_final, dim=-1
            )  # change the input size of the preencoder : proj_dim * n_frontends
            # All utterances get the trimmed common length m.
            feats_lens = torch.ones_like(self.feats[0][1]) * (m)
        else:
            raise NotImplementedError
        return input_feats, feats_lens
| 5,752 | 38.40411 | 88 | py |
espnet | espnet-master/espnet2/asr/frontend/whisper.py | import contextlib
from typing import Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.asr.frontend.abs_frontend import AbsFrontend
class WhisperFrontend(AbsFrontend):
    """Speech Representation Using Encoder Outputs from OpenAI's Whisper Model:

    URL: https://github.com/openai/whisper
    """

    def __init__(
        self,
        whisper_model: str = "small",
        freeze_weights: bool = True,
        download_dir: str = None,
    ):
        try:
            import whisper
            from whisper.audio import HOP_LENGTH, N_FFT, N_MELS
        except Exception as e:
            print("Error: whisper is not properly installed.")
            print(
                "Please install whisper with: cd ${MAIN_ROOT}/tools && "
                "./installers/install_whisper.sh"
            )
            raise e

        assert check_argument_types()
        super().__init__()

        # STFT / mel parameters mirror whisper's own audio preprocessing.
        self.n_fft = N_FFT
        self.win_length = N_FFT
        self.hop_length = HOP_LENGTH
        self.n_mels = N_MELS

        self.mel_filters = whisper.audio.mel_filters
        self.pad_or_trim = whisper.pad_or_trim

        assert whisper_model in whisper.available_models()
        self.whisper = whisper.load_model(whisper_model, download_root=download_dir)
        self.whisper.eval()

        # When True, the whisper encoder runs under no_grad in forward().
        self.freeze_weights = freeze_weights

    def output_size(self) -> int:
        """Return the whisper encoder's hidden dimension (from its final LN)."""
        return self.whisper.encoder.ln_post.normalized_shape[-1]

    def log_mel_spectrogram(
        self,
        audio: torch.Tensor,
        ilens: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute whisper-style normalized log-mel features.

        Args:
            audio: Waveform batch. (presumably (B, T) samples -- TODO confirm)
            ilens: Valid sample counts per utterance, or None.

        Returns:
            Tuple of (log-mel features, frame lengths or None).
        """
        window = torch.hann_window(self.win_length).to(audio.device)
        stft = torch.stft(
            audio, self.n_fft, self.hop_length, window=window, return_complex=True
        )

        # whisper deletes the last frame by default (Shih-Lun)
        magnitudes = stft[..., :-1].abs() ** 2

        filters = self.mel_filters(audio.device, self.n_mels)
        mel_spec = filters @ magnitudes

        log_spec = torch.clamp(mel_spec, min=1e-10).log10()

        if ilens is not None:
            olens = ilens // self.hop_length
        else:
            olens = None

        # Whisper normalization: floor at (max - 8), then scale into ~[-1, 1].
        log_spec = torch.maximum(
            log_spec,
            log_spec.view(audio.size(0), -1).max(dim=-1)[0][:, None, None] - 8.0,
        )
        log_spec = (log_spec + 4.0) / 4.0

        return log_spec, olens

    def whisper_encode(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run the whisper encoder over log-mel input.

        Returns:
            Tuple of (encoder states, output lengths or None).
        """
        whisper_encoder = self.whisper.encoder

        x = F.gelu(whisper_encoder.conv1(input))
        x = F.gelu(whisper_encoder.conv2(x))
        x = x.permute(0, 2, 1)

        n_frames = x.size(1)
        max_pos = whisper_encoder.positional_embedding.size(0)
        if n_frames <= max_pos:
            x = (x + whisper_encoder.positional_embedding[: x.size(1), :]).to(x.dtype)
        else:
            # Inputs longer than the positional table are truncated to fit.
            x = x[:, :max_pos, :] + whisper_encoder.positional_embedding

        for block in whisper_encoder.blocks:
            x = block(x)

        x = whisper_encoder.ln_post(x)

        if ilens is not None:
            # Standard conv output-length formula for conv2 (the stride-2 conv).
            olens = (
                1
                + (
                    ilens
                    - whisper_encoder.conv2.kernel_size[0]
                    + 2 * whisper_encoder.conv2.padding[0]
                )
                // whisper_encoder.conv2.stride[0]
            )
            olens = torch.clamp(olens, max=max_pos)
        else:
            olens = None

        return x, olens

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute whisper encoder features for a waveform batch."""
        feats, feats_lens = self.log_mel_spectrogram(input, input_lengths)

        with torch.no_grad() if self.freeze_weights else contextlib.nullcontext():
            feats, feats_lens = self.whisper_encode(feats, feats_lens)

        return feats, feats_lens
| 3,963 | 28.804511 | 86 | py |
espnet | espnet-master/espnet2/asr/frontend/default.py | import copy
from typing import Optional, Tuple, Union
import humanfriendly
import numpy as np
import torch
from torch_complex.tensor import ComplexTensor
from typeguard import check_argument_types
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.layers.log_mel import LogMel
from espnet2.layers.stft import Stft
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet.nets.pytorch_backend.frontends.frontend import Frontend
class DefaultFrontend(AbsFrontend):
    """Conventional frontend structure for ASR.

    Stft -> WPE -> MVDR-Beamformer -> Power-spec -> Mel-Fbank -> CMVN
    """

    def __init__(
        self,
        fs: Union[int, str] = 16000,
        n_fft: int = 512,
        win_length: int = None,
        hop_length: int = 128,
        window: Optional[str] = "hann",
        center: bool = True,
        normalized: bool = False,
        onesided: bool = True,
        n_mels: int = 80,
        fmin: int = None,
        fmax: int = None,
        htk: bool = False,
        frontend_conf: Optional[dict] = get_default_kwargs(Frontend),
        apply_stft: bool = True,
    ):
        assert check_argument_types()
        super().__init__()
        if isinstance(fs, str):
            fs = humanfriendly.parse_size(fs)

        # Deepcopy (In general, dict shouldn't be used as default arg)
        frontend_conf = copy.deepcopy(frontend_conf)
        self.hop_length = hop_length

        if apply_stft:
            self.stft = Stft(
                n_fft=n_fft,
                win_length=win_length,
                hop_length=hop_length,
                center=center,
                window=window,
                normalized=normalized,
                onesided=onesided,
            )
        else:
            # Caller supplies precomputed STFT as (..., 2) real/imag pairs.
            self.stft = None
        self.apply_stft = apply_stft

        if frontend_conf is not None:
            # Optional enhancement frontend (WPE / beamforming).
            self.frontend = Frontend(idim=n_fft // 2 + 1, **frontend_conf)
        else:
            self.frontend = None

        self.logmel = LogMel(
            fs=fs,
            n_fft=n_fft,
            n_mels=n_mels,
            fmin=fmin,
            fmax=fmax,
            htk=htk,
        )
        self.n_mels = n_mels
        self.frontend_type = "default"

    def output_size(self) -> int:
        """Return the number of mel bins produced per frame."""
        return self.n_mels

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Convert waveform (or precomputed STFT) to log-mel features."""
        # 1. Domain-conversion: e.g. Stft: time -> time-freq
        if self.stft is not None:
            input_stft, feats_lens = self._compute_stft(input, input_lengths)
        else:
            input_stft = ComplexTensor(input[..., 0], input[..., 1])
            feats_lens = input_lengths
        # 2. [Option] Speech enhancement
        if self.frontend is not None:
            assert isinstance(input_stft, ComplexTensor), type(input_stft)
            # input_stft: (Batch, Length, [Channel], Freq)
            input_stft, _, mask = self.frontend(input_stft, feats_lens)

        # 3. [Multi channel case]: Select a channel
        if input_stft.dim() == 4:
            # h: (B, T, C, F) -> h: (B, T, F)
            if self.training:
                # Select 1ch randomly
                ch = np.random.randint(input_stft.size(2))
                input_stft = input_stft[:, :, ch, :]
            else:
                # Use the first channel
                input_stft = input_stft[:, :, 0, :]

        # 4. STFT -> Power spectrum
        # h: ComplexTensor(B, T, F) -> torch.Tensor(B, T, F)
        input_power = input_stft.real**2 + input_stft.imag**2

        # 5. Feature transform e.g. Stft -> Log-Mel-Fbank
        # input_power: (Batch, [Channel,] Length, Freq)
        #       -> input_feats: (Batch, Length, Dim)
        input_feats, _ = self.logmel(input_power, feats_lens)

        return input_feats, feats_lens

    def _compute_stft(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[ComplexTensor, torch.Tensor]:
        """Run STFT and wrap the (real, imag) pair into a ComplexTensor."""
        input_stft, feats_lens = self.stft(input, input_lengths)

        assert input_stft.dim() >= 4, input_stft.shape
        # "2" refers to the real/imag parts of Complex
        assert input_stft.shape[-1] == 2, input_stft.shape

        # Change torch.Tensor to ComplexTensor
        # input_stft: (..., F, 2) -> (..., F)
        input_stft = ComplexTensor(input_stft[..., 0], input_stft[..., 1])
        return input_stft, feats_lens
| 4,417 | 32.469697 | 77 | py |
espnet | espnet-master/espnet2/asr/frontend/abs_frontend.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class AbsFrontend(torch.nn.Module, ABC):
    """Abstract interface for ASR frontends (waveform -> feature sequence)."""

    @abstractmethod
    def output_size(self) -> int:
        """Return the feature dimension this frontend produces."""
        raise NotImplementedError

    @abstractmethod
    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute features and lengths from padded input and its lengths."""
        raise NotImplementedError
| 385 | 21.705882 | 62 | py |
espnet | espnet-master/espnet2/asr/layers/fastformer.py | """Fastformer attention definition.
Reference:
Wu et al., "Fastformer: Additive Attention Can Be All You Need"
https://arxiv.org/abs/2108.09084
https://github.com/wuch15/Fastformer
"""
import numpy
import torch
class FastSelfAttention(torch.nn.Module):
    """Fast self-attention used in Fastformer.

    Reference:
        Wu et al., "Fastformer: Additive Attention Can Be All You Need"
        https://arxiv.org/abs/2108.09084
    """

    def __init__(
        self,
        size,
        attention_heads,
        dropout_rate,
    ):
        """Construct the layer.

        Args:
            size: Hidden size; must be a multiple of ``attention_heads``.
            attention_heads: Number of attention heads.
            dropout_rate: Dropout probability.

        Raises:
            ValueError: If ``size`` is not divisible by ``attention_heads``.
        """
        super().__init__()
        if size % attention_heads != 0:
            raise ValueError(
                f"Hidden size ({size}) is not an integer multiple "
                f"of attention heads ({attention_heads})"
            )
        self.attention_head_size = size // attention_heads
        self.num_attention_heads = attention_heads

        self.query = torch.nn.Linear(size, size)
        self.query_att = torch.nn.Linear(size, attention_heads)
        self.key = torch.nn.Linear(size, size)
        self.key_att = torch.nn.Linear(size, attention_heads)
        self.transform = torch.nn.Linear(size, size)
        self.dropout = torch.nn.Dropout(dropout_rate)

    def espnet_initialization_fn(self):
        """Apply the Fastformer weight initialization to all submodules."""
        self.apply(self.init_weights)

    def init_weights(self, module):
        """Initialize Linear layers: N(0, 0.02) weights and zero biases."""
        if isinstance(module, torch.nn.Linear):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if module.bias is not None:
                module.bias.data.zero_()

    def transpose_for_scores(self, x):
        """Reshape and transpose to compute scores.

        Args:
            x: (batch, time, size = n_heads * attn_dim)

        Returns:
            (batch, n_heads, time, attn_dim)
        """
        new_x_shape = x.shape[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        return x.reshape(*new_x_shape).transpose(1, 2)

    def _masked_softmax(self, score, mask):
        """Softmax over time, excluding padded positions and re-zeroing them.

        Args:
            score: (batch, n_heads, time)
            mask: boolean padding mask (True = padding) or None.

        Returns:
            (batch, n_heads, time) attention weights.
        """
        if mask is None:
            return torch.softmax(score, dim=-1)
        # Fill padded scores with the dtype's lowest value so they vanish
        # in the softmax; zero them afterwards to kill residual mass.
        min_value = torch.finfo(score.dtype).min
        score = score.masked_fill(mask, min_value)
        return torch.softmax(score, dim=-1).masked_fill(mask, 0.0)

    def forward(self, xs_pad, mask):
        """Forward method.

        Args:
            xs_pad: (batch, time, size = n_heads * attn_dim)
            mask: (batch, 1, time), nonpadding is 1, padding is 0

        Returns:
            torch.Tensor: (batch, time, size)
        """
        batch_size, seq_len, _ = xs_pad.shape
        mixed_query_layer = self.query(xs_pad)  # (batch, time, size)
        mixed_key_layer = self.key(xs_pad)  # (batch, time, size)

        if mask is not None:
            mask = mask.eq(0)  # padding is 1, nonpadding is 0

        # (batch, n_heads, time)
        query_for_score = (
            self.query_att(mixed_query_layer).transpose(1, 2)
            / self.attention_head_size**0.5
        )
        query_weight = self._masked_softmax(query_for_score, mask).unsqueeze(
            2
        )  # (batch, n_heads, 1, time)

        query_layer = self.transpose_for_scores(
            mixed_query_layer
        )  # (batch, n_heads, time, attn_dim)

        # Pool queries over time into one global query per head.
        pooled_query = (
            torch.matmul(query_weight, query_layer)
            .transpose(1, 2)
            .reshape(-1, 1, self.num_attention_heads * self.attention_head_size)
        )  # (batch, 1, size = n_heads * attn_dim)
        pooled_query = self.dropout(pooled_query)
        pooled_query_repeat = pooled_query.repeat(1, seq_len, 1)  # (batch, time, size)

        # Element-wise interaction between keys and the global query.
        mixed_query_key_layer = (
            mixed_key_layer * pooled_query_repeat
        )  # (batch, time, size)

        # (batch, n_heads, time)
        query_key_score = (
            self.key_att(mixed_query_key_layer) / self.attention_head_size**0.5
        ).transpose(1, 2)
        query_key_weight = self._masked_softmax(query_key_score, mask).unsqueeze(
            2
        )  # (batch, n_heads, 1, time)

        key_layer = self.transpose_for_scores(
            mixed_query_key_layer
        )  # (batch, n_heads, time, attn_dim)
        pooled_key = torch.matmul(
            query_key_weight, key_layer
        )  # (batch, n_heads, 1, attn_dim)
        pooled_key = self.dropout(pooled_key)

        # NOTE: value = query, due to param sharing
        weighted_value = (pooled_key * query_layer).transpose(
            1, 2
        )  # (batch, time, n_heads, attn_dim)
        weighted_value = weighted_value.reshape(
            weighted_value.shape[:-2]
            + (self.num_attention_heads * self.attention_head_size,)
        )  # (batch, time, size)
        # Output transform plus residual connection to the mixed queries.
        weighted_value = (
            self.dropout(self.transform(weighted_value)) + mixed_query_layer
        )

        return weighted_value
| 5,282 | 33.305195 | 88 | py |
espnet | espnet-master/espnet2/asr/layers/cgmlp.py | """MLP with convolutional gating (cgMLP) definition.
References:
https://openreview.net/forum?id=RA-zVvZLYIy
https://arxiv.org/abs/2105.08050
"""
import torch
from espnet.nets.pytorch_backend.nets_utils import get_activation
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class ConvolutionalSpatialGatingUnit(torch.nn.Module):
    """Convolutional Spatial Gating Unit (CSGU)."""

    def __init__(
        self,
        size: int,
        kernel_size: int,
        dropout_rate: float,
        use_linear_after_conv: bool,
        gate_activation: str,
    ):
        super().__init__()

        # Incoming channels are split in half: one half is the pass-through
        # path, the other half drives the gate.
        gate_channels = size // 2
        self.norm = LayerNorm(gate_channels)
        self.conv = torch.nn.Conv1d(
            gate_channels,
            gate_channels,
            kernel_size,
            1,
            (kernel_size - 1) // 2,
            groups=gate_channels,
        )
        self.linear = (
            torch.nn.Linear(gate_channels, gate_channels)
            if use_linear_after_conv
            else None
        )
        self.act = (
            torch.nn.Identity()
            if gate_activation == "identity"
            else get_activation(gate_activation)
        )
        self.dropout = torch.nn.Dropout(dropout_rate)

    def espnet_initialization_fn(self):
        """Initialize gate conv/linear weights near zero and biases at one."""
        torch.nn.init.normal_(self.conv.weight, std=1e-6)
        torch.nn.init.ones_(self.conv.bias)
        if self.linear is not None:
            torch.nn.init.normal_(self.linear.weight, std=1e-6)
            torch.nn.init.ones_(self.linear.bias)

    def forward(self, x, gate_add=None):
        """Gate the first half of `x` with a convolved second half.

        Args:
            x (torch.Tensor): (N, T, D)
            gate_add (torch.Tensor): optional additive term, (N, T, D/2)

        Returns:
            torch.Tensor: (N, T, D/2)
        """
        passthrough, gate = x.chunk(2, dim=-1)

        gate = self.norm(gate)  # (N, T, D/2)
        # Depthwise conv over time; Conv1d expects (N, C, T).
        gate = self.conv(gate.transpose(1, 2)).transpose(1, 2)
        if self.linear is not None:
            gate = self.linear(gate)
        if gate_add is not None:
            gate = gate + gate_add
        gate = self.act(gate)

        return self.dropout(passthrough * gate)
class ConvolutionalGatingMLP(torch.nn.Module):
    """Convolutional Gating MLP (cgMLP)."""

    def __init__(
        self,
        size: int,
        linear_units: int,
        kernel_size: int,
        dropout_rate: float,
        use_linear_after_conv: bool,
        gate_activation: str,
    ):
        super().__init__()

        # Expand channels, apply the gating unit (which halves them),
        # then project back to the model size.
        self.channel_proj1 = torch.nn.Sequential(
            torch.nn.Linear(size, linear_units), torch.nn.GELU()
        )
        self.csgu = ConvolutionalSpatialGatingUnit(
            size=linear_units,
            kernel_size=kernel_size,
            dropout_rate=dropout_rate,
            use_linear_after_conv=use_linear_after_conv,
            gate_activation=gate_activation,
        )
        self.channel_proj2 = torch.nn.Linear(linear_units // 2, size)

    def forward(self, x, mask):
        """Apply cgMLP; `x` may be a (features, pos_emb) tuple."""
        if isinstance(x, tuple):
            feats, pos_emb = x
        else:
            feats, pos_emb = x, None

        feats = self.channel_proj1(feats)  # size -> linear_units
        feats = self.csgu(feats)  # linear_units -> linear_units/2
        feats = self.channel_proj2(feats)  # linear_units/2 -> size

        return feats if pos_emb is None else (feats, pos_emb)
| 3,518 | 27.152 | 75 | py |
espnet | espnet-master/espnet2/asr/preencoder/linear.py | #!/usr/bin/env python3
# 2021, Carnegie Mellon University; Xuankai Chang
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Linear Projection."""
from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
class LinearProjection(AbsPreEncoder):
    """Linear Projection Preencoder."""

    def __init__(self, input_size: int, output_size: int, dropout: float = 0.0):
        """Initialize the module."""
        assert check_argument_types()
        super().__init__()

        self.output_dim = output_size
        self.linear_out = torch.nn.Linear(input_size, output_size)
        self.dropout = torch.nn.Dropout(dropout)

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Project the (dropped-out) input; lengths pass through unchanged."""
        projected = self.linear_out(self.dropout(input))
        # Frame-wise projection keeps the sequence length intact.
        return projected, input_lengths

    def output_size(self) -> int:
        """Get the output size."""
        return self.output_dim
| 1,095 | 28.621622 | 80 | py |
espnet | espnet-master/espnet2/asr/preencoder/abs_preencoder.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class AbsPreEncoder(torch.nn.Module, ABC):
    """Abstract interface for pre-encoders placed between frontend and encoder."""

    @abstractmethod
    def output_size(self) -> int:
        """Return the feature dimension this pre-encoder produces."""
        raise NotImplementedError

    @abstractmethod
    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Transform padded features and lengths into new features and lengths."""
        raise NotImplementedError
| 387 | 21.823529 | 62 | py |
espnet | espnet-master/espnet2/asr/preencoder/sinc.py | #!/usr/bin/env python3
# 2020, Technische Universität München; Ludwig Kürzinger
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Sinc convolutions for raw audio input."""
from collections import OrderedDict
from typing import Optional, Tuple, Union
import humanfriendly
import torch
from typeguard import check_argument_types
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.layers.sinc_conv import LogCompression, SincConv
class LightweightSincConvs(AbsPreEncoder):
    """Lightweight Sinc Convolutions.

    Instead of using precomputed features, end-to-end speech recognition
    can also be done directly from raw audio using sinc convolutions, as
    described in "Lightweight End-to-End Speech Recognition from Raw Audio
    Data Using Sinc-Convolutions" by Kürzinger et al.
    https://arxiv.org/abs/2010.07597

    To use Sinc convolutions in your model instead of the default f-bank
    frontend, set this module as your pre-encoder with `preencoder: sinc`
    and use the input of the sliding window frontend with
    `frontend: sliding_window` in your yaml configuration file.
    So that the process flow is:

    Frontend (SlidingWindow) -> SpecAug -> Normalization ->
    Pre-encoder (LightweightSincConvs) -> Encoder -> Decoder

    Note that this method also performs data augmentation in time domain
    (vs. in spectral domain in the default frontend).
    Use `plot_sinc_filters.py` to visualize the learned Sinc filters.
    """

    def __init__(
        self,
        fs: Union[int, str, float] = 16000,
        in_channels: int = 1,
        out_channels: int = 256,
        activation_type: str = "leakyrelu",
        dropout_type: str = "dropout",
        windowing_type: str = "hamming",
        scale_type: str = "mel",
    ):
        """Initialize the module.

        Args:
            fs: Sample rate.
            in_channels: Number of input channels.
            out_channels: Number of output channels (for each input channel).
            activation_type: Choice of activation function.
            dropout_type: Choice of dropout function.
            windowing_type: Choice of windowing function.
            scale_type: Choice of filter-bank initialization scale.
        """
        assert check_argument_types()
        super().__init__()
        if isinstance(fs, str):
            fs = humanfriendly.parse_size(fs)
        self.fs = fs
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.activation_type = activation_type
        self.dropout_type = dropout_type
        self.windowing_type = windowing_type
        self.scale_type = scale_type

        self.choices_dropout = {
            "dropout": torch.nn.Dropout,
            "spatial": SpatialDropout,
            "dropout2d": torch.nn.Dropout2d,
        }
        if dropout_type not in self.choices_dropout:
            raise NotImplementedError(
                f"Dropout type has to be one of "
                f"{list(self.choices_dropout.keys())}",
            )

        self.choices_activation = {
            "leakyrelu": torch.nn.LeakyReLU,
            "relu": torch.nn.ReLU,
        }
        if activation_type not in self.choices_activation:
            raise NotImplementedError(
                f"Activation type has to be one of "
                f"{list(self.choices_activation.keys())}",
            )

        # initialization
        self._create_sinc_convs()
        # Sinc filters require custom initialization
        self.espnet_initialization_fn()

    def _create_sinc_convs(self):
        """Build the Sinc block followed by the depthwise-conv body."""
        blocks = OrderedDict()

        # SincConvBlock
        out_channels = 128
        self.filters = SincConv(
            self.in_channels,
            out_channels,
            kernel_size=101,
            stride=1,
            fs=self.fs,
            window_func=self.windowing_type,
            scale_type=self.scale_type,
        )
        block = OrderedDict(
            [
                ("Filters", self.filters),
                ("LogCompression", LogCompression()),
                ("BatchNorm", torch.nn.BatchNorm1d(out_channels, affine=True)),
                ("AvgPool", torch.nn.AvgPool1d(2)),
            ]
        )
        blocks["SincConvBlock"] = torch.nn.Sequential(block)
        in_channels = out_channels

        # First convolutional block, connects the sinc output to the front-end "body"
        out_channels = 128
        blocks["DConvBlock1"] = self.gen_lsc_block(
            in_channels,
            out_channels,
            depthwise_kernel_size=25,
            depthwise_stride=2,
            pointwise_groups=0,
            avgpool=True,
            dropout_probability=0.1,
        )
        in_channels = out_channels

        # Second convolutional block, multiple convolutional layers
        out_channels = self.out_channels
        for layer in [2, 3, 4]:
            blocks[f"DConvBlock{layer}"] = self.gen_lsc_block(
                in_channels, out_channels, depthwise_kernel_size=9, depthwise_stride=1
            )
            in_channels = out_channels

        # Third Convolutional block, acts as coupling to encoder
        out_channels = self.out_channels
        blocks["DConvBlock5"] = self.gen_lsc_block(
            in_channels,
            out_channels,
            depthwise_kernel_size=7,
            depthwise_stride=1,
            pointwise_groups=0,
        )

        self.blocks = torch.nn.Sequential(blocks)

    def gen_lsc_block(
        self,
        in_channels: int,
        out_channels: int,
        depthwise_kernel_size: int = 9,
        depthwise_stride: int = 1,
        depthwise_groups=None,
        pointwise_groups=0,
        dropout_probability: float = 0.15,
        avgpool=False,
    ):
        """Generate a convolutional block for Lightweight Sinc convolutions.

        Each block consists of either a depthwise or a depthwise-separable
        convolutions together with dropout, (batch-)normalization layer, and
        an optional average-pooling layer.

        Args:
            in_channels: Number of input channels.
            out_channels: Number of output channels.
            depthwise_kernel_size: Kernel size of the depthwise convolution.
            depthwise_stride: Stride of the depthwise convolution.
            depthwise_groups: Number of groups of the depthwise convolution.
            pointwise_groups: Number of groups of the pointwise convolution.
            dropout_probability: Dropout probability in the block.
            avgpool: If True, an AvgPool layer is inserted.

        Returns:
            torch.nn.Sequential: Neural network building block.
        """
        block = OrderedDict()
        if not depthwise_groups:
            # GCD(in_channels, out_channels) to prevent size mismatches.
            # Euclidean algorithm; the previous version assigned
            # `depthwise_groups, r = depthwise_groups, depthwise_groups % r`,
            # which never advances `depthwise_groups` and therefore returned
            # `in_channels` instead of the GCD (e.g. 12 for gcd(12, 8)=4).
            depthwise_groups, r = in_channels, out_channels
            while r != 0:
                depthwise_groups, r = r, depthwise_groups % r
        block["depthwise"] = torch.nn.Conv1d(
            in_channels,
            out_channels,
            depthwise_kernel_size,
            depthwise_stride,
            groups=depthwise_groups,
        )
        if pointwise_groups:
            block["pointwise"] = torch.nn.Conv1d(
                out_channels, out_channels, 1, 1, groups=pointwise_groups
            )
        block["activation"] = self.choices_activation[self.activation_type]()
        block["batchnorm"] = torch.nn.BatchNorm1d(out_channels, affine=True)
        if avgpool:
            block["avgpool"] = torch.nn.AvgPool1d(2)
        block["dropout"] = self.choices_dropout[self.dropout_type](dropout_probability)
        return torch.nn.Sequential(block)

    def espnet_initialization_fn(self):
        """Initialize sinc filters with filterbank values."""
        self.filters.init_filters()
        for block in self.blocks:
            for layer in block:
                # isinstance (not type ==) also covers BatchNorm1d subclasses.
                if isinstance(layer, torch.nn.BatchNorm1d) and layer.affine:
                    layer.weight.data[:] = 1.0
                    layer.bias.data[:] = 0.0

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply Lightweight Sinc Convolutions.

        The input shall be formatted as (B, T, C_in, D_in)
        with B as batch size, T as time dimension, C_in as channels,
        and D_in as feature dimension.

        The output will then be (B, T, C_out*D_out)
        with C_out and D_out as output dimensions.

        The current module structure only handles D_in=400, so that D_out=1.
        Remark for the multichannel case: C_out is the number of out_channels
        given at initialization multiplied with C_in.
        """
        # Transform input data:
        #   (B, T, C_in, D_in) -> (B*T, C_in, D_in)
        B, T, C_in, D_in = input.size()
        input_frames = input.view(B * T, C_in, D_in)
        output_frames = self.blocks.forward(input_frames)

        # ---TRANSFORM: (B*T, C_out, D_out) -> (B, T, C_out*D_out)
        _, C_out, D_out = output_frames.size()
        output_frames = output_frames.view(B, T, C_out * D_out)
        return output_frames, input_lengths  # no state in this layer

    def output_size(self) -> int:
        """Get the output size."""
        return self.out_channels * self.in_channels
return self.out_channels * self.in_channels
class SpatialDropout(torch.nn.Module):
    """Spatial dropout module.

    Apply dropout to full channels on tensors of input (B, C, D)
    """

    def __init__(
        self,
        dropout_probability: float = 0.15,
        shape: Optional[Union[tuple, list]] = None,
    ):
        """Initialize.

        Args:
            dropout_probability: Dropout probability.
            shape (tuple, list): Shape of input tensors.
        """
        assert check_argument_types()
        super().__init__()
        if shape is None:
            shape = (0, 2, 1)
        self.dropout = torch.nn.Dropout2d(dropout_probability)
        # NOTE(review): self.shape is a 1-tuple wrapping the permutation, so
        # permute(*self.shape) receives the whole tuple as one argument --
        # presumably relying on permute accepting a dims tuple; verify.
        self.shape = (shape,)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward of spatial dropout module."""
        permuted = x.permute(*self.shape)
        dropped = self.dropout(permuted)
        return dropped.permute(*self.shape)
| 10,222 | 35.251773 | 87 | py |
espnet | espnet-master/espnet2/asr/encoder/hubert_encoder.py | # Copyright 2021 Tianzi Wang
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0
# Thanks to Abdelrahman Mohamed and Wei-Ning Hsu's help in this implementation,
# Their origial Hubert work is in:
# Paper: https://arxiv.org/pdf/2106.07447.pdf
# Code in Fairseq: https://github.com/pytorch/fairseq/tree/master/examples/hubert
"""Encoder definition."""
import contextlib
import copy
import logging
import os
from pathlib import Path
from typing import List, Optional, Tuple
import torch
import yaml
from filelock import FileLock
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class TorchAudioHuBERTPretrainEncoder(AbsEncoder):
    """Torch Audio Hubert encoder module.

    Wraps ``torchaudio.models.hubert_pretrain_model`` for (a) Hubert
    pretraining and (b) ASR finetuning/evaluation, selected by ``finetuning``
    and ``self.training``.

    Args:
        extractor_mode: Operation mode of feature extractor.
            Valid values are "group_norm" or "layer_norm".
        extractor_conv_layer_config: Configuration of convolution layers in feature
            extractor. List of convolution configuration,
            i.e. [(output_channel, kernel_size, stride), ...]
        extractor_conv_bias: Whether to include bias term to each convolution
            operation.
        encoder_embed_dim: The dimension of embedding in encoder.
        encoder_projection_dropout: The dropout probability applied after the input
            feature is projected to "encoder_embed_dim".
        encoder_pos_conv_kernel: Kernel size of convolutional positional embeddings.
        encoder_pos_conv_groups: Number of groups of convolutional positional
            embeddings.
        encoder_num_layers: Number of self attention layers in transformer block.
        encoder_num_heads: Number of heads in self attention layers.
        encoder_attention_dropout: Dropout probability applied after softmax in
            self-attention layer.
        encoder_ff_interm_features: Dimension of hidden features in feed forward layer.
        encoder_ff_interm_dropout: Dropout probability applied in feedforward layer.
        encoder_dropout: Dropout probability applied at the end of feed forward layer.
        encoder_layer_norm_first: Control the order of layer norm in transformer layer
            and each encoder layer. If True, in transformer layer, layer norm is
            applied before features are fed to encoder layers.
        encoder_layer_drop: Probability to drop each encoder layer during training.
        mask_prob: Probability for each token to be chosen as start of the span
            to be masked.
        mask_selection: How to choose the mask length.
            Options: [static, uniform, normal, poisson].
        mask_other: Secondary mask argument (used for more complex distributions).
        mask_length: The lengths of the mask.
        no_mask_overlap: Whether to allow masks to overlap.
        mask_min_space: Minimum space between spans (if no overlap is enabled).
        mask_channel_prob: (float): The probability of replacing a feature with 0.
        mask_channel_selection: How to choose the mask length for channel masking.
            Options: [static, uniform, normal, poisson].
        mask_channel_other: Secondary mask argument for channel masking(used for more
            complex distributions).
        mask_channel_length: Minimum space between spans (if no overlap is enabled)
            for channel masking.
        no_mask_channel_overlap: Whether to allow channel masks to overlap.
        mask_channel_min_space: Minimum space between spans for channel
            masking(if no overlap is enabled).
        skip_masked: If True, skip computing losses over masked frames.
        skip_nomask: If True, skip computing losses over unmasked frames.
        num_classes: The number of classes in the labels.
        final_dim: Project final representations and targets to final_dim.
        feature_grad_mult: The factor to scale the convolutional feature extraction
            layer gradients by. The scale factor will not affect the forward pass.
        finetuning: Whether to finetuning the model with ASR or other tasks.
        freeze_encoder_updates: The number of steps to freeze the encoder parameters
            in ASR finetuning.

    Hubert specific Args:
        Please refer to:
        https://pytorch.org/audio/stable/generated/torchaudio.models.hubert_pretrain_model.html#torchaudio.models.hubert_pretrain_model
    """

    def __init__(
        self,
        input_size: int = None,
        extractor_mode: str = "group_norm",
        extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]] = [
            (512, 10, 5),
            (512, 3, 2),
            (512, 3, 2),
            (512, 3, 2),
            (512, 3, 2),
            (512, 2, 2),
            (512, 2, 2),
        ],
        extractor_conv_bias: bool = False,
        encoder_embed_dim: int = 768,
        encoder_projection_dropout: float = 0.1,
        encoder_pos_conv_kernel: int = 128,
        encoder_pos_conv_groups: int = 16,
        encoder_num_layers: int = 12,
        encoder_num_heads: int = 12,
        encoder_attention_dropout: float = 0.1,
        encoder_ff_interm_features: int = 3072,
        encoder_ff_interm_dropout: float = 0.0,
        encoder_dropout: float = 0.1,
        encoder_layer_norm_first: bool = False,
        encoder_layer_drop: float = 0.05,
        mask_prob: float = 0.8,
        mask_selection: str = "static",
        mask_other: float = 0.0,
        mask_length: int = 10,
        no_mask_overlap: bool = False,
        mask_min_space: int = 1,
        mask_channel_prob: float = 0.0,
        mask_channel_selection: str = "static",
        mask_channel_other: float = 0.0,
        mask_channel_length: int = 10,
        no_mask_channel_overlap: bool = False,
        mask_channel_min_space: int = 1,
        skip_masked: bool = False,
        skip_nomask: bool = False,
        num_classes: int = 100,
        final_dim: int = 256,
        feature_grad_mult: Optional[float] = 0.1,
        finetuning: bool = False,
        freeze_encoder_updates: int = 0,
    ):
        assert check_argument_types()
        super().__init__()
        # torchaudio is an optional dependency; fail with an actionable hint.
        try:
            import torchaudio
        except Exception as e:
            print("Error: torchaudio is not properly installed.")
            print("Please install torchaudio")
            raise e
        self._output_size = encoder_embed_dim
        self.hubert_pretrain_model = torchaudio.models.hubert_pretrain_model(
            extractor_mode=extractor_mode,
            extractor_conv_layer_config=extractor_conv_layer_config,
            extractor_conv_bias=extractor_conv_bias,
            encoder_embed_dim=encoder_embed_dim,
            encoder_projection_dropout=encoder_projection_dropout,
            encoder_pos_conv_kernel=encoder_pos_conv_kernel,
            encoder_pos_conv_groups=encoder_pos_conv_groups,
            encoder_num_layers=encoder_num_layers,
            encoder_num_heads=encoder_num_heads,
            encoder_attention_dropout=encoder_attention_dropout,
            encoder_ff_interm_features=encoder_ff_interm_features,
            encoder_ff_interm_dropout=encoder_ff_interm_dropout,
            encoder_dropout=encoder_dropout,
            encoder_layer_norm_first=encoder_layer_norm_first,
            encoder_layer_drop=encoder_layer_drop,
            mask_prob=mask_prob,
            mask_selection=mask_selection,
            mask_other=mask_other,
            mask_length=mask_length,
            no_mask_overlap=no_mask_overlap,
            mask_min_space=mask_min_space,
            mask_channel_prob=mask_channel_prob,
            mask_channel_selection=mask_channel_selection,
            mask_channel_other=mask_channel_other,
            mask_channel_length=mask_channel_length,
            no_mask_channel_overlap=no_mask_channel_overlap,
            mask_channel_min_space=mask_channel_min_space,
            skip_masked=skip_masked,
            skip_nomask=skip_nomask,
            num_classes=num_classes,
            final_dim=final_dim,
            feature_grad_mult=feature_grad_mult,
        )
        # Snapshot of the model state at construction time; restored later by
        # reload_pretrained_parameters().
        self.pretrained_params = copy.deepcopy(self.hubert_pretrain_model.state_dict())
        self.finetuning = finetuning
        if finetuning:
            # In finetuning, the convolutional feature extractor is always frozen.
            for p in self.hubert_pretrain_model.wav2vec2.feature_extractor.parameters():
                p.requires_grad = False
        # Buffer so the step counter is saved/restored with checkpoints.
        self.register_buffer("global_step", torch.LongTensor([0]))
        self.freeze_encoder_updates = freeze_encoder_updates

    def output_size(self) -> int:
        """Return the encoder embedding dimension."""
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        ys_pad: torch.Tensor = None,
        ys_pad_length: torch.Tensor = None,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Forward Hubert Pretrain Encoder.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            ys_pad: frame-level targets; required when pretraining
            ys_pad_length: lengths of ``ys_pad`` (not used in this method)
            prev_states: Not to be used now.

        Returns:
            position embedded tensor and mask
        """
        if not self.finetuning:
            # Pretraining path: returns (logit_m, logit_u, feature_penalty).
            return self._pretraining_forward(xs_pad, ilens, ys_pad)
        else:
            # Finetuning: masked forward during training, plain forward in eval.
            if self.training:
                return self._finetuning_forward(xs_pad, ilens)
            else:
                return self._eval_forward(xs_pad, ilens)

    def _pretraining_forward(self, xs_pad, ilens, ys_pad):
        """Run the torchaudio pretraining forward (requires targets)."""
        assert ys_pad is not None
        (
            logit_m,
            logit_u,
            feature_penalty,
        ) = self.hubert_pretrain_model.forward(xs_pad, ys_pad, ilens)
        return logit_m, logit_u, feature_penalty

    def _finetuning_forward(self, xs_pad, ilens):
        """Training-time finetuning forward with optional encoder freezing."""

        def get_padding_mask(input, lengths):
            """get_padding_mask() from torchaudio.models.wav2vec2.components"""
            batch_size, max_len, _ = input.shape
            # True at padded frame positions.
            mask = (
                torch.arange(max_len, device=lengths.device).expand(batch_size, max_len)
                >= lengths[:, None]
            )
            return mask

        # manually add the steps. It is not accurate.
        # TODO(simpleoier): to introduce the global update steps into encoder module
        self.global_step += 1
        if self.global_step <= self.freeze_encoder_updates:
            # Freeze period: run the whole pipeline without gradients.
            with torch.no_grad():
                x, out_len = self.hubert_pretrain_model.wav2vec2.feature_extractor(
                    xs_pad, ilens
                )
                padding_mask = get_padding_mask(x, out_len)
                (
                    x,
                    attention_mask,
                ) = self.hubert_pretrain_model.wav2vec2.encoder._preprocess(x, out_len)
                x, _ = self.hubert_pretrain_model.mask_generator(x, padding_mask)
                x = self.hubert_pretrain_model.wav2vec2.encoder.transformer(
                    x, attention_mask=attention_mask
                )
        else:
            # After the freeze period only the feature extractor stays
            # gradient-free; the transformer part is trained.
            with torch.no_grad():
                x, out_len = self.hubert_pretrain_model.wav2vec2.feature_extractor(
                    xs_pad, ilens
                )
            padding_mask = get_padding_mask(x, out_len)
            (
                x,
                attention_mask,
            ) = self.hubert_pretrain_model.wav2vec2.encoder._preprocess(x, out_len)
            x, _ = self.hubert_pretrain_model.mask_generator(x, padding_mask)
            x = self.hubert_pretrain_model.wav2vec2.encoder.transformer(
                x, attention_mask=attention_mask
            )
        # Output lengths = number of non-padded frames per utterance.
        return x, (~padding_mask).long().sum(dim=1), None

    def _eval_forward(self, xs_pad, ilens):
        """Evaluation forward: feature extractor + full wav2vec2 encoder."""
        x, lengths = self.hubert_pretrain_model.wav2vec2.feature_extractor(
            xs_pad, ilens
        )
        x = self.hubert_pretrain_model.wav2vec2.encoder(x, lengths)
        return x, lengths, None

    def reload_pretrained_parameters(self):
        """Restore the parameter snapshot taken in ``__init__``."""
        self.hubert_pretrain_model.load_state_dict(self.pretrained_params)
        logging.info("Pretrained Hubert model parameters reloaded!")
class FairseqHubertEncoder(AbsEncoder):
    """FairSeq Hubert encoder module, used for loading pretrained weight and finetuning

    Args:
        input_size: input dim
        hubert_url: url to Hubert pretrained model, or the literal string
            "espnet" to load an ESPnet-trained checkpoint from
            ``hubert_dir_path``
        hubert_dir_path: directory to download the Wav2Vec2.0 pretrained model.
        output_size: dimension of attention
        normalize_before: whether to use layer_norm before the first block
        freeze_finetune_updates: steps that freeze all layers except output layer
            before tuning the whole model (necessary to prevent overfit).
        dropout_rate: dropout rate
        activation_dropout: dropout rate in activation function
        attention_dropout: dropout rate in attention

    Hubert specific Args:
        Please refer to:
        https://github.com/pytorch/fairseq/blob/master/fairseq/models/hubert/hubert.py
    """

    def __init__(
        self,
        input_size: int,
        hubert_url: str = "./",
        hubert_dir_path: str = "./",
        output_size: int = 256,
        normalize_before: bool = False,
        freeze_finetune_updates: int = 0,
        dropout_rate: float = 0.0,
        activation_dropout: float = 0.1,
        attention_dropout: float = 0.0,
        mask_length: int = 10,
        mask_prob: float = 0.75,
        mask_selection: str = "static",
        mask_other: int = 0,
        apply_mask: bool = True,
        mask_channel_length: int = 64,
        mask_channel_prob: float = 0.5,
        mask_channel_other: int = 0,
        mask_channel_selection: str = "static",
        layerdrop: float = 0.1,
        feature_grad_mult: float = 0.0,
    ):
        assert check_argument_types()
        super().__init__()
        self.apply_mask = apply_mask
        # fairseq is an optional dependency; fail with an actionable hint.
        try:
            import fairseq
            from fairseq.models.hubert.hubert import HubertModel
        except Exception as e:
            print("Error: FairSeq is not properly installed.")
            print("Please install FairSeq: cd ${MAIN_ROOT}/tools && make fairseq.done")
            raise e
        # Dropout/masking settings that override the values stored inside the
        # fairseq checkpoint when it is loaded below.
        arg_overrides = {
            "dropout": dropout_rate,
            "activation_dropout": activation_dropout,
            "attention_dropout": attention_dropout,
            "mask_length": mask_length,
            "mask_prob": mask_prob,
            "mask_selection": mask_selection,
            "mask_other": mask_other,
            "mask_channel_length": mask_channel_length,
            "mask_channel_prob": mask_channel_prob,
            "mask_channel_selection": mask_channel_selection,
            "mask_channel_other": mask_channel_other,
            "encoder_layerdrop": layerdrop,
            "feature_grad_mult": feature_grad_mult,
            "data": hubert_dir_path,
        }
        if hubert_url == "espnet":
            # ESPnet-trained checkpoint: ``hubert_dir_path`` is the model file
            # itself, and config.yaml is expected next to it.
            self.hubert_model_path = hubert_dir_path
            s = torch.load(
                self.hubert_model_path,
                map_location=torch.device("cpu"),
            )
            if all("encoder.encoder" in k for k in s):
                try:
                    # Strip the "encoder.encoder." prefix and drop the
                    # pretraining-only label embedding table.
                    state = {
                        k.replace("encoder.encoder.", ""): v
                        for k, v in s.items()
                        if "label_embs_concat" not in k
                    }
                except Exception as e:
                    raise e
            # NOTE(review): if the key check above is False, ``state`` is never
            # bound and the deepcopy below raises NameError — confirm whether
            # that checkpoint layout can actually occur.
            config_file = os.path.join(
                "/".join(self.hubert_model_path.split("/")[:-1]),
                "config.yaml",
            )
            config_file = Path(config_file)
            with config_file.open("r", encoding="utf-8") as f:
                self.pretrained_cfg = yaml.safe_load(f)
            # Rebuild the pretraining encoder from its config, then keep only
            # the inner fairseq HubertModel.
            model = FairseqHubertPretrainEncoder(
                input_size=self.pretrained_cfg["input_size"],
                hubert_dict=self.pretrained_cfg["hubert_dict"],
                **self.pretrained_cfg["encoder_conf"],
            )
            model = model.encoder
            d = self.pretrained_cfg["encoder_conf"]["output_size"]
            self.pretrained_params = copy.deepcopy(state)
        else:
            # fairseq-released checkpoint: download (if needed) and load via
            # the fairseq checkpoint utilities.
            self.hubert_model_path = download_hubert(hubert_url, hubert_dir_path)
            (
                models,
                self.pretrained_cfg,
                task,
            ) = fairseq.checkpoint_utils.load_model_ensemble_and_task(
                [self.hubert_model_path],
                arg_overrides=arg_overrides,
                strict=False,
            )
            model = models[0]
            d = self.pretrained_cfg.model.encoder_embed_dim
            self.pretrained_params = copy.deepcopy(model.state_dict())
        self._output_size = output_size
        if not isinstance(model, HubertModel):
            # Some checkpoints wrap the HubertModel (e.g. a CTC model).
            try:
                model = model.hubert_encoder.hubert_model
            except Exception as e:
                print(
                    "Error: pretrained models should be within: "
                    "'HubertModel, Hubertctc' classes, etc."
                )
                raise e
        self.encoders = model
        self.normalize_before = normalize_before
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)
        # Add a projection only when the requested output size differs from
        # the pretrained embedding dimension ``d``.
        if output_size and output_size != d:
            self.output_layer = torch.nn.Sequential(
                torch.nn.Linear(d, output_size),
            )
        else:
            self.output_layer = None
        self.freeze_finetune_updates = freeze_finetune_updates
        # Buffer so the update counter is saved/restored with checkpoints.
        self.register_buffer("num_updates", torch.LongTensor([0]))

    def output_size(self) -> int:
        """Return the encoder output dimension."""
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Forward Hubert ASR Encoder.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.

        Returns:
            position embedded tensor and mask
        """
        masks = make_pad_mask(ilens).to(xs_pad.device)
        # ``ft`` is True once the freeze period has elapsed. The counter is
        # incremented on every call; the log message fires exactly once, on
        # the first update after unfreezing.
        ft = self.freeze_finetune_updates <= self.num_updates
        if self.num_updates <= self.freeze_finetune_updates:
            self.num_updates += 1
        elif ft and self.num_updates == self.freeze_finetune_updates + 1:
            self.num_updates += 1
            logging.info("Start fine-tuning hubert parameters!")
        else:
            self.num_updates += 1
        # While frozen, run the pretrained encoder without gradients.
        with torch.no_grad() if not ft else contextlib.nullcontext():
            enc_outputs = self.encoders(
                xs_pad,
                padding_mask=masks,
                mask=self.apply_mask and self.training,
                features_only=True,
                output_layer=None,
            )
        xs_pad = enc_outputs["x"]  # (B,T,C),
        masks = enc_outputs["padding_mask"]  # (B, T)
        # save gpu memory
        del enc_outputs
        olens = (~masks).sum(dim=1)
        if self.output_layer is not None:
            xs_pad = self.output_layer(xs_pad)
        if self.normalize_before:
            xs_pad = self.after_norm(xs_pad)
        return xs_pad, olens, None

    def reload_pretrained_parameters(self):
        """Reload the pretrained weights captured in ``__init__``.

        ``strict=False`` because pretraining-only keys were dropped.
        """
        self.encoders.load_state_dict(self.pretrained_params, strict=False)
        logging.info("Pretrained Hubert model parameters reloaded!")
class FairseqHubertPretrainEncoder(AbsEncoder):
    """FairSeq Hubert pretrain encoder module, only used for pretraining stage

    Args:
        input_size: input dim
        output_size: dimension of attention
        linear_units: dimension of feedforward layers
        attention_heads: the number of heads of multi head attention
        num_blocks: the number of encoder blocks
        dropout_rate: dropout rate
        attention_dropout_rate: dropout rate in attention
        hubert_dict: target dictionary for Hubert pretraining
        label_rate: label frame rate. -1 for sequence label
        sample_rate: target sample rate.
        use_amp: whether to use automatic mixed precision
        normalize_before: whether to use layer_norm before the first block
    """

    def __init__(
        self,
        input_size: int = 1,
        output_size: int = 1024,
        linear_units: int = 1024,
        attention_heads: int = 12,
        num_blocks: int = 12,
        dropout_rate: float = 0.0,
        attention_dropout_rate: float = 0.0,
        activation_dropout_rate: float = 0.0,
        hubert_dict: str = "./dict.txt",
        label_rate: int = 100,
        checkpoint_activations: bool = False,
        sample_rate: int = 16000,
        use_amp: bool = False,
        **kwargs,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        self.use_amp = use_amp
        # fairseq is an optional dependency; fail with an actionable hint.
        try:
            from fairseq.data.dictionary import Dictionary
            from fairseq.models.hubert.hubert import HubertConfig  # noqa: H301
            from fairseq.models.hubert.hubert import HubertModel  # noqa: H301
            from fairseq.models.hubert.hubert import (  # noqa: H301
                HubertPretrainingConfig,
            )
        except Exception as e:
            print("Error: FairSeq is not properly installed.")
            print("Please install FairSeq: cd ${MAIN_ROOT}/tools && make fairseq.done")
            raise e
        # Map ESPnet-style argument names onto fairseq's HubertConfig fields;
        # any extra kwargs are forwarded verbatim. Unknown keys are skipped by
        # the hasattr() guard below.
        cfg_overrides = {
            "encoder_embed_dim": output_size,
            "encoder_ffn_embed_dim": linear_units,
            "encoder_attention_heads": attention_heads,
            "encoder_layers": num_blocks,
            "final_dim": output_size,
            "dropout": dropout_rate,
            "attention_dropout": attention_dropout_rate,
            "label_rate": label_rate,
            "checkpoint_activations": checkpoint_activations,
        }
        cfg_overrides = {**cfg_overrides, **kwargs}
        self.cfg = HubertConfig()
        for key, value in cfg_overrides.items():
            if hasattr(self.cfg, key):
                setattr(self.cfg, key, value)
        hubert_task_cfg = HubertPretrainingConfig()
        hubert_task_cfg_overrides = {
            "label_rate": label_rate,
            "sample_rate": sample_rate,
        }
        for key, value in hubert_task_cfg_overrides.items():
            if hasattr(hubert_task_cfg, key):
                setattr(hubert_task_cfg, key, value)
        d = Dictionary()
        self._build_dictionary(d, hubert_dict)
        self.encoder = HubertModel(self.cfg, hubert_task_cfg, self.dictionaries)

    def _build_dictionary(self, dictionary, hubert_dict_path):
        """Populate ``dictionary`` from a fairseq dict file (or a dummy symbol).

        Sets ``self.dictionaries`` to a single-element list as expected by
        ``HubertModel``.
        """
        if os.path.exists(f"{hubert_dict_path}"):
            # Clear fairseq's default special symbols before loading the file.
            setattr(dictionary, "symbols", [])
            setattr(dictionary, "count", [])
            setattr(dictionary, "indices", {})
            dictionary.add_from_file(f"{hubert_dict_path}")
        else:
            dictionary.add_symbol("0")
        self.dictionaries = [dictionary]

    def output_size(self) -> int:
        """Return the encoder embedding dimension."""
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        ys_pad: torch.Tensor,
        ys_pad_length: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Forward Hubert Pretrain Encoder.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            ys_pad: frame-level target labels (B, L')
            ys_pad_length: length of each target sequence (B)
            prev_states: Not to be used now.

        Returns:
            the fairseq ``HubertModel`` output dictionary
        """
        self.cast_mask_emb()
        masks = make_pad_mask(ilens).to(xs_pad.device)
        # Truncate targets to the shortest label sequence in the batch.
        ys_pad = ys_pad[:, : min(ys_pad_length)]
        enc_outputs = self.encoder(
            xs_pad,
            padding_mask=masks,
            mask=True,
            target_list=[ys_pad],
            features_only=False,
        )
        return enc_outputs

    def cast_mask_emb(self):
        """Cast the mask embedding to fp16 (once) when AMP is enabled.

        Bug fix: the previous condition compared ``mask_emb.dtype`` (a
        ``torch.dtype``) against the tensor *class* ``torch.cuda.HalfTensor``,
        which is never equal, so with AMP enabled the parameter was re-wrapped
        in a fresh ``torch.nn.Parameter`` on every forward call — detaching it
        from any optimizer state that referenced the old object. Comparing
        against the dtype ``torch.half`` performs the cast exactly once.
        """
        if self.use_amp and self.encoder.mask_emb.dtype != torch.half:
            self.encoder.mask_emb = torch.nn.Parameter(self.encoder.mask_emb.half())

    def reload_pretrained_parameters(self):
        """Re-initialize the mask embedding with a uniform fp16 tensor."""
        self.encoder.mask_emb = torch.nn.Parameter(
            torch.HalfTensor(self.cfg.encoder_embed_dim).uniform_()
        )
        logging.info(
            f"Hubert mask embedding re-initiallized!, \
            {self.encoder.mask_emb.dtype}, \
            {self.use_amp}"
        )
def download_hubert(model_url, dir_path):
    """Download a Hubert checkpoint into ``dir_path`` if not already present.

    Args:
        model_url: URL of the pretrained model; its basename becomes the
            local file name.
        dir_path: destination directory (created if missing).

    Returns:
        Path to the local checkpoint file.
    """
    os.makedirs(dir_path, exist_ok=True)
    model_path = os.path.join(dir_path, model_url.split("/")[-1])
    # Serialize across concurrent jobs so only one process downloads.
    with FileLock(model_path + ".lock"):
        if os.path.exists(model_path):
            logging.info(f"Hubert model {model_path} already exists.")
        else:
            torch.hub.download_url_to_file(model_url, model_path)
            logging.info(f"Hubert model downloaded {model_path}")
    return model_path
| 25,130 | 37.842349 | 135 | py |
espnet | espnet-master/espnet2/asr/encoder/rnn_encoder.py | from typing import Optional, Sequence, Tuple
import numpy as np
import torch
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.rnn.encoders import RNN, RNNP
class RNNEncoder(AbsEncoder):
    """RNNEncoder class.

    Stacks a single RNN/RNNP module (optionally with projection layers and
    per-layer time subsampling) and zeroes the padded region of the output.

    Args:
        input_size: The number of expected features in the input
        output_size: The number of output features
        hidden_size: The number of hidden features
        bidirectional: If ``True`` becomes a bidirectional LSTM
        use_projection: Use projection layer or not
        num_layers: Number of recurrent layers
        dropout: dropout probability
        subsample: per-layer time-subsampling factors (only used with
            projection layers); None disables subsampling
    """

    def __init__(
        self,
        input_size: int,
        rnn_type: str = "lstm",
        bidirectional: bool = True,
        use_projection: bool = True,
        num_layers: int = 4,
        hidden_size: int = 320,
        output_size: int = 320,
        dropout: float = 0.0,
        subsample: Optional[Sequence[int]] = (2, 2, 1, 1),
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        self.rnn_type = rnn_type
        self.bidirectional = bidirectional
        self.use_projection = use_projection
        if rnn_type not in {"lstm", "gru"}:
            raise ValueError(f"Not supported rnn_type={rnn_type}")
        if subsample is None:
            # No subsampling: factor 1 everywhere (length num_layers + 1).
            subsample = np.ones(num_layers + 1, dtype=np.int64)
        else:
            # Keep at most one factor per layer.
            subsample = subsample[:num_layers]
            # Append 1 at the beginning because the second or later is used
            # (also right-pad with 1s so the array has length num_layers + 1).
            subsample = np.pad(
                np.array(subsample, dtype=np.int64),
                [1, num_layers - len(subsample)],
                mode="constant",
                constant_values=1,
            )
        # RNN/RNNP expect the type string prefixed with "b" for bidirectional.
        rnn_type = ("b" if bidirectional else "") + rnn_type
        if use_projection:
            self.enc = torch.nn.ModuleList(
                [
                    RNNP(
                        input_size,
                        num_layers,
                        hidden_size,
                        output_size,
                        subsample,
                        dropout,
                        typ=rnn_type,
                    )
                ]
            )
        else:
            self.enc = torch.nn.ModuleList(
                [
                    RNN(
                        input_size,
                        num_layers,
                        hidden_size,
                        output_size,
                        dropout,
                        typ=rnn_type,
                    )
                ]
            )

    def output_size(self) -> int:
        """Return the encoder output feature dimension."""
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Encode padded features.

        Args:
            xs_pad: padded input tensor (B, T, input_size)
            ilens: input lengths (B)
            prev_states: optional per-module hidden states for streaming

        Returns:
            output tensor with padded frames zeroed, output lengths
            (subsampled when projection layers subsample), and the list of
            current RNN states (one entry per module in ``self.enc``).
        """
        if prev_states is None:
            prev_states = [None] * len(self.enc)
        assert len(prev_states) == len(self.enc)
        current_states = []
        for module, prev_state in zip(self.enc, prev_states):
            xs_pad, ilens, states = module(xs_pad, ilens, prev_state=prev_state)
            current_states.append(states)
        if self.use_projection:
            # NOTE(review): in-place masking assumes RNNP returned a fresh
            # tensor not shared with autograd-saved buffers — confirm.
            xs_pad.masked_fill_(make_pad_mask(ilens, xs_pad, 1), 0.0)
        else:
            xs_pad = xs_pad.masked_fill(make_pad_mask(ilens, xs_pad, 1), 0.0)
        return xs_pad, ilens, current_states
| 3,587 | 30.752212 | 80 | py |
espnet | espnet-master/espnet2/asr/encoder/contextual_block_transformer_encoder.py | # Copyright 2020 Emiru Tsunoo
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder definition."""
import math
from typing import Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.contextual_block_encoder_layer import (
ContextualBlockEncoderLayer,
)
from espnet.nets.pytorch_backend.transformer.embedding import StreamPositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling_without_posenc import (
Conv2dSubsamplingWOPosEnc,
)
class ContextualBlockTransformerEncoder(AbsEncoder):
"""Contextual Block Transformer encoder module.
Details in Tsunoo et al. "Transformer ASR with contextual block processing"
(https://arxiv.org/abs/1910.07204)
Args:
input_size: input dim
output_size: dimension of attention
attention_heads: the number of heads of multi head attention
linear_units: the number of units of position-wise feed forward
num_blocks: the number of encoder blocks
dropout_rate: dropout rate
attention_dropout_rate: dropout rate in attention
positional_dropout_rate: dropout rate after adding positional encoding
input_layer: input layer type
pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
normalize_before: whether to use layer_norm before the first block
concat_after: whether to concat attention layer's input and output
if True, additional linear will be applied.
i.e. x -> x + linear(concat(x, att(x)))
if False, no additional linear will be applied.
i.e. x -> x + att(x)
positionwise_layer_type: linear of conv1d
positionwise_conv_kernel_size: kernel size of positionwise conv1d layer
padding_idx: padding_idx for input_layer=embed
block_size: block size for contextual block processing
hop_Size: hop size for block processing
look_ahead: look-ahead size for block_processing
init_average: whether to use average as initial context (otherwise max values)
ctx_pos_enc: whether to use positional encoding to the context vectors
"""
    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: Optional[str] = "conv2d",
        pos_enc_class=StreamPositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 1,
        padding_idx: int = -1,
        block_size: int = 40,
        hop_size: int = 16,
        look_ahead: int = 16,
        init_average: bool = True,
        ctx_pos_enc: bool = True,
    ):
        """Initialize the encoder; see the class docstring for arguments."""
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        self.pos_enc = pos_enc_class(output_size, positional_dropout_rate)
        # Choose the input embedding / subsampling front-end. ``self.subsample``
        # records the time-axis reduction factor of the chosen front-end.
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
            )
            self.subsample = 1
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsamplingWOPosEnc(
                input_size, output_size, dropout_rate, kernels=[3, 3], strides=[2, 2]
            )
            self.subsample = 4
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsamplingWOPosEnc(
                input_size, output_size, dropout_rate, kernels=[3, 5], strides=[2, 3]
            )
            self.subsample = 6
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsamplingWOPosEnc(
                input_size,
                output_size,
                dropout_rate,
                kernels=[3, 3, 3],
                strides=[2, 2, 2],
            )
            self.subsample = 8
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
            )
            self.subsample = 1
        elif input_layer is None:
            self.embed = None
            self.subsample = 1
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        # Select the position-wise feed-forward variant used in each layer.
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        self.encoders = repeat(
            num_blocks,
            lambda lnum: ContextualBlockEncoderLayer(
                output_size,
                MultiHeadedAttention(
                    attention_heads, output_size, attention_dropout_rate
                ),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                num_blocks,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)
        # for block processing
        self.block_size = block_size
        self.hop_size = hop_size
        self.look_ahead = look_ahead
        self.init_average = init_average
        self.ctx_pos_enc = ctx_pos_enc
def output_size(self) -> int:
return self._output_size
def forward(
self,
xs_pad: torch.Tensor,
ilens: torch.Tensor,
prev_states: torch.Tensor = None,
is_final=True,
infer_mode=False,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
"""Embed positions in tensor.
Args:
xs_pad: input tensor (B, L, D)
ilens: input length (B)
prev_states: Not to be used now.
infer_mode: whether to be used for inference. This is used to
distinguish between forward_train (train and validate) and
forward_infer (decode).
Returns:
position embedded tensor and mask
"""
if self.training or not infer_mode:
return self.forward_train(xs_pad, ilens, prev_states)
else:
return self.forward_infer(xs_pad, ilens, prev_states, is_final)
    def forward_train(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Embed positions in tensor (batch/parallel block processing).

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.

        Returns:
            position embedded tensor and mask
        """
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        if isinstance(self.embed, Conv2dSubsamplingWOPosEnc):
            xs_pad, masks = self.embed(xs_pad, masks)
        elif self.embed is not None:
            xs_pad = self.embed(xs_pad)
        # create empty output container
        total_frame_num = xs_pad.size(1)
        ys_pad = xs_pad.new_zeros(xs_pad.size())
        # Number of frames of a block that belong to the "past" context.
        past_size = self.block_size - self.hop_size - self.look_ahead
        # block_size could be 0 meaning infinite
        # apply usual encoder for short sequence
        if self.block_size == 0 or total_frame_num <= self.block_size:
            xs_pad, masks, _, _, _, _, _ = self.encoders(
                self.pos_enc(xs_pad), masks, False, None, None
            )
            if self.normalize_before:
                xs_pad = self.after_norm(xs_pad)
            olens = masks.squeeze(1).sum(1)
            return xs_pad, olens, None
        # start block processing
        cur_hop = 0
        block_num = math.ceil(
            float(total_frame_num - past_size - self.look_ahead) / float(self.hop_size)
        )
        bsize = xs_pad.size(0)
        addin = xs_pad.new_zeros(
            bsize, block_num, xs_pad.size(-1)
        )  # additional context embedding vectors, one per block
        # Initialize each block's context vector by pooling its input frames.
        # first step
        if self.init_average:  # initialize with average value
            addin[:, 0, :] = xs_pad.narrow(1, cur_hop, self.block_size).mean(1)
        else:  # initialize with max value
            # NOTE(review): Tensor.max(1) returns (values, indices); assigning
            # the tuple into a tensor slice looks wrong — likely needs
            # ``.max(1).values``. Only reached when init_average=False.
            addin[:, 0, :] = xs_pad.narrow(1, cur_hop, self.block_size).max(1)
        cur_hop += self.hop_size
        # following steps
        while cur_hop + self.block_size < total_frame_num:
            if self.init_average:  # initialize with average value
                addin[:, cur_hop // self.hop_size, :] = xs_pad.narrow(
                    1, cur_hop, self.block_size
                ).mean(1)
            else:  # initialize with max value
                addin[:, cur_hop // self.hop_size, :] = xs_pad.narrow(
                    1, cur_hop, self.block_size
                ).max(1)
            cur_hop += self.hop_size
        # last step (possibly shorter than block_size)
        if cur_hop < total_frame_num and cur_hop // self.hop_size < block_num:
            if self.init_average:  # initialize with average value
                addin[:, cur_hop // self.hop_size, :] = xs_pad.narrow(
                    1, cur_hop, total_frame_num - cur_hop
                ).mean(1)
            else:  # initialize with max value
                addin[:, cur_hop // self.hop_size, :] = xs_pad.narrow(
                    1, cur_hop, total_frame_num - cur_hop
                ).max(1)
        if self.ctx_pos_enc:
            addin = self.pos_enc(addin)
        xs_pad = self.pos_enc(xs_pad)
        # set up masks: each block attends to its block_size frames plus the
        # context slot at position 0 (the extra slot at block_size + 1 holds
        # the outgoing context vector).
        mask_online = xs_pad.new_zeros(
            xs_pad.size(0), block_num, self.block_size + 2, self.block_size + 2
        )
        mask_online.narrow(2, 1, self.block_size + 1).narrow(
            3, 0, self.block_size + 1
        ).fill_(1)
        xs_chunk = xs_pad.new_zeros(
            bsize, block_num, self.block_size + 2, xs_pad.size(-1)
        )
        # fill the input: frames of block i go to slots 1..block_size.
        # first step
        left_idx = 0
        block_idx = 0
        xs_chunk[:, block_idx, 1 : self.block_size + 1] = xs_pad.narrow(
            -2, left_idx, self.block_size
        )
        left_idx += self.hop_size
        block_idx += 1
        # following steps
        while left_idx + self.block_size < total_frame_num and block_idx < block_num:
            xs_chunk[:, block_idx, 1 : self.block_size + 1] = xs_pad.narrow(
                -2, left_idx, self.block_size
            )
            left_idx += self.hop_size
            block_idx += 1
        # last steps
        last_size = total_frame_num - left_idx
        xs_chunk[:, block_idx, 1 : last_size + 1] = xs_pad.narrow(
            -2, left_idx, last_size
        )
        # fill the initial context vector: slot 0 of block i receives the
        # context of block i-1 (block 0 gets its own initial context);
        # slot block_size + 1 holds each block's own context vector.
        xs_chunk[:, 0, 0] = addin[:, 0]
        xs_chunk[:, 1:, 0] = addin[:, 0 : block_num - 1]
        xs_chunk[:, :, self.block_size + 1] = addin
        # forward through all blocks in parallel
        ys_chunk, mask_online, _, _, _, _, _ = self.encoders(
            xs_chunk, mask_online, False, xs_chunk
        )
        # copy output back to (B, T, D), taking hop_size frames per block
        # starting at ``offset`` (skip past-context and look-ahead frames).
        # first step
        offset = self.block_size - self.look_ahead - self.hop_size + 1
        left_idx = 0
        block_idx = 0
        cur_hop = self.block_size - self.look_ahead
        ys_pad[:, left_idx:cur_hop] = ys_chunk[:, block_idx, 1 : cur_hop + 1]
        left_idx += self.hop_size
        block_idx += 1
        # following steps
        while left_idx + self.block_size < total_frame_num and block_idx < block_num:
            ys_pad[:, cur_hop : cur_hop + self.hop_size] = ys_chunk[
                :, block_idx, offset : offset + self.hop_size
            ]
            cur_hop += self.hop_size
            left_idx += self.hop_size
            block_idx += 1
        ys_pad[:, cur_hop:total_frame_num] = ys_chunk[
            :, block_idx, offset : last_size + 1, :
        ]
        if self.normalize_before:
            ys_pad = self.after_norm(ys_pad)
        olens = masks.squeeze(1).sum(1)
        return ys_pad, olens, None
def forward_infer(
self,
xs_pad: torch.Tensor,
ilens: torch.Tensor,
prev_states: torch.Tensor = None,
is_final: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
"""Embed positions in tensor.
Args:
xs_pad: input tensor (B, L, D)
ilens: input length (B)
prev_states: Not to be used now.
Returns:
position embedded tensor and mask
"""
if prev_states is None:
prev_addin = None
buffer_before_downsampling = None
ilens_buffer = None
buffer_after_downsampling = None
n_processed_blocks = 0
past_encoder_ctx = None
else:
prev_addin = prev_states["prev_addin"]
buffer_before_downsampling = prev_states["buffer_before_downsampling"]
ilens_buffer = prev_states["ilens_buffer"]
buffer_after_downsampling = prev_states["buffer_after_downsampling"]
n_processed_blocks = prev_states["n_processed_blocks"]
past_encoder_ctx = prev_states["past_encoder_ctx"]
bsize = xs_pad.size(0)
assert bsize == 1
if prev_states is not None:
xs_pad = torch.cat([buffer_before_downsampling, xs_pad], dim=1)
ilens += ilens_buffer
if is_final:
buffer_before_downsampling = None
else:
n_samples = xs_pad.size(1) // self.subsample - 1
if n_samples < 2:
next_states = {
"prev_addin": prev_addin,
"buffer_before_downsampling": xs_pad,
"ilens_buffer": ilens,
"buffer_after_downsampling": buffer_after_downsampling,
"n_processed_blocks": n_processed_blocks,
"past_encoder_ctx": past_encoder_ctx,
}
return (
xs_pad.new_zeros(bsize, 0, self._output_size),
xs_pad.new_zeros(bsize),
next_states,
)
n_res_samples = xs_pad.size(1) % self.subsample + self.subsample * 2
buffer_before_downsampling = xs_pad.narrow(
1, xs_pad.size(1) - n_res_samples, n_res_samples
)
xs_pad = xs_pad.narrow(1, 0, n_samples * self.subsample)
ilens_buffer = ilens.new_full(
[1], dtype=torch.long, fill_value=n_res_samples
)
ilens = ilens.new_full(
[1], dtype=torch.long, fill_value=n_samples * self.subsample
)
if isinstance(self.embed, Conv2dSubsamplingWOPosEnc):
xs_pad, _ = self.embed(xs_pad, None)
elif self.embed is not None:
xs_pad = self.embed(xs_pad)
# create empty output container
if buffer_after_downsampling is not None:
xs_pad = torch.cat([buffer_after_downsampling, xs_pad], dim=1)
total_frame_num = xs_pad.size(1)
if is_final:
past_size = self.block_size - self.hop_size - self.look_ahead
block_num = math.ceil(
float(total_frame_num - past_size - self.look_ahead)
/ float(self.hop_size)
)
buffer_after_downsampling = None
else:
if total_frame_num <= self.block_size:
next_states = {
"prev_addin": prev_addin,
"buffer_before_downsampling": buffer_before_downsampling,
"ilens_buffer": ilens_buffer,
"buffer_after_downsampling": xs_pad,
"n_processed_blocks": n_processed_blocks,
"past_encoder_ctx": past_encoder_ctx,
}
return (
xs_pad.new_zeros(bsize, 0, self._output_size),
xs_pad.new_zeros(bsize),
next_states,
)
overlap_size = self.block_size - self.hop_size
block_num = max(0, xs_pad.size(1) - overlap_size) // self.hop_size
res_frame_num = xs_pad.size(1) - self.hop_size * block_num
buffer_after_downsampling = xs_pad.narrow(
1, xs_pad.size(1) - res_frame_num, res_frame_num
)
xs_pad = xs_pad.narrow(1, 0, block_num * self.hop_size + overlap_size)
# block_size could be 0 meaning infinite
# apply usual encoder for short sequence
assert self.block_size > 0
if n_processed_blocks == 0 and total_frame_num <= self.block_size and is_final:
xs_chunk = self.pos_enc(xs_pad).unsqueeze(1)
xs_pad, _, _, _, _, _, _ = self.encoders(
xs_chunk, None, True, None, None, True
)
xs_pad = xs_pad.squeeze(0)
if self.normalize_before:
xs_pad = self.after_norm(xs_pad)
return xs_pad, None, None
# start block processing
xs_chunk = xs_pad.new_zeros(
bsize, block_num, self.block_size + 2, xs_pad.size(-1)
)
for i in range(block_num):
cur_hop = i * self.hop_size
chunk_length = min(self.block_size, total_frame_num - cur_hop)
addin = xs_pad.narrow(1, cur_hop, chunk_length)
if self.init_average:
addin = addin.mean(1, keepdim=True)
else:
addin = addin.max(1, keepdim=True)
if self.ctx_pos_enc:
addin = self.pos_enc(addin, i + n_processed_blocks)
if prev_addin is None:
prev_addin = addin
xs_chunk[:, i, 0] = prev_addin
xs_chunk[:, i, -1] = addin
chunk = self.pos_enc(
xs_pad.narrow(1, cur_hop, chunk_length),
cur_hop + self.hop_size * n_processed_blocks,
)
xs_chunk[:, i, 1 : chunk_length + 1] = chunk
prev_addin = addin
# mask setup, it should be the same to that of forward_train
mask_online = xs_pad.new_zeros(
xs_pad.size(0), block_num, self.block_size + 2, self.block_size + 2
)
mask_online.narrow(2, 1, self.block_size + 1).narrow(
3, 0, self.block_size + 1
).fill_(1)
ys_chunk, _, _, _, past_encoder_ctx, _, _ = self.encoders(
xs_chunk, mask_online, True, past_encoder_ctx
)
# remove addin
ys_chunk = ys_chunk.narrow(2, 1, self.block_size)
offset = self.block_size - self.look_ahead - self.hop_size
if is_final:
if n_processed_blocks == 0:
y_length = xs_pad.size(1)
else:
y_length = xs_pad.size(1) - offset
else:
y_length = block_num * self.hop_size
if n_processed_blocks == 0:
y_length += offset
ys_pad = xs_pad.new_zeros((xs_pad.size(0), y_length, xs_pad.size(2)))
if n_processed_blocks == 0:
ys_pad[:, 0:offset] = ys_chunk[:, 0, 0:offset]
for i in range(block_num):
cur_hop = i * self.hop_size
if n_processed_blocks == 0:
cur_hop += offset
if i == block_num - 1 and is_final:
chunk_length = min(self.block_size - offset, ys_pad.size(1) - cur_hop)
else:
chunk_length = self.hop_size
ys_pad[:, cur_hop : cur_hop + chunk_length] = ys_chunk[
:, i, offset : offset + chunk_length
]
if self.normalize_before:
ys_pad = self.after_norm(ys_pad)
if is_final:
next_states = None
else:
next_states = {
"prev_addin": prev_addin,
"buffer_before_downsampling": buffer_before_downsampling,
"ilens_buffer": ilens_buffer,
"buffer_after_downsampling": buffer_after_downsampling,
"n_processed_blocks": n_processed_blocks + block_num,
"past_encoder_ctx": past_encoder_ctx,
}
return ys_pad, None, next_states
| 21,748 | 37.022727 | 87 | py |
espnet | espnet-master/espnet2/asr/encoder/longformer_encoder.py | # Copyright 2020 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Conformer encoder definition."""
from typing import List, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
from espnet.nets.pytorch_backend.conformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.nets_utils import get_activation, make_pad_mask
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import (
Conv2dSubsampling,
Conv2dSubsampling1,
Conv2dSubsampling2,
Conv2dSubsampling6,
Conv2dSubsampling8,
TooShortUttError,
check_short_utt,
)
class LongformerEncoder(ConformerEncoder):
    """Longformer SA Conformer encoder module.
    Args:
        input_size (int): Input dimension.
        output_size (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        attention_dropout_rate (float): Dropout rate in attention.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            If True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            If False, no additional linear will be applied. i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        rel_pos_type (str): Whether to use the latest relative positional encoding or
            the legacy one. The legacy relative positional encoding will be deprecated
            in the future. More Details can be found in
            https://github.com/espnet/espnet/pull/2816.
        encoder_pos_enc_layer_type (str): Encoder positional encoding layer type.
        encoder_attn_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernel size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        interctc_layer_idx (List[int]): 1-based indices of blocks whose hidden
            states are additionally returned for intermediate CTC.
        interctc_use_conditioning (bool): Whether to add the intermediate CTC
            posteriors (through ``self.conditioning_layer``, set externally)
            back into the encoder stream.
        attention_windows (list): Layer-wise attention window sizes
            for longformer self-attn
        attention_dilation(list): Layer-wise attention dilation sizes
            for longformer self-attn
        attention_mode(str): Implementation for longformer self-attn.
            Default="sliding_chunks"
            Choose 'n2', 'tvm' or 'sliding_chunks'. More details in
            https://github.com/allenai/longformer
    """
    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "conv2d",
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 3,
        macaron_style: bool = False,
        rel_pos_type: str = "legacy",
        pos_enc_layer_type: str = "abs_pos",
        selfattention_layer_type: str = "lf_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = True,
        zero_triu: bool = False,
        cnn_module_kernel: int = 31,
        padding_idx: int = -1,
        interctc_layer_idx: List[int] = [],
        interctc_use_conditioning: bool = False,
        # NOTE: the list defaults below are read-only inside __init__, so the
        # shared-mutable-default pitfall does not bite here.
        attention_windows: list = [100, 100, 100, 100, 100, 100],
        attention_dilation: list = [1, 1, 1, 1, 1, 1],
        attention_mode: str = "sliding_chunks",
    ):
        assert check_argument_types()
        # The parent builds a default Conformer; every module it creates is
        # overwritten below with Longformer-specific versions.
        super().__init__(input_size)
        self._output_size = output_size
        activation = get_activation(activation_type)
        # Longformer self-attention only supports absolute positional encoding.
        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        else:
            raise ValueError(
                "incorrect or unknown pos_enc_layer: "
                + pos_enc_layer_type
                + "Use abs_pos"
            )
        # The window/dilation lists are per-layer and must cover every block.
        if len(attention_dilation) != num_blocks:
            raise ValueError(
                "incorrect attention_dilation parameter of length"
                + str(len(attention_dilation))
                + " does not match num_blocks"
                + str(num_blocks)
            )
        if len(attention_windows) != num_blocks:
            raise ValueError(
                "incorrect attention_windows parameter of length"
                + str(len(attention_windows))
                + " does not match num_blocks"
                + str(num_blocks)
            )
        # Dilated attention is only implemented by the TVM CUDA kernel.
        if attention_mode != "tvm" and max(attention_dilation) != 1:
            raise ValueError(
                "incorrect attention mode for dilation: "
                + attention_mode
                + "Use attention_mode=tvm with Cuda Kernel"
            )
        # Input embedding / subsampling frontend selection.
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d1":
            self.embed = Conv2dSubsampling1(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d2":
            self.embed = Conv2dSubsampling2(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer is None:
            self.embed = torch.nn.Sequential(
                pos_enc_class(output_size, positional_dropout_rate)
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        # Position-wise feed-forward module selection.
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
                activation,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        self.selfattention_layer_type = selfattention_layer_type
        if selfattention_layer_type == "lf_selfattn":
            assert pos_enc_layer_type == "abs_pos"
            # Imported lazily so ESPnet works without the longformer package
            # unless this encoder is actually used.
            from longformer.longformer import LongformerConfig

            from espnet.nets.pytorch_backend.transformer.longformer_attention import (
                LongformerAttention,
            )

            encoder_selfattn_layer = LongformerAttention
            config = LongformerConfig(
                attention_window=attention_windows,
                attention_dilation=attention_dilation,
                autoregressive=False,
                num_attention_heads=attention_heads,
                hidden_size=output_size,
                attention_probs_dropout_prob=dropout_rate,
                attention_mode=attention_mode,
            )
            encoder_selfattn_layer_args = (config,)
        else:
            raise ValueError(
                "incompatible or unknown encoder_attn_layer: "
                + selfattention_layer_type
                + " Use lf_selfattn"
            )
        convolution_layer = ConvolutionModule
        convolution_layer_args = (output_size, cnn_module_kernel, activation)
        # layer_id is appended to the attention args so each layer picks its
        # own window/dilation entry from the config.
        self.encoders = repeat(
            num_blocks,
            lambda layer_id: EncoderLayer(
                output_size,
                encoder_selfattn_layer(*(encoder_selfattn_layer_args + (layer_id,))),
                positionwise_layer(*positionwise_layer_args),
                positionwise_layer(*positionwise_layer_args) if macaron_style else None,
                convolution_layer(*convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)
        self.interctc_layer_idx = interctc_layer_idx
        if len(interctc_layer_idx) > 0:
            assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks
        self.interctc_use_conditioning = interctc_use_conditioning
        # Set externally (e.g. by the ASR task) when conditioning is enabled.
        self.conditioning_layer = None
    def output_size(self) -> int:
        """Return the output (attention) dimension of the encoder."""
        return self._output_size
    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
        ctc: CTC = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Calculate forward propagation.
        Args:
            xs_pad (torch.Tensor): Input tensor (#batch, L, input_size).
            ilens (torch.Tensor): Input length (#batch).
            prev_states (torch.Tensor): Not to be used now.
            ctc (CTC): CTC module used for intermediate CTC conditioning.
        Returns:
            torch.Tensor: Output tensor (#batch, L, output_size).
            torch.Tensor: Output length (#batch).
            torch.Tensor: Not to be used now.
        """
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        if (
            isinstance(self.embed, Conv2dSubsampling)
            or isinstance(self.embed, Conv2dSubsampling1)
            or isinstance(self.embed, Conv2dSubsampling2)
            or isinstance(self.embed, Conv2dSubsampling6)
            or isinstance(self.embed, Conv2dSubsampling8)
        ):
            short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
            if short_status:
                raise TooShortUttError(
                    f"has {xs_pad.size(1)} frames and is too short for subsampling "
                    + f"(it needs more than {limit_size} frames), return empty results",
                    xs_pad.size(1),
                    limit_size,
                )
            xs_pad, masks = self.embed(xs_pad, masks)
        else:
            xs_pad = self.embed(xs_pad)
        if self.selfattention_layer_type == "lf_selfattn":
            # Longformer's sliding-chunks attention requires the sequence
            # length to be a multiple of 2 * attention_window, so pad the
            # features (with zeros) and the mask (with False) up to that.
            seq_len = xs_pad.shape[1]
            attention_window = (
                max([x.self_attn.attention_window for x in self.encoders]) * 2
            )
            padding_len = (
                attention_window - seq_len % attention_window
            ) % attention_window
            xs_pad = torch.nn.functional.pad(
                xs_pad, (0, 0, 0, padding_len), "constant", 0
            )
            masks = torch.nn.functional.pad(masks, (0, padding_len), "constant", False)
            # FIX: the encoder stack used to be applied here AND again in the
            # dispatch below, encoding every input twice; it now runs once.
        intermediate_outs = []
        if len(self.interctc_layer_idx) == 0:
            xs_pad, masks = self.encoders(xs_pad, masks)
        else:
            for layer_idx, encoder_layer in enumerate(self.encoders):
                xs_pad, masks = encoder_layer(xs_pad, masks)
                if layer_idx + 1 in self.interctc_layer_idx:
                    encoder_out = xs_pad
                    if isinstance(encoder_out, tuple):
                        encoder_out = encoder_out[0]
                    # intermediate outputs are also normalized
                    if self.normalize_before:
                        encoder_out = self.after_norm(encoder_out)
                    intermediate_outs.append((layer_idx + 1, encoder_out))
                    if self.interctc_use_conditioning:
                        ctc_out = ctc.softmax(encoder_out)
                        if isinstance(xs_pad, tuple):
                            x, pos_emb = xs_pad
                            x = x + self.conditioning_layer(ctc_out)
                            xs_pad = (x, pos_emb)
                        else:
                            xs_pad = xs_pad + self.conditioning_layer(ctc_out)
        if isinstance(xs_pad, tuple):
            xs_pad = xs_pad[0]
        if self.normalize_before:
            xs_pad = self.after_norm(xs_pad)
        olens = masks.squeeze(1).sum(1)
        if len(intermediate_outs) > 0:
            return (xs_pad, intermediate_outs), olens, None
        return xs_pad, olens, None
| 15,394 | 39.195822 | 88 | py |
espnet | espnet-master/espnet2/asr/encoder/conformer_encoder.py | # Copyright 2020 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Conformer encoder definition."""
import logging
from typing import List, Optional, Tuple, Union
import torch
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
from espnet.nets.pytorch_backend.conformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.nets_utils import get_activation, make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import (
LegacyRelPositionMultiHeadedAttention,
MultiHeadedAttention,
RelPositionMultiHeadedAttention,
)
from espnet.nets.pytorch_backend.transformer.embedding import (
LegacyRelPositionalEncoding,
PositionalEncoding,
RelPositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import (
Conv2dSubsampling,
Conv2dSubsampling1,
Conv2dSubsampling2,
Conv2dSubsampling6,
Conv2dSubsampling8,
TooShortUttError,
check_short_utt,
)
class ConformerEncoder(AbsEncoder):
    """Conformer encoder module.
    Args:
        input_size (int): Input dimension.
        output_size (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        attention_dropout_rate (float): Dropout rate in attention.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            If True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            If False, no additional linear will be applied. i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        rel_pos_type (str): Whether to use the latest relative positional encoding or
            the legacy one. The legacy relative positional encoding will be deprecated
            in the future. More Details can be found in
            https://github.com/espnet/espnet/pull/2816.
        encoder_pos_enc_layer_type (str): Encoder positional encoding layer type.
        encoder_attn_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernel size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        interctc_layer_idx (List[int]): 1-based indices of blocks whose hidden
            states are additionally returned for intermediate CTC.
        interctc_use_conditioning (bool): Whether to add the intermediate CTC
            posteriors (through ``self.conditioning_layer``, set externally)
            back into the encoder stream.
        stochastic_depth_rate (Union[float, List[float]]): Per-block stochastic
            depth probability; a scalar is broadcast to all blocks.
        layer_drop_rate (float): LayerDrop rate passed to ``repeat``.
        max_pos_emb_len (int): Maximum length of the positional encoding table.
    """
    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "conv2d",
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 3,
        macaron_style: bool = False,
        rel_pos_type: str = "legacy",
        pos_enc_layer_type: str = "rel_pos",
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = True,
        zero_triu: bool = False,
        cnn_module_kernel: int = 31,
        padding_idx: int = -1,
        interctc_layer_idx: List[int] = [],
        interctc_use_conditioning: bool = False,
        stochastic_depth_rate: Union[float, List[float]] = 0.0,
        layer_drop_rate: float = 0.0,
        max_pos_emb_len: int = 5000,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        # Map the "rel_pos"/"rel_selfattn" names onto the legacy or latest
        # relative-position implementations, depending on rel_pos_type.
        if rel_pos_type == "legacy":
            if pos_enc_layer_type == "rel_pos":
                pos_enc_layer_type = "legacy_rel_pos"
            if selfattention_layer_type == "rel_selfattn":
                selfattention_layer_type = "legacy_rel_selfattn"
        elif rel_pos_type == "latest":
            assert selfattention_layer_type != "legacy_rel_selfattn"
            assert pos_enc_layer_type != "legacy_rel_pos"
        else:
            raise ValueError("unknown rel_pos_type: " + rel_pos_type)
        activation = get_activation(activation_type)
        # Positional encoding class selection (must match the attention type).
        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert selfattention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
            logging.warning(
                "Using legacy_rel_pos and it will be deprecated in the future."
            )
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
        # Input embedding / subsampling frontend selection.
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d1":
            self.embed = Conv2dSubsampling1(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d2":
            self.embed = Conv2dSubsampling2(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer is None:
            self.embed = torch.nn.Sequential(
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len)
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        # Position-wise feed-forward module selection.
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
                activation,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        # Self-attention module selection.
        if selfattention_layer_type == "selfattn":
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
        elif selfattention_layer_type == "legacy_rel_selfattn":
            assert pos_enc_layer_type == "legacy_rel_pos"
            encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
            logging.warning(
                "Using legacy_rel_selfattn and it will be deprecated in the future."
            )
        elif selfattention_layer_type == "rel_selfattn":
            assert pos_enc_layer_type == "rel_pos"
            encoder_selfattn_layer = RelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
                zero_triu,
            )
        else:
            raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type)
        convolution_layer = ConvolutionModule
        convolution_layer_args = (output_size, cnn_module_kernel, activation)
        # A scalar stochastic-depth rate is broadcast to every block.
        if isinstance(stochastic_depth_rate, float):
            stochastic_depth_rate = [stochastic_depth_rate] * num_blocks
        if len(stochastic_depth_rate) != num_blocks:
            raise ValueError(
                f"Length of stochastic_depth_rate ({len(stochastic_depth_rate)}) "
                f"should be equal to num_blocks ({num_blocks})"
            )
        # Build the stack of Conformer blocks; lnum indexes per-layer rates.
        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                output_size,
                encoder_selfattn_layer(*encoder_selfattn_layer_args),
                positionwise_layer(*positionwise_layer_args),
                positionwise_layer(*positionwise_layer_args) if macaron_style else None,
                convolution_layer(*convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
                concat_after,
                stochastic_depth_rate[lnum],
            ),
            layer_drop_rate,
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)
        self.interctc_layer_idx = interctc_layer_idx
        if len(interctc_layer_idx) > 0:
            assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks
        self.interctc_use_conditioning = interctc_use_conditioning
        # Set externally (e.g. by the ASR task) when conditioning is enabled.
        self.conditioning_layer = None
    def output_size(self) -> int:
        """Return the output (attention) dimension of the encoder."""
        return self._output_size
    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
        ctc: CTC = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Calculate forward propagation.
        Args:
            xs_pad (torch.Tensor): Input tensor (#batch, L, input_size).
            ilens (torch.Tensor): Input length (#batch).
            prev_states (torch.Tensor): Not to be used now.
            ctc (CTC): CTC module used for intermediate CTC conditioning.
        Returns:
            torch.Tensor: Output tensor (#batch, L, output_size).
            torch.Tensor: Output length (#batch).
            torch.Tensor: Not to be used now.
        """
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        if (
            isinstance(self.embed, Conv2dSubsampling)
            or isinstance(self.embed, Conv2dSubsampling1)
            or isinstance(self.embed, Conv2dSubsampling2)
            or isinstance(self.embed, Conv2dSubsampling6)
            or isinstance(self.embed, Conv2dSubsampling8)
        ):
            # Conv frontends shrink time; reject inputs too short to survive.
            short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
            if short_status:
                raise TooShortUttError(
                    f"has {xs_pad.size(1)} frames and is too short for subsampling "
                    + f"(it needs more than {limit_size} frames), return empty results",
                    xs_pad.size(1),
                    limit_size,
                )
            xs_pad, masks = self.embed(xs_pad, masks)
        else:
            xs_pad = self.embed(xs_pad)
        intermediate_outs = []
        if len(self.interctc_layer_idx) == 0:
            xs_pad, masks = self.encoders(xs_pad, masks)
        else:
            # Run layer by layer so intermediate states can be tapped.
            for layer_idx, encoder_layer in enumerate(self.encoders):
                xs_pad, masks = encoder_layer(xs_pad, masks)
                if layer_idx + 1 in self.interctc_layer_idx:
                    encoder_out = xs_pad
                    if isinstance(encoder_out, tuple):
                        encoder_out = encoder_out[0]
                    # intermediate outputs are also normalized
                    if self.normalize_before:
                        encoder_out = self.after_norm(encoder_out)
                    intermediate_outs.append((layer_idx + 1, encoder_out))
                    if self.interctc_use_conditioning:
                        ctc_out = ctc.softmax(encoder_out)
                        # xs_pad may be (features, pos_emb) for rel-pos attn.
                        if isinstance(xs_pad, tuple):
                            x, pos_emb = xs_pad
                            x = x + self.conditioning_layer(ctc_out)
                            xs_pad = (x, pos_emb)
                        else:
                            xs_pad = xs_pad + self.conditioning_layer(ctc_out)
        if isinstance(xs_pad, tuple):
            xs_pad = xs_pad[0]
        if self.normalize_before:
            xs_pad = self.after_norm(xs_pad)
        olens = masks.squeeze(1).sum(1)
        if len(intermediate_outs) > 0:
            return (xs_pad, intermediate_outs), olens, None
        return xs_pad, olens, None
| 15,429 | 39.820106 | 88 | py |
espnet | espnet-master/espnet2/asr/encoder/transformer_encoder_multispkr.py | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder definition."""
from typing import Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import (
Conv2dSubsampling,
Conv2dSubsampling1,
Conv2dSubsampling2,
Conv2dSubsampling6,
Conv2dSubsampling8,
TooShortUttError,
check_short_utt,
)
class TransformerEncoder(AbsEncoder):
    """Transformer encoder module with per-stream speaker-dependent branches.

    The embedded input is processed by ``num_inf`` parallel speaker-dependent
    encoder stacks (``encoders_sd``); each branch output is then passed
    through a single shared recognition encoder stack (``encoders``).

    Args:
        input_size: input dim
        output_size: dimension of attention
        attention_heads: the number of heads of multi head attention
        linear_units: the number of units of position-wise feed forward
        num_blocks: the number of recognition encoder blocks
        num_blocks_sd: the number of speaker dependent encoder blocks
        dropout_rate: dropout rate
        attention_dropout_rate: dropout rate in attention
        positional_dropout_rate: dropout rate after adding positional encoding
        input_layer: input layer type
        pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
        normalize_before: whether to use layer_norm before the first block
        concat_after: whether to concat attention layer's input and output
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type: linear of conv1d
        positionwise_conv_kernel_size: kernel size of positionwise conv1d layer
        padding_idx: padding_idx for input_layer=embed
        num_inf: number of inference output
    """
    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        num_blocks_sd: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: Optional[str] = "conv2d",
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 1,
        padding_idx: int = -1,
        num_inf: int = 1,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        # Select the input embedding / subsampling front-end.
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d1":
            self.embed = Conv2dSubsampling1(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d2":
            self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate)
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer is None:
            # No projection: only positional encoding is applied.
            self.embed = torch.nn.Sequential(
                pos_enc_class(output_size, positional_dropout_rate)
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        # Select the position-wise feed-forward class + args shared by every
        # encoder layer below.
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        # Shared recognition encoder stack, applied to every branch.
        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                output_size,
                MultiHeadedAttention(
                    attention_heads, output_size, attention_dropout_rate
                ),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)
        self.num_inf = num_inf
        # One independent speaker-dependent encoder stack per output stream.
        self.encoders_sd = torch.nn.ModuleList(
            [
                repeat(
                    num_blocks_sd,
                    lambda lnum: EncoderLayer(
                        output_size,
                        MultiHeadedAttention(
                            attention_heads, output_size, attention_dropout_rate
                        ),
                        positionwise_layer(*positionwise_layer_args),
                        dropout_rate,
                        normalize_before,
                        concat_after,
                    ),
                )
                for _ in range(num_inf)
            ]
        )
    def output_size(self) -> int:
        # Output feature dimension of the encoder.
        return self._output_size
    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Embed positions in tensor.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.

        Returns:
            position embedded tensor and mask
        """
        # masks: True for valid frames, shape (B, 1, L).
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        if (
            isinstance(self.embed, Conv2dSubsampling)
            or isinstance(self.embed, Conv2dSubsampling1)
            or isinstance(self.embed, Conv2dSubsampling2)
            or isinstance(self.embed, Conv2dSubsampling6)
            or isinstance(self.embed, Conv2dSubsampling8)
        ):
            # Convolutional subsampling shortens the sequence; reject inputs
            # too short to survive the reduction.
            short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
            if short_status:
                raise TooShortUttError(
                    f"has {xs_pad.size(1)} frames and is too short for subsampling "
                    + f"(it needs more than {limit_size} frames), return empty results",
                    xs_pad.size(1),
                    limit_size,
                )
            xs_pad, masks = self.embed(xs_pad, masks)
        else:
            xs_pad = self.embed(xs_pad)
        xs_sd, masks_sd = [None] * self.num_inf, [None] * self.num_inf
        # For each stream: speaker-dependent stack first, then the shared
        # recognition stack, then the optional final layer norm.
        for ns in range(self.num_inf):
            xs_sd[ns], masks_sd[ns] = self.encoders_sd[ns](xs_pad, masks)
            xs_sd[ns], masks_sd[ns] = self.encoders(xs_sd[ns], masks_sd[ns])  # Enc_rec
            if self.normalize_before:
                xs_sd[ns] = self.after_norm(xs_sd[ns])
        olens = [masks_sd[ns].squeeze(1).sum(1) for ns in range(self.num_inf)]
        # Stack the streams along a new axis: (B, num_inf, L', D) and (B, num_inf).
        return torch.stack(xs_sd, dim=1), torch.stack(olens, dim=1), None
| 8,820 | 38.030973 | 88 | py |
espnet | espnet-master/espnet2/asr/encoder/transformer_encoder.py | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer encoder definition."""
from typing import List, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import (
Conv1dSubsampling2,
Conv2dSubsampling,
Conv2dSubsampling1,
Conv2dSubsampling2,
Conv2dSubsampling6,
Conv2dSubsampling8,
TooShortUttError,
check_short_utt,
)
class TransformerEncoder(AbsEncoder):
    """Transformer encoder module.

    Args:
        input_size: input dim
        output_size: dimension of attention
        attention_heads: the number of heads of multi head attention
        linear_units: the number of units of position-wise feed forward
        num_blocks: the number of decoder blocks
        dropout_rate: dropout rate
        attention_dropout_rate: dropout rate in attention
        positional_dropout_rate: dropout rate after adding positional encoding
        input_layer: input layer type
        pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
        normalize_before: whether to use layer_norm before the first block
        concat_after: whether to concat attention layer's input and output
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type: linear of conv1d
        positionwise_conv_kernel_size: kernel size of positionwise conv1d layer
        padding_idx: padding_idx for input_layer=embed
        interctc_layer_idx: 1-based indices of layers whose output is also
            returned for intermediate CTC
        interctc_use_conditioning: condition subsequent layers on intermediate
            CTC posteriors (requires ``conditioning_layer`` to be set by the
            caller before ``forward`` is used with it)
    """
    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: Optional[str] = "conv2d",
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 1,
        padding_idx: int = -1,
        interctc_layer_idx: List[int] = [],
        interctc_use_conditioning: bool = False,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        # Select the input embedding / subsampling front-end.
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv1d2":
            self.embed = Conv1dSubsampling2(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d1":
            self.embed = Conv2dSubsampling1(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d2":
            self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate)
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer is None:
            # No front-end needed when the sizes already match; otherwise a
            # plain linear projection bridges the dimensions.
            if input_size == output_size:
                self.embed = None
            else:
                self.embed = torch.nn.Linear(input_size, output_size)
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        # Select the position-wise feed-forward class + args shared by every
        # encoder layer below.
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                output_size,
                MultiHeadedAttention(
                    attention_heads, output_size, attention_dropout_rate
                ),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)
        self.interctc_layer_idx = interctc_layer_idx
        if len(interctc_layer_idx) > 0:
            assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks
        self.interctc_use_conditioning = interctc_use_conditioning
        # Filled in externally when self-conditioned CTC is enabled.
        self.conditioning_layer = None
    def output_size(self) -> int:
        # Output feature dimension of the encoder.
        return self._output_size
    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
        ctc: CTC = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Embed positions in tensor.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.
            ctc: intermediate CTC module, used only when
                ``interctc_use_conditioning`` is enabled.

        Returns:
            position embedded tensor and mask
        """
        # masks: True for valid frames, shape (B, 1, L).
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        if self.embed is None:
            # No front-end configured: use the features as-is.
            pass
        elif isinstance(
            self.embed,
            (
                Conv2dSubsampling,
                Conv1dSubsampling2,
                Conv2dSubsampling1,
                Conv2dSubsampling2,
                Conv2dSubsampling6,
                Conv2dSubsampling8,
            ),
        ):
            # Convolutional subsampling shortens the sequence; reject inputs
            # too short to survive the reduction.
            short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
            if short_status:
                raise TooShortUttError(
                    f"has {xs_pad.size(1)} frames and is too short for subsampling "
                    + f"(it needs more than {limit_size} frames), return empty results",
                    xs_pad.size(1),
                    limit_size,
                )
            xs_pad, masks = self.embed(xs_pad, masks)
        else:
            xs_pad = self.embed(xs_pad)
        intermediate_outs = []
        if len(self.interctc_layer_idx) == 0:
            xs_pad, masks = self.encoders(xs_pad, masks)
        else:
            # Run layer by layer so intermediate outputs can be captured.
            for layer_idx, encoder_layer in enumerate(self.encoders):
                xs_pad, masks = encoder_layer(xs_pad, masks)
                if layer_idx + 1 in self.interctc_layer_idx:
                    encoder_out = xs_pad
                    # intermediate outputs are also normalized
                    if self.normalize_before:
                        encoder_out = self.after_norm(encoder_out)
                    intermediate_outs.append((layer_idx + 1, encoder_out))
                    if self.interctc_use_conditioning:
                        # Self-conditioned CTC: feed CTC posteriors back in.
                        ctc_out = ctc.softmax(encoder_out)
                        xs_pad = xs_pad + self.conditioning_layer(ctc_out)
        if self.normalize_before:
            xs_pad = self.after_norm(xs_pad)
        olens = masks.squeeze(1).sum(1)
        if len(intermediate_outs) > 0:
            return (xs_pad, intermediate_outs), olens, None
        return xs_pad, olens, None
| 9,402 | 37.855372 | 88 | py |
espnet | espnet-master/espnet2/asr/encoder/vgg_rnn_encoder.py | from typing import Tuple
import numpy as np
import torch
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet.nets.e2e_asr_common import get_vgg2l_odim
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.rnn.encoders import RNN, RNNP, VGG2L
class VGGRNNEncoder(AbsEncoder):
    """VGGRNNEncoder class.

    A VGG2L convolutional front-end followed by an RNN (with or without a
    projection layer) stack.

    Args:
        input_size: The number of expected features in the input
        bidirectional: If ``True`` becomes a bidirectional LSTM
        use_projection: Use projection layer or not
        num_layers: Number of recurrent layers
        hidden_size: The number of hidden features
        output_size: The number of output features
        dropout: dropout probability
    """
    def __init__(
        self,
        input_size: int,
        rnn_type: str = "lstm",
        bidirectional: bool = True,
        use_projection: bool = True,
        num_layers: int = 4,
        hidden_size: int = 320,
        output_size: int = 320,
        dropout: float = 0.0,
        in_channel: int = 1,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        self.rnn_type = rnn_type
        self.bidirectional = bidirectional
        self.use_projection = use_projection
        if rnn_type not in {"lstm", "gru"}:
            raise ValueError(f"Not supported rnn_type={rnn_type}")
        # Subsample is not used for VGGRNN
        subsample = np.ones(num_layers + 1, dtype=np.int64)
        # "b" prefix selects the bidirectional RNN variant (e.g. "blstm").
        rnn_type = ("b" if bidirectional else "") + rnn_type
        if use_projection:
            # VGG2L front-end + RNN with projection layers (RNNP).
            self.enc = torch.nn.ModuleList(
                [
                    VGG2L(in_channel),
                    RNNP(
                        get_vgg2l_odim(input_size, in_channel=in_channel),
                        num_layers,
                        hidden_size,
                        output_size,
                        subsample,
                        dropout,
                        typ=rnn_type,
                    ),
                ]
            )
        else:
            # VGG2L front-end + plain RNN.
            self.enc = torch.nn.ModuleList(
                [
                    VGG2L(in_channel),
                    RNN(
                        get_vgg2l_odim(input_size, in_channel=in_channel),
                        num_layers,
                        hidden_size,
                        output_size,
                        dropout,
                        typ=rnn_type,
                    ),
                ]
            )
    def output_size(self) -> int:
        # Output feature dimension of the encoder.
        return self._output_size
    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # prev_states carries one state object per sub-module (VGG2L, RNN(P)).
        if prev_states is None:
            prev_states = [None] * len(self.enc)
        assert len(prev_states) == len(self.enc)
        current_states = []
        # Pipe the input through each sub-module, threading per-module states.
        for module, prev_state in zip(self.enc, prev_states):
            xs_pad, ilens, states = module(xs_pad, ilens, prev_state=prev_state)
            current_states.append(states)
        # Zero out padded frames in the output.
        # NOTE(review): the projection path fills in-place while the plain-RNN
        # path uses an out-of-place fill — presumably the latter output must
        # not be mutated in-place; confirm before unifying.
        if self.use_projection:
            xs_pad.masked_fill_(make_pad_mask(ilens, xs_pad, 1), 0.0)
        else:
            xs_pad = xs_pad.masked_fill(make_pad_mask(ilens, xs_pad, 1), 0.0)
        return xs_pad, ilens, current_states
| 3,415 | 31.533333 | 80 | py |
espnet | espnet-master/espnet2/asr/encoder/abs_encoder.py | from abc import ABC, abstractmethod
from typing import Optional, Tuple
import torch
class AbsEncoder(torch.nn.Module, ABC):
    """Abstract base class of every espnet2 ASR encoder.

    Concrete encoders must report their output feature dimension via
    ``output_size`` and implement ``forward``.
    """

    @abstractmethod
    def output_size(self) -> int:
        """Return the dimension of the encoder output features."""
        raise NotImplementedError

    @abstractmethod
    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Encode padded input features.

        Args:
            xs_pad: Padded input tensor.
            ilens: Length of each sequence in the batch.
            prev_states: Optional state carried over from a previous call.
        """
        raise NotImplementedError
| 470 | 22.55 | 67 | py |
espnet | espnet-master/espnet2/asr/encoder/e_branchformer_encoder.py | # Copyright 2022 Kwangyoun Kim (ASAPP inc.)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""E-Branchformer encoder definition.
Reference:
Kwangyoun Kim, Felix Wu, Yifan Peng, Jing Pan,
Prashant Sridhar, Kyu J. Han, Shinji Watanabe,
"E-Branchformer: Branchformer with Enhanced merging
for speech recognition," in SLT 2022.
"""
import logging
from typing import List, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.layers.cgmlp import ConvolutionalGatingMLP
from espnet2.asr.layers.fastformer import FastSelfAttention
from espnet.nets.pytorch_backend.nets_utils import get_activation, make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import ( # noqa: H301
LegacyRelPositionMultiHeadedAttention,
MultiHeadedAttention,
RelPositionMultiHeadedAttention,
)
from espnet.nets.pytorch_backend.transformer.embedding import ( # noqa: H301
LegacyRelPositionalEncoding,
PositionalEncoding,
RelPositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import (
Conv1dSubsampling2,
Conv1dSubsampling3,
Conv2dSubsampling,
Conv2dSubsampling1,
Conv2dSubsampling2,
Conv2dSubsampling6,
Conv2dSubsampling8,
TooShortUttError,
check_short_utt,
)
class EBranchformerEncoderLayer(torch.nn.Module):
    """E-Branchformer encoder layer module.

    Args:
        size (int): model dimension
        attn: standard self-attention or efficient attention
        cgmlp: ConvolutionalGatingMLP
        feed_forward: feed-forward module, optional
        feed_forward: macaron-style feed-forward module, optional
        dropout_rate (float): dropout probability
        merge_conv_kernel (int): kernel size of the depth-wise conv in merge module
    """
    def __init__(
        self,
        size: int,
        attn: torch.nn.Module,
        cgmlp: torch.nn.Module,
        feed_forward: Optional[torch.nn.Module],
        feed_forward_macaron: Optional[torch.nn.Module],
        dropout_rate: float,
        merge_conv_kernel: int = 3,
    ):
        super().__init__()
        self.size = size
        self.attn = attn
        self.cgmlp = cgmlp
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        self.ff_scale = 1.0
        if self.feed_forward is not None:
            self.norm_ff = LayerNorm(size)
        if self.feed_forward_macaron is not None:
            # Macaron-style: each of the two FFNs contributes half weight.
            self.ff_scale = 0.5
            self.norm_ff_macaron = LayerNorm(size)
        self.norm_mha = LayerNorm(size)  # for the MHA module
        self.norm_mlp = LayerNorm(size)  # for the MLP module
        self.norm_final = LayerNorm(size)  # for the final output of the block
        self.dropout = torch.nn.Dropout(dropout_rate)
        # Depth-wise conv used to merge the concatenated attention + cgMLP
        # branch outputs before the final projection.
        self.depthwise_conv_fusion = torch.nn.Conv1d(
            size + size,
            size + size,
            kernel_size=merge_conv_kernel,
            stride=1,
            padding=(merge_conv_kernel - 1) // 2,
            groups=size + size,
            bias=True,
        )
        self.merge_proj = torch.nn.Linear(size + size, size)
    def forward(self, x_input, mask, cache=None):
        """Compute encoded features.

        Args:
            x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.
                - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].
                - w/o pos emb: Tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time).
        """
        if cache is not None:
            raise NotImplementedError("cache is not None, which is not tested")
        # Unpack the optional positional embedding from the input tuple.
        if isinstance(x_input, tuple):
            x, pos_emb = x_input[0], x_input[1]
        else:
            x, pos_emb = x_input, None
        # Optional macaron-style feed-forward before the two branches.
        if self.feed_forward_macaron is not None:
            residual = x
            x = self.norm_ff_macaron(x)
            x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x))
        # Two branches
        x1 = x
        x2 = x
        # Branch 1: multi-headed attention module
        x1 = self.norm_mha(x1)
        if isinstance(self.attn, FastSelfAttention):
            x_att = self.attn(x1, mask)
        else:
            # Relative-position attention variants take pos_emb explicitly.
            if pos_emb is not None:
                x_att = self.attn(x1, x1, x1, pos_emb, mask)
            else:
                x_att = self.attn(x1, x1, x1, mask)
        x1 = self.dropout(x_att)
        # Branch 2: convolutional gating mlp
        x2 = self.norm_mlp(x2)
        if pos_emb is not None:
            x2 = (x2, pos_emb)
        x2 = self.cgmlp(x2, mask)
        if isinstance(x2, tuple):
            x2 = x2[0]
        x2 = self.dropout(x2)
        # Merge two branches
        x_concat = torch.cat([x1, x2], dim=-1)
        # Depth-wise conv over time; residual with the raw concatenation.
        x_tmp = x_concat.transpose(1, 2)
        x_tmp = self.depthwise_conv_fusion(x_tmp)
        x_tmp = x_tmp.transpose(1, 2)
        x = x + self.dropout(self.merge_proj(x_concat + x_tmp))
        if self.feed_forward is not None:
            # feed forward module
            residual = x
            x = self.norm_ff(x)
            x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
        x = self.norm_final(x)
        # Re-attach the positional embedding so the next layer sees the tuple.
        if pos_emb is not None:
            return (x, pos_emb), mask
        return x, mask
class EBranchformerEncoder(AbsEncoder):
    """E-Branchformer encoder module.

    Stacks ``num_blocks`` :class:`EBranchformerEncoderLayer` blocks on top of
    a configurable embedding / subsampling front-end, with optional
    intermediate-CTC outputs and self-conditioning.
    """
    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        attention_layer_type: str = "rel_selfattn",
        pos_enc_layer_type: str = "rel_pos",
        rel_pos_type: str = "latest",
        cgmlp_linear_units: int = 2048,
        cgmlp_conv_kernel: int = 31,
        use_linear_after_conv: bool = False,
        gate_activation: str = "identity",
        num_blocks: int = 12,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: Optional[str] = "conv2d",
        zero_triu: bool = False,
        padding_idx: int = -1,
        layer_drop_rate: float = 0.0,
        max_pos_emb_len: int = 5000,
        use_ffn: bool = False,
        macaron_ffn: bool = False,
        ffn_activation_type: str = "swish",
        linear_units: int = 2048,
        positionwise_layer_type: str = "linear",
        merge_conv_kernel: int = 3,
        interctc_layer_idx=None,
        interctc_use_conditioning: bool = False,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        # "legacy" remaps the default rel-pos choices to their deprecated
        # counterparts; "latest" forbids the legacy names.
        if rel_pos_type == "legacy":
            if pos_enc_layer_type == "rel_pos":
                pos_enc_layer_type = "legacy_rel_pos"
            if attention_layer_type == "rel_selfattn":
                attention_layer_type = "legacy_rel_selfattn"
        elif rel_pos_type == "latest":
            assert attention_layer_type != "legacy_rel_selfattn"
            assert pos_enc_layer_type != "legacy_rel_pos"
        else:
            raise ValueError("unknown rel_pos_type: " + rel_pos_type)
        # Select the positional-encoding class; rel-pos variants must be
        # paired with the matching attention type.
        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert attention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert attention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
            logging.warning(
                "Using legacy_rel_pos and it will be deprecated in the future."
            )
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
        # Select the input embedding / subsampling front-end.
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv1d2":
            self.embed = Conv1dSubsampling2(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv1d3":
            self.embed = Conv1dSubsampling3(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d1":
            self.embed = Conv2dSubsampling1(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d2":
            self.embed = Conv2dSubsampling2(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif isinstance(input_layer, torch.nn.Module):
            # Caller-provided front-end module; only pos-enc is appended.
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(output_size, positional_dropout_rate, max_pos_emb_len),
            )
        elif input_layer is None:
            if input_size == output_size:
                self.embed = None
            else:
                self.embed = torch.nn.Linear(input_size, output_size)
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        activation = get_activation(ffn_activation_type)
        # Optional position-wise FFN used by the encoder layers (see use_ffn).
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
                activation,
            )
        elif positionwise_layer_type is None:
            # NOTE(review): positionwise_layer stays undefined on this path;
            # combining positionwise_layer_type=None with use_ffn=True would
            # raise NameError below — confirm callers never do that.
            logging.warning("no macaron ffn")
        else:
            raise ValueError("Support only linear.")
        # Select the self-attention implementation for branch 1.
        if attention_layer_type == "selfattn":
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
        elif attention_layer_type == "legacy_rel_selfattn":
            assert pos_enc_layer_type == "legacy_rel_pos"
            encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
            logging.warning(
                "Using legacy_rel_selfattn and it will be deprecated in the future."
            )
        elif attention_layer_type == "rel_selfattn":
            assert pos_enc_layer_type == "rel_pos"
            encoder_selfattn_layer = RelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
                zero_triu,
            )
        elif attention_layer_type == "fast_selfattn":
            # FastSelfAttention needs absolute positional encoding.
            assert pos_enc_layer_type in ["abs_pos", "scaled_abs_pos"]
            encoder_selfattn_layer = FastSelfAttention
            encoder_selfattn_layer_args = (
                output_size,
                attention_heads,
                attention_dropout_rate,
            )
        else:
            raise ValueError("unknown encoder_attn_layer: " + attention_layer_type)
        # Convolutional gating MLP for branch 2.
        cgmlp_layer = ConvolutionalGatingMLP
        cgmlp_layer_args = (
            output_size,
            cgmlp_linear_units,
            cgmlp_conv_kernel,
            dropout_rate,
            use_linear_after_conv,
            gate_activation,
        )
        self.encoders = repeat(
            num_blocks,
            lambda lnum: EBranchformerEncoderLayer(
                output_size,
                encoder_selfattn_layer(*encoder_selfattn_layer_args),
                cgmlp_layer(*cgmlp_layer_args),
                positionwise_layer(*positionwise_layer_args) if use_ffn else None,
                positionwise_layer(*positionwise_layer_args)
                if use_ffn and macaron_ffn
                else None,
                dropout_rate,
                merge_conv_kernel,
            ),
            layer_drop_rate,
        )
        self.after_norm = LayerNorm(output_size)
        if interctc_layer_idx is None:
            interctc_layer_idx = []
        self.interctc_layer_idx = interctc_layer_idx
        if len(interctc_layer_idx) > 0:
            assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks
        self.interctc_use_conditioning = interctc_use_conditioning
        # Filled in externally when self-conditioned CTC is enabled.
        self.conditioning_layer = None
    def output_size(self) -> int:
        # Output feature dimension of the encoder.
        return self._output_size
    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
        ctc: CTC = None,
        max_layer: int = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Calculate forward propagation.

        Args:
            xs_pad (torch.Tensor): Input tensor (#batch, L, input_size).
            ilens (torch.Tensor): Input length (#batch).
            prev_states (torch.Tensor): Not to be used now.
            ctc (CTC): Intermediate CTC module.
            max_layer (int): Layer depth below which InterCTC is applied.

        Returns:
            torch.Tensor: Output tensor (#batch, L, output_size).
            torch.Tensor: Output length (#batch).
            torch.Tensor: Not to be used now.
        """
        # masks: True for valid frames, shape (B, 1, L).
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        if (
            isinstance(self.embed, Conv2dSubsampling)
            or isinstance(self.embed, Conv1dSubsampling2)
            or isinstance(self.embed, Conv1dSubsampling3)
            or isinstance(self.embed, Conv2dSubsampling1)
            or isinstance(self.embed, Conv2dSubsampling2)
            or isinstance(self.embed, Conv2dSubsampling6)
            or isinstance(self.embed, Conv2dSubsampling8)
        ):
            # Convolutional subsampling shortens the sequence; reject inputs
            # too short to survive the reduction.
            short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
            if short_status:
                raise TooShortUttError(
                    f"has {xs_pad.size(1)} frames and is too short for subsampling "
                    + f"(it needs more than {limit_size} frames), return empty results",
                    xs_pad.size(1),
                    limit_size,
                )
            xs_pad, masks = self.embed(xs_pad, masks)
        elif self.embed is not None:
            xs_pad = self.embed(xs_pad)
        intermediate_outs = []
        if len(self.interctc_layer_idx) == 0:
            if max_layer is not None and 0 <= max_layer < len(self.encoders):
                # Stop early after max_layer blocks (inclusive).
                for layer_idx, encoder_layer in enumerate(self.encoders):
                    xs_pad, masks = encoder_layer(xs_pad, masks)
                    if layer_idx >= max_layer:
                        break
            else:
                xs_pad, masks = self.encoders(xs_pad, masks)
        else:
            # Run layer by layer so intermediate outputs can be captured.
            for layer_idx, encoder_layer in enumerate(self.encoders):
                xs_pad, masks = encoder_layer(xs_pad, masks)
                if layer_idx + 1 in self.interctc_layer_idx:
                    encoder_out = xs_pad
                    # xs_pad may be (tensor, pos_emb); CTC sees the tensor only.
                    if isinstance(encoder_out, tuple):
                        encoder_out = encoder_out[0]
                    intermediate_outs.append((layer_idx + 1, encoder_out))
                    if self.interctc_use_conditioning:
                        # Self-conditioned CTC: feed CTC posteriors back in.
                        ctc_out = ctc.softmax(encoder_out)
                        if isinstance(xs_pad, tuple):
                            xs_pad = list(xs_pad)
                            xs_pad[0] = xs_pad[0] + self.conditioning_layer(ctc_out)
                            xs_pad = tuple(xs_pad)
                        else:
                            xs_pad = xs_pad + self.conditioning_layer(ctc_out)
        # Drop the positional embedding before the final normalization.
        if isinstance(xs_pad, tuple):
            xs_pad = xs_pad[0]
        xs_pad = self.after_norm(xs_pad)
        olens = masks.squeeze(1).sum(1)
        if len(intermediate_outs) > 0:
            return (xs_pad, intermediate_outs), olens, None
        return xs_pad, olens, None
| 18,321 | 36.014141 | 88 | py |
espnet | espnet-master/espnet2/asr/encoder/wav2vec2_encoder.py | # Copyright 2021 Xuankai Chang
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder definition."""
import contextlib
import copy
import logging
import os
from typing import Optional, Tuple
import torch
from filelock import FileLock
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class FairSeqWav2Vec2Encoder(AbsEncoder):
"""FairSeq Wav2Vec2 encoder module.
Args:
input_size: input dim
output_size: dimension of attention
w2v_url: url to Wav2Vec2.0 pretrained model
w2v_dir_path: directory to download the Wav2Vec2.0 pretrained model.
normalize_before: whether to use layer_norm before the first block
finetune_last_n_layers: last n layers to be finetuned in Wav2Vec2.0
0 means to finetune every layer if freeze_w2v=False.
"""
def __init__(
self,
input_size: int,
w2v_url: str,
w2v_dir_path: str = "./",
output_size: int = 256,
normalize_before: bool = False,
freeze_finetune_updates: int = 0,
):
assert check_argument_types()
super().__init__()
if w2v_url != "":
try:
import fairseq
from fairseq.models.wav2vec.wav2vec2 import Wav2Vec2Model
except Exception as e:
print("Error: FairSeq is not properly installed.")
print(
"Please install FairSeq: cd ${MAIN_ROOT}/tools && make fairseq.done"
)
raise e
self.w2v_model_path = download_w2v(w2v_url, w2v_dir_path)
self._output_size = output_size
models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[self.w2v_model_path],
arg_overrides={"data": w2v_dir_path},
)
model = models[0]
if not isinstance(model, Wav2Vec2Model):
try:
model = model.w2v_encoder.w2v_model
except Exception as e:
print(
"Error: pretrained models should be within: "
"'Wav2Vec2Model, Wav2VecCTC' classes, etc."
)
raise e
self.encoders = model
self.pretrained_params = copy.deepcopy(model.state_dict())
self.normalize_before = normalize_before
if self.normalize_before:
self.after_norm = LayerNorm(output_size)
if model.cfg.encoder_embed_dim != output_size:
# TODO(xkc09): try LSTM
self.output_layer = torch.nn.Sequential(
torch.nn.Linear(model.cfg.encoder_embed_dim, output_size),
)
else:
self.output_layer = None
self.freeze_finetune_updates = freeze_finetune_updates
self.register_buffer("num_updates", torch.LongTensor([0]))
def output_size(self) -> int:
return self._output_size
def forward(
self,
xs_pad: torch.Tensor,
ilens: torch.Tensor,
prev_states: torch.Tensor = None,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
"""Forward FairSeqWav2Vec2 Encoder.
Args:
xs_pad: input tensor (B, L, D)
ilens: input length (B)
prev_states: Not to be used now.
Returns:
position embedded tensor and mask
"""
masks = make_pad_mask(ilens).to(xs_pad.device)
ft = self.freeze_finetune_updates <= self.num_updates
if self.num_updates <= self.freeze_finetune_updates:
self.num_updates += 1
elif ft and self.num_updates == self.freeze_finetune_updates + 1:
self.num_updates += 1
logging.info("Start fine-tuning wav2vec parameters!")
with torch.no_grad() if not ft else contextlib.nullcontext():
enc_outputs = self.encoders(
xs_pad,
masks,
mask=self.training,
features_only=True,
)
xs_pad = enc_outputs["x"] # (B,T,C),
bs = xs_pad.shape[0]
if enc_outputs["padding_mask"] is not None:
masks = enc_outputs["padding_mask"] # (B, T)
olens = (~masks).sum(dim=1) # (B)
else:
olens = torch.IntTensor([xs_pad.shape[1]]).repeat(bs).to(xs_pad.device)
if self.output_layer is not None:
xs_pad = self.output_layer(xs_pad)
if self.normalize_before:
xs_pad = self.after_norm(xs_pad)
return xs_pad, olens, None
    def reload_pretrained_parameters(self):
        """Restore the wav2vec weights captured at construction time.

        Undoes any fine-tuning by loading the state dict that was
        deep-copied into ``self.pretrained_params`` during ``__init__``.
        """
        self.encoders.load_state_dict(self.pretrained_params)
        logging.info("Pretrained Wav2Vec model parameters reloaded!")
def download_w2v(model_url, dir_path):
    """Download a fairseq wav2vec checkpoint (plus its letter dict) into a dir.

    Args:
        model_url: URL of the pretrained wav2vec model checkpoint.
        dir_path: local directory in which to store the files.

    Returns:
        Path of the (possibly just downloaded) model checkpoint file.
    """
    os.makedirs(dir_path, exist_ok=True)

    model_name = model_url.split("/")[-1]
    model_path = os.path.join(dir_path, model_name)
    dict_url = "https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt"
    dict_path = os.path.join(dir_path, dict_url.split("/")[-1])

    # Serialize concurrent callers so the checkpoint is fetched only once.
    with FileLock(model_path + ".lock"):
        if os.path.exists(model_path):
            logging.info(f"Wav2Vec model {model_path} already exists.")
        else:
            torch.hub.download_url_to_file(model_url, model_path)
            torch.hub.download_url_to_file(dict_url, dict_path)
            logging.info(f"Wav2Vec model downloaded {model_path}")

    return model_path
| 5,628 | 32.307692 | 88 | py |
espnet | espnet-master/espnet2/asr/encoder/contextual_block_conformer_encoder.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 21 17:27:16 2021.
@author: Keqi Deng (UCAS)
"""
import math
from typing import Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet.nets.pytorch_backend.conformer.contextual_block_encoder_layer import (
ContextualBlockEncoderLayer,
)
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
from espnet.nets.pytorch_backend.nets_utils import get_activation, make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.embedding import StreamPositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling_without_posenc import (
Conv2dSubsamplingWOPosEnc,
)
class ContextualBlockConformerEncoder(AbsEncoder):
    """Contextual Block Conformer encoder module.

    The input is processed in fixed-size, overlapping blocks and a context
    embedding vector is handed from one block to the next, enabling streaming
    encoding with bounded look-ahead.

    Args:
        input_size: input dim
        output_size: dimension of attention
        attention_heads: the number of heads of multi head attention
        linear_units: the number of units of position-wise feed forward
        num_blocks: the number of decoder blocks
        dropout_rate: dropout rate
        attention_dropout_rate: dropout rate in attention
        positional_dropout_rate: dropout rate after adding positional encoding
        input_layer: input layer type
        pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
        normalize_before: whether to use layer_norm before the first block
        concat_after: whether to concat attention layer's input and output
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type: linear of conv1d
        positionwise_conv_kernel_size: kernel size of positionwise conv1d layer
        padding_idx: padding_idx for input_layer=embed
        block_size: block size for contextual block processing
        hop_size: hop size for block processing
        look_ahead: look-ahead size for block_processing
        init_average: whether to use average as initial context (otherwise max values)
        ctx_pos_enc: whether to use positional encoding to the context vectors
    """

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: Optional[str] = "conv2d",
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 3,
        macaron_style: bool = False,
        pos_enc_class=StreamPositionalEncoding,
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = True,
        cnn_module_kernel: int = 31,
        padding_idx: int = -1,
        block_size: int = 40,
        hop_size: int = 16,
        look_ahead: int = 16,
        init_average: bool = True,
        ctx_pos_enc: bool = True,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        self.pos_enc = pos_enc_class(output_size, positional_dropout_rate)
        activation = get_activation(activation_type)
        # Input embedding / subsampling front-end. ``self.subsample`` records
        # the time-axis reduction factor for streaming buffer bookkeeping.
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
            )
            self.subsample = 1
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsamplingWOPosEnc(
                input_size, output_size, dropout_rate, kernels=[3, 3], strides=[2, 2]
            )
            self.subsample = 4
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsamplingWOPosEnc(
                input_size, output_size, dropout_rate, kernels=[3, 5], strides=[2, 3]
            )
            self.subsample = 6
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsamplingWOPosEnc(
                input_size,
                output_size,
                dropout_rate,
                kernels=[3, 3, 3],
                strides=[2, 2, 2],
            )
            self.subsample = 8
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
            )
            self.subsample = 1
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(output_size, positional_dropout_rate),
            )
            self.subsample = 1
        elif input_layer is None:
            self.embed = torch.nn.Sequential(
                pos_enc_class(output_size, positional_dropout_rate)
            )
            self.subsample = 1
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        # Position-wise feed-forward flavor shared by all encoder layers.
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        convolution_layer = ConvolutionModule
        convolution_layer_args = (output_size, cnn_module_kernel, activation)
        self.encoders = repeat(
            num_blocks,
            lambda lnum: ContextualBlockEncoderLayer(
                output_size,
                MultiHeadedAttention(
                    attention_heads, output_size, attention_dropout_rate
                ),
                positionwise_layer(*positionwise_layer_args),
                positionwise_layer(*positionwise_layer_args) if macaron_style else None,
                convolution_layer(*convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                num_blocks,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)
        # for block processing
        self.block_size = block_size
        self.hop_size = hop_size
        self.look_ahead = look_ahead
        self.init_average = init_average
        self.ctx_pos_enc = ctx_pos_enc

    def output_size(self) -> int:
        """Return the dimensionality of the encoder output features."""
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
        is_final=True,
        infer_mode=False,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Embed positions in tensor.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.
            infer_mode: whether to be used for inference. This is used to
                distinguish between forward_train (train and validate) and
                forward_infer (decode).
        Returns:
            position embedded tensor and mask
        """
        if self.training or not infer_mode:
            return self.forward_train(xs_pad, ilens, prev_states)
        else:
            return self.forward_infer(xs_pad, ilens, prev_states, is_final)

    def forward_train(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Embed positions in tensor (full-utterance, non-streaming pass).

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.
        Returns:
            position embedded tensor and mask
        """
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        if isinstance(self.embed, Conv2dSubsamplingWOPosEnc):
            xs_pad, masks = self.embed(xs_pad, masks)
        elif self.embed is not None:
            xs_pad = self.embed(xs_pad)
        # create empty output container
        total_frame_num = xs_pad.size(1)
        ys_pad = xs_pad.new_zeros(xs_pad.size())
        past_size = self.block_size - self.hop_size - self.look_ahead
        # block_size could be 0 meaning infinite
        # apply usual encoder for short sequence
        if self.block_size == 0 or total_frame_num <= self.block_size:
            xs_pad, masks, _, _, _, _, _ = self.encoders(
                self.pos_enc(xs_pad), masks, False, None, None
            )
            if self.normalize_before:
                xs_pad = self.after_norm(xs_pad)
            olens = masks.squeeze(1).sum(1)
            return xs_pad, olens, None
        # start block processing
        cur_hop = 0
        block_num = math.ceil(
            float(total_frame_num - past_size - self.look_ahead) / float(self.hop_size)
        )
        bsize = xs_pad.size(0)
        addin = xs_pad.new_zeros(
            bsize, block_num, xs_pad.size(-1)
        )  # additional context embedding vectors
        # first step
        if self.init_average:  # initialize with average value
            addin[:, 0, :] = xs_pad.narrow(1, cur_hop, self.block_size).mean(1)
        else:  # initialize with max value
            # NOTE: Tensor.max(dim) returns (values, indices); keep values only.
            addin[:, 0, :] = xs_pad.narrow(1, cur_hop, self.block_size).max(1).values
        cur_hop += self.hop_size
        # following steps
        while cur_hop + self.block_size < total_frame_num:
            if self.init_average:  # initialize with average value
                addin[:, cur_hop // self.hop_size, :] = xs_pad.narrow(
                    1, cur_hop, self.block_size
                ).mean(1)
            else:  # initialize with max value
                addin[:, cur_hop // self.hop_size, :] = (
                    xs_pad.narrow(1, cur_hop, self.block_size).max(1).values
                )
            cur_hop += self.hop_size
        # last step
        if cur_hop < total_frame_num and cur_hop // self.hop_size < block_num:
            if self.init_average:  # initialize with average value
                addin[:, cur_hop // self.hop_size, :] = xs_pad.narrow(
                    1, cur_hop, total_frame_num - cur_hop
                ).mean(1)
            else:  # initialize with max value
                addin[:, cur_hop // self.hop_size, :] = (
                    xs_pad.narrow(1, cur_hop, total_frame_num - cur_hop).max(1).values
                )
        if self.ctx_pos_enc:
            addin = self.pos_enc(addin)
        xs_pad = self.pos_enc(xs_pad)
        # set up masks: each block attends to itself plus the two context
        # slots (previous context at index 0, next context at block_size + 1)
        mask_online = xs_pad.new_zeros(
            xs_pad.size(0), block_num, self.block_size + 2, self.block_size + 2
        )
        mask_online.narrow(2, 1, self.block_size + 1).narrow(
            3, 0, self.block_size + 1
        ).fill_(1)
        xs_chunk = xs_pad.new_zeros(
            bsize, block_num, self.block_size + 2, xs_pad.size(-1)
        )
        # fill the input
        # first step
        left_idx = 0
        block_idx = 0
        xs_chunk[:, block_idx, 1 : self.block_size + 1] = xs_pad.narrow(
            -2, left_idx, self.block_size
        )
        left_idx += self.hop_size
        block_idx += 1
        # following steps
        while left_idx + self.block_size < total_frame_num and block_idx < block_num:
            xs_chunk[:, block_idx, 1 : self.block_size + 1] = xs_pad.narrow(
                -2, left_idx, self.block_size
            )
            left_idx += self.hop_size
            block_idx += 1
        # last steps
        last_size = total_frame_num - left_idx
        xs_chunk[:, block_idx, 1 : last_size + 1] = xs_pad.narrow(
            -2, left_idx, last_size
        )
        # fill the initial context vector
        xs_chunk[:, 0, 0] = addin[:, 0]
        xs_chunk[:, 1:, 0] = addin[:, 0 : block_num - 1]
        xs_chunk[:, :, self.block_size + 1] = addin
        # forward
        ys_chunk, mask_online, _, _, _, _, _ = self.encoders(
            xs_chunk, mask_online, False, xs_chunk
        )
        # copy output
        # first step
        offset = self.block_size - self.look_ahead - self.hop_size + 1
        left_idx = 0
        block_idx = 0
        cur_hop = self.block_size - self.look_ahead
        ys_pad[:, left_idx:cur_hop] = ys_chunk[:, block_idx, 1 : cur_hop + 1]
        left_idx += self.hop_size
        block_idx += 1
        # following steps
        while left_idx + self.block_size < total_frame_num and block_idx < block_num:
            ys_pad[:, cur_hop : cur_hop + self.hop_size] = ys_chunk[
                :, block_idx, offset : offset + self.hop_size
            ]
            cur_hop += self.hop_size
            left_idx += self.hop_size
            block_idx += 1
        ys_pad[:, cur_hop:total_frame_num] = ys_chunk[
            :, block_idx, offset : last_size + 1, :
        ]
        if self.normalize_before:
            ys_pad = self.after_norm(ys_pad)
        olens = masks.squeeze(1).sum(1)
        return ys_pad, olens, None

    def forward_infer(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
        is_final: bool = True,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Embed positions in tensor (streaming inference pass).

        Carries buffers across calls via ``prev_states`` so that audio can be
        fed incrementally; ``is_final`` flushes the remaining buffered frames.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.
        Returns:
            position embedded tensor and mask
        """
        if prev_states is None:
            prev_addin = None
            buffer_before_downsampling = None
            ilens_buffer = None
            buffer_after_downsampling = None
            n_processed_blocks = 0
            past_encoder_ctx = None
        else:
            prev_addin = prev_states["prev_addin"]
            buffer_before_downsampling = prev_states["buffer_before_downsampling"]
            ilens_buffer = prev_states["ilens_buffer"]
            buffer_after_downsampling = prev_states["buffer_after_downsampling"]
            n_processed_blocks = prev_states["n_processed_blocks"]
            past_encoder_ctx = prev_states["past_encoder_ctx"]
        bsize = xs_pad.size(0)
        # Streaming inference only supports a single utterance at a time.
        assert bsize == 1
        if prev_states is not None:
            xs_pad = torch.cat([buffer_before_downsampling, xs_pad], dim=1)
            ilens += ilens_buffer
        if is_final:
            buffer_before_downsampling = None
        else:
            n_samples = xs_pad.size(1) // self.subsample - 1
            if n_samples < 2:
                # Not enough samples to produce a frame: buffer and wait.
                next_states = {
                    "prev_addin": prev_addin,
                    "buffer_before_downsampling": xs_pad,
                    "ilens_buffer": ilens,
                    "buffer_after_downsampling": buffer_after_downsampling,
                    "n_processed_blocks": n_processed_blocks,
                    "past_encoder_ctx": past_encoder_ctx,
                }
                return (
                    xs_pad.new_zeros(bsize, 0, self._output_size),
                    xs_pad.new_zeros(bsize),
                    next_states,
                )
            # Keep the residual samples (plus two extra frames of context for
            # the convolutional front-end) for the next call.
            n_res_samples = xs_pad.size(1) % self.subsample + self.subsample * 2
            buffer_before_downsampling = xs_pad.narrow(
                1, xs_pad.size(1) - n_res_samples, n_res_samples
            )
            xs_pad = xs_pad.narrow(1, 0, n_samples * self.subsample)
            ilens_buffer = ilens.new_full(
                [1], dtype=torch.long, fill_value=n_res_samples
            )
            ilens = ilens.new_full(
                [1], dtype=torch.long, fill_value=n_samples * self.subsample
            )
        if isinstance(self.embed, Conv2dSubsamplingWOPosEnc):
            xs_pad, _ = self.embed(xs_pad, None)
        elif self.embed is not None:
            xs_pad = self.embed(xs_pad)
        # create empty output container
        if buffer_after_downsampling is not None:
            xs_pad = torch.cat([buffer_after_downsampling, xs_pad], dim=1)
        total_frame_num = xs_pad.size(1)
        if is_final:
            past_size = self.block_size - self.hop_size - self.look_ahead
            block_num = math.ceil(
                float(total_frame_num - past_size - self.look_ahead)
                / float(self.hop_size)
            )
            buffer_after_downsampling = None
        else:
            if total_frame_num <= self.block_size:
                # Less than one block available: buffer and wait.
                next_states = {
                    "prev_addin": prev_addin,
                    "buffer_before_downsampling": buffer_before_downsampling,
                    "ilens_buffer": ilens_buffer,
                    "buffer_after_downsampling": xs_pad,
                    "n_processed_blocks": n_processed_blocks,
                    "past_encoder_ctx": past_encoder_ctx,
                }
                return (
                    xs_pad.new_zeros(bsize, 0, self._output_size),
                    xs_pad.new_zeros(bsize),
                    next_states,
                )
            overlap_size = self.block_size - self.hop_size
            block_num = max(0, xs_pad.size(1) - overlap_size) // self.hop_size
            res_frame_num = xs_pad.size(1) - self.hop_size * block_num
            buffer_after_downsampling = xs_pad.narrow(
                1, xs_pad.size(1) - res_frame_num, res_frame_num
            )
            xs_pad = xs_pad.narrow(1, 0, block_num * self.hop_size + overlap_size)
        # block_size could be 0 meaning infinite
        # apply usual encoder for short sequence
        assert self.block_size > 0
        if n_processed_blocks == 0 and total_frame_num <= self.block_size and is_final:
            xs_chunk = self.pos_enc(xs_pad).unsqueeze(1)
            xs_pad, _, _, _, _, _, _ = self.encoders(
                xs_chunk, None, True, None, None, True
            )
            xs_pad = xs_pad.squeeze(0)
            if self.normalize_before:
                xs_pad = self.after_norm(xs_pad)
            return xs_pad, None, None
        # start block processing
        xs_chunk = xs_pad.new_zeros(
            bsize, block_num, self.block_size + 2, xs_pad.size(-1)
        )
        for i in range(block_num):
            cur_hop = i * self.hop_size
            chunk_length = min(self.block_size, total_frame_num - cur_hop)
            addin = xs_pad.narrow(1, cur_hop, chunk_length)
            if self.init_average:
                addin = addin.mean(1, keepdim=True)
            else:
                # NOTE: Tensor.max(dim) returns (values, indices); keep values.
                addin = addin.max(1, keepdim=True).values
            if self.ctx_pos_enc:
                addin = self.pos_enc(addin, i + n_processed_blocks)
            if prev_addin is None:
                prev_addin = addin
            xs_chunk[:, i, 0] = prev_addin
            xs_chunk[:, i, -1] = addin
            chunk = self.pos_enc(
                xs_pad.narrow(1, cur_hop, chunk_length),
                cur_hop + self.hop_size * n_processed_blocks,
            )
            xs_chunk[:, i, 1 : chunk_length + 1] = chunk
            prev_addin = addin
        # mask setup, it should be the same to that of forward_train
        mask_online = xs_pad.new_zeros(
            xs_pad.size(0), block_num, self.block_size + 2, self.block_size + 2
        )
        mask_online.narrow(2, 1, self.block_size + 1).narrow(
            3, 0, self.block_size + 1
        ).fill_(1)
        ys_chunk, _, _, _, past_encoder_ctx, _, _ = self.encoders(
            xs_chunk, mask_online, True, past_encoder_ctx
        )
        # remove addin
        ys_chunk = ys_chunk.narrow(2, 1, self.block_size)
        offset = self.block_size - self.look_ahead - self.hop_size
        if is_final:
            if n_processed_blocks == 0:
                y_length = xs_pad.size(1)
            else:
                y_length = xs_pad.size(1) - offset
        else:
            y_length = block_num * self.hop_size
            if n_processed_blocks == 0:
                y_length += offset
        ys_pad = xs_pad.new_zeros((xs_pad.size(0), y_length, xs_pad.size(2)))
        if n_processed_blocks == 0:
            ys_pad[:, 0:offset] = ys_chunk[:, 0, 0:offset]
        for i in range(block_num):
            cur_hop = i * self.hop_size
            if n_processed_blocks == 0:
                cur_hop += offset
            if i == block_num - 1 and is_final:
                chunk_length = min(self.block_size - offset, ys_pad.size(1) - cur_hop)
            else:
                chunk_length = self.hop_size
            ys_pad[:, cur_hop : cur_hop + chunk_length] = ys_chunk[
                :, i, offset : offset + chunk_length
            ]
        if self.normalize_before:
            ys_pad = self.after_norm(ys_pad)
        if is_final:
            next_states = None
        else:
            next_states = {
                "prev_addin": prev_addin,
                "buffer_before_downsampling": buffer_before_downsampling,
                "ilens_buffer": ilens_buffer,
                "buffer_after_downsampling": buffer_after_downsampling,
                "n_processed_blocks": n_processed_blocks + block_num,
                "past_encoder_ctx": past_encoder_ctx,
            }
        return ys_pad, None, next_states
| 22,601 | 37.243655 | 88 | py |
espnet | espnet-master/espnet2/asr/encoder/branchformer_encoder.py | # Copyright 2022 Yifan Peng (Carnegie Mellon University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Branchformer encoder definition.
Reference:
Yifan Peng, Siddharth Dalmia, Ian Lane, and Shinji Watanabe,
“Branchformer: Parallel MLP-Attention Architectures to Capture
Local and Global Context for Speech Recognition and Understanding,”
in Proceedings of ICML, 2022.
"""
import logging
from typing import List, Optional, Tuple, Union
import numpy
import torch
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.layers.cgmlp import ConvolutionalGatingMLP
from espnet2.asr.layers.fastformer import FastSelfAttention
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import ( # noqa: H301
LegacyRelPositionMultiHeadedAttention,
MultiHeadedAttention,
RelPositionMultiHeadedAttention,
)
from espnet.nets.pytorch_backend.transformer.embedding import ( # noqa: H301
LegacyRelPositionalEncoding,
PositionalEncoding,
RelPositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import (
Conv2dSubsampling,
Conv2dSubsampling1,
Conv2dSubsampling2,
Conv2dSubsampling6,
Conv2dSubsampling8,
TooShortUttError,
check_short_utt,
)
class BranchformerEncoderLayer(torch.nn.Module):
    """Branchformer encoder layer module.

    Runs a self-attention branch and/or a cgMLP branch in parallel and merges
    their outputs back into the residual stream.

    Args:
        size (int): model dimension
        attn: standard self-attention or efficient attention, optional
        cgmlp: ConvolutionalGatingMLP, optional
        dropout_rate (float): dropout probability
        merge_method (str): concat, learned_ave, fixed_ave
        cgmlp_weight (float): weight of the cgmlp branch, between 0 and 1,
            used if merge_method is fixed_ave
        attn_branch_drop_rate (float): probability of dropping the attn branch,
            used if merge_method is learned_ave
        stochastic_depth_rate (float): stochastic depth probability
    """
    def __init__(
        self,
        size: int,
        attn: Optional[torch.nn.Module],
        cgmlp: Optional[torch.nn.Module],
        dropout_rate: float,
        merge_method: str,
        cgmlp_weight: float = 0.5,
        attn_branch_drop_rate: float = 0.0,
        stochastic_depth_rate: float = 0.0,
    ):
        super().__init__()
        assert (attn is not None) or (
            cgmlp is not None
        ), "At least one branch should be valid"
        self.size = size
        self.attn = attn
        self.cgmlp = cgmlp
        self.merge_method = merge_method
        self.cgmlp_weight = cgmlp_weight
        self.attn_branch_drop_rate = attn_branch_drop_rate
        self.stochastic_depth_rate = stochastic_depth_rate
        self.use_two_branches = (attn is not None) and (cgmlp is not None)
        # Each active branch gets its own pre-branch LayerNorm.
        if attn is not None:
            self.norm_mha = LayerNorm(size)  # for the MHA module
        if cgmlp is not None:
            self.norm_mlp = LayerNorm(size)  # for the MLP module
        self.norm_final = LayerNorm(size)  # for the final output of the block
        self.dropout = torch.nn.Dropout(dropout_rate)
        if self.use_two_branches:
            if merge_method == "concat":
                self.merge_proj = torch.nn.Linear(size + size, size)
            elif merge_method == "learned_ave":
                # attention-based pooling for two branches
                self.pooling_proj1 = torch.nn.Linear(size, 1)
                self.pooling_proj2 = torch.nn.Linear(size, 1)
                # linear projections for calculating merging weights
                self.weight_proj1 = torch.nn.Linear(size, 1)
                self.weight_proj2 = torch.nn.Linear(size, 1)
                # linear projection after weighted average
                self.merge_proj = torch.nn.Linear(size, size)
            elif merge_method == "fixed_ave":
                assert (
                    0.0 <= cgmlp_weight <= 1.0
                ), "cgmlp weight should be between 0.0 and 1.0"
                # remove the other branch if only one branch is used
                if cgmlp_weight == 0.0:
                    self.use_two_branches = False
                    self.cgmlp = None
                    self.norm_mlp = None
                elif cgmlp_weight == 1.0:
                    self.use_two_branches = False
                    self.attn = None
                    self.norm_mha = None
                # linear projection after weighted average
                self.merge_proj = torch.nn.Linear(size, size)
            else:
                raise ValueError(f"unknown merge method: {merge_method}")
        else:
            # Single branch: merging is a no-op.
            self.merge_proj = torch.nn.Identity()
    def forward(self, x_input, mask, cache=None):
        """Compute encoded features.
        Args:
            x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.
                - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].
                - w/o pos emb: Tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).
        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time).
        """
        if cache is not None:
            raise NotImplementedError("cache is not None, which is not tested")
        if isinstance(x_input, tuple):
            x, pos_emb = x_input[0], x_input[1]
        else:
            x, pos_emb = x_input, None
        skip_layer = False
        # with stochastic depth, residual connection `x + f(x)` becomes
        # `x <- x + 1 / (1 - p) * f(x)` at training time.
        stoch_layer_coeff = 1.0
        if self.training and self.stochastic_depth_rate > 0:
            skip_layer = torch.rand(1).item() < self.stochastic_depth_rate
            stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)
        if skip_layer:
            # Layer is skipped this step: pass the input straight through.
            if cache is not None:
                x = torch.cat([cache, x], dim=1)
            if pos_emb is not None:
                return (x, pos_emb), mask
            return x, mask
        # Two branches
        x1 = x
        x2 = x
        # Branch 1: multi-headed attention module
        if self.attn is not None:
            x1 = self.norm_mha(x1)
            # FastSelfAttention has a different call signature (no q/k/v split).
            if isinstance(self.attn, FastSelfAttention):
                x_att = self.attn(x1, mask)
            else:
                if pos_emb is not None:
                    x_att = self.attn(x1, x1, x1, pos_emb, mask)
                else:
                    x_att = self.attn(x1, x1, x1, mask)
            x1 = self.dropout(x_att)
        # Branch 2: convolutional gating mlp
        if self.cgmlp is not None:
            x2 = self.norm_mlp(x2)
            if pos_emb is not None:
                x2 = (x2, pos_emb)
            x2 = self.cgmlp(x2, mask)
            if isinstance(x2, tuple):
                x2 = x2[0]
            x2 = self.dropout(x2)
        # Merge two branches
        if self.use_two_branches:
            if self.merge_method == "concat":
                x = x + stoch_layer_coeff * self.dropout(
                    self.merge_proj(torch.cat([x1, x2], dim=-1))
                )
            elif self.merge_method == "learned_ave":
                if (
                    self.training
                    and self.attn_branch_drop_rate > 0
                    and torch.rand(1).item() < self.attn_branch_drop_rate
                ):
                    # Drop the attn branch
                    w1, w2 = 0.0, 1.0
                else:
                    # branch1
                    # Attention-pool each branch to a single vector, then map
                    # it to a scalar merging weight.
                    score1 = (
                        self.pooling_proj1(x1).transpose(1, 2) / self.size**0.5
                    )  # (batch, 1, time)
                    if mask is not None:
                        # Exclude padded frames from the softmax.
                        min_value = float(
                            numpy.finfo(
                                torch.tensor(0, dtype=score1.dtype).numpy().dtype
                            ).min
                        )
                        score1 = score1.masked_fill(mask.eq(0), min_value)
                        score1 = torch.softmax(score1, dim=-1).masked_fill(
                            mask.eq(0), 0.0
                        )
                    else:
                        score1 = torch.softmax(score1, dim=-1)
                    pooled1 = torch.matmul(score1, x1).squeeze(1)  # (batch, size)
                    weight1 = self.weight_proj1(pooled1)  # (batch, 1)
                    # branch2
                    score2 = (
                        self.pooling_proj2(x2).transpose(1, 2) / self.size**0.5
                    )  # (batch, 1, time)
                    if mask is not None:
                        min_value = float(
                            numpy.finfo(
                                torch.tensor(0, dtype=score2.dtype).numpy().dtype
                            ).min
                        )
                        score2 = score2.masked_fill(mask.eq(0), min_value)
                        score2 = torch.softmax(score2, dim=-1).masked_fill(
                            mask.eq(0), 0.0
                        )
                    else:
                        score2 = torch.softmax(score2, dim=-1)
                    pooled2 = torch.matmul(score2, x2).squeeze(1)  # (batch, size)
                    weight2 = self.weight_proj2(pooled2)  # (batch, 1)
                    # normalize weights of two branches
                    merge_weights = torch.softmax(
                        torch.cat([weight1, weight2], dim=-1), dim=-1
                    )  # (batch, 2)
                    merge_weights = merge_weights.unsqueeze(-1).unsqueeze(
                        -1
                    )  # (batch, 2, 1, 1)
                    w1, w2 = merge_weights[:, 0], merge_weights[:, 1]  # (batch, 1, 1)
                x = x + stoch_layer_coeff * self.dropout(
                    self.merge_proj(w1 * x1 + w2 * x2)
                )
            elif self.merge_method == "fixed_ave":
                x = x + stoch_layer_coeff * self.dropout(
                    self.merge_proj(
                        (1.0 - self.cgmlp_weight) * x1 + self.cgmlp_weight * x2
                    )
                )
            else:
                raise RuntimeError(f"unknown merge method: {self.merge_method}")
        else:
            if self.attn is None:
                x = x + stoch_layer_coeff * self.dropout(self.merge_proj(x2))
            elif self.cgmlp is None:
                x = x + stoch_layer_coeff * self.dropout(self.merge_proj(x1))
            else:
                # This should not happen
                raise RuntimeError("Both branches are not None, which is unexpected.")
        x = self.norm_final(x)
        if pos_emb is not None:
            return (x, pos_emb), mask
        return x, mask
class BranchformerEncoder(AbsEncoder):
    """Branchformer encoder module.

    Stacks ``num_blocks`` :class:`BranchformerEncoderLayer` blocks on top of a
    configurable input embedding / subsampling front-end.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        use_attn: bool = True,
        attention_heads: int = 4,
        attention_layer_type: str = "rel_selfattn",
        pos_enc_layer_type: str = "rel_pos",
        rel_pos_type: str = "latest",
        use_cgmlp: bool = True,
        cgmlp_linear_units: int = 2048,
        cgmlp_conv_kernel: int = 31,
        use_linear_after_conv: bool = False,
        gate_activation: str = "identity",
        merge_method: str = "concat",
        cgmlp_weight: Union[float, List[float]] = 0.5,
        attn_branch_drop_rate: Union[float, List[float]] = 0.0,
        num_blocks: int = 12,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: Optional[str] = "conv2d",
        zero_triu: bool = False,
        padding_idx: int = -1,
        stochastic_depth_rate: Union[float, List[float]] = 0.0,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        # Resolve legacy vs. latest relative positional encoding naming.
        if rel_pos_type == "legacy":
            if pos_enc_layer_type == "rel_pos":
                pos_enc_layer_type = "legacy_rel_pos"
            if attention_layer_type == "rel_selfattn":
                attention_layer_type = "legacy_rel_selfattn"
        elif rel_pos_type == "latest":
            assert attention_layer_type != "legacy_rel_selfattn"
            assert pos_enc_layer_type != "legacy_rel_pos"
        else:
            raise ValueError("unknown rel_pos_type: " + rel_pos_type)
        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert attention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert attention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
            logging.warning(
                "Using legacy_rel_pos and it will be deprecated in the future."
            )
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
        # Input embedding / subsampling front-end.
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d1":
            self.embed = Conv2dSubsampling1(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d2":
            self.embed = Conv2dSubsampling2(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer is None:
            if input_size == output_size:
                self.embed = None
            else:
                self.embed = torch.nn.Linear(input_size, output_size)
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        # Self-attention flavor shared by all encoder layers.
        if attention_layer_type == "selfattn":
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
        elif attention_layer_type == "legacy_rel_selfattn":
            assert pos_enc_layer_type == "legacy_rel_pos"
            encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
            logging.warning(
                "Using legacy_rel_selfattn and it will be deprecated in the future."
            )
        elif attention_layer_type == "rel_selfattn":
            assert pos_enc_layer_type == "rel_pos"
            encoder_selfattn_layer = RelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
                zero_triu,
            )
        elif attention_layer_type == "fast_selfattn":
            # FastSelfAttention works only with absolute positional encoding.
            assert pos_enc_layer_type in ["abs_pos", "scaled_abs_pos"]
            encoder_selfattn_layer = FastSelfAttention
            encoder_selfattn_layer_args = (
                output_size,
                attention_heads,
                attention_dropout_rate,
            )
        else:
            raise ValueError("unknown encoder_attn_layer: " + attention_layer_type)
        cgmlp_layer = ConvolutionalGatingMLP
        cgmlp_layer_args = (
            output_size,
            cgmlp_linear_units,
            cgmlp_conv_kernel,
            dropout_rate,
            use_linear_after_conv,
            gate_activation,
        )
        # Per-layer hyper-parameters may be given as a scalar (broadcast to
        # all layers) or as a list with one entry per layer.
        if isinstance(stochastic_depth_rate, float):
            stochastic_depth_rate = [stochastic_depth_rate] * num_blocks
        if len(stochastic_depth_rate) != num_blocks:
            raise ValueError(
                f"Length of stochastic_depth_rate ({len(stochastic_depth_rate)}) "
                f"should be equal to num_blocks ({num_blocks})"
            )
        if isinstance(cgmlp_weight, float):
            cgmlp_weight = [cgmlp_weight] * num_blocks
        if len(cgmlp_weight) != num_blocks:
            raise ValueError(
                f"Length of cgmlp_weight ({len(cgmlp_weight)}) should be equal to "
                f"num_blocks ({num_blocks})"
            )
        if isinstance(attn_branch_drop_rate, float):
            attn_branch_drop_rate = [attn_branch_drop_rate] * num_blocks
        if len(attn_branch_drop_rate) != num_blocks:
            raise ValueError(
                f"Length of attn_branch_drop_rate ({len(attn_branch_drop_rate)}) "
                f"should be equal to num_blocks ({num_blocks})"
            )
        self.encoders = repeat(
            num_blocks,
            lambda lnum: BranchformerEncoderLayer(
                output_size,
                encoder_selfattn_layer(*encoder_selfattn_layer_args)
                if use_attn
                else None,
                cgmlp_layer(*cgmlp_layer_args) if use_cgmlp else None,
                dropout_rate,
                merge_method,
                cgmlp_weight[lnum],
                attn_branch_drop_rate[lnum],
                stochastic_depth_rate[lnum],
            ),
        )
        self.after_norm = LayerNorm(output_size)

    def output_size(self) -> int:
        """Return the dimensionality of the encoder output features."""
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Calculate forward propagation.
        Args:
            xs_pad (torch.Tensor): Input tensor (#batch, L, input_size).
            ilens (torch.Tensor): Input length (#batch).
            prev_states (torch.Tensor): Not to be used now.
        Returns:
            torch.Tensor: Output tensor (#batch, L, output_size).
            torch.Tensor: Output length (#batch).
            torch.Tensor: Not to be used now.
        """
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        # Conv2d front-ends subsample along time and need the mask adjusted;
        # they also require a minimum number of input frames.
        if isinstance(
            self.embed,
            (
                Conv2dSubsampling,
                Conv2dSubsampling1,
                Conv2dSubsampling2,
                Conv2dSubsampling6,
                Conv2dSubsampling8,
            ),
        ):
            short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
            if short_status:
                raise TooShortUttError(
                    f"has {xs_pad.size(1)} frames and is too short for subsampling "
                    + f"(it needs more than {limit_size} frames), return empty results",
                    xs_pad.size(1),
                    limit_size,
                )
            xs_pad, masks = self.embed(xs_pad, masks)
        elif self.embed is not None:
            xs_pad = self.embed(xs_pad)
        xs_pad, masks = self.encoders(xs_pad, masks)
        if isinstance(xs_pad, tuple):
            # Drop the positional embedding carried alongside the features.
            xs_pad = xs_pad[0]
        xs_pad = self.after_norm(xs_pad)
        olens = masks.squeeze(1).sum(1)
        return xs_pad, olens, None
| 21,251 | 37.154399 | 88 | py |
espnet | espnet-master/espnet2/asr/encoder/whisper_encoder.py | import copy
from typing import Optional, Tuple, Union
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.specaug.specaug import SpecAug
class OpenAIWhisperEncoder(AbsEncoder):
    """Transformer-based Speech Encoder from OpenAI's Whisper Model:

    URL: https://github.com/openai/whisper
    """

    def __init__(
        self,
        input_size: int = 1,
        dropout_rate: float = 0.0,
        whisper_model: str = "small",
        download_dir: str = None,
        use_specaug: bool = False,
        specaug_conf: Union[dict, None] = None,
        do_pad_trim: bool = False,
    ):
        # whisper is an optional dependency; fail with install instructions.
        try:
            import whisper
            from whisper.audio import HOP_LENGTH, N_FFT, N_MELS, N_SAMPLES
        except Exception as e:
            print("Error: whisper is not properly installed.")
            print(
                "Please install whisper with: cd ${MAIN_ROOT}/tools &&",
                "./installers/install_whisper.sh",
            )
            raise e

        assert check_argument_types()
        super().__init__()

        # STFT/mel settings copied from Whisper's own audio front-end.
        self.n_fft = N_FFT
        self.win_length = N_FFT
        self.hop_length = HOP_LENGTH
        self.n_mels = N_MELS

        self.mel_filters = whisper.audio.mel_filters

        # note that originally Whisper doesn't use dropouts
        self.dropout = torch.nn.Dropout(dropout_rate)

        assert whisper_model in whisper.available_models()
        _model = whisper.load_model(whisper_model, download_root=download_dir)
        # Keep only a deep copy of the encoder; the decoder is discarded below.
        self.encoders = copy.deepcopy(_model.encoder)
        self.encoders.train()
        del _model

        if use_specaug:
            self.specaug = SpecAug(**specaug_conf)
        else:
            self.specaug = None

        # When True, inputs are padded/trimmed to N_SAMPLES (Whisper's 30 s window).
        self.do_pad_trim = do_pad_trim
        self.pad_samples = N_SAMPLES

    def output_size(self) -> int:
        """Return the encoder hidden size (width of the final LayerNorm)."""
        return self.encoders.ln_post.normalized_shape[-1]

    def pad_or_trim(
        self,
        array: torch.Tensor,
        length: int,
        axis: int = -1,
    ) -> torch.Tensor:
        """Pad or trim the audio array to N_SAMPLES.

        Used in zero-shot inference cases.
        """
        if array.shape[axis] > length:
            # Trim: keep the first `length` samples along `axis`.
            array = array.index_select(
                dim=axis, index=torch.arange(length).to(array.device)
            )

        if array.shape[axis] < length:
            # Pad with zeros at the end of `axis`; F.pad expects pairs in
            # reverse dimension order, hence the flattening of pad_widths[::-1].
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])

        return array

    def log_mel_spectrogram(
        self,
        audio: torch.Tensor,
        ilens: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Use log-mel spectrogram computation native to Whisper training"""
        window = torch.hann_window(self.win_length).to(audio.device)
        stft = torch.stft(
            audio, self.n_fft, self.hop_length, window=window, return_complex=True
        )

        # whisper deletes the last frame by default (Shih-Lun)
        magnitudes = stft[..., :-1].abs() ** 2

        filters = self.mel_filters(audio.device, self.n_mels)
        mel_spec = filters @ magnitudes

        # Floor before log10 to avoid -inf on silent frames.
        log_spec = torch.clamp(mel_spec, min=1e-10).log10()

        if ilens is not None:
            # Frame count after STFT with this hop (last frame dropped above).
            olens = ilens // self.hop_length
        else:
            olens = None

        # Whisper's dynamic-range compression: clip to 8 dB below the
        # per-utterance maximum, then rescale.
        log_spec = torch.maximum(
            log_spec,
            log_spec.view(audio.size(0), -1).max(dim=-1)[0][:, None, None] - 8.0,
        )
        log_spec = (log_spec + 4.0) / 4.0

        return log_spec, olens

    def whisper_encode(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Run the (copied) Whisper encoder stack on log-mel features."""
        x = F.gelu(self.encoders.conv1(input))
        x = F.gelu(self.encoders.conv2(x))
        x = x.permute(0, 2, 1)

        n_frames = x.size(1)
        max_pos = self.encoders.positional_embedding.size(0)
        if n_frames <= max_pos:
            x = (x + self.encoders.positional_embedding[: x.size(1), :]).to(x.dtype)
        else:
            # due to positional encoding, audios >30 sec won't be accepted
            x = x[:, :max_pos, :] + self.encoders.positional_embedding

        x = self.dropout(x)

        for layer, block in enumerate(self.encoders.blocks):
            x = block(x)
            # No dropout after the final block (ln_post follows directly).
            if layer < len(self.encoders.blocks) - 1:
                x = self.dropout(x)

        x = self.encoders.ln_post(x)

        if ilens is not None:
            # Standard conv output-length formula for conv2
            # (kernel/padding/stride taken from the layer itself).
            olens = (
                1
                + (
                    ilens
                    - self.encoders.conv2.kernel_size[0]
                    + 2 * self.encoders.conv2.padding[0]
                )
                // self.encoders.conv2.stride[0]
            )
            olens = torch.clamp(olens, max=max_pos)
        else:
            olens = None

        return x, olens

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Compute features then encode; returns (output, output lengths, None)."""
        if self.do_pad_trim:
            xs_pad = self.pad_or_trim(xs_pad, self.pad_samples)

        feats, feats_lens = self.log_mel_spectrogram(xs_pad, ilens)

        # SpecAug expects (batch, time, feat); the mel output is (batch, feat, time).
        if self.specaug is not None and self.encoders.training:
            feats = torch.transpose(feats, 1, 2)
            feats, feats_lens = self.specaug(feats, feats_lens)
            feats = torch.transpose(feats, 1, 2)

        xs_pad, olens = self.whisper_encode(feats, feats_lens)

        return xs_pad, olens, None
| 5,639 | 29.486486 | 86 | py |
espnet | espnet-master/espnet2/asr/state_spaces/ff.py | # This code is derived from https://github.com/HazyResearch/state-spaces
"""Implementation of FFN block in the style of Transformers."""
from functools import partial
from torch import nn
from espnet2.asr.state_spaces.base import SequenceModule
from espnet2.asr.state_spaces.components import DropoutNd, LinearActivation
class FF(SequenceModule):
    """Position-wise feedforward block in the style of Transformers.

    Expands the feature dimension by ``expand``, applies an activation and
    dropout, then projects down to ``d_output``.
    """

    def __init__(
        self,
        d_input,
        expand=2,
        d_output=None,
        transposed=False,
        activation="gelu",
        initializer=None,
        dropout=0.0,
        tie_dropout=False,
    ):
        super().__init__()
        self.d_output = d_output if d_output is not None else d_input
        self.transposed = transposed

        d_hidden = expand * d_input

        # First projection with the activation fused in.
        in_proj = LinearActivation(
            d_input,
            d_hidden,
            transposed=transposed,
            activation=activation,
            initializer=initializer,
            activate=True,
        )

        # Optionally tie the dropout mask across the length dimension.
        if tie_dropout:
            dropout_cls = partial(DropoutNd, transposed=self.transposed)
        else:
            dropout_cls = nn.Dropout
        drop_layer = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()

        # Second projection, no activation.
        out_proj = LinearActivation(
            d_hidden,
            self.d_output,
            transposed=transposed,
            activation=None,
            initializer=initializer,
            activate=False,
        )

        self.ff = nn.Sequential(in_proj, drop_layer, out_proj)

    def forward(self, x, *args, **kwargs):
        """Apply the FFN; the second return value is the (nonexistent) state."""
        return self.ff(x), None

    def step(self, x, state, **kwargs):
        """Single-step interface; x: [batch, d_input]."""
        if not self.transposed:
            return self.ff(x), state
        # Transposed layers expect [batch, d_input, seq_len]: fake a length-1 axis.
        return self.ff(x.unsqueeze(-1)).squeeze(-1), state
| 1,923 | 26.098592 | 75 | py |
espnet | espnet-master/espnet2/asr/state_spaces/base.py | # This code is derived from https://github.com/HazyResearch/state-spaces
import functools
from torch import nn
class SequenceModule(nn.Module):
    """Abstract sequence model class.

    All models must adhere to this interface
    A SequenceModule is generally a model that transforms an input of shape
    (n_batch, l_sequence, d_model) to (n_batch, l_sequence, d_output)

    REQUIRED methods and attributes
    forward, d_model, d_output: controls standard forward pass,
    a sequence-to-sequence transformation
    __init__ should also satisfy the following interface;
    see SequenceIdentity for an example
    def __init__(self, d_model, transposed=False, **kwargs)

    OPTIONAL methods
    default_state, step: allows stepping the model recurrently with a hidden state
    state_to_tensor, d_state: allows decoding from hidden state
    """

    @property
    def d_model(self):
        """Model dimension (generally same as input dimension).

        This attribute is required for all SequenceModule instantiations.
        It is used by the rest of the pipeline
        (e.g. model backbone, encoder) to track the internal shapes of the full model.
        """
        dim = getattr(self, "_d_model", None)
        if dim is None:
            raise NotImplementedError("SequenceModule instantiation must set d_model")
        return dim

    @d_model.setter
    def d_model(self, d):
        self._d_model = d

    @property
    def d_output(self):
        """Output dimension of model.

        This attribute is required for all SequenceModule instantiations.
        It is used by the rest of the pipeline
        (e.g. model backbone, decoder) to track the internal shapes of the full model.
        """
        dim = getattr(self, "_d_output", None)
        if dim is None:
            raise NotImplementedError(
                "SequenceModule instantiation must specify d_output for decoder"
            )
        return dim

    @d_output.setter
    def d_output(self, d):
        self._d_output = d

    def forward(self, x, state=None, **kwargs):
        """Forward pass: identity by default, with no extra state.

        A sequence-to-sequence transformation with an optional state.
        Generally maps (batch, length, self.d_model) to
        (batch, length, self.d_output); subclasses may also return extra
        information such as an RNN/SSM hidden state.
        """
        return x, None

    @property
    def state_to_tensor(self):
        """Return a function mapping a state to a single tensor.

        Implement this to use the hidden state instead of the output sequence
        for final prediction. Currently only used with the StateDecoder.
        """
        return lambda _: None

    @property
    def d_state(self):
        """Return dimension of output of self.state_to_tensor."""
        return None

    def default_state(self, *batch_shape, device=None):
        """Create initial state for a batch of inputs."""
        return None

    def step(self, x, state=None, **kwargs):
        """Step the model recurrently for one step of the input sequence.

        If forward has signature (B, L, H1) -> (B, L, H2), this should
        generally map (B, H1) -> (B, H2) with an optional recurrent state.
        """
        raise NotImplementedError
def TransposedModule(module):
    """Class decorator adding `transposed` handling to a SequenceModule.

    The wrapped class accepts a ``transposed`` keyword, transposes the last
    two axes around the inner forward pass, handles a missing state, and
    absorbs extra kwargs.
    """

    # functools.wraps with updated=() copies the name/doc of the wrapped class
    # without touching its __dict__ (https://stackoverflow.com/a/65470430/1980685)
    @functools.wraps(module, updated=())
    class _Wrapped(module):
        def __init__(self, *args, transposed=False, **kwargs):
            super().__init__(*args, **kwargs)
            self.transposed = transposed

        def forward(self, x, state=None, **kwargs):
            if self.transposed:
                x = x.transpose(-1, -2)
            # state is passed positionally on purpose: nn.LSTM rejects a kwarg.
            x, next_state = super().forward(x, state)
            if state is None:
                next_state = None
            if self.transposed:
                x = x.transpose(-1, -2)
            return x, next_state

    return _Wrapped
@TransposedModule
class SequenceIdentity(SequenceModule):
    """Minimal pass-through SequenceModule, mainly for testing purposes."""

    def __init__(self, d_model, dropout=0.0, **kwargs):
        """Store dimensions; the module itself is a no-op.

        d_model: input dimension (sometimes denoted H for hidden dimension)
        transposed: if True, inputs have axis ordering (B, H, L) instead of (B, H, L)
        """
        super().__init__()
        self.d_model = d_model
        self.d_output = d_model

    def forward(self, x, state=None):
        """Identity transformation; the state is untouched."""
        return x, state

    def default_state(self, *batch_shape, device=None):
        """Stateless module: no initial state."""
        return None

    def step(self, x, state=None, **kwargs):
        """Single-step identity."""
        return x, state
| 5,487 | 33.3 | 88 | py |
espnet | espnet-master/espnet2/asr/state_spaces/cauchy.py | # This code is derived from https://github.com/HazyResearch/state-spaces
import torch
from cauchy_mult import (
cauchy_mult_bwd,
cauchy_mult_fwd,
cauchy_mult_sym_bwd,
cauchy_mult_sym_fwd,
)
from einops import rearrange
def cauchy_mult_torch(
    v: torch.Tensor, z: torch.Tensor, w: torch.Tensor, symmetric=True
) -> torch.Tensor:
    """Compute Cauchy kernel sum_n v_n / (z_l - w_n) in pure PyTorch.

    v: (B, N)
    z: (L)
    w: (B, N)
    symmetric: whether to assume that v and w contain complex conjugate pairs, of the
        form [v_half, v_half.conj()] and [w_half, w_half.conj()]
    """
    if symmetric:
        N = v.shape[-1]
        assert N % 2 == 0
        # Only the first half of each conjugate pair is needed; the closed
        # form below accounts for the conjugate contribution analytically.
        half_v = v[:, : N // 2].unsqueeze(1)  # (B, 1, N/2)
        half_w = w[:, : N // 2].unsqueeze(1)  # (B, 1, N/2)
        zz = z.unsqueeze(-1)  # (L, 1)
        num = (
            zz * half_v.real - half_v.real * half_w.real - half_v.imag * half_w.imag
        )
        den = zz * zz - 2 * zz * half_w.real + half_w.abs().square()
        return 2 * (num / den).sum(dim=-1)
    # Generic evaluation by broadcasting to (B, L, N) and reducing over N.
    return (v.unsqueeze(1) / (z.unsqueeze(-1) - w.unsqueeze(1))).sum(dim=-1)
def cauchy_mult_keops(v, z, w):
    """Cauchy kernel via pykeops LazyTensors (memory-efficient GPU reduction)."""
    from pykeops.torch import LazyTensor

    v_lazy = LazyTensor(rearrange(v, "b N -> b 1 N 1"))
    z_lazy = LazyTensor(rearrange(z, "L -> 1 L 1 1"))
    w_lazy = LazyTensor(rearrange(w, "b N -> b 1 N 1"))
    # (b N L 1), for some reason it doesn't display the last dimension
    quotient = v_lazy / (z_lazy - w_lazy)
    summed = quotient.sum(dim=2, backend="GPU")
    return summed.squeeze(-1)
def _cauchy_mult(v, z, w, symmetric=True):
    """Dispatch to the symmetric or generic CUDA autograd function."""
    fn = CauchyMultiplySymmetric if symmetric else CauchyMultiply
    return fn.apply(v, z, w)
def cauchy_mult(v, z, w, symmetric=True):
    """Wrap the cuda method to deal with shapes."""
    # Broadcast v and w to a common shape, then flatten batch dims for the kernel.
    v, w = torch.broadcast_tensors(v, w)
    shape = v.shape

    z = z.squeeze()
    assert len(z.shape) == 1

    N = v.size(-1)
    assert w.size(-1) == N

    flat_v = v.contiguous().view(-1, N)
    flat_w = w.contiguous().view(-1, N)
    z = z.contiguous()

    out = _cauchy_mult(flat_v, z, flat_w, symmetric=symmetric)
    # Restore the original batch shape with the evaluation-point axis last.
    return out.view(*shape[:-1], z.size(-1))
class CauchyMultiply(torch.autograd.Function):
    """Autograd wrapper around the fused CUDA Cauchy kernel (non-symmetric).

    Forward validates shapes/devices then calls cauchy_mult_fwd; backward
    returns gradients for v and w only (z gets None).
    """

    @staticmethod
    def forward(ctx, v, z, w):
        batch, N = v.shape
        # supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
        supported_N_values = [1 << log_n for log_n in [6]]
        L = z.shape[-1]
        if N not in supported_N_values:
            raise NotImplementedError(f"Only support N values in {supported_N_values}")
        if L % 32 != 0:
            raise NotImplementedError("Only support L values that are multiples of 32")
        # BUGFIX: the original `not v.is_cuda and z.is_cuda and w.is_cuda`
        # parsed as `(not v.is_cuda) and ...`, so mixed CPU/CUDA inputs could
        # slip through to the CUDA kernel. All three tensors must be on CUDA.
        if not (v.is_cuda and z.is_cuda and w.is_cuda):
            raise NotImplementedError("Only support CUDA tensors")
        ctx.save_for_backward(v, z, w)
        return cauchy_mult_fwd(v, z, w)

    @staticmethod
    def backward(ctx, dout):
        v, z, w = ctx.saved_tensors
        dv, dw = cauchy_mult_bwd(v, z, w, dout)
        # No gradient w.r.t. the evaluation nodes z.
        return dv, None, dw
class CauchyMultiplySymmetric(torch.autograd.Function):
    """Autograd wrapper for the CUDA Cauchy kernel assuming conjugate pairs.

    Forward validates shapes/devices then calls cauchy_mult_sym_fwd; backward
    returns gradients for v and w only (z gets None).
    """

    @staticmethod
    def forward(ctx, v, z, w):
        batch, N = v.shape
        supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
        L = z.shape[-1]
        if N not in supported_N_values:
            raise NotImplementedError(f"Only support N values in {supported_N_values}")
        max_L_value = 32 * 1024 * 64 * 1024
        if L > max_L_value:
            # BUGFIX: the original message lacked the f-prefix, printing the
            # literal "{max_L_value}" instead of the number.
            raise NotImplementedError(f"Only support L values <= {max_L_value}")
        # BUGFIX: same operator-precedence fix as CauchyMultiply — the device
        # check must require ALL of v, z, w to be CUDA tensors.
        if not (v.is_cuda and z.is_cuda and w.is_cuda):
            raise NotImplementedError("Only support CUDA tensors")
        ctx.save_for_backward(v, z, w)
        return cauchy_mult_sym_fwd(v, z, w)

    @staticmethod
    def backward(ctx, dout):
        v, z, w = ctx.saved_tensors
        dv, dw = cauchy_mult_sym_bwd(v, z, w, dout)
        # No gradient w.r.t. the evaluation nodes z.
        return dv, None, dw
| 4,022 | 31.443548 | 88 | py |
espnet | espnet-master/espnet2/asr/state_spaces/residual.py | # This code is derived from https://github.com/HazyResearch/state-spaces
"""Implementations of different types of residual functions."""
import torch
from torch import nn
class Residual(nn.Module):
    """Residual connection with constant affine weights.

    Computes alpha * x + beta * y, covering the standard residual
    (alpha = beta = 1), no residual (alpha = 0), and "constant gates".
    """

    def __init__(self, i_layer, d_input, d_model, alpha=1.0, beta=1.0):
        super().__init__()
        # A nonzero skip weight only makes sense when dimensions agree.
        assert (d_input == d_model) or alpha == 0.0
        self.i_layer = i_layer
        self.d_input = d_input
        self.d_model = d_model
        self.alpha = alpha
        self.beta = beta

    @property
    def d_output(self):
        return self.d_model

    def forward(self, x, y, transposed):
        if self.beta != 1.0:
            y = self.beta * y
        if not self.alpha:
            return y
        return self.alpha * x + y
class Affine(Residual):
    """Residual connection with learnable scalar multipliers on the main branch.

    scalar: Single scalar multiplier, or one per dimension
    scale, power: Initialize to scale * layer_num**(-power)
    """

    def __init__(self, *args, scalar=True, gamma=0.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.scalar = scalar
        self.gamma = gamma

        # Depth-dependent initialization: beta * i_layer^{-gamma}.
        init_value = self.beta * self.i_layer ** (-self.gamma)
        dim = 1 if self.scalar else self.d_input
        self.affine = nn.Parameter(init_value * torch.ones(dim))

    def forward(self, x, y, transposed):
        scale = self.affine
        if transposed:
            # Broadcast over the trailing length dimension.
            scale = scale.unsqueeze(-1)
        return self.alpha * x + scale * y
class Feedforward(Residual):
    """Pure feedforward connection: drops the skip path (alpha=0, beta=1)."""

    def __init__(self, *args):
        super().__init__(*args, alpha=0.0, beta=1.0)
class Highway(Residual):
    """Highway residual: sigmoid-gated mix of the skip and transform branches."""

    def __init__(self, *args, scaling_correction=False, elemwise=False):
        super().__init__(*args)
        # 1.732 ~= sqrt(3); optional variance correction for the gated mix.
        self.scaling_correction = 1.732 if scaling_correction else 1.0
        self.elemwise = elemwise
        self.Wx = nn.Linear(self.d_input, self.d_input)
        if self.elemwise:
            self.Wy = nn.Parameter(torch.randn(self.d_input))
        else:
            self.Wy = nn.Linear(self.d_input, self.d_input)

    def forward(self, x, y, transposed=False):
        y = self.Wy * y if self.elemwise else self.Wy(y)
        gate = torch.sigmoid(self.Wx(x) + y)
        return self.scaling_correction * (1.0 - gate) * x + gate * y
class DecayResidual(Residual):
    """Residual connection whose mixing weights decay with layer depth."""

    def __init__(self, *args, power=0.5, l2=True):
        super().__init__(*args)
        self.power = power
        self.l2 = l2

    def forward(self, x, y, transposed):
        beta = self.i_layer ** (-self.power)
        # Preserve either the l2 norm or the l1 sum of the combination weights.
        alpha = (1.0 - beta**2) ** 0.5 if self.l2 else 1.0 - beta
        return alpha * x + beta * y
# Name -> residual-class lookup used for config-driven construction;
# single-letter keys are shorthand aliases for the full names below.
registry = {
    "F": Feedforward,
    "N": Feedforward,
    "R": Residual,
    "H": Highway,
    "D": DecayResidual,
    "A": Affine,
    "none": Feedforward,
    "ff": Feedforward,
    "feedforward": Feedforward,
    "residual": Residual,
    "highway": Highway,
    "decay": DecayResidual,
    "affine": Affine,
}
| 3,422 | 27.764706 | 87 | py |
espnet | espnet-master/espnet2/asr/state_spaces/block.py | # This code is derived from https://github.com/HazyResearch/state-spaces
"""Implements a full residual block around a black box layer.
Configurable options include:
normalization position: prenorm or postnorm
normalization type: batchnorm, layernorm etc.
subsampling/pooling
residual options: feedforward, residual, affine scalars, depth-dependent scaling, etc.
"""
from functools import partial
from torch import nn
import espnet2.asr.state_spaces.utils as utils
from espnet2.asr.state_spaces import registry
from espnet2.asr.state_spaces.base import SequenceModule
from espnet2.asr.state_spaces.components import (
DropoutNd,
Normalization,
StochasticDepth,
)
from espnet2.asr.state_spaces.pool import registry as pool_registry
from espnet2.asr.state_spaces.residual import registry as residual_registry
class SequenceResidualBlock(SequenceModule):
    """Residual block wrapper for black box layer.

    The SequenceResidualBlock class implements a generic
    (batch, length, d_input) -> (batch, length, d_input) transformation

    Args:
        d_input: Input feature dimension
        i_layer: Layer index, only needs to be passed into certain residuals like Decay
        prenorm: Normalize before the black box layer (True) or after the
            residual connection (False)
        dropout: Dropout for black box module
        tie_dropout: Tie dropout mask across sequence like nn.Dropout1d/nn.Dropout2d
        transposed: Transpose inputs so each layer receives (batch, dim, length)
        layer: Config for black box module
        residual: Config for residual function
        norm: Config for normalization layer
        pool: Config for pooling layer per stage
        drop_path: Drop ratio for stochastic depth
    """

    def __init__(
        self,
        d_input,
        i_layer=None,
        prenorm=True,
        dropout=0.0,
        tie_dropout=False,
        transposed=False,
        layer=None,
        residual=None,
        norm=None,
        pool=None,
        drop_path=0.0,
    ):
        super().__init__()
        self.i_layer = i_layer
        self.d_input = d_input
        # self.layer = utils.instantiate(registry.layer, layer, d_input)
        if layer is None:
            layer = {}
        # Black box layer built from the registry config.
        self.layer = utils.instantiate(registry.layer, layer, d_input)
        self.prenorm = prenorm
        self.transposed = transposed

        # Residual
        # d_residual is the output dimension after residual
        if residual is None:
            self.residual = None
            self.d_residual = self.layer.d_output
        else:
            self.residual = utils.instantiate(
                residual_registry, residual, i_layer, d_input, self.layer.d_output
            )
            self.d_residual = self.residual.d_output

        # Normalization
        # Pre-norm normalizes the block input; post-norm the residual output.
        d_norm = d_input if self.prenorm else self.d_residual
        # We don't use config to directly instantiate
        # since Normalization has some special cases
        if norm is None:
            self.norm = None
        elif isinstance(norm, str):
            self.norm = Normalization(d_norm, transposed=self.transposed, _name_=norm)
        else:
            self.norm = Normalization(d_norm, transposed=self.transposed, **norm)

        # Pool
        self.pool = utils.instantiate(
            pool_registry, pool, self.d_residual, transposed=self.transposed
        )

        # Dropout
        dropout_cls = (
            partial(DropoutNd, transposed=self.transposed)
            if tie_dropout
            else nn.Dropout
        )
        self.drop = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()

        # Stochastic depth
        self.drop_path = (
            StochasticDepth(drop_path, mode="row") if drop_path > 0.0 else nn.Identity()
        )

    @property
    def d_output(self):
        # Pooling (if any) determines the final output dimension.
        return self.pool.d_output if self.pool is not None else self.d_residual

    @property
    def d_state(self):
        return self.layer.d_state

    @property
    def state_to_tensor(self):
        return self.layer.state_to_tensor

    def default_state(self, *args, **kwargs):
        return self.layer.default_state(*args, **kwargs)

    def forward(self, x, state=None, **kwargs):
        y = x

        # Pre-norm
        if self.norm is not None and self.prenorm:
            y = self.norm(y)

        # Black box layer
        y, state = self.layer(y, state=state, **kwargs)

        # Residual
        # Dropout and stochastic depth apply to the transform branch only.
        if self.residual is not None:
            y = self.residual(x, self.drop_path(self.drop(y)), self.transposed)

        # Post-norm
        if self.norm is not None and not self.prenorm:
            y = self.norm(y)

        # Pool
        if self.pool is not None:
            y = self.pool(y)

        return y, state

    def step(self, x, state, **kwargs):
        y = x

        # Pre-norm
        if self.norm is not None and self.prenorm:
            y = self.norm.step(y)

        # Black box layer
        y, state = self.layer.step(y, state, **kwargs)

        # Residual
        if self.residual is not None:
            y = self.residual(
                x, y, transposed=False
            )  # NOTE this would not work with concat residual function (catformer)

        # Post-norm
        if self.norm is not None and not self.prenorm:
            y = self.norm.step(y)

        # Pool
        if self.pool is not None:
            y = self.pool(y)

        return y, state
| 5,330 | 29.289773 | 88 | py |
espnet | espnet-master/espnet2/asr/state_spaces/model.py | # This code is derived from https://github.com/HazyResearch/state-spaces
from functools import partial
import torch
import torch.nn as nn
from einops import rearrange
from espnet2.asr.state_spaces.base import SequenceModule
from espnet2.asr.state_spaces.block import SequenceResidualBlock
from espnet2.asr.state_spaces.components import DropoutNd, Normalization
from espnet2.asr.state_spaces.utils import to_dict, to_list
class SequenceModel(SequenceModule):
    """Isotropic deep sequence model backbone, in the style of ResNets / Transformers.

    The SequenceModel class implements a generic
    (batch, length, d_input) -> (batch, length, d_output) transformation

    Args:
        d_model: Resize input (useful for deep models with residuals)
        n_layers: Number of layers
        transposed: Transpose inputs so each layer receives (batch, dim, length)
        dropout: Dropout parameter applied on every residual and every layer
        tie_dropout: Tie dropout mask across sequence like nn.Dropout1d/nn.Dropout2d
        prenorm: Pre-norm vs. post-norm
        n_repeat: Each layer is repeated n times per stage before applying pooling
        layer: Layer config, must be specified
        residual: Residual config
        norm: Normalization config (e.g. layer vs batch)
        pool: Config for pooling layer per stage
        track_norms: Log norms of each layer output
        dropinp: Input dropout
        drop_path: Stochastic depth for each residual path
    """

    def __init__(
        self,
        d_model,
        n_layers=1,
        transposed=False,
        dropout=0.0,
        tie_dropout=False,
        prenorm=True,
        n_repeat=1,
        layer=None,
        residual=None,
        norm=None,
        pool=None,
        track_norms=True,
        dropinp=0.0,
        drop_path=0.0,
    ):
        super().__init__()
        # Save arguments needed for forward pass
        self.d_model = d_model
        self.transposed = transposed
        self.track_norms = track_norms

        # Input dropout (not really used)
        dropout_fn = (
            partial(DropoutNd, transposed=self.transposed)
            if tie_dropout
            else nn.Dropout
        )
        self.drop = dropout_fn(dropinp) if dropinp > 0.0 else nn.Identity()

        layer = to_list(layer, recursive=False)

        # Some special arguments are passed into each layer
        # NOTE: these dicts are mutated in place and, after the list
        # multiplication below, the same dict objects are shared by all copies.
        for _layer in layer:
            # If layers don't specify dropout, add it
            if _layer.get("dropout", None) is None:
                _layer["dropout"] = dropout
            # Ensure all layers are shaped the same way
            _layer["transposed"] = transposed

        # Duplicate layers
        layers = layer * n_layers * n_repeat

        # Instantiate layers
        _layers = []
        d = d_model
        for i, layer in enumerate(layers):
            # Pool at the end of every n_repeat blocks
            pool_cfg = pool if (i + 1) % n_repeat == 0 else None
            block = SequenceResidualBlock(
                d,
                i + 1,
                prenorm=prenorm,
                dropout=dropout,
                tie_dropout=tie_dropout,
                transposed=transposed,
                layer=layer,
                residual=residual,
                norm=norm,
                pool=pool_cfg,
                drop_path=drop_path,
            )
            _layers.append(block)
            # Blocks may change the feature dimension (e.g. via pooling).
            d = block.d_output

        self.d_output = d
        self.layers = nn.ModuleList(_layers)
        # With pre-norm blocks a final normalization is needed; post-norm
        # blocks already normalize their output.
        if prenorm:
            if norm is None:
                self.norm = None
            elif isinstance(norm, str):
                self.norm = Normalization(
                    self.d_output, transposed=self.transposed, _name_=norm
                )
            else:
                self.norm = Normalization(
                    self.d_output, transposed=self.transposed, **norm
                )
        else:
            self.norm = nn.Identity()

    def forward(self, inputs, *args, state=None, **kwargs):
        # Inputs assumed to be (batch, sequence, dim)
        if self.transposed:
            inputs = rearrange(inputs, "b ... d -> b d ...")
        inputs = self.drop(inputs)

        # Track norms
        if self.track_norms:
            output_norms = [torch.mean(inputs.detach() ** 2)]

        # Apply layers
        outputs = inputs
        prev_states = [None] * len(self.layers) if state is None else state
        next_states = []
        for layer, prev_state in zip(self.layers, prev_states):
            outputs, state = layer(outputs, *args, state=prev_state, **kwargs)
            next_states.append(state)
            if self.track_norms:
                output_norms.append(torch.mean(outputs.detach() ** 2))
        if self.norm is not None:
            outputs = self.norm(outputs)

        if self.transposed:
            outputs = rearrange(outputs, "b d ... -> b ... d")

        if self.track_norms:
            metrics = to_dict(output_norms, recursive=False)
            self.metrics = {f"norm/{i}": v for i, v in metrics.items()}

        return outputs, next_states

    @property
    def d_state(self):
        # Total flattened state dimension across layers that expose one.
        d_states = [layer.d_state for layer in self.layers]
        return sum([d for d in d_states if d is not None])

    @property
    def state_to_tensor(self):
        # Slightly hacky way to implement this in a curried manner
        # (so that the function can be extracted from an instance)
        # Somewhat more sound may be to turn this into a
        # @staticmethod and grab subclasses using hydra.utils.get_class
        def fn(state):
            x = [
                _layer.state_to_tensor(_state)
                for (_layer, _state) in zip(self.layers, state)
            ]
            x = [_x for _x in x if _x is not None]
            return torch.cat(x, dim=-1)

        return fn

    def default_state(self, *batch_shape, device=None):
        return [
            layer.default_state(*batch_shape, device=device) for layer in self.layers
        ]

    def step(self, x, state, **kwargs):
        # Apply layers
        prev_states = [None] * len(self.layers) if state is None else state
        next_states = []
        for layer, prev_state in zip(self.layers, prev_states):
            x, state = layer.step(x, state=prev_state, **kwargs)
            next_states.append(state)

        x = self.norm(x)

        return x, next_states
| 6,438 | 33.61828 | 86 | py |
espnet | espnet-master/espnet2/asr/state_spaces/components.py | # This code is derived from https://github.com/HazyResearch/state-spaces
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from opt_einsum import contract
def stochastic_depth(input: torch.Tensor, p: float, mode: str, training: bool = True):
    """Apply stochastic depth.

    Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
    <https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
    branches of residual architectures.

    Args:
        input (Tensor[N, ...]): The input tensor or arbitrary dimensions with the first
            one being its batch i.e. a batch with ``N`` rows.
        p (float): probability of the input to be zeroed.
        mode (str): ``"batch"`` or ``"row"``.
            ``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
            randomly selected rows from the batch.
        training: apply stochastic depth if is ``True``. Default: ``True``

    Returns:
        Tensor[N, ...]: The randomly zeroed tensor.
    """
    if p < 0.0 or p > 1.0:
        raise ValueError(
            "drop probability has to be between 0 and 1, but got {}".format(p)
        )
    if mode not in ["batch", "row"]:
        raise ValueError(
            "mode has to be either 'batch' or 'row', but got {}".format(mode)
        )
    if not training or p == 0.0:
        return input

    survival_rate = 1.0 - p
    if mode == "row":
        # One keep/drop decision per batch row, broadcast over remaining dims.
        size = [input.shape[0]] + [1] * (input.ndim - 1)
    else:
        size = [1] * input.ndim
    noise = torch.empty(size, dtype=input.dtype, device=input.device)
    noise = noise.bernoulli_(survival_rate)
    # BUGFIX: guard the rescale. At p == 1.0 survival_rate is 0 and the
    # original unconditional div_ computed 0/0, returning NaNs instead of an
    # all-zero tensor. Matches torchvision.ops.stochastic_depth.
    if survival_rate > 0.0:
        noise.div_(survival_rate)
    return input * noise
class StochasticDepth(nn.Module):
    """Stochastic depth module.

    See :func:`stochastic_depth`.
    """

    def __init__(self, p: float, mode: str) -> None:
        # NOTE: need to upgrade to torchvision==0.11.0 to use StochasticDepth directly
        # from torchvision.ops import StochasticDepth
        super().__init__()
        self.p = p
        self.mode = mode

    def forward(self, input):
        return stochastic_depth(input, self.p, self.mode, self.training)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p}, mode={self.mode})"
class DropoutNd(nn.Module):
    """Dropout with an optionally tied mask across the length dimensions.

    tie: tie dropout mask across sequence lengths (Dropout1d/2d/3d)
    transposed: if True, inputs are (batch, dim, lengths...); otherwise the
        feature dimension is last and is moved into place before masking.
    """

    def __init__(self, p: float = 0.5, tie=True, transposed=True):
        super().__init__()
        if p < 0 or p >= 1:
            raise ValueError(
                "dropout probability has to be in [0, 1), " "but got {}".format(p)
            )
        self.p = p
        self.tie = tie
        self.transposed = transposed
        self.binomial = torch.distributions.binomial.Binomial(probs=1 - self.p)

    def forward(self, X):
        """Mask X: (batch, dim, lengths...); identity in eval mode."""
        if not self.training:
            return X
        if not self.transposed:
            X = rearrange(X, "b d ... -> b ... d")
        # torch.rand is far faster than Binomial.sample for mask generation.
        if self.tie:
            mask_shape = X.shape[:2] + (1,) * (X.ndim - 2)
        else:
            mask_shape = X.shape
        mask = torch.rand(*mask_shape, device=X.device) < 1.0 - self.p
        X = X * mask * (1.0 / (1 - self.p))
        if not self.transposed:
            X = rearrange(X, "b ... d -> b d ...")
        return X
def Activation(activation=None, size=None, dim=-1):
    """Instantiate an activation module by name.

    size is currently unused; dim selects the split/normalized dimension for
    "glu" and "ln".
    """
    if activation in [None, "id", "identity", "linear"]:
        return nn.Identity()
    if activation == "tanh":
        return nn.Tanh()
    if activation == "relu":
        return nn.ReLU()
    if activation == "gelu":
        return nn.GELU()
    if activation in ["swish", "silu"]:
        return nn.SiLU()
    if activation == "glu":
        return nn.GLU(dim=dim)
    if activation == "sigmoid":
        return nn.Sigmoid()
    if activation == "sqrelu":
        return SquaredReLU()
    if activation == "ln":
        return TransposedLN(dim)
    raise NotImplementedError(
        "hidden activation '{}' is not implemented".format(activation)
    )
def get_initializer(name, activation=None):
    """Return a weight-initializer callable matched to an activation.

    Args:
        name: one of "uniform", "normal", "xavier", "zero", "one".
        activation: activation name used to pick the Kaiming nonlinearity.

    Raises:
        NotImplementedError: for unsupported names or activations.
    """
    if activation in [None, "id", "identity", "linear", "modrelu"]:
        nonlinearity = "linear"
    elif activation in ["relu", "tanh", "sigmoid"]:
        nonlinearity = activation
    elif activation in ["gelu", "swish", "silu"]:
        nonlinearity = "relu"  # Close to ReLU so approximate with ReLU's gain
    else:
        raise NotImplementedError(
            f"get_initializer: activation {activation} not supported"
        )
    table = {
        "uniform": lambda: partial(
            torch.nn.init.kaiming_uniform_, nonlinearity=nonlinearity
        ),
        "normal": lambda: partial(
            torch.nn.init.kaiming_normal_, nonlinearity=nonlinearity
        ),
        "xavier": lambda: torch.nn.init.xavier_normal_,
        "zero": lambda: partial(torch.nn.init.constant_, val=0),
        "one": lambda: partial(torch.nn.init.constant_, val=1),
    }
    if name not in table:
        raise NotImplementedError(
            f"get_initializer: initializer type {name} not supported"
        )
    return table[name]()
def LinearActivation(
    d_input,
    d_output,
    bias=True,
    zero_bias_init=False,
    transposed=False,
    initializer=None,
    activation=None,
    activate=False,  # Apply activation as part of this module
    weight_norm=False,
    **kwargs,
):
    """Return a linear module, initialization, and activation."""
    # GLU halves its input, so produce twice the features up front.
    if activation == "glu":
        d_output *= 2
    # Core module: channel-first linear when transposed, else nn.Linear.
    cls = TransposedLinear if transposed else nn.Linear
    linear = cls(d_input, d_output, bias=bias, **kwargs)
    # Optional custom weight initialization.
    if initializer is not None:
        get_initializer(initializer, activation)(linear.weight)
    # Optionally zero the bias.
    if bias and zero_bias_init:
        nn.init.zeros_(linear.bias)
    # Optional weight-norm reparameterization.
    if weight_norm:
        linear = nn.utils.weight_norm(linear)
    # Optionally fuse the activation into the returned module.
    if activate and activation is not None:
        act = Activation(activation, d_output, dim=1 if transposed else -1)
        linear = nn.Sequential(linear, act)
    return linear
class SquaredReLU(nn.Module):
    """ReLU followed by squaring: ``relu(x) ** 2``."""

    def forward(self, x):
        clipped = F.relu(x)
        return clipped * clipped
class TransposedLinear(nn.Module):
    """Transposed linear module.

    Linear module on the second-to-last dimension.
    Assumes shape (B, D, L), where L can be 1 or more axis.
    """

    def __init__(self, d_input, d_output, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(d_output, d_input))
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))  # nn.Linear default init
        # nn.init.kaiming_uniform_(
        #     self.weight, nonlinearity='linear') # should be equivalent
        if bias:
            self.bias = nn.Parameter(torch.empty(d_output))
            bound = 1 / math.sqrt(d_input)
            nn.init.uniform_(self.bias, -bound, bound)
            # Exclude the bias from weight decay (consumed by the optimizer setup).
            setattr(self.bias, "_optim", {"weight_decay": 0.0})
        else:
            # Scalar sentinel kept for backward compatibility with code that
            # reads ``self.bias`` directly.
            self.bias = 0.0

    def forward(self, x):
        """Apply the channel-first linear map.

        Args:
            x: tensor of shape (B, d_input, L...).

        Returns:
            Tensor of shape (B, d_output, L...).
        """
        num_axis = len(x.shape[2:])  # num_axis in L, for broadcasting bias
        # torch.einsum is equivalent to the opt_einsum ``contract`` call used
        # elsewhere in this file for this simple two-operand expression.
        y = torch.einsum("bu...,vu->bv...", x, self.weight)
        # BUG FIX: with bias=False, ``self.bias`` is the float 0.0 which has
        # no ``.view`` method; only reshape-and-add when it is a tensor
        # (adding the 0.0 sentinel would be a no-op anyway).
        if isinstance(self.bias, torch.Tensor):
            y = y + self.bias.view(-1, *[1] * num_axis)
        return y
class TransposedLN(nn.Module):
    """LayerNorm over the channel (second) dimension.

    Assumes shape (B, D, L...), where L can be 1 or more axes. This is slow
    and a dedicated CUDA/Triton implementation should provide substantial
    end-to-end speedup.
    """

    def __init__(self, d, scalar=True):
        super().__init__()
        self.scalar = scalar
        if scalar:
            # Learned scalar shift (m) and scale (s), excluded from weight
            # decay via the `_optim` attribute.
            self.m = nn.Parameter(torch.zeros(1))
            self.s = nn.Parameter(torch.ones(1))
            setattr(self.m, "_optim", {"weight_decay": 0.0})
            setattr(self.s, "_optim", {"weight_decay": 0.0})
        else:
            self.ln = nn.LayerNorm(d)

    def forward(self, x):
        if not self.scalar:
            # Move channels last, apply LayerNorm, move channels back.
            normed = self.ln(rearrange(x, "b d ... -> b ... d"))
            return rearrange(normed, "b ... d -> b d ...")
        # Statistics over the channel dimension, then scalar affine.
        std, mean = torch.std_mean(x, dim=1, unbiased=False, keepdim=True)
        return (self.s / std) * (x - mean + self.m)
class Normalization(nn.Module):
    """Configurable normalization layer (layer/instance/batch/group/none).

    Args:
        d: feature dimension to normalize.
        transposed: if True the length dimension is last (B, D, L);
            otherwise it is second-to-last (B, L, D).
        _name_: which normalization variant to instantiate.
        **kwargs: forwarded to the underlying norm module.
    """

    def __init__(
        self,
        d,
        transposed=False,  # Length dimension is -1 or -2
        _name_="layer",
        **kwargs,
    ):
        super().__init__()
        self.transposed = transposed
        self._name_ = _name_
        if _name_ == "layer":
            self.channel = True  # Normalize over channel dimension
            if self.transposed:
                self.norm = TransposedLN(d, **kwargs)
            else:
                self.norm = nn.LayerNorm(d, **kwargs)
        elif _name_ == "instance":
            self.channel = False
            norm_args = {"affine": False, "track_running_stats": False}
            norm_args.update(kwargs)
            self.norm = nn.InstanceNorm1d(
                d, **norm_args
            )  # (True, True) performs very poorly
        elif _name_ == "batch":
            self.channel = False
            norm_args = {"affine": True, "track_running_stats": True}
            norm_args.update(kwargs)
            self.norm = nn.BatchNorm1d(d, **norm_args)
        elif _name_ == "group":
            self.channel = False
            # BUG FIX: kwargs must be splatted as keyword arguments;
            # ``*kwargs`` passed the dict's *keys* as positional arguments.
            self.norm = nn.GroupNorm(1, d, **kwargs)
        elif _name_ == "none":
            self.channel = True
            self.norm = nn.Identity()
        else:
            raise NotImplementedError

    def forward(self, x):
        # Flatten extra length axes so the norm sees a 3D-like tensor, then
        # restore the original shape at the end.
        shape = x.shape
        if self.transposed:
            x = rearrange(x, "b d ... -> b d (...)")
        else:
            # BUG FIX: pattern previously read "b (...)d ", which is not a
            # valid einops expression (missing space before the last axis).
            x = rearrange(x, "b ... d -> b (...) d")
        # The cases of LayerNorm / no normalization
        # are automatically handled in all cases
        # Instance/Batch Norm work automatically with transposed axes
        if self.channel or self.transposed:
            x = self.norm(x)
        else:
            # Channel-last input but a channel-first norm: transpose around it.
            x = x.transpose(-1, -2)
            x = self.norm(x)
            x = x.transpose(-1, -2)
        x = x.view(shape)
        return x

    def step(self, x, **kwargs):
        """Normalize a single step (no length dimension) by unsqueezing it."""
        assert self._name_ in ["layer", "instance", "batch", "group", "none"]
        if self.transposed:
            x = x.unsqueeze(-1)
        x = self.forward(x)
        if self.transposed:
            x = x.squeeze(-1)
        return x
class TSNormalization(nn.Module):
    """Time-series scaling by context statistics; remembers scale for inversion."""

    def __init__(self, method, horizon):
        super().__init__()
        self.method = method
        self.horizon = horizon

    def forward(self, x):
        # x must be BLD
        if self.method == "mean":
            # Scale by mean magnitude over the context (pre-horizon) steps.
            self.scale = x.abs()[:, : -self.horizon].mean(dim=1)[:, None, :]
            return x / self.scale
        if self.method == "last":
            # Scale by the magnitude of the final context step.
            self.scale = x.abs()[:, -self.horizon - 1][:, None, :]
            return x / self.scale
        return x
class TSInverseNormalization(nn.Module):
    """Undo a TSNormalization using the scale it recorded."""

    def __init__(self, method, normalizer):
        super().__init__()
        self.method = method
        self.normalizer = normalizer

    def forward(self, x):
        # Only the scaling methods need inversion; anything else is identity.
        if self.method in ("mean", "last"):
            return x * self.normalizer.scale
        return x
class ReversibleInstanceNorm1dInput(nn.Module):
    """Instance normalization whose statistics are stored for later inversion."""

    def __init__(self, d, transposed=False):
        super().__init__()
        # BLD if transposed is False, otherwise BDL
        self.transposed = transposed
        self.norm = nn.InstanceNorm1d(d, affine=True, track_running_stats=False)

    def forward(self, x):
        channels_last = not self.transposed
        if channels_last:
            x = x.transpose(-1, -2)
        # Record per-instance std/mean so the paired output module can invert;
        # the epsilon guards against division by zero on constant channels.
        self.s, self.m = torch.std_mean(x, dim=-1, unbiased=False, keepdim=True)
        self.s += 1e-4
        x = (x - self.m) / self.s
        # x = self.norm.weight.unsqueeze(-1) * x + self.norm.bias.unsqueeze(-1)
        if channels_last:
            return x.transpose(-1, -2)
        return x
class ReversibleInstanceNorm1dOutput(nn.Module):
    """Invert a paired ReversibleInstanceNorm1dInput using its stored stats."""

    def __init__(self, norm_input):
        super().__init__()
        self.transposed = norm_input.transposed
        self.weight = norm_input.norm.weight
        self.bias = norm_input.norm.bias
        self.norm_input = norm_input

    def forward(self, x):
        channels_last = not self.transposed
        if channels_last:
            x = x.transpose(-1, -2)
        # x = (x - self.bias.unsqueeze(-1))/self.weight.unsqueeze(-1)
        # Undo normalization with the statistics captured on the way in.
        x = x * self.norm_input.s + self.norm_input.m
        if channels_last:
            return x.transpose(-1, -2)
        return x
| 13,449 | 31.177033 | 88 | py |
espnet | espnet-master/espnet2/asr/state_spaces/attention.py | """Multi-Head Attention layer definition."""
import math
import numpy
import torch
from torch import nn
from espnet2.asr.state_spaces.base import SequenceModule
class MultiHeadedAttention(SequenceModule):
    """Multi-Head Attention layer inheriting SequenceModule.
    Comparing default MHA module in ESPnet, this module returns additional dummy state
    and has step function for autoregressive inference.
    Args:
        n_feat (int): The number of features.
        n_head (int): The number of heads.
        dropout (float): Dropout rate.
        transposed (bool): Accepted for interface compatibility; not used here.
    """
    def __init__(self, n_feat, n_head, dropout=0.0, transposed=False, **kwargs):
        """Construct an MultiHeadedAttention object."""
        super().__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        # Last-computed attention weights (for inspection); set in
        # forward_attention.
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)
        self.d_output = n_feat
    def forward_qkv(self, query, key, value):
        """Transform query, key and value.
        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
        Returns:
            torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
            torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
        """
        n_batch = query.size(0)
        q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
        k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
        v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
        q = q.transpose(1, 2)  # (batch, head, time1, d_k)
        k = k.transpose(1, 2)  # (batch, head, time2, d_k)
        v = v.transpose(1, 2)  # (batch, head, time2, d_k)
        return q, k, v
    def forward_attention(self, value, scores, mask):
        """Compute attention context vector.
        Args:
            value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).
        """
        n_batch = value.size(0)
        if mask is not None:
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            # Most negative representable value of the score dtype, so masked
            # positions get ~zero probability after the softmax.
            min_value = float(
                numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
            )
            scores = scores.masked_fill(mask, min_value)
            # Re-zero masked positions after softmax in case a whole row was
            # masked (softmax over all -inf rows is uniform otherwise).
            self.attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0
            )  # (batch, head, time1, time2)
        else:
            self.attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
        p_attn = self.dropout(self.attn)
        x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
        x = (
            x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
        )  # (batch, time1, d_model)
        return self.linear_out(x)  # (batch, time1, d_model)
    def forward(self, query, memory=None, mask=None, *args, **kwargs):
        """Compute scaled dot product attention.
        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            memory (torch.Tensor): Key/value source tensor (#batch, time2, size);
                defaults to ``query`` (self-attention) when None.
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).
        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            None: Dummy state to satisfy the SequenceModule interface.
        """
        # self-attention
        if memory is None:
            memory = query
        q, k, v = self.forward_qkv(query=query, key=memory, value=memory)
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        return self.forward_attention(v, scores, mask), None
    def step(self, query, state, memory=None, mask=None, **kwargs):
        # Single-step autoregressive inference: full forward on a length-1
        # query, squeezing out the time axis; state is passed through.
        if memory is None:
            memory = query
        return self.forward(query, memory, mask=mask, **kwargs)[0].squeeze(1), state
| 4,629 | 36.642276 | 86 | py |
espnet | espnet-master/espnet2/asr/state_spaces/pool.py | # This code is derived from https://github.com/HazyResearch/state-spaces
"""Implements downsampling and upsampling on sequences."""
import torch
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
from torch import nn
from espnet2.asr.state_spaces.base import SequenceModule
from espnet2.asr.state_spaces.components import LinearActivation
"""Simple pooling functions that just downsample or repeat
stride: Subsample on the layer dimension
expand: Repeat on the feature dimension
"""
def downsample(x, stride=1, expand=1, transposed=False):
    """Subsample the length axis and/or repeat the feature axis.

    Args:
        x: (B, L, D) if not transposed, (B, D, L) if transposed; may be None.
        stride: keep every ``stride``-th position along the length axis.
        expand: repeat each feature ``expand`` times.
        transposed: whether the length axis is last.
    """
    if x is None:
        return None
    if stride > 1:
        assert x.ndim == 3, (
            "Downsampling with higher-dimensional inputs is currently not supported."
            "It is recommended to use average or spectral pooling instead."
        )
        x = x[..., 0::stride] if transposed else x[..., 0::stride, :]
    if expand > 1:
        pattern = "b d ... -> b (d e) ..." if transposed else "b ... d -> b ... (d e)"
        x = repeat(x, pattern, e=expand)
    return x
def upsample(x, stride=1, expand=1, transposed=False):
    """Inverse of ``downsample``: average expanded features, repeat positions."""
    if x is None:
        return None
    if expand > 1:
        # Collapse the feature expansion by averaging groups of ``expand``.
        pattern = "... (d e) l -> ... d l" if transposed else "... (d e) -> ... d"
        x = reduce(x, pattern, "mean", e=expand)
    if stride > 1:
        # Repeat each position ``stride`` times along the length axis.
        pattern = "... l -> ... (l e)" if transposed else "... l d -> ... (l e) d"
        x = repeat(x, pattern, e=stride)
    return x
class DownSample(SequenceModule):
    """Strided subsampling (and optional feature repeat) as a SequenceModule."""

    def __init__(self, d_input, stride=1, expand=1, transposed=True):
        super().__init__()
        self.d_input = d_input
        self.stride = stride
        self.expand = expand
        self.transposed = transposed

    def forward(self, x):
        # BUG FIX: ``downsample`` takes (x, stride, expand, transposed); the
        # previous call passed a stray extra positional ``False`` (leftover
        # of an older signature), raising TypeError on every call.
        return downsample(x, self.stride, self.expand, self.transposed)

    def step(self, x, state, **kwargs):
        """Single-step recurrence; only the identity configuration is supported."""
        if self.stride > 1 or self.expand > 1:
            raise NotImplementedError
        return x, state

    @property
    def d_output(self):
        # Features are repeated ``expand`` times by downsample().
        return self.d_input * self.expand
class DownAvgPool(SequenceModule):
    """Average-pool along the length axes, optionally repeating features."""

    def __init__(self, d_input, stride=1, expand=1, transposed=True):
        super().__init__()
        self.d_input = d_input
        self.stride = stride
        self.expand = expand
        self.transposed = transposed

    def forward(self, x):
        # Work in channel-first layout internally.
        if not self.transposed:
            x = rearrange(x, "b ... d -> b d ...")
        if self.stride > 1:
            # einops appears slower than F
            if x.ndim == 3:
                x = F.avg_pool1d(x, self.stride, self.stride)
            elif x.ndim == 4:
                x = F.avg_pool2d(x, self.stride, self.stride)
            else:
                # Build e.g. "b d (l0 2) (l1 2) -> b d l0 l1" for >2 length axes.
                n_axes = x.ndim - 2
                lhs = " ".join(f"(l{i} {self.stride})" for i in range(n_axes))
                rhs = " ".join(f"l{i}" for i in range(n_axes))
                x = reduce(x, "b d " + lhs + " -> b d " + rhs, "mean")
        if self.expand > 1:
            x = repeat(x, "b d ... -> b (d e) ...", e=self.expand)
        if not self.transposed:
            x = rearrange(x, "b d ... -> b ... d")
        return x

    def step(self, x, state, **kwargs):
        """Single-step recurrence; only the identity configuration is supported."""
        if self.stride > 1 or self.expand > 1:
            raise NotImplementedError
        return x, state

    @property
    def d_output(self):
        return self.d_input * self.expand
class DownSpectralPool(SequenceModule):
    """Downsample length axes by keeping only the lowest Fourier modes.

    NOTE(review): the code applies ``torch.fft.ifftn`` both into and out of
    the frequency domain (instead of an fftn/ifftn pair); this mirrors the
    upstream state-spaces implementation — confirm before changing.
    """
    def __init__(self, d_input, stride=1, expand=1, transposed=True):
        super().__init__()
        self.d_input = d_input
        self.stride = stride
        self.expand = expand
        self.transposed = transposed
    def forward(self, x):
        """Forward pass.
        x: (B, L..., D)
        """
        if not self.transposed:
            x = rearrange(x, "b ... d -> b d ...")
        shape = x.shape[2:]
        x_f = torch.fft.ifftn(x, s=shape)
        for axis, l in enumerate(shape):
            assert l % self.stride == 0, "input length must be divisible by stride"
            new_l = l // self.stride
            # Keep the first ceil(new_l/2) non-negative modes and the last
            # floor(new_l/2) negative modes along this axis.
            idx = torch.cat(
                [torch.arange(0, new_l - new_l // 2), l + torch.arange(-new_l // 2, 0)]
            ).to(x_f.device)
            x_f = torch.index_select(x_f, 2 + axis, idx)
        x = torch.fft.ifftn(x_f, s=[length // self.stride for length in shape])
        x = x.real
        if self.expand > 1:
            x = repeat(x, "b d ... -> b (d e) ...", e=self.expand)
        if not self.transposed:
            x = rearrange(x, "b d ... -> b ... d")
        return x
    def step(self, x, state, **kwargs):
        # Single-step recurrence; only the identity configuration is supported.
        if self.stride > 1 or self.expand > 1:
            raise NotImplementedError
        return x, state
    @property
    def d_output(self):
        return self.d_input * self.expand
class UpSample(nn.Module):
    """Length-repeat / feature-average upsampling wrapper around ``upsample``."""

    def __init__(self, d_input, stride=1, expand=1, transposed=True):
        super().__init__()
        self.d_input = d_input
        self.stride = stride
        self.expand = expand
        self.transposed = transposed

    def forward(self, x):
        return upsample(x, self.stride, self.expand, self.transposed)

    @property
    def d_output(self):
        # Upsampling averages groups of ``expand`` features, shrinking D.
        return self.d_input // self.expand

    def step(self, x, state, **kwargs):
        """Single-step recurrence; only the identity configuration is supported."""
        if self.stride > 1 or self.expand > 1:
            raise NotImplementedError
        return x, state
""" Pooling functions with trainable parameters """
# For the flexible backbone SequenceModel
class DownLinearPool(SequenceModule):
    """Fold ``stride`` adjacent steps into features and mix with a linear layer."""

    def __init__(self, d_input, stride=1, expand=1, transposed=True):
        super().__init__()
        self.d_input = d_input
        self.stride = stride
        self.expand = expand
        self.transposed = transposed
        self.linear = LinearActivation(
            d_input * stride,
            d_input * expand,
            transposed=transposed,
        )

    def forward(self, x):
        # Fold every ``stride`` consecutive positions into the feature axis,
        # then project back to d_input * expand features.
        if self.transposed:
            folded = rearrange(x, "... h (l s) -> ... (h s) l", s=self.stride)
        else:
            folded = rearrange(x, "... (l s) h -> ... l (h s)", s=self.stride)
        return self.linear(folded)

    def step(self, x, state, **kwargs):
        """Single-step recurrence; only the identity configuration is supported."""
        if self.stride > 1 or self.expand > 1:
            raise NotImplementedError
        return x, state

    @property
    def d_output(self):
        return self.d_input * self.expand
""" Pooling functions with trainable parameters """
class DownPool2d(SequenceModule):
    """2D average pooling followed by a linear projection.

    NOTE(review): this class appears unfinished/broken as written:
    - ``self.pool`` is accidentally a 1-tuple (trailing comma below), so
      ``self.pool(x)`` would raise TypeError;
    - ``self.transposed`` is never assigned, so ``forward`` would raise
      AttributeError;
    - ``forward`` has no return statement (implicitly returns None).
    It is not referenced by the registry below — likely dead code.
    """
    def __init__(self, d_input, d_output, stride=1, transposed=True, weight_norm=True):
        super().__init__()
        self.linear = LinearActivation(
            d_input,
            d_output,
            transposed=transposed,
            weight_norm=weight_norm,
        )
        self.pool = (nn.AvgPool2d(kernel_size=stride, stride=stride),)
    def forward(self, x):
        if self.transposed:
            x = self.pool(x)
# DownLinearPool is used by the registry (for isotropic backbone)
# DownPool is essentially the same as DownLinearPool. These should be consolidated
class DownPool(SequenceModule):
    """Linear pooling over ``stride`` steps (essentially DownLinearPool)."""

    def __init__(
        self,
        d_input,
        d_output=None,
        expand=None,
        stride=1,
        transposed=True,
        weight_norm=True,
        initializer=None,
        activation=None,
    ):
        super().__init__()
        # Exactly one of d_output / expand must be provided.
        assert (d_output is None) + (expand is None) == 1
        if d_output is None:
            d_output = d_input * expand
        self.d_output = d_output
        self.stride = stride
        self.transposed = transposed
        self.linear = LinearActivation(
            d_input * stride,
            d_output,
            transposed=transposed,
            initializer=initializer,
            weight_norm=weight_norm,
            activation=activation,
            activate=True if activation is not None else False,
        )

    def forward(self, x):
        # Fold every ``stride`` consecutive positions into the feature axis,
        # then project; returns (output, dummy state).
        if self.transposed:
            x = rearrange(x, "... h (l s) -> ... (h s) l", s=self.stride)
        else:
            x = rearrange(x, "... (l s) h -> ... l (h s)", s=self.stride)
        return self.linear(x), None

    def step(self, x, state, **kwargs):
        """Step one time step as a recurrent model.

        Buffers inputs in ``state`` until ``stride`` of them are collected,
        then emits one pooled output and resets the buffer.
        x: (..., H)
        """
        if x is None:
            return None, state
        state.append(x)
        if len(state) < self.stride:
            return None, state
        stacked = rearrange(torch.stack(state, dim=-1), "... h s -> ... (h s)")
        if self.transposed:
            stacked = stacked.unsqueeze(-1)
        out = self.linear(stacked)
        if self.transposed:
            out = out.squeeze(-1)
        return out, []

    def default_state(self, *batch_shape, device=None):
        # The recurrent state is just the buffer of pending inputs.
        return []
class UpPool(SequenceModule):
    """Linear unpooling: project each step to ``stride`` output steps, causally."""
    def __init__(
        self,
        d_input,
        d_output,
        stride,
        transposed=True,
        weight_norm=True,
        initializer=None,
        activation=None,
    ):
        super().__init__()
        self.d_input = d_input
        # Stored under a private name; exposed via the d_output property below.
        self._d_output = d_output
        self.stride = stride
        self.transposed = transposed
        self.linear = LinearActivation(
            d_input,
            d_output * stride,
            transposed=transposed,
            initializer=initializer,
            weight_norm=weight_norm,
            activation=activation,
            activate=True if activation is not None else False,
        )
    def forward(self, x, skip=None):
        x = self.linear(x)
        # Drop the final step and left-pad with a zero so that output step t
        # only depends on inputs strictly before t (causality), then unfold
        # the stride-times-wider features back into the length axis.
        if self.transposed:
            x = F.pad(x[..., :-1], (1, 0))  # Shift to ensure causality
            x = rearrange(x, "... (h s) l -> ... h (l s)", s=self.stride)
        else:
            x = F.pad(x[..., :-1, :], (0, 0, 1, 0))  # Shift to ensure causality
            x = rearrange(x, "... l (h s) -> ... (l s) h", s=self.stride)
        if skip is not None:
            x = x + skip
        return x, None
    def step(self, x, state, **kwargs):
        """Step one time step as a recurrent model.
        x: (..., H)
        The state holds the ``stride`` pending outputs; one is emitted per
        call, and a new input is consumed only when the buffer empties.
        """
        assert len(state) > 0
        y, state = state[0], state[1:]
        if len(state) == 0:
            assert x is not None
            if self.transposed:
                x = x.unsqueeze(-1)
            x = self.linear(x)
            if self.transposed:
                x = x.squeeze(-1)
            # Split the projected features into ``stride`` future outputs.
            x = rearrange(x, "... (h s) -> ... h s", s=self.stride)
            state = list(torch.unbind(x, dim=-1))
        else:
            assert x is None
        return y, state
    def default_state(self, *batch_shape, device=None):
        # Initial buffer of ``stride`` zero outputs (matches the causal shift).
        state = torch.zeros(
            batch_shape + (self.d_output, self.stride), device=device
        )  # (batch, h, s)
        state = list(torch.unbind(state, dim=-1))  # List of (..., H)
        return state
    @property
    def d_output(self):
        return self._d_output
# Registry mapping config names to downsampling layer classes.
registry = {
    "sample": DownSample,
    "pool": DownAvgPool,
    "linear": DownLinearPool,
    "spectral": DownSpectralPool,
}
| 11,292 | 28.030848 | 87 | py |
espnet | espnet-master/espnet2/asr/state_spaces/s4.py | # This code is derived from https://github.com/HazyResearch/state-spaces
"""Standalone version of Structured (Sequence) State Space (S4) model."""
import logging
import math
import os
from functools import wraps
# from pytorch_lightning.utilities import rank_zero_only
from typing import Any, Callable, Optional
import numpy as np
import opt_einsum as oe
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from espnet2.asr.state_spaces.components import Activation, DropoutNd, LinearActivation
contract = oe.contract
contract_expression = oe.contract_expression
def rank_zero_only(fn: Callable) -> Callable:
    """Decorator function from PyTorch Lightning.

    Wrap ``fn`` so it only executes when the global rank (stored on this
    function as ``rank_zero_only.rank``) is 0; elsewhere it returns None.
    """

    @wraps(fn)
    def wrapped_fn(*args: Any, **kwargs: Any) -> Optional[Any]:
        if rank_zero_only.rank != 0:
            return None
        return fn(*args, **kwargs)

    return wrapped_fn
def _get_rank() -> int:
    """Return the distributed rank from the environment (0 if unset).

    SLURM_PROCID can be set even if SLURM is not managing the multiprocessing,
    therefore LOCAL_RANK needs to be checked first.
    """
    for key in ("RANK", "LOCAL_RANK", "SLURM_PROCID", "JSM_NAMESPACE_RANK"):
        value = os.environ.get(key)
        if value is not None:
            return int(value)
    return 0
# add the attribute to the function but don't overwrite
# in case Trainer has already set it
# (mirrors the behavior of pytorch_lightning.utilities.rank_zero_only)
rank_zero_only.rank = getattr(rank_zero_only, "rank", _get_rank())
def get_logger(name=__name__, level=logging.INFO) -> logging.Logger:
    """Initialize multi-GPU-friendly python logger."""
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Wrap every logging method with the rank-zero decorator so that in a
    # multi-GPU setup each message is emitted once instead of per process.
    for method in (
        "debug",
        "info",
        "warning",
        "error",
        "exception",
        "fatal",
        "critical",
    ):
        setattr(logger, method, rank_zero_only(getattr(logger, method)))
    return logger
# Module-level, rank-zero-aware logger used for the install hints below.
log = get_logger(__name__)
""" Cauchy and Vandermonde kernels """
# Prefer the compiled CUDA kernel for Cauchy multiplication when available;
# ``has_cauchy_extension`` gates its use in the S4 kernel code.
try:  # Try CUDA extension
    from .cauchy import cauchy_mult
    has_cauchy_extension = True
except ImportError:
    log.warning(
        "CUDA extension for cauchy multiplication not found."
        " Please install it via `cd /path/to/espnet/tools && . ./activate_python.sh"
        " && ./installers/install_cauchy_mult.sh`."
        " This should speed up end-to-end training by 10-50%"
    )
    has_cauchy_extension = False
# Second preference: pykeops (lazy, memory-efficient GPU reductions). If it
# is missing, fall back to naive dense PyTorch implementations below.
try:  # Try pykeops
    import pykeops  # noqa
    from pykeops.torch import Genred
    has_pykeops = True
    log.info("Pykeops installation found.")
    def _broadcast_dims(*tensors):
        # Left-pad every tensor's shape with 1s to a common rank so keops
        # can broadcast them against each other.
        max_dim = max([len(tensor.shape) for tensor in tensors])
        tensors = [
            tensor.view((1,) * (max_dim - len(tensor.shape)) + tensor.shape)
            for tensor in tensors
        ]
        return tensors
    def cauchy_conj(v, z, w):
        """Pykeops version."""
        # Sum over conjugate pairs: 2 * Re-part handled via the factor below.
        expr_num = "z * ComplexReal(v) - Real2Complex(Sum(v * w))"
        expr_denom = "ComplexMult(z-w, z-Conj(w))"
        cauchy_mult = Genred(
            f"ComplexDivide({expr_num}, {expr_denom})",
            [
                "v = Vj(2)",
                "z = Vi(2)",
                "w = Vj(2)",
            ],
            reduction_op="Sum",
            axis=1,
        )
        v, z, w = _broadcast_dims(v, z, w)
        # keops works on real (..., 2) views of complex tensors.
        v = _c2r(v)
        z = _c2r(z)
        w = _c2r(w)
        r = 2 * cauchy_mult(v, z, w, backend="GPU")
        return _r2c(r)
    def log_vandermonde(v, x, L):
        # Computes 2 * Re( sum_n v_n * exp(x_n * l) ) for l in [0, L).
        expr = "ComplexMult(v, ComplexExp(ComplexMult(x, l)))"
        vandermonde_mult = Genred(
            expr,
            [
                "v = Vj(2)",
                "x = Vj(2)",
                "l = Vi(2)",
            ],
            reduction_op="Sum",
            axis=1,
        )
        length = torch.arange(L).to(x)
        v, x, length = _broadcast_dims(v, x, length)
        v = _c2r(v)
        x = _c2r(x)
        length = _c2r(length)
        r = vandermonde_mult(v, x, length, backend="GPU")
        return 2 * _r2c(r).real
    def log_vandermonde_transpose(u, v, x, L):
        """Compute Vandermonde product.
        u: ... H L
        v: ... H N
        x: ... H N
        Returns: ... H N
        V = Vandermonde(a, L) : (H N L)
        contract_L(V * u * v)
        """
        expr = "ComplexMult(ComplexMult(v, u), ComplexExp(ComplexMult(x, l)))"
        vandermonde_mult = Genred(
            expr,
            [
                "u = Vj(2)",
                "v = Vi(2)",
                "x = Vi(2)",
                "l = Vj(2)",
            ],
            reduction_op="Sum",
            axis=1,
        )
        length = torch.arange(L).to(x)
        u, v, x, length = _broadcast_dims(u, v, x, length)
        u = _c2r(u)
        v = _c2r(v)
        x = _c2r(x)
        length = _c2r(length)
        r = vandermonde_mult(u, v, x, length, backend="GPU")
        return _r2c(r)
except ImportError:
    has_pykeops = False
    if not has_cauchy_extension:
        log.warning(
            "Falling back on slow Cauchy kernel. "
            "Install at least one of pykeops or the CUDA extension for efficiency."
        )
    def cauchy_naive(v, z, w):
        """Naive version.
        v, w: (..., N)
        z: (..., L)
        returns: (..., L)
        """
        # Materializes the full (..., N, L) Cauchy matrix — memory-hungry.
        cauchy_matrix = v.unsqueeze(-1) / (
            z.unsqueeze(-2) - w.unsqueeze(-1)
        )  # (... N L)
        return torch.sum(cauchy_matrix, dim=-2)
    # Vandermonde functions
    log.warning(
        "Falling back on slow Vandermonde kernel. "
        "Install pykeops for improved memory efficiency."
    )
    def log_vandermonde(v, x, L):
        r"""Compute Vandermonde product.
        v: (..., N)
        x: (..., N)
        returns: (..., L) \sum v x^l
        """
        # Dense fallback: builds the full (..., N, L) Vandermonde matrix.
        vandermonde_matrix = torch.exp(
            x.unsqueeze(-1) * torch.arange(L).to(x)
        )  # (... N L)
        vandermonde_prod = contract(
            "... n, ... n l -> ... l", v, vandermonde_matrix
        )  # (... L)
        return 2 * vandermonde_prod.real
    def log_vandermonde_transpose(u, v, x, L):
        # Dense fallback of the transposed product (see pykeops docstring).
        vandermonde_matrix = torch.exp(
            x.unsqueeze(-1) * torch.arange(L).to(x)
        )  # (... N L)
        vandermonde_prod = contract(
            "... l, ... n, ... n l -> ... n", u.to(x), v.to(x), vandermonde_matrix
        )  # (... L)
        return vandermonde_prod
def _conj(x):
    """Concatenate ``x`` with its complex conjugate along the last dim."""
    return torch.cat((x, torch.conj(x)), dim=-1)
# Shorthand converters between complex tensors and their (..., 2) real views.
_c2r = torch.view_as_real
_r2c = torch.view_as_complex
# torch >= 1.10 must materialize the lazy conjugate bit explicitly.
if tuple(map(int, torch.__version__.split(".")[:2])) >= (1, 10):
    def _resolve_conj(x):
        return x.conj().resolve_conj()
else:
    def _resolve_conj(x):
        return x.conj()
""" Misc functional utilities """
def power(L, A, v=None):
    """Compute A^L and, optionally, the scan sum_i A^i v_i.

    A: (..., N, N)
    v: (..., N, L)
    Returns A^L, or (A^L, scan) when ``v`` is given.
    """
    # Square-and-multiply: accumulate A^L into ``result`` while caching the
    # intermediate squarings, which the reduction below reuses in reverse.
    result = torch.eye(A.shape[-1]).to(A)  # , dtype=A.dtype, device=A.device)
    squarings = [A]
    length = 1
    while True:
        if L % 2 == 1:
            result = squarings[-1] @ result
        L //= 2
        if L == 0:
            break
        length *= 2
        squarings.append(squarings[-1] @ squarings[-1])
    if v is None:
        return result
    # Invariants at this point:
    #   squarings[-1] == A^length
    #   length == largest power of 2 at most the original L
    # A reverse divide-and-conquer reduction is used because it needs fewer
    # padding steps for non-power-of-2 L and touches contiguous arrays.
    # Edge case for non-power-of-2 L (no-op when length equals L):
    k = v.size(-1) - length
    v_ = squarings.pop() @ v[..., length:]
    v = v[..., :length]
    v[..., :k] = v[..., :k] + v_
    # Pairwise reduction for the remaining power-of-2 length.
    while v.size(-1) > 1:
        v = rearrange(v, "... (z l) -> ... z l", z=2)
        v = v[..., 0, :] + squarings.pop() @ v[..., 1, :]
    return result, v.squeeze(-1)
""" HiPPO utilities """
def transition(measure, N):
    """A, B transition matrices for different measures.

    Returns the (N, N) state matrix A and (N, 1) input matrix B of the
    continuous-time HiPPO SSM associated with ``measure``.
    """
    # Legendre (translated)
    if measure == "legt":
        Q = np.arange(N, dtype=np.float64)
        R = (2 * Q + 1) ** 0.5
        j, i = np.meshgrid(Q, Q)
        A = R[:, None] * np.where(i < j, (-1.0) ** (i - j), 1) * R[None, :]
        B = R[:, None]
        A = -A
        # Halve again for timescale correctness
        A *= 0.5
        B *= 0.5
    # Legendre (scaled)
    elif measure == "legs":
        q = np.arange(N, dtype=np.float64)
        col, row = np.meshgrid(q, q)
        r = 2 * q + 1
        M = -(np.where(row >= col, r, 0) - np.diag(q))
        T = np.sqrt(np.diag(2 * q + 1))
        A = T @ M @ np.linalg.inv(T)
        B = np.diag(T)[:, None]
        B = B.copy()  # Otherwise "UserWarning: given NumPY array is not writeable..."
        # after torch.as_tensor(B)
    elif measure == "legsd":
        # Essentially equivalent to S4D-LegS
        q = np.arange(N, dtype=np.float64)
        col, row = np.meshgrid(q, q)
        r = 2 * q + 1
        M = -(np.where(row >= col, r, 0) - np.diag(q))
        T = np.sqrt(np.diag(2 * q + 1))
        A = T @ M @ np.linalg.inv(T)
        B = np.diag(T)[:, None]
        B = B.copy()  # Otherwise "UserWarning: given NumPY array is not writeable..."
        # after torch.as_tensor(B)
        # Diagonal-only variant: fold the rank-1 correction into A directly.
        A += 0.5 * B * B[None, :, 0]
        B = B / 2.0
    elif measure in ["fourier_diag", "foud"]:
        # Essentially equivalent to S4D-Lin
        freqs = np.arange(N // 2)
        d = np.stack([freqs, np.zeros(N // 2)], axis=-1).reshape(-1)[:-1]
        A = 2 * np.pi * (-np.diag(d, 1) + np.diag(d, -1))
        A = A - 0.5 * np.eye(N)
        B = np.zeros(N)
        B[0::2] = 2**0.5
        B[0] = 1
        B = B[:, None]
    elif measure in ["fourier", "fout"]:
        freqs = np.arange(N // 2)
        d = np.stack([np.zeros(N // 2), freqs], axis=-1).reshape(-1)[1:]
        A = np.pi * (-np.diag(d, 1) + np.diag(d, -1))
        B = np.zeros(N)
        B[0::2] = 2**0.5
        B[0] = 1
        # Subtract off rank correction - this corresponds
        # to the other endpoint u(t-1) in this case
        A = A - B[:, None] * B[None, :]
        B = B[:, None]
    else:
        raise NotImplementedError
    return A, B
def rank_correction(measure, N, rank=1, dtype=torch.float):
    """Return low-rank matrix L such that A + L is normal.

    Output shape is (rank, N); rows beyond the measure's intrinsic rank are
    zero-padded.
    """
    if measure == "legs":
        assert rank >= 1
        P = torch.sqrt(0.5 + torch.arange(N, dtype=dtype)).unsqueeze(0)  # (1 N)
    elif measure == "legt":
        assert rank >= 2
        base = torch.sqrt(1 + 2 * torch.arange(N, dtype=dtype))  # (N)
        even = base.clone()
        even[0::2] = 0.0
        odd = base.clone()
        odd[1::2] = 0.0
        # Halve the rank correction just like the original matrix was halved
        P = torch.stack([even, odd], dim=0) * 2 ** (-0.5)  # (2 N)
    elif measure in ["fourier", "fout"]:
        P = torch.zeros(N)
        P[0::2] = 2**0.5
        P[0] = 1
        P = P.unsqueeze(0)
    elif measure in ["fourier_diag", "foud", "legsd"]:
        P = torch.zeros(1, N, dtype=dtype)
    else:
        raise NotImplementedError
    # Zero-pad additional rows up to the requested rank.
    d = P.size(0)
    if rank > d:
        P = torch.cat([P, torch.zeros(rank - d, N, dtype=dtype)], dim=0)  # (rank N)
    return P
def nplr(measure, N, rank=1, dtype=torch.float, diagonalize_precision=True):
    """Decompose as Normal Plus Low-Rank (NPLR).
    Return w, p, q, V, B such that
    (w - p q^*, B) is unitarily equivalent to the original HiPPO A, B by the matrix V
    i.e. A = V[w - p q^*]V^*, B = V B
    """
    assert dtype == torch.float or torch.double
    cdtype = torch.cfloat if dtype == torch.float else torch.cdouble
    A, B = transition(measure, N)
    A = torch.as_tensor(A, dtype=dtype)  # (N, N)
    B = torch.as_tensor(B, dtype=dtype)[:, 0]  # (N,)
    P = rank_correction(measure, N, rank=rank, dtype=dtype)  # (r N)
    # AP = A + P P^* should be (nearly) skew-symmetric plus a scalar identity.
    AP = A + torch.sum(P.unsqueeze(-2) * P.unsqueeze(-1), dim=-3)
    # We require AP to be nearly skew-symmetric
    _A = AP + AP.transpose(-1, -2)
    # if not torch.allclose(_A - _A[0,0]*torch.eye(N), torch.zeros(N, N), atol=1e-5):
    err = torch.sum((_A - _A[0, 0] * torch.eye(N)) ** 2) / N
    if err > 1e-5:
        print("WARNING: HiPPO matrix not skew symmetric", err)
    # Take advantage of identity + skew-symmetric form
    # to calculate real and imaginary parts separately
    # Imaginary part can use eigh instead of eig
    w_re = torch.mean(torch.diagonal(AP), -1, keepdim=True)
    # Diagonalize in double precision
    if diagonalize_precision:
        AP = AP.to(torch.double)
    # Multiplying a skew-symmetric matrix by -1j makes it Hermitian.
    w_im, V = torch.linalg.eigh(AP * -1j)  # (..., N) (..., N, N)
    if diagonalize_precision:
        w_im, V = w_im.to(cdtype), V.to(cdtype)
    w = w_re + 1j * w_im
    # Check: V w V^{-1} = A
    # print("check", V @ torch.diag_embed(w) @ V.conj().transpose(-1, -2))
    # Only keep half of each conjugate pair
    _, idx = torch.sort(w.imag)
    w_sorted = w[idx]
    V_sorted = V[:, idx]
    # There is an edge case when eigenvalues can be 0,
    # which requires some machinery to handle
    # We use a huge hack here: Assume only one pair is 0,
    # and that it is the first row/column of A (only happens in Fourier case)
    V = V_sorted[:, : N // 2]
    w = w_sorted[: N // 2]
    assert w[-2].abs() > 1e-4, "Only 1 zero eigenvalue allowed in diagonal part of A"
    if w[-1].abs() < 1e-4:
        # Replace the degenerate eigenvector for the zero eigenvalue by hand.
        V[:, -1] = 0.0
        V[0, -1] = 2**-0.5
        V[1, -1] = 2**-0.5 * 1j
    # Sanity check that 2 Re(V w V^*) reconstructs AP.
    _AP = V @ torch.diag_embed(w) @ V.conj().transpose(-1, -2)
    err = torch.sum((2 * _AP.real - AP) ** 2) / N
    if err > 1e-5:
        print(
            "Warning: Diagonalization of A matrix not numerically precise - error", err
        )
    # print("check", V @ torch.diag_embed(w) @ V.conj().transpose(-1, -2))
    # Rotate B and P into the eigenbasis.
    V_inv = V.conj().transpose(-1, -2)
    B = contract("ij, j -> i", V_inv, B.to(V))  # V^* B
    P = contract("ij, ...j -> ...i", V_inv, P.to(V))  # V^* P
    return w, P, B, V
def dplr(
    scaling,
    N,
    rank=1,
    H=1,
    dtype=torch.float,
    real_scale=1.0,
    imag_scale=1.0,
    random_real=False,
    random_imag=False,
    normalize=False,
    diagonal=True,
    random_B=False,
):
    """Construct a Diagonal Plus Low-Rank (DPLR) initialization (S4D-style).

    Args:
        scaling: strategy for the imaginary part of the eigenvalues; one of
            "random", "real", "linear"/"lin", "inverse"/"inv",
            "inverse2"/"inv2", "quadratic"/"quad", "legs"/"hippo".
        N: state size (eigenvalues come in N//2 conjugate pairs; only half kept).
        rank: rank of the low-rank correction P.
        H: number of independent SSM copies.
        dtype: torch.float or torch.double (promoted to the complex dtype).
        real_scale, imag_scale: multiplicative scales on the two parts of w.
        random_real, random_imag: randomize the respective parts instead of
            using the deterministic defaults.
        normalize: rescale B so a random C yields unit output variance.
        diagonal: zero out P (purely diagonal A).
        random_B: draw B from a standard normal instead of all-ones.

    Returns:
        Tuple (w, P, B, V):
            w: (H, N//2) complex eigenvalues with negative real part,
            P: (rank, H, N//2) low-rank term (zeros when diagonal=True),
            B: (H, N//2) input vector,
            V: (H, 2, N) partial eigenvector basis (only used in testing).
    """
    # BUG FIX: the original `assert dtype == torch.float or torch.double` is a
    # tautology (`torch.double` is always truthy), so invalid dtypes slipped
    # through. Check membership explicitly instead.
    assert dtype in (torch.float, torch.double)
    dtype = torch.cfloat if dtype == torch.float else torch.cdouble
    pi = torch.tensor(math.pi)
    # Real part defaults to -1/2 for every mode (scaled below).
    if random_real:
        real_part = torch.rand(H, N // 2)
    else:
        real_part = 0.5 * torch.ones(H, N // 2)
    if random_imag:
        imag_part = N // 2 * torch.rand(H, N // 2)
    else:
        imag_part = repeat(torch.arange(N // 2), "n -> h n", h=H)
    real_part = real_scale * real_part
    # Imaginary part: several asymptotic approximations of the HiPPO spectrum.
    if scaling == "random":
        imag_part = torch.randn(H, N // 2)
    elif scaling == "real":
        imag_part = 0 * imag_part
        real_part = 1 + repeat(torch.arange(N // 2), "n -> h n", h=H)
    elif scaling in ["linear", "lin"]:
        imag_part = pi * imag_part
    elif scaling in [
        "inverse",
        "inv",
    ]:  # Based on asymptotics of the default HiPPO matrix
        imag_part = 1 / pi * N * (N / (1 + 2 * imag_part) - 1)
    elif scaling in ["inverse2", "inv2"]:
        imag_part = 1 / pi * N * (N / (1 + imag_part) - 1)
    elif scaling in ["quadratic", "quad"]:
        imag_part = 1 / pi * (1 + 2 * imag_part) ** 2
    elif scaling in ["legs", "hippo"]:
        # Use the exact (diagonal part of the) HiPPO-LegS spectrum.
        w, _, _, _ = nplr("legsd", N)
        imag_part = w.imag
    else:
        raise NotImplementedError
    imag_part = imag_scale * imag_part
    w = -real_part + 1j * imag_part
    # Initialize B
    if random_B:
        B = torch.randn(H, N // 2, dtype=dtype)
    else:
        B = torch.ones(H, N // 2, dtype=dtype)
    if normalize:
        norm = (
            -B / w
        )  # (H, N) # Result if you integrate the kernel with constant 1 function
        zeta = 2 * torch.sum(
            torch.abs(norm) ** 2, dim=-1, keepdim=True
        )  # Variance with a random C vector
        B = B / zeta**0.5
    P = torch.randn(rank, H, N // 2, dtype=dtype)
    if diagonal:
        P = P * 0.0
    V = torch.eye(N, dtype=dtype)[:: N // 2]  # Only used in testing
    V = repeat(V, "n m -> h n m", h=H)
    return w, P, B, V
def ssm(measure, N, R, H, **ssm_args):
    """Dispatcher to create single SSM initialization.
    N: state size
    R: rank (for DPLR parameterization)
    H: number of independent SSM copies
    """
    # Pure DPLR request: forward directly.
    if measure == "dplr":
        return dplr(N=N, rank=R, H=H, **ssm_args)
    # Diagonal (S4D) measures are spelled "diag-<scaling>".
    if measure.startswith("diag"):
        parts = measure.split("-")
        assert parts[0] == "diag" and len(parts) > 1
        return dplr(scaling=parts[1], N=N, rank=R, H=H, diagonal=True, **ssm_args)
    # NPLR measures yield one SSM; tile it into H independent copies.
    w, P, B, V = nplr(measure, N, R, **ssm_args)
    w = repeat(w, "n -> s n", s=H)
    P = repeat(P, "r n -> r s n", s=H)
    B = repeat(B, "n -> s n", s=H)
    V = repeat(V, "n m -> s n m", s=H)
    return w, P, B, V
# Named shorthands that expand a single measure string into a list of concrete
# measures; consumed by `combination` below to mix several (A, B)
# initializations across the independent SSM copies of one model.
combinations = {
    "hippo": ["legs", "fourier"],
    "diag": ["diag-inv", "diag-lin"],
    "all": ["legs", "fourier", "diag-inv", "diag-lin"],
}
def combination(measures, N, R, S, **ssm_args):
    """Create S SSM copies split evenly across one or several measures.

    `measures` may be a list, a single measure name, or a shorthand key of
    the module-level `combinations` dict. Returns concatenated (w, P, B, V).
    """
    if isinstance(measures, str):
        measures = combinations.get(measures, [measures])
    n_measures = len(measures)
    assert S % n_measures == 0, (
        f"{S} independent trainable SSM copies must be multiple of {n_measures} "
        "different measures"
    )
    copies = S // n_measures
    results = [ssm(m, N, R, copies, **ssm_args) for m in measures]
    w = torch.cat([r[0] for r in results], dim=0)  # (S N)
    P = torch.cat([r[1] for r in results], dim=1)  # (R S N)
    B = torch.cat([r[2] for r in results], dim=0)  # (S N)
    V = torch.cat([r[3] for r in results], dim=0)  # (S N N)
    return w, P, B, V
class OptimModule(nn.Module):
    """Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters. # noqa"""
    def register(self, name, tensor, lr=None):
        """Register a tensor with a configurable learning rate and 0 weight decay."""
        if lr == 0.0:
            # Frozen tensor: a plain buffer never receives gradient updates.
            self.register_buffer(name, tensor)
            return
        self.register_parameter(name, nn.Parameter(tensor))
        # Stash per-parameter optimizer settings; an external optimizer hook
        # reads the `_optim` attribute to apply them.
        hyperparams = {"weight_decay": 0.0}
        if lr is not None:
            hyperparams["lr"] = lr
        getattr(self, name)._optim = hyperparams
class SSKernelNPLR(OptimModule):
    """Stores a representation of and computes the SSKernel function.
    K_L(A^dt, B^dt, C) corresponding to a discretized state space,
    where A is Normal + Low Rank (NPLR)
    """
    @torch.no_grad()
    def _setup_C(self, L):
        """Construct C~ from C.
        Two modes are supported: go directly to length L if self.L is 1,
        or length is doubled
        """
        if self.L.item() == 0:
            if self.verbose:
                log.info(f"S4: Initializing kernel to length {L}")
            double_length = False
        elif L > self.L.item():  # 2*int(self.L) == L:
            if self.verbose:
                log.info(
                    f"S4: Doubling length from L = {self.L.item()} to {2*self.L.item()}"
                )
            double_length = True
            L = self.L.item()  # Convenience for the math below
        else:
            # Requested length already covered by the internal length; no-op.
            return
        C = _r2c(self.C)
        dA, _ = self._setup_state()
        dA_L = power(L, dA)
        # Multiply C by I - dA_L
        C_ = _conj(C)
        prod = contract("h m n, c h n -> c h m", dA_L.transpose(-1, -2), C_)
        if double_length:
            prod = -prod  # Multiply by I + dA_L instead
        C_ = C_ - prod
        C_ = C_[..., : self.N]  # Take conjugate pairs again
        self.C.copy_(_c2r(C_))
        self.L = 2 * self.L if double_length else self.L + L  # Preserve type/device
    def _omega(self, L, dtype, device, cache=True):
        """Calculate (and cache) FFT nodes and their "unprocessed" version with the bilinear transform. # noqa
        This should be called everytime the internal length self.L changes
        """
        # Use cached if available
        if cache and hasattr(self, "omega") and self.omega.size(-1) == L // 2 + 1:
            return self.omega, self.z
        omega = torch.tensor(
            np.exp(-2j * np.pi / (L)), dtype=dtype, device=device
        )  # \omega_{2L}
        # Only L//2 + 1 nodes are needed because irfft reconstructs the rest
        # from conjugate symmetry.
        omega = omega ** torch.arange(0, L // 2 + 1, device=device)
        # z = bilinear-transform image of the unit-circle nodes.
        z = 2 * (1 - omega) / (1 + omega)
        # Cache if necessary
        if cache:
            self.omega = omega
            self.z = z
        return omega, z
    def __init__(
        self,
        w,
        P,
        B,
        C,
        log_dt,
        L=None,  # starting/maximum length of kernel
        lr=None,
        verbose=False,
        keops=False,
        real_type="exp",  # ['none' | 'exp' | 'relu' | sigmoid']
        real_tolerance=1e-3,
        bandlimit=None,
    ):
        """Initialize kernel.
        L: Maximum length; this module computes an SSM kernel of length L
        A is represented by diag(w) - PP^*
        w: (S, N) diagonal part
        P: (R, S, N) low-rank part
        B: (S, N)
        C: (C, H, N)
        dt: (H) timescale per feature
        lr: [dict | float | None] hook to set lr of special parameters (A, B, dt)
        Dimensions:
        N (or d_state): state size
        H (or d_model): total SSM copies
        S (or n_ssm): number of trainable copies of (A, B, dt); must divide H
        R (or rank): rank of low-rank part
        C (or channels): system is 1-dim to C-dim
        The forward pass of this Module returns a tensor of shape (C, H, L)
        Note: tensor shape N here denotes half the true state size,
        because of conjugate symmetry
        """
        super().__init__()
        self.verbose = verbose
        self.keops = keops
        self.bandlimit = bandlimit
        self.real_type = real_type
        self.real_tolerance = real_tolerance
        # Rank of low-rank correction
        self.rank = P.shape[-3]
        assert w.size(-1) == P.size(-1) == B.size(-1) == C.size(-1)
        self.H = log_dt.size(-1)
        self.N = w.size(-1)
        # Check different SSM inits
        assert w.size(-2) == P.size(-2) == B.size(-2)  # n_ssm
        assert self.H % w.size(0) == 0
        self.n_ssm = w.size(0)
        self.broadcast = self.H // w.size(
            0
        )  # Each trainable SSM needs to be duplicated this many times
        # Broadcast everything to correct shapes
        C = C.expand(torch.broadcast_shapes(C.shape, (1, self.H, self.N)))  # (C, H, N)
        B = B.unsqueeze(0)  # (1, 1, N)
        # Register parameters
        # Complex tensors are stored as real view pairs (_c2r) so they work
        # as nn.Parameters; _r2c recovers the complex view on use.
        self.C = nn.Parameter(_c2r(_resolve_conj(C)))
        if lr is None or isinstance(lr, float):
            lr_dict = {}
        else:
            lr_dict, lr = lr, None
        self.register("log_dt", log_dt, lr_dict.get("dt", lr))
        self.register("B", _c2r(B), lr_dict.get("B", lr))
        self.register("P", _c2r(P), lr_dict.get("A", lr))
        # w is split into (inv_w_real, w_imag): the real part is passed through
        # the `real_type` transform so training keeps it negative (stable A).
        self.register("inv_w_real", self._w_init(w.real), lr_dict.get("A", lr))
        self.register("w_imag", w.imag, lr_dict.get("A", lr))
        self.l_max = L
        self.register_buffer("L", torch.tensor(0))  # Internal length
    def _w_init(self, w_real):
        # Inverse of the `real_type` transform applied in _w(); clamp first so
        # the transform is well-defined (e.g. log of a positive number).
        w_real = torch.clamp(w_real, max=-self.real_tolerance)
        if self.real_type == "none":
            return -w_real
        elif self.real_type == "exp":
            return torch.log(-w_real)  # Some of the HiPPO methods have real part 0
        elif self.real_type == "relu":
            return -w_real
        elif self.real_type == "sigmoid":
            return torch.logit(-w_real)
        elif self.real_type == "softplus":
            return torch.log(torch.exp(-w_real) - 1)
        else:
            raise NotImplementedError
    def _w(self):
        # Get the internal w (diagonal) parameter
        if self.real_type == "none":
            w_real = -self.inv_w_real
        elif self.real_type == "exp":
            w_real = -torch.exp(self.inv_w_real)
        elif self.real_type == "relu":
            w_real = -F.relu(self.inv_w_real)
        elif self.real_type == "sigmoid":
            w_real = -F.sigmoid(self.inv_w_real)
        elif self.real_type == "softplus":
            w_real = -F.softplus(self.inv_w_real)
        else:
            raise NotImplementedError
        w = w_real + 1j * self.w_imag
        return w
    def forward(self, state=None, rate=1.0, L=None):
        """Forward pass.
        state: (B, H, N) initial state
        rate: sampling rate factor
        L: target length
        returns:
            (C, H, L) convolution kernel (generally C=1)
            (B, H, L) output from initial state
        """
        # Initialize C~
        # if necessary (done in forward pass so it's on the correct device)
        if self.L.item() == 0 and self.l_max is not None and self.l_max > 0:
            self._setup_C(self.l_max)
        # Handle sampling rate logic
        # The idea is that this kernel's length (in continuous units) is self.L,
        # while we are asked
        # to provide a kernel of length L at (relative) frequency rate
        if L is None:
            L = round(self.L.item() / rate)
        # Increase the internal length if needed
        continuous_L = round(rate * L)
        while continuous_L > self.L.item():
            self._setup_C(continuous_L)
        discrete_L = round(self.L.item() / rate)
        dt = torch.exp(self.log_dt) * rate
        B = _r2c(self.B)
        C = _r2c(self.C)
        P = _r2c(self.P)
        Q = P.conj()
        w = self._w()  # (n_ssm, N)
        # Address bandlimiting
        if self.bandlimit is not None:
            freqs = w.imag.abs() / (2 * math.pi)  # (H, N)
            freqs = dt[:, None] / rate * freqs  # (H, N)
            # Zero out C entries whose mode frequency exceeds the band limit.
            mask = torch.where(freqs < self.bandlimit * 0.5, 1, 0)
            C = C * mask
        # Get FFT nodes of right length
        omega, z = self._omega(
            discrete_L, dtype=w.dtype, device=w.device, cache=(rate == 1.0)
        )
        # Broadcast parameters to same hidden features H
        B = repeat(B, "1 t n -> 1 (v t) n", v=self.broadcast)
        P = repeat(P, "r t n -> r (v t) n", v=self.broadcast)
        Q = repeat(Q, "r t n -> r (v t) n", v=self.broadcast)
        w = repeat(w, "t n -> (v t) n", v=self.broadcast)
        # Augment B
        if state is not None:
            # Have to "unbilinear" the state to put it into the same "type" as B
            # Compute 1/dt * (I + dt/2 A) @ state
            # Can do this without expanding
            # (maybe minor speedup using conj symmetry in theory),
            # but it's easier to read this way
            s = _conj(state) if state.size(-1) == self.N else state  # (B H N)
            sA = s * _conj(w) - contract(  # (B H N)
                "bhm, rhm, rhn -> bhn", s, _conj(Q), _conj(P)
            )
            s = s / dt.unsqueeze(-1) + sA / 2
            s = s[..., : self.N]
            B = torch.cat([s, B], dim=-3)  # (B+1, H, N)
        # Incorporate dt into A
        w = w * dt.unsqueeze(-1)  # (H N)
        # Stack B and p, C and q for convenient batching
        B = torch.cat([B, P], dim=-3)  # (B+1+R, H, N)
        C = torch.cat([C, Q], dim=-3)  # (C+R, H, N)
        # Incorporate B and C batch dimensions
        v = B.unsqueeze(-3) * C.unsqueeze(-4)  # (B+1+R, C+R, H, N)
        # Calculate resolvent at omega
        # Three backends for the Cauchy kernel, from fastest to slowest:
        # fused CUDA extension, pykeops, and a naive fallback.
        if has_cauchy_extension and z.dtype == torch.cfloat and not self.keops:
            r = cauchy_mult(v, z, w, symmetric=True)
        elif has_pykeops:
            r = cauchy_conj(v, z, w)
        else:
            r = cauchy_naive(v, z, w)
        r = r * dt[None, None, :, None]  # (B+1+R, C+R, H, L)
        # Low-rank Woodbury correction
        # Fold the rank-R term back in; rank 1 and 2 have hand-expanded
        # closed forms, the general case inverts an (R x R) matrix per node.
        if self.rank == 1:
            k_f = r[:-1, :-1, :, :] - r[:-1, -1:, :, :] * r[-1:, :-1, :, :] / (
                1 + r[-1:, -1:, :, :]
            )
        elif self.rank == 2:
            r00 = r[: -self.rank, : -self.rank, :, :]
            r01 = r[: -self.rank, -self.rank :, :, :]
            r10 = r[-self.rank :, : -self.rank, :, :]
            r11 = r[-self.rank :, -self.rank :, :, :]
            # Explicit 2x2 inverse via the determinant.
            det = (1 + r11[:1, :1, :, :]) * (1 + r11[1:, 1:, :, :]) - r11[
                :1, 1:, :, :
            ] * r11[1:, :1, :, :]
            s = (
                r01[:, :1, :, :] * (1 + r11[1:, 1:, :, :]) * r10[:1, :, :, :]
                + r01[:, 1:, :, :] * (1 + r11[:1, :1, :, :]) * r10[1:, :, :, :]
                - r01[:, :1, :, :] * (r11[:1, 1:, :, :]) * r10[1:, :, :, :]
                - r01[:, 1:, :, :] * (r11[1:, :1, :, :]) * r10[:1, :, :, :]
            )
            s = s / det
            k_f = r00 - s
        else:
            r00 = r[: -self.rank, : -self.rank, :, :]
            r01 = r[: -self.rank, -self.rank :, :, :]
            r10 = r[-self.rank :, : -self.rank, :, :]
            r11 = r[-self.rank :, -self.rank :, :, :]
            r11 = rearrange(r11, "a b h n -> h n a b")
            r11 = torch.linalg.inv(torch.eye(self.rank, device=r.device) + r11)
            r11 = rearrange(r11, "h n a b -> a b h n")
            k_f = r00 - torch.einsum(
                "i j h n, j k h n, k l h n -> i l h n", r01, r11, r10
            )
        # Final correction for the bilinear transform
        k_f = k_f * 2 / (1 + omega)
        # Move from frequency to coefficients
        k = torch.fft.irfft(k_f, n=discrete_L)  # (B+1, C, H, L)
        # # Truncate to target length
        k = k[..., :L]
        if state is not None:
            k_state = k[:-1, :, :, :]  # (B, C, H, L)
        else:
            k_state = None
        k_B = k[-1, :, :, :]  # (C H L)
        return k_B, k_state
    @torch.no_grad()
    def _setup_linear(self):
        """Create parameters that allow fast linear stepping of state."""
        w = self._w()
        B = _r2c(self.B)  # (H N)
        P = _r2c(self.P)
        Q = P.conj()
        # Repeat w shape properly
        B = repeat(B, "1 t n -> 1 (v t) n", v=self.broadcast)
        P = repeat(P, "r t n -> r (v t) n", v=self.broadcast)
        Q = repeat(Q, "r t n -> r (v t) n", v=self.broadcast)
        w = repeat(w, "t n -> (v t) n", v=self.broadcast)
        # Prepare Linear stepping
        dt = torch.exp(self.log_dt)
        D = (2.0 / dt.unsqueeze(-1) - w).reciprocal()  # (H, N)
        R = (
            torch.eye(self.rank, dtype=w.dtype, device=w.device)
            + 2 * contract("r h n, h n, s h n -> h r s", Q, D, P).real
        )  # (H R R)
        Q_D = rearrange(Q * D, "r h n -> h r n")
        try:
            R = torch.linalg.solve(R, Q_D)  # (H R N)
        except:  # noqa
            # NOTE(review): bare except — fall back to a CPU numpy solve when
            # torch.linalg.solve fails (presumably singular/backend issues);
            # consider narrowing to RuntimeError.
            R = torch.tensor(
                np.linalg.solve(
                    R.to(Q_D).contiguous().detach().cpu(),
                    Q_D.contiguous().detach().cpu(),
                )
            ).to(Q_D)
        R = rearrange(R, "h r n -> r h n")
        self.step_params = {
            "D": D,  # (H N)
            "R": R,  # (R H N)
            "P": P,  # (R H N)
            "Q": Q,  # (R H N)
            "B": B,  # (1 H N)
            "E": 2.0 / dt.unsqueeze(-1) + w,  # (H N)
        }
    def _step_state_linear(self, u=None, state=None):
        """Step one time step as a recurrent model.
        Version of the step function that has time O(N) instead of O(N^2) per step,
        which takes advantage of the DPLR form and bilinear discretization.
        Unfortunately, as currently implemented it's about 2x slower
        because it calls several sequential operations.
        Perhaps a fused CUDA kernel implementation would be much faster
        u: (H) input
        state: (H, N/2) state with conjugate pairs
        Optionally, the state can have last dimension N
        Returns: same shape as state
        """
        C = _r2c(self.C)  # View used for dtype/device
        if u is None:  # Special case used to find dA
            u = torch.zeros(self.H, dtype=C.dtype, device=C.device)
        if state is None:  # Special case used to find dB
            state = torch.zeros(self.H, self.N, dtype=C.dtype, device=C.device)
        step_params = self.step_params.copy()
        if (
            state.size(-1) == self.N
        ):  # Only store half of the conjugate pairs; should be true by default
            # There should be a slightly faster way using conjugate symmetry
            def contract_fn(p, x, y):
                return contract(
                    "r h n, r h m, ... h m -> ... h n", _conj(p), _conj(x), _conj(y)
                )[
                    ..., : self.N
                ]  # inner outer product
        else:
            assert state.size(-1) == 2 * self.N
            step_params = {k: _conj(v) for k, v in step_params.items()}
            # Worth setting up a contract_expression in default_state
            # if we want to use this at inference time for stepping
            def contract_fn(p, x, y):
                return contract(
                    "r h n, r h m, ... h m -> ... h n", p, x, y
                )  # inner outer product
        D = step_params["D"]  # (H N)
        E = step_params["E"]  # (H N)
        R = step_params["R"]  # (R H N)
        P = step_params["P"]  # (R H N)
        Q = step_params["Q"]  # (R H N)
        B = step_params["B"]  # (1 H N)
        # Bilinear update: new_state = D (E x - P Q^* x + 2 B u - P R (...)).
        new_state = E * state - contract_fn(P, Q, state)  # (B H N)
        new_state = new_state + 2.0 * B * u.unsqueeze(-1)  # (B H N)
        new_state = D * (new_state - contract_fn(P, R, new_state))
        return new_state
    def _setup_state(self):
        """Construct dA and dB for discretized state equation."""
        # Construct dA and dB by using the stepping
        self._setup_linear()
        C = _r2c(self.C)  # Just returns a view that we use for finding dtype/device
        # Step the identity matrix through the linear recurrence to read off
        # dA column by column.
        state = torch.eye(2 * self.N, dtype=C.dtype, device=C.device).unsqueeze(
            -2
        )  # (N 1 N)
        dA = self._step_state_linear(state=state)
        dA = rearrange(dA, "n h m -> h m n")
        u = C.new_ones(self.H)
        dB = self._step_state_linear(u=u)
        dB = _conj(dB)
        dB = rearrange(dB, "1 h n -> h n")  # (H N)
        return dA, dB
    def _step_state(self, u, state):
        """Step one time step as a recurrent model.
        Must be called after self.default_state() is used to construct an initial state!
        """
        next_state = self.state_contraction(self.dA, state) + self.input_contraction(
            self.dB, u
        )
        return next_state
    def _setup_step(self, mode="dense"):
        """Set up dA, dB, dC discretized parameters for stepping."""
        self.dA, self.dB = self._setup_state()
        # Calculate original C
        C = _conj(_r2c(self.C))  # (H C N)
        if self.L.item() == 0:
            dC = C
        else:
            # self.C represents C_tilde
            # Recover C by solving (I - dA^L)^T dC = C.
            dA_L = power(self.L.item(), self.dA)
            E = torch.eye(self.dA.size(-1)).to(dA_L)
            dC = torch.linalg.solve(
                E - dA_L.transpose(-1, -2),
                C.unsqueeze(-1),
            ).squeeze(-1)
        self.dC = dC
        # Do special preprocessing for different step modes
        self._step_mode = mode
        if mode == "linear":
            # Linear case: special step function for the state, we need to handle output
            # use conjugate symmetry by default, which affects the output projection
            self.dC = 2 * self.dC[:, :, : self.N]
        elif mode == "diagonal":
            # Eigendecomposition of the A matrix
            L, V = torch.linalg.eig(self.dA)
            V_inv = torch.linalg.inv(V)
            # Check that the eigendedecomposition is correct
            if self.verbose:
                print(
                    "Diagonalization error:",
                    torch.dist(V @ torch.diag_embed(L) @ V_inv, self.dA),
                )
            # Change the parameterization to diagonalize
            self.dA = L
            self.dB = contract("h n m, h m -> h n", V_inv, self.dB)
            self.dC = contract("h n m, c h n -> c h m", V, self.dC)
        elif mode == "dense":
            pass
        else:
            raise NotImplementedError(
                "NPLR Kernel step mode must be {'dense' | 'linear' | 'diagonal'}"
            )
    def default_state(self, *batch_shape):
        C = _r2c(self.C)
        N = C.size(-1)
        H = C.size(-2)
        # Cache the tensor contractions we will later do, for efficiency
        # These are put in this function because they depend on the batch size
        step_mode = getattr(self, "_step_mode", "dense")  # Used in default_state,
        # which is called without _setup_step() in forward_state()
        if step_mode != "linear":
            # Non-linear modes carry the full (unpaired) state of size 2N.
            N *= 2
            if step_mode == "diagonal":
                self.state_contraction = contract_expression(
                    "h n, ... h n -> ... h n",
                    (H, N),
                    batch_shape + (H, N),
                )
            else:
                # Dense (quadratic) case: expand all terms
                self.state_contraction = contract_expression(
                    "h m n, ... h n -> ... h m",
                    (H, N, N),
                    batch_shape + (H, N),
                )
            self.input_contraction = contract_expression(
                "h n, ... h -> ... h n",
                (H, N),
                batch_shape + (H,),  # self.dB.shape
            )
        self.output_contraction = contract_expression(
            "c h n, ... h n -> ... c h",
            (C.shape[0], H, N),  # self.dC.shape
            batch_shape + (H, N),
        )
        state = torch.zeros(*batch_shape, H, N, dtype=C.dtype, device=C.device)
        return state
    def step(self, u, state):
        """Step one time step as a recurrent model.
        Must have called self._setup_step()
        and created state with self.default_state() before calling this
        """
        if self._step_mode == "linear":
            new_state = self._step_state_linear(u, state)
        else:
            new_state = self._step_state(u, state)
        y = self.output_contraction(self.dC, new_state)
        return y.real, new_state
class SSKernelDiag(OptimModule):
    """Version using (complex) diagonal state matrix (S4D)."""
    def __init__(
        self,
        A,
        B,
        C,
        log_dt,
        L=None,
        disc="bilinear",
        real_type="exp",
        lr=None,
        bandlimit=None,
    ):
        # A: (S, N) complex diagonal of the state matrix
        # B: (S, N), C: (channels, H, N), log_dt: (H,)
        # disc: discretization method ('bilinear' | 'zoh' | 'dss')
        super().__init__()
        self.L = L
        self.disc = disc
        self.bandlimit = bandlimit
        self.real_type = real_type
        # Rank of low-rank correction
        assert A.size(-1) == C.size(-1)
        self.H = log_dt.size(-1)
        self.N = A.size(-1)
        assert A.size(-2) == B.size(-2)  # Number of independent SSMs trained
        assert self.H % A.size(-2) == 0
        self.n_ssm = A.size(-2)
        self.repeat = self.H // A.size(0)
        self.channels = C.shape[0]
        self.C = nn.Parameter(_c2r(_resolve_conj(C)))
        # Register parameters
        if lr is None or isinstance(lr, float):
            lr_dict = {}
        else:
            lr_dict, lr = lr, None
        self.register("log_dt", log_dt, lr_dict.get("dt", lr))
        # NOTE(review): `A` is registered as a parameter but the kernel is
        # rebuilt from (inv_A_real, A_imag) in _A(); the raw `A` parameter
        # looks unused in this class — confirm before removing.
        self.register("A", _c2r(A), lr_dict.get("A", lr))
        self.register("B", _c2r(B), lr_dict.get("B", lr))
        self.register("inv_A_real", self._A_init(A.real), lr_dict.get("A", lr))
        self.register("A_imag", A.imag, lr_dict.get("A", lr))
    def _A_init(self, A_real):
        # Inverse of the `real_type` transform in _A(); keeps the stored real
        # part valid for the transform (clamped to <= -1e-4).
        A_real = torch.clamp(A_real, max=-1e-4)
        if self.real_type == "none":
            return -A_real
        elif self.real_type == "exp":
            return torch.log(-A_real)  # Some of the HiPPO methods have real part 0
        elif self.real_type == "relu":
            return -A_real
        elif self.real_type == "sigmoid":
            return torch.logit(-A_real)
        elif self.real_type == "softplus":
            return torch.log(torch.exp(-A_real) - 1)
        else:
            raise NotImplementedError
    def _A(self):
        # Get the internal A (diagonal) parameter
        if self.real_type == "none":
            A_real = -self.inv_A_real
        elif self.real_type == "exp":
            A_real = -torch.exp(self.inv_A_real)
        elif self.real_type == "relu":
            # JAX version seems to NaN if you alloA 0's,
            # although this code Aas fine Aithout it
            A_real = -F.relu(self.inv_A_real) - 1e-4
        elif self.real_type == "sigmoid":
            A_real = -F.sigmoid(self.inv_A_real)
        elif self.real_type == "softplus":
            A_real = -F.softplus(self.inv_A_real)
        else:
            raise NotImplementedError
        A = A_real + 1j * self.A_imag
        return A
    def forward(self, L, state=None, rate=1.0, u=None):
        """Forward pass.
        state: (B, H, N) initial state
        rate: sampling rate factor
        L: target length
        returns:
            (C, H, L) convolution kernel (generally C=1)
            (B, H, L) output from initial state
        """
        dt = torch.exp(self.log_dt) * rate  # (H)
        C = _r2c(self.C)  # (C H N)
        A = self._A()  # (H N)
        B = _r2c(self.B)
        B = repeat(B, "t n -> 1 (v t) n", v=self.repeat)
        if self.bandlimit is not None:
            # Zero out C entries whose mode frequency exceeds the band limit.
            freqs = dt[:, None] / rate * A.imag.abs() / (2 * math.pi)  # (H, N)
            mask = torch.where(freqs < self.bandlimit * 0.5, 1, 0)
            C = C * mask
        # Incorporate dt into A
        A = repeat(A, "t n -> (v t) n", v=self.repeat)
        dtA = A * dt.unsqueeze(-1)  # (H N)
        # Augment B with state
        if state is not None:
            # Convert the state into B's "type" under the chosen discretization.
            s = state / dt.unsqueeze(-1)
            if self.disc == "bilinear":
                s = s * (1.0 + dtA / 2)
            elif self.disc == "zoh":
                s = s * dtA * dtA.exp() / (dtA.exp() - 1.0)
            B = torch.cat([s, B], dim=-3)  # (1+B H N)
        # Absorb B into C so the kernel is a single Vandermonde contraction.
        C = (B[:, None, :, :] * C).view(-1, self.H, self.N)
        if self.disc == "zoh":
            # Power up
            C = C * (torch.exp(dtA) - 1.0) / A
            K = log_vandermonde(C, dtA, L)  # (H L)
        elif self.disc == "bilinear":
            C = C * (1.0 - dtA / 2).reciprocal() * dt.unsqueeze(-1)  # or * dtA / A
            dA = (1.0 + dtA / 2) / (1.0 - dtA / 2)
            K = log_vandermonde(C, dA.log(), L)
        elif self.disc == "dss":
            # Implementation from DSS meant for case
            # when real eigenvalues can be positive
            P = dtA.unsqueeze(-1) * torch.arange(L, device=C.device)  # [H N L]
            A_gt_0 = A.real > 0  # [N]
            if A_gt_0.any():
                with torch.no_grad():
                    P_max = dtA * (A_gt_0 * (L - 1))  # [H N]
                P = P - P_max.unsqueeze(-1)  # [H N L]
            S = P.exp()  # [H N L]
            dtA_neg = dtA * (1 - 2 * A_gt_0)  # [H N]
            num = dtA_neg.exp() - 1  # [H N]
            den = (dtA_neg * L).exp() - 1  # [H N]
            # Inline reciprocal function for DSS logic
            x = den * A
            x_conj = _resolve_conj(x)
            r = x_conj / (x * x_conj + 1e-7)
            C = C * num * r  # [C H N]
            K = contract("chn,hnl->chl", C, S).float()
        else:
            assert False, f"{self.disc} not supported"
        # Split off the state-response rows that were stacked into B above.
        K = K.view(-1, self.channels, self.H, L)  # (1+B C H L)
        if state is not None:
            K_state = K[:-1, :, :, :]  # (B C H L)
        else:
            K_state = None
        K = K[-1, :, :, :]  # (C H L)
        return K, K_state
    def _setup_step(self):
        # These methods are organized
        # like this to be compatible with the NPLR kernel interface
        dt = torch.exp(self.log_dt)  # (H)
        B = _r2c(self.B)  # (H N)
        C = _r2c(self.C)  # (C H N)
        self.dC = C
        A = self._A()  # (H N)
        # Incorporate dt into A
        dtA = A * dt.unsqueeze(-1)  # (H N)
        if self.disc == "zoh":
            self.dA = torch.exp(dtA)  # (H N)
            self.dB = B * (torch.exp(dtA) - 1.0) / A  # (C H N)
        elif self.disc == "bilinear":
            self.dA = (1.0 + dtA / 2) / (1.0 - dtA / 2)
            self.dB = (
                B * (1.0 - dtA / 2).reciprocal() * dt.unsqueeze(-1)
            )  # or * dtA / A
    def default_state(self, *batch_shape):
        # Zero state matching the kernel's dtype/device.
        C = _r2c(self.C)
        state = torch.zeros(
            *batch_shape, self.H, self.N, dtype=C.dtype, device=C.device
        )
        return state
    def step(self, u, state):
        # Single recurrent step; factor 2 restores the conjugate-pair half.
        next_state = contract("h n, b h n -> b h n", self.dA, state) + contract(
            "h n, b h -> b h n", self.dB, u
        )
        y = contract("c h n, b h n -> b c h", self.dC, next_state)
        return 2 * y.real, next_state
    def forward_state(self, u, state):
        # Forward the state through a whole sequence u in one shot:
        # next_state = dA^L state + sum_k dA^k dB u_{L-1-k}.
        self._setup_step()
        AL = self.dA ** u.size(-1)
        u = u.flip(-1).to(self.dA).contiguous()  # (B H L)
        v = log_vandermonde_transpose(u, self.dB, self.dA.log(), u.size(-1))
        next_state = AL * state + v
        return next_state
class SSKernel(nn.Module):
    """Wrapper around SSKernel parameterizations.
    The SSKernel is expected to support the interface
    forward()
    default_state()
    _setup_step()
    step()
    """
    def __init__(
        self,
        H,
        N=64,
        L=None,
        measure="legs",
        rank=1,
        channels=1,
        dt_min=0.001,
        dt_max=0.1,
        deterministic=False,
        lr=None,
        mode="nplr",
        n_ssm=None,
        verbose=False,
        measure_args={},
        **kernel_args,
    ):
        r"""State Space Kernel which computes the convolution kernel $\\bar{K}$.
        H: Number of independent SSM copies;
        controls the size of the model. Also called d_model in the config.
        N: State size (dimensionality of parameters A, B, C).
        Also called d_state in the config.
        Generally shouldn't need to be adjusted and doens't affect speed much.
        L: Maximum length of convolution kernel, if known.
        Should work in the majority of cases even if not known.
        measure: Options for initialization of (A, B).
        For NPLR mode, recommendations are "legs",
        "fout", "hippo" (combination of both).
        For Diag mode, recommendations are "diag-inv",
        "diag-lin", "diag-legs", and "diag" (combination of diag-inv and diag-lin)
        rank: Rank of low-rank correction for NPLR mode.
        Needs to be increased for measure "legt"
        channels: C channels turns the SSM from a 1-dim to C-dim map;
        can think of it having C separate "heads" per SSM.
        This was partly a feature to make it easier to implement bidirectionality;
        it is recommended to set channels=1
        and adjust H to control parameters instead
        dt_min, dt_max: min and max values for the step size dt (\Delta)
        mode: Which kernel algorithm to use. 'nplr' is the full S4 model;
        'diag' is the simpler S4D; 'slow' is a dense version for testing
        n_ssm: Number of independent trainable (A, B) SSMs,
        e.g. n_ssm=1 means all A/B parameters are tied across
        the H different instantiations of C.
        n_ssm=None means all H SSMs are completely independent.
        Generally, changing this option can save parameters but doesn't affect
        performance or speed much. This parameter must divide H
        lr: Passing in a number (e.g. 0.001) sets
        attributes of SSM parameers (A, B, dt).
        A custom optimizer hook is needed to configure the optimizer
        to set the learning rates appropriately for these parameters.
        """
        super().__init__()
        self.N = N
        self.H = H
        dtype, cdtype = torch.float, torch.cfloat
        self.channels = channels
        self.n_ssm = n_ssm if n_ssm is not None else H
        self.mode = mode
        self.verbose = verbose
        self.kernel_args = kernel_args
        # Generate dt
        # log_dt is uniform in log-space over [dt_min, dt_max].
        if deterministic:
            log_dt = torch.exp(torch.linspace(math.log(dt_min), math.log(dt_max), H))
        else:
            log_dt = torch.rand(self.H, dtype=dtype) * (
                math.log(dt_max) - math.log(dt_min)
            ) + math.log(dt_min)
        # Compute the preprocessed representation
        w, P, B, V = combination(measure, self.N, rank, self.n_ssm, **measure_args)
        # Broadcast C to have H channels
        if deterministic:
            C = torch.zeros(channels, self.H, self.N, dtype=cdtype)
            C[:, :, :1] = 1.0
            C = contract("hmn, chn -> chm", V.conj().transpose(-1, -2), C)  # V^* C
        else:
            C = torch.randn(channels, self.H, self.N // 2, dtype=cdtype)
        # Broadcast other parameters to have n_ssm copies
        assert (
            self.n_ssm % B.size(-2) == 0
            and self.n_ssm % P.size(-2) == 0
            and self.n_ssm % w.size(-2) == 0
        )
        # Broadcast tensors to n_ssm copies
        # These will be the parameters,
        # so make sure tensors are materialized and contiguous
        B = repeat(B, "t n -> (v t) n", v=self.n_ssm // B.size(-2)).clone().contiguous()
        P = (
            repeat(P, "r t n -> r (v t) n", v=self.n_ssm // P.size(-2))
            .clone()
            .contiguous()
        )
        w = repeat(w, "t n -> (v t) n", v=self.n_ssm // w.size(-2)).clone().contiguous()
        C = C.contiguous()
        if mode == "nplr":
            self.kernel = SSKernelNPLR(
                w,
                P,
                B,
                C,
                log_dt,
                L=L,
                lr=lr,
                verbose=verbose,
                **kernel_args,
            )
        elif mode == "diag":
            if not measure.startswith("diag"):
                log.warning(
                    "Diagonal kernel (S4D) activated but initialization is not "
                    "intended for S4D. Set `measure` to 'diag-lin', 'diag-inv', or "
                    "'diag-legs' for the main variants, or 'diag' "
                    "for a combination of S4D-Lin and S4D-Inv."
                )
            # S4D absorbs B into C at init time.
            C = C * repeat(B, "t n -> (v t) n", v=H // self.n_ssm)
            self.kernel = SSKernelDiag(
                w,
                B,
                C,
                log_dt,
                L=L,
                lr=lr,
                **kernel_args,
            )
        else:
            raise NotImplementedError(f"mode={mode} is not valid")
    def forward(self, state=None, L=None, rate=None):
        # Delegate to the underlying kernel implementation.
        return self.kernel(state=state, L=L, rate=rate)
    @torch.no_grad()
    def forward_state(self, u, state):
        """Forward the state through a sequence.
        i.e. computes the state after passing chunk through SSM
        state: (B, H, N)
        u: (B, H, L)
        Returns: (B, H, N)
        """
        if hasattr(self.kernel, "forward_state"):
            # Diag kernel provides a fast specialized implementation.
            return self.kernel.forward_state(u, state)
        dA, dB = self.kernel._setup_state()  # Construct dA, dB matrices
        # dA, dB = self.kernel.dA, self.kernel.dB # (H N N) (H N)
        conj = state.size(-1) != dA.size(-1)
        if conj:
            state = _conj(state)
        v = contract(
            "h n, b h l -> b h n l", dB, u.flip(-1)
        )  # dB.unsqueeze(-1) * u.flip(-1).unsqueeze(-2)
        AL, v = power(u.size(-1), dA, v)
        next_state = contract("h m n, b h n -> b h m", AL, state)
        next_state = next_state + v
        if conj:
            # Drop the mirrored conjugate half again.
            next_state = next_state[..., : next_state.size(-1) // 2]
        return next_state
    def _setup_step(self, **kwargs):
        # This method is intended to be private so that setting up an S4 module with
        # ```
        # if hasattr(module, 'setup_step'): module.setup_step()
        # ```
        # will not trigger this method multiple times
        self.kernel._setup_step(**kwargs)
    def step(self, u, state, **kwargs):
        # Single recurrent step; requires _setup_step() + default_state() first.
        y, state = self.kernel.step(u, state, **kwargs)
        return y, state
    def default_state(self, *args, **kwargs):
        return self.kernel.default_state(*args, **kwargs)
class S4(nn.Module):
def __init__(
self,
d_model,
d_state=64,
l_max=None,
channels=1,
bidirectional=False,
# Arguments for position-wise feedforward components
activation="gelu",
postact="glu",
hyper_act=None,
dropout=0.0,
tie_dropout=False,
bottleneck=None,
gate=None,
transposed=True,
verbose=False,
# SSM Kernel arguments
**kernel_args,
):
"""Initialize S4 module.
d_state: the dimension of the state, also denoted by N
l_max: the maximum kernel length, also denoted by L.
Set l_max=None to always use a global kernel
channels: can be interpreted as a number of "heads";
the SSM is a map from a 1-dim to C-dim sequence.
It's not recommended to change this unless desperate for things to tune;
instead, increase d_model for larger models
bidirectional: if True, convolution kernel will be two-sided
Position-wise feedforward components:
--------------------
activation: activation in between SS and FF
postact: activation after FF
hyper_act: use a "hypernetwork" multiplication (experimental)
dropout: standard dropout argument. tie_dropout=True ties the dropout
mask across the sequence length, emulating nn.Dropout1d
Other arguments:
--------------------
transposed: choose backbone axis ordering of
(B, L, H) (if False) or (B, H, L) (if True)
[B=batch size, L=sequence length, H=hidden dimension]
gate: add gated activation (GSS)
bottleneck: reduce SSM dimension (GSS)
See the class SSKernel for the kernel constructor which accepts kernel_args.
Relevant options that are worth considering
and tuning include "mode" + "measure", "dt_min", "dt_max", "lr"
Other options are all experimental and should not need to be configured
"""
super().__init__()
if verbose:
log.info(f"Constructing S4 (H, N, L) = ({d_model}, {d_state}, {l_max})")
self.d_model = d_model
self.H = d_model
self.N = d_state
self.L = l_max
self.bidirectional = bidirectional
self.channels = channels
self.transposed = transposed
self.gate = gate
self.bottleneck = bottleneck
if bottleneck is not None:
self.H = self.H // bottleneck
self.input_linear = LinearActivation(
self.d_model,
self.H,
transposed=not self.transposed,
activation=activation,
activate=True,
)
if gate is not None:
self.input_gate = LinearActivation(
self.d_model,
self.d_model * gate,
transposed=not self.transposed,
activation=activation,
activate=True,
)
self.output_gate = LinearActivation(
self.d_model * gate,
self.d_model,
transposed=self.transposed,
activation=None,
activate=False,
)
# optional multiplicative modulation GLU-style
# https://arxiv.org/abs/2002.05202
self.hyper = hyper_act is not None
if self.hyper:
channels *= 2
self.hyper_activation = Activation(hyper_act)
self.D = nn.Parameter(torch.randn(channels, self.H))
if self.bidirectional:
channels *= 2
# SSM Kernel
self.kernel = SSKernel(
self.H,
N=self.N,
L=self.L,
channels=channels,
verbose=verbose,
**kernel_args,
)
# Pointwise
self.activation = Activation(activation)
dropout_fn = DropoutNd if tie_dropout else nn.Dropout
self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
# position-wise output transform to mix features
self.output_linear = LinearActivation(
self.H * self.channels,
self.d_model * (1 if self.gate is None else self.gate),
transposed=self.transposed,
activation=postact,
activate=True,
)
def forward(self, u, state=None, rate=1.0, lengths=None, **kwargs):
"""Forward pass.
u: (B H L) if self.transposed else (B L H)
state: (H N) never needed unless you know what you're doing
Returns: same shape as u
"""
if not self.transposed:
u = u.transpose(-1, -2)
L = u.size(-1)
# Mask out padding tokens
if isinstance(lengths, int):
if lengths != L:
lengths = torch.tensor(lengths, dtype=torch.long, device=u.device)
else:
lengths = None
if lengths is not None:
if lengths.ndim == 0:
lengths = lengths.unsqueeze(0)
assert (
isinstance(lengths, torch.Tensor)
and lengths.ndim == 1
and lengths.size(0) in [1, u.size(0)]
), print(f"l:{lengths.ndim}, {lengths.size()}, {u.size()}")
mask = torch.where(
torch.arange(L, device=lengths.device) < lengths[:, None, None],
1.0,
0.0,
)
u = u * mask
if self.gate is not None:
v = self.input_gate(u)
if self.bottleneck is not None:
u = self.input_linear(u)
# Compute SS Kernel
L_kernel = L if self.L is None else min(L, round(self.L / rate))
k, k_state = self.kernel(
L=L_kernel, rate=rate, state=state
) # (C H L) (B C H L)
# Convolution
if self.bidirectional:
k0, k1 = rearrange(k, "(s c) h l -> s c h l", s=2)
k = F.pad(k0, (0, L)) + F.pad(k1.flip(-1), (L, 0))
k_f = torch.fft.rfft(k, n=L_kernel + L) # (C H L)
u_f = torch.fft.rfft(u, n=L_kernel + L) # (B H L)
y_f = contract("bhl,chl->bchl", u_f, k_f)
y = torch.fft.irfft(y_f, n=L_kernel + L)[..., :L] # (B C H L)
# Compute D term in state space equation - essentially a skip connection
y = y + contract("bhl,ch->bchl", u, self.D)
# Compute state update
if state is not None:
assert (
not self.bidirectional
), "Bidirectional not supported with state forwarding"
y = y + k_state #
next_state = self.kernel.forward_state(u, state)
else:
next_state = None
# Optional hyper-network multiplication
if self.hyper:
y, yh = rearrange(y, "b (s c) h l -> s b c h l", s=2)
y = self.hyper_activation(yh) * y
# Reshape to flatten channels
y = rearrange(y, "... c h l -> ... (c h) l")
y = self.dropout(self.activation(y))
if not self.transposed:
y = y.transpose(-1, -2)
y = self.output_linear(y)
if self.gate is not None:
if not self.transposed:
v = v.transpose(-1, -2)
y = self.output_gate(y * v)
return y, next_state
    def setup_step(self, **kwargs):
        """Prepare the SSM kernel for stepwise (recurrent) inference.

        Must be called before using :meth:`step`; simply delegates to the
        kernel's own setup routine.
        """
        self.kernel._setup_step(**kwargs)
def step(self, u, state, **kwargs):
"""Step one time step as a recurrent model.
Intended to be used during validation.
u: (B H)
state: (B H N)
Returns: output (B H), state (B H N)
"""
assert not self.training
y, next_state = self.kernel.step(u, state) # (B C H)
y = y + u.unsqueeze(-2) * self.D
y = rearrange(y, "b c h -> b (c h)")
y = self.activation(y)
if self.transposed:
y = self.output_linear(y.unsqueeze(-1)).squeeze(-1)
else:
y = self.output_linear(y)
return y, next_state
    def default_state(self, *batch_shape, device=None):
        """Create the initial recurrent state for the given batch shape.

        Note: ``device`` is accepted for interface compatibility but is not
        used here — the kernel allocates the state on its own parameters'
        device (see comment below).
        """
        # kernel is not a SequenceModule so it doesn't need to adhere to same interface
        # the kernel will know the device of its own parameters
        return self.kernel.default_state(*batch_shape)
    @property
    def d_output(self):
        """Output feature dimension (equal to the model dimension)."""
        return self.d_model
| 61,257 | 32.806843 | 125 | py |
espnet | espnet-master/espnet2/asr/decoder/s4_decoder.py | """Decoder definition."""
from typing import Any, List, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.state_spaces.model import SequenceModel
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.scorer_interface import BatchScorerInterface
class S4Decoder(AbsDecoder, BatchScorerInterface):
    """S4 decoder module.
    Args:
        vocab_size: output dim
        encoder_output_size: dimension of hidden vector
        input_layer: input layer type
        dropinp: input dropout
        dropout: dropout parameter applied on every residual and every layer
        prenorm: pre-norm vs. post-norm
        n_layers: number of layers
        transposed: transpose inputs so each layer receives (batch, dim, length)
        tie_dropout: tie dropout mask across sequence like nn.Dropout1d/nn.Dropout2d
        n_repeat: each layer is repeated n times per stage before applying pooling
        layer: layer config, must be specified
        residual: residual config
        norm: normalization config (e.g. layer vs batch)
        pool: config for pooling layer per stage
        track_norms: log norms of each layer output
        drop_path: drop rate for stochastic depth
    """
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        input_layer: str = "embed",
        dropinp: float = 0.0,
        dropout: float = 0.25,
        prenorm: bool = True,
        n_layers: int = 16,
        transposed: bool = False,
        tie_dropout: bool = False,
        n_repeat=1,
        layer=None,
        residual=None,
        norm=None,
        pool=None,
        track_norms=True,
        drop_path: float = 0.0,
    ):
        assert check_argument_types()
        super().__init__()
        self.d_model = encoder_output_size
        # <sos> and <eos> share the last vocabulary index
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        self.odim = vocab_size
        # dropout rate is stored; the actual Dropout modules are built below
        self.dropout = dropout
        if input_layer == "embed":
            self.embed = torch.nn.Embedding(vocab_size, self.d_model)
        else:
            raise NotImplementedError
        self.dropout_emb = torch.nn.Dropout(p=dropout)
        # stack of S4 (state-space) layers wrapped in a SequenceModel
        self.decoder = SequenceModel(
            self.d_model,
            n_layers=n_layers,
            transposed=transposed,
            dropout=dropout,
            tie_dropout=tie_dropout,
            prenorm=prenorm,
            n_repeat=n_repeat,
            layer=layer,
            residual=residual,
            norm=norm,
            pool=pool,
            track_norms=track_norms,
            dropinp=dropinp,
            drop_path=drop_path,
        )
        # projection from model dimension to vocabulary logits
        self.output = torch.nn.Linear(self.d_model, vocab_size)
    def init_state(self, x: torch.Tensor):
        """Initialize state."""
        # batch size 1; per-layer recurrent states come from the SequenceModel
        return self.decoder.default_state(1, device=x.device)
    def forward(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
        state=None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward decoder.
        Args:
            hs_pad: encoded memory, float32 (batch, maxlen_in, feat)
            hlens: (batch)
            ys_in_pad:
                input token ids, int64 (batch, maxlen_out)
                if input_layer == "embed"
                input tensor (batch, maxlen_out, #mels) in the other cases
            ys_in_lens: (batch)
        Returns:
            (tuple): tuple containing:
            x: decoded token score before softmax (batch, maxlen_out, token)
                if use_output_layer is True,
            olens: (batch, )
        """
        memory = hs_pad
        # (B, 1, maxlen_in) boolean mask over valid encoder positions
        memory_mask = (~make_pad_mask(hlens, maxlen=memory.size(1)))[:, None, :].to(
            memory.device
        )
        emb = self.embed(ys_in_pad)
        z, state = self.decoder(
            emb,
            state=state,
            memory=memory,
            lengths=ys_in_lens,
            mask=memory_mask,
        )
        decoded = self.output(z)
        return decoded, ys_in_lens
    def score(self, ys, state, x):
        # single-utterance scoring is not supported; use batch_score instead
        raise NotImplementedError
    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.
        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).
        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.
        """
        # merge states
        n_batch = len(ys)
        # only the most recent token is fed to the stepwise decoder
        ys = self.embed(ys[:, -1:])
        # workaround for remaining beam width of 1
        if type(states[0]) is list:
            states = states[0]
        assert ys.size(1) == 1, ys.shape
        ys = ys.squeeze(1)
        ys, states = self.decoder.step(ys, state=states, memory=xs)
        logp = self.output(ys).log_softmax(dim=-1)
        # split the batched layer states back into one state list per hypothesis
        states_list = [
            [state[b].unsqueeze(0) if state is not None else None for state in states]
            for b in range(n_batch)
        ]
        return logp, states_list
| 5,441 | 30.275862 | 86 | py |
espnet | espnet-master/espnet2/asr/decoder/rnn_decoder.py | import random
import numpy as np
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask, to_device
from espnet.nets.pytorch_backend.rnn.attentions import initial_att
def build_attention_list(
    eprojs: int,
    dunits: int,
    atype: str = "location",
    num_att: int = 1,
    num_encs: int = 1,
    aheads: int = 4,
    adim: int = 320,
    awin: int = 5,
    aconv_chans: int = 10,
    aconv_filts: int = 100,
    han_mode: bool = False,
    han_type=None,
    han_heads: int = 4,
    han_dim: int = 320,
    han_conv_chans: int = -1,
    han_conv_filts: int = 100,
    han_win: int = 5,
):
    """Build the attention module(s) used by RNNDecoder.

    Args:
        eprojs: projected encoder output dimension.
        dunits: decoder hidden size.
        atype: attention type (per-encoder sequence when num_encs > 1).
        num_att: number of attention modules (single-encoder case).
        num_encs: number of encoder streams.
        aheads/adim/awin/aconv_chans/aconv_filts: attention configuration
            (per-encoder sequences when num_encs > 1).
        han_mode: build a hierarchical attention network (HAN) over
            encoder streams instead of per-encoder attentions.
        han_*: HAN configuration, used only when han_mode is True.
    Returns:
        torch.nn.ModuleList of attention modules, or a single HAN module
        when han_mode is True (returned directly, not wrapped in a list).
    Raises:
        ValueError: if num_encs is not positive.
    """
    att_list = torch.nn.ModuleList()
    if num_encs == 1:
        for _ in range(num_att):
            att = initial_att(
                atype,
                eprojs,
                dunits,
                aheads,
                adim,
                awin,
                aconv_chans,
                aconv_filts,
            )
            att_list.append(att)
    elif num_encs > 1:  # no multi-speaker mode
        if han_mode:
            att = initial_att(
                han_type,
                eprojs,
                dunits,
                han_heads,
                han_dim,
                han_win,
                han_conv_chans,
                han_conv_filts,
                han_mode=True,
            )
            return att
        else:
            # one attention module per encoder; configs are indexed per encoder
            for idx in range(num_encs):
                att = initial_att(
                    atype[idx],
                    eprojs,
                    dunits,
                    aheads[idx],
                    adim[idx],
                    awin[idx],
                    aconv_chans[idx],
                    aconv_filts[idx],
                )
                att_list.append(att)
    else:
        # num_encs <= 0: a single encoder is valid, so the requirement is
        # "more than zero" (the previous message incorrectly said "one").
        raise ValueError(
            "Number of encoders needs to be more than zero. {}".format(num_encs)
        )
    return att_list
class RNNDecoder(AbsDecoder):
    """RNN (LSTM/GRU) attention decoder.

    Supports single- and multi-encoder inputs, scheduled sampling, and an
    optional residual connection from the attention context to the output
    layer (``context_residual``).
    """
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        rnn_type: str = "lstm",
        num_layers: int = 1,
        hidden_size: int = 320,
        sampling_probability: float = 0.0,
        dropout: float = 0.0,
        context_residual: bool = False,
        replace_sos: bool = False,
        num_encs: int = 1,
        att_conf: dict = get_default_kwargs(build_attention_list),
    ):
        # FIXME(kamo): The parts of num_spk should be refactored more more more
        assert check_argument_types()
        if rnn_type not in {"lstm", "gru"}:
            raise ValueError(f"Not supported: rnn_type={rnn_type}")
        super().__init__()
        eprojs = encoder_output_size
        self.dtype = rnn_type
        self.dunits = hidden_size
        self.dlayers = num_layers
        self.context_residual = context_residual
        # <sos> and <eos> share the last vocabulary index
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        self.odim = vocab_size
        self.sampling_probability = sampling_probability
        self.dropout = dropout
        self.num_encs = num_encs
        # for multilingual translation
        self.replace_sos = replace_sos
        self.embed = torch.nn.Embedding(vocab_size, hidden_size)
        self.dropout_emb = torch.nn.Dropout(p=dropout)
        self.decoder = torch.nn.ModuleList()
        self.dropout_dec = torch.nn.ModuleList()
        # first layer consumes embedding concatenated with attention context
        self.decoder += [
            torch.nn.LSTMCell(hidden_size + eprojs, hidden_size)
            if self.dtype == "lstm"
            else torch.nn.GRUCell(hidden_size + eprojs, hidden_size)
        ]
        self.dropout_dec += [torch.nn.Dropout(p=dropout)]
        for _ in range(1, self.dlayers):
            self.decoder += [
                torch.nn.LSTMCell(hidden_size, hidden_size)
                if self.dtype == "lstm"
                else torch.nn.GRUCell(hidden_size, hidden_size)
            ]
            self.dropout_dec += [torch.nn.Dropout(p=dropout)]
            # NOTE: dropout is applied only for the vertical connections
            # see https://arxiv.org/pdf/1409.2329.pdf
        if context_residual:
            self.output = torch.nn.Linear(hidden_size + eprojs, vocab_size)
        else:
            self.output = torch.nn.Linear(hidden_size, vocab_size)
        self.att_list = build_attention_list(
            eprojs=eprojs, dunits=hidden_size, **att_conf
        )
    def zero_state(self, hs_pad):
        """Return an all-zero hidden state matching hs_pad's batch size."""
        return hs_pad.new_zeros(hs_pad.size(0), self.dunits)
    def rnn_forward(self, ey, z_list, c_list, z_prev, c_prev):
        """Run one decoder time step through the stacked RNN cells.

        ``z_list``/``c_list`` are overwritten in place with the new hidden
        (and, for LSTM, cell) states; ``c_list`` is untouched for GRU.
        """
        if self.dtype == "lstm":
            z_list[0], c_list[0] = self.decoder[0](ey, (z_prev[0], c_prev[0]))
            for i in range(1, self.dlayers):
                z_list[i], c_list[i] = self.decoder[i](
                    self.dropout_dec[i - 1](z_list[i - 1]),
                    (z_prev[i], c_prev[i]),
                )
        else:
            z_list[0] = self.decoder[0](ey, z_prev[0])
            for i in range(1, self.dlayers):
                z_list[i] = self.decoder[i](
                    self.dropout_dec[i - 1](z_list[i - 1]), z_prev[i]
                )
        return z_list, c_list
    def forward(self, hs_pad, hlens, ys_in_pad, ys_in_lens, strm_idx=0):
        """Compute token scores for a padded target sequence (training)."""
        # to support mutiple encoder asr mode, in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            hs_pad = [hs_pad]
            hlens = [hlens]
        # attention index for the attention module
        # in SPA (speaker parallel attention),
        # att_idx is used to select attention module. In other cases, it is 0.
        att_idx = min(strm_idx, len(self.att_list) - 1)
        # hlens should be list of list of integer
        hlens = [list(map(int, hlens[idx])) for idx in range(self.num_encs)]
        # get dim, length info
        olength = ys_in_pad.size(1)
        # initialization
        c_list = [self.zero_state(hs_pad[0])]
        z_list = [self.zero_state(hs_pad[0])]
        for _ in range(1, self.dlayers):
            c_list.append(self.zero_state(hs_pad[0]))
            z_list.append(self.zero_state(hs_pad[0]))
        z_all = []
        if self.num_encs == 1:
            att_w = None
            self.att_list[att_idx].reset()  # reset pre-computation of h
        else:
            att_w_list = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * self.num_encs  # atts
            for idx in range(self.num_encs + 1):
                # reset pre-computation of h in atts and han
                self.att_list[idx].reset()
        # pre-computation of embedding
        eys = self.dropout_emb(self.embed(ys_in_pad))  # utt x olen x zdim
        # loop for an output sequence
        for i in range(olength):
            if self.num_encs == 1:
                att_c, att_w = self.att_list[att_idx](
                    hs_pad[0], hlens[0], self.dropout_dec[0](z_list[0]), att_w
                )
            else:
                # per-encoder attentions followed by hierarchical attention
                for idx in range(self.num_encs):
                    att_c_list[idx], att_w_list[idx] = self.att_list[idx](
                        hs_pad[idx],
                        hlens[idx],
                        self.dropout_dec[0](z_list[0]),
                        att_w_list[idx],
                    )
                hs_pad_han = torch.stack(att_c_list, dim=1)
                hlens_han = [self.num_encs] * len(ys_in_pad)
                att_c, att_w_list[self.num_encs] = self.att_list[self.num_encs](
                    hs_pad_han,
                    hlens_han,
                    self.dropout_dec[0](z_list[0]),
                    att_w_list[self.num_encs],
                )
            if i > 0 and random.random() < self.sampling_probability:
                # scheduled sampling: feed back the model's own prediction
                z_out = self.output(z_all[-1])
                z_out = np.argmax(z_out.detach().cpu(), axis=1)
                z_out = self.dropout_emb(self.embed(to_device(self, z_out)))
                ey = torch.cat((z_out, att_c), dim=1)  # utt x (zdim + hdim)
            else:
                # utt x (zdim + hdim)
                ey = torch.cat((eys[:, i, :], att_c), dim=1)
            # the current lists also serve as the previous states here;
            # rnn_forward overwrites them in place
            z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list, c_list)
            if self.context_residual:
                z_all.append(
                    torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
                )  # utt x (zdim + hdim)
            else:
                z_all.append(self.dropout_dec[-1](z_list[-1]))  # utt x (zdim)
        z_all = torch.stack(z_all, dim=1)
        z_all = self.output(z_all)
        # zero out scores on padded target positions
        z_all.masked_fill_(
            make_pad_mask(ys_in_lens, z_all, 1),
            0,
        )
        return z_all, ys_in_lens
    def init_state(self, x):
        """Build the initial scorer state for beam search."""
        # to support mutiple encoder asr mode, in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            x = [x]
        c_list = [self.zero_state(x[0].unsqueeze(0))]
        z_list = [self.zero_state(x[0].unsqueeze(0))]
        for _ in range(1, self.dlayers):
            c_list.append(self.zero_state(x[0].unsqueeze(0)))
            z_list.append(self.zero_state(x[0].unsqueeze(0)))
        # TODO(karita): support strm_index for `asr_mix`
        strm_index = 0
        att_idx = min(strm_index, len(self.att_list) - 1)
        if self.num_encs == 1:
            a = None
            self.att_list[att_idx].reset()  # reset pre-computation of h
        else:
            a = [None] * (self.num_encs + 1)  # atts + han
            for idx in range(self.num_encs + 1):
                # reset pre-computation of h in atts and han
                self.att_list[idx].reset()
        return dict(
            c_prev=c_list[:],
            z_prev=z_list[:],
            a_prev=a,
            workspace=(att_idx, z_list, c_list),
        )
    def score(self, yseq, state, x):
        """Score the next token for a single hypothesis (beam search)."""
        # to support mutiple encoder asr mode, in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            x = [x]
        att_idx, z_list, c_list = state["workspace"]
        # only the most recent token is embedded and fed in
        vy = yseq[-1].unsqueeze(0)
        ey = self.dropout_emb(self.embed(vy))  # utt list (1) x zdim
        if self.num_encs == 1:
            att_c, att_w = self.att_list[att_idx](
                x[0].unsqueeze(0),
                [x[0].size(0)],
                self.dropout_dec[0](state["z_prev"][0]),
                state["a_prev"],
            )
        else:
            att_w = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * self.num_encs  # atts
            for idx in range(self.num_encs):
                att_c_list[idx], att_w[idx] = self.att_list[idx](
                    x[idx].unsqueeze(0),
                    [x[idx].size(0)],
                    self.dropout_dec[0](state["z_prev"][0]),
                    state["a_prev"][idx],
                )
            h_han = torch.stack(att_c_list, dim=1)
            att_c, att_w[self.num_encs] = self.att_list[self.num_encs](
                h_han,
                [self.num_encs],
                self.dropout_dec[0](state["z_prev"][0]),
                state["a_prev"][self.num_encs],
            )
        ey = torch.cat((ey, att_c), dim=1)  # utt(1) x (zdim + hdim)
        z_list, c_list = self.rnn_forward(
            ey, z_list, c_list, state["z_prev"], state["c_prev"]
        )
        if self.context_residual:
            logits = self.output(
                torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
            )
        else:
            logits = self.output(self.dropout_dec[-1](z_list[-1]))
        logp = F.log_softmax(logits, dim=1).squeeze(0)
        return (
            logp,
            dict(
                c_prev=c_list[:],
                z_prev=z_list[:],
                a_prev=att_w,
                workspace=(att_idx, z_list, c_list),
            ),
        )
| 12,120 | 35.399399 | 81 | py |
espnet | espnet-master/espnet2/asr/decoder/transformer_decoder.py | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Decoder definition."""
from typing import Any, List, Sequence, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder_layer import DecoderLayer
from espnet.nets.pytorch_backend.transformer.dynamic_conv import DynamicConvolution
from espnet.nets.pytorch_backend.transformer.dynamic_conv2d import DynamicConvolution2D
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.lightconv import LightweightConvolution
from espnet.nets.pytorch_backend.transformer.lightconv2d import LightweightConvolution2D
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.scorer_interface import BatchScorerInterface
class BaseTransformerDecoder(AbsDecoder, BatchScorerInterface):
    """Base class of Transfomer decoder module.
    Args:
        vocab_size: output dim
        encoder_output_size: dimension of attention
        attention_heads: the number of heads of multi head attention
        linear_units: the number of units of position-wise feed forward
        num_blocks: the number of decoder blocks
        dropout_rate: dropout rate
        self_attention_dropout_rate: dropout rate for attention
        input_layer: input layer type
        use_output_layer: whether to use output layer
        pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
        normalize_before: whether to use layer_norm before the first block
        concat_after: whether to concat attention layer's input and output
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
    """
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
    ):
        assert check_argument_types()
        super().__init__()
        attention_dim = encoder_output_size
        if input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(vocab_size, attention_dim),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(vocab_size, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        else:
            raise ValueError(f"only 'embed' or 'linear' is supported: {input_layer}")
        self.normalize_before = normalize_before
        if self.normalize_before:
            self.after_norm = LayerNorm(attention_dim)
        if use_output_layer:
            self.output_layer = torch.nn.Linear(attention_dim, vocab_size)
        else:
            self.output_layer = None
        # Must set by the inheritance
        self.decoders = None
    def forward(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward decoder.
        Args:
            hs_pad: encoded memory, float32 (batch, maxlen_in, feat)
            hlens: (batch)
            ys_in_pad:
                input token ids, int64 (batch, maxlen_out)
                if input_layer == "embed"
                input tensor (batch, maxlen_out, #mels) in the other cases
            ys_in_lens: (batch)
        Returns:
            (tuple): tuple containing:
            x: decoded token score before softmax (batch, maxlen_out, token)
                if use_output_layer is True,
            olens: (batch, )
        """
        tgt = ys_in_pad
        # tgt_mask: (B, 1, L)
        tgt_mask = (~make_pad_mask(ys_in_lens)[:, None, :]).to(tgt.device)
        # m: (1, L, L)
        m = subsequent_mask(tgt_mask.size(-1), device=tgt_mask.device).unsqueeze(0)
        # tgt_mask: (B, L, L) — padding mask combined with the causal mask
        tgt_mask = tgt_mask & m
        memory = hs_pad
        memory_mask = (~make_pad_mask(hlens, maxlen=memory.size(1)))[:, None, :].to(
            memory.device
        )
        # Padding for Longformer
        if memory_mask.shape[-1] != memory.shape[1]:
            padlen = memory.shape[1] - memory_mask.shape[-1]
            memory_mask = torch.nn.functional.pad(
                memory_mask, (0, padlen), "constant", False
            )
        x = self.embed(tgt)
        x, tgt_mask, memory, memory_mask = self.decoders(
            x, tgt_mask, memory, memory_mask
        )
        if self.normalize_before:
            x = self.after_norm(x)
        if self.output_layer is not None:
            x = self.output_layer(x)
        # valid output lengths recovered from the (B, L, L) mask
        olens = tgt_mask.sum(1)
        return x, olens
    def forward_one_step(
        self,
        tgt: torch.Tensor,
        tgt_mask: torch.Tensor,
        memory: torch.Tensor,
        cache: List[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """Forward one step.
        Args:
            tgt: input token ids, int64 (batch, maxlen_out)
            tgt_mask: input token mask, (batch, maxlen_out)
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (include 1.2)
            memory: encoded memory, float32 (batch, maxlen_in, feat)
            cache: cached output list of (batch, max_time_out-1, size)
        Returns:
            y, cache: NN output value and cache per `self.decoders`.
            y.shape` is (batch, maxlen_out, token)
        """
        x = self.embed(tgt)
        if cache is None:
            cache = [None] * len(self.decoders)
        new_cache = []
        # each layer's full output is cached so the next step only has to
        # compute the newest position
        for c, decoder in zip(cache, self.decoders):
            x, tgt_mask, memory, memory_mask = decoder(
                x, tgt_mask, memory, None, cache=c
            )
            new_cache.append(x)
        if self.normalize_before:
            y = self.after_norm(x[:, -1])
        else:
            y = x[:, -1]
        if self.output_layer is not None:
            y = torch.log_softmax(self.output_layer(y), dim=-1)
        return y, new_cache
    def score(self, ys, state, x):
        """Score."""
        ys_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)
        logp, state = self.forward_one_step(
            ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state
        )
        return logp.squeeze(0), state
    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.
        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).
        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.
        """
        # merge states
        n_batch = len(ys)
        n_layers = len(self.decoders)
        if states[0] is None:
            batch_state = None
        else:
            # transpose state of [batch, layer] into [layer, batch]
            batch_state = [
                torch.stack([states[b][i] for b in range(n_batch)])
                for i in range(n_layers)
            ]
        # batch decoding
        ys_mask = subsequent_mask(ys.size(-1), device=xs.device).unsqueeze(0)
        logp, states = self.forward_one_step(ys, ys_mask, xs, cache=batch_state)
        # transpose state of [layer, batch] into [batch, layer]
        state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
        return logp, state_list
class TransformerDecoder(BaseTransformerDecoder):
    """Standard Transformer decoder.

    Stacks ``num_blocks`` DecoderLayer modules, each with multi-headed
    self-attention, source attention over the encoder output, and a
    position-wise feed-forward network.
    """
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        layer_drop_rate: float = 0.0,
    ):
        assert check_argument_types()
        super().__init__(
            vocab_size=vocab_size,
            encoder_output_size=encoder_output_size,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            input_layer=input_layer,
            use_output_layer=use_output_layer,
            pos_enc_class=pos_enc_class,
            normalize_before=normalize_before,
        )
        attention_dim = encoder_output_size
        # build the decoder stack; layer_drop_rate enables stochastic depth
        self.decoders = repeat(
            num_blocks,
            lambda lnum: DecoderLayer(
                attention_dim,
                MultiHeadedAttention(
                    attention_heads, attention_dim, self_attention_dropout_rate
                ),
                MultiHeadedAttention(
                    attention_heads, attention_dim, src_attention_dropout_rate
                ),
                PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
            layer_drop_rate,
        )
class LightweightConvolutionTransformerDecoder(BaseTransformerDecoder):
    """Transformer decoder with lightweight convolution self-attention.

    Each block replaces the usual self-attention with a LightweightConvolution
    module (kernel size set per layer via ``conv_kernel_length``), while
    source attention remains multi-headed attention.
    """
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        conv_wshare: int = 4,
        conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
        conv_usebias: int = False,
    ):
        assert check_argument_types()
        if len(conv_kernel_length) != num_blocks:
            raise ValueError(
                "conv_kernel_length must have equal number of values to num_blocks: "
                f"{len(conv_kernel_length)} != {num_blocks}"
            )
        super().__init__(
            vocab_size=vocab_size,
            encoder_output_size=encoder_output_size,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            input_layer=input_layer,
            use_output_layer=use_output_layer,
            pos_enc_class=pos_enc_class,
            normalize_before=normalize_before,
        )
        attention_dim = encoder_output_size
        self.decoders = repeat(
            num_blocks,
            lambda lnum: DecoderLayer(
                attention_dim,
                LightweightConvolution(
                    wshare=conv_wshare,
                    n_feat=attention_dim,
                    dropout_rate=self_attention_dropout_rate,
                    kernel_size=conv_kernel_length[lnum],
                    use_kernel_mask=True,
                    use_bias=conv_usebias,
                ),
                MultiHeadedAttention(
                    attention_heads, attention_dim, src_attention_dropout_rate
                ),
                PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
class LightweightConvolution2DTransformerDecoder(BaseTransformerDecoder):
    """Transformer decoder with 2D lightweight convolution self-attention.

    Same structure as LightweightConvolutionTransformerDecoder but each
    block uses LightweightConvolution2D in place of self-attention.
    """
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        conv_wshare: int = 4,
        conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
        conv_usebias: int = False,
    ):
        assert check_argument_types()
        if len(conv_kernel_length) != num_blocks:
            raise ValueError(
                "conv_kernel_length must have equal number of values to num_blocks: "
                f"{len(conv_kernel_length)} != {num_blocks}"
            )
        super().__init__(
            vocab_size=vocab_size,
            encoder_output_size=encoder_output_size,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            input_layer=input_layer,
            use_output_layer=use_output_layer,
            pos_enc_class=pos_enc_class,
            normalize_before=normalize_before,
        )
        attention_dim = encoder_output_size
        self.decoders = repeat(
            num_blocks,
            lambda lnum: DecoderLayer(
                attention_dim,
                LightweightConvolution2D(
                    wshare=conv_wshare,
                    n_feat=attention_dim,
                    dropout_rate=self_attention_dropout_rate,
                    kernel_size=conv_kernel_length[lnum],
                    use_kernel_mask=True,
                    use_bias=conv_usebias,
                ),
                MultiHeadedAttention(
                    attention_heads, attention_dim, src_attention_dropout_rate
                ),
                PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
class DynamicConvolutionTransformerDecoder(BaseTransformerDecoder):
    """Transformer decoder with dynamic convolution self-attention.

    Each block replaces self-attention with a DynamicConvolution module
    (kernel size set per layer via ``conv_kernel_length``); source attention
    remains multi-headed attention.
    """
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        conv_wshare: int = 4,
        conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
        conv_usebias: int = False,
    ):
        assert check_argument_types()
        if len(conv_kernel_length) != num_blocks:
            raise ValueError(
                "conv_kernel_length must have equal number of values to num_blocks: "
                f"{len(conv_kernel_length)} != {num_blocks}"
            )
        super().__init__(
            vocab_size=vocab_size,
            encoder_output_size=encoder_output_size,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            input_layer=input_layer,
            use_output_layer=use_output_layer,
            pos_enc_class=pos_enc_class,
            normalize_before=normalize_before,
        )
        attention_dim = encoder_output_size
        self.decoders = repeat(
            num_blocks,
            lambda lnum: DecoderLayer(
                attention_dim,
                DynamicConvolution(
                    wshare=conv_wshare,
                    n_feat=attention_dim,
                    dropout_rate=self_attention_dropout_rate,
                    kernel_size=conv_kernel_length[lnum],
                    use_kernel_mask=True,
                    use_bias=conv_usebias,
                ),
                MultiHeadedAttention(
                    attention_heads, attention_dim, src_attention_dropout_rate
                ),
                PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
class DynamicConvolution2DTransformerDecoder(BaseTransformerDecoder):
    """Transformer decoder with 2D dynamic convolution self-attention.

    Same structure as DynamicConvolutionTransformerDecoder but each block
    uses DynamicConvolution2D in place of self-attention.
    """
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        conv_wshare: int = 4,
        conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
        conv_usebias: int = False,
    ):
        assert check_argument_types()
        if len(conv_kernel_length) != num_blocks:
            raise ValueError(
                "conv_kernel_length must have equal number of values to num_blocks: "
                f"{len(conv_kernel_length)} != {num_blocks}"
            )
        super().__init__(
            vocab_size=vocab_size,
            encoder_output_size=encoder_output_size,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            input_layer=input_layer,
            use_output_layer=use_output_layer,
            pos_enc_class=pos_enc_class,
            normalize_before=normalize_before,
        )
        attention_dim = encoder_output_size
        self.decoders = repeat(
            num_blocks,
            lambda lnum: DecoderLayer(
                attention_dim,
                DynamicConvolution2D(
                    wshare=conv_wshare,
                    n_feat=attention_dim,
                    dropout_rate=self_attention_dropout_rate,
                    kernel_size=conv_kernel_length[lnum],
                    use_kernel_mask=True,
                    use_bias=conv_usebias,
                ),
                MultiHeadedAttention(
                    attention_heads, attention_dim, src_attention_dropout_rate
                ),
                PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
| 19,739 | 36.31569 | 88 | py |
espnet | espnet-master/espnet2/asr/decoder/mlm_decoder.py | # Copyright 2022 Yosuke Higuchi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Masked LM Decoder definition."""
from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder_layer import DecoderLayer
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
class MLMDecoder(AbsDecoder):
    """Masked language model (MLM) decoder for non-autoregressive ASR.

    A Transformer decoder stack whose target-side self-attention is *not*
    causal: every token may attend to any non-padded target position.  The
    vocabulary is extended by one id, reserved for the mask token used by
    the masked-LM training objective.
    """

    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
    ):
        """Initialize embedding, decoder layers, and optional output layer.

        Args:
            vocab_size: Output vocabulary size (one extra id is added
                internally for the mask token).
            encoder_output_size: Encoder output dimension; also used as the
                decoder attention dimension.
            attention_heads: Number of attention heads per layer.
            linear_units: Hidden units of the position-wise feed-forward.
            num_blocks: Number of decoder layers.
            dropout_rate: Dropout rate inside each decoder layer.
            positional_dropout_rate: Dropout rate of positional encoding.
            self_attention_dropout_rate: Dropout of target self-attention.
            src_attention_dropout_rate: Dropout of source attention.
            input_layer: "embed" for token-id input, "linear" for
                dense-feature input.
            use_output_layer: If True, add a final projection to the
                (extended) vocabulary.
            pos_enc_class: Positional encoding class.
            normalize_before: Use pre-LayerNorm in each sub-block.
            concat_after: Concatenate attention input and output before the
                linear layer in each block.
        """
        assert check_argument_types()
        super().__init__()
        attention_dim = encoder_output_size
        vocab_size += 1  # for mask token
        if input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(vocab_size, attention_dim),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(vocab_size, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        else:
            raise ValueError(f"only 'embed' or 'linear' is supported: {input_layer}")
        self.normalize_before = normalize_before
        if self.normalize_before:
            # Final LayerNorm applied after the last decoder layer.
            self.after_norm = LayerNorm(attention_dim)
        if use_output_layer:
            self.output_layer = torch.nn.Linear(attention_dim, vocab_size)
        else:
            self.output_layer = None
        self.decoders = repeat(
            num_blocks,
            lambda lnum: DecoderLayer(
                attention_dim,
                MultiHeadedAttention(
                    attention_heads, attention_dim, self_attention_dropout_rate
                ),
                MultiHeadedAttention(
                    attention_heads, attention_dim, src_attention_dropout_rate
                ),
                PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )

    def forward(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward decoder.

        Args:
            hs_pad: encoded memory, float32 (batch, maxlen_in, feat)
            hlens: (batch)
            ys_in_pad:
                input token ids, int64 (batch, maxlen_out)
                if input_layer == "embed"
                input tensor (batch, maxlen_out, #mels) in the other cases
            ys_in_lens: (batch)

        Returns:
            (tuple): tuple containing:

            x: decoded token score before softmax (batch, maxlen_out, token)
                if use_output_layer is True,
            olens: (batch, )
        """
        tgt = ys_in_pad
        # tgt_mask: (B, 1, L)
        tgt_mask = (~make_pad_mask(ys_in_lens)[:, None, :]).to(tgt.device)
        tgt_max_len = tgt_mask.size(-1)
        # tgt_mask_tmp: (B, L, L)
        tgt_mask_tmp = tgt_mask.transpose(1, 2).repeat(1, 1, tgt_max_len)
        # Full (non-causal) self-attention mask: position (i, j) is valid
        # iff both i and j lie inside the sequence length.
        tgt_mask = tgt_mask.repeat(1, tgt_max_len, 1) & tgt_mask_tmp
        memory = hs_pad
        memory_mask = (~make_pad_mask(hlens))[:, None, :].to(memory.device)
        x = self.embed(tgt)
        x, tgt_mask, memory, memory_mask = self.decoders(
            x, tgt_mask, memory, memory_mask
        )
        if self.normalize_before:
            x = self.after_norm(x)
        if self.output_layer is not None:
            x = self.output_layer(x)
        # NOTE(review): tgt_mask is (B, L, L) at this point, so this sums over
        # dim 1; confirm the resulting olens shape against the documented
        # "(batch,)" before relying on it downstream.
        olens = tgt_mask.sum(1)
        return x, olens
| 4,749 | 35.259542 | 85 | py |
espnet | espnet-master/espnet2/asr/decoder/transducer_decoder.py | """(RNN-)Transducer decoder definition."""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from typeguard import check_argument_types
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.transducer.beam_search_transducer import ExtendedHypothesis, Hypothesis
class TransducerDecoder(AbsDecoder):
    """(RNN-)Transducer decoder module.

    Args:
        vocab_size: Output dimension.
        layers_type: (RNN-)Decoder layers type.
        num_layers: Number of decoder layers.
        hidden_size: Number of decoder units per layer.
        dropout: Dropout rate for decoder layers.
        dropout_embed: Dropout rate for embedding layer.
        embed_pad: Embed/Blank symbol ID.
    """

    def __init__(
        self,
        vocab_size: int,
        rnn_type: str = "lstm",
        num_layers: int = 1,
        hidden_size: int = 320,
        dropout: float = 0.0,
        dropout_embed: float = 0.0,
        embed_pad: int = 0,
    ):
        assert check_argument_types()

        if rnn_type not in {"lstm", "gru"}:
            raise ValueError(f"Not supported: rnn_type={rnn_type}")

        super().__init__()

        self.embed = torch.nn.Embedding(vocab_size, hidden_size, padding_idx=embed_pad)
        self.dropout_embed = torch.nn.Dropout(p=dropout_embed)

        dec_net = torch.nn.LSTM if rnn_type == "lstm" else torch.nn.GRU

        # One single-layer RNN per decoder layer so hidden states can be
        # sliced and re-assembled layer-by-layer in rnn_forward().
        self.decoder = torch.nn.ModuleList(
            [
                dec_net(hidden_size, hidden_size, 1, batch_first=True)
                for _ in range(num_layers)
            ]
        )
        self.dropout_dec = torch.nn.ModuleList(
            [torch.nn.Dropout(p=dropout) for _ in range(num_layers)]
        )

        self.dlayers = num_layers
        self.dunits = hidden_size
        self.dtype = rnn_type
        self.odim = vocab_size

        self.ignore_id = -1
        self.blank_id = embed_pad

        self.device = next(self.parameters()).device

    def set_device(self, device: torch.device):
        """Set GPU device to use.

        Args:
            device: Device ID.
        """
        self.device = device

    def init_state(
        self, batch_size: int
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Initialize decoder states.

        Args:
            batch_size: Batch size.

        Returns:
            : Initial decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
        """
        h_n = torch.zeros(
            self.dlayers,
            batch_size,
            self.dunits,
            device=self.device,
        )

        if self.dtype == "lstm":
            c_n = torch.zeros(
                self.dlayers,
                batch_size,
                self.dunits,
                device=self.device,
            )

            return (h_n, c_n)

        # GRU has no cell state.
        return (h_n, None)

    def rnn_forward(
        self,
        sequence: torch.Tensor,
        state: Tuple[torch.Tensor, Optional[torch.Tensor]],
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]:
        """Encode source label sequences.

        Args:
            sequence: RNN input sequences. (B, D_emb)
            state: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))

        Returns:
            sequence: RNN output sequences. (B, D_dec)
            (h_next, c_next): Decoder hidden states. (N, B, D_dec), (N, B, D_dec))
        """
        h_prev, c_prev = state
        h_next, c_next = self.init_state(sequence.size(0))

        for layer in range(self.dlayers):
            if self.dtype == "lstm":
                (
                    sequence,
                    (
                        h_next[layer : layer + 1],
                        c_next[layer : layer + 1],
                    ),
                ) = self.decoder[layer](
                    sequence, hx=(h_prev[layer : layer + 1], c_prev[layer : layer + 1])
                )
            else:
                sequence, h_next[layer : layer + 1] = self.decoder[layer](
                    sequence, hx=h_prev[layer : layer + 1]
                )

            sequence = self.dropout_dec[layer](sequence)

        return sequence, (h_next, c_next)

    def forward(self, labels: torch.Tensor) -> torch.Tensor:
        """Encode source label sequences.

        Args:
            labels: Label ID sequences. (B, L)

        Returns:
            dec_out: Decoder output sequences. (B, T, U, D_dec)
        """
        init_state = self.init_state(labels.size(0))
        dec_embed = self.dropout_embed(self.embed(labels))

        dec_out, _ = self.rnn_forward(dec_embed, init_state)

        return dec_out

    def score(
        self, hyp: Hypothesis, cache: Dict[str, Any]
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]], torch.Tensor]:
        """One-step forward hypothesis.

        Args:
            hyp: Hypothesis.
            cache: Pairs of (dec_out, state) for each label sequence. (key)

        Returns:
            dec_out: Decoder output sequence. (1, D_dec)
            new_state: Decoder hidden states. ((N, 1, D_dec), (N, 1, D_dec))
            label: Label ID for LM. (1,)
        """
        label = torch.full((1, 1), hyp.yseq[-1], dtype=torch.long, device=self.device)

        # The label history uniquely identifies the decoder state, so it is
        # used as the memoization key.
        str_labels = "_".join(list(map(str, hyp.yseq)))

        if str_labels in cache:
            dec_out, dec_state = cache[str_labels]
        else:
            dec_emb = self.embed(label)

            dec_out, dec_state = self.rnn_forward(dec_emb, hyp.dec_state)
            cache[str_labels] = (dec_out, dec_state)

        return dec_out[0][0], dec_state, label[0]

    def batch_score(
        self,
        hyps: Union[List[Hypothesis], List[ExtendedHypothesis]],
        dec_states: Tuple[torch.Tensor, Optional[torch.Tensor]],
        cache: Dict[str, Any],
        use_lm: bool,
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
        """One-step forward hypotheses.

        Args:
            hyps: Hypotheses.
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
            cache: Pairs of (dec_out, dec_states) for each label sequences. (keys)
            use_lm: Whether to compute label ID sequences for LM.

        Returns:
            dec_out: Decoder output sequences. (B, D_dec)
            dec_states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
            lm_labels: Label ID sequences for LM. (B,)
        """
        final_batch = len(hyps)

        process = []
        done = [None] * final_batch

        # Split hypotheses into cached ones and those still to be computed.
        for i, hyp in enumerate(hyps):
            str_labels = "_".join(list(map(str, hyp.yseq)))

            if str_labels in cache:
                done[i] = cache[str_labels]
            else:
                process.append((str_labels, hyp.yseq[-1], hyp.dec_state))

        if process:
            # Use torch.tensor (not the legacy LongTensor constructor): the
            # legacy constructor rejects non-CPU devices, which breaks GPU
            # decoding.
            labels = torch.tensor(
                [[p[1]] for p in process], dtype=torch.long, device=self.device
            )
            p_dec_states = self.create_batch_states(
                self.init_state(labels.size(0)), [p[2] for p in process]
            )

            dec_emb = self.embed(labels)
            dec_out, new_states = self.rnn_forward(dec_emb, p_dec_states)

        # Merge freshly computed outputs back into batch order and cache them.
        j = 0
        for i in range(final_batch):
            if done[i] is None:
                state = self.select_state(new_states, j)

                done[i] = (dec_out[j], state)
                cache[process[j][0]] = (dec_out[j], state)

                j += 1

        dec_out = torch.cat([d[0] for d in done], dim=0)
        dec_states = self.create_batch_states(dec_states, [d[1] for d in done])

        if use_lm:
            # Same device-safety rationale as above.
            lm_labels = torch.tensor(
                [h.yseq[-1] for h in hyps], dtype=torch.long, device=self.device
            ).view(final_batch, 1)

            return dec_out, dec_states, lm_labels

        return dec_out, dec_states, None

    def select_state(
        self, states: Tuple[torch.Tensor, Optional[torch.Tensor]], idx: int
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Get specified ID state from decoder hidden states.

        Args:
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
            idx: State ID to extract.

        Returns:
            : Decoder hidden state for given ID.
              ((N, 1, D_dec), (N, 1, D_dec))
        """
        return (
            states[0][:, idx : idx + 1, :],
            states[1][:, idx : idx + 1, :] if self.dtype == "lstm" else None,
        )

    def create_batch_states(
        self,
        states: Tuple[torch.Tensor, Optional[torch.Tensor]],
        new_states: List[Tuple[torch.Tensor, Optional[torch.Tensor]]],
        check_list: Optional[List] = None,
    ) -> List[Tuple[torch.Tensor, Optional[torch.Tensor]]]:
        """Create decoder hidden states.

        Args:
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
            new_states: Decoder hidden states. [N x ((1, D_dec), (1, D_dec))]

        Returns:
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
        """
        return (
            torch.cat([s[0] for s in new_states], dim=1),
            torch.cat([s[1] for s in new_states], dim=1)
            if self.dtype == "lstm"
            else None,
        )
| 9,252 | 29.946488 | 88 | py |
espnet | espnet-master/espnet2/asr/decoder/hugging_face_transformers_decoder.py | #!/usr/bin/env python3
# 2022, University of Stuttgart; Pavel Denisov
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Hugging Face Transformers Decoder."""
import copy
import logging
from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
try:
from transformers import AutoModelForSeq2SeqLM
is_transformers_available = True
except ImportError:
is_transformers_available = False
class HuggingFaceTransformersDecoder(AbsDecoder):
    """Hugging Face Transformers Decoder.

    Wraps the decoder (and LM head) of a pretrained seq2seq model so it can
    be used as an ESPnet ASR decoder.

    Args:
        vocab_size: Not used in this class; the output size is fixed by the
            pretrained LM head.
        encoder_output_size: dimension of encoder attention
        model_name_or_path: Hugging Face Transformers model name
    """

    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        model_name_or_path: str,
    ):
        assert check_argument_types()
        super().__init__()
        if not is_transformers_available:
            raise ImportError(
                "`transformers` is not available. Please install it via `pip install"
                " transformers` or `cd /path/to/espnet/tools && . ./activate_python.sh"
                " && ./installers/install_transformers.sh`."
            )
        model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
        # Some architectures nest the decoder under `model.model`.
        if hasattr(model, "model"):
            self.decoder = model.model.decoder
        else:
            self.decoder = model.decoder
        self.lm_head = model.lm_head
        self.model_name_or_path = model_name_or_path
        # Snapshots of the pretrained weights so training can be reset to
        # them later via reload_pretrained_parameters().
        self.decoder_pretrained_params = copy.deepcopy(self.decoder.state_dict())
        self.lm_head_pretrained_params = copy.deepcopy(self.lm_head.state_dict())
        # Bridge a dimension mismatch between the speech encoder and the
        # pretrained decoder; identity when the sizes already match.
        if encoder_output_size != self.decoder.config.hidden_size:
            self.linear_in = torch.nn.Linear(
                encoder_output_size, self.decoder.config.hidden_size
            )
        else:
            self.linear_in = torch.nn.Identity()

    def forward(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward decoder.

        Args:
            hs_pad: encoded memory, float32 (batch, maxlen_in, feat)
            hlens: (batch)
            ys_in_pad: input tensor (batch, maxlen_out, #mels)
            ys_in_lens: (batch)

        Returns:
            (tuple): tuple containing:

            x: decoded token score before softmax (batch, maxlen_out, token)
                if use_output_layer is True,
            olens: (batch, )
        """
        args = {"return_dict": True}
        if self.decoder.__class__.__name__ == "MBartDecoder":
            # NOTE(review): forces the first token id to 2 for MBart
            # (presumably its decoder-start/eos id) and mutates the caller's
            # ys_in_pad in place — confirm callers do not reuse the tensor.
            ys_in_pad[:, 0] = 2
        args["input_ids"] = ys_in_pad
        # Float masks (1.0 = keep, 0.0 = padding) as expected by transformers.
        mask = (~make_pad_mask(ys_in_lens)).to(ys_in_pad.device).float()
        args["attention_mask"] = mask
        args["encoder_hidden_states"] = self.linear_in(hs_pad)
        hs_mask = (~make_pad_mask(hlens)).to(hs_pad.device).float()
        args["encoder_attention_mask"] = hs_mask
        x = self.decoder(**args).last_hidden_state
        x = self.lm_head(x)
        olens = mask.sum(1).to(torch.int)
        return x, olens

    def reload_pretrained_parameters(self):
        """Restore decoder and LM head to their pretrained weights."""
        self.decoder.load_state_dict(self.decoder_pretrained_params)
        self.lm_head.load_state_dict(self.lm_head_pretrained_params)
        logging.info("Pretrained Transformers model parameters reloaded!")
| 3,571 | 30.610619 | 87 | py |
espnet | espnet-master/espnet2/asr/decoder/abs_decoder.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
from espnet.nets.scorer_interface import ScorerInterface
class AbsDecoder(torch.nn.Module, ScorerInterface, ABC):
    """Abstract base class for ESPnet2 ASR decoders.

    A concrete decoder maps padded encoder states plus (shifted) target
    tokens to token scores via :meth:`forward`; beam-search scoring hooks
    are inherited from ``ScorerInterface``.
    """

    @abstractmethod
    def forward(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score target tokens given encoder output; must be overridden."""
        raise NotImplementedError
| 447 | 22.578947 | 56 | py |
espnet | espnet-master/espnet2/asr/decoder/whisper_decoder.py | import copy
from typing import Any, List, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet.nets.scorer_interface import BatchScorerInterface
class OpenAIWhisperDecoder(AbsDecoder, BatchScorerInterface):
    """Transformer-based Speech-to-Text Decoder from OpenAI's Whisper Model:

    URL: https://github.com/openai/whisper
    """

    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        dropout_rate: float = 0.0,
        whisper_model: str = "small",
        download_dir: str = None,
    ):
        """Load a pretrained Whisper model and keep only its text decoder.

        Args:
            vocab_size: Target vocabulary size; if it differs from the
                pretrained embedding size, the token embedding is
                re-initialized (see below).
            encoder_output_size: Not referenced in this constructor; kept
                for the common decoder interface.
            dropout_rate: Dropout applied between decoder blocks (the
                original Whisper uses none).
            whisper_model: Whisper model tag, e.g. "small".
            download_dir: Optional directory for downloaded weights.
        """
        try:
            import whisper
        except Exception as e:
            print("Error: whisper is not properly installed.")
            print(
                "Please install whisper with: cd ${MAIN_ROOT}/tools && "
                "./installers/install_whisper.sh"
            )
            raise e

        assert check_argument_types()
        super().__init__()

        assert whisper_model in whisper.available_models()
        _model = whisper.load_model(whisper_model, download_root=download_dir)
        # Keep a private copy of the text decoder only.
        self.decoders = copy.deepcopy(_model.decoder)
        attention_dim = self.decoders.token_embedding.embedding_dim

        # note that originally Whisper doesn't use dropouts
        self.dropout = torch.nn.Dropout(dropout_rate)

        # vocab size mismatch -> reinitialize embedding
        # orig vocab size (multilingual): 51865
        # orig vocab size (english): 51864
        if vocab_size != self.decoders.token_embedding.num_embeddings:
            orig_emb_std, orig_emb_mean = torch.std_mean(
                self.decoders.token_embedding.weight
            )
            self.decoders.token_embedding = torch.nn.Embedding(
                vocab_size, attention_dim
            )
            # New embedding drawn to match the pretrained weight statistics.
            torch.nn.init.normal_(
                self.decoders.token_embedding.weight,
                orig_emb_mean.item(),
                orig_emb_std.item(),
            )

        self.decoders.train()
        # Drop the full model (incl. audio encoder) to free memory.
        del _model

    def forward(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward decoder.

        Args:
            hs_pad: encoded memory, float32 (batch, maxlen_in, feat)
            hlens: (batch)
            ys_in_pad:
                input token ids, int64 (batch, maxlen_out)
                if input_layer == "embed"
                input tensor (batch, maxlen_out, #mels) in the other cases
            ys_in_lens: (batch)

        Returns:
            (tuple): tuple containing:

            x: decoded token score before softmax (batch, maxlen_out, token)
                if use_output_layer is True,
            olens: (batch, )
        """
        tgt, memory = ys_in_pad, hs_pad
        tgt = (
            self.decoders.token_embedding(tgt)
            + self.decoders.positional_embedding[: tgt.size(1)]
        )
        tgt = self.dropout(tgt)

        x = tgt.to(memory.dtype)

        for layer, block in enumerate(self.decoders.blocks):
            x = block(x, memory, mask=self.decoders.mask)
            # No dropout after the last block (final LayerNorm follows).
            if layer < len(self.decoders.blocks) - 1:
                x = self.dropout(x)

        x = self.decoders.ln(x)
        # Output projection is tied to the token embedding matrix.
        x = (
            x @ torch.transpose(self.decoders.token_embedding.weight.to(x.dtype), 0, 1)
        ).float()

        return x, ys_in_lens

    def forward_one_step(
        self,
        tgt: torch.Tensor,
        tgt_mask: torch.Tensor,
        memory: torch.Tensor,
        cache: List[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """Forward one step.

        Args:
            tgt: input token ids, int64 (batch, maxlen_out)
            tgt_mask: input token mask,  (batch, maxlen_out)
                      dtype=torch.uint8 in PyTorch 1.2-
                      dtype=torch.bool in PyTorch 1.2+ (include 1.2)
            memory: encoded memory, float32  (batch, maxlen_in, feat)
            cache: cached output list of (batch, max_time_out-1, size)

        Returns:
            y, cache: NN output value and cache per `self.decoders`.
                y.shape` is (batch, maxlen_out, token)

        NOTE (Shih-Lun):
            cache implementation is ignored for now
            for simplicity & correctness
        """
        x = (
            self.decoders.token_embedding(tgt)
            + self.decoders.positional_embedding[: tgt.size(1)]
        )
        x = self.dropout(x)
        x = x.to(memory.dtype)

        for layer, block in enumerate(self.decoders.blocks):
            x = block(x, memory, mask=self.decoders.mask)
            if layer < len(self.decoders.blocks) - 1:
                x = self.dropout(x)

        x = self.decoders.ln(x)
        # Only the last position is needed for incremental decoding.
        y = x[:, -1]
        y = (
            y @ torch.transpose(self.decoders.token_embedding.weight.to(x.dtype), 0, 1)
        ).float()
        y = torch.log_softmax(y, dim=-1)

        return y, None

    def score(self, ys, state, x):
        """Score a single hypothesis (log-probs for the next token)."""
        logp, state = self.forward_one_step(
            ys.unsqueeze(0), torch.empty(0), x.unsqueeze(0), cache=state  # dummy mask
        )
        return logp.squeeze(0), state

    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.
        """
        # batch decoding, dummy mask is passed
        # States are not propagated because the cache is unsupported above.
        logp, states = self.forward_one_step(ys, torch.empty(0), xs, cache=None)

        return logp, None
| 6,090 | 32.467033 | 87 | py |
espnet | espnet-master/espnet2/lm/espnet_model.py | from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.lm.abs_model import AbsLM
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class ESPnetLanguageModel(AbsESPnetModel):
    """Language-model wrapper that computes token-level NLL for an ``AbsLM``.

    The wrapped LM predicts the next token; this class builds the shifted
    input/target pair, zeroes padded positions, and reduces the per-token
    negative log-likelihood into a training loss.
    """

    def __init__(self, lm: AbsLM, vocab_size: int, ignore_id: int = 0):
        assert check_argument_types()
        super().__init__()
        self.lm = lm
        # <sos> and <eos> share the last vocabulary id.
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1

        # ignore_id may be assumed as 0, shared with CTC-blank symbol for ASR.
        self.ignore_id = ignore_id

    def nll(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        max_length: Optional[int] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute negative log likelihood (nll)

        Normally, this function is called in batchify_nll.

        Args:
            text: (Batch, Length)
            text_lengths: (Batch,)
            max_length: int

        Returns:
            nll: Per-token negative log likelihood with padded positions
                zeroed. (Batch, Length + 1)
            x_lengths: Valid lengths including the prepended symbol. (Batch,)
        """
        batch_size = text.size(0)
        # For data parallel
        if max_length is None:
            text = text[:, : text_lengths.max()]
        else:
            text = text[:, :max_length]

        # 1. Create a sentence pair like '<sos> w1 w2 w3' and 'w1 w2 w3 <eos>'
        # text: (Batch, Length) -> x, y: (Batch, Length + 1)
        x = F.pad(text, [1, 0], "constant", self.eos)
        t = F.pad(text, [0, 1], "constant", self.ignore_id)
        for i, l in enumerate(text_lengths):
            # Sentence-end symbol after each sequence (sos == eos here).
            t[i, l] = self.sos
        x_lengths = text_lengths + 1

        # 2. Forward Language model
        # x: (Batch, Length) -> y: (Batch, Length, NVocab)
        y, _ = self.lm(x, None)

        # 3. Calc negative log likelihood
        # nll: (BxL,)
        nll = F.cross_entropy(y.view(-1, y.shape[-1]), t.view(-1), reduction="none")
        # nll: (BxL,) -> (BxL,)
        # Padding is zeroed here instead of using cross_entropy's ignore_index.
        if max_length is None:
            nll.masked_fill_(make_pad_mask(x_lengths).to(nll.device).view(-1), 0.0)
        else:
            nll.masked_fill_(
                make_pad_mask(x_lengths, maxlen=max_length + 1).to(nll.device).view(-1),
                0.0,
            )
        # nll: (BxL,) -> (B, L)
        nll = nll.view(batch_size, -1)
        return nll, x_lengths

    def batchify_nll(
        self, text: torch.Tensor, text_lengths: torch.Tensor, batch_size: int = 100
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute negative log likelihood (nll) from a language model

        To avoid OOM, this function separates the input into batches.
        It then calls nll for each batch, combines and returns the results.

        Args:
            text: (Batch, Length)
            text_lengths: (Batch,)
            batch_size: int, number of samples in each sub-batch when
                computing nll; you may change this to avoid OOM or to
                increase throughput.
        """
        total_num = text.size(0)
        if total_num <= batch_size:
            nll, x_lengths = self.nll(text, text_lengths)
        else:
            nlls = []
            x_lengths = []
            # Pad all sub-batches to a common length so results concatenate.
            max_length = text_lengths.max()

            start_idx = 0
            while True:
                end_idx = min(start_idx + batch_size, total_num)
                batch_text = text[start_idx:end_idx, :]
                batch_text_lengths = text_lengths[start_idx:end_idx]
                # batch_nll: [B * T]
                batch_nll, batch_x_lengths = self.nll(
                    batch_text, batch_text_lengths, max_length=max_length
                )
                nlls.append(batch_nll)
                x_lengths.append(batch_x_lengths)
                start_idx = end_idx
                if start_idx == total_num:
                    break
            nll = torch.cat(nlls)
            x_lengths = torch.cat(x_lengths)
        assert nll.size(0) == total_num
        assert x_lengths.size(0) == total_num
        return nll, x_lengths

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Compute the token-averaged LM loss for a text batch.

        Returns (loss, stats, weight) as expected by the ESPnet trainer.
        """
        nll, y_lengths = self.nll(text, text_lengths)
        ntokens = y_lengths.sum()
        loss = nll.sum() / ntokens
        stats = dict(loss=loss.detach())

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, ntokens), loss.device)
        return loss, stats, weight

    def collect_feats(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """No feature extraction is needed for LM training; return nothing."""
        return {}
| 4,785 | 34.191176 | 88 | py |
espnet | espnet-master/espnet2/lm/seq_rnn_lm.py | """Sequential implementation of Recurrent Neural Network Language Model."""
from typing import Tuple, Union
import torch
import torch.nn as nn
from typeguard import check_argument_types
from espnet2.lm.abs_model import AbsLM
class SequentialRNNLM(AbsLM):
    """Sequential RNNLM.

    See also:
        https://github.com/pytorch/examples/blob/4581968193699de14b56527296262dd76ab43557/word_language_model/model.py
    """

    def __init__(
        self,
        vocab_size: int,
        unit: int = 650,
        nhid: int = None,
        nlayers: int = 2,
        dropout_rate: float = 0.0,
        tie_weights: bool = False,
        rnn_type: str = "lstm",
        ignore_id: int = 0,
    ):
        assert check_argument_types()
        super().__init__()

        # Embedding size; hidden size defaults to the same value.
        ninp = unit
        if nhid is None:
            nhid = unit
        # Upper-case so the name matches the torch.nn class ("lstm" -> "LSTM").
        rnn_type = rnn_type.upper()

        self.drop = nn.Dropout(dropout_rate)
        self.encoder = nn.Embedding(vocab_size, ninp, padding_idx=ignore_id)
        if rnn_type in ["LSTM", "GRU"]:
            rnn_class = getattr(nn, rnn_type)
            self.rnn = rnn_class(
                ninp, nhid, nlayers, dropout=dropout_rate, batch_first=True
            )
        else:
            try:
                nonlinearity = {"RNN_TANH": "tanh", "RNN_RELU": "relu"}[rnn_type]
            except KeyError:
                raise ValueError(
                    """An invalid option for `--model` was supplied,
                    options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']"""
                )
            self.rnn = nn.RNN(
                ninp,
                nhid,
                nlayers,
                nonlinearity=nonlinearity,
                dropout=dropout_rate,
                batch_first=True,
            )
        self.decoder = nn.Linear(nhid, vocab_size)

        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models"
        # (Press & Wolf 2016) https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers:
        # A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            if nhid != ninp:
                raise ValueError(
                    "When using the tied flag, nhid must be equal to emsize"
                )
            self.decoder.weight = self.encoder.weight

        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers

    def zero_state(self):
        """Initialize LM state filled with zero values.

        Note: the returned tensors have no batch dimension; they describe
        the state of a single hypothesis.
        """
        if isinstance(self.rnn, torch.nn.LSTM):
            h = torch.zeros((self.nlayers, self.nhid), dtype=torch.float)
            c = torch.zeros((self.nlayers, self.nhid), dtype=torch.float)
            state = h, c
        else:
            state = torch.zeros((self.nlayers, self.nhid), dtype=torch.float)
        return state

    def forward(
        self, input: torch.Tensor, hidden: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute per-step vocabulary logits.

        Args:
            input: Token ids. (Batch, Length)
            hidden: RNN hidden state(s) to start from (None lets the RNN
                initialize them).

        Returns:
            Logits of shape (Batch, Length, Vocab) and the next hidden state.
        """
        emb = self.drop(self.encoder(input))
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output)
        decoded = self.decoder(
            output.contiguous().view(output.size(0) * output.size(1), output.size(2))
        )
        return (
            decoded.view(output.size(0), output.size(1), decoded.size(1)),
            hidden,
        )

    def score(
        self,
        y: torch.Tensor,
        state: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
        x: torch.Tensor,
    ) -> Tuple[torch.Tensor, Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]]:
        """Score new token.

        Args:
            y: 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens
            x: 2D encoder feature that generates ys.

        Returns:
            Tuple of
                torch.float32 scores for next token (n_vocab)
                and next state for ys

        """
        # Only the last token is fed; the RNN state carries the history.
        y, new_state = self(y[-1].view(1, 1), state)
        logp = y.log_softmax(dim=-1).view(-1)
        return logp, new_state

    def batch_score(
        self, ys: torch.Tensor, states: torch.Tensor, xs: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.

        """
        if states[0] is None:
            states = None
        elif isinstance(self.rnn, torch.nn.LSTM):
            # states: Batch x 2 x (Nlayers, Dim) -> 2 x (Nlayers, Batch, Dim)
            h = torch.stack([h for h, c in states], dim=1)
            c = torch.stack([c for h, c in states], dim=1)
            states = h, c
        else:
            # states: Batch x (Nlayers, Dim) -> (Nlayers, Batch, Dim)
            states = torch.stack(states, dim=1)

        # Feed only the last token per hypothesis.
        ys, states = self(ys[:, -1:], states)
        # ys: (Batch, 1, Nvocab) -> (Batch, NVocab)
        assert ys.size(1) == 1, ys.shape
        ys = ys.squeeze(1)
        logp = ys.log_softmax(dim=-1)

        # state: Change to batch first
        if isinstance(self.rnn, torch.nn.LSTM):
            # h, c: (Nlayers, Batch, Dim)
            h, c = states
            # states: Batch x 2 x (Nlayers, Dim)
            states = [(h[:, i], c[:, i]) for i in range(h.size(1))]
        else:
            # states: (Nlayers, Batch, Dim) -> Batch x (Nlayers, Dim)
            states = [states[:, i] for i in range(states.size(1))]

        return logp, states
| 5,887 | 32.83908 | 118 | py |
espnet | espnet-master/espnet2/lm/transformer_lm.py | from typing import Any, List, Tuple
import torch
import torch.nn as nn
from espnet2.lm.abs_model import AbsLM
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
class TransformerLM(AbsLM):
    """Transformer language model.

    Token ids are embedded, passed through a Transformer encoder under a
    causal (subsequent) mask, and projected back to vocabulary logits.
    """

    def __init__(
        self,
        vocab_size: int,
        pos_enc: str = None,
        embed_unit: int = 128,
        att_unit: int = 256,
        head: int = 2,
        unit: int = 1024,
        layer: int = 4,
        dropout_rate: float = 0.5,
    ):
        """Initialize embedding, Transformer encoder, and output projection.

        Args:
            vocab_size: Vocabulary size.
            pos_enc: "sinusoidal" for positional encoding, None for none.
            embed_unit: Token embedding dimension.
            att_unit: Attention (model) dimension.
            head: Number of attention heads.
            unit: Feed-forward hidden units.
            layer: Number of Transformer layers.
            dropout_rate: Dropout rate.
        """
        super().__init__()
        if pos_enc == "sinusoidal":
            pos_enc_class = PositionalEncoding
        elif pos_enc is None:

            def pos_enc_class(*args, **kwargs):
                # No positional encoding: an empty Sequential acts as identity.
                return nn.Sequential()  # identity

        else:
            raise ValueError(f"unknown pos-enc option: {pos_enc}")

        self.embed = nn.Embedding(vocab_size, embed_unit)
        self.encoder = Encoder(
            idim=embed_unit,
            attention_dim=att_unit,
            attention_heads=head,
            linear_units=unit,
            num_blocks=layer,
            dropout_rate=dropout_rate,
            input_layer="linear",
            pos_enc_class=pos_enc_class,
        )
        self.decoder = nn.Linear(att_unit, vocab_size)

    def _target_mask(self, ys_in_pad):
        """Combine the padding mask (id 0 = padding) with a causal mask."""
        ys_mask = ys_in_pad != 0
        m = subsequent_mask(ys_mask.size(-1), device=ys_mask.device).unsqueeze(0)
        return ys_mask.unsqueeze(-2) & m

    def forward(self, input: torch.Tensor, hidden: None) -> Tuple[torch.Tensor, None]:
        """Compute LM logits from buffer sequences.

        Args:
            input (torch.Tensor): Input ids. (batch, len)
            hidden: Unused; kept for the AbsLM interface.

        Returns:
            Logits of shape (batch, len, vocab_size) and None (stateless).
        """
        x = self.embed(input)
        mask = self._target_mask(input)
        h, _ = self.encoder(x, mask)
        y = self.decoder(h)
        return y, None

    def score(
        self, y: torch.Tensor, state: Any, x: torch.Tensor
    ) -> Tuple[torch.Tensor, Any]:
        """Score new token.

        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens
            x (torch.Tensor): encoder feature that generates ys.

        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                torch.float32 scores for next token (vocab_size)
                and next state for ys

        """
        y = y.unsqueeze(0)
        h, _, cache = self.encoder.forward_one_step(
            self.embed(y), self._target_mask(y), cache=state
        )
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1).squeeze(0)
        return logp, cache

    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, vocab_size)`
                and next state list for ys.

        """
        # merge states
        n_batch = len(ys)
        n_layers = len(self.encoder.encoders)
        if states[0] is None:
            batch_state = None
        else:
            # transpose state of [batch, layer] into [layer, batch]
            batch_state = [
                torch.stack([states[b][i] for b in range(n_batch)])
                for i in range(n_layers)
            ]

        # batch decoding
        h, _, states = self.encoder.forward_one_step(
            self.embed(ys), self._target_mask(ys), cache=batch_state
        )
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1)

        # transpose state of [layer, batch] into [batch, layer]
        state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
        return logp, state_list
| 4,239 | 31.615385 | 86 | py |
espnet | espnet-master/espnet2/lm/abs_model.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
from espnet.nets.scorer_interface import BatchScorerInterface
class AbsLM(torch.nn.Module, BatchScorerInterface, ABC):
    """Abstract language-model interface.

    Subclasses implement :meth:`forward`; the loss computation lives in a
    wrapper model (e.g. ``ESPnetLanguageModel``) that receives an ``AbsLM``
    instance, so every LM flavour shares one training recipe (delegate
    pattern).  Beam-search hooks come from ``BatchScorerInterface``.
    """

    @abstractmethod
    def forward(
        self, input: torch.Tensor, hidden: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Map input ids and hidden state to (logits, next hidden state)."""
        raise NotImplementedError
| 747 | 24.793103 | 66 | py |
espnet | espnet-master/espnet2/iterators/sequence_iter_factory.py | import random
from functools import partial
from typing import Any, Sequence, Union
import numpy as np
from torch.utils.data import DataLoader
from typeguard import check_argument_types
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.samplers.abs_sampler import AbsSampler
def worker_init_fn(worker_id, base_seed=0):
    """Seed Python's and NumPy's RNGs for one DataLoader worker.

    Each worker receives the deterministic seed ``base_seed + worker_id``,
    making data-loading randomness reproducible yet distinct per worker.
    """
    worker_seed = base_seed + worker_id
    for seeder in (random.seed, np.random.seed):
        seeder(worker_seed)
class RawSampler(AbsSampler):
    """Trivial sampler that serves a pre-built list of batches verbatim.

    ``generate`` ignores its ``seed`` argument: the batch order is fixed by
    the caller, and every call returns a fresh copy of the same sequence.
    """

    def __init__(self, batches):
        self.batches = batches

    def __len__(self):
        return len(self.batches)

    def __iter__(self):
        yield from self.batches

    def generate(self, seed):
        # Copy so callers may shuffle the result without touching self.batches.
        return list(self.batches)
class SequenceIterFactory(AbsIterFactory):
    """Build iterator for each epoch.

    This class simply creates pytorch DataLoader except for the following points:
    - The random seed is decided according to the number of epochs. This feature
      guarantees reproducibility when resuming from middle of training process.
    - Enable to restrict the number of samples for one epoch. This feature
      controls the interval number between training and evaluation.
    """

    def __init__(
        self,
        dataset,
        batches: Union[AbsSampler, Sequence[Sequence[Any]]],
        num_iters_per_epoch: int = None,
        seed: int = 0,
        shuffle: bool = False,
        num_workers: int = 0,
        collate_fn=None,
        pin_memory: bool = False,
    ):
        assert check_argument_types()
        # A raw list of batches is wrapped so both cases expose the same
        # AbsSampler interface (len / iter / generate(seed)).
        if not isinstance(batches, AbsSampler):
            self.sampler = RawSampler(batches)
        else:
            self.sampler = batches
        self.dataset = dataset
        self.num_iters_per_epoch = num_iters_per_epoch
        self.shuffle = shuffle
        self.seed = seed
        self.num_workers = num_workers
        self.collate_fn = collate_fn
        # https://discuss.pytorch.org/t/what-is-the-disadvantage-of-using-pin-memory/1702
        self.pin_memory = pin_memory

    def build_iter(self, epoch: int, shuffle: bool = None) -> DataLoader:
        """Return a DataLoader whose batch order is a pure function of (epoch, seed)."""
        if shuffle is None:
            shuffle = self.shuffle
        if self.num_iters_per_epoch is not None:
            N = len(self.sampler)
            # If corpus size is larger than the num_per_epoch
            if self.num_iters_per_epoch < N:
                N = len(self.sampler)
                # real_epoch: number of complete passes over the corpus made
                # before this epoch's slice ends; offset: index just past this
                # epoch's last batch within pass "real_epoch".
                real_epoch, offset = divmod(self.num_iters_per_epoch * epoch, N)
                if offset >= self.num_iters_per_epoch:
                    # The whole slice lies inside a single (possibly shuffled)
                    # pass over the corpus.
                    current_batches = self.sampler.generate(real_epoch + self.seed)
                    if shuffle:
                        np.random.RandomState(real_epoch + self.seed).shuffle(
                            current_batches
                        )
                    batches = current_batches[
                        offset - self.num_iters_per_epoch : offset
                    ]
                else:
                    # The slice straddles a pass boundary: take the tail of the
                    # previous pass plus the head of the current one.  Each pass
                    # is (re)generated and shuffled with its own seed so that
                    # resuming at any epoch reproduces the same order.
                    prev_batches = self.sampler.generate(real_epoch - 1 + self.seed)
                    current_batches = self.sampler.generate(real_epoch + self.seed)
                    if shuffle:
                        np.random.RandomState(real_epoch - 1 + self.seed).shuffle(
                            prev_batches
                        )
                        np.random.RandomState(real_epoch + self.seed).shuffle(
                            current_batches
                        )
                    batches = (
                        prev_batches[offset - self.num_iters_per_epoch :]
                        + current_batches[:offset]
                    )
            # If corpus size is less than the num_per_epoch
            else:
                # One epoch consumes the corpus more than once.  Walk passes
                # (_epoch) with a cursor, concatenating slices until
                # num_iters_per_epoch batches have been collected.
                _epoch, _cursor = divmod(self.num_iters_per_epoch * (epoch - 1), N)
                _remain = self.num_iters_per_epoch
                batches = []
                current_batches = self.sampler.generate(_epoch + self.seed)
                if shuffle:
                    np.random.RandomState(_epoch + self.seed).shuffle(current_batches)
                while _remain > 0:
                    _batches = current_batches[_cursor : _cursor + _remain]
                    batches += _batches
                    if _cursor + _remain >= N:
                        # Exhausted this pass: start the next one from 0 with
                        # its own deterministic shuffle seed.
                        _epoch += 1
                        _cursor = 0
                        current_batches = self.sampler.generate(_epoch + self.seed)
                        if shuffle:
                            np.random.RandomState(_epoch + self.seed).shuffle(
                                current_batches
                            )
                    else:
                        _cursor = _cursor + _remain
                    _remain -= len(_batches)

                assert len(batches) == self.num_iters_per_epoch
        else:
            # No per-epoch limit: use one full pass, shuffled by epoch seed.
            batches = self.sampler.generate(epoch + self.seed)
            if shuffle:
                np.random.RandomState(epoch + self.seed).shuffle(batches)

        # For backward compatibility for pytorch DataLoader
        if self.collate_fn is not None:
            kwargs = dict(collate_fn=self.collate_fn)
        else:
            kwargs = {}

        return DataLoader(
            dataset=self.dataset,
            batch_sampler=batches,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            worker_init_fn=partial(worker_init_fn, base_seed=epoch + self.seed),
            **kwargs,
        )
| 5,509 | 35.490066 | 89 | py |
espnet | espnet-master/espnet2/iterators/chunk_iter_factory.py | import logging
import re
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from typeguard import check_argument_types
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.iterators.sequence_iter_factory import SequenceIterFactory
from espnet2.samplers.abs_sampler import AbsSampler
class ChunkIterFactory(AbsIterFactory):
    """Creates chunks from a sequence

    Examples:
        >>> batches = [["id1"], ["id2"], ...]
        >>> batch_size = 128
        >>> chunk_length = 1000
        >>> iter_factory = ChunkIterFactory(dataset, batches, batch_size, chunk_length)
        >>> it = iter_factory.build_iter(epoch)
        >>> for ids, batch in it:
        ...     ...

    - The number of mini-batches are varied in each epochs and
      we can't get the number in advance
      because IterFactory doesn't be given to the length information.
    - Since the first reason, "num_iters_per_epoch" can't be implemented
      for this iterator. Instead of it, "num_samples_per_epoch" is implemented.
    """

    def __init__(
        self,
        dataset,
        batch_size: int,
        batches: Union[AbsSampler, Sequence[Sequence[Any]]],
        chunk_length: Union[int, str],
        chunk_shift_ratio: float = 0.5,
        num_cache_chunks: int = 1024,
        num_samples_per_epoch: Optional[int] = None,
        seed: int = 0,
        shuffle: bool = False,
        num_workers: int = 0,
        collate_fn=None,
        pin_memory: bool = False,
        excluded_key_prefixes: Optional[List[str]] = None,
    ):
        assert check_argument_types()
        assert all(len(x) == 1 for x in batches), "batch-size must be 1"

        # Samples are drawn one at a time; chunking and re-batching
        # happen in this class, not in the underlying DataLoader.
        self.per_sample_iter_factory = SequenceIterFactory(
            dataset=dataset,
            batches=batches,
            num_iters_per_epoch=num_samples_per_epoch,
            seed=seed,
            shuffle=shuffle,
            num_workers=num_workers,
            collate_fn=collate_fn,
            pin_memory=pin_memory,
        )

        self.num_cache_chunks = max(num_cache_chunks, batch_size)
        # chunk_length may be an int (fixed length) or a spec string such as
        # "5,8" (explicit candidates) or "3-5" (inclusive range).
        if isinstance(chunk_length, str):
            if len(chunk_length) == 0:
                raise ValueError("e.g. 5,8 or 3-5: but got empty string")
            self.chunk_lengths = []
            for x in chunk_length.split(","):
                try:
                    sps = list(map(int, x.split("-")))
                except ValueError:
                    raise ValueError(f"e.g. 5,8 or 3-5: but got {chunk_length}")
                if len(sps) > 2:
                    raise ValueError(f"e.g. 5,8 or 3-5: but got {chunk_length}")
                elif len(sps) == 2:
                    # Append all numbers between the range into the candidates
                    self.chunk_lengths += list(range(sps[0], sps[1] + 1))
                else:
                    self.chunk_lengths += [sps[0]]
        else:
            # Single candidates: Fixed chunk length
            self.chunk_lengths = [chunk_length]

        self.chunk_shift_ratio = chunk_shift_ratio
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle

        # keys that satisfy either condition below will be excluded from the length
        # consistency check:
        # - exactly match one of the prefixes in `excluded_key_prefixes`
        # - have one of the prefixes in `excluded_key_prefixes` and end with numbers
        if excluded_key_prefixes is None:
            excluded_key_prefixes = ["utt2category"]
        elif "utt2category" not in excluded_key_prefixes:
            excluded_key_prefixes = excluded_key_prefixes + ["utt2category"]
        # Built as "(prefix1[0-9]*)|(prefix2[0-9]*)|..." and used with
        # re.fullmatch below.
        self.excluded_key_pattern = (
            "(" + "[0-9]*)|(".join(excluded_key_prefixes) + "[0-9]*)"
        )
        if self.excluded_key_pattern:
            logging.info(
                f"Data keys with the following patterns will be excluded from the "
                f"length consistency check:\n{self.excluded_key_pattern}"
            )

    def build_iter(
        self,
        epoch: int,
        shuffle: Optional[bool] = None,
    ) -> Iterator[Tuple[List[str], Dict[str, torch.Tensor]]]:
        """Yield (id_list, batch) tuples of fixed-length chunks for one epoch."""
        per_sample_loader = self.per_sample_iter_factory.build_iter(epoch, shuffle)

        if shuffle is None:
            shuffle = self.shuffle
        state = np.random.RandomState(epoch + self.seed)

        # NOTE(kamo):
        # This iterator supports multiple chunk lengths and
        # keep chunks for each lengths here until collecting specified numbers
        # Caches are kept per (category, chunk length) so a mini-batch never
        # mixes chunk lengths or categories.
        cache_chunks_dict = defaultdict(dict)
        cache_id_list_dict = defaultdict(dict)
        for ids, batch in per_sample_loader:
            # Must be per-sample-loader
            assert len(ids) == 1, f"Must be per-sample-loader: {len(ids)}"
            assert all(len(x) == 1 for x in batch.values())

            # Get keys of sequence data
            sequence_keys = []
            for key in batch:
                if key + "_lengths" in batch:
                    sequence_keys.append(key)
            # Remove lengths data and get the first sample
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            id_ = ids[0]

            # All sequence keys must agree on length so they can be chunked
            # with the same windows (excluded keys are exempt).
            for key in sequence_keys:
                if self.excluded_key_pattern is not None and re.fullmatch(
                    self.excluded_key_pattern, key
                ):
                    # ignore length inconsistency for `excluded_key_prefixes`
                    continue
                if len(batch[key]) != len(batch[sequence_keys[0]]):
                    raise RuntimeError(
                        f"All sequences must has same length: "
                        f"{len(batch[key])} != {len(batch[sequence_keys[0]])}"
                    )

            L = len(batch[sequence_keys[0]])
            # Select chunk length
            # NOTE(review): strict "<" means a sample whose length exactly
            # equals a candidate is skipped — confirm this is intended.
            chunk_lengths = [lg for lg in self.chunk_lengths if lg < L]
            if len(chunk_lengths) == 0:
                logging.warning(
                    f"The length of '{id_}' is {L}, but it is shorter than "
                    f"any candidates of chunk-length: {self.chunk_lengths}"
                )
                continue

            # Convert numpy array to number
            category = (
                batch.get("utt2category", torch.LongTensor([0]))
                .type(torch.int64)
                .item()
            )

            W = int(state.choice(chunk_lengths, 1))
            cache_id_list = cache_id_list_dict[category].setdefault(W, [])
            cache_chunks = cache_chunks_dict[category].setdefault(W, {})

            # Shift width to the next chunk
            S = int(W * self.chunk_shift_ratio)
            # Number of chunks
            N = (L - W) // S + 1
            if shuffle:
                # Random start offset inside the leftover so different epochs
                # see different chunk positions.
                Z = state.randint(0, (L - W) % S + 1)
            else:
                Z = 0

            # Split a sequence into chunks.
            # Note that the marginal frames divided by chunk length are discarded
            for k, v in batch.items():
                if k not in cache_chunks:
                    cache_chunks[k] = []
                if k in sequence_keys:
                    # Shift chunks with overlapped length for data augmentation
                    if self.excluded_key_pattern is not None and re.fullmatch(
                        self.excluded_key_pattern, k
                    ):
                        # Excluded keys are replicated whole, once per chunk.
                        for _ in range(N):
                            cache_chunks[k].append(v)
                    else:
                        cache_chunks[k] += [
                            v[Z + i * S : Z + i * S + W] for i in range(N)
                        ]
                else:
                    # If not sequence, use whole data instead of chunk
                    cache_chunks[k] += [v for _ in range(N)]
            cache_id_list += [id_ for _ in range(N)]

            # Flush the cache as mini-batches once enough chunks are queued;
            # _generate_mini_batches returns the unconsumed remainder.
            if len(cache_id_list) > self.num_cache_chunks:
                cache_id_list, cache_chunks = yield from self._generate_mini_batches(
                    cache_id_list,
                    cache_chunks,
                    shuffle,
                    state,
                )

            cache_id_list_dict[category][W] = cache_id_list
            cache_chunks_dict[category][W] = cache_chunks

        else:
            # End of epoch: drain whatever is left in every cache (the last
            # partial mini-batch per (category, W) is dropped by the helper).
            for category in cache_id_list_dict.keys():
                for W in cache_id_list_dict[category]:
                    cache_id_list = cache_id_list_dict[category].setdefault(W, [])
                    cache_chunks = cache_chunks_dict[category].setdefault(W, {})

                    yield from self._generate_mini_batches(
                        cache_id_list,
                        cache_chunks,
                        shuffle,
                        state,
                    )

    def _generate_mini_batches(
        self,
        id_list: List[str],
        batches: Dict[str, List[torch.Tensor]],
        shuffle: bool,
        state: np.random.RandomState,
    ):
        """Yield full batch_size mini-batches; return the leftover cache."""
        if shuffle:
            indices = np.arange(0, len(id_list))
            state.shuffle(indices)
            batches = {k: [v[i] for i in indices] for k, v in batches.items()}
            id_list = [id_list[i] for i in indices]

        bs = self.batch_size
        while len(id_list) >= bs:
            # Make mini-batch and yield
            yield (
                id_list[:bs],
                {k: torch.stack(v[:bs], 0) for k, v in batches.items()},
            )
            id_list = id_list[bs:]
            batches = {k: v[bs:] for k, v in batches.items()}

        return id_list, batches
| 9,683 | 37.581673 | 87 | py |
espnet | espnet-master/espnet2/tts/espnet_model.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Text-to-speech ESPnet model."""
from contextlib import contextmanager
from typing import Dict, Optional, Tuple
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.tts.abs_tts import AbsTTS
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if V(torch.__version__) >= V("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch<1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetTTSModel(AbsESPnetModel):
    """ESPnet model for text-to-speech task.

    Mediator that wires feature extraction, normalization, and the TTS
    network together for training, feature collection, and inference.
    """

    def __init__(
        self,
        feats_extract: Optional[AbsFeatsExtract],
        pitch_extract: Optional[AbsFeatsExtract],
        energy_extract: Optional[AbsFeatsExtract],
        normalize: Optional[AbsNormalize and InversibleInterface],
        pitch_normalize: Optional[AbsNormalize and InversibleInterface],
        energy_normalize: Optional[AbsNormalize and InversibleInterface],
        tts: AbsTTS,
    ):
        """Initialize ESPnetTTSModel module."""
        assert check_argument_types()
        super().__init__()
        self.feats_extract = feats_extract
        self.pitch_extract = pitch_extract
        self.energy_extract = energy_extract
        self.normalize = normalize
        self.pitch_normalize = pitch_normalize
        self.energy_normalize = energy_normalize
        self.tts = tts

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        durations: Optional[torch.Tensor] = None,
        durations_lengths: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        energy_lengths: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Calculate outputs and return the loss tensor.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
            durations (Optional[Tensor]): Duration tensor.
            durations_lengths (Optional[Tensor]): Duration length tensor (B,).
            pitch (Optional[Tensor]): Pitch tensor.
            pitch_lengths (Optional[Tensor]): Pitch length tensor (B,).
            energy (Optional[Tensor]): Energy tensor.
            energy_lengths (Optional[Tensor]): Energy length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker ID tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).
            kwargs: "utt_id" is among the input.

        Returns:
            Tensor: Loss scalar tensor.
            Dict[str, float]: Statistics to be monitored.
            Tensor: Weight tensor to summarize losses.
        """
        # Feature extraction is kept in fp32 even under AMP (autocast(False))
        # because STFT/pitch extraction is numerically fragile in fp16.
        with autocast(False):
            # Extract features
            if self.feats_extract is not None:
                feats, feats_lengths = self.feats_extract(speech, speech_lengths)
            else:
                # Use precalculated feats (feats_type != raw case)
                feats, feats_lengths = speech, speech_lengths

            # Extract auxiliary features
            # (only when not supplied precomputed by the data loader)
            if self.pitch_extract is not None and pitch is None:
                pitch, pitch_lengths = self.pitch_extract(
                    speech,
                    speech_lengths,
                    feats_lengths=feats_lengths,
                    durations=durations,
                    durations_lengths=durations_lengths,
                )
            if self.energy_extract is not None and energy is None:
                energy, energy_lengths = self.energy_extract(
                    speech,
                    speech_lengths,
                    feats_lengths=feats_lengths,
                    durations=durations,
                    durations_lengths=durations_lengths,
                )

            # Normalize
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)
            if self.pitch_normalize is not None:
                pitch, pitch_lengths = self.pitch_normalize(pitch, pitch_lengths)
            if self.energy_normalize is not None:
                energy, energy_lengths = self.energy_normalize(energy, energy_lengths)

        # Make batch for tts inputs
        batch = dict(
            text=text,
            text_lengths=text_lengths,
            feats=feats,
            feats_lengths=feats_lengths,
        )

        # Update batch for additional auxiliary inputs
        if spembs is not None:
            batch.update(spembs=spembs)
        if sids is not None:
            batch.update(sids=sids)
        if lids is not None:
            batch.update(lids=lids)
        if durations is not None:
            batch.update(durations=durations, durations_lengths=durations_lengths)
        if self.pitch_extract is not None and pitch is not None:
            batch.update(pitch=pitch, pitch_lengths=pitch_lengths)
        if self.energy_extract is not None and energy is not None:
            batch.update(energy=energy, energy_lengths=energy_lengths)
        if self.tts.require_raw_speech:
            batch.update(speech=speech, speech_lengths=speech_lengths)

        return self.tts(**batch)

    def collect_feats(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        durations: Optional[torch.Tensor] = None,
        durations_lengths: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        pitch_lengths: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        energy_lengths: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Calculate features and return them as a dict.

        Used for dataset statistics collection; no normalization is applied.

        Args:
            text (Tensor): Text index tensor (B, T_text).
            text_lengths (Tensor): Text length tensor (B,).
            speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
            durations (Optional[Tensor]): Duration tensor.
            durations_lengths (Optional[Tensor]): Duration length tensor (B,).
            pitch (Optional[Tensor]): Pitch tensor.
            pitch_lengths (Optional[Tensor]): Pitch length tensor (B,).
            energy (Optional[Tensor]): Energy tensor.
            energy_lengths (Optional[Tensor]): Energy length tensor (B,).
            spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
            sids (Optional[Tensor]): Speaker ID tensor (B, 1).
            lids (Optional[Tensor]): Language ID tensor (B, 1).

        Returns:
            Dict[str, Tensor]: Dict of features.
        """
        # feature extraction
        if self.feats_extract is not None:
            feats, feats_lengths = self.feats_extract(speech, speech_lengths)
        else:
            # Use precalculated feats (feats_type != raw case)
            feats, feats_lengths = speech, speech_lengths
        if self.pitch_extract is not None:
            pitch, pitch_lengths = self.pitch_extract(
                speech,
                speech_lengths,
                feats_lengths=feats_lengths,
                durations=durations,
                durations_lengths=durations_lengths,
            )
        if self.energy_extract is not None:
            energy, energy_lengths = self.energy_extract(
                speech,
                speech_lengths,
                feats_lengths=feats_lengths,
                durations=durations,
                durations_lengths=durations_lengths,
            )

        # store in dict
        feats_dict = dict(feats=feats, feats_lengths=feats_lengths)
        if pitch is not None:
            feats_dict.update(pitch=pitch, pitch_lengths=pitch_lengths)
        if energy is not None:
            feats_dict.update(energy=energy, energy_lengths=energy_lengths)

        return feats_dict

    def inference(
        self,
        text: torch.Tensor,
        speech: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        durations: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        **decode_config,
    ) -> Dict[str, torch.Tensor]:
        """Run single-utterance inference and return outputs as a dict.

        Args:
            text (Tensor): Text index tensor (T_text).
            speech (Tensor): Speech waveform tensor (T_wav).
            spembs (Optional[Tensor]): Speaker embedding tensor (D,).
            sids (Optional[Tensor]): Speaker ID tensor (1,).
            lids (Optional[Tensor]): Language ID tensor (1,).
            durations (Optional[Tensor]): Duration tensor.
            pitch (Optional[Tensor]): Pitch tensor.
            energy (Optional[Tensor]): Energy tensor.

        Returns:
            Dict[str, Tensor]: Dict of outputs.
        """
        input_dict = dict(text=text)
        # Reference features are needed for teacher forcing and for GST-style
        # reference encoders.
        if decode_config["use_teacher_forcing"] or getattr(self.tts, "use_gst", False):
            if speech is None:
                raise RuntimeError("missing required argument: 'speech'")
            if self.feats_extract is not None:
                # [None] adds a batch dim; [0][0] drops lengths and batch dim.
                feats = self.feats_extract(speech[None])[0][0]
            else:
                # Use precalculated feats (feats_type != raw case)
                feats = speech
            if self.normalize is not None:
                feats = self.normalize(feats[None])[0][0]
            input_dict.update(feats=feats)
            if self.tts.require_raw_speech:
                input_dict.update(speech=speech)

        if decode_config["use_teacher_forcing"]:
            if durations is not None:
                input_dict.update(durations=durations)

            if self.pitch_extract is not None:
                pitch = self.pitch_extract(
                    speech[None],
                    feats_lengths=torch.LongTensor([len(feats)]),
                    durations=durations[None],
                )[0][0]
            if self.pitch_normalize is not None:
                pitch = self.pitch_normalize(pitch[None])[0][0]
            if pitch is not None:
                input_dict.update(pitch=pitch)

            if self.energy_extract is not None:
                energy = self.energy_extract(
                    speech[None],
                    feats_lengths=torch.LongTensor([len(feats)]),
                    durations=durations[None],
                )[0][0]
            if self.energy_normalize is not None:
                energy = self.energy_normalize(energy[None])[0][0]
            if energy is not None:
                input_dict.update(energy=energy)

        if spembs is not None:
            input_dict.update(spembs=spembs)
        if sids is not None:
            input_dict.update(sids=sids)
        if lids is not None:
            input_dict.update(lids=lids)

        output_dict = self.tts.inference(**input_dict, **decode_config)

        # De-normalize generated features so downstream vocoders see the
        # original feature scale.
        if self.normalize is not None and output_dict.get("feat_gen") is not None:
            # NOTE: normalize.inverse is in-place operation
            feat_gen_denorm = self.normalize.inverse(
                output_dict["feat_gen"].clone()[None]
            )[0][0]
            output_dict.update(feat_gen_denorm=feat_gen_denorm)
        return output_dict
| 12,407 | 39.15534 | 87 | py |
espnet | espnet-master/espnet2/tts/abs_tts.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Text-to-speech abstrast class."""
from abc import ABC, abstractmethod
from typing import Dict, Tuple
import torch
class AbsTTS(torch.nn.Module, ABC):
    """Abstract interface implemented by every ESPnet2 TTS model."""

    @abstractmethod
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Compute training loss; return (loss, stats dict, batch weight)."""
        raise NotImplementedError

    @abstractmethod
    def inference(
        self,
        text: torch.Tensor,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Generate outputs for one utterance and return them as a dict."""
        raise NotImplementedError

    @property
    def require_raw_speech(self):
        """Whether the model needs the raw waveform besides features."""
        return False

    @property
    def require_vocoder(self):
        """Whether an external vocoder is needed to produce a waveform."""
        return True
| 1,113 | 23.755556 | 68 | py |
espnet | espnet-master/espnet2/tts/tacotron2/tacotron2.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Tacotron 2 related modules for ESPnet2."""
import logging
from typing import Dict, Optional, Sequence, Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.tts.abs_tts import AbsTTS
from espnet2.tts.gst.style_encoder import StyleEncoder
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import (
GuidedAttentionLoss,
Tacotron2Loss,
)
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.rnn.attentions import AttForward, AttForwardTA, AttLoc
from espnet.nets.pytorch_backend.tacotron2.decoder import Decoder
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder
class Tacotron2(AbsTTS):
"""Tacotron2 module for end-to-end text-to-speech.
This is a module of Spectrogram prediction network in Tacotron2 described
in `Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_,
which converts the sequence of characters into the sequence of Mel-filterbanks.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
def __init__(
self,
# network structure related
idim: int,
odim: int,
embed_dim: int = 512,
elayers: int = 1,
eunits: int = 512,
econv_layers: int = 3,
econv_chans: int = 512,
econv_filts: int = 5,
atype: str = "location",
adim: int = 512,
aconv_chans: int = 32,
aconv_filts: int = 15,
cumulate_att_w: bool = True,
dlayers: int = 2,
dunits: int = 1024,
prenet_layers: int = 2,
prenet_units: int = 256,
postnet_layers: int = 5,
postnet_chans: int = 512,
postnet_filts: int = 5,
output_activation: str = None,
use_batch_norm: bool = True,
use_concate: bool = True,
use_residual: bool = False,
reduction_factor: int = 1,
# extra embedding related
spks: Optional[int] = None,
langs: Optional[int] = None,
spk_embed_dim: Optional[int] = None,
spk_embed_integration_type: str = "concat",
use_gst: bool = False,
gst_tokens: int = 10,
gst_heads: int = 4,
gst_conv_layers: int = 6,
gst_conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),
gst_conv_kernel_size: int = 3,
gst_conv_stride: int = 2,
gst_gru_layers: int = 1,
gst_gru_units: int = 128,
# training related
dropout_rate: float = 0.5,
zoneout_rate: float = 0.1,
use_masking: bool = True,
use_weighted_masking: bool = False,
bce_pos_weight: float = 5.0,
loss_type: str = "L1+L2",
use_guided_attn_loss: bool = True,
guided_attn_loss_sigma: float = 0.4,
guided_attn_loss_lambda: float = 1.0,
):
"""Initialize Tacotron2 module.
Args:
idim (int): Dimension of the inputs.
odim: (int) Dimension of the outputs.
embed_dim (int): Dimension of the token embedding.
elayers (int): Number of encoder blstm layers.
eunits (int): Number of encoder blstm units.
econv_layers (int): Number of encoder conv layers.
econv_filts (int): Number of encoder conv filter size.
econv_chans (int): Number of encoder conv filter channels.
dlayers (int): Number of decoder lstm layers.
dunits (int): Number of decoder lstm units.
prenet_layers (int): Number of prenet layers.
prenet_units (int): Number of prenet units.
postnet_layers (int): Number of postnet layers.
postnet_filts (int): Number of postnet filter size.
postnet_chans (int): Number of postnet filter channels.
output_activation (str): Name of activation function for outputs.
adim (int): Number of dimension of mlp in attention.
aconv_chans (int): Number of attention conv filter channels.
aconv_filts (int): Number of attention conv filter size.
cumulate_att_w (bool): Whether to cumulate previous attention weight.
use_batch_norm (bool): Whether to use batch normalization.
use_concate (bool): Whether to concat enc outputs w/ dec lstm outputs.
reduction_factor (int): Reduction factor.
spks (Optional[int]): Number of speakers. If set to > 1, assume that the
sids will be provided as the input and use sid embedding layer.
langs (Optional[int]): Number of languages. If set to > 1, assume that the
lids will be provided as the input and use sid embedding layer.
spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
assume that spembs will be provided as the input.
spk_embed_integration_type (str): How to integrate speaker embedding.
use_gst (str): Whether to use global style token.
gst_tokens (int): Number of GST embeddings.
gst_heads (int): Number of heads in GST multihead attention.
gst_conv_layers (int): Number of conv layers in GST.
gst_conv_chans_list: (Sequence[int]): List of the number of channels of conv
layers in GST.
gst_conv_kernel_size (int): Kernel size of conv layers in GST.
gst_conv_stride (int): Stride size of conv layers in GST.
gst_gru_layers (int): Number of GRU layers in GST.
gst_gru_units (int): Number of GRU units in GST.
dropout_rate (float): Dropout rate.
zoneout_rate (float): Zoneout rate.
use_masking (bool): Whether to mask padded part in loss calculation.
use_weighted_masking (bool): Whether to apply weighted masking in
loss calculation.
bce_pos_weight (float): Weight of positive sample of stop token
(only for use_masking=True).
loss_type (str): Loss function type ("L1", "L2", or "L1+L2").
use_guided_attn_loss (bool): Whether to use guided attention loss.
guided_attn_loss_sigma (float): Sigma in guided attention loss.
guided_attn_loss_lambda (float): Lambda in guided attention loss.
"""
assert check_argument_types()
super().__init__()
# store hyperparameters
self.idim = idim
self.odim = odim
self.eos = idim - 1
self.cumulate_att_w = cumulate_att_w
self.reduction_factor = reduction_factor
self.use_gst = use_gst
self.use_guided_attn_loss = use_guided_attn_loss
self.loss_type = loss_type
# define activation function for the final output
if output_activation is None:
self.output_activation_fn = None
elif hasattr(F, output_activation):
self.output_activation_fn = getattr(F, output_activation)
else:
raise ValueError(
f"there is no such an activation function. " f"({output_activation})"
)
# set padding idx
padding_idx = 0
self.padding_idx = padding_idx
# define network modules
self.enc = Encoder(
idim=idim,
embed_dim=embed_dim,
elayers=elayers,
eunits=eunits,
econv_layers=econv_layers,
econv_chans=econv_chans,
econv_filts=econv_filts,
use_batch_norm=use_batch_norm,
use_residual=use_residual,
dropout_rate=dropout_rate,
padding_idx=padding_idx,
)
if self.use_gst:
self.gst = StyleEncoder(
idim=odim, # the input is mel-spectrogram
gst_tokens=gst_tokens,
gst_token_dim=eunits,
gst_heads=gst_heads,
conv_layers=gst_conv_layers,
conv_chans_list=gst_conv_chans_list,
conv_kernel_size=gst_conv_kernel_size,
conv_stride=gst_conv_stride,
gru_layers=gst_gru_layers,
gru_units=gst_gru_units,
)
self.spks = None
if spks is not None and spks > 1:
self.spks = spks
self.sid_emb = torch.nn.Embedding(spks, eunits)
self.langs = None
if langs is not None and langs > 1:
self.langs = langs
self.lid_emb = torch.nn.Embedding(langs, eunits)
self.spk_embed_dim = None
if spk_embed_dim is not None and spk_embed_dim > 0:
self.spk_embed_dim = spk_embed_dim
self.spk_embed_integration_type = spk_embed_integration_type
if self.spk_embed_dim is None:
dec_idim = eunits
elif self.spk_embed_integration_type == "concat":
dec_idim = eunits + spk_embed_dim
elif self.spk_embed_integration_type == "add":
dec_idim = eunits
self.projection = torch.nn.Linear(self.spk_embed_dim, eunits)
else:
raise ValueError(f"{spk_embed_integration_type} is not supported.")
if atype == "location":
att = AttLoc(dec_idim, dunits, adim, aconv_chans, aconv_filts)
elif atype == "forward":
att = AttForward(dec_idim, dunits, adim, aconv_chans, aconv_filts)
if self.cumulate_att_w:
logging.warning(
"cumulation of attention weights is disabled "
"in forward attention."
)
self.cumulate_att_w = False
elif atype == "forward_ta":
att = AttForwardTA(dec_idim, dunits, adim, aconv_chans, aconv_filts, odim)
if self.cumulate_att_w:
logging.warning(
"cumulation of attention weights is disabled "
"in forward attention."
)
self.cumulate_att_w = False
else:
raise NotImplementedError("Support only location or forward")
self.dec = Decoder(
idim=dec_idim,
odim=odim,
att=att,
dlayers=dlayers,
dunits=dunits,
prenet_layers=prenet_layers,
prenet_units=prenet_units,
postnet_layers=postnet_layers,
postnet_chans=postnet_chans,
postnet_filts=postnet_filts,
output_activation_fn=self.output_activation_fn,
cumulate_att_w=self.cumulate_att_w,
use_batch_norm=use_batch_norm,
use_concate=use_concate,
dropout_rate=dropout_rate,
zoneout_rate=zoneout_rate,
reduction_factor=reduction_factor,
)
self.taco2_loss = Tacotron2Loss(
use_masking=use_masking,
use_weighted_masking=use_weighted_masking,
bce_pos_weight=bce_pos_weight,
)
if self.use_guided_attn_loss:
self.attn_loss = GuidedAttentionLoss(
sigma=guided_attn_loss_sigma,
alpha=guided_attn_loss_lambda,
)
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
feats: torch.Tensor,
feats_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
joint_training: bool = False,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
"""Calculate forward propagation.
Args:
text (LongTensor): Batch of padded character ids (B, T_text).
text_lengths (LongTensor): Batch of lengths of each input batch (B,).
feats (Tensor): Batch of padded target features (B, T_feats, odim).
feats_lengths (LongTensor): Batch of the lengths of each target (B,).
spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
lids (Optional[Tensor]): Batch of language IDs (B, 1).
joint_training (bool): Whether to perform joint training with vocoder.
Returns:
Tensor: Loss scalar value.
Dict: Statistics to be monitored.
Tensor: Weight value if not joint training else model outputs.
"""
text = text[:, : text_lengths.max()] # for data-parallel
feats = feats[:, : feats_lengths.max()] # for data-parallel
batch_size = text.size(0)
# Add eos at the last of sequence
xs = F.pad(text, [0, 1], "constant", self.padding_idx)
for i, l in enumerate(text_lengths):
xs[i, l] = self.eos
ilens = text_lengths + 1
ys = feats
olens = feats_lengths
# make labels for stop prediction
labels = make_pad_mask(olens - 1).to(ys.device, ys.dtype)
labels = F.pad(labels, [0, 1], "constant", 1.0)
# calculate tacotron2 outputs
after_outs, before_outs, logits, att_ws = self._forward(
xs=xs,
ilens=ilens,
ys=ys,
olens=olens,
spembs=spembs,
sids=sids,
lids=lids,
)
# modify mod part of groundtruth
if self.reduction_factor > 1:
assert olens.ge(
self.reduction_factor
).all(), "Output length must be greater than or equal to reduction factor."
olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
max_out = max(olens)
ys = ys[:, :max_out]
labels = labels[:, :max_out]
labels = torch.scatter(
labels, 1, (olens - 1).unsqueeze(1), 1.0
) # see #3388
# calculate taco2 loss
l1_loss, mse_loss, bce_loss = self.taco2_loss(
after_outs, before_outs, logits, ys, labels, olens
)
if self.loss_type == "L1+L2":
loss = l1_loss + mse_loss + bce_loss
elif self.loss_type == "L1":
loss = l1_loss + bce_loss
elif self.loss_type == "L2":
loss = mse_loss + bce_loss
else:
raise ValueError(f"unknown --loss-type {self.loss_type}")
stats = dict(
l1_loss=l1_loss.item(),
mse_loss=mse_loss.item(),
bce_loss=bce_loss.item(),
)
# calculate attention loss
if self.use_guided_attn_loss:
# NOTE(kan-bayashi): length of output for auto-regressive
# input will be changed when r > 1
if self.reduction_factor > 1:
olens_in = olens.new([olen // self.reduction_factor for olen in olens])
else:
olens_in = olens
attn_loss = self.attn_loss(att_ws, ilens, olens_in)
loss = loss + attn_loss
stats.update(attn_loss=attn_loss.item())
if not joint_training:
stats.update(loss=loss.item())
loss, stats, weight = force_gatherable(
(loss, stats, batch_size), loss.device
)
return loss, stats, weight
else:
return loss, stats, after_outs
def _forward(
self,
xs: torch.Tensor,
ilens: torch.Tensor,
ys: torch.Tensor,
olens: torch.Tensor,
spembs: torch.Tensor,
sids: torch.Tensor,
lids: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
hs, hlens = self.enc(xs, ilens)
if self.use_gst:
style_embs = self.gst(ys)
hs = hs + style_embs.unsqueeze(1)
if self.spks is not None:
sid_embs = self.sid_emb(sids.view(-1))
hs = hs + sid_embs.unsqueeze(1)
if self.langs is not None:
lid_embs = self.lid_emb(lids.view(-1))
hs = hs + lid_embs.unsqueeze(1)
if self.spk_embed_dim is not None:
hs = self._integrate_with_spk_embed(hs, spembs)
return self.dec(hs, hlens, ys)
    def inference(
        self,
        text: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        threshold: float = 0.5,
        minlenratio: float = 0.0,
        maxlenratio: float = 10.0,
        use_att_constraint: bool = False,
        backward_window: int = 1,
        forward_window: int = 3,
        use_teacher_forcing: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Generate the sequence of features given the sequences of characters.
        Args:
            text (LongTensor): Input sequence of characters (T_text,).
            feats (Optional[Tensor]): Feature sequence to extract style (N, idim).
            spembs (Optional[Tensor]): Speaker embedding (spk_embed_dim,).
            sids (Optional[Tensor]): Speaker ID (1,).
            lids (Optional[Tensor]): Language ID (1,).
            threshold (float): Threshold in inference.
            minlenratio (float): Minimum length ratio in inference.
            maxlenratio (float): Maximum length ratio in inference.
            use_att_constraint (bool): Whether to apply attention constraint.
            backward_window (int): Backward window in attention constraint.
            forward_window (int): Forward window in attention constraint.
            use_teacher_forcing (bool): Whether to use teacher forcing.
        Returns:
            Dict[str, Tensor]: Output dict including the following items:
                * feat_gen (Tensor): Output sequence of features (T_feats, odim).
                * prob (Tensor): Output sequence of stop probabilities (T_feats,).
                * att_w (Tensor): Attention weights (T_feats, T).
        """
        x = text
        y = feats
        spemb = spembs
        # add eos at the last of sequence
        x = F.pad(x, [0, 1], "constant", self.eos)
        # inference with teacher forcing
        if use_teacher_forcing:
            assert feats is not None, "feats must be provided with teacher forcing."
            # batchify single-utterance inputs (add a leading batch axis)
            xs, ys = x.unsqueeze(0), y.unsqueeze(0)
            spembs = None if spemb is None else spemb.unsqueeze(0)
            ilens = x.new_tensor([xs.size(1)]).long()
            olens = y.new_tensor([ys.size(1)]).long()
            outs, _, _, att_ws = self._forward(
                xs=xs,
                ilens=ilens,
                ys=ys,
                olens=olens,
                spembs=spembs,
                sids=sids,
                lids=lids,
            )
            # strip the batch axis again for the single-utterance result
            return dict(feat_gen=outs[0], att_w=att_ws[0])
        # inference
        # encode a single utterance (encoder.inference works without batch axis)
        h = self.enc.inference(x)
        # integrate GST style embedding extracted from the reference feats
        # NOTE(review): assumes feats is given when use_gst=True — y is None otherwise
        if self.use_gst:
            style_emb = self.gst(y.unsqueeze(0))
            h = h + style_emb
        # integrate speaker-id embedding
        if self.spks is not None:
            sid_emb = self.sid_emb(sids.view(-1))
            h = h + sid_emb
        # integrate language-id embedding
        if self.langs is not None:
            lid_emb = self.lid_emb(lids.view(-1))
            h = h + lid_emb
        # integrate pretrained speaker embedding (batchify, fuse, un-batchify)
        if self.spk_embed_dim is not None:
            hs, spembs = h.unsqueeze(0), spemb.unsqueeze(0)
            h = self._integrate_with_spk_embed(hs, spembs)[0]
        # auto-regressive decoding with stop-token prediction
        out, prob, att_w = self.dec.inference(
            h,
            threshold=threshold,
            minlenratio=minlenratio,
            maxlenratio=maxlenratio,
            use_att_constraint=use_att_constraint,
            backward_window=backward_window,
            forward_window=forward_window,
        )
        return dict(feat_gen=out, prob=prob, att_w=att_w)
def _integrate_with_spk_embed(
self, hs: torch.Tensor, spembs: torch.Tensor
) -> torch.Tensor:
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, Tmax, eunits).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, Tmax, eunits) if
integration_type is "add" else (B, Tmax, eunits + spk_embed_dim).
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = torch.cat([hs, spembs], dim=-1)
else:
raise NotImplementedError("support only add or concat.")
return hs
| 21,020 | 38.8125 | 88 | py |
espnet | espnet-master/espnet2/tts/fastspeech/fastspeech.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Fastspeech related modules for ESPnet2."""
import logging
from typing import Dict, Optional, Sequence, Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.torch_utils.initialize import initialize
from espnet2.tts.abs_tts import AbsTTS
from espnet2.tts.gst.style_encoder import StyleEncoder
from espnet.nets.pytorch_backend.conformer.encoder import Encoder as ConformerEncoder
from espnet.nets.pytorch_backend.e2e_tts_fastspeech import (
FeedForwardTransformerLoss as FastSpeechLoss,
)
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import DurationPredictor
from espnet.nets.pytorch_backend.fastspeech.length_regulator import LengthRegulator
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask, make_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import (
Encoder as TransformerEncoder,
)
class FastSpeech(AbsTTS):
"""FastSpeech module for end-to-end text-to-speech.
This is a module of FastSpeech, feed-forward Transformer with duration predictor
described in `FastSpeech: Fast, Robust and Controllable Text to Speech`_, which
does not require any auto-regressive processing during inference, resulting in
fast decoding compared with auto-regressive Transformer.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
    def __init__(
        self,
        # network structure related
        idim: int,
        odim: int,
        adim: int = 384,
        aheads: int = 4,
        elayers: int = 6,
        eunits: int = 1536,
        dlayers: int = 6,
        dunits: int = 1536,
        postnet_layers: int = 5,
        postnet_chans: int = 512,
        postnet_filts: int = 5,
        postnet_dropout_rate: float = 0.5,
        positionwise_layer_type: str = "conv1d",
        positionwise_conv_kernel_size: int = 1,
        use_scaled_pos_enc: bool = True,
        use_batch_norm: bool = True,
        encoder_normalize_before: bool = True,
        decoder_normalize_before: bool = True,
        encoder_concat_after: bool = False,
        decoder_concat_after: bool = False,
        duration_predictor_layers: int = 2,
        duration_predictor_chans: int = 384,
        duration_predictor_kernel_size: int = 3,
        duration_predictor_dropout_rate: float = 0.1,
        reduction_factor: int = 1,
        encoder_type: str = "transformer",
        decoder_type: str = "transformer",
        transformer_enc_dropout_rate: float = 0.1,
        transformer_enc_positional_dropout_rate: float = 0.1,
        transformer_enc_attn_dropout_rate: float = 0.1,
        transformer_dec_dropout_rate: float = 0.1,
        transformer_dec_positional_dropout_rate: float = 0.1,
        transformer_dec_attn_dropout_rate: float = 0.1,
        # only for conformer
        conformer_rel_pos_type: str = "legacy",
        conformer_pos_enc_layer_type: str = "rel_pos",
        conformer_self_attn_layer_type: str = "rel_selfattn",
        conformer_activation_type: str = "swish",
        use_macaron_style_in_conformer: bool = True,
        use_cnn_in_conformer: bool = True,
        conformer_enc_kernel_size: int = 7,
        conformer_dec_kernel_size: int = 31,
        zero_triu: bool = False,
        # extra embedding related
        spks: Optional[int] = None,
        langs: Optional[int] = None,
        spk_embed_dim: Optional[int] = None,
        spk_embed_integration_type: str = "add",
        use_gst: bool = False,
        gst_tokens: int = 10,
        gst_heads: int = 4,
        gst_conv_layers: int = 6,
        gst_conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),
        gst_conv_kernel_size: int = 3,
        gst_conv_stride: int = 2,
        gst_gru_layers: int = 1,
        gst_gru_units: int = 128,
        # training related
        init_type: str = "xavier_uniform",
        init_enc_alpha: float = 1.0,
        init_dec_alpha: float = 1.0,
        use_masking: bool = False,
        use_weighted_masking: bool = False,
    ):
        """Initialize FastSpeech module.
        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            adim (int): Attention dimension.
            aheads (int): Number of attention heads.
            elayers (int): Number of encoder layers.
            eunits (int): Number of encoder hidden units.
            dlayers (int): Number of decoder layers.
            dunits (int): Number of decoder hidden units.
            postnet_layers (int): Number of postnet layers.
            postnet_chans (int): Number of postnet channels.
            postnet_filts (int): Kernel size of postnet.
            postnet_dropout_rate (float): Dropout rate in postnet.
            use_scaled_pos_enc (bool): Whether to use trainable scaled pos encoding.
            use_batch_norm (bool): Whether to use batch normalization in encoder prenet.
            encoder_normalize_before (bool): Whether to apply layernorm layer before
                encoder block.
            decoder_normalize_before (bool): Whether to apply layernorm layer before
                decoder block.
            encoder_concat_after (bool): Whether to concatenate attention layer's input
                and output in encoder.
            decoder_concat_after (bool): Whether to concatenate attention layer's input
                and output in decoder.
            duration_predictor_layers (int): Number of duration predictor layers.
            duration_predictor_chans (int): Number of duration predictor channels.
            duration_predictor_kernel_size (int): Kernel size of duration predictor.
            duration_predictor_dropout_rate (float): Dropout rate in duration predictor.
            reduction_factor (int): Reduction factor.
            encoder_type (str): Encoder type ("transformer" or "conformer").
            decoder_type (str): Decoder type ("transformer" or "conformer").
            transformer_enc_dropout_rate (float): Dropout rate in encoder except
                attention and positional encoding.
            transformer_enc_positional_dropout_rate (float): Dropout rate after encoder
                positional encoding.
            transformer_enc_attn_dropout_rate (float): Dropout rate in encoder
                self-attention module.
            transformer_dec_dropout_rate (float): Dropout rate in decoder except
                attention & positional encoding.
            transformer_dec_positional_dropout_rate (float): Dropout rate after decoder
                positional encoding.
            transformer_dec_attn_dropout_rate (float): Dropout rate in decoder
                self-attention module.
            conformer_rel_pos_type (str): Relative pos encoding type in conformer.
            conformer_pos_enc_layer_type (str): Pos encoding layer type in conformer.
            conformer_self_attn_layer_type (str): Self-attention layer type in conformer
            conformer_activation_type (str): Activation function type in conformer.
            use_macaron_style_in_conformer: Whether to use macaron style FFN.
            use_cnn_in_conformer: Whether to use CNN in conformer.
            conformer_enc_kernel_size: Kernel size of encoder conformer.
            conformer_dec_kernel_size: Kernel size of decoder conformer.
            zero_triu: Whether to use zero triu in relative self-attention module.
            spks (Optional[int]): Number of speakers. If set to > 1, assume that the
                sids will be provided as the input and use sid embedding layer.
            langs (Optional[int]): Number of languages. If set to > 1, assume that the
                lids will be provided as the input and use sid embedding layer.
            spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
                assume that spembs will be provided as the input.
            spk_embed_integration_type: How to integrate speaker embedding.
            use_gst (str): Whether to use global style token.
            gst_tokens (int): The number of GST embeddings.
            gst_heads (int): The number of heads in GST multihead attention.
            gst_conv_layers (int): The number of conv layers in GST.
            gst_conv_chans_list: (Sequence[int]):
                List of the number of channels of conv layers in GST.
            gst_conv_kernel_size (int): Kernel size of conv layers in GST.
            gst_conv_stride (int): Stride size of conv layers in GST.
            gst_gru_layers (int): The number of GRU layers in GST.
            gst_gru_units (int): The number of GRU units in GST.
            init_type (str): How to initialize transformer parameters.
            init_enc_alpha (float): Initial value of alpha in scaled pos encoding of the
                encoder.
            init_dec_alpha (float): Initial value of alpha in scaled pos encoding of the
                decoder.
            use_masking (bool): Whether to apply masking for padded part in loss
                calculation.
            use_weighted_masking (bool): Whether to apply weighted masking in loss
                calculation.
        """
        assert check_argument_types()
        super().__init__()
        # store hyperparameters
        self.idim = idim
        self.odim = odim
        # the last symbol id is reserved for <eos> (appended in forward/inference)
        self.eos = idim - 1
        self.reduction_factor = reduction_factor
        self.encoder_type = encoder_type
        self.decoder_type = decoder_type
        self.use_scaled_pos_enc = use_scaled_pos_enc
        self.use_gst = use_gst
        # use idx 0 as padding idx
        self.padding_idx = 0
        # get positional encoding class
        pos_enc_class = (
            ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
        )
        # check relative positional encoding compatibility
        if "conformer" in [encoder_type, decoder_type]:
            if conformer_rel_pos_type == "legacy":
                if conformer_pos_enc_layer_type == "rel_pos":
                    conformer_pos_enc_layer_type = "legacy_rel_pos"
                    logging.warning(
                        "Fallback to conformer_pos_enc_layer_type = 'legacy_rel_pos' "
                        "due to the compatibility. If you want to use the new one, "
                        "please use conformer_pos_enc_layer_type = 'latest'."
                    )
                if conformer_self_attn_layer_type == "rel_selfattn":
                    conformer_self_attn_layer_type = "legacy_rel_selfattn"
                    logging.warning(
                        "Fallback to "
                        "conformer_self_attn_layer_type = 'legacy_rel_selfattn' "
                        "due to the compatibility. If you want to use the new one, "
                        "please use conformer_pos_enc_layer_type = 'latest'."
                    )
            elif conformer_rel_pos_type == "latest":
                assert conformer_pos_enc_layer_type != "legacy_rel_pos"
                assert conformer_self_attn_layer_type != "legacy_rel_selfattn"
            else:
                raise ValueError(f"Unknown rel_pos_type: {conformer_rel_pos_type}")
        # define encoder
        encoder_input_layer = torch.nn.Embedding(
            num_embeddings=idim, embedding_dim=adim, padding_idx=self.padding_idx
        )
        if encoder_type == "transformer":
            self.encoder = TransformerEncoder(
                idim=idim,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=eunits,
                num_blocks=elayers,
                input_layer=encoder_input_layer,
                dropout_rate=transformer_enc_dropout_rate,
                positional_dropout_rate=transformer_enc_positional_dropout_rate,
                attention_dropout_rate=transformer_enc_attn_dropout_rate,
                pos_enc_class=pos_enc_class,
                normalize_before=encoder_normalize_before,
                concat_after=encoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            )
        elif encoder_type == "conformer":
            self.encoder = ConformerEncoder(
                idim=idim,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=eunits,
                num_blocks=elayers,
                input_layer=encoder_input_layer,
                dropout_rate=transformer_enc_dropout_rate,
                positional_dropout_rate=transformer_enc_positional_dropout_rate,
                attention_dropout_rate=transformer_enc_attn_dropout_rate,
                normalize_before=encoder_normalize_before,
                concat_after=encoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
                macaron_style=use_macaron_style_in_conformer,
                pos_enc_layer_type=conformer_pos_enc_layer_type,
                selfattention_layer_type=conformer_self_attn_layer_type,
                activation_type=conformer_activation_type,
                use_cnn_module=use_cnn_in_conformer,
                cnn_module_kernel=conformer_enc_kernel_size,
            )
        else:
            raise ValueError(f"{encoder_type} is not supported.")
        # define GST
        if self.use_gst:
            self.gst = StyleEncoder(
                idim=odim,  # the input is mel-spectrogram
                gst_tokens=gst_tokens,
                gst_token_dim=adim,
                gst_heads=gst_heads,
                conv_layers=gst_conv_layers,
                conv_chans_list=gst_conv_chans_list,
                conv_kernel_size=gst_conv_kernel_size,
                conv_stride=gst_conv_stride,
                gru_layers=gst_gru_layers,
                gru_units=gst_gru_units,
            )
        # define spk and lang embedding
        self.spks = None
        if spks is not None and spks > 1:
            self.spks = spks
            self.sid_emb = torch.nn.Embedding(spks, adim)
        self.langs = None
        if langs is not None and langs > 1:
            self.langs = langs
            self.lid_emb = torch.nn.Embedding(langs, adim)
        # define additional projection for speaker embedding
        self.spk_embed_dim = None
        if spk_embed_dim is not None and spk_embed_dim > 0:
            self.spk_embed_dim = spk_embed_dim
            self.spk_embed_integration_type = spk_embed_integration_type
        if self.spk_embed_dim is not None:
            if self.spk_embed_integration_type == "add":
                self.projection = torch.nn.Linear(self.spk_embed_dim, adim)
            else:
                self.projection = torch.nn.Linear(adim + self.spk_embed_dim, adim)
        # define duration predictor
        self.duration_predictor = DurationPredictor(
            idim=adim,
            n_layers=duration_predictor_layers,
            n_chans=duration_predictor_chans,
            kernel_size=duration_predictor_kernel_size,
            dropout_rate=duration_predictor_dropout_rate,
        )
        # define length regulator
        self.length_regulator = LengthRegulator()
        # define decoder
        # NOTE: we use encoder as decoder
        # because fastspeech's decoder is the same as encoder
        if decoder_type == "transformer":
            self.decoder = TransformerEncoder(
                idim=0,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=dunits,
                num_blocks=dlayers,
                input_layer=None,
                dropout_rate=transformer_dec_dropout_rate,
                positional_dropout_rate=transformer_dec_positional_dropout_rate,
                attention_dropout_rate=transformer_dec_attn_dropout_rate,
                pos_enc_class=pos_enc_class,
                normalize_before=decoder_normalize_before,
                concat_after=decoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            )
        elif decoder_type == "conformer":
            self.decoder = ConformerEncoder(
                idim=0,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=dunits,
                num_blocks=dlayers,
                input_layer=None,
                dropout_rate=transformer_dec_dropout_rate,
                positional_dropout_rate=transformer_dec_positional_dropout_rate,
                attention_dropout_rate=transformer_dec_attn_dropout_rate,
                normalize_before=decoder_normalize_before,
                concat_after=decoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
                macaron_style=use_macaron_style_in_conformer,
                pos_enc_layer_type=conformer_pos_enc_layer_type,
                selfattention_layer_type=conformer_self_attn_layer_type,
                activation_type=conformer_activation_type,
                use_cnn_module=use_cnn_in_conformer,
                cnn_module_kernel=conformer_dec_kernel_size,
            )
        else:
            raise ValueError(f"{decoder_type} is not supported.")
        # define final projection
        # feat_out maps adim -> odim * r; frames are later reshaped back in _forward
        self.feat_out = torch.nn.Linear(adim, odim * reduction_factor)
        # define postnet
        self.postnet = (
            None
            if postnet_layers == 0
            else Postnet(
                idim=idim,
                odim=odim,
                n_layers=postnet_layers,
                n_chans=postnet_chans,
                n_filts=postnet_filts,
                use_batch_norm=use_batch_norm,
                dropout_rate=postnet_dropout_rate,
            )
        )
        # initialize parameters
        self._reset_parameters(
            init_type=init_type,
            init_enc_alpha=init_enc_alpha,
            init_dec_alpha=init_dec_alpha,
        )
        # define criterions
        self.criterion = FastSpeechLoss(
            use_masking=use_masking, use_weighted_masking=use_weighted_masking
        )
    def _forward(
        self,
        xs: torch.Tensor,
        ilens: torch.Tensor,
        ys: Optional[torch.Tensor] = None,
        olens: Optional[torch.Tensor] = None,
        ds: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        is_inference: bool = False,
        alpha: float = 1.0,
    ) -> Sequence[torch.Tensor]:
        """Encode text, apply duration-based length regulation, and decode.
        Returns (before_outs, after_outs, d_outs): feature sequences
        (B, T_feats, odim) and the duration predictor output (B, T_text).
        """
        # forward encoder
        x_masks = self._source_mask(ilens)
        hs, _ = self.encoder(xs, x_masks)  # (B, T_text, adim)
        # integrate with GST
        if self.use_gst:
            style_embs = self.gst(ys)
            hs = hs + style_embs.unsqueeze(1)
        # integrate with SID and LID embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)
        # forward duration predictor and length regulator
        d_masks = make_pad_mask(ilens).to(xs.device)
        if is_inference:
            # use predicted durations; alpha rescales them to control speed
            d_outs = self.duration_predictor.inference(hs, d_masks)  # (B, T_text)
            hs = self.length_regulator(hs, d_outs, alpha)  # (B, T_feats, adim)
        else:
            # teacher forcing: expand states with the groundtruth durations ds
            d_outs = self.duration_predictor(hs, d_masks)  # (B, T_text)
            hs = self.length_regulator(hs, ds)  # (B, T_feats, adim)
        # forward decoder
        if olens is not None and not is_inference:
            if self.reduction_factor > 1:
                olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            else:
                olens_in = olens
            h_masks = self._source_mask(olens_in)
        else:
            # at inference the output length is unknown -> no decoder mask
            h_masks = None
        zs, _ = self.decoder(hs, h_masks)  # (B, T_feats, adim)
        # project adim -> odim * r and fold the r frames back into the time axis
        before_outs = self.feat_out(zs).view(
            zs.size(0), -1, self.odim
        )  # (B, T_feats, odim)
        # postnet -> (B, T_feats//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)
        return before_outs, after_outs, d_outs
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        durations: torch.Tensor,
        durations_lengths: torch.Tensor,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        joint_training: bool = False,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Calculate forward propagation.
        Args:
            text (LongTensor): Batch of padded character ids (B, T_text).
            text_lengths (LongTensor): Batch of lengths of each input (B,).
            feats (Tensor): Batch of padded target features (B, T_feats, odim).
            feats_lengths (LongTensor): Batch of the lengths of each target (B,).
            durations (LongTensor): Batch of padded durations (B, T_text + 1).
            durations_lengths (LongTensor): Batch of duration lengths (B, T_text + 1).
            spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
            lids (Optional[Tensor]): Batch of language IDs (B, 1).
            joint_training (bool): Whether to perform joint training with vocoder.
        Returns:
            Tensor: Loss scalar value.
            Dict: Statistics to be monitored.
            Tensor: Weight value if not joint training else model outputs.
        """
        text = text[:, : text_lengths.max()]  # for data-parallel
        feats = feats[:, : feats_lengths.max()]  # for data-parallel
        durations = durations[:, : durations_lengths.max()]  # for data-parallel
        batch_size = text.size(0)
        # Add eos at the last of sequence
        xs = F.pad(text, [0, 1], "constant", self.padding_idx)
        for i, l in enumerate(text_lengths):
            xs[i, l] = self.eos
        ilens = text_lengths + 1
        ys, ds = feats, durations
        olens = feats_lengths
        # forward propagation
        before_outs, after_outs, d_outs = self._forward(
            xs,
            ilens,
            ys,
            olens,
            ds,
            spembs=spembs,
            sids=sids,
            lids=lids,
            is_inference=False,
        )
        # modify mod part of groundtruth
        # (trim targets so their length is a multiple of the reduction factor)
        if self.reduction_factor > 1:
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
            max_olen = max(olens)
            ys = ys[:, :max_olen]
        # calculate loss
        if self.postnet is None:
            after_outs = None
        l1_loss, duration_loss = self.criterion(
            after_outs, before_outs, d_outs, ys, ds, ilens, olens
        )
        loss = l1_loss + duration_loss
        stats = dict(
            l1_loss=l1_loss.item(),
            duration_loss=duration_loss.item(),
        )
        # report extra information
        if self.encoder_type == "transformer" and self.use_scaled_pos_enc:
            stats.update(
                encoder_alpha=self.encoder.embed[-1].alpha.data.item(),
            )
        if self.decoder_type == "transformer" and self.use_scaled_pos_enc:
            stats.update(
                decoder_alpha=self.decoder.embed[-1].alpha.data.item(),
            )
        if not joint_training:
            stats.update(loss=loss.item())
            # gather loss/stats across data-parallel replicas
            loss, stats, weight = force_gatherable(
                (loss, stats, batch_size), loss.device
            )
            return loss, stats, weight
        else:
            return loss, stats, after_outs if after_outs is not None else before_outs
    def inference(
        self,
        text: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        durations: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        alpha: float = 1.0,
        use_teacher_forcing: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Generate the sequence of features given the sequences of characters.
        Args:
            text (LongTensor): Input sequence of characters (T_text,).
            feats (Optional[Tensor]): Feature sequence to extract style (N, idim).
            durations (Optional[LongTensor]): Groundtruth of duration (T_text + 1,).
            spembs (Optional[Tensor]): Speaker embedding (spk_embed_dim,).
            sids (Optional[Tensor]): Speaker ID (1,).
            lids (Optional[Tensor]): Language ID (1,).
            alpha (float): Alpha to control the speed.
            use_teacher_forcing (bool): Whether to use teacher forcing.
                If true, groundtruth of duration, pitch and energy will be used.
        Returns:
            Dict[str, Tensor]: Output dict including the following items:
                * feat_gen (Tensor): Output sequence of features (T_feats, odim).
                * duration (Tensor): Duration sequence (T_text + 1,).
        """
        x, y = text, feats
        spemb, d = spembs, durations
        # add eos at the last of sequence
        x = F.pad(x, [0, 1], "constant", self.eos)
        # setup batch axis
        ilens = torch.tensor([x.shape[0]], dtype=torch.long, device=x.device)
        xs, ys = x.unsqueeze(0), None
        if y is not None:
            ys = y.unsqueeze(0)
        if spemb is not None:
            spembs = spemb.unsqueeze(0)
        if use_teacher_forcing:
            # use groundtruth of duration
            # NOTE(review): durations must be provided when use_teacher_forcing=True
            ds = d.unsqueeze(0)
            _, outs, d_outs = self._forward(
                xs,
                ilens,
                ys,
                ds=ds,
                spembs=spembs,
                sids=sids,
                lids=lids,
            )  # (1, T_feats, odim)
        else:
            # inference
            _, outs, d_outs = self._forward(
                xs,
                ilens,
                ys,
                spembs=spembs,
                sids=sids,
                lids=lids,
                is_inference=True,
                alpha=alpha,
            )  # (1, T_feats, odim)
        # strip the batch axis for the single-utterance result
        return dict(feat_gen=outs[0], duration=d_outs[0])
def _integrate_with_spk_embed(
self, hs: torch.Tensor, spembs: torch.Tensor
) -> torch.Tensor:
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, T_text, adim).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, T_text, adim).
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds and then apply projection
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
def _source_mask(self, ilens: torch.Tensor) -> torch.Tensor:
"""Make masks for self-attention.
Args:
ilens (LongTensor): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for self-attention.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
>>> ilens = [5, 3]
>>> self._source_mask(ilens)
tensor([[[1, 1, 1, 1, 1],
[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
return x_masks.unsqueeze(-2)
def _reset_parameters(
self, init_type: str, init_enc_alpha: float, init_dec_alpha: float
):
# initialize parameters
if init_type != "pytorch":
initialize(self, init_type)
# initialize alpha in scaled positional encoding
if self.encoder_type == "transformer" and self.use_scaled_pos_enc:
self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
if self.decoder_type == "transformer" and self.use_scaled_pos_enc:
self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
| 29,711 | 41.26458 | 88 | py |
espnet | espnet-master/espnet2/tts/prodiff/prodiff.py | # Copyright 2022 Hitachi LTD. (Nelson Yalta)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Based in FastSpeech2
"""ProDiff related modules for ESPnet2."""
import logging
from typing import Dict, Optional, Sequence, Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.torch_utils.initialize import initialize
from espnet2.tts.abs_tts import AbsTTS
from espnet2.tts.fastspeech2.variance_predictor import VariancePredictor
from espnet2.tts.gst.style_encoder import StyleEncoder
from espnet2.tts.prodiff.denoiser import SpectogramDenoiser
from espnet2.tts.prodiff.loss import ProDiffLoss
from espnet.nets.pytorch_backend.conformer.encoder import Encoder as ConformerEncoder
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import DurationPredictor
from espnet.nets.pytorch_backend.fastspeech.length_regulator import LengthRegulator
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask, make_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import (
Encoder as TransformerEncoder,
)
class ProDiff(AbsTTS):
    """ProDiff module.
    This is a module of ProDiff described in `ProDiff: Progressive Fast Diffusion Model
    for High-Quality Text-to-Speech`_.
    .. _`ProDiff: Progressive Fast Diffusion Model for High-Quality Text-to-Speech`:
        https://arxiv.org/abs/2207.06389
    """
    def __init__(
        self,
        # network structure related
        idim: int,
        odim: int,
        adim: int = 384,
        aheads: int = 4,
        elayers: int = 6,
        eunits: int = 1536,
        postnet_layers: int = 0,
        postnet_chans: int = 512,
        postnet_filts: int = 5,
        postnet_dropout_rate: float = 0.5,
        positionwise_layer_type: str = "conv1d",
        positionwise_conv_kernel_size: int = 1,
        use_scaled_pos_enc: bool = True,
        use_batch_norm: bool = True,
        encoder_normalize_before: bool = True,
        encoder_concat_after: bool = False,
        reduction_factor: int = 1,
        encoder_type: str = "transformer",
        decoder_type: str = "diffusion",
        transformer_enc_dropout_rate: float = 0.1,
        transformer_enc_positional_dropout_rate: float = 0.1,
        transformer_enc_attn_dropout_rate: float = 0.1,
        # Denoiser Decoder
        denoiser_layers: int = 20,
        denoiser_channels: int = 256,
        diffusion_steps: int = 1000,
        diffusion_timescale: int = 1,
        diffusion_beta: float = 40.0,
        diffusion_scheduler: str = "vpsde",
        diffusion_cycle_ln: int = 1,
        # only for conformer
        conformer_rel_pos_type: str = "legacy",
        conformer_pos_enc_layer_type: str = "rel_pos",
        conformer_self_attn_layer_type: str = "rel_selfattn",
        conformer_activation_type: str = "swish",
        use_macaron_style_in_conformer: bool = True,
        use_cnn_in_conformer: bool = True,
        zero_triu: bool = False,
        conformer_enc_kernel_size: int = 7,
        # duration predictor
        duration_predictor_layers: int = 2,
        duration_predictor_chans: int = 384,
        duration_predictor_kernel_size: int = 3,
        duration_predictor_dropout_rate: float = 0.1,
        # energy predictor
        energy_predictor_layers: int = 2,
        energy_predictor_chans: int = 384,
        energy_predictor_kernel_size: int = 3,
        energy_predictor_dropout: float = 0.5,
        energy_embed_kernel_size: int = 9,
        energy_embed_dropout: float = 0.5,
        stop_gradient_from_energy_predictor: bool = False,
        # pitch predictor
        pitch_predictor_layers: int = 2,
        pitch_predictor_chans: int = 384,
        pitch_predictor_kernel_size: int = 3,
        pitch_predictor_dropout: float = 0.5,
        pitch_embed_kernel_size: int = 9,
        pitch_embed_dropout: float = 0.5,
        stop_gradient_from_pitch_predictor: bool = False,
        # extra embedding related
        spks: Optional[int] = None,
        langs: Optional[int] = None,
        spk_embed_dim: Optional[int] = None,
        spk_embed_integration_type: str = "add",
        use_gst: bool = False,
        gst_tokens: int = 10,
        gst_heads: int = 4,
        gst_conv_layers: int = 6,
        gst_conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),
        gst_conv_kernel_size: int = 3,
        gst_conv_stride: int = 2,
        gst_gru_layers: int = 1,
        gst_gru_units: int = 128,
        # training related
        init_type: str = "xavier_uniform",
        init_enc_alpha: float = 1.0,
        init_dec_alpha: float = 1.0,
        use_masking: bool = False,
        use_weighted_masking: bool = False,
    ):
        """Initialize ProDiff module.
        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            adim (int): Attention dimension (encoder hidden size).
            aheads (int): Number of attention heads.
            elayers (int): Number of encoder layers.
            eunits (int): Number of encoder hidden units.
            dlayers (int): Number of decoder layers.
            dunits (int): Number of decoder hidden units.
            postnet_layers (int): Number of postnet layers.
            postnet_chans (int): Number of postnet channels.
            postnet_filts (int): Kernel size of postnet.
            postnet_dropout_rate (float): Dropout rate in postnet.
            use_scaled_pos_enc (bool): Whether to use trainable scaled pos encoding.
            use_batch_norm (bool): Whether to use batch normalization in encoder prenet.
            encoder_normalize_before (bool): Whether to apply layernorm layer before
                encoder block.
            decoder_normalize_before (bool): Whether to apply layernorm layer before
                decoder block.
            encoder_concat_after (bool): Whether to concatenate attention layer's input
                and output in encoder.
            decoder_concat_after (bool): Whether to concatenate attention layer's input
                and output in decoder.
            reduction_factor (int): Reduction factor.
            encoder_type (str): Encoder type ("transformer" or "conformer").
            decoder_type (str): Decoder type ("transformer" or "conformer").
            denoiser_layers (int): Number of layers of the diffusion denoiser.
            denoiser_channels (int): Number of channels of the diffusion denoiser.
            diffusion_steps (int): Number of diffusion time steps.
            diffusion_timescale (int): Diffusion time scale.
            diffusion_beta (float): Maximum beta of the diffusion noise scheduler.
            diffusion_scheduler (str): Type of the diffusion noise scheduler.
            diffusion_cycle_ln (int): Cycle length of the diffusion scheduler.
            transformer_enc_dropout_rate (float): Dropout rate in encoder except
                attention and positional encoding.
            transformer_enc_positional_dropout_rate (float): Dropout rate after encoder
                positional encoding.
            transformer_enc_attn_dropout_rate (float): Dropout rate in encoder
                self-attention module.
            transformer_dec_dropout_rate (float): Dropout rate in decoder except
                attention & positional encoding.
            transformer_dec_positional_dropout_rate (float): Dropout rate after decoder
                positional encoding.
            transformer_dec_attn_dropout_rate (float): Dropout rate in decoder
                self-attention module.
            conformer_rel_pos_type (str): Relative pos encoding type in conformer.
            conformer_pos_enc_layer_type (str): Pos encoding layer type in conformer.
            conformer_self_attn_layer_type (str): Self-attention layer type in conformer
            conformer_activation_type (str): Activation function type in conformer.
            use_macaron_style_in_conformer: Whether to use macaron style FFN.
            use_cnn_in_conformer: Whether to use CNN in conformer.
            zero_triu: Whether to use zero triu in relative self-attention module.
            conformer_enc_kernel_size: Kernel size of encoder conformer.
            conformer_dec_kernel_size: Kernel size of decoder conformer.
            duration_predictor_layers (int): Number of duration predictor layers.
            duration_predictor_chans (int): Number of duration predictor channels.
            duration_predictor_kernel_size (int): Kernel size of duration predictor.
            duration_predictor_dropout_rate (float): Dropout rate in duration predictor.
            pitch_predictor_layers (int): Number of pitch predictor layers.
            pitch_predictor_chans (int): Number of pitch predictor channels.
            pitch_predictor_kernel_size (int): Kernel size of pitch predictor.
            pitch_predictor_dropout_rate (float): Dropout rate in pitch predictor.
            pitch_embed_kernel_size (float): Kernel size of pitch embedding.
            pitch_embed_dropout_rate (float): Dropout rate for pitch embedding.
            stop_gradient_from_pitch_predictor: Whether to stop gradient from pitch
                predictor to encoder.
            energy_predictor_layers (int): Number of energy predictor layers.
            energy_predictor_chans (int): Number of energy predictor channels.
            energy_predictor_kernel_size (int): Kernel size of energy predictor.
            energy_predictor_dropout_rate (float): Dropout rate in energy predictor.
            energy_embed_kernel_size (float): Kernel size of energy embedding.
            energy_embed_dropout_rate (float): Dropout rate for energy embedding.
            stop_gradient_from_energy_predictor: Whether to stop gradient from energy
                predictor to encoder.
            spks (Optional[int]): Number of speakers. If set to > 1, assume that the
                sids will be provided as the input and use sid embedding layer.
            langs (Optional[int]): Number of languages. If set to > 1, assume that the
                lids will be provided as the input and use sid embedding layer.
            spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
                assume that spembs will be provided as the input.
            spk_embed_integration_type: How to integrate speaker embedding.
            use_gst (str): Whether to use global style token.
            gst_tokens (int): The number of GST embeddings.
            gst_heads (int): The number of heads in GST multihead attention.
            gst_conv_layers (int): The number of conv layers in GST.
            gst_conv_chans_list: (Sequence[int]):
                List of the number of channels of conv layers in GST.
            gst_conv_kernel_size (int): Kernel size of conv layers in GST.
            gst_conv_stride (int): Stride size of conv layers in GST.
            gst_gru_layers (int): The number of GRU layers in GST.
            gst_gru_units (int): The number of GRU units in GST.
            init_type (str): How to initialize transformer parameters.
            init_enc_alpha (float): Initial value of alpha in scaled pos encoding of the
                encoder.
            init_dec_alpha (float): Initial value of alpha in scaled pos encoding of the
                decoder.
            use_masking (bool): Whether to apply masking for padded part in loss
                calculation.
            use_weighted_masking (bool): Whether to apply weighted masking in loss
                calculation.
        """
        assert check_argument_types()
        super().__init__()
        # store hyperparameters
        self.idim = idim
        self.odim = odim
        self.eos = idim - 1  # the last token id is reserved for <eos>
        self.reduction_factor = reduction_factor
        self.encoder_type = encoder_type
        self.decoder_type = decoder_type
        self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor
        self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor
        self.use_scaled_pos_enc = use_scaled_pos_enc
        self.use_gst = use_gst
        # use idx 0 as padding idx
        self.padding_idx = 0
        # get positional encoding class
        pos_enc_class = (
            ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
        )
        # check relative positional encoding compatibility
        if "conformer" in [encoder_type, decoder_type]:
            if conformer_rel_pos_type == "legacy":
                if conformer_pos_enc_layer_type == "rel_pos":
                    conformer_pos_enc_layer_type = "legacy_rel_pos"
                    logging.warning(
                        "Fallback to conformer_pos_enc_layer_type = 'legacy_rel_pos' "
                        "due to the compatibility. If you want to use the new one, "
                        "please use conformer_pos_enc_layer_type = 'latest'."
                    )
                if conformer_self_attn_layer_type == "rel_selfattn":
                    conformer_self_attn_layer_type = "legacy_rel_selfattn"
                    logging.warning(
                        "Fallback to "
                        "conformer_self_attn_layer_type = 'legacy_rel_selfattn' "
                        "due to the compatibility. If you want to use the new one, "
                        "please use conformer_pos_enc_layer_type = 'latest'."
                    )
            elif conformer_rel_pos_type == "latest":
                assert conformer_pos_enc_layer_type != "legacy_rel_pos"
                assert conformer_self_attn_layer_type != "legacy_rel_selfattn"
            else:
                raise ValueError(f"Unknown rel_pos_type: {conformer_rel_pos_type}")
        # define encoder
        encoder_input_layer = torch.nn.Embedding(
            num_embeddings=idim, embedding_dim=adim, padding_idx=self.padding_idx
        )
        if encoder_type == "transformer":
            self.encoder = TransformerEncoder(
                idim=idim,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=eunits,
                num_blocks=elayers,
                input_layer=encoder_input_layer,
                dropout_rate=transformer_enc_dropout_rate,
                positional_dropout_rate=transformer_enc_positional_dropout_rate,
                attention_dropout_rate=transformer_enc_attn_dropout_rate,
                pos_enc_class=pos_enc_class,
                normalize_before=encoder_normalize_before,
                concat_after=encoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            )
        elif encoder_type == "conformer":
            self.encoder = ConformerEncoder(
                idim=idim,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=eunits,
                num_blocks=elayers,
                input_layer=encoder_input_layer,
                dropout_rate=transformer_enc_dropout_rate,
                positional_dropout_rate=transformer_enc_positional_dropout_rate,
                attention_dropout_rate=transformer_enc_attn_dropout_rate,
                normalize_before=encoder_normalize_before,
                concat_after=encoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
                macaron_style=use_macaron_style_in_conformer,
                pos_enc_layer_type=conformer_pos_enc_layer_type,
                selfattention_layer_type=conformer_self_attn_layer_type,
                activation_type=conformer_activation_type,
                use_cnn_module=use_cnn_in_conformer,
                cnn_module_kernel=conformer_enc_kernel_size,
                zero_triu=zero_triu,
            )
        else:
            raise ValueError(f"{encoder_type} is not supported.")
        # define GST
        if self.use_gst:
            self.gst = StyleEncoder(
                idim=odim,  # the input is mel-spectrogram
                gst_tokens=gst_tokens,
                gst_token_dim=adim,
                gst_heads=gst_heads,
                conv_layers=gst_conv_layers,
                conv_chans_list=gst_conv_chans_list,
                conv_kernel_size=gst_conv_kernel_size,
                conv_stride=gst_conv_stride,
                gru_layers=gst_gru_layers,
                gru_units=gst_gru_units,
            )
        # define spk and lang embedding
        self.spks = None
        if spks is not None and spks > 1:
            self.spks = spks
            self.sid_emb = torch.nn.Embedding(spks, adim)
        self.langs = None
        if langs is not None and langs > 1:
            self.langs = langs
            self.lid_emb = torch.nn.Embedding(langs, adim)
        # define additional projection for speaker embedding
        self.spk_embed_dim = None
        if spk_embed_dim is not None and spk_embed_dim > 0:
            self.spk_embed_dim = spk_embed_dim
            self.spk_embed_integration_type = spk_embed_integration_type
        if self.spk_embed_dim is not None:
            if self.spk_embed_integration_type == "add":
                self.projection = torch.nn.Linear(self.spk_embed_dim, adim)
            else:
                self.projection = torch.nn.Linear(adim + self.spk_embed_dim, adim)
        # define duration predictor
        self.duration_predictor = DurationPredictor(
            idim=adim,
            n_layers=duration_predictor_layers,
            n_chans=duration_predictor_chans,
            kernel_size=duration_predictor_kernel_size,
            dropout_rate=duration_predictor_dropout_rate,
        )
        # define pitch predictor
        self.pitch_predictor = VariancePredictor(
            idim=adim,
            n_layers=pitch_predictor_layers,
            n_chans=pitch_predictor_chans,
            kernel_size=pitch_predictor_kernel_size,
            dropout_rate=pitch_predictor_dropout,
        )
        # NOTE(kan-bayashi): We use continuous pitch + FastPitch style avg
        self.pitch_embed = torch.nn.Sequential(
            torch.nn.Conv1d(
                in_channels=1,
                out_channels=adim,
                kernel_size=pitch_embed_kernel_size,
                padding=(pitch_embed_kernel_size - 1) // 2,
            ),
            torch.nn.Dropout(pitch_embed_dropout),
        )
        # define energy predictor
        self.energy_predictor = VariancePredictor(
            idim=adim,
            n_layers=energy_predictor_layers,
            n_chans=energy_predictor_chans,
            kernel_size=energy_predictor_kernel_size,
            dropout_rate=energy_predictor_dropout,
        )
        # NOTE(kan-bayashi): We use continuous enegy + FastPitch style avg
        self.energy_embed = torch.nn.Sequential(
            torch.nn.Conv1d(
                in_channels=1,
                out_channels=adim,
                kernel_size=energy_embed_kernel_size,
                padding=(energy_embed_kernel_size - 1) // 2,
            ),
            torch.nn.Dropout(energy_embed_dropout),
        )
        # define length regulator
        self.length_regulator = LengthRegulator()
        # define decoder
        if decoder_type == "diffusion":
            self.decoder = SpectogramDenoiser(
                odim,
                adim=adim,
                layers=denoiser_layers,
                channels=denoiser_channels,
                timesteps=diffusion_steps,
                timescale=diffusion_timescale,
                max_beta=diffusion_beta,
                scheduler=diffusion_scheduler,
                cycle_length=diffusion_cycle_ln,
            )
        else:
            raise NotImplementedError(decoder_type)
        # define final projection
        if decoder_type != "diffusion":
            self.feat_out = torch.nn.Linear(adim, odim * reduction_factor)
        # reduction factor > 1 is not supported by this implementation yet
        if reduction_factor > 1:
            raise NotImplementedError()
        # define postnet
        self.postnet = (
            None
            if postnet_layers == 0
            else Postnet(
                idim=idim,
                odim=odim,
                n_layers=postnet_layers,
                n_chans=postnet_chans,
                n_filts=postnet_filts,
                use_batch_norm=use_batch_norm,
                dropout_rate=postnet_dropout_rate,
            )
        )
        # initialize parameters
        self._reset_parameters(
            init_type=init_type,
            init_enc_alpha=init_enc_alpha,
            init_dec_alpha=init_dec_alpha,
        )
        # define criterions
        self.criterion = ProDiffLoss(
            use_masking=use_masking, use_weighted_masking=use_weighted_masking
        )
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        durations: torch.Tensor,
        durations_lengths: torch.Tensor,
        pitch: torch.Tensor,
        pitch_lengths: torch.Tensor,
        energy: torch.Tensor,
        energy_lengths: torch.Tensor,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        joint_training: bool = False,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Calculate forward propagation.
        Args:
            text (LongTensor): Batch of padded token ids (B, T_text).
            text_lengths (LongTensor): Batch of lengths of each input (B,).
            feats (Tensor): Batch of padded target features (B, T_feats, odim).
            feats_lengths (LongTensor): Batch of the lengths of each target (B,).
            durations (LongTensor): Batch of padded durations (B, T_text + 1).
            durations_lengths (LongTensor): Batch of duration lengths (B, T_text + 1).
            pitch (Tensor): Batch of padded token-averaged pitch (B, T_text + 1, 1).
            pitch_lengths (LongTensor): Batch of pitch lengths (B, T_text + 1).
            energy (Tensor): Batch of padded token-averaged energy (B, T_text + 1, 1).
            energy_lengths (LongTensor): Batch of energy lengths (B, T_text + 1).
            spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
            lids (Optional[Tensor]): Batch of language IDs (B, 1).
            joint_training (bool): Whether to perform joint training with vocoder.
        Returns:
            Tensor: Loss scalar value.
            Dict: Statistics to be monitored.
            Tensor: Weight value if not joint training else model outputs.
        """
        text = text[:, : text_lengths.max()]  # for data-parallel
        feats = feats[:, : feats_lengths.max()]  # for data-parallel
        durations = durations[:, : durations_lengths.max()]  # for data-parallel
        pitch = pitch[:, : pitch_lengths.max()]  # for data-parallel
        energy = energy[:, : energy_lengths.max()]  # for data-parallel
        batch_size = text.size(0)
        # Add eos at the last of sequence
        xs = F.pad(text, [0, 1], "constant", self.padding_idx)
        for i, l in enumerate(text_lengths):
            xs[i, l] = self.eos
        ilens = text_lengths + 1  # lengths now include the appended <eos>
        ys, ds, ps, es = feats, durations, pitch, energy
        olens = feats_lengths
        # forward propagation
        before_outs, after_outs, d_outs, p_outs, e_outs = self._forward(
            xs,
            ilens,
            ys,
            olens,
            ds,
            ps,
            es,
            spembs=spembs,
            sids=sids,
            lids=lids,
            is_inference=False,
        )
        # modify mod part of groundtruth
        if self.reduction_factor > 1:
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
            max_olen = max(olens)
            ys = ys[:, :max_olen]
        # exclude postnet outputs from the loss when no postnet is defined
        if self.postnet is None:
            after_outs = None
        # calculate loss
        l1_loss, ssim_loss, duration_loss, pitch_loss, energy_loss = self.criterion(
            after_outs=after_outs,
            before_outs=before_outs,
            d_outs=d_outs,
            p_outs=p_outs,
            e_outs=e_outs,
            ys=ys,
            ds=ds,
            ps=ps,
            es=es,
            ilens=ilens,
            olens=olens,
        )
        loss = l1_loss + ssim_loss + duration_loss + pitch_loss + energy_loss
        stats = dict(
            l1_loss=l1_loss.item(),
            ssim_loss=ssim_loss.item(),
            duration_loss=duration_loss.item(),
            pitch_loss=pitch_loss.item(),
            energy_loss=energy_loss.item(),
        )
        # report extra information
        if self.encoder_type == "transformer" and self.use_scaled_pos_enc:
            stats.update(
                encoder_alpha=self.encoder.embed[-1].alpha.data.item(),
            )
        if self.decoder_type == "transformer" and self.use_scaled_pos_enc:
            stats.update(
                decoder_alpha=self.decoder.embed[-1].alpha.data.item(),
            )
        if not joint_training:
            stats.update(loss=loss.item())
            loss, stats, weight = force_gatherable(
                (loss, stats, batch_size), loss.device
            )
            return loss, stats, weight
        else:
            return loss, stats, after_outs if after_outs is not None else before_outs
    def _forward(
        self,
        xs: torch.Tensor,
        ilens: torch.Tensor,
        ys: Optional[torch.Tensor] = None,
        olens: Optional[torch.Tensor] = None,
        ds: Optional[torch.Tensor] = None,
        ps: Optional[torch.Tensor] = None,
        es: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        is_inference: bool = False,
        alpha: float = 1.0,
    ) -> Sequence[torch.Tensor]:
        """Calculate forward propagation without loss.
        Args:
            xs (LongTensor): Batch of padded token ids with <eos> (B, T_text + 1).
            ilens (LongTensor): Batch of input lengths including <eos> (B,).
            ys (Optional[Tensor]): Batch of padded target features
                (B, T_feats, odim).
            olens (Optional[LongTensor]): Batch of output lengths (B,).
            ds (Optional[LongTensor]): Batch of durations (B, T_text + 1).
            ps (Optional[Tensor]): Batch of token-averaged pitch
                (B, T_text + 1, 1).
            es (Optional[Tensor]): Batch of token-averaged energy
                (B, T_text + 1, 1).
            spembs (Optional[Tensor]): Batch of speaker embeddings
                (B, spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
            lids (Optional[Tensor]): Batch of language IDs (B, 1).
            is_inference (bool): Whether in inference mode; if True, predicted
                duration/pitch/energy are used instead of the groundtruth.
            alpha (float): Alpha to control the speed (used only in inference).
        Returns:
            Tensor: Decoder outputs before postnet (B, T_feats, odim).
            Tensor: Decoder outputs after postnet (B, T_feats, odim).
            Tensor: Duration predictor outputs (B, T_text + 1).
            Tensor: Pitch predictor outputs (B, T_text + 1, 1).
            Tensor: Energy predictor outputs (B, T_text + 1, 1).
        """
        # forward encoder
        x_masks = self._source_mask(ilens)
        hs, _ = self.encoder(xs, x_masks)  # (B, T_text, adim)
        # integrate with GST
        if self.use_gst:
            style_embs = self.gst(ys)
            hs = hs + style_embs.unsqueeze(1)
        # integrate with SID and LID embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)
        # forward duration predictor and variance predictors
        d_masks = make_pad_mask(ilens).to(xs.device)
        if self.stop_gradient_from_pitch_predictor:
            p_outs = self.pitch_predictor(hs.detach(), d_masks.unsqueeze(-1))
        else:
            p_outs = self.pitch_predictor(hs, d_masks.unsqueeze(-1))
        if self.stop_gradient_from_energy_predictor:
            e_outs = self.energy_predictor(hs.detach(), d_masks.unsqueeze(-1))
        else:
            e_outs = self.energy_predictor(hs, d_masks.unsqueeze(-1))
        if is_inference:
            d_outs = self.duration_predictor.inference(hs, d_masks)  # (B, T_text)
            # use prediction in inference
            p_embs = self.pitch_embed(p_outs.transpose(1, 2)).transpose(1, 2)
            e_embs = self.energy_embed(e_outs.transpose(1, 2)).transpose(1, 2)
            hs = hs + e_embs + p_embs
            hs = self.length_regulator(hs, d_outs, alpha)  # (B, T_feats, adim)
        else:
            d_outs = self.duration_predictor(hs, d_masks)
            # use groundtruth in training
            p_embs = self.pitch_embed(ps.transpose(1, 2)).transpose(1, 2)
            e_embs = self.energy_embed(es.transpose(1, 2)).transpose(1, 2)
            hs = hs + e_embs + p_embs
            hs = self.length_regulator(hs, ds)  # (B, T_feats, adim)
        # forward decoder
        if olens is not None and not is_inference:
            if self.reduction_factor > 1:
                olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            else:
                olens_in = olens
            h_masks = self._source_mask(olens_in)
        else:
            h_masks = None
        if self.decoder_type == "diffusion":
            before_outs = self.decoder(
                hs, ys, h_masks, is_inference
            )  # (B, T_feats, odim)
        else:
            zs, _ = self.decoder(hs, h_masks)  # (B, T_feats, adim)
            before_outs = self.feat_out(zs).view(
                zs.size(0), -1, self.odim
            )  # (B, T_feats, odim)
        # postnet -> (B, T_feats//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)
        return before_outs, after_outs, d_outs, p_outs, e_outs
    @torch.no_grad()
    def inference(
        self,
        text: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        durations: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        pitch: Optional[torch.Tensor] = None,
        energy: Optional[torch.Tensor] = None,
        alpha: float = 1.0,
        use_teacher_forcing: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Generate the sequence of features given the sequences of characters.
        Args:
            text (LongTensor): Input sequence of characters (T_text,).
            feats (Optional[Tensor]): Feature sequence to extract style (N, idim).
            durations (Optional[Tensor]): Groundtruth of duration (T_text + 1,).
            spembs (Optional[Tensor]): Speaker embedding vector (spk_embed_dim,).
            sids (Optional[Tensor]): Speaker ID (1,).
            lids (Optional[Tensor]): Language ID (1,).
            pitch (Optional[Tensor]): Groundtruth of token-avg pitch (T_text + 1, 1).
            energy (Optional[Tensor]): Groundtruth of token-avg energy (T_text + 1, 1).
            alpha (float): Alpha to control the speed.
            use_teacher_forcing (bool): Whether to use teacher forcing.
                If true, groundtruth of duration, pitch and energy will be used.
        Returns:
            Dict[str, Tensor]: Output dict including the following items:
                * feat_gen (Tensor): Output sequence of features (T_feats, odim).
                * duration (Tensor): Duration sequence (T_text + 1,).
                * pitch (Tensor): Pitch sequence (T_text + 1,).
                * energy (Tensor): Energy sequence (T_text + 1,).
        """
        x, y = text, feats
        spemb, d, p, e = spembs, durations, pitch, energy
        # add eos at the last of sequence
        x = F.pad(x, [0, 1], "constant", self.eos)
        # setup batch axis
        ilens = torch.tensor([x.shape[0]], dtype=torch.long, device=x.device)
        xs, ys = x.unsqueeze(0), None
        if y is not None:
            ys = y.unsqueeze(0)
        if spemb is not None:
            spembs = spemb.unsqueeze(0)
        if use_teacher_forcing:
            # use groundtruth of duration, pitch, and energy
            ds, ps, es = d.unsqueeze(0), p.unsqueeze(0), e.unsqueeze(0)
            _, outs, d_outs, p_outs, e_outs = self._forward(
                xs,
                ilens,
                ys,
                ds=ds,
                ps=ps,
                es=es,
                spembs=spembs,
                sids=sids,
                lids=lids,
                is_inference=True,
            )  # (1, T_feats, odim)
        else:
            _, outs, d_outs, p_outs, e_outs = self._forward(
                xs,
                ilens,
                ys,
                spembs=spembs,
                sids=sids,
                lids=lids,
                is_inference=True,
                alpha=alpha,
            )  # (1, T_feats, odim)
        return dict(
            feat_gen=outs[0],
            duration=d_outs[0],
            pitch=p_outs[0],
            energy=e_outs[0],
        )
    def _integrate_with_spk_embed(
        self, hs: torch.Tensor, spembs: torch.Tensor
    ) -> torch.Tensor:
        """Integrate speaker embedding with hidden states.
        Args:
            hs (Tensor): Batch of hidden state sequences (B, T_text, adim).
            spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
        Returns:
            Tensor: Batch of integrated hidden state sequences (B, T_text, adim).
        """
        if self.spk_embed_integration_type == "add":
            # apply projection and then add to hidden states
            spembs = self.projection(F.normalize(spembs))
            hs = hs + spembs.unsqueeze(1)
        elif self.spk_embed_integration_type == "concat":
            # concat hidden states with spk embeds and then apply projection
            spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
            hs = self.projection(torch.cat([hs, spembs], dim=-1))
        else:
            raise NotImplementedError("support only add or concat.")
        return hs
    def _source_mask(self, ilens: torch.Tensor) -> torch.Tensor:
        """Make masks for self-attention.
        Args:
            ilens (LongTensor): Batch of lengths (B,).
        Returns:
            Tensor: Mask tensor for self-attention.
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)
        Examples:
            >>> ilens = [5, 3]
            >>> self._source_mask(ilens)
            tensor([[[1, 1, 1, 1, 1],
                     [1, 1, 1, 0, 0]]], dtype=torch.uint8)
        """
        x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
        return x_masks.unsqueeze(-2)
    def _reset_parameters(
        self, init_type: str, init_enc_alpha: float, init_dec_alpha: float
    ):
        """Reset parameters of the model.
        Args:
            init_type (str): Type of initialization.
            init_enc_alpha (float): Initial alpha of the encoder's scaled pos encoding.
            init_dec_alpha (float): Initial alpha of the decoder's scaled pos encoding.
        """
        # initialize parameters
        if init_type != "pytorch":
            initialize(self, init_type)
        # initialize alpha in scaled positional encoding
        if self.encoder_type == "transformer" and self.use_scaled_pos_enc:
            self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
        if self.decoder_type == "transformer" and self.use_scaled_pos_enc:
            self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
| 35,170 | 41.120958 | 88 | py |
espnet | espnet-master/espnet2/tts/prodiff/loss.py | # Copyright 2022 Hitachi LTD. (Nelson Yalta)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""ProDiff related loss module for ESPnet2."""
from math import exp
from typing import Tuple
import torch
from torch.nn import functional as F
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import ( # noqa: H301
DurationPredictorLoss,
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
def gaussian(window_size: int, sigma: float) -> torch.Tensor:
    """Build a normalized 1-D Gaussian window.

    Args:
        window_size (int): Number of taps in the window.
        sigma (float): Standard deviation of the Gaussian.

    Returns:
        torch.Tensor: Window weights of shape (window_size,) summing to 1.
    """
    center = window_size // 2
    denom = float(2 * sigma**2)
    weights = []
    for tap in range(window_size):
        weights.append(exp(-((tap - center) ** 2) / denom))
    kernel = torch.Tensor(weights)
    # normalize so that the taps sum to one
    return kernel / kernel.sum()
class SSimLoss(torch.nn.Module):
    """SSimLoss.
    This is an implementation of structural similarity (SSIM) loss.
    This code is modified from https://github.com/Po-Hsun-Su/pytorch-ssim.
    """
    def __init__(
        self,
        bias: float = 6.0,
        window_size: int = 11,
        channels: int = 1,
        reduction: str = "none",
    ):
        """Initialization.
        Args:
            bias (float, optional): value of the bias. Defaults to 6.0.
            window_size (int, optional): Window size. Defaults to 11.
            channels (int, optional): Number of channels. Defaults to 1.
            reduction (str, optional): Type of reduction during the loss
                calculation. Defaults to "none".
        """
        super().__init__()
        self.bias = bias
        self.win_len = window_size
        self.channels = channels
        # average over the full SSIM map only when reduction == "mean"
        self.average = False
        if reduction == "mean":
            self.average = True
        # 2-D Gaussian window built as the outer product of two 1-D windows,
        # shaped (channels, 1, window_size, window_size) for grouped conv2d
        win1d = gaussian(window_size, 1.5).unsqueeze(1)
        win2d = win1d.mm(win1d.t()).float().unsqueeze(0).unsqueeze(0)
        # NOTE(review): stored as a plain attribute rather than a registered
        # buffer, so ``module.to(device)`` will not move it; ``ssim()`` moves
        # it to the input's device on every call instead.
        self.window = torch.Tensor(
            win2d.expand(channels, 1, window_size, window_size).contiguous()
        )
    def forward(self, outputs: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.
        Args:
            outputs (torch.Tensor): Batch of output sequences generated
                by the model (batch, time, mels).
            target (torch.Tensor): Batch of sequences with true
                states (batch, time, mels).
        Returns:
            Tensor: Loss scalar value.
        """
        with torch.no_grad():
            # frames whose target features are all exactly zero are treated
            # as padding and excluded from the final average
            dim = target.size(-1)
            mask = target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim)
        # add a channel axis and shift both inputs by a constant bias
        outputs = outputs.unsqueeze(1) + self.bias
        target = target.unsqueeze(1) + self.bias
        loss = 1 - self.ssim(outputs, target)
        # average the per-element loss over non-padded positions only
        loss = (loss * mask).sum() / mask.sum()
        return loss
    def ssim(self, tensor1: torch.Tensor, tensor2: torch.Tensor) -> torch.Tensor:
        """Calculate SSIM loss.
        Args:
            tensor1 (torch.Tensor): Generated output.
            tensor2 (torch.Tensor): Groundtruth output.
        Returns:
            Tensor: Loss scalar value.
        """
        window = self.window.to(tensor1.device)
        # local Gaussian-weighted means
        mu1 = F.conv2d(tensor1, window, padding=self.win_len // 2, groups=self.channels)
        mu2 = F.conv2d(tensor2, window, padding=self.win_len // 2, groups=self.channels)
        mu_corr = mu1 * mu2
        mu1 = mu1.pow(2)
        mu2 = mu2.pow(2)
        # local variances: E[x^2] - (E[x])^2
        sigma1 = (
            F.conv2d(
                tensor1 * tensor1,
                window,
                padding=self.win_len // 2,
                groups=self.channels,
            )
            - mu1
        )
        sigma2 = (
            F.conv2d(
                tensor2 * tensor2,
                window,
                padding=self.win_len // 2,
                groups=self.channels,
            )
            - mu2
        )
        # local covariance: E[xy] - E[x]E[y]
        sigma_corr = (
            F.conv2d(
                tensor1 * tensor2,
                window,
                padding=self.win_len // 2,
                groups=self.channels,
            )
            - mu_corr
        )
        # stability constants of the standard SSIM formulation
        C1 = 0.01**2
        C2 = 0.03**2
        ssim_map = ((2 * mu_corr + C1) * (2 * sigma_corr + C2)) / (
            (mu1 + mu2 + C1) * (sigma1 + sigma2 + C2)
        )
        if self.average:
            return ssim_map.mean()
        # reduce over the channel axis only when reduction == "none"
        return ssim_map.mean(1)
class ProDiffLoss(torch.nn.Module):
    """Loss function module for ProDiffLoss."""
    def __init__(
        self,
        use_masking: bool = True,
        use_weighted_masking: bool = False,
    ):
        """Initialize feed-forward Transformer loss module.
        Args:
            use_masking (bool): Whether to apply masking for padded part in loss
                calculation.
            use_weighted_masking (bool): Whether to weighted masking in loss
                calculation.
        """
        assert check_argument_types()
        super().__init__()
        # masking and weighted masking must not be enabled at the same time
        assert (use_masking != use_weighted_masking) or not use_masking
        self.use_masking = use_masking
        self.use_weighted_masking = use_weighted_masking
        # define criterions
        reduction = "none" if self.use_weighted_masking else "mean"
        self.l1_criterion = torch.nn.L1Loss(reduction=reduction)
        self.mse_criterion = torch.nn.MSELoss(reduction=reduction)
        self.duration_criterion = DurationPredictorLoss(reduction=reduction)
        # SSimLoss masks padded frames internally, so reduction stays "none"
        self.ssim_criterion = SSimLoss(reduction="none")
    def forward(
        self,
        after_outs: torch.Tensor,
        before_outs: torch.Tensor,
        d_outs: torch.Tensor,
        p_outs: torch.Tensor,
        e_outs: torch.Tensor,
        ys: torch.Tensor,
        ds: torch.Tensor,
        ps: torch.Tensor,
        es: torch.Tensor,
        ilens: torch.Tensor,
        olens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Calculate forward propagation.
        Args:
            after_outs (Tensor): Batch of outputs after postnets (B, T_feats, odim).
            before_outs (Tensor): Batch of outputs before postnets (B, T_feats, odim).
            d_outs (LongTensor): Batch of outputs of duration predictor (B, T_text).
            p_outs (Tensor): Batch of outputs of pitch predictor (B, T_text, 1).
            e_outs (Tensor): Batch of outputs of energy predictor (B, T_text, 1).
            ys (Tensor): Batch of target features (B, T_feats, odim).
            ds (LongTensor): Batch of durations (B, T_text).
            ps (Tensor): Batch of target token-averaged pitch (B, T_text, 1).
            es (Tensor): Batch of target token-averaged energy (B, T_text, 1).
            ilens (LongTensor): Batch of the lengths of each input (B,).
            olens (LongTensor): Batch of the lengths of each target (B,).
        Returns:
            Tensor: L1 loss value.
            Tensor: SSIM loss value.
            Tensor: Duration predictor loss value.
            Tensor: Pitch predictor loss value.
            Tensor: Energy predictor loss value.
        """
        # First SSIM before masks
        # (SSimLoss derives its own padding mask from the zero-padded targets)
        ssim_loss = self.ssim_criterion(before_outs, ys)
        # apply mask to remove padded part
        if self.use_masking:
            out_masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            before_outs = before_outs.masked_select(out_masks)
            if after_outs is not None:
                after_outs = after_outs.masked_select(out_masks)
            ys = ys.masked_select(out_masks)
            duration_masks = make_non_pad_mask(ilens).to(ys.device)
            d_outs = d_outs.masked_select(duration_masks)
            ds = ds.masked_select(duration_masks)
            pitch_masks = make_non_pad_mask(ilens).unsqueeze(-1).to(ys.device)
            p_outs = p_outs.masked_select(pitch_masks)
            e_outs = e_outs.masked_select(pitch_masks)
            ps = ps.masked_select(pitch_masks)
            es = es.masked_select(pitch_masks)
        # calculate loss
        l1_loss = self.l1_criterion(before_outs, ys)
        if after_outs is not None:
            l1_loss += self.l1_criterion(after_outs, ys)
        duration_loss = self.duration_criterion(d_outs, ds)
        pitch_loss = self.mse_criterion(p_outs, ps)
        energy_loss = self.mse_criterion(e_outs, es)
        # make weighted mask and apply it
        if self.use_weighted_masking:
            out_masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            out_weights = out_masks.float() / out_masks.sum(dim=1, keepdim=True).float()
            out_weights /= ys.size(0) * ys.size(2)
            duration_masks = make_non_pad_mask(ilens).to(ys.device)
            duration_weights = (
                duration_masks.float() / duration_masks.sum(dim=1, keepdim=True).float()
            )
            duration_weights /= ds.size(0)
            # apply weight
            # NOTE(review): ssim_loss is left unweighted here; SSimLoss already
            # normalizes by its internal non-padding mask.
            l1_loss = l1_loss.mul(out_weights).masked_select(out_masks).sum()
            duration_loss = (
                duration_loss.mul(duration_weights).masked_select(duration_masks).sum()
            )
            # pitch and energy share the duration (token-level) mask/weights
            pitch_masks = duration_masks.unsqueeze(-1)
            pitch_weights = duration_weights.unsqueeze(-1)
            pitch_loss = pitch_loss.mul(pitch_weights).masked_select(pitch_masks).sum()
            energy_loss = (
                energy_loss.mul(pitch_weights).masked_select(pitch_masks).sum()
            )
        return l1_loss, ssim_loss, duration_loss, pitch_loss, energy_loss
| 9,540 | 33.197133 | 88 | py |
espnet | espnet-master/espnet2/tts/prodiff/denoiser.py | # Implemented from
# (https://github.com/Rongjiehuang/ProDiff/blob/main/modules/ProDiff/model/ProDiff_teacher.py)
# Copyright 2022 Hitachi LTD. (Nelson Yalta)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
from typing import Optional, Union
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
def _vpsde_beta_t(t: int, T: int, min_beta: float, max_beta: float) -> float:
"""Beta Scheduler.
Args:
t (int): current step.
T (int): total steps.
min_beta (float): minimum beta.
max_beta (float): maximum beta.
Returns:
float: current beta.
"""
t_coef = (2 * t - 1) / (T**2)
return 1.0 - np.exp(-min_beta / T - 0.5 * (max_beta - min_beta) * t_coef)
def noise_scheduler(
    sched_type: str,
    timesteps: int,
    min_beta: float = 0.0,
    max_beta: float = 0.01,
    s: float = 0.008,
) -> torch.Tensor:
    """Build a beta (noise) schedule for the diffusion process.

    Args:
        sched_type (str): type of scheduler ("linear", "cosine" or "vpsde").
        timesteps (int): number of time steps.
        min_beta (float, optional): Minimum beta. Defaults to 0.0.
        max_beta (float, optional): Maximum beta. Defaults to 0.01.
        s (float, optional): Scheduler intersection. Defaults to 0.008.

    Returns:
        Tensor: Noise schedule of shape ``(timesteps,)`` (float32).
    """
    if sched_type == "linear":
        betas = np.linspace(1e-6, 0.01, timesteps)
    elif sched_type == "cosine":
        # Cosine schedule of Nichol & Dhariwal: betas are derived from the
        # ratio of consecutive cumulative-alpha values, then clipped.
        n_points = timesteps + 1
        grid = np.linspace(0, n_points, n_points)
        cum_alphas = np.cos(((grid / n_points) + s) / (1 + s) * np.pi * 0.5) ** 2
        cum_alphas = cum_alphas / cum_alphas[0]
        raw_betas = 1 - (cum_alphas[1:] / cum_alphas[:-1])
        betas = np.clip(raw_betas, a_min=0, a_max=0.999)
    elif sched_type == "vpsde":
        betas = np.array(
            [
                _vpsde_beta_t(step, timesteps, min_beta, max_beta)
                for step in range(1, timesteps + 1)
            ]
        )
    else:
        raise NotImplementedError
    return torch.as_tensor(betas.astype(np.float32))
class Mish(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))``.

    Introduced in `Mish: A Self Regularized Non-Monotonic Activation Function`_.

    .. _Mish: A Self Regularized Non-Monotonic Activation Function:
       https://arxiv.org/abs/1908.08681
    """

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the Mish non-linearity element-wise.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Activated tensor with the same shape as ``x``.
        """
        return torch.tanh(F.softplus(x)) * x
class ResidualBlock(nn.Module):
    """Gated residual block used by the diffusion denoiser."""

    def __init__(
        self,
        adim: int,
        channels: int,
        dilation: int,
    ) -> None:
        """Initialize the block.

        Args:
            adim (int): Size of dimensions.
            channels (int): Number of channels.
            dilation (int): Size of dilations.
        """
        super().__init__()
        self.conv = nn.Conv1d(
            channels, 2 * channels, 3, padding=dilation, dilation=dilation
        )
        self.diff_proj = nn.Linear(channels, channels)
        self.cond_proj = nn.Conv1d(adim, 2 * channels, 1)
        self.out_proj = nn.Conv1d(channels, 2 * channels, 1)

    def forward(
        self, x: torch.Tensor, condition: torch.Tensor, step: torch.Tensor
    ) -> Union[torch.Tensor, torch.Tensor]:
        """Run one gated-residual step.

        Args:
            x (torch.Tensor): Input tensor.
            condition (torch.Tensor): Conditioning tensor.
            step (torch.Tensor): Number of diffusion step.

        Returns:
            Union[torch.Tensor, torch.Tensor]: Residual output and skip output.
        """
        # Inject the diffusion-step embedding, then the conditioning signal.
        step_emb = self.diff_proj(step).unsqueeze(-1)
        cond = self.cond_proj(condition)
        hidden = self.conv(x + step_emb) + cond
        # WaveNet-style gating: sigmoid gate modulates a tanh content branch.
        gate, content = torch.chunk(hidden, 2, dim=1)
        gated = torch.sigmoid(gate) * torch.tanh(content)
        projected = self.out_proj(gated)
        residual, skip = torch.chunk(projected, 2, dim=1)
        return (x + residual) / math.sqrt(2.0), skip
class SpectogramDenoiser(nn.Module):
    """Spectogram Denoiser.

    Ref: https://arxiv.org/pdf/2207.06389.pdf.
    """

    def __init__(
        self,
        idim: int,
        adim: int = 256,
        layers: int = 20,
        channels: int = 256,
        cycle_length: int = 1,
        timesteps: int = 200,
        timescale: int = 1,
        max_beta: float = 40.0,
        scheduler: str = "vpsde",
        dropout_rate: float = 0.05,
    ) -> None:
        """Initialization.

        Args:
            idim (int): Dimension of the inputs.
            adim (int, optional): Dimension of the hidden states. Defaults to 256.
            layers (int, optional): Number of layers. Defaults to 20.
            channels (int, optional): Number of channels of each layer. Defaults to 256.
            cycle_length (int, optional): Cycle length of the diffusion. Defaults to 1.
            timesteps (int, optional): Number of timesteps of the diffusion.
                Defaults to 200.
            timescale (int, optional): Number of timescale. Defaults to 1.
            max_beta (float, optional): Maximum beta value for scheduler.
                Defaults to 40.
            scheduler (str, optional): Type of noise scheduler. Defaults to "vpsde".
            dropout_rate (float, optional): Dropout rate. Defaults to 0.05.
        """
        super().__init__()
        self.idim = idim
        self.timesteps = timesteps
        self.scale = timescale
        self.num_layers = layers
        self.channels = channels

        # Denoiser: input projection, step embedding and residual stack.
        self.in_proj = nn.Conv1d(idim, channels, 1)
        self.denoiser_pos = PositionalEncoding(channels, dropout_rate)
        self.denoiser_mlp = nn.Sequential(
            nn.Linear(channels, channels * 4), Mish(), nn.Linear(channels * 4, channels)
        )
        self.denoiser_res = nn.ModuleList(
            [
                ResidualBlock(adim, channels, 2 ** (i % cycle_length))
                for i in range(layers)
            ]
        )
        self.skip_proj = nn.Conv1d(channels, channels, 1)
        self.feats_out = nn.Conv1d(channels, idim, 1)

        # Diffusion: precompute sqrt(cumprod(alpha)) and sqrt(1 - cumprod(alpha))
        # used by the closed-form q-sample in ``diffusion``.
        self.betas = noise_scheduler(scheduler, timesteps + 1, 0.1, max_beta, 8e-3)
        alphas = 1.0 - self.betas
        alphas_cumulative = torch.cumprod(alphas, dim=0)
        self.register_buffer("alphas_cumulative", torch.sqrt(alphas_cumulative))
        self.register_buffer(
            "min_alphas_cumulative", torch.sqrt(1.0 - alphas_cumulative)
        )

    def forward(
        self,
        xs: torch.Tensor,
        ys: Optional[torch.Tensor] = None,
        masks: Optional[torch.Tensor] = None,
        is_inference: bool = False,
    ) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            xs (torch.Tensor): Phoneme-encoded tensor (#batch, time, dims)
            ys (Optional[torch.Tensor], optional): Mel-based reference
                tensor (#batch, time, mels). Defaults to None.
            masks (Optional[torch.Tensor], optional): Mask tensor (#batch, time).
                Defaults to None.
            is_inference (bool, optional): Whether to run the iterative sampling
                loop instead of a single training step. Defaults to False.

        Returns:
            torch.Tensor: Output tensor (#batch, time, dims).
        """
        if is_inference:
            return self.inference(xs)

        batch_size = xs.shape[0]
        # Draw one random diffusion step per batch element.
        timesteps = (
            torch.randint(0, self.timesteps + 1, (batch_size,)).to(xs.device).long()
        )

        # Diffusion (q-sample): corrupt the reference features with noise.
        ys_noise = self.diffusion(ys, timesteps)  # (batch, 1, dims, time)
        ys_noise = ys_noise * masks.unsqueeze(1)

        # Denoise
        ys_denoise = self.forward_denoise(ys_noise, timesteps, xs)
        ys_denoise = ys_denoise * masks

        return ys_denoise.transpose(1, 2)

    def forward_denoise(
        self, xs_noisy: torch.Tensor, step: torch.Tensor, condition: torch.Tensor
    ) -> torch.Tensor:
        """Calculate forward for denoising diffusion.

        Args:
            xs_noisy (torch.Tensor): Input tensor.
            step (torch.Tensor): Number of step.
            condition (torch.Tensor): Conditioning tensor.

        Returns:
            torch.Tensor: Denoised tensor.
        """
        xs_noisy = xs_noisy.squeeze(1)
        condition = condition.transpose(1, 2)
        xs_noisy = F.relu(self.in_proj(xs_noisy))

        # Diffusion-step embedding: positional encoding followed by an MLP.
        step = step.unsqueeze(-1).expand(-1, self.channels)
        step = self.denoiser_pos(step.unsqueeze(1)).squeeze(1)
        step = self.denoiser_mlp(step)

        skip_conns = list()
        for layer in self.denoiser_res:
            xs_noisy, skip = layer(xs_noisy, condition, step)
            skip_conns.append(skip)
        xs_noisy = torch.sum(torch.stack(skip_conns), dim=0) / math.sqrt(
            self.num_layers
        )
        xs_denoise = F.relu(self.skip_proj(xs_noisy))
        # BUGFIX: feed the skip-projected, ReLU-activated features into the
        # output projection. Previously ``feats_out`` consumed ``xs_noisy``,
        # silently discarding the ``skip_proj`` + ReLU computation above.
        xs_denoise = self.feats_out(xs_denoise)
        return xs_denoise

    def diffusion(
        self,
        xs_ref: torch.Tensor,
        steps: torch.Tensor,
        noise: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Calculate diffusion process during training.

        Args:
            xs_ref (torch.Tensor): Input tensor.
            steps (torch.Tensor): Number of step.
            noise (Optional[torch.Tensor], optional): Noise tensor. Defaults to None.

        Returns:
            torch.Tensor: Output tensor.
        """
        # here goes norm_spec if does something
        batch_size = xs_ref.shape[0]
        xs_ref = xs_ref.transpose(1, 2).unsqueeze(1)
        steps = torch.clamp(steps, min=0)  # not sure if this is required

        # make a noise tensor
        if noise is None:
            noise = torch.randn_like(xs_ref)

        # q-sample: x_t = sqrt(cumprod(alpha)) * x_0 + sqrt(1-cumprod(alpha)) * eps
        ndims = (batch_size, *((1,) * (xs_ref.dim() - 1)))
        cum_prods = self.alphas_cumulative.gather(-1, steps).reshape(ndims)
        min_cum_prods = self.min_alphas_cumulative.gather(-1, steps).reshape(ndims)
        xs_noisy = xs_ref * cum_prods + noise * min_cum_prods
        return xs_noisy

    def inference(self, condition: torch.Tensor) -> torch.Tensor:
        """Calculate forward during inference.

        Args:
            condition (torch.Tensor): Conditioning tensor (batch, time, dims).

        Returns:
            torch.Tensor: Output tensor.
        """
        batch = condition.shape[0]
        device = condition.device
        shape = (batch, 1, self.idim, condition.shape[1])
        xs_noisy = torch.randn(shape).to(device)  # (batch, 1, dims, time)

        # required params: posterior coefficients derived from the schedule
        beta = self.betas
        alph = 1.0 - beta
        alph_prod = torch.cumprod(alph, axis=0)
        alph_prod_prv = torch.cat((torch.ones((1,)), alph_prod[:-1]))
        coef1 = beta * torch.sqrt(alph_prod_prv) / (1.0 - alph_prod)
        coef2 = (1.0 - alph_prod_prv) * torch.sqrt(alph) / (1.0 - alph_prod)
        post_var = beta * (1.0 - alph_prod_prv) / (1.0 - alph_prod)
        # clamp before log to avoid -inf at step 0
        post_var = torch.log(torch.maximum(post_var, torch.full((1,), 1e-20)))

        # denoising steps (reverse process)
        for _step in reversed(range(0, self.timesteps)):
            # p-sample
            steps = torch.full((batch,), _step, dtype=torch.long).to(device)
            xs_denoised = self.forward_denoise(xs_noisy, steps, condition).unsqueeze(1)

            # q-posterior (xs_denoised, xs_noisy, steps)
            ndims = (batch, *((1,) * (xs_denoised.dim() - 1)))
            _coef1 = coef1.gather(-1, steps).reshape(ndims)
            _coef2 = coef2.gather(-1, steps).reshape(ndims)
            q_mean = _coef1 * xs_denoised + _coef2 * xs_noisy
            q_log_var = post_var.gather(-1, steps).reshape(ndims)

            # q-posterior-sample (no noise is added at the final step 0)
            noise = torch.randn_like(xs_denoised).to(device)
            _mask = (1 - (steps == 0).float()).reshape(ndims)
            xs_noisy = q_mean + _mask * (0.5 * q_log_var).exp() * noise

        # NOTE: only the first batch element is returned here.
        ys = xs_noisy[0].transpose(1, 2)
        return ys
| 12,143 | 32.271233 | 94 | py |
espnet | espnet-master/espnet2/tts/fastspeech2/loss.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Fastspeech2 related loss module for ESPnet2."""
from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import ( # noqa: H301
DurationPredictorLoss,
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class FastSpeech2Loss(torch.nn.Module):
    """Loss function module for FastSpeech2."""

    def __init__(self, use_masking: bool = True, use_weighted_masking: bool = False):
        """Initialize feed-forward Transformer loss module.

        Args:
            use_masking (bool): Whether to apply masking for padded part in loss
                calculation.
            use_weighted_masking (bool): Whether to weighted masking in loss
                calculation.
        """
        assert check_argument_types()
        super().__init__()

        # The two masking modes are mutually exclusive: (True, True) is
        # rejected; (False, False), (True, False) and (False, True) pass.
        assert (use_masking != use_weighted_masking) or not use_masking
        self.use_masking = use_masking
        self.use_weighted_masking = use_weighted_masking

        # define criterions
        # With weighted masking, per-element losses are kept ("none") so that
        # per-frame weights can be applied afterwards; otherwise reduce to mean.
        reduction = "none" if self.use_weighted_masking else "mean"
        self.l1_criterion = torch.nn.L1Loss(reduction=reduction)
        self.mse_criterion = torch.nn.MSELoss(reduction=reduction)
        self.duration_criterion = DurationPredictorLoss(reduction=reduction)

    def forward(
        self,
        after_outs: torch.Tensor,
        before_outs: torch.Tensor,
        d_outs: torch.Tensor,
        p_outs: torch.Tensor,
        e_outs: torch.Tensor,
        ys: torch.Tensor,
        ds: torch.Tensor,
        ps: torch.Tensor,
        es: torch.Tensor,
        ilens: torch.Tensor,
        olens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Calculate forward propagation.

        Args:
            after_outs (Tensor): Batch of outputs after postnets (B, T_feats, odim).
            before_outs (Tensor): Batch of outputs before postnets (B, T_feats, odim).
            d_outs (LongTensor): Batch of outputs of duration predictor (B, T_text).
            p_outs (Tensor): Batch of outputs of pitch predictor (B, T_text, 1).
            e_outs (Tensor): Batch of outputs of energy predictor (B, T_text, 1).
            ys (Tensor): Batch of target features (B, T_feats, odim).
            ds (LongTensor): Batch of durations (B, T_text).
            ps (Tensor): Batch of target token-averaged pitch (B, T_text, 1).
            es (Tensor): Batch of target token-averaged energy (B, T_text, 1).
            ilens (LongTensor): Batch of the lengths of each input (B,).
            olens (LongTensor): Batch of the lengths of each target (B,).

        Returns:
            Tensor: L1 loss value.
            Tensor: Duration predictor loss value.
            Tensor: Pitch predictor loss value.
            Tensor: Energy predictor loss value.
        """
        # apply mask to remove padded part
        # masked_select flattens predictions and targets identically, so the
        # element-wise criteria below only see valid (non-padded) positions.
        if self.use_masking:
            out_masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            before_outs = before_outs.masked_select(out_masks)
            if after_outs is not None:
                after_outs = after_outs.masked_select(out_masks)
            ys = ys.masked_select(out_masks)
            duration_masks = make_non_pad_mask(ilens).to(ys.device)
            d_outs = d_outs.masked_select(duration_masks)
            ds = ds.masked_select(duration_masks)
            # pitch and energy share the same (B, T_text, 1) mask
            pitch_masks = make_non_pad_mask(ilens).unsqueeze(-1).to(ys.device)
            p_outs = p_outs.masked_select(pitch_masks)
            e_outs = e_outs.masked_select(pitch_masks)
            ps = ps.masked_select(pitch_masks)
            es = es.masked_select(pitch_masks)

        # calculate loss
        l1_loss = self.l1_criterion(before_outs, ys)
        if after_outs is not None:
            # post-net output is trained against the same target
            l1_loss += self.l1_criterion(after_outs, ys)
        duration_loss = self.duration_criterion(d_outs, ds)
        pitch_loss = self.mse_criterion(p_outs, ps)
        energy_loss = self.mse_criterion(e_outs, es)

        # make weighted mask and apply it
        # Each sequence contributes equally regardless of its length: weights
        # are normalized per sequence, then by batch size (and feature dim).
        if self.use_weighted_masking:
            out_masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            out_weights = out_masks.float() / out_masks.sum(dim=1, keepdim=True).float()
            out_weights /= ys.size(0) * ys.size(2)
            duration_masks = make_non_pad_mask(ilens).to(ys.device)
            duration_weights = (
                duration_masks.float() / duration_masks.sum(dim=1, keepdim=True).float()
            )
            duration_weights /= ds.size(0)

            # apply weight
            l1_loss = l1_loss.mul(out_weights).masked_select(out_masks).sum()
            duration_loss = (
                duration_loss.mul(duration_weights).masked_select(duration_masks).sum()
            )
            pitch_masks = duration_masks.unsqueeze(-1)
            pitch_weights = duration_weights.unsqueeze(-1)
            pitch_loss = pitch_loss.mul(pitch_weights).masked_select(pitch_masks).sum()
            energy_loss = (
                energy_loss.mul(pitch_weights).masked_select(pitch_masks).sum()
            )

        return l1_loss, duration_loss, pitch_loss, energy_loss
| 5,331 | 40.984252 | 88 | py |
espnet | espnet-master/espnet2/tts/fastspeech2/variance_predictor.py | #!/usr/bin/env python3
# Copyright 2020 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Variance predictor related modules."""
import torch
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class VariancePredictor(torch.nn.Module):
    """Variance predictor module.

    This is a module of variance predictor described in `FastSpeech 2:
    Fast and High-Quality End-to-End Text to Speech`_.

    .. _`FastSpeech 2: Fast and High-Quality End-to-End Text to Speech`:
        https://arxiv.org/abs/2006.04558
    """

    def __init__(
        self,
        idim: int,
        n_layers: int = 2,
        n_chans: int = 384,
        kernel_size: int = 3,
        bias: bool = True,
        dropout_rate: float = 0.5,
    ):
        """Initilize duration predictor module.

        Args:
            idim (int): Input dimension.
            n_layers (int): Number of convolutional layers.
            n_chans (int): Number of channels of convolutional layers.
            kernel_size (int): Kernel size of convolutional layers.
            dropout_rate (float): Dropout rate.
        """
        assert check_argument_types()
        super().__init__()
        # Stack of Conv1d -> ReLU -> LayerNorm -> Dropout blocks; the first
        # block maps idim -> n_chans, all later ones keep n_chans.
        conv_blocks = []
        for layer_idx in range(n_layers):
            n_in = idim if layer_idx == 0 else n_chans
            conv_blocks.append(
                torch.nn.Sequential(
                    torch.nn.Conv1d(
                        n_in,
                        n_chans,
                        kernel_size,
                        stride=1,
                        padding=(kernel_size - 1) // 2,
                        bias=bias,
                    ),
                    torch.nn.ReLU(),
                    LayerNorm(n_chans, dim=1),
                    torch.nn.Dropout(dropout_rate),
                )
            )
        self.conv = torch.nn.ModuleList(conv_blocks)
        self.linear = torch.nn.Linear(n_chans, 1)

    def forward(self, xs: torch.Tensor, x_masks: torch.Tensor = None) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor): Batch of masks indicating padded part (B, Tmax).

        Returns:
            Tensor: Batch of predicted sequences (B, Tmax, 1).
        """
        hs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for conv_block in self.conv:
            hs = conv_block(hs)  # (B, C, Tmax)

        preds = self.linear(hs.transpose(1, 2))  # (B, Tmax, 1)

        if x_masks is None:
            return preds
        # zero out predictions on padded positions
        return preds.masked_fill(x_masks, 0.0)
| 2,624 | 29.172414 | 86 | py |
espnet | espnet-master/espnet2/tts/fastspeech2/fastspeech2.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Fastspeech2 related modules for ESPnet2."""
import logging
from typing import Dict, Optional, Sequence, Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.torch_utils.initialize import initialize
from espnet2.tts.abs_tts import AbsTTS
from espnet2.tts.fastspeech2.loss import FastSpeech2Loss
from espnet2.tts.fastspeech2.variance_predictor import VariancePredictor
from espnet2.tts.gst.style_encoder import StyleEncoder
from espnet.nets.pytorch_backend.conformer.encoder import Encoder as ConformerEncoder
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import DurationPredictor
from espnet.nets.pytorch_backend.fastspeech.length_regulator import LengthRegulator
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask, make_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import (
Encoder as TransformerEncoder,
)
class FastSpeech2(AbsTTS):
"""FastSpeech2 module.
This is a module of FastSpeech2 described in `FastSpeech 2: Fast and
High-Quality End-to-End Text to Speech`_. Instead of quantized pitch and
energy, we use token-averaged value introduced in `FastPitch: Parallel
Text-to-speech with Pitch Prediction`_.
.. _`FastSpeech 2: Fast and High-Quality End-to-End Text to Speech`:
https://arxiv.org/abs/2006.04558
.. _`FastPitch: Parallel Text-to-speech with Pitch Prediction`:
https://arxiv.org/abs/2006.06873
"""
    def __init__(
        self,
        # network structure related
        idim: int,
        odim: int,
        adim: int = 384,
        aheads: int = 4,
        elayers: int = 6,
        eunits: int = 1536,
        dlayers: int = 6,
        dunits: int = 1536,
        postnet_layers: int = 5,
        postnet_chans: int = 512,
        postnet_filts: int = 5,
        postnet_dropout_rate: float = 0.5,
        positionwise_layer_type: str = "conv1d",
        positionwise_conv_kernel_size: int = 1,
        use_scaled_pos_enc: bool = True,
        use_batch_norm: bool = True,
        encoder_normalize_before: bool = True,
        decoder_normalize_before: bool = True,
        encoder_concat_after: bool = False,
        decoder_concat_after: bool = False,
        reduction_factor: int = 1,
        encoder_type: str = "transformer",
        decoder_type: str = "transformer",
        transformer_enc_dropout_rate: float = 0.1,
        transformer_enc_positional_dropout_rate: float = 0.1,
        transformer_enc_attn_dropout_rate: float = 0.1,
        transformer_dec_dropout_rate: float = 0.1,
        transformer_dec_positional_dropout_rate: float = 0.1,
        transformer_dec_attn_dropout_rate: float = 0.1,
        # only for conformer
        conformer_rel_pos_type: str = "legacy",
        conformer_pos_enc_layer_type: str = "rel_pos",
        conformer_self_attn_layer_type: str = "rel_selfattn",
        conformer_activation_type: str = "swish",
        use_macaron_style_in_conformer: bool = True,
        use_cnn_in_conformer: bool = True,
        zero_triu: bool = False,
        conformer_enc_kernel_size: int = 7,
        conformer_dec_kernel_size: int = 31,
        # duration predictor
        duration_predictor_layers: int = 2,
        duration_predictor_chans: int = 384,
        duration_predictor_kernel_size: int = 3,
        duration_predictor_dropout_rate: float = 0.1,
        # energy predictor
        energy_predictor_layers: int = 2,
        energy_predictor_chans: int = 384,
        energy_predictor_kernel_size: int = 3,
        energy_predictor_dropout: float = 0.5,
        energy_embed_kernel_size: int = 9,
        energy_embed_dropout: float = 0.5,
        stop_gradient_from_energy_predictor: bool = False,
        # pitch predictor
        pitch_predictor_layers: int = 2,
        pitch_predictor_chans: int = 384,
        pitch_predictor_kernel_size: int = 3,
        pitch_predictor_dropout: float = 0.5,
        pitch_embed_kernel_size: int = 9,
        pitch_embed_dropout: float = 0.5,
        stop_gradient_from_pitch_predictor: bool = False,
        # extra embedding related
        spks: Optional[int] = None,
        langs: Optional[int] = None,
        spk_embed_dim: Optional[int] = None,
        spk_embed_integration_type: str = "add",
        use_gst: bool = False,
        gst_tokens: int = 10,
        gst_heads: int = 4,
        gst_conv_layers: int = 6,
        gst_conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),
        gst_conv_kernel_size: int = 3,
        gst_conv_stride: int = 2,
        gst_gru_layers: int = 1,
        gst_gru_units: int = 128,
        # training related
        init_type: str = "xavier_uniform",
        init_enc_alpha: float = 1.0,
        init_dec_alpha: float = 1.0,
        use_masking: bool = False,
        use_weighted_masking: bool = False,
    ):
        """Initialize FastSpeech2 module.

        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            elayers (int): Number of encoder layers.
            eunits (int): Number of encoder hidden units.
            dlayers (int): Number of decoder layers.
            dunits (int): Number of decoder hidden units.
            postnet_layers (int): Number of postnet layers.
            postnet_chans (int): Number of postnet channels.
            postnet_filts (int): Kernel size of postnet.
            postnet_dropout_rate (float): Dropout rate in postnet.
            use_scaled_pos_enc (bool): Whether to use trainable scaled pos encoding.
            use_batch_norm (bool): Whether to use batch normalization in encoder prenet.
            encoder_normalize_before (bool): Whether to apply layernorm layer before
                encoder block.
            decoder_normalize_before (bool): Whether to apply layernorm layer before
                decoder block.
            encoder_concat_after (bool): Whether to concatenate attention layer's input
                and output in encoder.
            decoder_concat_after (bool): Whether to concatenate attention layer's input
                and output in decoder.
            reduction_factor (int): Reduction factor.
            encoder_type (str): Encoder type ("transformer" or "conformer").
            decoder_type (str): Decoder type ("transformer" or "conformer").
            transformer_enc_dropout_rate (float): Dropout rate in encoder except
                attention and positional encoding.
            transformer_enc_positional_dropout_rate (float): Dropout rate after encoder
                positional encoding.
            transformer_enc_attn_dropout_rate (float): Dropout rate in encoder
                self-attention module.
            transformer_dec_dropout_rate (float): Dropout rate in decoder except
                attention & positional encoding.
            transformer_dec_positional_dropout_rate (float): Dropout rate after decoder
                positional encoding.
            transformer_dec_attn_dropout_rate (float): Dropout rate in decoder
                self-attention module.
            conformer_rel_pos_type (str): Relative pos encoding type in conformer.
            conformer_pos_enc_layer_type (str): Pos encoding layer type in conformer.
            conformer_self_attn_layer_type (str): Self-attention layer type in conformer
            conformer_activation_type (str): Activation function type in conformer.
            use_macaron_style_in_conformer: Whether to use macaron style FFN.
            use_cnn_in_conformer: Whether to use CNN in conformer.
            zero_triu: Whether to use zero triu in relative self-attention module.
            conformer_enc_kernel_size: Kernel size of encoder conformer.
            conformer_dec_kernel_size: Kernel size of decoder conformer.
            duration_predictor_layers (int): Number of duration predictor layers.
            duration_predictor_chans (int): Number of duration predictor channels.
            duration_predictor_kernel_size (int): Kernel size of duration predictor.
            duration_predictor_dropout_rate (float): Dropout rate in duration predictor.
            pitch_predictor_layers (int): Number of pitch predictor layers.
            pitch_predictor_chans (int): Number of pitch predictor channels.
            pitch_predictor_kernel_size (int): Kernel size of pitch predictor.
            pitch_predictor_dropout_rate (float): Dropout rate in pitch predictor.
            pitch_embed_kernel_size (float): Kernel size of pitch embedding.
            pitch_embed_dropout_rate (float): Dropout rate for pitch embedding.
            stop_gradient_from_pitch_predictor: Whether to stop gradient from pitch
                predictor to encoder.
            energy_predictor_layers (int): Number of energy predictor layers.
            energy_predictor_chans (int): Number of energy predictor channels.
            energy_predictor_kernel_size (int): Kernel size of energy predictor.
            energy_predictor_dropout_rate (float): Dropout rate in energy predictor.
            energy_embed_kernel_size (float): Kernel size of energy embedding.
            energy_embed_dropout_rate (float): Dropout rate for energy embedding.
            stop_gradient_from_energy_predictor: Whether to stop gradient from energy
                predictor to encoder.
            spks (Optional[int]): Number of speakers. If set to > 1, assume that the
                sids will be provided as the input and use sid embedding layer.
            langs (Optional[int]): Number of languages. If set to > 1, assume that the
                lids will be provided as the input and use sid embedding layer.
            spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
                assume that spembs will be provided as the input.
            spk_embed_integration_type: How to integrate speaker embedding.
            use_gst (str): Whether to use global style token.
            gst_tokens (int): The number of GST embeddings.
            gst_heads (int): The number of heads in GST multihead attention.
            gst_conv_layers (int): The number of conv layers in GST.
            gst_conv_chans_list: (Sequence[int]):
                List of the number of channels of conv layers in GST.
            gst_conv_kernel_size (int): Kernel size of conv layers in GST.
            gst_conv_stride (int): Stride size of conv layers in GST.
            gst_gru_layers (int): The number of GRU layers in GST.
            gst_gru_units (int): The number of GRU units in GST.
            init_type (str): How to initialize transformer parameters.
            init_enc_alpha (float): Initial value of alpha in scaled pos encoding of the
                encoder.
            init_dec_alpha (float): Initial value of alpha in scaled pos encoding of the
                decoder.
            use_masking (bool): Whether to apply masking for padded part in loss
                calculation.
            use_weighted_masking (bool): Whether to apply weighted masking in loss
                calculation.
        """
        assert check_argument_types()
        super().__init__()

        # store hyperparameters
        self.idim = idim
        self.odim = odim
        # the last token index is used as <eos>
        self.eos = idim - 1
        self.reduction_factor = reduction_factor
        self.encoder_type = encoder_type
        self.decoder_type = decoder_type
        self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor
        self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor
        self.use_scaled_pos_enc = use_scaled_pos_enc
        self.use_gst = use_gst

        # use idx 0 as padding idx
        self.padding_idx = 0

        # get positional encoding class
        pos_enc_class = (
            ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
        )

        # check relative positional encoding compatibility
        # (older configs used the "legacy" rel-pos implementation; fall back
        # transparently so they keep loading)
        if "conformer" in [encoder_type, decoder_type]:
            if conformer_rel_pos_type == "legacy":
                if conformer_pos_enc_layer_type == "rel_pos":
                    conformer_pos_enc_layer_type = "legacy_rel_pos"
                    logging.warning(
                        "Fallback to conformer_pos_enc_layer_type = 'legacy_rel_pos' "
                        "due to the compatibility. If you want to use the new one, "
                        "please use conformer_pos_enc_layer_type = 'latest'."
                    )
                if conformer_self_attn_layer_type == "rel_selfattn":
                    conformer_self_attn_layer_type = "legacy_rel_selfattn"
                    logging.warning(
                        "Fallback to "
                        "conformer_self_attn_layer_type = 'legacy_rel_selfattn' "
                        "due to the compatibility. If you want to use the new one, "
                        "please use conformer_pos_enc_layer_type = 'latest'."
                    )
            elif conformer_rel_pos_type == "latest":
                assert conformer_pos_enc_layer_type != "legacy_rel_pos"
                assert conformer_self_attn_layer_type != "legacy_rel_selfattn"
            else:
                raise ValueError(f"Unknown rel_pos_type: {conformer_rel_pos_type}")

        # define encoder
        encoder_input_layer = torch.nn.Embedding(
            num_embeddings=idim, embedding_dim=adim, padding_idx=self.padding_idx
        )
        if encoder_type == "transformer":
            self.encoder = TransformerEncoder(
                idim=idim,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=eunits,
                num_blocks=elayers,
                input_layer=encoder_input_layer,
                dropout_rate=transformer_enc_dropout_rate,
                positional_dropout_rate=transformer_enc_positional_dropout_rate,
                attention_dropout_rate=transformer_enc_attn_dropout_rate,
                pos_enc_class=pos_enc_class,
                normalize_before=encoder_normalize_before,
                concat_after=encoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            )
        elif encoder_type == "conformer":
            self.encoder = ConformerEncoder(
                idim=idim,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=eunits,
                num_blocks=elayers,
                input_layer=encoder_input_layer,
                dropout_rate=transformer_enc_dropout_rate,
                positional_dropout_rate=transformer_enc_positional_dropout_rate,
                attention_dropout_rate=transformer_enc_attn_dropout_rate,
                normalize_before=encoder_normalize_before,
                concat_after=encoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
                macaron_style=use_macaron_style_in_conformer,
                pos_enc_layer_type=conformer_pos_enc_layer_type,
                selfattention_layer_type=conformer_self_attn_layer_type,
                activation_type=conformer_activation_type,
                use_cnn_module=use_cnn_in_conformer,
                cnn_module_kernel=conformer_enc_kernel_size,
                zero_triu=zero_triu,
            )
        else:
            raise ValueError(f"{encoder_type} is not supported.")

        # define GST
        if self.use_gst:
            self.gst = StyleEncoder(
                idim=odim,  # the input is mel-spectrogram
                gst_tokens=gst_tokens,
                gst_token_dim=adim,
                gst_heads=gst_heads,
                conv_layers=gst_conv_layers,
                conv_chans_list=gst_conv_chans_list,
                conv_kernel_size=gst_conv_kernel_size,
                conv_stride=gst_conv_stride,
                gru_layers=gst_gru_layers,
                gru_units=gst_gru_units,
            )

        # define spk and lang embedding
        self.spks = None
        if spks is not None and spks > 1:
            self.spks = spks
            self.sid_emb = torch.nn.Embedding(spks, adim)
        self.langs = None
        if langs is not None and langs > 1:
            self.langs = langs
            self.lid_emb = torch.nn.Embedding(langs, adim)

        # define additional projection for speaker embedding
        self.spk_embed_dim = None
        if spk_embed_dim is not None and spk_embed_dim > 0:
            self.spk_embed_dim = spk_embed_dim
            self.spk_embed_integration_type = spk_embed_integration_type
        if self.spk_embed_dim is not None:
            # "add" projects the embedding into adim; otherwise ("concat")
            # the projection maps the concatenated vector back to adim
            if self.spk_embed_integration_type == "add":
                self.projection = torch.nn.Linear(self.spk_embed_dim, adim)
            else:
                self.projection = torch.nn.Linear(adim + self.spk_embed_dim, adim)

        # define duration predictor
        self.duration_predictor = DurationPredictor(
            idim=adim,
            n_layers=duration_predictor_layers,
            n_chans=duration_predictor_chans,
            kernel_size=duration_predictor_kernel_size,
            dropout_rate=duration_predictor_dropout_rate,
        )

        # define pitch predictor
        self.pitch_predictor = VariancePredictor(
            idim=adim,
            n_layers=pitch_predictor_layers,
            n_chans=pitch_predictor_chans,
            kernel_size=pitch_predictor_kernel_size,
            dropout_rate=pitch_predictor_dropout,
        )
        # NOTE(kan-bayashi): We use continuous pitch + FastPitch style avg
        self.pitch_embed = torch.nn.Sequential(
            torch.nn.Conv1d(
                in_channels=1,
                out_channels=adim,
                kernel_size=pitch_embed_kernel_size,
                padding=(pitch_embed_kernel_size - 1) // 2,
            ),
            torch.nn.Dropout(pitch_embed_dropout),
        )

        # define energy predictor
        self.energy_predictor = VariancePredictor(
            idim=adim,
            n_layers=energy_predictor_layers,
            n_chans=energy_predictor_chans,
            kernel_size=energy_predictor_kernel_size,
            dropout_rate=energy_predictor_dropout,
        )
        # NOTE(kan-bayashi): We use continuous enegy + FastPitch style avg
        self.energy_embed = torch.nn.Sequential(
            torch.nn.Conv1d(
                in_channels=1,
                out_channels=adim,
                kernel_size=energy_embed_kernel_size,
                padding=(energy_embed_kernel_size - 1) // 2,
            ),
            torch.nn.Dropout(energy_embed_dropout),
        )

        # define length regulator
        self.length_regulator = LengthRegulator()

        # define decoder
        # NOTE: we use encoder as decoder
        # because fastspeech's decoder is the same as encoder
        if decoder_type == "transformer":
            self.decoder = TransformerEncoder(
                idim=0,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=dunits,
                num_blocks=dlayers,
                input_layer=None,
                dropout_rate=transformer_dec_dropout_rate,
                positional_dropout_rate=transformer_dec_positional_dropout_rate,
                attention_dropout_rate=transformer_dec_attn_dropout_rate,
                pos_enc_class=pos_enc_class,
                normalize_before=decoder_normalize_before,
                concat_after=decoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            )
        elif decoder_type == "conformer":
            self.decoder = ConformerEncoder(
                idim=0,
                attention_dim=adim,
                attention_heads=aheads,
                linear_units=dunits,
                num_blocks=dlayers,
                input_layer=None,
                dropout_rate=transformer_dec_dropout_rate,
                positional_dropout_rate=transformer_dec_positional_dropout_rate,
                attention_dropout_rate=transformer_dec_attn_dropout_rate,
                normalize_before=decoder_normalize_before,
                concat_after=decoder_concat_after,
                positionwise_layer_type=positionwise_layer_type,
                positionwise_conv_kernel_size=positionwise_conv_kernel_size,
                macaron_style=use_macaron_style_in_conformer,
                pos_enc_layer_type=conformer_pos_enc_layer_type,
                selfattention_layer_type=conformer_self_attn_layer_type,
                activation_type=conformer_activation_type,
                use_cnn_module=use_cnn_in_conformer,
                cnn_module_kernel=conformer_dec_kernel_size,
            )
        else:
            raise ValueError(f"{decoder_type} is not supported.")

        # define final projection
        # predicts reduction_factor frames per decoder step
        self.feat_out = torch.nn.Linear(adim, odim * reduction_factor)

        # define postnet
        self.postnet = (
            None
            if postnet_layers == 0
            else Postnet(
                idim=idim,
                odim=odim,
                n_layers=postnet_layers,
                n_chans=postnet_chans,
                n_filts=postnet_filts,
                use_batch_norm=use_batch_norm,
                dropout_rate=postnet_dropout_rate,
            )
        )

        # initialize parameters
        self._reset_parameters(
            init_type=init_type,
            init_enc_alpha=init_enc_alpha,
            init_dec_alpha=init_dec_alpha,
        )

        # define criterions
        self.criterion = FastSpeech2Loss(
            use_masking=use_masking, use_weighted_masking=use_weighted_masking
        )
def forward(
    self,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    feats: torch.Tensor,
    feats_lengths: torch.Tensor,
    durations: torch.Tensor,
    durations_lengths: torch.Tensor,
    pitch: torch.Tensor,
    pitch_lengths: torch.Tensor,
    energy: torch.Tensor,
    energy_lengths: torch.Tensor,
    spembs: Optional[torch.Tensor] = None,
    sids: Optional[torch.Tensor] = None,
    lids: Optional[torch.Tensor] = None,
    joint_training: bool = False,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
    """Calculate forward propagation.

    Args:
        text (LongTensor): Batch of padded token ids (B, T_text).
        text_lengths (LongTensor): Batch of lengths of each input (B,).
        feats (Tensor): Batch of padded target features (B, T_feats, odim).
        feats_lengths (LongTensor): Batch of the lengths of each target (B,).
        durations (LongTensor): Batch of padded durations (B, T_text + 1).
        durations_lengths (LongTensor): Batch of duration lengths (B, T_text + 1).
        pitch (Tensor): Batch of padded token-averaged pitch (B, T_text + 1, 1).
        pitch_lengths (LongTensor): Batch of pitch lengths (B, T_text + 1).
        energy (Tensor): Batch of padded token-averaged energy (B, T_text + 1, 1).
        energy_lengths (LongTensor): Batch of energy lengths (B, T_text + 1).
        spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
        sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
        lids (Optional[Tensor]): Batch of language IDs (B, 1).
        joint_training (bool): Whether to perform joint training with vocoder.

    Returns:
        Tensor: Loss scalar value.
        Dict: Statistics to be monitored.
        Tensor: Weight value if not joint training else model outputs.
    """
    # Trim over-padded tensors to this batch's max length; under
    # DataParallel each replica may receive a slice padded for the
    # whole (larger) batch.
    text = text[:, : text_lengths.max()]  # for data-parallel
    feats = feats[:, : feats_lengths.max()]  # for data-parallel
    durations = durations[:, : durations_lengths.max()]  # for data-parallel
    pitch = pitch[:, : pitch_lengths.max()]  # for data-parallel
    energy = energy[:, : energy_lengths.max()]  # for data-parallel
    batch_size = text.size(0)

    # Add eos at the last of sequence: pad one extra position with
    # padding_idx, then overwrite the position right after each
    # sequence's real length with the eos id.
    xs = F.pad(text, [0, 1], "constant", self.padding_idx)
    for i, l in enumerate(text_lengths):
        xs[i, l] = self.eos
    ilens = text_lengths + 1

    ys, ds, ps, es = feats, durations, pitch, energy
    olens = feats_lengths

    # forward propagation (teacher forcing: ground-truth duration/pitch/energy)
    before_outs, after_outs, d_outs, p_outs, e_outs = self._forward(
        xs,
        ilens,
        ys,
        olens,
        ds,
        ps,
        es,
        spembs=spembs,
        sids=sids,
        lids=lids,
        is_inference=False,
    )

    # modify mod part of groundtruth: when frames are grouped by the
    # reduction factor, truncate targets to a multiple of it so they
    # align with the decoder outputs.
    if self.reduction_factor > 1:
        olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
        max_olen = max(olens)
        ys = ys[:, :max_olen]

    # calculate loss
    if self.postnet is None:
        after_outs = None

    # calculate loss
    l1_loss, duration_loss, pitch_loss, energy_loss = self.criterion(
        after_outs=after_outs,
        before_outs=before_outs,
        d_outs=d_outs,
        p_outs=p_outs,
        e_outs=e_outs,
        ys=ys,
        ds=ds,
        ps=ps,
        es=es,
        ilens=ilens,
        olens=olens,
    )
    loss = l1_loss + duration_loss + pitch_loss + energy_loss

    stats = dict(
        l1_loss=l1_loss.item(),
        duration_loss=duration_loss.item(),
        pitch_loss=pitch_loss.item(),
        energy_loss=energy_loss.item(),
    )

    # report extra information: learned positional-encoding scales,
    # only meaningful for transformer encoder/decoder with scaled PE.
    if self.encoder_type == "transformer" and self.use_scaled_pos_enc:
        stats.update(
            encoder_alpha=self.encoder.embed[-1].alpha.data.item(),
        )
    if self.decoder_type == "transformer" and self.use_scaled_pos_enc:
        stats.update(
            decoder_alpha=self.decoder.embed[-1].alpha.data.item(),
        )

    if not joint_training:
        stats.update(loss=loss.item())
        # Gather loss/stats across devices for distributed training.
        loss, stats, weight = force_gatherable(
            (loss, stats, batch_size), loss.device
        )
        return loss, stats, weight
    else:
        # Joint training with a vocoder: hand back the generated feats
        # (postnet output when available) instead of the weight.
        return loss, stats, after_outs if after_outs is not None else before_outs
def _forward(
    self,
    xs: torch.Tensor,
    ilens: torch.Tensor,
    ys: Optional[torch.Tensor] = None,
    olens: Optional[torch.Tensor] = None,
    ds: Optional[torch.Tensor] = None,
    ps: Optional[torch.Tensor] = None,
    es: Optional[torch.Tensor] = None,
    spembs: Optional[torch.Tensor] = None,
    sids: Optional[torch.Tensor] = None,
    lids: Optional[torch.Tensor] = None,
    is_inference: bool = False,
    alpha: float = 1.0,
) -> Sequence[torch.Tensor]:
    """Shared encoder -> variance adaptor -> decoder computation.

    Used by both training (``is_inference=False``, ground-truth
    duration/pitch/energy in ``ds``/``ps``/``es``) and inference
    (``is_inference=True``, predicted values are used instead).

    Returns:
        Tuple of (before_outs, after_outs, d_outs, p_outs, e_outs).
    """
    # forward encoder
    x_masks = self._source_mask(ilens)
    hs, _ = self.encoder(xs, x_masks)  # (B, T_text, adim)

    # integrate with GST (global style token extracted from reference feats)
    if self.use_gst:
        style_embs = self.gst(ys)
        hs = hs + style_embs.unsqueeze(1)

    # integrate with SID and LID embeddings (broadcast-added over time)
    if self.spks is not None:
        sid_embs = self.sid_emb(sids.view(-1))
        hs = hs + sid_embs.unsqueeze(1)
    if self.langs is not None:
        lid_embs = self.lid_emb(lids.view(-1))
        hs = hs + lid_embs.unsqueeze(1)

    # integrate speaker embedding
    if self.spk_embed_dim is not None:
        hs = self._integrate_with_spk_embed(hs, spembs)

    # forward duration predictor and variance predictors
    d_masks = make_pad_mask(ilens).to(xs.device)

    # Optionally detach hs so pitch/energy losses do not backprop into
    # the encoder (stabilizes training).
    if self.stop_gradient_from_pitch_predictor:
        p_outs = self.pitch_predictor(hs.detach(), d_masks.unsqueeze(-1))
    else:
        p_outs = self.pitch_predictor(hs, d_masks.unsqueeze(-1))
    if self.stop_gradient_from_energy_predictor:
        e_outs = self.energy_predictor(hs.detach(), d_masks.unsqueeze(-1))
    else:
        e_outs = self.energy_predictor(hs, d_masks.unsqueeze(-1))

    if is_inference:
        d_outs = self.duration_predictor.inference(hs, d_masks)  # (B, T_text)
        # use prediction in inference
        p_embs = self.pitch_embed(p_outs.transpose(1, 2)).transpose(1, 2)
        e_embs = self.energy_embed(e_outs.transpose(1, 2)).transpose(1, 2)
        hs = hs + e_embs + p_embs
        hs = self.length_regulator(hs, d_outs, alpha)  # (B, T_feats, adim)
    else:
        d_outs = self.duration_predictor(hs, d_masks)
        # use groundtruth in training
        p_embs = self.pitch_embed(ps.transpose(1, 2)).transpose(1, 2)
        e_embs = self.energy_embed(es.transpose(1, 2)).transpose(1, 2)
        hs = hs + e_embs + p_embs
        hs = self.length_regulator(hs, ds)  # (B, T_feats, adim)

    # forward decoder
    if olens is not None and not is_inference:
        # Build the decoder self-attention mask in the reduced frame rate.
        if self.reduction_factor > 1:
            olens_in = olens.new([olen // self.reduction_factor for olen in olens])
        else:
            olens_in = olens
        h_masks = self._source_mask(olens_in)
    else:
        h_masks = None
    zs, _ = self.decoder(hs, h_masks)  # (B, T_feats, adim)
    # Project adim -> odim * r, then unfold r grouped frames along time.
    before_outs = self.feat_out(zs).view(
        zs.size(0), -1, self.odim
    )  # (B, T_feats, odim)

    # postnet -> (B, T_feats//r * r, odim)
    if self.postnet is None:
        after_outs = before_outs
    else:
        # Residual refinement over the channel-first representation.
        after_outs = before_outs + self.postnet(
            before_outs.transpose(1, 2)
        ).transpose(1, 2)

    return before_outs, after_outs, d_outs, p_outs, e_outs
def inference(
    self,
    text: torch.Tensor,
    feats: Optional[torch.Tensor] = None,
    durations: Optional[torch.Tensor] = None,
    spembs: torch.Tensor = None,
    sids: Optional[torch.Tensor] = None,
    lids: Optional[torch.Tensor] = None,
    pitch: Optional[torch.Tensor] = None,
    energy: Optional[torch.Tensor] = None,
    alpha: float = 1.0,
    use_teacher_forcing: bool = False,
) -> Dict[str, torch.Tensor]:
    """Generate the sequence of features given the sequences of characters.

    Args:
        text (LongTensor): Input sequence of characters (T_text,).
        feats (Optional[Tensor]): Feature sequence to extract style (N, idim).
        durations (Optional[Tensor]): Groundtruth of duration (T_text + 1,).
        spembs (Optional[Tensor]): Speaker embedding vector (spk_embed_dim,).
        sids (Optional[Tensor]): Speaker ID (1,).
        lids (Optional[Tensor]): Language ID (1,).
        pitch (Optional[Tensor]): Groundtruth of token-avg pitch (T_text + 1, 1).
        energy (Optional[Tensor]): Groundtruth of token-avg energy (T_text + 1, 1).
        alpha (float): Alpha to control the speed.
        use_teacher_forcing (bool): Whether to use teacher forcing.
            If true, groundtruth of duration, pitch and energy will be used.

    Returns:
        Dict[str, Tensor]: Output dict including the following items:
            * feat_gen (Tensor): Output sequence of features (T_feats, odim).
            * duration (Tensor): Duration sequence (T_text + 1,).
            * pitch (Tensor): Pitch sequence (T_text + 1,).
            * energy (Tensor): Energy sequence (T_text + 1,).
    """
    x, y = text, feats
    spemb, d, p, e = spembs, durations, pitch, energy

    # add eos at the last of sequence (inputs are unbatched here, so a
    # simple right-pad with the eos id suffices)
    x = F.pad(x, [0, 1], "constant", self.eos)

    # setup batch axis
    ilens = torch.tensor([x.shape[0]], dtype=torch.long, device=x.device)
    xs, ys = x.unsqueeze(0), None
    if y is not None:
        ys = y.unsqueeze(0)
    if spemb is not None:
        spembs = spemb.unsqueeze(0)

    if use_teacher_forcing:
        # use groundtruth of duration, pitch, and energy
        ds, ps, es = d.unsqueeze(0), p.unsqueeze(0), e.unsqueeze(0)
        _, outs, d_outs, p_outs, e_outs = self._forward(
            xs,
            ilens,
            ys,
            ds=ds,
            ps=ps,
            es=es,
            spembs=spembs,
            sids=sids,
            lids=lids,
        )  # (1, T_feats, odim)
    else:
        _, outs, d_outs, p_outs, e_outs = self._forward(
            xs,
            ilens,
            ys,
            spembs=spembs,
            sids=sids,
            lids=lids,
            is_inference=True,
            alpha=alpha,
        )  # (1, T_feats, odim)

    # Strip the batch axis before returning.
    return dict(
        feat_gen=outs[0],
        duration=d_outs[0],
        pitch=p_outs[0],
        energy=e_outs[0],
    )
def _integrate_with_spk_embed(
    self, hs: torch.Tensor, spembs: torch.Tensor
) -> torch.Tensor:
    """Integrate speaker embedding with hidden states.

    Args:
        hs (Tensor): Batch of hidden state sequences (B, T_text, adim).
        spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).

    Returns:
        Tensor: Batch of integrated hidden state sequences (B, T_text, adim).
    """
    integration = self.spk_embed_integration_type
    if integration == "add":
        # Project the L2-normalized embedding to adim and broadcast-add
        # it over the time axis.
        projected = self.projection(F.normalize(spembs))
        return hs + projected.unsqueeze(1)
    if integration == "concat":
        # Tile the normalized embedding along time, concatenate on the
        # feature axis, then project back down to adim.
        tiled = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
        return self.projection(torch.cat([hs, tiled], dim=-1))
    raise NotImplementedError("support only add or concat.")
def _source_mask(self, ilens: torch.Tensor) -> torch.Tensor:
    """Make masks for self-attention.

    Args:
        ilens (LongTensor): Batch of lengths (B,).

    Returns:
        Tensor: Mask tensor for self-attention, shape (B, 1, T_max).
            dtype=torch.uint8 in PyTorch 1.2-
            dtype=torch.bool in PyTorch 1.2+ (including 1.2)

    Examples:
        >>> ilens = [5, 3]
        >>> self._source_mask(ilens)
        tensor([[[1, 1, 1, 1, 1],
                 [1, 1, 1, 0, 0]]], dtype=torch.uint8)
    """
    # Non-pad positions are True; move to the same device as the model.
    x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
    # Insert a singleton query axis so the mask broadcasts in attention.
    return x_masks.unsqueeze(-2)
def _reset_parameters(
    self, init_type: str, init_enc_alpha: float, init_dec_alpha: float
):
    """Initialize model parameters and scaled positional-encoding alphas.

    Args:
        init_type (str): Weight initialization scheme; "pytorch" keeps
            the framework defaults untouched.
        init_enc_alpha (float): Initial alpha for the encoder's scaled
            positional encoding.
        init_dec_alpha (float): Initial alpha for the decoder's scaled
            positional encoding.
    """
    # Apply the requested initialization scheme unless keeping defaults.
    if init_type != "pytorch":
        initialize(self, init_type)

    # Reset the learnable alpha of scaled positional encoding; it only
    # exists for transformer encoders/decoders using scaled PE.
    if self.use_scaled_pos_enc:
        if self.encoder_type == "transformer":
            self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
        if self.decoder_type == "transformer":
            self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
| 35,997 | 42.059809 | 88 | py |
espnet | espnet-master/espnet2/tts/feats_extract/energy.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Energy extractor."""
from typing import Any, Dict, Tuple, Union
import humanfriendly
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.layers.stft import Stft
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
from espnet.nets.pytorch_backend.nets_utils import pad_list
class Energy(AbsFeatsExtract):
    """Energy extractor.

    Computes frame-level energy as the L2 norm of each STFT frame's
    magnitude spectrum, optionally averaged per token duration
    (FastSpeech2-style variance feature).
    """

    def __init__(
        self,
        fs: Union[int, str] = 22050,
        n_fft: int = 1024,
        win_length: Union[int, None] = None,
        hop_length: int = 256,
        window: str = "hann",
        center: bool = True,
        normalized: bool = False,
        onesided: bool = True,
        use_token_averaged_energy: bool = True,
        reduction_factor: Union[int, None] = None,
    ):
        assert check_argument_types()
        super().__init__()
        if isinstance(fs, str):
            # Accept human-friendly strings such as "22.05k".
            fs = humanfriendly.parse_size(fs)
        self.fs = fs
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.use_token_averaged_energy = use_token_averaged_energy
        if use_token_averaged_energy:
            # Token averaging needs a valid frame reduction factor.
            assert reduction_factor >= 1
        self.reduction_factor = reduction_factor

        self.stft = Stft(
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            window=window,
            center=center,
            normalized=normalized,
            onesided=onesided,
        )

    def output_size(self) -> int:
        """Return the output feature dimension (scalar energy -> 1)."""
        return 1

    def get_parameters(self) -> Dict[str, Any]:
        """Return the extractor configuration (e.g. for dumping to stats)."""
        return dict(
            fs=self.fs,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            window=self.window,
            win_length=self.win_length,
            center=self.stft.center,
            normalized=self.stft.normalized,
            use_token_averaged_energy=self.use_token_averaged_energy,
            reduction_factor=self.reduction_factor,
        )

    def forward(
        self,
        input: torch.Tensor,
        input_lengths: torch.Tensor = None,
        feats_lengths: torch.Tensor = None,
        durations: torch.Tensor = None,
        durations_lengths: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract (optionally token-averaged) energy from raw waveforms.

        Args:
            input (Tensor): Batch of waveforms (B, T_wav).
            input_lengths (Optional[LongTensor]): Waveform lengths (B,).
            feats_lengths (Optional[LongTensor]): Target feature lengths
                (B,); when given, energy is padded/truncated to match.
            durations (Optional[LongTensor]): Token durations for
                token-averaging.
            durations_lengths (Optional[LongTensor]): Duration lengths (B,).

        Returns:
            Tuple[Tensor, Tensor]: Energy (B, T, 1) and its lengths (B,).
        """
        # If not provide, we assume that the inputs have the same length
        if input_lengths is None:
            input_lengths = (
                input.new_ones(input.shape[0], dtype=torch.long) * input.shape[1]
            )

        # Domain-conversion: e.g. Stft: time -> time-freq
        input_stft, energy_lengths = self.stft(input, input_lengths)

        assert input_stft.dim() >= 4, input_stft.shape
        assert input_stft.shape[-1] == 2, input_stft.shape

        # input_stft: (..., F, 2) -> (..., F)
        input_power = input_stft[..., 0] ** 2 + input_stft[..., 1] ** 2
        # sum over frequency (B, N, F) -> (B, N); clamp avoids sqrt(0) grads
        energy = torch.sqrt(torch.clamp(input_power.sum(dim=2), min=1.0e-10))

        # (Optional): Adjust length to match with the mel-spectrogram
        if feats_lengths is not None:
            energy = [
                self._adjust_num_frames(e[:el].view(-1), fl)
                for e, el, fl in zip(energy, energy_lengths, feats_lengths)
            ]
            energy_lengths = feats_lengths

        # (Optional): Average by duration to calculate token-wise energy
        if self.use_token_averaged_energy:
            durations = durations * self.reduction_factor
            energy = [
                self._average_by_duration(e[:el].view(-1), d)
                for e, el, d in zip(energy, energy_lengths, durations)
            ]
            energy_lengths = durations_lengths

        # Padding
        if isinstance(energy, list):
            energy = pad_list(energy, 0.0)

        # Return with the shape (B, T, 1)
        return energy.unsqueeze(-1), energy_lengths

    def _average_by_duration(self, x: torch.Tensor, d: torch.Tensor) -> torch.Tensor:
        """Average frame-level values over each token's duration span."""
        # Durations must cover the frames up to reduction-factor slack.
        assert 0 <= len(x) - d.sum() < self.reduction_factor
        d_cumsum = F.pad(d.cumsum(dim=0), (1, 0))
        x_avg = [
            x[start:end].mean() if len(x[start:end]) != 0 else x.new_tensor(0.0)
            for start, end in zip(d_cumsum[:-1], d_cumsum[1:])
        ]
        return torch.stack(x_avg)

    @staticmethod
    def _adjust_num_frames(x: torch.Tensor, num_frames: torch.Tensor) -> torch.Tensor:
        """Zero-pad or truncate a 1-D sequence to ``num_frames`` frames."""
        if num_frames > len(x):
            x = F.pad(x, (0, num_frames - len(x)))
        elif num_frames < len(x):
            x = x[:num_frames]
        return x
| 4,733 | 32.814286 | 86 | py |
espnet | espnet-master/espnet2/tts/feats_extract/ying.py | # modified from https://github.com/dhchoi99/NANSY
# We have modified the implementation of dhchoi99 to be fully differentiable.
import math
from typing import Any, Dict, Tuple, Union
import torch
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
from espnet2.tts.feats_extract.yin import *
from espnet.nets.pytorch_backend.nets_utils import pad_list
class Ying(AbsFeatsExtract):
    """Yingram (YIN-based pitch representation) extractor.

    Computes a differentiable Yingram from raw audio by framing the
    waveform, computing the cumulative mean-normalized difference
    function (cMNDF) per frame, and sampling it at lags corresponding
    to a midi-note grid. Modified from https://github.com/dhchoi99/NANSY.
    """

    def __init__(
        self,
        fs: int = 22050,
        w_step: int = 256,
        W: int = 2048,
        tau_max: int = 2048,
        midi_start: int = -5,
        midi_end: int = 75,
        octave_range: int = 24,
        use_token_averaged_ying: bool = False,
    ):
        super().__init__()
        self.fs = fs
        self.w_step = w_step
        self.W = W
        self.tau_max = tau_max
        self.use_token_averaged_ying = use_token_averaged_ying
        # Frames the (1, 1, T) signal into overlapping windows of width W.
        self.unfold = torch.nn.Unfold((1, self.W), 1, 0, stride=(1, self.w_step))
        midis = list(range(midi_start, midi_end))
        self.len_midis = len(midis)
        # Lag (in samples) for each midi bin; buffers so they follow .to(device).
        c_ms = torch.tensor([self.midi_to_lag(m, octave_range) for m in midis])
        self.register_buffer("c_ms", c_ms)
        self.register_buffer("c_ms_ceil", torch.ceil(self.c_ms).long())
        self.register_buffer("c_ms_floor", torch.floor(self.c_ms).long())

    def output_size(self) -> int:
        """Return the output feature dimension."""
        return 1

    def get_parameters(self) -> Dict[str, Any]:
        """Return the extractor configuration."""
        return dict(
            fs=self.fs,
            w_step=self.w_step,
            W=self.W,
            tau_max=self.tau_max,
            use_token_averaged_ying=self.use_token_averaged_ying,
        )

    def midi_to_lag(self, m: int, octave_range: float = 12):
        """Convert midi to time lag, eq. (4).

        Args:
            m: midi note number.
            octave_range: number of midi steps per octave.

        Returns:
            lag: time lag (tau, c(m)) calculated from midi, eq. (4).
        """
        f = 440 * math.pow(2, (m - 69) / octave_range)
        lag = self.fs / f
        return lag

    def yingram_from_cmndf(self, cmndfs: torch.Tensor) -> torch.Tensor:
        """Yingram calculator from cMNDFs.

        (cumulative Mean Normalized Difference Functions)

        Args:
            cmndfs: calculated cumulative mean normalized difference
                function; for details, see models/yin.py or eq. (1) and (2).

        Returns:
            y: calculated batch yingram.
        """
        # Linear interpolation of the cMNDF between the integer lags that
        # bracket each (generally fractional) midi-bin lag c_ms.
        y = (cmndfs[:, self.c_ms_ceil] - cmndfs[:, self.c_ms_floor]) / (
            self.c_ms_ceil - self.c_ms_floor
        ).unsqueeze(0) * (self.c_ms - self.c_ms_floor).unsqueeze(0) + cmndfs[
            :, self.c_ms_floor
        ]
        return y

    def yingram(self, x: torch.Tensor):
        """Calculate yingram from raw audio (multi segment).

        Args:
            x: raw audio, torch.Tensor of shape (B, T).

        Returns:
            yingram: torch.Tensor of shape (B, n_midis, T').
        """
        B, T = x.shape
        w_len = self.W

        # (B, T) -> frames of width W with hop w_step -> (B*frames, W)
        frames = self.unfold(x.view(B, 1, 1, T))
        frames = frames.permute(0, 2, 1).contiguous().view(-1, self.W)

        # If not using gpu, or torch not compatible,
        # implemented numpy batch function is still fine
        dfs = differenceFunctionTorch(frames, frames.shape[-1], self.tau_max)
        cmndfs = cumulativeMeanNormalizedDifferenceFunctionTorch(dfs, self.tau_max)
        yingram = self.yingram_from_cmndf(cmndfs)  # [B*frames, F]
        yingram = yingram.view(B, -1, self.len_midis).permute(0, 2, 1)  # [B, F, T]
        return yingram

    def _average_by_duration(self, x: torch.Tensor, d: torch.Tensor) -> torch.Tensor:
        """Average positive values of x over each token's duration span."""
        # NOTE(review): ``self.reduction_factor`` is never set in __init__,
        # so this path raises AttributeError if used — confirm upstream.
        assert 0 <= len(x) - d.sum() < self.reduction_factor
        d_cumsum = F.pad(d.cumsum(dim=0), (1, 0))
        x_avg = [
            x[start:end].masked_select(x[start:end].gt(0.0)).mean(dim=0)
            if len(x[start:end].masked_select(x[start:end].gt(0.0))) != 0
            else x.new_tensor(0.0)
            for start, end in zip(d_cumsum[:-1], d_cumsum[1:])
        ]
        return torch.stack(x_avg)

    @staticmethod
    def _adjust_num_frames(x: torch.Tensor, num_frames: torch.Tensor) -> torch.Tensor:
        """Pad or truncate x to ``num_frames`` along the time (last) axis."""
        x_length = x.shape[1]
        if num_frames > x_length:
            # F.pad with a 2-element pad spec pads the last dimension.
            x = F.pad(x, (0, num_frames - x_length))
        elif num_frames < x_length:
            # FIX: truncate along the time axis (dim 1). The original
            # sliced ``x[:num_frames]``, which cut the midi-bin axis
            # (dim 0) even though the length check used ``x.shape[1]``.
            x = x[:, :num_frames]
        return x

    def forward(
        self,
        input: torch.Tensor,
        input_lengths: torch.Tensor = None,
        feats_lengths: torch.Tensor = None,
        durations: torch.Tensor = None,
        durations_lengths: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract yingram features from a batch of raw waveforms.

        Args:
            input (Tensor): Waveforms (B, T_wav).
            input_lengths (Optional[LongTensor]): Waveform lengths (B,).
            feats_lengths (Optional[LongTensor]): Target feature lengths.
            durations (Optional[LongTensor]): Token durations.
            durations_lengths (Optional[LongTensor]): Duration lengths.

        Returns:
            Tuple[Tensor, Tensor]: Yingram and its lengths.
        """
        if input_lengths is None:
            input_lengths = (
                input.new_ones(input.shape[0], dtype=torch.long) * input.shape[1]
            )

        # Compute the YIN pitch per utterance (batch size 1 each).
        # TODO(yifeng): now we pass batch_size = 1,
        # maybe remove batch_size in self.yingram
        ying = [
            self.yingram(x[:xl].unsqueeze(0)).squeeze(0)
            for x, xl in zip(input, input_lengths)
        ]

        # (Optional): Adjust length to match with the mel-spectrogram
        if feats_lengths is not None:
            ying = [
                self._adjust_num_frames(p, fl).transpose(0, 1)
                for p, fl in zip(ying, feats_lengths)
            ]

        # Use token-averaged yingram
        if self.use_token_averaged_ying:
            durations = durations * self.reduction_factor
            ying = [
                self._average_by_duration(p, d).view(-1)
                for p, d in zip(ying, durations)
            ]
            ying_lengths = durations_lengths
        else:
            ying_lengths = input.new_tensor([len(p) for p in ying], dtype=torch.long)

        # Padding
        ying = pad_list(ying, 0.0)

        return (
            ying.float(),
            ying_lengths,
        )  # TODO(yifeng): should float() be here?

    def crop_scope(
        self, x, yin_start, scope_shift
    ):  # x: tensor [B,C,T] #scope_shift: tensor [B]
        """Crop a per-sample shifted scope window out of x along dim 1."""
        # NOTE(review): ``self.yin_scope`` is never set in __init__ —
        # confirm callers configure it before using this method.
        return torch.stack(
            [
                x[
                    i,
                    yin_start
                    + scope_shift[i] : yin_start
                    + self.yin_scope
                    + scope_shift[i],
                    :,
                ]
                for i in range(x.shape[0])
            ],
            dim=0,
        )
if __name__ == "__main__":
    # Quick visual sanity check of the yingram against the STFT.
    import librosa as rosa
    import matplotlib.pyplot as plt
    import torch

    # FIX: librosa.load takes the sampling rate as ``sr``; the original
    # passed ``fs=22050``, which raises TypeError.
    wav = torch.tensor(rosa.load("LJ001-0002.wav", sr=22050, mono=True)[0]).unsqueeze(0)
    # wav = torch.randn(1,40965)
    # Right-pad so the length is a multiple of the hop size (256).
    wav = torch.nn.functional.pad(wav, (0, (-wav.shape[1]) % 256))
    # wav = wav[#:,:8096]
    print(wav.shape)

    pitch = Ying()
    with torch.no_grad():
        ps = pitch.yingram(torch.nn.functional.pad(wav, (1024, 1024)))
        ps = torch.nn.functional.pad(ps, (0, 0, 8, 8), mode="replicate")
        print(ps.shape)
        spec = torch.stft(wav, 1024, 256, return_complex=False)
        print(spec.shape)

    plt.subplot(2, 1, 1)
    plt.pcolor(ps[0].numpy(), cmap="magma")
    plt.colorbar()
    plt.subplot(2, 1, 2)
    plt.pcolor(ps[0][15:65, :].numpy(), cmap="magma")
    plt.colorbar()
    plt.show()
| 8,001 | 32.066116 | 88 | py |
espnet | espnet-master/espnet2/tts/feats_extract/abs_feats_extract.py | from abc import ABC, abstractmethod
from typing import Any, Dict, Tuple
import torch
class AbsFeatsExtract(torch.nn.Module, ABC):
    """Abstract interface for feature extractors used in TTS.

    Concrete subclasses must report their output dimension, expose their
    configuration, and implement the length-aware forward computation.
    """

    @abstractmethod
    def output_size(self) -> int:
        """Return the dimension of the extracted feature."""
        raise NotImplementedError

    @abstractmethod
    def get_parameters(self) -> Dict[str, Any]:
        """Return the extractor's configuration as a plain dict."""
        raise NotImplementedError

    @abstractmethod
    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract features; return (features, feature_lengths)."""
        raise NotImplementedError
| 503 | 23 | 62 | py |
espnet | espnet-master/espnet2/tts/feats_extract/log_spectrogram.py | from typing import Any, Dict, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.layers.stft import Stft
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
class LogSpectrogram(AbsFeatsExtract):
    """Conventional frontend structure for ASR.

    Stft -> log-amplitude-spec
    """

    def __init__(
        self,
        n_fft: int = 1024,
        win_length: Optional[int] = None,
        hop_length: int = 256,
        window: Optional[str] = "hann",
        center: bool = True,
        normalized: bool = False,
        onesided: bool = True,
    ):
        assert check_argument_types()
        super().__init__()
        # FIX: ``self.n_fft`` was assigned twice (before and after the
        # Stft construction); the redundant second assignment is removed.
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.stft = Stft(
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            window=window,
            center=center,
            normalized=normalized,
            onesided=onesided,
        )

    def output_size(self) -> int:
        """Return the number of frequency bins of the output."""
        return self.n_fft // 2 + 1

    def get_parameters(self) -> Dict[str, Any]:
        """Return the parameters required by Vocoder"""
        return dict(
            n_fft=self.n_fft,
            n_shift=self.hop_length,
            win_length=self.win_length,
            window=self.window,
        )

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute the log10 amplitude spectrogram of a waveform batch.

        Args:
            input (Tensor): Waveforms (B, T_wav).
            input_lengths (Optional[LongTensor]): Waveform lengths (B,).

        Returns:
            Tuple[Tensor, Tensor]: Log-amplitude spectrogram and lengths.
        """
        # 1. Stft: time -> time-freq
        input_stft, feats_lens = self.stft(input, input_lengths)

        assert input_stft.dim() >= 4, input_stft.shape
        # "2" refers to the real/imag parts of Complex
        assert input_stft.shape[-1] == 2, input_stft.shape

        # NOTE(kamo): We use different definition for log-spec between TTS and ASR
        #   TTS: log_10(abs(stft))
        #   ASR: log_e(power(stft))

        # STFT -> Power spectrum
        # input_stft: (..., F, 2) -> (..., F)
        input_power = input_stft[..., 0] ** 2 + input_stft[..., 1] ** 2
        # log10(sqrt(power)) == 0.5 * log10(power); clamp avoids log(0)
        log_amp = 0.5 * torch.log10(torch.clamp(input_power, min=1.0e-10))
        return log_amp, feats_lens
| 2,244 | 29.337838 | 82 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.