repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
espnet | espnet-master/espnet2/tts/feats_extract/linear_spectrogram.py | from typing import Any, Dict, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.layers.stft import Stft
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
class LinearSpectrogram(AbsFeatsExtract):
    """Linear amplitude spectrogram.

    Stft -> amplitude-spec
    """

    def __init__(
        self,
        n_fft: int = 1024,
        win_length: Optional[int] = None,
        hop_length: int = 256,
        window: Optional[str] = "hann",
        center: bool = True,
        normalized: bool = False,
        onesided: bool = True,
    ):
        assert check_argument_types()
        super().__init__()
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.stft = Stft(
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            window=window,
            center=center,
            normalized=normalized,
            onesided=onesided,
        )

    def output_size(self) -> int:
        """Return the feature dimension (number of frequency bins)."""
        return self.n_fft // 2 + 1

    def get_parameters(self) -> Dict[str, Any]:
        """Return the parameters required by Vocoder."""
        return dict(
            n_fft=self.n_fft,
            n_shift=self.hop_length,
            win_length=self.win_length,
            window=self.window,
        )

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute the linear amplitude spectrogram.

        Args:
            input (Tensor): Waveform tensor (batch of time signals —
                exact shape is handled by Stft).
            input_lengths (Tensor): Length of each waveform. If None, all
                waveforms are treated as full length by Stft.

        Returns:
            Tensor: Amplitude spectrogram (..., T_frame, F).
            Tensor: Number of frames for each utterance.
        """
        # 1. Stft: time -> time-freq
        input_stft, feats_lens = self.stft(input, input_lengths)

        assert input_stft.dim() >= 4, input_stft.shape
        # "2" refers to the real/imag parts of Complex
        assert input_stft.shape[-1] == 2, input_stft.shape

        # STFT -> Power spectrum -> Amp spectrum
        # input_stft: (..., F, 2) -> (..., F)
        input_power = input_stft[..., 0] ** 2 + input_stft[..., 1] ** 2
        # Clamp avoids sqrt of exact zero (zero gradient / numerical issues).
        input_amp = torch.sqrt(torch.clamp(input_power, min=1.0e-10))
        return input_amp, feats_lens
| 2,092 | 28.9 | 71 | py |
espnet | espnet-master/espnet2/tts/feats_extract/log_mel_fbank.py | from typing import Any, Dict, Optional, Tuple, Union
import humanfriendly
import torch
from typeguard import check_argument_types
from espnet2.layers.log_mel import LogMel
from espnet2.layers.stft import Stft
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
class LogMelFbank(AbsFeatsExtract):
    """Conventional frontend structure for TTS.

    Stft -> amplitude-spec -> Log-Mel-Fbank
    """

    def __init__(
        self,
        fs: Union[int, str] = 16000,
        n_fft: int = 1024,
        win_length: Optional[int] = None,
        hop_length: int = 256,
        window: Optional[str] = "hann",
        center: bool = True,
        normalized: bool = False,
        onesided: bool = True,
        n_mels: int = 80,
        fmin: Optional[int] = 80,
        fmax: Optional[int] = 7600,
        htk: bool = False,
        log_base: Optional[float] = 10.0,
    ):
        assert check_argument_types()
        super().__init__()
        if isinstance(fs, str):
            # Allow human-friendly sampling-rate strings such as "16k".
            fs = humanfriendly.parse_size(fs)
        self.fs = fs
        self.n_mels = n_mels
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.fmin = fmin
        self.fmax = fmax
        # STFT front-end producing the complex spectrum.
        self.stft = Stft(
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            window=window,
            center=center,
            normalized=normalized,
            onesided=onesided,
        )
        # Mel projection + log compression applied to the amplitude spectrum.
        self.logmel = LogMel(
            fs=fs,
            n_fft=n_fft,
            n_mels=n_mels,
            fmin=fmin,
            fmax=fmax,
            htk=htk,
            log_base=log_base,
        )

    def output_size(self) -> int:
        """Return the feature dimension (number of mel bins)."""
        return self.n_mels

    def get_parameters(self) -> Dict[str, Any]:
        """Return the parameters required by Vocoder"""
        return dict(
            fs=self.fs,
            n_fft=self.n_fft,
            n_shift=self.hop_length,
            window=self.window,
            n_mels=self.n_mels,
            win_length=self.win_length,
            fmin=self.fmin,
            fmax=self.fmax,
        )

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute log-mel filterbank features from waveforms.

        Args:
            input (Tensor): Waveform tensor (shape handled by Stft).
            input_lengths (Tensor): Length of each waveform; may be None.

        Returns:
            Tensor: Log-mel features (..., T_frame, n_mels).
            Tensor: Number of frames for each utterance.
        """
        # 1. Domain-conversion: e.g. Stft: time -> time-freq
        input_stft, feats_lens = self.stft(input, input_lengths)

        assert input_stft.dim() >= 4, input_stft.shape
        # "2" refers to the real/imag parts of Complex
        assert input_stft.shape[-1] == 2, input_stft.shape

        # NOTE(kamo): We use different definition for log-spec between TTS and ASR
        #   TTS: log_10(abs(stft))
        #   ASR: log_e(power(stft))
        # input_stft: (..., F, 2) -> (..., F)
        input_power = input_stft[..., 0] ** 2 + input_stft[..., 1] ** 2
        # Clamp before sqrt to avoid log/grad issues on silent frames.
        input_amp = torch.sqrt(torch.clamp(input_power, min=1.0e-10))
        input_feats, _ = self.logmel(input_amp, feats_lens)
        return input_feats, feats_lens
| 3,044 | 28.563107 | 82 | py |
espnet | espnet-master/espnet2/tts/feats_extract/dio.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""F0 extractor using DIO + Stonemask algorithm."""
import logging
from typing import Any, Dict, Tuple, Union
import humanfriendly
import numpy as np
import pyworld
import torch
import torch.nn.functional as F
from scipy.interpolate import interp1d
from typeguard import check_argument_types
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
from espnet.nets.pytorch_backend.nets_utils import pad_list
class Dio(AbsFeatsExtract):
    """F0 estimation with dio + stonemask algorithm.

    This is f0 extractor based on dio + stonemask algorithm introduced in `WORLD:
    a vocoder-based high-quality speech synthesis system for real-time applications`_.

    .. _`WORLD: a vocoder-based high-quality speech synthesis system for real-time
        applications`: https://doi.org/10.1587/transinf.2015EDP7457

    Note:
        This module is based on NumPy implementation. Therefore, the computational graph
        is not connected.

    Todo:
        Replace this module with PyTorch-based implementation.

    """

    def __init__(
        self,
        fs: Union[int, str] = 22050,
        n_fft: int = 1024,
        hop_length: int = 256,
        f0min: int = 80,
        f0max: int = 400,
        use_token_averaged_f0: bool = True,
        use_continuous_f0: bool = True,
        use_log_f0: bool = True,
        reduction_factor: int = None,
    ):
        assert check_argument_types()
        super().__init__()
        if isinstance(fs, str):
            # Allow human-friendly sampling-rate strings such as "22.05k".
            fs = humanfriendly.parse_size(fs)
        self.fs = fs
        self.n_fft = n_fft
        self.hop_length = hop_length
        # pyworld expects the frame shift in milliseconds.
        self.frame_period = 1000 * hop_length / fs
        self.f0min = f0min
        self.f0max = f0max
        self.use_token_averaged_f0 = use_token_averaged_f0
        self.use_continuous_f0 = use_continuous_f0
        self.use_log_f0 = use_log_f0
        if use_token_averaged_f0:
            # Token averaging requires a valid reduction factor.
            assert reduction_factor >= 1
        self.reduction_factor = reduction_factor

    def output_size(self) -> int:
        """Return the feature dimension (one F0 value per frame)."""
        return 1

    def get_parameters(self) -> Dict[str, Any]:
        """Return the extractor configuration as a dictionary."""
        return dict(
            fs=self.fs,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            f0min=self.f0min,
            f0max=self.f0max,
            use_token_averaged_f0=self.use_token_averaged_f0,
            use_continuous_f0=self.use_continuous_f0,
            use_log_f0=self.use_log_f0,
            reduction_factor=self.reduction_factor,
        )

    def forward(
        self,
        input: torch.Tensor,
        input_lengths: torch.Tensor = None,
        feats_lengths: torch.Tensor = None,
        durations: torch.Tensor = None,
        durations_lengths: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract (optionally token-averaged) F0 sequences.

        Args:
            input (Tensor): Batch of waveforms (B, T).
            input_lengths (Tensor): Length of each waveform (B,). If None,
                every waveform is assumed to be full length.
            feats_lengths (Tensor): If given, each F0 sequence is padded or
                truncated to this many frames (to match the spectrogram).
            durations (Tensor): Token durations for token averaging
                (required when ``use_token_averaged_f0`` is True).
            durations_lengths (Tensor): Length of each duration sequence.

        Returns:
            Tensor: F0 tensor (B, T', 1).
            Tensor: Length of each F0 sequence (B,).
        """
        # If not provide, we assume that the inputs have the same length
        if input_lengths is None:
            input_lengths = (
                input.new_ones(input.shape[0], dtype=torch.long) * input.shape[1]
            )

        # F0 extraction
        pitch = [self._calculate_f0(x[:xl]) for x, xl in zip(input, input_lengths)]

        # (Optional): Adjust length to match with the mel-spectrogram
        if feats_lengths is not None:
            pitch = [
                self._adjust_num_frames(p, fl).view(-1)
                for p, fl in zip(pitch, feats_lengths)
            ]

        # (Optional): Average by duration to calculate token-wise f0
        if self.use_token_averaged_f0:
            durations = durations * self.reduction_factor
            pitch = [
                self._average_by_duration(p, d).view(-1)
                for p, d in zip(pitch, durations)
            ]
            pitch_lengths = durations_lengths
        else:
            pitch_lengths = input.new_tensor([len(p) for p in pitch], dtype=torch.long)

        # Padding
        pitch = pad_list(pitch, 0.0)

        # Return with the shape (B, T, 1)
        return pitch.unsqueeze(-1), pitch_lengths

    def _calculate_f0(self, input: torch.Tensor) -> torch.Tensor:
        """Extract the F0 contour of one waveform via DIO + StoneMask."""
        x = input.cpu().numpy().astype(np.double)
        f0, timeaxis = pyworld.dio(
            x,
            self.fs,
            f0_floor=self.f0min,
            f0_ceil=self.f0max,
            frame_period=self.frame_period,
        )
        # StoneMask refines the coarse DIO estimate.
        f0 = pyworld.stonemask(x, f0, timeaxis, self.fs)
        if self.use_continuous_f0:
            f0 = self._convert_to_continuous_f0(f0)
        if self.use_log_f0:
            # Log-compress only voiced (non-zero) frames; unvoiced stay 0.
            nonzero_idxs = np.where(f0 != 0)[0]
            f0[nonzero_idxs] = np.log(f0[nonzero_idxs])
        return input.new_tensor(f0.reshape(-1), dtype=torch.float)

    @staticmethod
    def _adjust_num_frames(x: torch.Tensor, num_frames: torch.Tensor) -> torch.Tensor:
        """Zero-pad or truncate ``x`` to exactly ``num_frames`` frames."""
        if num_frames > len(x):
            x = F.pad(x, (0, num_frames - len(x)))
        elif num_frames < len(x):
            x = x[:num_frames]
        return x

    @staticmethod
    def _convert_to_continuous_f0(f0: np.ndarray) -> np.ndarray:
        """Linearly interpolate unvoiced (zero) regions of an F0 contour."""
        if (f0 == 0).all():
            # logging.warn is deprecated; warning is the supported spelling.
            logging.warning("All frames seems to be unvoiced.")
            return f0

        # padding start and end of f0 sequence
        start_f0 = f0[f0 != 0][0]
        end_f0 = f0[f0 != 0][-1]
        start_idx = np.where(f0 == start_f0)[0][0]
        end_idx = np.where(f0 == end_f0)[0][-1]
        f0[:start_idx] = start_f0
        f0[end_idx:] = end_f0

        # get non-zero frame index
        nonzero_idxs = np.where(f0 != 0)[0]

        # perform linear interpolation
        interp_fn = interp1d(nonzero_idxs, f0[nonzero_idxs])
        f0 = interp_fn(np.arange(0, f0.shape[0]))

        return f0

    def _average_by_duration(self, x: torch.Tensor, d: torch.Tensor) -> torch.Tensor:
        """Average voiced F0 values within each token's duration span."""
        # Frame count may exceed sum(d) by at most reduction_factor - 1.
        assert 0 <= len(x) - d.sum() < self.reduction_factor
        d_cumsum = F.pad(d.cumsum(dim=0), (1, 0))
        x_avg = [
            x[start:end].masked_select(x[start:end].gt(0.0)).mean(dim=0)
            if len(x[start:end].masked_select(x[start:end].gt(0.0))) != 0
            else x.new_tensor(0.0)
            for start, end in zip(d_cumsum[:-1], d_cumsum[1:])
        ]
        return torch.stack(x_avg)
| 6,224 | 33.016393 | 88 | py |
espnet | espnet-master/espnet2/tts/feats_extract/yin.py | # remove np from https://github.com/dhchoi99/NANSY/blob/master/models/yin.py
# adapted from https://github.com/patriceguyot/Yin
# https://github.com/NVIDIA/mellotron/blob/master/yin.py
import numpy as np
import torch
import torch.nn.functional as F
def differenceFunction(x, N, tau_max):
"""
Compute difference function of data x. This corresponds to equation (6) in [1]
This solution is implemented directly with torch rfft.
:param x: audio data (Tensor)
:param N: length of data
:param tau_max: integration window size
:return: difference function
:rtype: list
"""
# x = np.array(x, np.float64) #[B,T]
assert x.dim() == 2
b, w = x.shape
if w < tau_max:
x = F.pad(
x,
(tau_max - w - (tau_max - w) // 2, (tau_max - w) // 2),
"constant",
mode="reflect",
)
w = tau_max
# x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum()))
x_cumsum = torch.cat(
[torch.zeros([b, 1], device=x.device), (x * x).cumsum(dim=1)], dim=1
)
size = w + tau_max
p2 = (size // 32).bit_length()
# p2 = ceil(log2(size+1 // 32))
nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)
size_pad = min(n * 2**p2 for n in nice_numbers if n * 2**p2 >= size)
fc = torch.fft.rfft(x, size_pad) # [B,F]
conv = torch.fft.irfft(fc * fc.conj())[:, :tau_max]
return (
x_cumsum[:, w : w - tau_max : -1]
+ x_cumsum[:, w]
- x_cumsum[:, :tau_max]
- 2 * conv
)
def differenceFunction_np(x, N, tau_max):
    """Compute the difference function of audio data ``x`` with NumPy.

    This corresponds to equation (6) in the YIN paper, evaluated through the
    FFT-based autocorrelation identity.

    :param x: audio data
    :param N: length of data (unused; kept for API compatibility)
    :param tau_max: integration window size
    :return: difference function of length ``min(tau_max, len(x))``
    :rtype: np.ndarray
    """
    signal = np.array(x, np.float64)
    length = signal.size
    tau_max = min(tau_max, length)
    # energy[k] = sum of squares of the first k samples.
    energy = np.concatenate((np.array([0.0]), (signal * signal).cumsum()))
    target = length + tau_max
    exponent = (target // 32).bit_length()
    # Pick the smallest highly-composite FFT size that fits the linear conv.
    candidates = (16, 18, 20, 24, 25, 27, 30, 32)
    fft_size = min(c * 2**exponent for c in candidates if c * 2**exponent >= target)
    spectrum = np.fft.rfft(signal, fft_size)
    # Autocorrelation r(tau) via the inverse FFT of the power spectrum.
    acorr = np.fft.irfft(spectrum * spectrum.conjugate())[:tau_max]
    # d(tau) = sum_{j<w-tau} x_j^2 + sum_{tau<=j<w} x_j^2 - 2 r(tau)
    return (
        energy[length : length - tau_max : -1]
        + energy[length]
        - energy[:tau_max]
        - 2 * acorr
    )
def cumulativeMeanNormalizedDifferenceFunction(df, N, eps=1e-8):
    """Compute the cumulative mean normalized difference function (CMND).

    This corresponds to equation (8) in the YIN paper: each lag tau >= 1 is
    divided by the running mean of the difference function up to that lag,
    and lag 0 is defined to be 1.

    :param df: difference function, shape (B, >=N)
    :param N: length of data
    :param eps: small constant guarding against division by zero
    :return: cumulative mean normalized difference function, shape (B, N)
    :rtype: torch.Tensor
    """
    batch = df.shape[0]
    tail = df[:, 1:]
    lags = torch.arange(1, N, device=df.device, dtype=df.dtype).view(1, -1)
    running_sum = tail.cumsum(dim=-1) + eps
    normalized = tail * lags / running_sum
    # Lag 0 is defined as 1 by convention.
    head = torch.ones([batch, 1], device=df.device, dtype=df.dtype)
    return torch.cat([head, normalized], dim=-1)
def differenceFunctionTorch(xs: torch.Tensor, N, tau_max) -> torch.Tensor:
    """pytorch backend batch-wise differenceFunction

    has 1e-4 level error with input shape of (32, 22050*1.5)

    Args:
        xs (Tensor): Batched audio of shape (B, T).
        N: length of data (unused; kept for API compatibility).
        tau_max (int): integration window size.

    Returns:
        Tensor: Difference function of shape (B, min(tau_max, T)).
    """
    xs = xs.double()
    w = xs.shape[-1]
    tau_max = min(tau_max, w)
    # Prefix sums of energy: x_cumsum[:, k] = sum_{j<k} xs_j^2
    # (the unused `zeros` temporary from the original was removed)
    x_cumsum = torch.cat(
        (
            torch.zeros((xs.shape[0], 1), device=xs.device),
            (xs * xs).cumsum(dim=-1, dtype=torch.double),
        ),
        dim=-1,
    )  # B x w
    size = w + tau_max
    p2 = (size // 32).bit_length()
    # Smallest "nice" (highly composite) FFT size that fits the linear conv.
    nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)
    size_pad = min(x * 2**p2 for x in nice_numbers if x * 2**p2 >= size)
    fcs = torch.fft.rfft(xs, n=size_pad, dim=-1)
    # Autocorrelation r(tau) via the inverse FFT of the power spectrum.
    convs = torch.fft.irfft(fcs * fcs.conj())[:, :tau_max]
    y1 = torch.flip(x_cumsum[:, w - tau_max + 1 : w + 1], dims=[-1])
    # d(tau) = sum_{j<w-tau} x_j^2 + sum_{tau<=j<w} x_j^2 - 2 r(tau)
    y = y1 + x_cumsum[:, w].unsqueeze(-1) - x_cumsum[:, :tau_max] - 2 * convs
    return y
def cumulativeMeanNormalizedDifferenceFunctionTorch(
    dfs: torch.Tensor, N, eps=1e-8
) -> torch.Tensor:
    """Batch-wise CMND (equation (8) in the YIN paper) in double precision.

    Args:
        dfs (Tensor): Difference functions of shape (B, >=N).
        N: length of data.
        eps (float): Small constant guarding against division by zero.

    Returns:
        Tensor: CMND functions of shape (B, N) with lag 0 fixed to 1.
    """
    lags = torch.arange(1, N, device=dfs.device, dtype=torch.float64)
    running = torch.cumsum(dfs[:, 1:], dim=-1, dtype=torch.float64).to(dfs.device)
    normalized = dfs[:, 1:] * lags / (running + eps)
    head = torch.ones(normalized.shape[0], 1, device=dfs.device)
    return torch.cat((head, normalized), dim=-1)
if __name__ == "__main__":
    # Smoke test: compare the frame-by-frame path against the batched torch
    # path (differenceFunctionTorch + CMND) on random audio. Requires CUDA.
    wav = torch.randn(32, int(22050 * 1.5)).cuda()
    wav_numpy = wav.detach().cpu().numpy()
    x = wav_numpy[0]
    # Framing parameters (in samples).
    w_len = 2048
    w_step = 256
    tau_max = 2048
    W = 2048
    # Frame start offsets over the first utterance.
    startFrames = list(range(0, x.shape[-1] - w_len, w_step))
    startFrames = np.asarray(startFrames)
    # times = startFrames / sr
    frames = [x[..., t : t + W] for t in startFrames]
    frames = np.asarray(frames)
    frames_torch = torch.from_numpy(frames).cuda()
    cmndfs0 = []
    for idx, frame in enumerate(frames):
        # NOTE(review): `frame` is a 1-D numpy array here, but
        # differenceFunction asserts `x.dim() == 2` on a torch tensor; this
        # loop looks like it was written for the numpy variant
        # (differenceFunction_np) — confirm before relying on this script.
        df = differenceFunction(frame, frame.shape[-1], tau_max)
        cmndf = cumulativeMeanNormalizedDifferenceFunction(df, tau_max)
        cmndfs0.append(cmndf)
    cmndfs0 = np.asarray(cmndfs0)
    dfs = differenceFunctionTorch(frames_torch, frames_torch.shape[-1], tau_max)
    cmndfs1 = (
        cumulativeMeanNormalizedDifferenceFunctionTorch(dfs, tau_max)
        .detach()
        .cpu()
        .numpy()
    )
    # Report shapes and total absolute deviation between the two paths.
    print(cmndfs0.shape, cmndfs1.shape)
    print(np.sum(np.abs(cmndfs0 - cmndfs1)))
| 5,723 | 29.940541 | 87 | py |
espnet | espnet-master/espnet2/tts/utils/duration_calculator.py | # -*- coding: utf-8 -*-
# Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Duration calculator for ESPnet2."""
from typing import Tuple
import torch
class DurationCalculator(torch.nn.Module):
    """Duration calculator module."""

    @torch.no_grad()
    def forward(self, att_ws: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Convert attention weight to durations.

        Args:
            att_ws (Tensor): Attention weight tensor (T_feats, T_text) or
                (#layers, #heads, T_feats, T_text).

        Returns:
            LongTensor: Duration of each input (T_text,).
            Tensor: Focus rate value.

        """
        return (
            self._calculate_duration(att_ws),
            self._calculate_focus_rete(att_ws),
        )

    @staticmethod
    def _calculate_focus_rete(att_ws):
        # Focus rate = how sharply each frame attends to a single token.
        if att_ws.dim() == 2:
            # tacotron 2 case -> (T_feats, T_text)
            return att_ws.max(dim=-1)[0].mean()
        if att_ws.dim() == 4:
            # transformer case -> (#layers, #heads, T_feats, T_text)
            return att_ws.max(dim=-1)[0].mean(dim=-1).max()
        raise ValueError("att_ws should be 2 or 4 dimensional tensor.")

    @staticmethod
    def _calculate_duration(att_ws):
        if att_ws.dim() == 2:
            # tacotron 2 case: already (T_feats, T_text)
            pass
        elif att_ws.dim() == 4:
            # transformer case: flatten layers/heads and keep the most
            # diagonal head, judged by its focus rate.
            flat = att_ws.reshape(
                -1, *att_ws.shape[2:]
            )  # (#heads * #layers, T_feats, T_text)
            best_head = flat.max(dim=-1)[0].mean(dim=-1).argmax()
            att_ws = flat[best_head]  # (T_feats, T_text)
        else:
            raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
        # Duration of token i = number of frames whose argmax attends to i.
        frame_choice = att_ws.argmax(-1)
        token_counts = [frame_choice.eq(i).sum() for i in range(att_ws.shape[1])]
        return torch.stack(token_counts).view(-1)
| 2,291 | 33.727273 | 87 | py |
espnet | espnet-master/espnet2/tts/utils/parallel_wavegan_pretrained_vocoder.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Wrapper class for the vocoder model trained with parallel_wavegan repo."""
import logging
import os
from pathlib import Path
from typing import Optional, Union
import torch
import yaml
class ParallelWaveGANPretrainedVocoder(torch.nn.Module):
    """Wrapper class to load the vocoder trained with parallel_wavegan repo."""

    def __init__(
        self,
        model_file: Union[Path, str],
        config_file: Optional[Union[Path, str]] = None,
    ):
        """Initialize ParallelWaveGANPretrainedVocoder module.

        Args:
            model_file: Path to the pretrained vocoder checkpoint.
            config_file: Path to the training config YAML. If None, a
                ``config.yml`` next to the model file is assumed.

        Raises:
            ImportError: If `parallel_wavegan` is not installed.
        """
        super().__init__()
        try:
            # Imported lazily so the rest of the package works without it.
            from parallel_wavegan.utils import load_model
        except ImportError:
            logging.error(
                "`parallel_wavegan` is not installed. "
                "Please install via `pip install -U parallel_wavegan`."
            )
            raise
        if config_file is None:
            # Default: config stored alongside the checkpoint.
            dirname = os.path.dirname(str(model_file))
            config_file = os.path.join(dirname, "config.yml")
        with open(config_file) as f:
            # SECURITY NOTE(review): yaml.Loader can construct arbitrary
            # Python objects — only load configs from trusted sources
            # (yaml.SafeLoader would be safer if the config allows it).
            config = yaml.load(f, Loader=yaml.Loader)
        self.fs = config["sampling_rate"]
        self.vocoder = load_model(model_file, config)
        if hasattr(self.vocoder, "remove_weight_norm"):
            # Weight norm is only needed during training.
            self.vocoder.remove_weight_norm()
        # If the model carries feature statistics ("mean"), normalize inputs
        # before inference.
        self.normalize_before = False
        if hasattr(self.vocoder, "mean"):
            self.normalize_before = True

    @torch.no_grad()
    def forward(self, feats: torch.Tensor) -> torch.Tensor:
        """Generate waveform with pretrained vocoder.

        Args:
            feats (Tensor): Feature tensor (T_feats, #mels).

        Returns:
            Tensor: Generated waveform tensor (T_wav).

        """
        return self.vocoder.inference(
            feats,
            normalize_before=self.normalize_before,
        ).view(-1)
| 1,921 | 30.508197 | 79 | py |
espnet | espnet-master/espnet2/tts/gst/style_encoder.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Style encoder of GST-Tacotron."""
from typing import Sequence
import torch
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.transformer.attention import (
MultiHeadedAttention as BaseMultiHeadedAttention,
)
class StyleEncoder(torch.nn.Module):
    """Style encoder.

    This module is the style encoder introduced in `Style Tokens: Unsupervised
    Style Modeling, Control and Transfer in End-to-End Speech Synthesis`.

    .. _`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End
        Speech Synthesis`: https://arxiv.org/abs/1803.09017

    Args:
        idim (int, optional): Dimension of the input mel-spectrogram.
        gst_tokens (int, optional): The number of GST embeddings.
        gst_token_dim (int, optional): Dimension of each GST embedding.
        gst_heads (int, optional): The number of heads in GST multihead attention.
        conv_layers (int, optional): The number of conv layers in the reference encoder.
        conv_chans_list: (Sequence[int], optional):
            List of the number of channels of conv layers in the reference encoder.
        conv_kernel_size (int, optional):
            Kernel size of conv layers in the reference encoder.
        conv_stride (int, optional):
            Stride size of conv layers in the reference encoder.
        gru_layers (int, optional): The number of GRU layers in the reference encoder.
        gru_units (int, optional): The number of GRU units in the reference encoder.

    Todo:
        * Support manual weight specification in inference.

    """

    def __init__(
        self,
        idim: int = 80,
        gst_tokens: int = 10,
        gst_token_dim: int = 256,
        gst_heads: int = 4,
        conv_layers: int = 6,
        conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),
        conv_kernel_size: int = 3,
        conv_stride: int = 2,
        gru_layers: int = 1,
        gru_units: int = 128,
    ):
        """Initialize global style encoder module."""
        assert check_argument_types()
        super(StyleEncoder, self).__init__()

        # Reference encoder compresses the spectrogram into one embedding,
        # which the style token layer then turns into a style embedding.
        self.ref_enc = ReferenceEncoder(
            idim=idim,
            conv_layers=conv_layers,
            conv_chans_list=conv_chans_list,
            conv_kernel_size=conv_kernel_size,
            conv_stride=conv_stride,
            gru_layers=gru_layers,
            gru_units=gru_units,
        )
        self.stl = StyleTokenLayer(
            ref_embed_dim=gru_units,
            gst_tokens=gst_tokens,
            gst_token_dim=gst_token_dim,
            gst_heads=gst_heads,
        )

    def forward(self, speech: torch.Tensor) -> torch.Tensor:
        """Extract a style token embedding from reference speech.

        Args:
            speech (Tensor): Batch of padded target features (B, Lmax, odim).

        Returns:
            Tensor: Style token embeddings (B, token_dim).

        """
        reference_embedding = self.ref_enc(speech)
        return self.stl(reference_embedding)
class ReferenceEncoder(torch.nn.Module):
    """Reference encoder module.

    This module is the reference encoder introduced in `Style Tokens:
    Unsupervised Style Modeling, Control and Transfer in End-to-End Speech
    Synthesis`.

    .. _`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End
        Speech Synthesis`: https://arxiv.org/abs/1803.09017

    Args:
        idim (int, optional): Dimension of the input mel-spectrogram.
        conv_layers (int, optional): The number of conv layers in the reference encoder.
        conv_chans_list: (Sequence[int], optional):
            List of the number of channels of conv layers in the reference encoder.
        conv_kernel_size (int, optional):
            Kernel size of conv layers in the reference encoder.
        conv_stride (int, optional):
            Stride size of conv layers in the reference encoder.
        gru_layers (int, optional): The number of GRU layers in the reference encoder.
        gru_units (int, optional): The number of GRU units in the reference encoder.

    """

    def __init__(
        self,
        idim=80,
        conv_layers: int = 6,
        conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),
        conv_kernel_size: int = 3,
        conv_stride: int = 2,
        gru_layers: int = 1,
        gru_units: int = 128,
    ):
        """Initialize reference encoder module."""
        assert check_argument_types()
        super(ReferenceEncoder, self).__init__()

        # check hyperparameters are valid
        assert conv_kernel_size % 2 == 1, "kernel size must be odd."
        assert (
            len(conv_chans_list) == conv_layers
        ), "the number of conv layers and length of channels list must be the same."

        padding = (conv_kernel_size - 1) // 2
        modules = []
        in_chans = 1
        for i in range(conv_layers):
            out_chans = conv_chans_list[i]
            modules.append(
                torch.nn.Conv2d(
                    in_chans,
                    out_chans,
                    kernel_size=conv_kernel_size,
                    stride=conv_stride,
                    padding=padding,
                    # Do not use bias due to the following batch norm
                    bias=False,
                )
            )
            modules.append(torch.nn.BatchNorm2d(out_chans))
            modules.append(torch.nn.ReLU(inplace=True))
            in_chans = out_chans
        self.convs = torch.nn.Sequential(*modules)

        self.conv_layers = conv_layers
        self.kernel_size = conv_kernel_size
        self.stride = conv_stride
        self.padding = padding

        # Frequency-axis width after each conv layer follows the standard
        # conv output-size formula; the flattened GRU input is that width
        # times the final number of channels.
        freq_dim = idim
        for _ in range(conv_layers):
            freq_dim = (freq_dim - conv_kernel_size + 2 * padding) // conv_stride + 1
        gru_in_units = freq_dim * in_chans
        self.gru = torch.nn.GRU(gru_in_units, gru_units, gru_layers, batch_first=True)

    def forward(self, speech: torch.Tensor) -> torch.Tensor:
        """Calculate forward propagation.

        Args:
            speech (Tensor): Batch of padded target features (B, Lmax, idim).

        Returns:
            Tensor: Reference embedding (B, gru_units)

        """
        batch_size = speech.size(0)
        # (B, 1, Lmax, idim) -> conv stack -> (B, Lmax', conv_out_chans, idim')
        hs = self.convs(speech.unsqueeze(1)).transpose(1, 2)
        # NOTE(kan-bayashi): We need to care the length?
        num_frames = hs.size(1)
        # Flatten channel and frequency axes: (B, Lmax', conv_out_chans * idim')
        hs = hs.contiguous().view(batch_size, num_frames, -1)
        self.gru.flatten_parameters()
        _, ref_embs = self.gru(hs)  # (gru_layers, batch_size, gru_units)
        # Keep only the last GRU layer's final hidden state.
        return ref_embs[-1]  # (batch_size, gru_units)
class StyleTokenLayer(torch.nn.Module):
    """Style token layer module.

    This module is the style token layer introduced in `Style Tokens:
    Unsupervised Style Modeling, Control and Transfer in End-to-End Speech
    Synthesis`.

    .. _`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End
        Speech Synthesis`: https://arxiv.org/abs/1803.09017

    Args:
        ref_embed_dim (int, optional): Dimension of the input reference embedding.
        gst_tokens (int, optional): The number of GST embeddings.
        gst_token_dim (int, optional): Dimension of each GST embedding.
        gst_heads (int, optional): The number of heads in GST multihead attention.
        dropout_rate (float, optional): Dropout rate in multi-head attention.

    """

    def __init__(
        self,
        ref_embed_dim: int = 128,
        gst_tokens: int = 10,
        gst_token_dim: int = 256,
        gst_heads: int = 4,
        dropout_rate: float = 0.0,
    ):
        """Initialize style token layer module."""
        assert check_argument_types()
        super(StyleTokenLayer, self).__init__()

        # Learnable style token table; tokens are per-head sized and the
        # heads are concatenated back to gst_token_dim by the attention.
        gst_embs = torch.randn(gst_tokens, gst_token_dim // gst_heads)
        self.register_parameter("gst_embs", torch.nn.Parameter(gst_embs))
        self.mha = MultiHeadedAttention(
            q_dim=ref_embed_dim,
            k_dim=gst_token_dim // gst_heads,
            v_dim=gst_token_dim // gst_heads,
            n_head=gst_heads,
            n_feat=gst_token_dim,
            dropout_rate=dropout_rate,
        )

    def forward(self, ref_embs: torch.Tensor) -> torch.Tensor:
        """Weight the style tokens by attention from the reference embedding.

        Args:
            ref_embs (Tensor): Reference embeddings (B, ref_embed_dim).

        Returns:
            Tensor: Style token embeddings (B, gst_token_dim).

        """
        batch_size = ref_embs.size(0)
        # (num_tokens, token_dim) -> (batch_size, num_tokens, token_dim)
        tokens = torch.tanh(self.gst_embs).unsqueeze(0).expand(batch_size, -1, -1)
        # NOTE(kan-bayashi): Shoule we apply Tanh?
        queries = ref_embs.unsqueeze(1)  # (batch_size, 1, ref_embed_dim)
        attended = self.mha(queries, tokens, tokens, None)
        return attended.squeeze(1)
class MultiHeadedAttention(BaseMultiHeadedAttention):
    """Multi head attention module with different input dimension."""

    def __init__(self, q_dim, k_dim, v_dim, n_head, n_feat, dropout_rate=0.0):
        """Initialize multi head attention module.

        Args:
            q_dim (int): Input dimension of the query.
            k_dim (int): Input dimension of the key.
            v_dim (int): Input dimension of the value.
            n_head (int): The number of attention heads.
            n_feat (int): Hidden dimension (must be divisible by n_head).
            dropout_rate (float): Dropout rate applied in attention.
        """
        # NOTE(kan-bayashi): Do not use super().__init__() here since we want to
        #   overwrite BaseMultiHeadedAttention.__init__() method.
        torch.nn.Module.__init__(self)
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        # Separate projections so query/key/value may have distinct dims.
        self.linear_q = torch.nn.Linear(q_dim, n_feat)
        self.linear_k = torch.nn.Linear(k_dim, n_feat)
        self.linear_v = torch.nn.Linear(v_dim, n_feat)
        self.linear_out = torch.nn.Linear(n_feat, n_feat)
        # Attention weights are cached here by the base class's forward.
        self.attn = None
        self.dropout = torch.nn.Dropout(p=dropout_rate)
| 10,101 | 36.003663 | 88 | py |
espnet | espnet-master/espnet2/tts/transformer/transformer.py | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer-TTS related modules."""
from typing import Dict, Optional, Sequence, Tuple
import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.torch_utils.initialize import initialize
from espnet2.tts.abs_tts import AbsTTS
from espnet2.tts.gst.style_encoder import StyleEncoder
from espnet.nets.pytorch_backend.e2e_tts_transformer import (
GuidedMultiHeadAttentionLoss,
TransformerLoss,
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask, make_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as DecoderPrenet
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
class Transformer(AbsTTS):
"""Transformer-TTS module.
This is a module of text-to-speech Transformer described in `Neural Speech Synthesis
with Transformer Network`_, which convert the sequence of tokens into the sequence
of Mel-filterbanks.
.. _`Neural Speech Synthesis with Transformer Network`:
https://arxiv.org/pdf/1809.08895.pdf
"""
def __init__(
self,
# network structure related
idim: int,
odim: int,
embed_dim: int = 512,
eprenet_conv_layers: int = 3,
eprenet_conv_chans: int = 256,
eprenet_conv_filts: int = 5,
dprenet_layers: int = 2,
dprenet_units: int = 256,
elayers: int = 6,
eunits: int = 1024,
adim: int = 512,
aheads: int = 4,
dlayers: int = 6,
dunits: int = 1024,
postnet_layers: int = 5,
postnet_chans: int = 256,
postnet_filts: int = 5,
positionwise_layer_type: str = "conv1d",
positionwise_conv_kernel_size: int = 1,
use_scaled_pos_enc: bool = True,
use_batch_norm: bool = True,
encoder_normalize_before: bool = True,
decoder_normalize_before: bool = True,
encoder_concat_after: bool = False,
decoder_concat_after: bool = False,
reduction_factor: int = 1,
# extra embedding related
spks: Optional[int] = None,
langs: Optional[int] = None,
spk_embed_dim: Optional[int] = None,
spk_embed_integration_type: str = "add",
use_gst: bool = False,
gst_tokens: int = 10,
gst_heads: int = 4,
gst_conv_layers: int = 6,
gst_conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),
gst_conv_kernel_size: int = 3,
gst_conv_stride: int = 2,
gst_gru_layers: int = 1,
gst_gru_units: int = 128,
# training related
transformer_enc_dropout_rate: float = 0.1,
transformer_enc_positional_dropout_rate: float = 0.1,
transformer_enc_attn_dropout_rate: float = 0.1,
transformer_dec_dropout_rate: float = 0.1,
transformer_dec_positional_dropout_rate: float = 0.1,
transformer_dec_attn_dropout_rate: float = 0.1,
transformer_enc_dec_attn_dropout_rate: float = 0.1,
eprenet_dropout_rate: float = 0.5,
dprenet_dropout_rate: float = 0.5,
postnet_dropout_rate: float = 0.5,
init_type: str = "xavier_uniform",
init_enc_alpha: float = 1.0,
init_dec_alpha: float = 1.0,
use_masking: bool = False,
use_weighted_masking: bool = False,
bce_pos_weight: float = 5.0,
loss_type: str = "L1",
use_guided_attn_loss: bool = True,
num_heads_applied_guided_attn: int = 2,
num_layers_applied_guided_attn: int = 2,
modules_applied_guided_attn: Sequence[str] = ("encoder-decoder"),
guided_attn_loss_sigma: float = 0.4,
guided_attn_loss_lambda: float = 1.0,
):
"""Initialize Transformer module.
Args:
idim (int): Dimension of the inputs.
odim (int): Dimension of the outputs.
embed_dim (int): Dimension of character embedding.
eprenet_conv_layers (int): Number of encoder prenet convolution layers.
eprenet_conv_chans (int): Number of encoder prenet convolution channels.
eprenet_conv_filts (int): Filter size of encoder prenet convolution.
dprenet_layers (int): Number of decoder prenet layers.
dprenet_units (int): Number of decoder prenet hidden units.
elayers (int): Number of encoder layers.
eunits (int): Number of encoder hidden units.
adim (int): Number of attention transformation dimensions.
aheads (int): Number of heads for multi head attention.
dlayers (int): Number of decoder layers.
dunits (int): Number of decoder hidden units.
postnet_layers (int): Number of postnet layers.
postnet_chans (int): Number of postnet channels.
postnet_filts (int): Filter size of postnet.
use_scaled_pos_enc (bool): Whether to use trainable scaled pos encoding.
use_batch_norm (bool): Whether to use batch normalization in encoder prenet.
encoder_normalize_before (bool): Whether to apply layernorm layer before
encoder block.
decoder_normalize_before (bool): Whether to apply layernorm layer before
decoder block.
encoder_concat_after (bool): Whether to concatenate attention layer's input
and output in encoder.
decoder_concat_after (bool): Whether to concatenate attention layer's input
and output in decoder.
positionwise_layer_type (str): Position-wise operation type.
positionwise_conv_kernel_size (int): Kernel size in position wise conv 1d.
reduction_factor (int): Reduction factor.
spks (Optional[int]): Number of speakers. If set to > 1, assume that the
sids will be provided as the input and use sid embedding layer.
langs (Optional[int]): Number of languages. If set to > 1, assume that the
lids will be provided as the input and use sid embedding layer.
spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
assume that spembs will be provided as the input.
spk_embed_integration_type (str): How to integrate speaker embedding.
use_gst (str): Whether to use global style token.
gst_tokens (int): Number of GST embeddings.
gst_heads (int): Number of heads in GST multihead attention.
gst_conv_layers (int): Number of conv layers in GST.
gst_conv_chans_list: (Sequence[int]): List of the number of channels of conv
layers in GST.
gst_conv_kernel_size (int): Kernel size of conv layers in GST.
gst_conv_stride (int): Stride size of conv layers in GST.
gst_gru_layers (int): Number of GRU layers in GST.
gst_gru_units (int): Number of GRU units in GST.
transformer_lr (float): Initial value of learning rate.
transformer_warmup_steps (int): Optimizer warmup steps.
transformer_enc_dropout_rate (float): Dropout rate in encoder except
attention and positional encoding.
transformer_enc_positional_dropout_rate (float): Dropout rate after encoder
positional encoding.
transformer_enc_attn_dropout_rate (float): Dropout rate in encoder
self-attention module.
transformer_dec_dropout_rate (float): Dropout rate in decoder except
attention & positional encoding.
transformer_dec_positional_dropout_rate (float): Dropout rate after decoder
positional encoding.
transformer_dec_attn_dropout_rate (float): Dropout rate in decoder
self-attention module.
transformer_enc_dec_attn_dropout_rate (float): Dropout rate in source
attention module.
init_type (str): How to initialize transformer parameters.
init_enc_alpha (float): Initial value of alpha in scaled pos encoding of the
encoder.
init_dec_alpha (float): Initial value of alpha in scaled pos encoding of the
decoder.
eprenet_dropout_rate (float): Dropout rate in encoder prenet.
dprenet_dropout_rate (float): Dropout rate in decoder prenet.
postnet_dropout_rate (float): Dropout rate in postnet.
use_masking (bool): Whether to apply masking for padded part in loss
calculation.
use_weighted_masking (bool): Whether to apply weighted masking in loss
calculation.
bce_pos_weight (float): Positive sample weight in bce calculation
(only for use_masking=true).
loss_type (str): How to calculate loss.
use_guided_attn_loss (bool): Whether to use guided attention loss.
num_heads_applied_guided_attn (int): Number of heads in each layer to apply
guided attention loss.
num_layers_applied_guided_attn (int): Number of layers to apply guided
attention loss.
modules_applied_guided_attn (Sequence[str]): List of module names to apply
guided attention loss.
guided_attn_loss_sigma (float) Sigma in guided attention loss.
guided_attn_loss_lambda (float): Lambda in guided attention loss.
"""
assert check_argument_types()
super().__init__()
# store hyperparameters
self.idim = idim
self.odim = odim
self.eos = idim - 1
self.reduction_factor = reduction_factor
self.use_gst = use_gst
self.use_guided_attn_loss = use_guided_attn_loss
self.use_scaled_pos_enc = use_scaled_pos_enc
self.loss_type = loss_type
self.use_guided_attn_loss = use_guided_attn_loss
if self.use_guided_attn_loss:
if num_layers_applied_guided_attn == -1:
self.num_layers_applied_guided_attn = elayers
else:
self.num_layers_applied_guided_attn = num_layers_applied_guided_attn
if num_heads_applied_guided_attn == -1:
self.num_heads_applied_guided_attn = aheads
else:
self.num_heads_applied_guided_attn = num_heads_applied_guided_attn
self.modules_applied_guided_attn = modules_applied_guided_attn
# use idx 0 as padding idx
self.padding_idx = 0
# get positional encoding class
pos_enc_class = (
ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
)
# define transformer encoder
if eprenet_conv_layers != 0:
# encoder prenet
encoder_input_layer = torch.nn.Sequential(
EncoderPrenet(
idim=idim,
embed_dim=embed_dim,
elayers=0,
econv_layers=eprenet_conv_layers,
econv_chans=eprenet_conv_chans,
econv_filts=eprenet_conv_filts,
use_batch_norm=use_batch_norm,
dropout_rate=eprenet_dropout_rate,
padding_idx=self.padding_idx,
),
torch.nn.Linear(eprenet_conv_chans, adim),
)
else:
encoder_input_layer = torch.nn.Embedding(
num_embeddings=idim, embedding_dim=adim, padding_idx=self.padding_idx
)
self.encoder = Encoder(
idim=idim,
attention_dim=adim,
attention_heads=aheads,
linear_units=eunits,
num_blocks=elayers,
input_layer=encoder_input_layer,
dropout_rate=transformer_enc_dropout_rate,
positional_dropout_rate=transformer_enc_positional_dropout_rate,
attention_dropout_rate=transformer_enc_attn_dropout_rate,
pos_enc_class=pos_enc_class,
normalize_before=encoder_normalize_before,
concat_after=encoder_concat_after,
positionwise_layer_type=positionwise_layer_type,
positionwise_conv_kernel_size=positionwise_conv_kernel_size,
)
# define GST
if self.use_gst:
self.gst = StyleEncoder(
idim=odim, # the input is mel-spectrogram
gst_tokens=gst_tokens,
gst_token_dim=adim,
gst_heads=gst_heads,
conv_layers=gst_conv_layers,
conv_chans_list=gst_conv_chans_list,
conv_kernel_size=gst_conv_kernel_size,
conv_stride=gst_conv_stride,
gru_layers=gst_gru_layers,
gru_units=gst_gru_units,
)
# define spk and lang embedding
self.spks = None
if spks is not None and spks > 1:
self.spks = spks
self.sid_emb = torch.nn.Embedding(spks, adim)
self.langs = None
if langs is not None and langs > 1:
self.langs = langs
self.lid_emb = torch.nn.Embedding(langs, adim)
# define projection layer
self.spk_embed_dim = None
if spk_embed_dim is not None and spk_embed_dim > 0:
self.spk_embed_dim = spk_embed_dim
self.spk_embed_integration_type = spk_embed_integration_type
if self.spk_embed_dim is not None:
if self.spk_embed_integration_type == "add":
self.projection = torch.nn.Linear(self.spk_embed_dim, adim)
else:
self.projection = torch.nn.Linear(adim + self.spk_embed_dim, adim)
# define transformer decoder
if dprenet_layers != 0:
# decoder prenet
decoder_input_layer = torch.nn.Sequential(
DecoderPrenet(
idim=odim,
n_layers=dprenet_layers,
n_units=dprenet_units,
dropout_rate=dprenet_dropout_rate,
),
torch.nn.Linear(dprenet_units, adim),
)
else:
decoder_input_layer = "linear"
self.decoder = Decoder(
odim=odim, # odim is needed when no prenet is used
attention_dim=adim,
attention_heads=aheads,
linear_units=dunits,
num_blocks=dlayers,
dropout_rate=transformer_dec_dropout_rate,
positional_dropout_rate=transformer_dec_positional_dropout_rate,
self_attention_dropout_rate=transformer_dec_attn_dropout_rate,
src_attention_dropout_rate=transformer_enc_dec_attn_dropout_rate,
input_layer=decoder_input_layer,
use_output_layer=False,
pos_enc_class=pos_enc_class,
normalize_before=decoder_normalize_before,
concat_after=decoder_concat_after,
)
# define final projection
self.feat_out = torch.nn.Linear(adim, odim * reduction_factor)
self.prob_out = torch.nn.Linear(adim, reduction_factor)
# define postnet
self.postnet = (
None
if postnet_layers == 0
else Postnet(
idim=idim,
odim=odim,
n_layers=postnet_layers,
n_chans=postnet_chans,
n_filts=postnet_filts,
use_batch_norm=use_batch_norm,
dropout_rate=postnet_dropout_rate,
)
)
# define loss function
self.criterion = TransformerLoss(
use_masking=use_masking,
use_weighted_masking=use_weighted_masking,
bce_pos_weight=bce_pos_weight,
)
if self.use_guided_attn_loss:
self.attn_criterion = GuidedMultiHeadAttentionLoss(
sigma=guided_attn_loss_sigma,
alpha=guided_attn_loss_lambda,
)
# initialize parameters
self._reset_parameters(
init_type=init_type,
init_enc_alpha=init_enc_alpha,
init_dec_alpha=init_dec_alpha,
)
def _reset_parameters(self, init_type, init_enc_alpha=1.0, init_dec_alpha=1.0):
# initialize parameters
if init_type != "pytorch":
initialize(self, init_type)
# initialize alpha in scaled positional encoding
if self.use_scaled_pos_enc:
self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        feats: torch.Tensor,
        feats_lengths: torch.Tensor,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        joint_training: bool = False,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Calculate forward propagation.

        Args:
            text (LongTensor): Batch of padded character ids (B, Tmax).
            text_lengths (LongTensor): Batch of lengths of each input batch (B,).
            feats (Tensor): Batch of padded target features (B, Lmax, odim).
            feats_lengths (LongTensor): Batch of the lengths of each target (B,).
            spembs (Optional[Tensor]): Batch of speaker embeddings (B, spk_embed_dim).
            sids (Optional[Tensor]): Batch of speaker IDs (B, 1).
            lids (Optional[Tensor]): Batch of language IDs (B, 1).
            joint_training (bool): Whether to perform joint training with vocoder.

        Returns:
            Tensor: Loss scalar value.
            Dict: Statistics to be monitored.
            Tensor: Weight value if not joint training else model outputs.

        """
        # trim padding beyond the longest sequence in this batch
        text = text[:, : text_lengths.max()]  # for data-parallel
        feats = feats[:, : feats_lengths.max()]  # for data-parallel
        batch_size = text.size(0)

        # Add eos at the last of sequence (sequences grow by one token)
        xs = F.pad(text, [0, 1], "constant", self.padding_idx)
        for i, l in enumerate(text_lengths):
            xs[i, l] = self.eos
        ilens = text_lengths + 1

        ys = feats
        olens = feats_lengths

        # make labels for stop prediction: 1.0 from the last valid frame onward
        labels = make_pad_mask(olens - 1).to(ys.device, ys.dtype)
        labels = F.pad(labels, [0, 1], "constant", 1.0)

        # calculate transformer outputs
        after_outs, before_outs, logits = self._forward(
            xs=xs,
            ilens=ilens,
            ys=ys,
            olens=olens,
            spembs=spembs,
            sids=sids,
            lids=lids,
        )

        # modify mod part of groundtruth so targets align with the
        # reduction-factor-decimated decoder outputs
        olens_in = olens
        if self.reduction_factor > 1:
            assert olens.ge(
                self.reduction_factor
            ).all(), "Output length must be greater than or equal to reduction factor."
            olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
            max_olen = max(olens)
            ys = ys[:, :max_olen]
            labels = labels[:, :max_olen]
            # re-place the stop label on the (possibly shifted) final frame
            labels = torch.scatter(
                labels, 1, (olens - 1).unsqueeze(1), 1.0
            )  # see #3388

        # calculate loss values
        l1_loss, l2_loss, bce_loss = self.criterion(
            after_outs, before_outs, logits, ys, labels, olens
        )
        if self.loss_type == "L1":
            loss = l1_loss + bce_loss
        elif self.loss_type == "L2":
            loss = l2_loss + bce_loss
        elif self.loss_type == "L1+L2":
            loss = l1_loss + l2_loss + bce_loss
        else:
            raise ValueError("unknown --loss-type " + self.loss_type)

        stats = dict(
            l1_loss=l1_loss.item(),
            l2_loss=l2_loss.item(),
            bce_loss=bce_loss.item(),
        )

        # calculate guided attention loss
        # NOTE: layers are traversed in reverse so the *top* layers' attention
        # maps are regularized first; stop after num_layers_applied_guided_attn.
        if self.use_guided_attn_loss:
            # calculate for encoder
            if "encoder" in self.modules_applied_guided_attn:
                att_ws = []
                for idx, layer_idx in enumerate(
                    reversed(range(len(self.encoder.encoders)))
                ):
                    att_ws += [
                        self.encoder.encoders[layer_idx].self_attn.attn[
                            :, : self.num_heads_applied_guided_attn
                        ]
                    ]
                    if idx + 1 == self.num_layers_applied_guided_attn:
                        break
                att_ws = torch.cat(att_ws, dim=1)  # (B, H*L, T_text, T_text)
                enc_attn_loss = self.attn_criterion(att_ws, ilens, ilens)
                loss = loss + enc_attn_loss
                stats.update(enc_attn_loss=enc_attn_loss.item())
            # calculate for decoder
            if "decoder" in self.modules_applied_guided_attn:
                att_ws = []
                for idx, layer_idx in enumerate(
                    reversed(range(len(self.decoder.decoders)))
                ):
                    att_ws += [
                        self.decoder.decoders[layer_idx].self_attn.attn[
                            :, : self.num_heads_applied_guided_attn
                        ]
                    ]
                    if idx + 1 == self.num_layers_applied_guided_attn:
                        break
                att_ws = torch.cat(att_ws, dim=1)  # (B, H*L, T_feats, T_feats)
                dec_attn_loss = self.attn_criterion(att_ws, olens_in, olens_in)
                loss = loss + dec_attn_loss
                stats.update(dec_attn_loss=dec_attn_loss.item())
            # calculate for encoder-decoder
            if "encoder-decoder" in self.modules_applied_guided_attn:
                att_ws = []
                for idx, layer_idx in enumerate(
                    reversed(range(len(self.decoder.decoders)))
                ):
                    att_ws += [
                        self.decoder.decoders[layer_idx].src_attn.attn[
                            :, : self.num_heads_applied_guided_attn
                        ]
                    ]
                    if idx + 1 == self.num_layers_applied_guided_attn:
                        break
                att_ws = torch.cat(att_ws, dim=1)  # (B, H*L, T_feats, T_text)
                enc_dec_attn_loss = self.attn_criterion(att_ws, ilens, olens_in)
                loss = loss + enc_dec_attn_loss
                stats.update(enc_dec_attn_loss=enc_dec_attn_loss.item())

        # report extra information (learned positional-encoding scales)
        if self.use_scaled_pos_enc:
            stats.update(
                encoder_alpha=self.encoder.embed[-1].alpha.data.item(),
                decoder_alpha=self.decoder.embed[-1].alpha.data.item(),
            )

        if not joint_training:
            stats.update(loss=loss.item())
            loss, stats, weight = force_gatherable(
                (loss, stats, batch_size), loss.device
            )
            return loss, stats, weight
        else:
            return loss, stats, after_outs
    def _forward(
        self,
        xs: torch.Tensor,
        ilens: torch.Tensor,
        ys: torch.Tensor,
        olens: torch.Tensor,
        spembs: torch.Tensor,
        sids: torch.Tensor,
        lids: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Teacher-forced encoder/decoder computation shared by forward().

        Args:
            xs (LongTensor): Padded token ids with eos appended (B, T_text + 1).
            ilens (LongTensor): Input lengths including eos (B,).
            ys (Tensor): Padded target features (B, T_feats, odim).
            olens (LongTensor): Target lengths (B,).
            spembs (Tensor): Speaker embeddings (B, spk_embed_dim) or None.
            sids (Tensor): Speaker IDs (B, 1) or None.
            lids (Tensor): Language IDs (B, 1) or None.

        Returns:
            Tensor: Postnet-refined outputs (B, T_feats//r * r, odim).
            Tensor: Pre-postnet outputs (B, T_feats//r * r, odim).
            Tensor: Stop-token logits (B, T_feats//r * r).
        """
        # forward encoder
        x_masks = self._source_mask(ilens)
        hs, h_masks = self.encoder(xs, x_masks)

        # integrate with GST (style embedding broadcast over time)
        if self.use_gst:
            style_embs = self.gst(ys)
            hs = hs + style_embs.unsqueeze(1)

        # integrate with SID and LID embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)

        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)

        # thin out frames for reduction factor
        # (B, T_feats, odim) -> (B, T_feats//r, odim)
        # keeps every r-th frame (the last of each group of r)
        if self.reduction_factor > 1:
            ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
            olens_in = olens.new([olen // self.reduction_factor for olen in olens])
        else:
            ys_in, olens_in = ys, olens

        # add first zero frame and remove last frame for auto-regressive
        ys_in = self._add_first_frame_and_remove_last_frame(ys_in)

        # forward decoder
        y_masks = self._target_mask(olens_in)
        zs, _ = self.decoder(ys_in, y_masks, hs, h_masks)
        # (B, T_feats//r, odim * r) -> (B, T_feats//r * r, odim)
        before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
        # (B, T_feats//r, r) -> (B, T_feats//r * r)
        logits = self.prob_out(zs).view(zs.size(0), -1)

        # postnet -> (B, T_feats//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)

        return after_outs, before_outs, logits
    def inference(
        self,
        text: torch.Tensor,
        feats: Optional[torch.Tensor] = None,
        spembs: Optional[torch.Tensor] = None,
        sids: Optional[torch.Tensor] = None,
        lids: Optional[torch.Tensor] = None,
        threshold: float = 0.5,
        minlenratio: float = 0.0,
        maxlenratio: float = 10.0,
        use_teacher_forcing: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Generate the sequence of features given the sequences of characters.

        Args:
            text (LongTensor): Input sequence of characters (T_text,).
            feats (Optional[Tensor]): Feature sequence to extract style embedding
                (T_feats', idim).
            spembs (Optional[Tensor]): Speaker embedding (spk_embed_dim,).
            sids (Optional[Tensor]): Speaker ID (1,).
            lids (Optional[Tensor]): Language ID (1,).
            threshold (float): Threshold in inference.
            minlenratio (float): Minimum length ratio in inference.
            maxlenratio (float): Maximum length ratio in inference.
            use_teacher_forcing (bool): Whether to use teacher forcing.

        Returns:
            Dict[str, Tensor]: Output dict including the following items:
                * feat_gen (Tensor): Output sequence of features (T_feats, odim).
                * prob (Tensor): Output sequence of stop probabilities (T_feats,).
                * att_w (Tensor): Source attn weight (#layers, #heads, T_feats, T_text).

        """
        x = text
        y = feats
        spemb = spembs

        # add eos at the last of sequence
        x = F.pad(x, [0, 1], "constant", self.eos)

        # inference with teacher forcing: run the training-time path once
        if use_teacher_forcing:
            assert feats is not None, "feats must be provided with teacher forcing."

            # get teacher forcing outputs
            xs, ys = x.unsqueeze(0), y.unsqueeze(0)
            spembs = None if spemb is None else spemb.unsqueeze(0)
            ilens = x.new_tensor([xs.size(1)]).long()
            olens = y.new_tensor([ys.size(1)]).long()
            outs, *_ = self._forward(
                xs=xs,
                ilens=ilens,
                ys=ys,
                olens=olens,
                spembs=spembs,
                sids=sids,
                lids=lids,
            )

            # get attention weights
            att_ws = []
            for i in range(len(self.decoder.decoders)):
                att_ws += [self.decoder.decoders[i].src_attn.attn]
            att_ws = torch.stack(att_ws, dim=1)  # (B, L, H, T_feats, T_text)

            return dict(feat_gen=outs[0], att_w=att_ws[0])

        # forward encoder
        xs = x.unsqueeze(0)
        hs, _ = self.encoder(xs, None)

        # integrate GST
        if self.use_gst:
            style_embs = self.gst(y.unsqueeze(0))
            hs = hs + style_embs.unsqueeze(1)

        # integrate spk & lang embeddings
        if self.spks is not None:
            sid_embs = self.sid_emb(sids.view(-1))
            hs = hs + sid_embs.unsqueeze(1)
        if self.langs is not None:
            lid_embs = self.lid_emb(lids.view(-1))
            hs = hs + lid_embs.unsqueeze(1)

        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            spembs = spemb.unsqueeze(0)
            hs = self._integrate_with_spk_embed(hs, spembs)

        # set limits of length (in decoder steps, i.e. frames / r)
        maxlen = int(hs.size(1) * maxlenratio / self.reduction_factor)
        minlen = int(hs.size(1) * minlenratio / self.reduction_factor)

        # initialize with a single all-zero "go" frame
        idx = 0
        ys = hs.new_zeros(1, 1, self.odim)
        outs, probs = [], []

        # forward decoder step-by-step (incremental decoding with cache)
        z_cache = self.decoder.init_state(x)
        while True:
            # update index
            idx += 1

            # calculate output and stop prob at idx-th step
            y_masks = subsequent_mask(idx).unsqueeze(0).to(x.device)
            z, z_cache = self.decoder.forward_one_step(
                ys, y_masks, hs, cache=z_cache
            )  # (B, adim)
            outs += [
                self.feat_out(z).view(self.reduction_factor, self.odim)
            ]  # [(r, odim), ...]
            probs += [torch.sigmoid(self.prob_out(z))[0]]  # [(r), ...]

            # update next inputs: feed back only the last generated frame
            ys = torch.cat(
                (ys, outs[-1][-1].view(1, 1, self.odim)), dim=1
            )  # (1, idx + 1, odim)

            # get attention weights (accumulate the newest column per step)
            att_ws_ = []
            for name, m in self.named_modules():
                if isinstance(m, MultiHeadedAttention) and "src" in name:
                    att_ws_ += [m.attn[0, :, -1].unsqueeze(1)]  # [(#heads, 1, T),...]
            if idx == 1:
                att_ws = att_ws_
            else:
                # [(#heads, l, T), ...]
                att_ws = [
                    torch.cat([att_w, att_w_], dim=1)
                    for att_w, att_w_ in zip(att_ws, att_ws_)
                ]

            # check whether to finish generation (stop token fired or maxlen hit)
            if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
                # check mininum length
                if idx < minlen:
                    continue
                outs = (
                    torch.cat(outs, dim=0).unsqueeze(0).transpose(1, 2)
                )  # (T_feats, odim) -> (1, T_feats, odim) -> (1, odim, T_feats)
                if self.postnet is not None:
                    outs = outs + self.postnet(outs)  # (1, odim, T_feats)
                outs = outs.transpose(2, 1).squeeze(0)  # (T_feats, odim)
                probs = torch.cat(probs, dim=0)
                break

        # concatenate attention weights -> (#layers, #heads, T_feats, T_text)
        att_ws = torch.stack(att_ws, dim=0)

        return dict(feat_gen=outs, prob=probs, att_w=att_ws)
def _add_first_frame_and_remove_last_frame(self, ys: torch.Tensor) -> torch.Tensor:
ys_in = torch.cat(
[ys.new_zeros((ys.shape[0], 1, ys.shape[2])), ys[:, :-1]], dim=1
)
return ys_in
    def _source_mask(self, ilens):
        """Make masks for self-attention.

        Args:
            ilens (LongTensor): Batch of lengths (B,).

        Returns:
            Tensor: Mask tensor for self-attention (B, 1, Tmax).
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)

        Examples:
            >>> ilens = [5, 3]
            >>> self._source_mask(ilens)
            tensor([[[1, 1, 1, 1, 1]],
                    [[1, 1, 1, 0, 0]]], dtype=torch.uint8)

        """
        x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
        # unsqueeze(-2) adds the broadcast dim expected by attention: (B, 1, T)
        return x_masks.unsqueeze(-2)
    def _target_mask(self, olens: torch.Tensor) -> torch.Tensor:
        """Make masks for masked self-attention.

        Combines the non-padding mask with a lower-triangular (causal) mask so
        each decoder position can attend only to valid, non-future positions.

        Args:
            olens (LongTensor): Batch of lengths (B,).

        Returns:
            Tensor: Mask tensor for masked self-attention (B, Lmax, Lmax).
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)

        Examples:
            >>> olens = [5, 3]
            >>> self._target_mask(olens)
            tensor([[[1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0],
                     [1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 0],
                     [1, 1, 1, 1, 1]],
                    [[1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0],
                     [1, 1, 1, 0, 0],
                     [1, 1, 1, 0, 0],
                     [1, 1, 1, 0, 0]]], dtype=torch.uint8)

        """
        y_masks = make_non_pad_mask(olens).to(next(self.parameters()).device)
        s_masks = subsequent_mask(y_masks.size(-1), device=y_masks.device).unsqueeze(0)
        return y_masks.unsqueeze(-2) & s_masks
def _integrate_with_spk_embed(
self, hs: torch.Tensor, spembs: torch.Tensor
) -> torch.Tensor:
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, Tmax, adim).
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds and then apply projection
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
| 34,927 | 40.286052 | 88 | py |
espnet | espnet-master/espnet2/optimizers/sgd.py | import torch
from typeguard import check_argument_types
class SGD(torch.optim.SGD):
    """torch.optim.SGD with a default value bound for ``lr``.

    Optimizers instantiated by AbsTask.main() must provide default values for
    every argument except ``params``; plain ``torch.optim.SGD`` is the one
    stdlib optimizer that makes ``lr`` mandatory, so this thin subclass
    simply supplies a default for it.
    """

    def __init__(
        self,
        params,
        lr: float = 0.1,
        momentum: float = 0.0,
        dampening: float = 0.0,
        weight_decay: float = 0.0,
        nesterov: bool = False,
    ):
        assert check_argument_types()
        # Collect the hyperparameters and delegate to the base implementation.
        options = dict(
            lr=lr,
            momentum=momentum,
            dampening=dampening,
            weight_decay=weight_decay,
            nesterov=nesterov,
        )
        super().__init__(params, **options)
| 828 | 24.121212 | 79 | py |
espnet | espnet-master/espnet2/optimizers/optim_groups.py | # noqa: E501 This code is modified from: https://github.com/HazyResearch/state-spaces/blob/main/src/utils/optim_groups.py
import torch.nn as nn
def add_optimizer_hooks(
    model,
    bias_weight_decay=False,
    normalization_weight_decay=False,
):
    """Tag parameters that must be excluded from weight decay.

    Attaches ``p._optim = {"weight_decay": 0.0}`` to every parameter that
    should not be regularized: parameters with ``_no_weight_decay == True``,
    bias parameters (unless ``bias_weight_decay``), and parameters belonging
    to embedding/normalization modules (unless ``normalization_weight_decay``).

    See: https://discuss.pytorch.org/t/weight-decay-only-for-weights-of-nn-linear-and-nn-conv/114348 # noqa
    """
    # Module types whose parameters never receive weight decay.
    exempt_module_types = (nn.Embedding,)
    if not normalization_weight_decay:
        exempt_module_types += (
            nn.BatchNorm1d,
            nn.BatchNorm2d,
            nn.BatchNorm3d,
            # Not compatible with Pytorch 1.8.1
            # nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
            nn.GroupNorm,
            nn.SyncBatchNorm,
            nn.InstanceNorm1d,
            nn.InstanceNorm2d,
            nn.InstanceNorm3d,
            nn.LayerNorm,
            nn.LocalResponseNorm,
        )

    for _, module in model.named_modules():
        for param_name, param in module.named_parameters():
            is_exempt_bias = not bias_weight_decay and param_name.endswith("bias")
            opted_out = getattr(param, "_no_weight_decay", False)
            in_exempt_module = isinstance(module, exempt_module_types)
            if is_exempt_bias or opted_out or in_exempt_module:
                param._optim = {"weight_decay": 0.0}
def configure_optimizer(model, optim_class, optim_conf, weight_decay_conf):
    """Build an optimizer whose param groups honor per-parameter ``_optim`` hooks.

    Parameters tagged with an ``_optim`` dict (see ``add_optimizer_hooks``)
    are collected into dedicated parameter groups whose options override
    ``optim_conf``; the remaining parameters form the base group.

    Args:
        model: Model whose parameters are to be optimized.
        optim_class: Optimizer class to instantiate (e.g. ``torch.optim.Adam``).
        optim_conf: Keyword arguments for the optimizer (lr, weight_decay, ...).
        weight_decay_conf: Keyword arguments forwarded to ``add_optimizer_hooks``.

    Returns:
        The instantiated optimizer with one extra param group per distinct hook.
    """
    # Set zero weight decay for some params
    add_optimizer_hooks(
        model,
        **weight_decay_conf,
    )
    # Normal parameters
    all_params = list(model.parameters())
    params = [p for p in all_params if not hasattr(p, "_optim")]
    # Instantiate base optimizer
    optimizer = optim_class(params, **optim_conf)
    # Add parameters with special hyperparameters
    hps = [getattr(p, "_optim") for p in all_params if hasattr(p, "_optim")]
    # Deduplicate the hook dicts via hashable frozensets of their items, so
    # each distinct hyperparameter combination yields exactly one param group.
    hps = [
        dict(s)
        for s in sorted(list(dict.fromkeys(frozenset(hp.items()) for hp in hps)))
    ]  # Unique dicts
    for hp in hps:
        params = [p for p in all_params if getattr(p, "_optim", None) == hp]
        optimizer.add_param_group({"params": params, **optim_conf, **hp})
    return optimizer
| 2,534 | 34.208333 | 121 | py |
espnet | espnet-master/espnet2/asr_transducer/joint_network.py | """Transducer joint network implementation."""
import torch
from espnet2.asr_transducer.activation import get_activation
class JointNetwork(torch.nn.Module):
    """Transducer joint network module.

    Combines encoder and decoder state sequences through per-branch linear
    projections, a nonlinearity, and a final output projection.

    Args:
        output_size: Output size.
        encoder_size: Encoder output size.
        decoder_size: Decoder output size.
        joint_space_size: Joint space size.
        joint_activation_type: Type of activation for joint network.
        **activation_parameters: Parameters for the activation function.

    """

    def __init__(
        self,
        output_size: int,
        encoder_size: int,
        decoder_size: int,
        joint_space_size: int = 256,
        joint_activation_type: str = "tanh",
        **activation_parameters,
    ) -> None:
        """Construct a JointNetwork object."""
        super().__init__()

        self.lin_enc = torch.nn.Linear(encoder_size, joint_space_size)
        self.lin_dec = torch.nn.Linear(decoder_size, joint_space_size)
        self.lin_out = torch.nn.Linear(joint_space_size, output_size)

        self.joint_activation = get_activation(
            joint_activation_type, **activation_parameters
        )

    def forward(
        self,
        enc_out: torch.Tensor,
        dec_out: torch.Tensor,
        no_projection: bool = False,
    ) -> torch.Tensor:
        """Joint computation of encoder and decoder hidden state sequences.

        Args:
            enc_out: Expanded encoder output state sequences.
                       (B, T, s_range, D_enc) or (B, T, 1, D_enc)
            dec_out: Expanded decoder output state sequences.
                       (B, T, s_range, D_dec) or (B, 1, U, D_dec)
            no_projection: Whether the inputs are already projected to the
                joint space (skip lin_enc/lin_dec; requires D_enc == D_dec
                == joint_space_size).

        Returns:
            joint_out: Joint output state sequences.
                       (B, T, U, D_out) or (B, T, s_range, D_out)

        """
        if no_projection:
            joint_out = self.joint_activation(enc_out + dec_out)
        else:
            joint_out = self.joint_activation(
                self.lin_enc(enc_out) + self.lin_dec(dec_out)
            )
        return self.lin_out(joint_out)
| 2,097 | 29.405797 | 75 | py |
espnet | espnet-master/espnet2/asr_transducer/activation.py | """Activation functions for Transducer models."""
import torch
from packaging.version import parse as V
def get_activation(
    activation_type: str,
    ftswish_threshold: float = -0.2,
    ftswish_mean_shift: float = 0.0,
    # NOTE: these were previously annotated as int despite float defaults.
    hardtanh_min_val: float = -1.0,
    hardtanh_max_val: float = 1.0,
    leakyrelu_neg_slope: float = 0.01,
    smish_alpha: float = 1.0,
    smish_beta: float = 1.0,
    softplus_beta: float = 1.0,
    softplus_threshold: int = 20,
    swish_beta: float = 1.0,
) -> torch.nn.Module:
    """Return activation function.

    Args:
        activation_type: Activation function type.
        ftswish_threshold: Threshold value for FTSwish activation formulation.
        ftswish_mean_shift: Mean shifting value for FTSwish activation formulation.
        hardtanh_min_val: Minimum value of the linear region range for HardTanh.
        hardtanh_max_val: Maximum value of the linear region range for HardTanh.
        leakyrelu_neg_slope: Negative slope value for LeakyReLU activation formulation.
        smish_alpha: Alpha value for Smish activation fomulation.
        smish_beta: Beta value for Smish activation formulation.
        softplus_beta: Beta value for softplus activation formulation in Mish.
        softplus_threshold: Values above this revert to a linear function in Mish.
        swish_beta: Beta value for Swish variant formulation.

    Returns:
        : Activation function.

    Raises:
        KeyError: If ``activation_type`` is not a supported activation name.

    """
    # Some activations use the PyTorch builtin only when the installed torch
    # version provides it.
    torch_version = V(torch.__version__)

    activations = {
        "ftswish": (
            FTSwish,
            {"threshold": ftswish_threshold, "mean_shift": ftswish_mean_shift},
        ),
        "hardtanh": (
            torch.nn.Hardtanh,
            {"min_val": hardtanh_min_val, "max_val": hardtanh_max_val},
        ),
        "leaky_relu": (torch.nn.LeakyReLU, {"negative_slope": leakyrelu_neg_slope}),
        "mish": (
            Mish,
            {
                "softplus_beta": softplus_beta,
                "softplus_threshold": softplus_threshold,
                "use_builtin": torch_version >= V("1.9"),
            },
        ),
        "relu": (torch.nn.ReLU, {}),
        "selu": (torch.nn.SELU, {}),
        "smish": (Smish, {"alpha": smish_alpha, "beta": smish_beta}),
        "swish": (
            Swish,
            {"beta": swish_beta, "use_builtin": torch_version >= V("1.8")},
        ),
        "tanh": (torch.nn.Tanh, {}),
        "identity": (torch.nn.Identity, {}),
    }

    act_func, act_args = activations[activation_type]

    return act_func(**act_args)
class FTSwish(torch.nn.Module):
    """Flatten-T Swish activation definition.

    FTSwish(x) = x * sigmoid(x) + threshold , if x >= 0
                 threshold                  , if x <  0

    Reference: https://arxiv.org/abs/1812.06247

    Args:
        threshold: Threshold value for FTSwish activation formulation. (threshold < 0)
        mean_shift: Mean shifting value for FTSwish activation formulation.
                      (applied only if != 0, disabled by default)

    """

    def __init__(self, threshold: float = -0.2, mean_shift: float = 0) -> None:
        super().__init__()

        assert threshold < 0, "FTSwish threshold parameter should be < 0."

        self.threshold = threshold
        self.mean_shift = mean_shift

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward computation."""
        # Gate on the *input* sign (as in the paper), not on the shifted
        # activation value: positive inputs may legitimately map to values in
        # [threshold, 0), which the previous implementation clamped to the
        # threshold. full_like keeps the dtype/device of x.
        x = torch.where(
            x >= 0,
            x * torch.sigmoid(x) + self.threshold,
            torch.full_like(x, self.threshold),
        )

        if self.mean_shift != 0:
            # Safe in-place shift: torch.where returned a fresh tensor.
            x.sub_(self.mean_shift)

        return x
class Mish(torch.nn.Module):
    """Mish activation definition.

    Mish(x) = x * tanh(softplus(x))

    Reference: https://arxiv.org/abs/1908.08681.

    Args:
        softplus_beta: Beta value for softplus activation formulation.
               (Usually 0 > softplus_beta >= 2)
        softplus_threshold: Values above this revert to a linear function.
               (Usually 10 > softplus_threshold >= 20)
        use_builtin: Whether to use PyTorch activation function if available.

    """

    def __init__(
        self,
        softplus_beta: float = 1.0,
        softplus_threshold: int = 20,
        use_builtin: bool = False,
    ) -> None:
        """Construct a Mish object."""
        super().__init__()
        if not use_builtin:
            # Manual decomposition: x * tanh(softplus(x)).
            self.tanh = torch.nn.Tanh()
            self.softplus = torch.nn.Softplus(
                beta=softplus_beta, threshold=softplus_threshold
            )
            self.mish = self._mish_manual
        else:
            # Native implementation, available from PyTorch 1.9.
            self.mish = torch.nn.Mish()

    def _mish_manual(self, x: torch.Tensor) -> torch.Tensor:
        """Compute Mish from its Tanh/Softplus decomposition."""
        return x * self.tanh(self.softplus(x))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward computation."""
        return self.mish(x)
class Smish(torch.nn.Module):
    """Smish activation definition.

    Smish(x) = (alpha * x) * tanh(log(1 + sigmoid(beta * x)))
                 where alpha > 0 and beta > 0

    Reference: https://www.mdpi.com/2079-9292/11/4/540/htm.

    Args:
        alpha: Alpha value for Smish activation fomulation.
                 (Usually, alpha = 1. If alpha <= 0, set value to 1).
        beta: Beta value for Smish activation formulation.
                 (Usually, beta = 1. If beta <= 0, set value to 1).

    """

    def __init__(self, alpha: float = 1.0, beta: float = 1.0) -> None:
        """Construct a Smish object."""
        super().__init__()
        self.tanh = torch.nn.Tanh()
        # Non-positive coefficients are invalid; silently fall back to 1.
        self.alpha = 1 if alpha <= 0 else alpha
        self.beta = 1 if beta <= 0 else beta

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward computation."""
        gate = self.tanh(torch.log(1 + torch.sigmoid(self.beta * x)))
        return (self.alpha * x) * gate
class Swish(torch.nn.Module):
    """Swish activation definition.

    Swish(x) = (beta * x) * sigmoid(x)
                 where beta = 1 defines standard Swish activation.

    References:
        https://arxiv.org/abs/2108.12943 / https://arxiv.org/abs/1710.05941v1.
        E-swish variant: https://arxiv.org/abs/1801.07145.

    Args:
        beta: Beta parameter for E-Swish.
                (beta >= 1. If beta < 1, use standard Swish).
        use_builtin: Whether to use PyTorch function if available.

    """

    def __init__(self, beta: float = 1.0, use_builtin: bool = False) -> None:
        """Construct a Swish object."""
        super().__init__()
        self.beta = beta
        if beta > 1:
            # E-Swish variant: scale the identity branch by beta.
            self.swish = self._eswish
        elif use_builtin:
            # SiLU is the native PyTorch implementation of standard Swish.
            self.swish = torch.nn.SiLU()
        else:
            self.swish = self._standard

    def _eswish(self, x: torch.Tensor) -> torch.Tensor:
        """Compute the E-Swish variant: (beta * x) * sigmoid(x)."""
        return (self.beta * x) * torch.sigmoid(x)

    def _standard(self, x: torch.Tensor) -> torch.Tensor:
        """Compute standard Swish: x * sigmoid(x)."""
        return x * torch.sigmoid(x)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward computation."""
        return self.swish(x)
| 6,737 | 30.485981 | 87 | py |
espnet | espnet-master/espnet2/asr_transducer/utils.py | """Utility functions for Transducer models."""
from typing import List, Tuple, Union
import torch
class TooShortUttError(Exception):
    """Raised when the utt is too short for subsampling.

    Args:
        message: Error message to display.
        actual_size: The size that cannot pass the subsampling.
        limit: The size limit for subsampling.

    """

    def __init__(self, message: str, actual_size: int, limit: int) -> None:
        """Construct a TooShortUttError module."""
        # Keep the failing size and the required minimum so callers can report
        # or handle the error without parsing the message text.
        self.actual_size = actual_size
        self.limit = limit
        super().__init__(message)
def check_short_utt(sub_factor: int, size: int) -> Tuple[bool, int]:
    """Check if the input is too short for subsampling.

    Args:
        sub_factor: Subsampling factor for Conv2DSubsampling.
        size: Input size.

    Returns:
        : Whether an error should be sent.
        : Size limit for specified subsampling factor.

    """
    # Minimum accepted size and reported limit, per subsampling factor.
    # NOTE(review): factor 2 rejects size < 3 but reports a limit of 7,
    # mirroring the original behavior -- confirm whether 7 is intended.
    thresholds = {2: (3, 7), 4: (7, 7), 6: (11, 11)}
    if sub_factor in thresholds:
        minimum, limit = thresholds[sub_factor]
        if size < minimum:
            return True, limit
    return False, -1
def get_convinput_module_parameters(
    input_size: int,
    last_conv_size,
    subsampling_factor: int,
    is_vgg: bool = True,
) -> Tuple[Union[Tuple[int, int], int], int]:
    """Return the convolution module parameters.

    Args:
        input_size: Module input size.
        last_conv_size: Last convolution size for module output size computation.
        subsampling_factor: Total subsampling factor.
        is_vgg: Whether the module type is VGG-like.

    Returns:
        : First MaxPool2D kernel size or second Conv2d kernel size and stride.
        output_size: Convolution module output size.

    """
    if is_vgg:
        # VGG-like: each of the two conv stages halves the frequency axis.
        output_size = last_conv_size * (((input_size - 1) // 2 - 1) // 2)
        return subsampling_factor // 2, output_size
    # Conv2d input module: kernel size and stride of the second convolution.
    kernel, stride = {2: (3, 1), 4: (3, 2)}.get(subsampling_factor, (5, 3))
    output_size = last_conv_size * (
        ((input_size - 1) // 2 - (kernel - stride)) // stride
    )
    return (kernel, stride), output_size
def make_chunk_mask(
    size: int,
    chunk_size: int,
    num_left_chunks: int = 0,
    device: torch.device = None,
) -> torch.Tensor:
    """Create chunk mask for the subsequent steps (size, size).

    Reference: https://github.com/k2-fsa/icefall/blob/master/icefall/utils.py

    Args:
        size: Size of the source mask.
        chunk_size: Number of frames in chunk.
        num_left_chunks: Number of left chunks the attention module can see.
                           (null or negative value means full context)
        device: Device for the mask tensor.

    Returns:
        mask: Chunk mask. (size, size)

    """
    visible = torch.zeros(size, size, device=device, dtype=torch.bool)
    for row in range(size):
        chunk_idx = row // chunk_size
        # Attend up to the end of the current chunk...
        end = min((chunk_idx + 1) * chunk_size, size)
        # ...and back over at most `num_left_chunks` chunks (0/negative = all).
        if num_left_chunks <= 0:
            start = 0
        else:
            start = max((chunk_idx - num_left_chunks) * chunk_size, 0)
        visible[row, start:end] = True
    # Invert: True marks positions that must be masked out.
    return ~visible
def make_source_mask(lengths: torch.Tensor) -> torch.Tensor:
    """Create source mask for given lengths.

    Reference: https://github.com/k2-fsa/icefall/blob/master/icefall/utils.py

    Args:
        lengths: Sequence lengths. (B,)

    Returns:
        : Mask for the sequence lengths. (B, max_len)

    """
    num_seqs = lengths.size(0)
    positions = torch.arange(lengths.max()).expand(num_seqs, -1).to(lengths)
    # True marks padded positions beyond each sequence's length.
    return positions >= lengths.unsqueeze(1)
def get_transducer_task_io(
    labels: torch.Tensor,
    encoder_out_lens: torch.Tensor,
    ignore_id: int = -1,
    blank_id: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Get Transducer loss I/O.

    Args:
        labels: Label ID sequences. (B, L)
        encoder_out_lens: Encoder output lengths. (B,)
        ignore_id: Padding symbol ID.
        blank_id: Blank symbol ID.

    Returns:
        decoder_in: Decoder inputs. (B, U)
        target: Target label ID sequences. (B, U)
        t_len: Time lengths. (B,)
        u_len: Label lengths. (B,)

    """

    def _pad(seqs: List[torch.Tensor], pad_value: int) -> torch.Tensor:
        """Right-pad 1D label sequences into a single (B, U) batch tensor."""
        longest = max(s.size(0) for s in seqs)
        out = seqs[0].new_full((len(seqs), longest), pad_value)
        for row, seq in enumerate(seqs):
            out[row, : seq.size(0)] = seq
        return out

    device = labels.device
    # Drop the padding symbols from each label row.
    labels_unpad = [row[row != ignore_id] for row in labels]
    blank = labels[0].new([blank_id])
    # Decoder input is the label sequence preceded by the blank symbol.
    decoder_in = _pad(
        [torch.cat([blank, seq], dim=0) for seq in labels_unpad], blank_id
    ).to(device)
    target = _pad(labels_unpad, blank_id).type(torch.int32).to(device)
    t_len = torch.IntTensor([int(length) for length in encoder_out_lens]).to(device)
    u_len = torch.IntTensor([seq.size(0) for seq in labels_unpad]).to(device)
    return decoder_in, target, t_len, u_len
| 5,616 | 26.26699 | 85 | py |
espnet | espnet-master/espnet2/asr_transducer/beam_search_transducer.py | """Search algorithms for Transducer models."""
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from espnet2.asr_transducer.decoder.abs_decoder import AbsDecoder
from espnet2.asr_transducer.joint_network import JointNetwork
@dataclass
class Hypothesis:
    """Default hypothesis definition for Transducer search algorithms.

    Args:
        score: Total log-probability.
        yseq: Label sequence as integer ID sequence.
        dec_state: RNN/MEGA Decoder state (None if Stateless).
        lm_state: RNNLM state. ((N, D_lm), (N, D_lm)) or None

    """

    # Cumulative log-probability of this label sequence.
    score: float
    # Emitted label IDs; the search seeds each hypothesis with [0] (blank).
    yseq: List[int]
    # Decoder hidden state carried between expansion steps.
    dec_state: Optional[Tuple[torch.Tensor, Optional[torch.Tensor]]] = None
    # Language model state for shallow fusion (None when no LM is used).
    lm_state: Optional[Union[Dict[str, Any], List[Any]]] = None
@dataclass
class ExtendedHypothesis(Hypothesis):
    """Extended hypothesis definition for NSC beam search and mAES.

    Args:
        : Hypothesis dataclass arguments.
        dec_out: Decoder output sequence. (B, D_dec)
        lm_score: Log-probabilities of the LM for given label. (vocab_size)

    """

    # Cached decoder output for this hypothesis (None until first scoring).
    dec_out: Optional[torch.Tensor] = None
    # Cached LM log-probabilities over the vocabulary (None when no LM is used).
    lm_score: Optional[torch.Tensor] = None
class BeamSearchTransducer:
    """Beam search implementation for Transducer.

    Args:
        decoder: Decoder module.
        joint_network: Joint network module.
        beam_size: Size of the beam.
        lm: LM module.
        lm_weight: LM weight for soft fusion.
        search_type: Search algorithm to use during inference.
        max_sym_exp: Number of maximum symbol expansions at each time step. (TSD)
        u_max: Maximum expected target sequence length. (ALSD)
        nstep: Number of maximum expansion steps at each time step. (mAES)
        expansion_gamma: Allowed logp difference for prune-by-value method. (mAES)
        expansion_beta:
          Number of additional candidates for expanded hypotheses selection. (mAES)
        score_norm: Normalize final scores by length.
        nbest: Number of final hypothesis.
        streaming: Whether to perform chunk-by-chunk beam search.

    """

    def __init__(
        self,
        decoder: AbsDecoder,
        joint_network: JointNetwork,
        beam_size: int,
        lm: Optional[torch.nn.Module] = None,
        lm_weight: float = 0.1,
        search_type: str = "default",
        max_sym_exp: int = 3,
        u_max: int = 50,
        nstep: int = 2,
        expansion_gamma: float = 2.3,
        expansion_beta: int = 2,
        score_norm: bool = False,
        nbest: int = 1,
        streaming: bool = False,
    ) -> None:
        """Construct a BeamSearchTransducer object."""
        super().__init__()
        self.decoder = decoder
        self.joint_network = joint_network
        self.vocab_size = decoder.vocab_size
        assert beam_size <= self.vocab_size, (
            "beam_size (%d) should be smaller than or equal to vocabulary size (%d)."
            % (
                beam_size,
                self.vocab_size,
            )
        )
        self.beam_size = beam_size
        # Select the search algorithm and validate its specific hyper-parameters.
        if search_type == "default":
            self.search_algorithm = self.default_beam_search
        elif search_type == "tsd":
            assert max_sym_exp > 1, "max_sym_exp (%d) should be greater than one." % (
                max_sym_exp
            )
            self.max_sym_exp = max_sym_exp
            self.search_algorithm = self.time_sync_decoding
        elif search_type == "alsd":
            assert not streaming, "ALSD is not available in streaming mode."
            assert u_max >= 0, "u_max should be a positive integer, a portion of max_T."
            self.u_max = u_max
            self.search_algorithm = self.align_length_sync_decoding
        elif search_type == "maes":
            assert self.vocab_size >= beam_size + expansion_beta, (
                "beam_size (%d) + expansion_beta (%d) "
                " should be smaller than or equal to vocab size (%d)."
                % (beam_size, expansion_beta, self.vocab_size)
            )
            self.max_candidates = beam_size + expansion_beta
            self.nstep = nstep
            self.expansion_gamma = expansion_gamma
            self.search_algorithm = self.modified_adaptive_expansion_search
        else:
            raise NotImplementedError(
                "Specified search type (%s) is not supported." % search_type
            )
        self.use_lm = lm is not None
        if self.use_lm:
            # Only RNN-based LMs expose the incremental scoring interface used below.
            assert hasattr(lm, "rnn_type"), "Transformer LM is currently not supported."
            self.sos = self.vocab_size - 1
            self.lm = lm
            self.lm_weight = lm_weight
        self.score_norm = score_norm
        self.nbest = nbest
        self.reset_cache()

    def __call__(
        self,
        enc_out: torch.Tensor,
        is_final: bool = True,
    ) -> List[Hypothesis]:
        """Perform beam search.

        Args:
            enc_out: Encoder output sequence. (T, D_enc)
            is_final: Whether enc_out is the final chunk of data.

        Returns:
            nbest_hyps: N-best decoding results

        """
        self.decoder.set_device(enc_out.device)
        hyps = self.search_algorithm(enc_out)
        if is_final:
            # End of utterance: clear the streaming caches and return the n-best.
            self.reset_cache()
            return self.sort_nbest(hyps)
        # Streaming: keep the hypotheses to resume the search on the next chunk.
        self.search_cache = hyps
        return hyps

    def reset_cache(self) -> None:
        """Reset cache for streaming decoding."""
        self.decoder.score_cache = {}
        self.search_cache = None

    def sort_nbest(self, hyps: List[Hypothesis]) -> List[Hypothesis]:
        """Sort in-place hypotheses by score or score given sequence length.

        Args:
            hyps: Hypothesis.

        Return:
            hyps: Sorted hypothesis.

        """
        if self.score_norm:
            # Length-normalized score avoids a bias toward shorter sequences.
            hyps.sort(key=lambda x: x.score / len(x.yseq), reverse=True)
        else:
            hyps.sort(key=lambda x: x.score, reverse=True)
        return hyps[: self.nbest]

    def recombine_hyps(self, hyps: List[Hypothesis]) -> List[Hypothesis]:
        """Recombine hypotheses with same label ID sequence.

        Args:
            hyps: Hypotheses.

        Returns:
            final: Recombined hypotheses.

        """
        final = {}
        for hyp in hyps:
            str_yseq = "_".join(map(str, hyp.yseq))
            if str_yseq in final:
                # Same label sequence: merge probability mass in log-space.
                final[str_yseq].score = np.logaddexp(final[str_yseq].score, hyp.score)
            else:
                final[str_yseq] = hyp
        return [*final.values()]

    def select_k_expansions(
        self,
        hyps: List[ExtendedHypothesis],
        topk_idx: torch.Tensor,
        topk_logp: torch.Tensor,
    ) -> List[ExtendedHypothesis]:
        """Return K hypotheses candidates for expansion from a list of hypothesis.

        K candidates are selected according to the extended hypotheses
        probabilities and a prune-by-value method. Where K is equal to
        beam_size + beta.

        Args:
            hyps: Hypotheses.
            topk_idx: Indices of candidates hypothesis.
            topk_logp: Log-probabilities of candidates hypothesis.

        Returns:
            k_expansions: Best K expansion hypotheses candidates.

        """
        k_expansions = []
        for i, hyp in enumerate(hyps):
            hyp_i = [
                (int(k), hyp.score + float(v))
                for k, v in zip(topk_idx[i], topk_logp[i])
            ]
            k_best_exp = max(hyp_i, key=lambda x: x[1])[1]
            # Prune-by-value: keep candidates within expansion_gamma of the best.
            k_expansions.append(
                sorted(
                    filter(
                        lambda x: (k_best_exp - self.expansion_gamma) <= x[1], hyp_i
                    ),
                    key=lambda x: x[1],
                    reverse=True,
                )
            )
        return k_expansions

    def create_lm_batch_inputs(self, hyps_seq: List[List[int]]) -> torch.Tensor:
        """Make batch of inputs with left padding for LM scoring.

        Args:
            hyps_seq: Hypothesis sequences.

        Returns:
            : Padded batch of sequences.

        """
        max_len = max([len(h) for h in hyps_seq])
        # Left-pad with 0 and substitute SOS for the leading blank of each sequence.
        # NOTE(review): the legacy torch.LongTensor constructor only accepts a CPU
        # device -- this likely fails for CUDA decoding; consider
        # torch.LongTensor(...).to(self.decoder.device). Verify on GPU.
        return torch.LongTensor(
            [[self.sos] + ([0] * (max_len - len(h))) + h[1:] for h in hyps_seq],
            device=self.decoder.device,
        )

    def default_beam_search(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Beam search implementation without prefix search.

        Modified from https://arxiv.org/pdf/1211.3711.pdf

        Args:
            enc_out: Encoder output sequence. (T, D)

        Returns:
            nbest_hyps: N-best hypothesis.

        """
        # Candidates per expansion, excluding blank (hence vocab_size - 1).
        beam_k = min(self.beam_size, (self.vocab_size - 1))
        max_t = len(enc_out)
        if self.search_cache is not None:
            # Streaming: resume from the hypotheses of the previous chunk.
            kept_hyps = self.search_cache
        else:
            kept_hyps = [
                Hypothesis(
                    score=0.0,
                    yseq=[0],
                    dec_state=self.decoder.init_state(1),
                )
            ]
        for t in range(max_t):
            hyps = kept_hyps
            kept_hyps = []
            # Expand the most probable hypothesis until beam_size hypotheses
            # ending in blank outscore every remaining expansion candidate.
            while True:
                max_hyp = max(hyps, key=lambda x: x.score)
                hyps.remove(max_hyp)
                dec_out, state = self.decoder.score(
                    max_hyp.yseq,
                    max_hyp.dec_state,
                )
                logp = torch.log_softmax(
                    self.joint_network(enc_out[t : t + 1, :], dec_out),
                    dim=-1,
                ).squeeze(0)
                # Top-k over non-blank labels (index 0 is blank).
                top_k = logp[1:].topk(beam_k, dim=-1)
                # Blank emission: hypothesis is final for this time step.
                kept_hyps.append(
                    Hypothesis(
                        score=(max_hyp.score + float(logp[0:1])),
                        yseq=max_hyp.yseq,
                        dec_state=max_hyp.dec_state,
                        lm_state=max_hyp.lm_state,
                    )
                )
                if self.use_lm:
                    # NOTE(review): torch.LongTensor(..., device=...) only accepts
                    # a CPU device -- verify this path on GPU.
                    lm_scores, lm_state = self.lm.score(
                        torch.LongTensor(
                            [self.sos] + max_hyp.yseq[1:], device=self.decoder.device
                        ),
                        max_hyp.lm_state,
                        None,
                    )
                else:
                    lm_state = max_hyp.lm_state
                for logp, k in zip(*top_k):
                    score = max_hyp.score + float(logp)
                    if self.use_lm:
                        # Shallow fusion; k is shifted by one to skip blank.
                        score += self.lm_weight * lm_scores[k + 1]
                    hyps.append(
                        Hypothesis(
                            score=score,
                            yseq=max_hyp.yseq + [int(k + 1)],
                            dec_state=state,
                            lm_state=lm_state,
                        )
                    )
                hyps_max = float(max(hyps, key=lambda x: x.score).score)
                kept_most_prob = sorted(
                    [hyp for hyp in kept_hyps if hyp.score > hyps_max],
                    key=lambda x: x.score,
                )
                if len(kept_most_prob) >= self.beam_size:
                    kept_hyps = kept_most_prob
                    break
        return kept_hyps

    def align_length_sync_decoding(
        self,
        enc_out: torch.Tensor,
    ) -> List[Hypothesis]:
        """Alignment-length synchronous beam search implementation.

        Based on https://ieeexplore.ieee.org/document/9053040

        Args:
            h: Encoder output sequences. (T, D)

        Returns:
            nbest_hyps: N-best hypothesis.

        """
        t_max = int(enc_out.size(0))
        u_max = min(self.u_max, (t_max - 1))
        B = [Hypothesis(yseq=[0], score=0.0, dec_state=self.decoder.init_state(1))]
        final = []
        if self.use_lm:
            B[0].lm_state = self.lm.zero_state()
        # Iterate over alignment length i = t + u.
        for i in range(t_max + u_max):
            A = []
            B_ = []
            B_enc_out = []
            for hyp in B:
                u = len(hyp.yseq) - 1
                t = i - u
                if t > (t_max - 1):
                    continue
                B_.append(hyp)
                B_enc_out.append((t, enc_out[t]))
            if B_:
                beam_enc_out = torch.stack([b[1] for b in B_enc_out])
                beam_dec_out, beam_state = self.decoder.batch_score(B_)
                beam_logp = torch.log_softmax(
                    self.joint_network(beam_enc_out, beam_dec_out),
                    dim=-1,
                )
                beam_topk = beam_logp[:, 1:].topk(self.beam_size, dim=-1)
                if self.use_lm:
                    beam_lm_scores, beam_lm_states = self.lm.batch_score(
                        self.create_lm_batch_inputs([b.yseq for b in B_]),
                        [b.lm_state for b in B_],
                        None,
                    )
                # NOTE(review): the loop variable `i` below shadows the outer
                # alignment index `i`; intentional but easy to misread.
                for i, hyp in enumerate(B_):
                    # Blank expansion (label sequence unchanged).
                    new_hyp = Hypothesis(
                        score=(hyp.score + float(beam_logp[i, 0])),
                        yseq=hyp.yseq[:],
                        dec_state=hyp.dec_state,
                        lm_state=hyp.lm_state,
                    )
                    A.append(new_hyp)
                    if B_enc_out[i][0] == (t_max - 1):
                        # Reached the last frame with a blank: final hypothesis.
                        final.append(new_hyp)
                    # Non-blank expansions (labels shifted by one to skip blank).
                    for logp, k in zip(beam_topk[0][i], beam_topk[1][i] + 1):
                        new_hyp = Hypothesis(
                            score=(hyp.score + float(logp)),
                            yseq=(hyp.yseq[:] + [int(k)]),
                            dec_state=self.decoder.select_state(beam_state, i),
                            lm_state=hyp.lm_state,
                        )
                        if self.use_lm:
                            new_hyp.score += self.lm_weight * beam_lm_scores[i, k]
                            new_hyp.lm_state = beam_lm_states[i]
                        A.append(new_hyp)
                B = sorted(A, key=lambda x: x.score, reverse=True)[: self.beam_size]
                B = self.recombine_hyps(B)
        if final:
            return final
        return B

    def time_sync_decoding(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Time synchronous beam search implementation.

        Based on https://ieeexplore.ieee.org/document/9053040

        Args:
            enc_out: Encoder output sequence. (T, D)

        Returns:
            nbest_hyps: N-best hypothesis.

        """
        if self.search_cache is not None:
            # Streaming: resume from the hypotheses of the previous chunk.
            B = self.search_cache
        else:
            B = [
                Hypothesis(
                    yseq=[0],
                    score=0.0,
                    dec_state=self.decoder.init_state(1),
                )
            ]
            if self.use_lm:
                B[0].lm_state = self.lm.zero_state()
        for enc_out_t in enc_out:
            A = []
            C = B
            enc_out_t = enc_out_t.unsqueeze(0)
            # Allow up to max_sym_exp symbol expansions for this time step.
            for v in range(self.max_sym_exp):
                D = []
                beam_dec_out, beam_state = self.decoder.batch_score(C)
                beam_logp = torch.log_softmax(
                    self.joint_network(enc_out_t, beam_dec_out),
                    dim=-1,
                )
                beam_topk = beam_logp[:, 1:].topk(self.beam_size, dim=-1)
                seq_A = [h.yseq for h in A]
                for i, hyp in enumerate(C):
                    if hyp.yseq not in seq_A:
                        # Blank expansion: keep the hypothesis for the next frame.
                        A.append(
                            Hypothesis(
                                score=(hyp.score + float(beam_logp[i, 0])),
                                yseq=hyp.yseq[:],
                                dec_state=hyp.dec_state,
                                lm_state=hyp.lm_state,
                            )
                        )
                    else:
                        # Duplicate label sequence: merge scores in log-space.
                        dict_pos = seq_A.index(hyp.yseq)
                        A[dict_pos].score = np.logaddexp(
                            A[dict_pos].score, (hyp.score + float(beam_logp[i, 0]))
                        )
                if v < (self.max_sym_exp - 1):
                    if self.use_lm:
                        beam_lm_scores, beam_lm_states = self.lm.batch_score(
                            self.create_lm_batch_inputs([c.yseq for c in C]),
                            [c.lm_state for c in C],
                            None,
                        )
                    for i, hyp in enumerate(C):
                        # Non-blank expansions (labels shifted to skip blank).
                        for logp, k in zip(beam_topk[0][i], beam_topk[1][i] + 1):
                            new_hyp = Hypothesis(
                                score=(hyp.score + float(logp)),
                                yseq=(hyp.yseq + [int(k)]),
                                dec_state=self.decoder.select_state(beam_state, i),
                                lm_state=hyp.lm_state,
                            )
                            if self.use_lm:
                                new_hyp.score += self.lm_weight * beam_lm_scores[i, k]
                                new_hyp.lm_state = beam_lm_states[i]
                            D.append(new_hyp)
                C = sorted(D, key=lambda x: x.score, reverse=True)[: self.beam_size]
            B = sorted(A, key=lambda x: x.score, reverse=True)[: self.beam_size]
        return B

    def modified_adaptive_expansion_search(
        self,
        enc_out: torch.Tensor,
    ) -> List[ExtendedHypothesis]:
        """Modified version of Adaptive Expansion Search (mAES).

        Based on AES (https://ieeexplore.ieee.org/document/9250505) and
        NSC (https://arxiv.org/abs/2201.05420).

        Args:
            enc_out: Encoder output sequence. (T, D_enc)

        Returns:
            nbest_hyps: N-best hypothesis.

        """
        if self.search_cache is not None:
            # Streaming: resume from the hypotheses of the previous chunk.
            kept_hyps = self.search_cache
        else:
            # Seed the search with a blank hypothesis and pre-compute its
            # decoder output (and LM state, if any).
            init_tokens = [
                ExtendedHypothesis(
                    yseq=[0],
                    score=0.0,
                    dec_state=self.decoder.init_state(1),
                )
            ]
            beam_dec_out, beam_state = self.decoder.batch_score(
                init_tokens,
            )
            if self.use_lm:
                beam_lm_scores, beam_lm_states = self.lm.batch_score(
                    self.create_lm_batch_inputs([h.yseq for h in init_tokens]),
                    [h.lm_state for h in init_tokens],
                    None,
                )
                lm_state = beam_lm_states[0]
                lm_score = beam_lm_scores[0]
            else:
                lm_state = None
                lm_score = None
            kept_hyps = [
                ExtendedHypothesis(
                    yseq=[0],
                    score=0.0,
                    dec_state=self.decoder.select_state(beam_state, 0),
                    dec_out=beam_dec_out[0],
                    lm_state=lm_state,
                    lm_score=lm_score,
                )
            ]
        for enc_out_t in enc_out:
            hyps = kept_hyps
            kept_hyps = []
            beam_enc_out = enc_out_t.unsqueeze(0)
            # list_b collects hypotheses that emitted blank at this time step.
            list_b = []
            for n in range(self.nstep):
                beam_dec_out = torch.stack([h.dec_out for h in hyps])
                beam_logp, beam_idx = torch.log_softmax(
                    self.joint_network(beam_enc_out, beam_dec_out),
                    dim=-1,
                ).topk(self.max_candidates, dim=-1)
                k_expansions = self.select_k_expansions(hyps, beam_idx, beam_logp)
                list_exp = []
                for i, hyp in enumerate(hyps):
                    for k, new_score in k_expansions[i]:
                        new_hyp = ExtendedHypothesis(
                            yseq=hyp.yseq[:],
                            score=new_score,
                            dec_out=hyp.dec_out,
                            dec_state=hyp.dec_state,
                            lm_state=hyp.lm_state,
                            lm_score=hyp.lm_score,
                        )
                        if k == 0:
                            # Blank: no label emitted, hypothesis is kept as-is.
                            list_b.append(new_hyp)
                        else:
                            new_hyp.yseq.append(int(k))
                            if self.use_lm:
                                new_hyp.score += self.lm_weight * float(hyp.lm_score[k])
                            list_exp.append(new_hyp)
                if not list_exp:
                    # No non-blank expansion left: close this time step.
                    kept_hyps = sorted(
                        self.recombine_hyps(list_b), key=lambda x: x.score, reverse=True
                    )[: self.beam_size]
                    break
                else:
                    beam_dec_out, beam_state = self.decoder.batch_score(
                        list_exp,
                    )
                    if self.use_lm:
                        beam_lm_scores, beam_lm_states = self.lm.batch_score(
                            self.create_lm_batch_inputs([h.yseq for h in list_exp]),
                            [h.lm_state for h in list_exp],
                            None,
                        )
                    if n < (self.nstep - 1):
                        # More expansion steps remain: refresh cached states only.
                        for i, hyp in enumerate(list_exp):
                            hyp.dec_out = beam_dec_out[i]
                            hyp.dec_state = self.decoder.select_state(beam_state, i)
                            if self.use_lm:
                                hyp.lm_state = beam_lm_states[i]
                                hyp.lm_score = beam_lm_scores[i]
                        hyps = list_exp[:]
                    else:
                        # Last expansion step: add the blank probability so that
                        # expanded hypotheses are comparable with list_b.
                        beam_logp = torch.log_softmax(
                            self.joint_network(beam_enc_out, beam_dec_out),
                            dim=-1,
                        )
                        for i, hyp in enumerate(list_exp):
                            hyp.score += float(beam_logp[i, 0])
                            hyp.dec_out = beam_dec_out[i]
                            hyp.dec_state = self.decoder.select_state(beam_state, i)
                            if self.use_lm:
                                hyp.lm_state = beam_lm_states[i]
                                hyp.lm_score = beam_lm_scores[i]
                        kept_hyps = sorted(
                            self.recombine_hyps(list_b + list_exp),
                            key=lambda x: x.score,
                            reverse=True,
                        )[: self.beam_size]
        return kept_hyps
| 22,742 | 31.49 | 88 | py |
espnet | espnet-master/espnet2/asr_transducer/error_calculator.py | """Error Calculator module for Transducer."""
from typing import List, Optional, Tuple
import torch
from espnet2.asr_transducer.beam_search_transducer import BeamSearchTransducer
from espnet2.asr_transducer.decoder.abs_decoder import AbsDecoder
from espnet2.asr_transducer.joint_network import JointNetwork
class ErrorCalculator:
    """Calculate CER and WER for transducer models.

    Args:
        decoder: Decoder module.
        joint_network: Joint Network module.
        token_list: List of token units.
        sym_space: Space symbol.
        sym_blank: Blank symbol.
        nstep: Maximum number of symbol expansions at each time step w/ mAES.
        report_cer: Whether to compute CER.
        report_wer: Whether to compute WER.

    """

    def __init__(
        self,
        decoder: AbsDecoder,
        joint_network: JointNetwork,
        token_list: List[int],
        sym_space: str,
        sym_blank: str,
        nstep: int = 2,
        report_cer: bool = False,
        report_wer: bool = False,
    ) -> None:
        """Construct an ErrorCalculatorTransducer object."""
        super().__init__()
        # Validation decoding deliberately relies on the mAES algorithm (since
        # commit #8c9c851): it bounds the number of emitted symbols per time
        # step through `nstep`, matching the restriction imposed during training
        # with the k2 pruned Transducer loss, and it avoids the potentially long
        # decoding loops the default algorithm can run into.
        self.beam_search = BeamSearchTransducer(
            decoder=decoder,
            joint_network=joint_network,
            beam_size=2,
            search_type="maes",
            nstep=nstep,
            score_norm=False,
        )
        self.decoder = decoder
        self.token_list = token_list
        self.space = sym_space
        self.blank = sym_blank
        self.report_cer = report_cer
        self.report_wer = report_wer

    def __call__(
        self,
        encoder_out: torch.Tensor,
        target: torch.Tensor,
        encoder_out_lens: torch.Tensor,
    ) -> Tuple[Optional[float], Optional[float]]:
        """Calculate sentence-level WER or/and CER score for Transducer model.

        Args:
            encoder_out: Encoder output sequences. (B, T, D_enc)
            target: Target label ID sequences. (B, L)
            encoder_out_lens: Encoder output sequences length. (B,)

        Returns:
            : Sentence-level CER score.
            : Sentence-level WER score.

        """
        encoder_out = encoder_out.to(next(self.decoder.parameters()).device)
        # Decode each utterance and keep the best hypothesis, dropping the
        # leading blank token of its label sequence.
        batch_nbest = [
            self.beam_search(enc_seq[:length])
            for enc_seq, length in zip(encoder_out, encoder_out_lens)
        ]
        pred = [hyps[0].yseq[1:] for hyps in batch_nbest]
        char_pred, char_target = self.convert_to_char(pred, target)
        cer = self.calculate_cer(char_pred, char_target) if self.report_cer else None
        wer = self.calculate_wer(char_pred, char_target) if self.report_wer else None
        return cer, wer

    def convert_to_char(
        self, pred: torch.Tensor, target: torch.Tensor
    ) -> Tuple[List, List]:
        """Convert label ID sequences to character sequences.

        Args:
            pred: Prediction label ID sequences. (B, U)
            target: Target label ID sequences. (B, L)

        Returns:
            char_pred: Prediction character sequences. (B, ?)
            char_target: Target character sequences. (B, ?)

        """
        char_pred, char_target = [], []
        for pred_seq, target_seq in zip(pred, target):
            pred_str = "".join(self.token_list[int(t)] for t in pred_seq)
            target_str = "".join(self.token_list[int(t)] for t in target_seq)
            # Map the space symbol to an actual space and strip blank symbols.
            char_pred.append(pred_str.replace(self.space, " ").replace(self.blank, ""))
            char_target.append(
                target_str.replace(self.space, " ").replace(self.blank, "")
            )
        return char_pred, char_target

    def calculate_cer(
        self, char_pred: torch.Tensor, char_target: torch.Tensor
    ) -> float:
        """Calculate sentence-level CER score.

        Args:
            char_pred: Prediction character sequences. (B, ?)
            char_target: Target character sequences. (B, ?)

        Returns:
            : Average sentence-level CER score.

        """
        import editdistance

        distances, lens = [], []
        for hyp, ref in zip(char_pred, char_target):
            # Spaces are ignored for character-level scoring.
            hyp_chars = hyp.replace(" ", "")
            ref_chars = ref.replace(" ", "")
            distances.append(editdistance.eval(hyp_chars, ref_chars))
            lens.append(len(ref_chars))
        return float(sum(distances)) / sum(lens)

    def calculate_wer(
        self, char_pred: torch.Tensor, char_target: torch.Tensor
    ) -> float:
        """Calculate sentence-level WER score.

        Args:
            char_pred: Prediction character sequences. (B, ?)
            char_target: Target character sequences. (B, ?)

        Returns:
            : Average sentence-level WER score

        """
        import editdistance

        distances, lens = [], []
        for hyp, ref in zip(char_pred, char_target):
            # Sentencepiece word boundaries become spaces before splitting.
            hyp_words = hyp.replace("▁", " ").split()
            ref_words = ref.replace("▁", " ").split()
            distances.append(editdistance.eval(hyp_words, ref_words))
            lens.append(len(ref_words))
        return float(sum(distances)) / sum(lens)
| 6,162 | 30.932642 | 88 | py |
espnet | espnet-master/espnet2/asr_transducer/normalization.py | """Normalization modules for Transducer."""
from typing import Dict, Optional, Tuple
import torch
def get_normalization(
    normalization_type: str,
    eps: Optional[float] = None,
    partial: Optional[float] = None,
) -> Tuple[torch.nn.Module, Dict]:
    """Get normalization module and arguments given parameters.

    Args:
        normalization_type: Normalization module type.
        eps: Value added to the denominator.
        partial: Value defining the part of the input used for RMS stats (RMSNorm).

    Return:
        : Normalization module class
        : Normalization module arguments

    """
    # Each branch substitutes the module-specific default when eps/partial
    # are left unset.
    if normalization_type == "basic_norm":
        return BasicNorm, {"eps": 0.25 if eps is None else eps}
    if normalization_type == "layer_norm":
        return torch.nn.LayerNorm, {"eps": 1e-12 if eps is None else eps}
    if normalization_type == "rms_norm":
        return RMSNorm, {
            "eps": 1e-05 if eps is None else eps,
            "partial": -1.0 if partial is None else partial,
        }
    if normalization_type == "scale_norm":
        return ScaleNorm, {"eps": 1e-05 if eps is None else eps}
    # Match the original dict-lookup failure mode for unknown types.
    raise KeyError(normalization_type)
class BasicNorm(torch.nn.Module):
    """BasicNorm module definition.

    Reference: https://github.com/k2-fsa/icefall/pull/288

    Args:
        normalized_shape: Expected size.
        eps: Value added to the denominator for numerical stability.

    """

    def __init__(
        self,
        normalized_shape: int,
        eps: float = 0.25,
    ) -> None:
        """Construct a BasicNorm object."""
        super().__init__()
        # The epsilon is learned in log-space so its exponential stays positive.
        log_eps = torch.tensor(eps).log().detach()
        self.eps = torch.nn.Parameter(log_eps)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute basic normalization.

        Args:
            x: Input sequences. (B, T, D_hidden)

        Returns:
            : Output sequences. (B, T, D_hidden)

        """
        denom = torch.mean(x.pow(2), dim=-1, keepdim=True) + self.eps.exp()
        return x * denom**-0.5
class RMSNorm(torch.nn.Module):
    """RMSNorm module definition.

    Reference: https://arxiv.org/pdf/1910.07467.pdf

    Args:
        normalized_shape: Expected size.
        eps: Value added to the denominator for numerical stability.
        partial: Value defining the part of the input used for RMS stats.

    """

    def __init__(
        self,
        normalized_shape: int,
        eps: float = 1e-5,
        partial: float = 0.0,
    ) -> None:
        """Construct a RMSNorm object."""
        super().__init__()
        self.normalized_shape = normalized_shape
        # Partial RMSNorm (pRMSNorm) is only enabled for a ratio in (0, 1).
        self.partial = 0 < partial < 1
        self.p = partial
        self.eps = eps
        self.scale = torch.nn.Parameter(torch.ones(normalized_shape))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute RMS normalization.

        Args:
            x: Input sequences. (B, T, D_hidden)

        Returns:
            x: Output sequences. (B, T, D_hidden)

        """
        if self.partial:
            # RMS statistics are computed on a leading slice of the features.
            stat_dim = int(self.normalized_shape * self.p)
            norm_x = x[..., :stat_dim].norm(2, dim=-1, keepdim=True)
        else:
            stat_dim = self.normalized_shape
            norm_x = x.norm(2, dim=-1, keepdim=True)
        rms_x = norm_x / stat_dim**0.5
        return self.scale * (x / (rms_x + self.eps))
class ScaleNorm(torch.nn.Module):
    """ScaleNorm module definition.

    Reference: https://arxiv.org/pdf/1910.05895.pdf

    Args:
        normalized_shape: Expected size.
        eps: Value added to the denominator for numerical stability.

    """

    def __init__(self, normalized_shape: int, eps: float = 1e-5) -> None:
        """Construct a ScaleNorm object."""
        super().__init__()
        self.eps = eps
        # Single learnable scalar, initialized to sqrt(d) as in the paper.
        self.scale = torch.nn.Parameter(torch.tensor(normalized_shape**0.5))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute scale normalization.

        Args:
            x: Input sequences. (B, T, D_hidden)

        Returns:
            : Output sequences. (B, T, D_hidden)

        """
        # Clamp the L2 norm from below to avoid division by (near) zero.
        l2 = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
        return x * (self.scale / l2)
| 4,449 | 25.023392 | 87 | py |
espnet | espnet-master/espnet2/asr_transducer/espnet_transducer_model.py | """ESPnet2 ASR Transducer model."""
import logging
from contextlib import contextmanager
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr_transducer.decoder.abs_decoder import AbsDecoder
from espnet2.asr_transducer.encoder.encoder import Encoder
from espnet2.asr_transducer.joint_network import JointNetwork
from espnet2.asr_transducer.utils import get_transducer_task_io
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
# torch.cuda.amp.autocast is only available from PyTorch 1.6 onward; on older
# versions fall back to a no-op context manager so call sites stay unchanged.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:

    @contextmanager
    def autocast(enabled=True):
        # No-op stand-in: mixed precision is simply disabled.
        yield
class ESPnetASRTransducerModel(AbsESPnetModel):
    """ESPnetASRTransducerModel module definition.
    Args:
        vocab_size: Size of complete vocabulary (w/ SOS/EOS and blank included).
        token_list: List of tokens in vocabulary (minus reserved tokens).
        frontend: Frontend module.
        specaug: SpecAugment module.
        normalize: Normalization module.
        encoder: Encoder module.
        decoder: Decoder module.
        joint_network: Joint Network module.
        transducer_weight: Weight of the Transducer loss.
        use_k2_pruned_loss: Whether to use k2 pruned Transducer loss.
        k2_pruned_loss_args: Arguments of the k2 loss pruned Transducer loss.
        warmup_steps: Number of steps in warmup, used for pruned loss scaling.
        validation_nstep: Maximum number of symbol expansions at each time step
                          when reporting CER or/and WER using mAES.
        fastemit_lambda: FastEmit lambda value.
        auxiliary_ctc_weight: Weight of auxiliary CTC loss.
        auxiliary_ctc_dropout_rate: Dropout rate for auxiliary CTC loss inputs.
        auxiliary_lm_loss_weight: Weight of auxiliary LM loss.
        auxiliary_lm_loss_smoothing: Smoothing rate for LM loss' label smoothing.
        ignore_id: Initial padding ID.
        sym_space: Space symbol.
        sym_blank: Blank Symbol.
        report_cer: Whether to report Character Error Rate during validation.
        report_wer: Whether to report Word Error Rate during validation.
        extract_feats_in_collect_stats: Whether to use extract_feats stats collection.
    """

    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        encoder: Encoder,
        decoder: AbsDecoder,
        joint_network: JointNetwork,
        transducer_weight: float = 1.0,
        use_k2_pruned_loss: bool = False,
        k2_pruned_loss_args: Dict = {},
        warmup_steps: int = 25000,
        validation_nstep: int = 2,
        fastemit_lambda: float = 0.0,
        auxiliary_ctc_weight: float = 0.0,
        auxiliary_ctc_dropout_rate: float = 0.0,
        auxiliary_lm_loss_weight: float = 0.0,
        auxiliary_lm_loss_smoothing: float = 0.05,
        ignore_id: int = -1,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        report_cer: bool = False,
        report_wer: bool = False,
        extract_feats_in_collect_stats: bool = True,
    ) -> None:
        """Construct an ESPnetASRTransducerModel object."""
        super().__init__()

        assert check_argument_types()

        # The following labels ID are reserved:
        # - 0: Blank symbol.
        # - 1: Unknown symbol.
        # - vocab_size - 1: SOS/EOS symbol.
        self.vocab_size = vocab_size
        self.ignore_id = ignore_id
        self.token_list = token_list.copy()

        self.sym_space = sym_space
        self.sym_blank = sym_blank

        self.frontend = frontend
        self.specaug = specaug
        self.normalize = normalize

        self.encoder = encoder
        self.decoder = decoder
        self.joint_network = joint_network

        # Both are built lazily on first use (see _calc_transducer_loss and
        # the validation branch of forward) to avoid importing optional
        # dependencies at construction time.
        self.criterion_transducer = None
        self.error_calculator = None

        self.use_auxiliary_ctc = auxiliary_ctc_weight > 0
        self.use_auxiliary_lm_loss = auxiliary_lm_loss_weight > 0

        if use_k2_pruned_loss:
            # The k2 pruned loss first computes a "simple" loss from separate
            # AM/LM projections, then prunes the lattice around it
            # (see _calc_k2_transducer_pruned_loss).
            self.am_proj = torch.nn.Linear(
                encoder.output_size,
                vocab_size,
            )

            self.lm_proj = torch.nn.Linear(
                decoder.output_size,
                vocab_size,
            )

            self.warmup_steps = warmup_steps
            # Incremented at each pruned-loss computation; drives warmup scaling.
            self.steps_num = -1

            self.k2_pruned_loss_args = k2_pruned_loss_args
            self.k2_loss_type = k2_pruned_loss_args.get("loss_type", "regular")

        self.use_k2_pruned_loss = use_k2_pruned_loss

        if self.use_auxiliary_ctc:
            self.ctc_lin = torch.nn.Linear(encoder.output_size, vocab_size)
            self.ctc_dropout_rate = auxiliary_ctc_dropout_rate

        if self.use_auxiliary_lm_loss:
            self.lm_lin = torch.nn.Linear(decoder.output_size, vocab_size)

            # Label-smoothing weights used by _calc_lm_loss: `eps` mass on each
            # non-target label, the remaining mass on the target label.
            eps = auxiliary_lm_loss_smoothing / (vocab_size - 1)

            self.lm_loss_smooth_neg = eps
            self.lm_loss_smooth_pos = (1 - auxiliary_lm_loss_smoothing) + eps

        self.transducer_weight = transducer_weight
        self.fastemit_lambda = fastemit_lambda

        self.auxiliary_ctc_weight = auxiliary_ctc_weight
        self.auxiliary_lm_loss_weight = auxiliary_lm_loss_weight

        self.report_cer = report_cer
        self.report_wer = report_wer
        self.validation_nstep = validation_nstep

        self.extract_feats_in_collect_stats = extract_feats_in_collect_stats

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Forward architecture and compute loss(es).
        Args:
            speech: Speech sequences. (B, S)
            speech_lengths: Speech sequences lengths. (B,)
            text: Label ID sequences. (B, L)
            text_lengths: Label ID sequences lengths. (B,)
            kwargs: Contains "utts_id".
        Return:
            loss: Main loss value.
            stats: Task statistics.
            weight: Task weights.
        """
        assert text_lengths.dim() == 1, text_lengths.shape
        assert (
            speech.shape[0]
            == speech_lengths.shape[0]
            == text.shape[0]
            == text_lengths.shape[0]
        ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)

        batch_size = speech.shape[0]
        # Trim padding beyond the longest label sequence in the batch.
        text = text[:, : text_lengths.max()]

        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

        # 2. Transducer-related I/O preparation
        decoder_in, target, t_len, u_len = get_transducer_task_io(
            text,
            encoder_out_lens,
            ignore_id=self.ignore_id,
        )

        # 3. Decoder
        self.decoder.set_device(encoder_out.device)
        decoder_out = self.decoder(decoder_in)

        # 4. Joint Network and RNNT loss computation
        if self.use_k2_pruned_loss:
            loss_trans = self._calc_k2_transducer_pruned_loss(
                encoder_out, decoder_out, text, t_len, u_len, **self.k2_pruned_loss_args
            )
        else:
            # Full (unpruned) lattice: joint over every (time, label) pair.
            joint_out = self.joint_network(
                encoder_out.unsqueeze(2), decoder_out.unsqueeze(1)
            )

            loss_trans = self._calc_transducer_loss(
                encoder_out,
                joint_out,
                target,
                t_len,
                u_len,
            )

        # 5. Auxiliary losses
        loss_ctc, loss_lm = 0.0, 0.0

        if self.use_auxiliary_ctc:
            loss_ctc = self._calc_ctc_loss(
                encoder_out,
                target,
                t_len,
                u_len,
            )

        if self.use_auxiliary_lm_loss:
            loss_lm = self._calc_lm_loss(decoder_out, target)

        loss = (
            self.transducer_weight * loss_trans
            + self.auxiliary_ctc_weight * loss_ctc
            + self.auxiliary_lm_loss_weight * loss_lm
        )

        # 6. CER/WER computation.
        if not self.training and (self.report_cer or self.report_wer):
            if self.error_calculator is None:
                # Lazy import/build: only needed during validation reporting.
                from espnet2.asr_transducer.error_calculator import ErrorCalculator

                if self.use_k2_pruned_loss and self.k2_loss_type == "modified":
                    # "modified" topology allows one symbol per frame at most.
                    self.validation_nstep = 1

                self.error_calculator = ErrorCalculator(
                    self.decoder,
                    self.joint_network,
                    self.token_list,
                    self.sym_space,
                    self.sym_blank,
                    nstep=self.validation_nstep,
                    report_cer=self.report_cer,
                    report_wer=self.report_wer,
                )

            cer_transducer, wer_transducer = self.error_calculator(
                encoder_out, target, t_len
            )
        else:
            cer_transducer, wer_transducer = None, None

        stats = dict(
            loss=loss.detach(),
            loss_transducer=loss_trans.detach(),
            loss_aux_ctc=loss_ctc.detach() if loss_ctc > 0.0 else None,
            loss_aux_lm=loss_lm.detach() if loss_lm > 0.0 else None,
            cer_transducer=cer_transducer,
            wer_transducer=wer_transducer,
        )

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)

        return loss, stats, weight

    def collect_feats(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Collect features sequences and features lengths sequences.
        Args:
            speech: Speech sequences. (B, S)
            speech_lengths: Speech sequences lengths. (B,)
            text: Label ID sequences. (B, L)
            text_lengths: Label ID sequences lengths. (B,)
            kwargs: Contains "utts_id".
        Return:
            {}: "feats": Features sequences. (B, T, D_feats),
                "feats_lengths": Features sequences lengths. (B,)
        """
        if self.extract_feats_in_collect_stats:
            feats, feats_lengths = self._extract_feats(speech, speech_lengths)
        else:
            # Generate dummy stats if extract_feats_in_collect_stats is False
            logging.warning(
                "Generating dummy stats for feats and feats_lengths, "
                "because encoder_conf.extract_feats_in_collect_stats is "
                f"{self.extract_feats_in_collect_stats}"
            )

            feats, feats_lengths = speech, speech_lengths

        return {"feats": feats, "feats_lengths": feats_lengths}

    def encode(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encoder speech sequences.
        Args:
            speech: Speech sequences. (B, S)
            speech_lengths: Speech sequences lengths. (B,)
        Return:
            encoder_out: Encoder outputs. (B, T, D_enc)
            encoder_out_lens: Encoder outputs lengths. (B,)
        """
        # Feature extraction / augmentation run in fp32 even under AMP.
        with autocast(False):
            # 1. Extract feats
            feats, feats_lengths = self._extract_feats(speech, speech_lengths)

            # 2. Data augmentation
            if self.specaug is not None and self.training:
                feats, feats_lengths = self.specaug(feats, feats_lengths)

            # 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)

        # 4. Forward encoder
        encoder_out, encoder_out_lens = self.encoder(feats, feats_lengths)

        assert encoder_out.size(0) == speech.size(0), (
            encoder_out.size(),
            speech.size(0),
        )
        assert encoder_out.size(1) <= encoder_out_lens.max(), (
            encoder_out.size(),
            encoder_out_lens.max(),
        )

        return encoder_out, encoder_out_lens

    def _extract_feats(
        self, speech: torch.Tensor, speech_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract features sequences and features sequences lengths.
        Args:
            speech: Speech sequences. (B, S)
            speech_lengths: Speech sequences lengths. (B,)
        Return:
            feats: Features sequences. (B, T, D_feats)
            feats_lengths: Features sequences lengths. (B,)
        """
        assert speech_lengths.dim() == 1, speech_lengths.shape

        # for data-parallel
        speech = speech[:, : speech_lengths.max()]

        if self.frontend is not None:
            feats, feats_lengths = self.frontend(speech, speech_lengths)
        else:
            # Raw speech is used directly as "features" when no frontend is set.
            feats, feats_lengths = speech, speech_lengths

        return feats, feats_lengths

    def _calc_transducer_loss(
        self,
        encoder_out: torch.Tensor,
        joint_out: torch.Tensor,
        target: torch.Tensor,
        t_len: torch.Tensor,
        u_len: torch.Tensor,
    ) -> torch.Tensor:
        """Compute Transducer loss.
        Args:
            encoder_out: Encoder output sequences. (B, T, D_enc)
            joint_out: Joint Network output sequences (B, T, U, D_joint)
            target: Target label ID sequences. (B, L)
            t_len: Encoder output sequences lengths. (B,)
            u_len: Target label ID sequences lengths. (B,)
        Return:
            loss_transducer: Transducer loss value.
        """
        if self.criterion_transducer is None:
            # Lazily build the criterion so that warp-transducer is only
            # required when this loss path is actually used.
            try:
                from warprnnt_pytorch import RNNTLoss

                self.criterion_transducer = RNNTLoss(
                    reduction="mean",
                    fastemit_lambda=self.fastemit_lambda,
                )
            except ImportError:
                logging.error(
                    "warp-transducer was not installed. "
                    "Please consult the installation documentation."
                )
                exit(1)

        with autocast(False):
            # Loss is computed in fp32 for numerical stability under AMP.
            loss_transducer = self.criterion_transducer(
                joint_out.float(),
                target,
                t_len,
                u_len,
            )

        return loss_transducer

    def _calc_k2_transducer_pruned_loss(
        self,
        encoder_out: torch.Tensor,
        decoder_out: torch.Tensor,
        labels: torch.Tensor,
        encoder_out_len: torch.Tensor,
        decoder_out_len: torch.Tensor,
        prune_range: int = 5,
        simple_loss_scaling: float = 0.5,
        lm_scale: float = 0.0,
        am_scale: float = 0.0,
        loss_type: str = "regular",
        reduction: str = "mean",
        padding_idx: int = 0,
    ) -> torch.Tensor:
        """Compute k2 pruned Transducer loss.
        Args:
            encoder_out: Encoder output sequences. (B, T, D_enc)
            decoder_out: Decoder output sequences. (B, T, D_dec)
            labels: Label ID sequences. (B, L)
            encoder_out_len: Encoder output sequences lengths. (B,)
            decoder_out_len: Target label ID sequences lengths. (B,)
            prune_range: How many tokens by frame are used compute the pruned loss.
            simple_loss_scaling: The weight to scale the simple loss after warm-up.
            lm_scale: The scale factor to smooth the LM part.
            am_scale: The scale factor to smooth the AM part.
            loss_type: Define the type of path to take for loss computation.
                         (Either 'regular', 'smoothed' or 'constrained')
            reduction: Type of reduction applied to the loss. ('mean' or 'sum')
            padding_idx: SOS/EOS + Padding index.
        Return:
            loss_transducer: Transducer loss value.
        """
        try:
            import k2

            if self.fastemit_lambda > 0.0:
                logging.info(
                    "Disabling FastEmit, it is not available with k2 Transducer loss. "
                    "Please see delay_penalty option instead."
                )
        except ImportError:
            logging.error(
                "k2 was not installed. Please consult the installation documentation."
            )
            exit(1)

        # Note (b-flo): We use a dummy scaling scheme until the training parts are
        # revised (in a short future).
        self.steps_num += 1

        if self.steps_num < self.warmup_steps:
            # Ramp the pruned loss in (0.1 -> 1.0) while ramping the simple
            # loss down (1.0 -> simple_loss_scaling) over the warmup period.
            pruned_loss_scaling = 0.1 + 0.9 * (self.steps_num / self.warmup_steps)
            simple_loss_scaling = 1.0 - (
                (self.steps_num / self.warmup_steps) * (1.0 - simple_loss_scaling)
            )
        else:
            pruned_loss_scaling = 1.0

        labels_unpad = [y[y != self.ignore_id].tolist() for y in labels]

        target = k2.RaggedTensor(labels_unpad).to(decoder_out.device)
        target_padded = target.pad(mode="constant", padding_value=padding_idx)
        target_padded = target_padded.to(torch.int64)

        # NOTE(review): columns 2/3 hold the label and frame lengths; k2
        # interprets rows as [begin_sym, begin_frame, end_sym, end_frame] —
        # confirm against the k2 rnnt_loss documentation.
        boundary = torch.zeros(
            (encoder_out.size(0), 4),
            dtype=torch.int64,
            device=encoder_out.device,
        )
        boundary[:, 2] = decoder_out_len
        boundary[:, 3] = encoder_out_len

        lm = self.lm_proj(decoder_out)
        am = self.am_proj(encoder_out)

        with autocast(False):
            # Cheap "simple" loss whose gradients define the pruning bounds.
            simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
                lm.float(),
                am.float(),
                target_padded,
                padding_idx,
                lm_only_scale=lm_scale,
                am_only_scale=am_scale,
                boundary=boundary,
                rnnt_type=loss_type,
                reduction=reduction,
                return_grad=True,
            )

        ranges = k2.get_rnnt_prune_ranges(
            px_grad,
            py_grad,
            boundary,
            prune_range,
        )

        am_pruned, lm_pruned = k2.do_rnnt_pruning(
            self.joint_network.lin_enc(encoder_out),
            self.joint_network.lin_dec(decoder_out),
            ranges,
        )

        joint_out = self.joint_network(am_pruned, lm_pruned, no_projection=True)

        with autocast(False):
            pruned_loss = k2.rnnt_loss_pruned(
                joint_out.float(),
                target_padded,
                ranges,
                padding_idx,
                boundary,
                rnnt_type=loss_type,
                reduction=reduction,
            )

        loss_transducer = (
            simple_loss_scaling * simple_loss + pruned_loss_scaling * pruned_loss
        )

        return loss_transducer

    def _calc_ctc_loss(
        self,
        encoder_out: torch.Tensor,
        target: torch.Tensor,
        t_len: torch.Tensor,
        u_len: torch.Tensor,
    ) -> torch.Tensor:
        """Compute CTC loss.
        Args:
            encoder_out: Encoder output sequences. (B, T, D_enc)
            target: Target label ID sequences. (B, L)
            t_len: Encoder output sequences lengths. (B,)
            u_len: Target label ID sequences lengths. (B,)
        Return:
            loss_ctc: CTC loss value.
        """
        ctc_in = self.ctc_lin(
            torch.nn.functional.dropout(encoder_out, p=self.ctc_dropout_rate)
        )
        # ctc_loss expects (T, B, C) log-probabilities.
        ctc_in = torch.log_softmax(ctc_in.transpose(0, 1), dim=-1)

        # Concatenate non-padding labels into the 1-D target form expected by
        # torch's CTC loss. Assumes id 0 marks padding in `target` — see
        # get_transducer_task_io.
        target_mask = target != 0
        ctc_target = target[target_mask].cpu()

        with torch.backends.cudnn.flags(deterministic=True):
            loss_ctc = torch.nn.functional.ctc_loss(
                ctc_in,
                ctc_target,
                t_len,
                u_len,
                zero_infinity=True,
                reduction="sum",
            )
        loss_ctc /= target.size(0)

        return loss_ctc

    def _calc_lm_loss(
        self,
        decoder_out: torch.Tensor,
        target: torch.Tensor,
    ) -> torch.Tensor:
        """Compute LM loss (i.e.: Cross-entropy with smoothing).
        Args:
            decoder_out: Decoder output sequences. (B, U, D_dec)
            target: Target label ID sequences. (B, L)
        Return:
            loss_lm: LM loss value.
        """
        batch_size = decoder_out.size(0)

        # Drop the last decoder step so predictions align with the targets.
        logp = torch.log_softmax(
            self.lm_lin(decoder_out[:, :-1, :]).view(-1, self.vocab_size),
            dim=1,
        )
        target = target.view(-1).type(torch.int64)
        # Positions with id 0 are treated as padding and masked from the loss.
        ignore = (target == 0).unsqueeze(1)

        with torch.no_grad():
            # Smoothed one-hot distribution (see weights set in __init__).
            true_dist = logp.clone().fill_(self.lm_loss_smooth_neg)

            true_dist.scatter_(1, target.unsqueeze(1), self.lm_loss_smooth_pos)

        loss_lm = torch.nn.functional.kl_div(logp, true_dist, reduction="none")
        loss_lm = loss_lm.masked_fill(ignore, 0).sum() / batch_size

        return loss_lm
| 21,402 | 32.758675 | 88 | py |
espnet | espnet-master/espnet2/asr_transducer/frontend/online_audio_processor.py | """Online processor for Transducer models chunk-by-chunk streaming decoding."""
from typing import Dict, Tuple
import torch
class OnlineAudioProcessor:
    """OnlineProcessor module definition.
    Buffers raw samples and feature frames across calls so that features for a
    stream of audio chunks match offline feature extraction.
    Args:
        feature_extractor: Feature extractor module.
        normalization_module: Normalization module.
        decoding_window: Size of the decoding window (in ms).
        encoder_sub_factor: Encoder subsampling factor.
        frontend_conf: Frontend configuration.
        device: Device to pin module tensors on.
        audio_sampling_rate: Input sampling rate.
    """

    def __init__(
        self,
        feature_extractor: torch.nn.Module,
        normalization_module: torch.nn.Module,
        decoding_window: int,
        encoder_sub_factor: int,
        frontend_conf: Dict,
        device: torch.device,
        audio_sampling_rate: int = 16000,
    ) -> None:
        """Construct an OnlineAudioProcessor."""
        self.n_fft = frontend_conf.get("n_fft", 512)
        self.hop_sz = frontend_conf.get("hop_length", 128)
        # NOTE(review): key "win_sz" looks inconsistent with the usual frontend
        # config key "win_length" — confirm which key callers actually set.
        self.win_sz = frontend_conf.get("win_sz", self.n_fft)

        # Samples shared by two consecutive STFT frames.
        self.win_hop_sz = self.win_sz - self.hop_sz
        # Number of boundary feature frames trimmed from each chunk
        # (~ half the frames spanned by one analysis window, via
        # negative floor-division tricks) — presumably to drop frames computed
        # from partial context; confirm against the offline frontend.
        self.trim_val = (self.win_sz // -self.hop_sz) // -2

        self.decoding_samples = round(decoding_window * audio_sampling_rate / 1000)
        # Number of past feature frames kept as left context for the next chunk.
        self.offset_frames = 2 * encoder_sub_factor + 3

        self.feature_extractor = feature_extractor
        self.normalization_module = normalization_module

        self.device = device

        self.reset_cache()

    def reset_cache(self) -> None:
        """Reset cache parameters.
        Args:
            None
        Returns:
            None
        """
        # Raw-sample carry-over between calls (None until first chunk).
        self.samples = None
        self.samples_length = torch.zeros([1], dtype=torch.long, device=self.device)

        # Feature-frame left-context carry-over between calls.
        self.feats = None

    def get_current_samples(
        self, samples: torch.Tensor, is_final: bool
    ) -> torch.Tensor:
        """Get samples for feature computation.
        Args:
            samples: Speech data. (S)
            is_final: Whether speech corresponds to the final chunk of data.
        Returns:
            samples: New speech data. (1, decoding_samples)
        """
        # Prepend samples left over from the previous call, if any.
        if self.samples is not None:
            samples = torch.cat([self.samples, samples], dim=0)

        samples_sz = samples.size(0)

        if is_final:
            # Final chunk: consume everything; zero-pad up to the window size.
            waveform_buffer = None

            if samples_sz < self.decoding_samples:
                samples = torch.nn.functional.pad(
                    samples,
                    (0, self.decoding_samples - samples_sz),
                    mode="constant",
                    value=0.0,
                )
        else:
            # Keep only whole STFT frames for this chunk; buffer the residual
            # samples (plus the window/hop overlap) for the next call.
            n_frames = (samples_sz - self.win_hop_sz) // self.hop_sz
            n_residual = (samples_sz - self.win_hop_sz) % self.hop_sz

            waveform_buffer = samples.narrow(
                0,
                samples_sz - self.win_hop_sz - n_residual,
                self.win_hop_sz + n_residual,
            )

            samples = samples.narrow(0, 0, self.win_hop_sz + n_frames * self.hop_sz)

        self.samples = waveform_buffer

        samples = samples.unsqueeze(0).to(device=self.device)
        self.samples_length.fill_(samples.size(1))

        return samples

    def get_current_feats(
        self, feats: torch.Tensor, feats_length: torch.Tensor, is_final: bool
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Get features for current decoding window.
        Args:
            feats: Computed features sequence. (1, F, D_feats)
            feats_length: Computed features sequence length. (1,)
            is_final: Whether feats corresponds to the final chunk of data.
        Returns:
            feats: Decoding window features sequence. (1, chunk_sz_bs, D_feats)
            feats_length: Decoding window features length sequence. (1,)
        """
        if self.feats is not None:
            # Drop trim_val boundary frames: only the leading edge for the
            # final chunk, both edges otherwise.
            if is_final:
                feats = feats.narrow(1, self.trim_val, feats.size(1) - self.trim_val)
            else:
                feats = feats.narrow(
                    1, self.trim_val, feats.size(1) - 2 * self.trim_val
                )

            # Prepend the cached left-context frames from the previous call.
            feats = torch.cat((self.feats, feats), dim=1)
        else:
            # First chunk: only the trailing edge needs trimming.
            feats = feats.narrow(1, 0, feats.size(1) - self.trim_val)

        # Cache the last offset_frames frames as left context for next call.
        self.feats = feats[:, -self.offset_frames :, :]

        feats_length.fill_(feats.size(1))

        return feats, feats_length

    def compute_features(self, samples: torch.Tensor, is_final: bool) -> None:
        """Compute features from input samples.
        Args:
            samples: Speech data. (S)
            is_final: Whether speech corresponds to the final chunk of data.
        Returns:
            feats: Features sequence. (1, chunk_sz_bs, D_feats)
            feats_length: Features length sequence. (1,)
        """
        samples = self.get_current_samples(samples, is_final)

        feats, feats_length = self.feature_extractor(samples, self.samples_length)

        if self.normalization_module is not None:
            feats, feats_length = self.normalization_module(feats, feats_length)

        feats, feats_length = self.get_current_feats(feats, feats_length, is_final)

        return feats, feats_length
| 5,265 | 30.159763 | 85 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/building.py | """Set of methods to build Transducer encoder architecture."""
from typing import Any, Dict, List, Optional, Union
from espnet2.asr_transducer.activation import get_activation
from espnet2.asr_transducer.encoder.blocks.branchformer import Branchformer
from espnet2.asr_transducer.encoder.blocks.conformer import Conformer
from espnet2.asr_transducer.encoder.blocks.conv1d import Conv1d
from espnet2.asr_transducer.encoder.blocks.conv_input import ConvInput
from espnet2.asr_transducer.encoder.blocks.ebranchformer import EBranchformer
from espnet2.asr_transducer.encoder.modules.attention import ( # noqa: H301
RelPositionMultiHeadedAttention,
)
from espnet2.asr_transducer.encoder.modules.convolution import ( # noqa: H301
ConformerConvolution,
ConvolutionalSpatialGatingUnit,
DepthwiseConvolution,
)
from espnet2.asr_transducer.encoder.modules.multi_blocks import MultiBlocks
from espnet2.asr_transducer.encoder.modules.positional_encoding import ( # noqa: H301
RelPositionalEncoding,
)
from espnet2.asr_transducer.normalization import get_normalization
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
def build_main_parameters(
    pos_wise_act_type: str = "swish",
    conv_mod_act_type: str = "swish",
    pos_enc_dropout_rate: float = 0.0,
    pos_enc_max_len: int = 5000,
    simplified_att_score: bool = False,
    norm_type: str = "layer_norm",
    conv_mod_norm_type: str = "layer_norm",
    after_norm_eps: Optional[float] = None,
    after_norm_partial: Optional[float] = None,
    blockdrop_rate: float = 0.0,
    dynamic_chunk_training: bool = False,
    short_chunk_threshold: float = 0.75,
    short_chunk_size: int = 25,
    num_left_chunks: int = 0,
    **activation_parameters,
) -> Dict[str, Any]:
    """Build encoder main parameters.
    Args:
        pos_wise_act_type: X-former position-wise feed-forward activation type.
        conv_mod_act_type: X-former convolution module activation type.
        pos_enc_dropout_rate: Positional encoding dropout rate.
        pos_enc_max_len: Positional encoding maximum length.
        simplified_att_score: Whether to use simplified attention score computation.
        norm_type: X-former normalization module type.
        conv_mod_norm_type: Conformer convolution module normalization type.
        after_norm_eps: Epsilon value for the final normalization.
        after_norm_partial: Value for the final normalization with RMSNorm.
        blockdrop_rate: Probability threshold of dropping out each encoder block.
        dynamic_chunk_training: Whether to use dynamic chunk training.
        short_chunk_threshold: Threshold for dynamic chunk selection.
        short_chunk_size: Minimum number of frames during dynamic chunk training.
        num_left_chunks: Number of left chunks the attention module can see.
                           (null or negative value means full context)
        **activation_parameters: Parameters of the activation functions.
                                   (See espnet2/asr_transducer/activation.py)
    Returns:
        : Main encoder parameters
    """
    # Resolve activation and final-normalization callables up front.
    pos_wise_act = get_activation(pos_wise_act_type, **activation_parameters)
    conv_mod_act = get_activation(conv_mod_act_type, **activation_parameters)
    after_norm_class, after_norm_args = get_normalization(
        norm_type, eps=after_norm_eps, partial=after_norm_partial
    )

    # Negative chunk parameters are clamped to zero (i.e. disabled/full context).
    return {
        "pos_wise_act": pos_wise_act,
        "conv_mod_act": conv_mod_act,
        "pos_enc_dropout_rate": pos_enc_dropout_rate,
        "pos_enc_max_len": pos_enc_max_len,
        "simplified_att_score": simplified_att_score,
        "norm_type": norm_type,
        "conv_mod_norm_type": conv_mod_norm_type,
        "after_norm_class": after_norm_class,
        "after_norm_args": after_norm_args,
        "blockdrop_rate": blockdrop_rate,
        "dynamic_chunk_training": dynamic_chunk_training,
        "short_chunk_threshold": max(0, short_chunk_threshold),
        "short_chunk_size": max(0, short_chunk_size),
        "num_left_chunks": max(0, num_left_chunks),
    }
def build_positional_encoding(
    block_size: int, configuration: Dict[str, Any]
) -> RelPositionalEncoding:
    """Build positional encoding block.
    Args:
        block_size: Input/output size.
        configuration: Positional encoding configuration.
    Returns:
        : Positional encoding module.
    """
    dropout_rate = configuration.get("pos_enc_dropout_rate", 0.0)
    max_len = configuration.get("pos_enc_max_len", 5000)

    return RelPositionalEncoding(block_size, dropout_rate, max_len=max_len)
def build_input_block(
    input_size: int,
    configuration: Dict[str, Union[str, int]],
) -> ConvInput:
    """Build encoder input block.
    Args:
        input_size: Input size.
        configuration: Input block configuration.
    Returns:
        : ConvInput block module.
    """
    conv_size = configuration["conv_size"]
    subsampling_factor = configuration["subsampling_factor"]

    return ConvInput(
        input_size,
        conv_size,
        subsampling_factor,
        vgg_like=configuration["vgg_like"],
        output_size=configuration["output_size"],
    )
def build_branchformer_block(
    configuration: List[Dict[str, Any]],
    main_params: Dict[str, Any],
) -> Branchformer:
    """Build Branchformer block.
    Args:
        configuration: Branchformer block configuration.
        main_params: Encoder main parameters.
    Returns:
        : Function instantiating a Branchformer block.
    """
    hidden_size = configuration["hidden_size"]
    linear_size = configuration["linear_size"]
    dropout_rate = configuration.get("dropout_rate", 0.0)

    # Normalization used inside the convolutional spatial gating unit.
    csgu_norm_class, csgu_norm_args = get_normalization(
        main_params["conv_mod_norm_type"],
        eps=configuration.get("conv_mod_norm_eps"),
        partial=configuration.get("conv_mod_norm_partial"),
    )

    csgu_args = (
        linear_size,
        configuration["conv_mod_kernel_size"],
        csgu_norm_class,
        csgu_norm_args,
        dropout_rate,
        main_params["dynamic_chunk_training"],
    )

    attention_args = (
        configuration.get("heads", 4),
        hidden_size,
        configuration.get("att_dropout_rate", 0.0),
        main_params["simplified_att_score"],
    )

    # Normalization applied around the block itself.
    block_norm_class, block_norm_args = get_normalization(
        main_params["norm_type"],
        eps=configuration.get("norm_eps"),
        partial=configuration.get("norm_partial"),
    )

    def _instantiate() -> Branchformer:
        return Branchformer(
            hidden_size,
            linear_size,
            RelPositionMultiHeadedAttention(*attention_args),
            ConvolutionalSpatialGatingUnit(*csgu_args),
            norm_class=block_norm_class,
            norm_args=block_norm_args,
            dropout_rate=dropout_rate,
        )

    return _instantiate
def build_conformer_block(
    configuration: List[Dict[str, Any]],
    main_params: Dict[str, Any],
) -> Conformer:
    """Build Conformer block.
    Args:
        configuration: Conformer block configuration.
        main_params: Encoder main parameters.
    Returns:
        : Function instantiating a Conformer block.
    """
    hidden_size = configuration["hidden_size"]
    linear_size = configuration["linear_size"]

    # Shared arguments for both macaron-style feed-forward modules.
    ff_args = (
        hidden_size,
        linear_size,
        configuration.get("pos_wise_dropout_rate", 0.0),
        main_params["pos_wise_act"],
    )

    # BatchNorm parameters for the convolution module.
    batch_norm_args = {
        "eps": configuration.get("conv_mod_norm_eps", 1e-05),
        "momentum": configuration.get("conv_mod_norm_momentum", 0.1),
    }

    conv_args = (
        hidden_size,
        configuration["conv_mod_kernel_size"],
        main_params["conv_mod_act"],
        batch_norm_args,
        main_params["dynamic_chunk_training"],
    )

    attention_args = (
        configuration.get("heads", 4),
        hidden_size,
        configuration.get("att_dropout_rate", 0.0),
        main_params["simplified_att_score"],
    )

    block_norm_class, block_norm_args = get_normalization(
        main_params["norm_type"],
        eps=configuration.get("norm_eps"),
        partial=configuration.get("norm_partial"),
    )

    def _instantiate() -> Conformer:
        return Conformer(
            hidden_size,
            RelPositionMultiHeadedAttention(*attention_args),
            PositionwiseFeedForward(*ff_args),
            PositionwiseFeedForward(*ff_args),
            ConformerConvolution(*conv_args),
            norm_class=block_norm_class,
            norm_args=block_norm_args,
            dropout_rate=configuration.get("dropout_rate", 0.0),
        )

    return _instantiate
def build_conv1d_block(
    configuration: List[Dict[str, Any]],
    causal: bool,
) -> Conv1d:
    """Build Conv1d block.
    Args:
        configuration: Conv1d block configuration.
        causal: Whether to build a causal convolution block.
    Returns:
        : Function instantiating a Conv1d block.
    """

    def _instantiate() -> Conv1d:
        return Conv1d(
            configuration["input_size"],
            configuration["output_size"],
            configuration["kernel_size"],
            stride=configuration.get("stride", 1),
            dilation=configuration.get("dilation", 1),
            groups=configuration.get("groups", 1),
            bias=configuration.get("bias", True),
            relu=configuration.get("relu", True),
            batch_norm=configuration.get("batch_norm", False),
            causal=causal,
            dropout_rate=configuration.get("dropout_rate", 0.0),
        )

    return _instantiate
def build_ebranchformer_block(
    configuration: List[Dict[str, Any]],
    main_params: Dict[str, Any],
) -> EBranchformer:
    """Build E-Branchformer block.
    Args:
        configuration: E-Branchformer block configuration.
        main_params: Encoder main parameters.
    Returns:
        : Function instantiating an E-Branchformer block.
    """
    hidden_size = configuration["hidden_size"]
    linear_size = configuration["linear_size"]
    dropout_rate = configuration.get("dropout_rate", 0.0)

    # Shared arguments for both macaron-style feed-forward modules.
    ff_args = (
        hidden_size,
        linear_size,
        configuration.get("pos_wise_dropout_rate", 0.0),
        main_params["pos_wise_act"],
    )

    # Normalization used inside the convolutional spatial gating unit.
    csgu_norm_class, csgu_norm_args = get_normalization(
        main_params["conv_mod_norm_type"],
        eps=configuration.get("conv_mod_norm_eps"),
        partial=configuration.get("conv_mod_norm_partial"),
    )

    csgu_args = (
        linear_size,
        configuration["conv_mod_kernel_size"],
        csgu_norm_class,
        csgu_norm_args,
        dropout_rate,
        main_params["dynamic_chunk_training"],
    )

    attention_args = (
        configuration.get("heads", 4),
        hidden_size,
        configuration.get("att_dropout_rate", 0.0),
        main_params["simplified_att_score"],
    )

    # Depth-wise conv kernel defaults to the CSGU kernel size.
    depthwise_args = (
        hidden_size,
        configuration.get(
            "depth_conv_kernel_size", configuration["conv_mod_kernel_size"]
        ),
        main_params["dynamic_chunk_training"],
    )

    block_norm_class, block_norm_args = get_normalization(
        main_params["norm_type"],
        eps=configuration.get("norm_eps"),
        partial=configuration.get("norm_partial"),
    )

    def _instantiate() -> EBranchformer:
        return EBranchformer(
            hidden_size,
            linear_size,
            RelPositionMultiHeadedAttention(*attention_args),
            PositionwiseFeedForward(*ff_args),
            PositionwiseFeedForward(*ff_args),
            ConvolutionalSpatialGatingUnit(*csgu_args),
            DepthwiseConvolution(*depthwise_args),
            norm_class=block_norm_class,
            norm_args=block_norm_args,
            dropout_rate=dropout_rate,
        )

    return _instantiate
def build_body_blocks(
    configuration: List[Dict[str, Any]],
    main_params: Dict[str, Any],
    output_size: int,
) -> MultiBlocks:
    """Build encoder body blocks.
    Args:
        configuration: Body blocks configuration.
        main_params: Encoder main parameters.
        output_size: Architecture output size.
    Returns:
        MultiBlocks function encapsulation all encoder blocks.
    """
    # Expand entries carrying a "num_blocks" count into repeated entries
    # (the repeated entries intentionally share one configuration dict).
    expanded_conf = []
    for conf in configuration:
        repeats = conf.get("num_blocks")

        if repeats is None:
            expanded_conf.append(conf)
        else:
            stripped = {key: conf[key] for key in conf if key != "num_blocks"}
            expanded_conf.extend(repeats * [stripped])

    # Map each configuration entry to its block factory.
    fn_modules = []
    for conf in expanded_conf:
        block_type = conf["block_type"]

        if block_type == "branchformer":
            factory = build_branchformer_block(conf, main_params)
        elif block_type == "conformer":
            factory = build_conformer_block(conf, main_params)
        elif block_type == "conv1d":
            factory = build_conv1d_block(conf, main_params["dynamic_chunk_training"])
        elif block_type == "ebranchformer":
            factory = build_ebranchformer_block(conf, main_params)
        else:
            raise NotImplementedError

        fn_modules.append(factory)

    return MultiBlocks(
        [fn() for fn in fn_modules],
        output_size,
        norm_class=main_params["after_norm_class"],
        norm_args=main_params["after_norm_args"],
        blockdrop_rate=main_params["blockdrop_rate"],
    )
| 12,952 | 29.767221 | 86 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/encoder.py | """Encoder for Transducer model."""
from typing import Any, Dict, List, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr_transducer.encoder.building import (
build_body_blocks,
build_input_block,
build_main_parameters,
build_positional_encoding,
)
from espnet2.asr_transducer.encoder.validation import validate_architecture
from espnet2.asr_transducer.utils import (
TooShortUttError,
check_short_utt,
make_chunk_mask,
make_source_mask,
)
class Encoder(torch.nn.Module):
    """Encoder module definition.

    Wraps the input (subsampling) block, the relative positional encoding and
    the body blocks, and adds offline (`forward`) and streaming
    (`chunk_forward`) entry points.

    Args:
        input_size: Input size.
        body_conf: Encoder body configuration.
        input_conf: Encoder input configuration.
        main_conf: Encoder main configuration.

    """

    # NOTE(review): mutable default arguments ({}) are shared across calls;
    # safe only as long as the builders below never mutate them — confirm.
    def __init__(
        self,
        input_size: int,
        body_conf: List[Dict[str, Any]],
        input_conf: Dict[str, Any] = {},
        main_conf: Dict[str, Any] = {},
    ) -> None:
        """Construct an Encoder object."""
        super().__init__()
        assert check_argument_types()
        # Validate input/body configurations against each other and recover
        # the embedding (front-end output) size and final encoder output size.
        embed_size, output_size = validate_architecture(
            input_conf, body_conf, input_size
        )
        main_params = build_main_parameters(**main_conf)
        self.embed = build_input_block(input_size, input_conf)
        self.pos_enc = build_positional_encoding(embed_size, main_params)
        self.encoders = build_body_blocks(body_conf, main_params, output_size)
        self.output_size = output_size
        # Dynamic chunk training: during training, attention is restricted to
        # randomly sized chunks so the model can later run in streaming mode.
        self.dynamic_chunk_training = main_params["dynamic_chunk_training"]
        self.short_chunk_threshold = main_params["short_chunk_threshold"]
        self.short_chunk_size = main_params["short_chunk_size"]
        self.num_left_chunks = main_params["num_left_chunks"]

    def reset_cache(self, left_context: int, device: torch.device) -> None:
        """Initialize/Reset encoder cache for streaming.

        Args:
            left_context: Number of previous frames (AFTER subsampling) the attention
                          module can see in current chunk.
            device: Device ID.

        """
        return self.encoders.reset_streaming_cache(left_context, device)

    def forward(
        self,
        x: torch.Tensor,
        x_len: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode input sequences.

        Args:
            x: Encoder input features. (B, T_in, F)
            x_len: Encoder input features lengths. (B,)

        Returns:
            x: Encoder outputs. (B, T_out, D_enc)
            x_len: Encoder outputs lengths. (B,)

        Raises:
            TooShortUttError: If the input is too short for the subsampling
                factor of the input block.

        """
        short_status, limit_size = check_short_utt(
            self.embed.subsampling_factor, x.size(1)
        )
        if short_status:
            raise TooShortUttError(
                f"has {x.size(1)} frames and is too short for subsampling "
                + f"(it needs more than {limit_size} frames), return empty results",
                x.size(1),
                limit_size,
            )
        mask = make_source_mask(x_len)
        x, mask = self.embed(x, mask)
        pos_enc = self.pos_enc(x)
        if self.dynamic_chunk_training:
            max_len = x.size(1)
            # Sample a chunk size in [1, max_len). If it exceeds the threshold
            # fraction of the sequence, use full context; otherwise fold it
            # into the range [1, short_chunk_size].
            # NOTE(review): torch.randint(1, max_len, ...) requires
            # max_len > 1; a single subsampled frame would raise — confirm
            # upstream guarantees longer inputs.
            chunk_size = torch.randint(1, max_len, (1,)).item()
            if chunk_size > (max_len * self.short_chunk_threshold):
                chunk_size = max_len
            else:
                chunk_size = (chunk_size % self.short_chunk_size) + 1
            chunk_mask = make_chunk_mask(
                x.size(1),
                chunk_size,
                num_left_chunks=self.num_left_chunks,
                device=x.device,
            )
        else:
            chunk_mask = None
        x = self.encoders(
            x,
            pos_enc,
            mask,
            chunk_mask=chunk_mask,
        )
        # Output lengths: number of False (non-masked) entries per sequence.
        return x, mask.eq(0).sum(1)

    def chunk_forward(
        self,
        x: torch.Tensor,
        x_len: torch.Tensor,
        processed_frames: torch.Tensor,
        left_context: int = 32,
    ) -> torch.Tensor:
        """Encode input sequences as chunks.

        Args:
            x: Encoder input features. (1, T_in, F)
            x_len: Encoder input features lengths. (1,)
            processed_frames: Number of frames already seen.
            left_context: Number of previous frames (AFTER subsampling) the attention
                          module can see in current chunk.

        Returns:
            x: Encoder outputs. (B, T_out, D_enc)

        """
        mask = make_source_mask(x_len)
        x, mask = self.embed(x, mask)
        # Drop the first and last subsampled frame.
        # NOTE(review): presumably removes border frames affected by the
        # subsampling front-end's padding — confirm against the embed module.
        x = x[:, 1:-1, :]
        mask = mask[:, 1:-1]
        pos_enc = self.pos_enc(x, left_context=left_context)
        # Mask the part of the left-context window that has not been filled
        # yet (i.e. when fewer than `left_context` frames were processed).
        processed_mask = (
            torch.arange(left_context, device=x.device).view(1, left_context).flip(1)
        )
        processed_mask = processed_mask >= processed_frames
        mask = torch.cat([processed_mask, mask], dim=1)
        x = self.encoders.chunk_forward(
            x,
            pos_enc,
            mask,
            left_context=left_context,
        )
        return x
| 5,102 | 27.830508 | 85 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/modules/convolution.py | """Convolution modules for X-former blocks."""
from typing import Dict, Optional, Tuple
import torch
class ConformerConvolution(torch.nn.Module):
    """ConformerConvolution module definition.

    Pointwise conv (2x channels) -> GLU -> depthwise conv -> norm ->
    activation -> pointwise conv, with optional causal (streaming) padding.

    Args:
        channels: The number of channels.
        kernel_size: Size of the convolving kernel.
        activation: Activation function.
        norm_args: Normalization module arguments.
        causal: Whether to use causal convolution (set to True if streaming).

    """

    def __init__(
        self,
        channels: int,
        kernel_size: int,
        activation: torch.nn.Module = torch.nn.ReLU(),
        norm_args: Dict = {},
        causal: bool = False,
    ) -> None:
        """Construct an ConformerConvolution object."""
        super().__init__()
        # An odd kernel size is required for symmetric 'same' padding.
        assert (kernel_size - 1) % 2 == 0
        self.kernel_size = kernel_size
        # Channels are doubled here; the GLU in forward() halves them back.
        self.pointwise_conv1 = torch.nn.Conv1d(
            channels,
            2 * channels,
            kernel_size=1,
            stride=1,
            padding=0,
        )
        if causal:
            # Causal: manually pad (kernel_size - 1) frames on the left only,
            # so no future frame influences the current output.
            self.lorder = kernel_size - 1
            padding = 0
        else:
            self.lorder = 0
            padding = (kernel_size - 1) // 2
        self.depthwise_conv = torch.nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=padding,
            groups=channels,
        )
        self.norm = torch.nn.BatchNorm1d(channels, **norm_args)
        self.pointwise_conv2 = torch.nn.Conv1d(
            channels,
            channels,
            kernel_size=1,
            stride=1,
            padding=0,
        )
        self.activation = activation

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        cache: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute convolution module.

        Args:
            x: ConformerConvolution input sequences. (B, T, D_hidden)
            mask: Source mask. (B, T_2)
            cache: ConformerConvolution input cache. (1, D_hidden, conv_kernel)

        Returns:
            x: ConformerConvolution output sequences. (B, ?, D_hidden)
            cache: Updated cache in the streaming path (cache given);
                   otherwise the input cache value, unchanged.

        """
        # Work channel-first: (B, T, D) -> (B, 2 * D, T); GLU halves channels.
        x = self.pointwise_conv1(x.transpose(1, 2))
        x = torch.nn.functional.glu(x, dim=1)
        if mask is not None:
            # Zero out padded frames so they do not leak into the convolution.
            x.masked_fill_(mask.unsqueeze(1).expand_as(x), 0.0)
        if self.lorder > 0:
            if cache is None:
                # Offline causal mode: left-pad with zeros.
                x = torch.nn.functional.pad(x, (self.lorder, 0), "constant", 0.0)
            else:
                # Streaming mode: prepend cached frames and keep the last
                # (kernel_size - 1) frames as next chunk's cache.
                x = torch.cat([cache, x], dim=2)
                cache = x[..., -self.lorder :]
        x = self.depthwise_conv(x)
        x = self.activation(self.norm(x))
        x = self.pointwise_conv2(x).transpose(1, 2)
        return x, cache
class ConvolutionalSpatialGatingUnit(torch.nn.Module):
    """Convolutional Spatial Gating Unit module definition.

    Splits the input along the feature axis into a residual half and a gating
    half; the gating half is normalized, depthwise-convolved and used to gate
    the residual half elementwise.

    Args:
        size: Initial size to determine the number of channels.
        kernel_size: Size of the convolving kernel.
        norm_class: Normalization module class.
        norm_args: Normalization module arguments.
        dropout_rate: Dropout rate.
        causal: Whether to use causal convolution (set to True if streaming).

    """

    def __init__(
        self,
        size: int,
        kernel_size: int,
        norm_class: torch.nn.Module = torch.nn.LayerNorm,
        norm_args: Dict = {},
        dropout_rate: float = 0.0,
        causal: bool = False,
    ) -> None:
        """Construct a ConvolutionalSpatialGatingUnit object."""
        super().__init__()
        # Each half of the chunked input carries size // 2 channels.
        channels = size // 2
        self.kernel_size = kernel_size
        if causal:
            # Causal: left-pad (kernel_size - 1) frames, no future context.
            self.lorder = kernel_size - 1
            padding = 0
        else:
            self.lorder = 0
            padding = (kernel_size - 1) // 2
        self.conv = torch.nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=padding,
            groups=channels,
        )
        self.norm = norm_class(channels, **norm_args)
        # Linear gating: Identity is used as a placeholder activation.
        self.activation = torch.nn.Identity()
        self.dropout = torch.nn.Dropout(dropout_rate)

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        cache: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute convolution module.

        Args:
            x: ConvolutionalSpatialGatingUnit input sequences. (B, T, D_hidden)
            mask: Source mask. (B, T_2)
            cache: ConvolutionalSpationGatingUnit input cache.
                   (1, D_hidden, conv_kernel)

        Returns:
            x: ConvolutionalSpatialGatingUnit output sequences. (B, ?, D_hidden)
            cache: Updated cache in the streaming path (cache given);
                   otherwise the input cache value, unchanged.

        """
        # x_r: residual half, x_g: gating half.
        x_r, x_g = x.chunk(2, dim=-1)
        x_g = self.norm(x_g).transpose(1, 2)
        if mask is not None:
            # Zero out padded frames before the convolution.
            x_g.masked_fill_(mask.unsqueeze(1).expand_as(x_g), 0.0)
        if self.lorder > 0:
            if cache is None:
                x_g = torch.nn.functional.pad(x_g, (self.lorder, 0), "constant", 0.0)
            else:
                # Streaming: prepend cached frames, keep tail as new cache.
                x_g = torch.cat([cache, x_g], dim=2)
                cache = x_g[..., -self.lorder :]
        x_g = self.conv(x_g).transpose(1, 2)
        x = self.dropout(x_r * self.activation(x_g))
        return x, cache
class DepthwiseConvolution(torch.nn.Module):
    """Depth-wise Convolution module definition.

    Args:
        size: Initial size to determine the number of channels.
        kernel_size: Size of the convolving kernel.
        causal: Whether to use causal convolution (set to True if streaming).

    """

    def __init__(
        self,
        size: int,
        kernel_size: int,
        causal: bool = False,
    ) -> None:
        """Construct a DepthwiseConvolution object."""
        super().__init__()
        # The expected input carries 2 * size channels — presumably the
        # concatenation of two branches. NOTE(review): confirm with the
        # E-Branchformer caller.
        channels = size + size
        self.kernel_size = kernel_size
        if causal:
            # Causal: left-pad (kernel_size - 1) frames, no future context.
            self.lorder = kernel_size - 1
            padding = 0
        else:
            self.lorder = 0
            padding = (kernel_size - 1) // 2
        self.conv = torch.nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=padding,
            groups=channels,
        )

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        cache: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute convolution module.

        Args:
            x: DepthwiseConvolution input sequences. (B, T, D_hidden)
            mask: Source mask. (B, T_2)
            cache: DepthwiseConvolution input cache. (1, conv_kernel, D_hidden)

        Returns:
            x: DepthwiseConvolution output sequences. (B, ?, D_hidden)
            cache: Updated cache in the streaming path (cache given);
                   otherwise the input cache value, unchanged.

        """
        x = x.transpose(1, 2)
        if mask is not None:
            # Zero out padded frames before the convolution.
            x.masked_fill_(mask.unsqueeze(1).expand_as(x), 0.0)
        if self.lorder > 0:
            if cache is None:
                x = torch.nn.functional.pad(x, (self.lorder, 0), "constant", 0.0)
            else:
                # Streaming: prepend cached frames, keep tail as new cache.
                x = torch.cat([cache, x], dim=2)
                cache = x[..., -self.lorder :]
        x = self.conv(x).transpose(1, 2)
        return x, cache
| 7,416 | 26.675373 | 85 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/modules/multi_blocks.py | """MultiBlocks for encoder architecture."""
from typing import Dict, List, Optional
import torch
class MultiBlocks(torch.nn.Module):
    """MultiBlocks definition.

    Holds the sequence of encoder body blocks, applies stochastic depth
    (block dropout) during training, and normalizes the final output.

    Args:
        block_list: Individual blocks of the encoder architecture.
        output_size: Architecture output size.
        norm_class: Normalization module class.
        norm_args: Normalization module arguments. If None, the normalization
            module's own defaults are used.
        blockdrop_rate: Probability threshold of dropping out each block.

    """

    def __init__(
        self,
        block_list: List[torch.nn.Module],
        output_size: int,
        norm_class: torch.nn.Module = torch.nn.LayerNorm,
        norm_args: Optional[Dict] = None,
        blockdrop_rate: float = 0.0,
    ) -> None:
        """Construct a MultiBlocks object."""
        super().__init__()

        self.blocks = torch.nn.ModuleList(block_list)
        # Fix: the default norm_args=None previously crashed with a TypeError
        # when unpacked (`**None`); fall back to an empty mapping instead.
        self.norm_blocks = norm_class(output_size, **(norm_args or {}))

        self.blockdrop_rate = blockdrop_rate
        # Linear depth scaling: deeper blocks get a higher effective drop
        # threshold (see forward()).
        self.blockdrop_decay = 1.0 / len(self.blocks)
        # keep_probs[-1] is never resampled and stays 1.0, so the last block
        # is always executed.
        self.keep_probs = torch.ones(len(self.blocks))

    def reset_streaming_cache(self, left_context: int, device: torch.device) -> None:
        """Initialize/Reset encoder streaming cache.

        Args:
            left_context: Number of previous frames the attention module can see
                in current chunk (used by Conformer and Branchformer block).
            device: Device to use for cache tensor.

        """
        for block in self.blocks:
            block.reset_streaming_cache(left_context, device)

    def forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        chunk_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward each block of the encoder architecture.

        Args:
            x: MultiBlocks input sequences. (B, T, D_block_1)
            pos_enc: Positional embedding sequences.
            mask: Source mask. (B, T)
            chunk_mask: Chunk mask. (T_2, T_2)

        Returns:
            x: Output sequences. (B, T, D_block_N)

        """
        # Resample keep probabilities for all blocks except the last one.
        self.keep_probs[:-1].uniform_()

        for idx, block in enumerate(self.blocks):
            # At inference every block runs; during training a block is
            # skipped when its sampled keep probability falls below the
            # depth-scaled drop threshold (stochastic depth).
            if not self.training or (
                self.keep_probs[idx]
                >= (self.blockdrop_rate * (self.blockdrop_decay * idx))
            ):
                x, mask, pos_enc = block(x, pos_enc, mask, chunk_mask=chunk_mask)

        x = self.norm_blocks(x)

        return x

    def chunk_forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        left_context: int = 0,
    ) -> torch.Tensor:
        """Forward each block of the encoder architecture.

        Args:
            x: MultiBlocks input sequences. (B, T, D_block_1)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_att)
            mask: Source mask. (B, T_2)
            left_context: Number of previous frames the attention module can see
                in current chunk (used by Conformer and Branchformer block).

        Returns:
            x: MultiBlocks output sequences. (B, T, D_block_N)

        """
        # Streaming path: every block always runs (no block dropout).
        for block in self.blocks:
            x, pos_enc = block.chunk_forward(
                x,
                pos_enc,
                mask,
                left_context=left_context,
            )

        x = self.norm_blocks(x)

        return x
| 3,435 | 29.40708 | 86 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/modules/positional_encoding.py | """Positional encoding modules."""
import math
import torch
from espnet.nets.pytorch_backend.transformer.embedding import _pre_hook
class RelPositionalEncoding(torch.nn.Module):
    """Relative positional encoding.

    Caches a sinusoidal table over relative offsets -(T-1) .. (T-1) and
    returns the window needed for a given input length.

    Args:
        size: Module size.
        max_len: Maximum input length.
        dropout_rate: Dropout rate.

    """

    def __init__(
        self, size: int, dropout_rate: float = 0.0, max_len: int = 5000
    ) -> None:
        """Construct a RelativePositionalEncoding object."""
        super().__init__()
        self.size = size
        self.pe = None
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        # Pre-compute the table for sequences up to max_len; extend_pe()
        # lazily enlarges it when a longer input shows up.
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
        # NOTE(review): _pre_hook presumably adapts legacy state-dict keys on
        # load — confirm against espnet's embedding module.
        self._register_load_state_dict_pre_hook(_pre_hook)

    def extend_pe(self, x: torch.Tensor, left_context: int = 0) -> None:
        """Reset positional encoding.

        Args:
            x: Input sequences. (B, T, ?)
            left_context: Number of previous frames the attention module can see
                in current chunk.

        """
        time1 = x.size(1) + left_context
        if self.pe is not None:
            # The cached table is large enough; only sync device/dtype.
            if self.pe.size(1) >= time1 * 2 - 1:
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(device=x.device, dtype=x.dtype)
                return
        # pe_positive holds offsets (time1 - 1) .. 0 after the flip below;
        # pe_negative holds offsets -1 .. -(time1 - 1) (index 0 is dropped to
        # avoid duplicating offset 0). Concatenated length: 2 * time1 - 1,
        # with offset 0 located at index time1 - 1.
        pe_positive = torch.zeros(time1, self.size)
        pe_negative = torch.zeros(time1, self.size)
        position = torch.arange(0, time1, dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.size, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.size)
        )
        pe_positive[:, 0::2] = torch.sin(position * div_term)
        pe_positive[:, 1::2] = torch.cos(position * div_term)
        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
        pe_negative = pe_negative[1:].unsqueeze(0)
        self.pe = torch.cat([pe_positive, pe_negative], dim=1).to(
            dtype=x.dtype, device=x.device
        )

    def forward(self, x: torch.Tensor, left_context: int = 0) -> torch.Tensor:
        """Compute positional encoding.

        Args:
            x: Input sequences. (B, T, ?)
            left_context: Number of previous frames the attention module can see
                in current chunk.

        Returns:
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), ?)

        """
        self.extend_pe(x, left_context=left_context)
        time1 = x.size(1) + left_context
        # The cached table's center (offset 0) sits at index pe.size(1) // 2;
        # slice the window of offsets (time1 - 1) .. -(x.size(1) - 1).
        pos_enc = self.pe[
            :, self.pe.size(1) // 2 - time1 + 1 : self.pe.size(1) // 2 + x.size(1)
        ]
        pos_enc = self.dropout(pos_enc)
        return pos_enc
| 2,878 | 29.62766 | 82 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/modules/normalization.py | """Normalization modules for X-former blocks."""
from typing import Dict, Optional, Tuple
import torch
def get_normalization(
    normalization_type: str,
    eps: Optional[float] = None,
    partial: Optional[float] = None,
) -> Tuple[torch.nn.Module, Dict]:
    """Get normalization module and arguments given parameters.

    Args:
        normalization_type: Normalization module type.
        eps: Value added to the denominator.
        partial: Value defining the part of the input used for RMS stats (RMSNorm).

    Return:
        : Normalization module class
        : Normalization module arguments

    Raises:
        KeyError: If normalization_type is not a supported type.

    """
    # Per-type normalization class and default epsilon value.
    supported = {
        "basic_norm": (BasicNorm, 0.25),
        "layer_norm": (torch.nn.LayerNorm, 1e-12),
        "rms_norm": (RMSNorm, 1e-05),
        "scale_norm": (ScaleNorm, 1e-05),
    }

    norm_class, default_eps = supported[normalization_type]
    norm_args = {"eps": eps if eps is not None else default_eps}

    # Only RMSNorm takes the partial-statistics ratio.
    if normalization_type == "rms_norm":
        norm_args["partial"] = partial if partial is not None else -1.0

    return norm_class, norm_args
class BasicNorm(torch.nn.Module):
    """BasicNorm module definition.

    Reference: https://github.com/k2-fsa/icefall/pull/288

    Args:
        normalized_shape: Expected size.
        eps: Value added to the denominator for numerical stability.

    """

    def __init__(
        self,
        normalized_shape: int,
        eps: float = 0.25,
    ) -> None:
        """Construct a BasicNorm object."""
        super().__init__()

        # eps is learned in log-space so that it stays positive in training.
        self.eps = torch.nn.Parameter(torch.tensor(eps).log().detach())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute basic normalization.

        Args:
            x: Input sequences. (B, T, D_hidden)

        Returns:
            : Output sequences. (B, T, D_hidden)

        """
        # Scale each frame by the inverse RMS of its features (plus eps).
        mean_square = x.pow(2).mean(dim=-1, keepdim=True)

        return x * (mean_square + self.eps.exp()) ** -0.5
class RMSNorm(torch.nn.Module):
    """RMSNorm module definition.

    Reference: https://arxiv.org/pdf/1910.07467.pdf

    Args:
        normalized_shape: Expected size.
        eps: Value added to the denominator for numerical stability.
        partial: Value defining the part of the input used for RMS stats.

    """

    def __init__(
        self,
        normalized_shape: int,
        eps: float = 1e-5,
        partial: float = 0.0,
    ) -> None:
        """Construct a RMSNorm object."""
        super().__init__()

        self.normalized_shape = normalized_shape
        # Partial (pRMSNorm) mode is enabled only for a strict fraction.
        self.partial = 0 < partial < 1
        self.p = partial
        self.eps = eps

        self.scale = torch.nn.Parameter(torch.ones(normalized_shape))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute RMS normalization.

        Args:
            x: Input sequences. (B, T, D_hidden)

        Returns:
            x: Output sequences. (B, T, D_hidden)

        """
        if self.partial:
            # Estimate the RMS statistic from the leading p-fraction of the
            # feature dimension only.
            stat_size = int(self.normalized_shape * self.p)
            norm_x = x[..., :stat_size].norm(2, dim=-1, keepdim=True)
        else:
            stat_size = self.normalized_shape
            norm_x = x.norm(2, dim=-1, keepdim=True)

        rms_x = norm_x * stat_size ** (-1.0 / 2)

        return self.scale * (x / (rms_x + self.eps))
class ScaleNorm(torch.nn.Module):
    """ScaleNorm module definition.

    Reference: https://arxiv.org/pdf/1910.05895.pdf

    Args:
        normalized_shape: Expected size.
        eps: Value added to the denominator for numerical stability.

    """

    def __init__(self, normalized_shape: int, eps: float = 1e-5) -> None:
        """Construct a ScaleNorm object."""
        super().__init__()

        self.eps = eps
        # A single learnable scalar gain, initialized to sqrt(d_model).
        self.scale = torch.nn.Parameter(torch.tensor(normalized_shape**0.5))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute scale normalization.

        Args:
            x: Input sequences. (B, T, D_hidden)

        Returns:
            : Output sequences. (B, T, D_hidden)

        """
        # Normalize each frame by its L2 norm (clamped for stability),
        # then rescale by the learned gain.
        l2_norm = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)

        return x * (self.scale / l2_norm)
| 4,454 | 25.052632 | 87 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/modules/attention.py | """Multi-Head attention layers with relative positional encoding."""
import math
from typing import Optional, Tuple
import torch
class RelPositionMultiHeadedAttention(torch.nn.Module):
    """RelPositionMultiHeadedAttention definition.

    Multi-head attention with relative positional encoding
    (Transformer-XL style), with an optional simplified score variant.

    Args:
        num_heads: Number of attention heads.
        embed_size: Embedding size.
        dropout_rate: Dropout rate.
        simplified_attention_score: Whether to use the simplified attention
            score computation (one positional projection per head) instead of
            the Transformer-XL score with learnable position biases.

    """

    def __init__(
        self,
        num_heads: int,
        embed_size: int,
        dropout_rate: float = 0.0,
        simplified_attention_score: bool = False,
    ) -> None:
        """Construct an MultiHeadedAttention object."""
        super().__init__()

        self.d_k = embed_size // num_heads
        self.num_heads = num_heads

        # Fix: the previous assert message was a (format, args) tuple that was
        # never interpolated; build the final message directly instead.
        assert self.d_k * num_heads == embed_size, (
            f"embed_size ({embed_size}) must be divisible "
            f"by num_heads ({num_heads})"
        )

        self.linear_q = torch.nn.Linear(embed_size, embed_size)
        self.linear_k = torch.nn.Linear(embed_size, embed_size)
        self.linear_v = torch.nn.Linear(embed_size, embed_size)

        self.linear_out = torch.nn.Linear(embed_size, embed_size)

        if simplified_attention_score:
            # One positional score per head, no learnable biases.
            self.linear_pos = torch.nn.Linear(embed_size, num_heads)

            self.compute_att_score = self.compute_simplified_attention_score
        else:
            self.linear_pos = torch.nn.Linear(embed_size, embed_size, bias=False)

            # Learnable global biases for content and position terms
            # (u and v in the Transformer-XL paper).
            self.pos_bias_u = torch.nn.Parameter(torch.Tensor(num_heads, self.d_k))
            self.pos_bias_v = torch.nn.Parameter(torch.Tensor(num_heads, self.d_k))
            torch.nn.init.xavier_uniform_(self.pos_bias_u)
            torch.nn.init.xavier_uniform_(self.pos_bias_v)

            self.compute_att_score = self.compute_attention_score

        self.dropout = torch.nn.Dropout(p=dropout_rate)

        # Last computed attention weights, kept for inspection/plotting.
        self.attn = None

    def rel_shift(self, x: torch.Tensor, left_context: int = 0) -> torch.Tensor:
        """Compute relative positional encoding.

        Converts scores indexed by relative offset into scores indexed by
        absolute key position, via a zero-copy strided view.

        Args:
            x: Input sequence. (B, H, T_1, 2 * T_1 - 1)
            left_context: Number of previous frames to use for current chunk
                attention computation.

        Returns:
            x: Output sequence. (B, H, T_1, T_2)

        """
        batch_size, n_heads, time1, n = x.shape
        time2 = time1 + left_context

        batch_stride, n_heads_stride, time1_stride, n_stride = x.stride()

        # Each query row starts one element earlier in the offset axis;
        # expressing that as (time1_stride - n_stride) realizes the classic
        # "relative shift" without materializing a new tensor.
        return x.as_strided(
            (batch_size, n_heads, time1, time2),
            (batch_stride, n_heads_stride, time1_stride - n_stride, n_stride),
            storage_offset=(n_stride * (time1 - 1)),
        )

    def compute_simplified_attention_score(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        pos_enc: torch.Tensor,
        left_context: int = 0,
    ) -> torch.Tensor:
        """Simplified attention score computation.

        Reference: https://github.com/k2-fsa/icefall/pull/458

        Args:
            query: Transformed query tensor. (B, H, T_1, d_k)
            key: Transformed key tensor. (B, H, T_2, d_k)
            pos_enc: Positional embedding tensor. (B, 2 * T_1 - 1, size)
            left_context: Number of previous frames to use for current chunk
                attention computation.

        Returns:
            : Attention score. (B, H, T_1, T_2)

        """
        # (B, 2 * T_1 - 1, H): one positional score per head and offset.
        pos_enc = self.linear_pos(pos_enc)

        # Content-content term.
        matrix_ac = torch.matmul(query, key.transpose(2, 3))

        # Positional term, broadcast over query positions then shifted.
        matrix_bd = self.rel_shift(
            pos_enc.transpose(1, 2).unsqueeze(2).repeat(1, 1, query.size(2), 1),
            left_context=left_context,
        )

        return (matrix_ac + matrix_bd) / math.sqrt(self.d_k)

    def compute_attention_score(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        pos_enc: torch.Tensor,
        left_context: int = 0,
    ) -> torch.Tensor:
        """Attention score computation.

        Args:
            query: Transformed query tensor. (B, H, T_1, d_k)
            key: Transformed key tensor. (B, H, T_2, d_k)
            pos_enc: Positional embedding tensor. (B, 2 * T_1 - 1, size)
            left_context: Number of previous frames to use for current chunk
                attention computation.

        Returns:
            : Attention score. (B, H, T_1, T_2)

        """
        p = self.linear_pos(pos_enc).view(pos_enc.size(0), -1, self.num_heads, self.d_k)

        # Add the global content/position biases (u, v) to the query.
        query = query.transpose(1, 2)
        q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
        q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)

        # matrix_ac: content term (a) + content bias term (c).
        matrix_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))

        # matrix_bd: position term (b) + position bias term (d), realigned
        # from relative offsets to absolute key positions.
        matrix_bd = torch.matmul(q_with_bias_v, p.permute(0, 2, 3, 1))
        matrix_bd = self.rel_shift(matrix_bd, left_context=left_context)

        return (matrix_ac + matrix_bd) / math.sqrt(self.d_k)

    def forward_qkv(
        self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Transform query, key and value.

        Args:
            query: Query tensor. (B, T_1, size)
            key: Key tensor. (B, T_2, size)
            value: Value tensor. (B, T_2, size)

        Returns:
            q: Transformed query tensor. (B, H, T_1, d_k)
            k: Transformed key tensor. (B, H, T_2, d_k)
            v: Transformed value tensor. (B, H, T_2, d_k)

        """
        n_batch = query.size(0)

        q = (
            self.linear_q(query)
            .view(n_batch, -1, self.num_heads, self.d_k)
            .transpose(1, 2)
        )
        k = (
            self.linear_k(key)
            .view(n_batch, -1, self.num_heads, self.d_k)
            .transpose(1, 2)
        )
        v = (
            self.linear_v(value)
            .view(n_batch, -1, self.num_heads, self.d_k)
            .transpose(1, 2)
        )

        return q, k, v

    def forward_attention(
        self,
        value: torch.Tensor,
        scores: torch.Tensor,
        mask: torch.Tensor,
        chunk_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Compute attention context vector.

        Args:
            value: Transformed value. (B, H, T_2, d_k)
            scores: Attention score. (B, H, T_1, T_2)
            mask: Source mask. (B, T_2)
            chunk_mask: Chunk mask. (T_1, T_1)

        Returns:
            attn_output: Transformed value weighted by attention score. (B, T_1, H * d_k)

        """
        batch_size = scores.size(0)

        # Broadcast the padding mask over heads and query positions, and
        # combine it with the chunk mask if provided (True = masked).
        mask = mask.unsqueeze(1).unsqueeze(2)
        if chunk_mask is not None:
            mask = chunk_mask.unsqueeze(0).unsqueeze(1) | mask

        scores = scores.masked_fill(mask, float("-inf"))
        # Second masked_fill zeroes out any residual weight on masked
        # positions after the softmax.
        self.attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)

        attn_output = self.dropout(self.attn)
        attn_output = torch.matmul(attn_output, value)

        attn_output = self.linear_out(
            attn_output.transpose(1, 2)
            .contiguous()
            .view(batch_size, -1, self.num_heads * self.d_k)
        )

        return attn_output

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        chunk_mask: Optional[torch.Tensor] = None,
        left_context: int = 0,
    ) -> torch.Tensor:
        """Compute scaled dot product attention with rel. positional encoding.

        Args:
            query: Query tensor. (B, T_1, size)
            key: Key tensor. (B, T_2, size)
            value: Value tensor. (B, T_2, size)
            pos_enc: Positional embedding tensor. (B, 2 * T_1 - 1, size)
            mask: Source mask. (B, T_2)
            chunk_mask: Chunk mask. (T_1, T_1)
            left_context: Number of previous frames to use for current chunk
                attention computation.

        Returns:
            : Output tensor. (B, T_1, H * d_k)

        """
        q, k, v = self.forward_qkv(query, key, value)
        scores = self.compute_att_score(q, k, pos_enc, left_context=left_context)

        return self.forward_attention(v, scores, mask, chunk_mask=chunk_mask)
| 8,228 | 31.270588 | 88 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/blocks/branchformer.py | """Branchformer block for Transducer encoder."""
from typing import Dict, Optional, Tuple
import torch
class Branchformer(torch.nn.Module):
    """Branchformer module definition.

    Two parallel branches — global self-attention and a cgMLP (channel
    projection + convolutional spatial gating) — are merged by concatenation
    and a linear projection, with a residual connection.

    Reference: https://arxiv.org/pdf/2207.02971.pdf

    Args:
        block_size: Input/output size.
        linear_size: Linear layers' hidden size.
        self_att: Self-attention module instance.
        conv_mod: Convolution module instance.
        norm_class: Normalization class.
        norm_args: Normalization module arguments.
        dropout_rate: Dropout rate.

    """

    def __init__(
        self,
        block_size: int,
        linear_size: int,
        self_att: torch.nn.Module,
        conv_mod: torch.nn.Module,
        norm_class: torch.nn.Module = torch.nn.LayerNorm,
        norm_args: Dict = {},
        dropout_rate: float = 0.0,
    ) -> None:
        """Construct a Branchformer object."""
        super().__init__()
        self.self_att = self_att
        self.conv_mod = conv_mod
        self.channel_proj1 = torch.nn.Sequential(
            torch.nn.Linear(block_size, linear_size), torch.nn.GELU()
        )
        # The gating unit halves the channel count, hence linear_size // 2.
        self.channel_proj2 = torch.nn.Linear(linear_size // 2, block_size)
        # Projects the concatenation of both branches back to block_size.
        self.merge_proj = torch.nn.Linear(block_size + block_size, block_size)
        self.norm_self_att = norm_class(block_size, **norm_args)
        self.norm_mlp = norm_class(block_size, **norm_args)
        self.norm_final = norm_class(block_size, **norm_args)
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.block_size = block_size
        self.linear_size = linear_size
        # Streaming cache: [attention context cache, convolution cache].
        self.cache = None

    def reset_streaming_cache(self, left_context: int, device: torch.device) -> None:
        """Initialize/Reset self-attention and convolution modules cache for streaming.

        Args:
            left_context: Number of previous frames the attention module can see
                in current chunk.
            device: Device to use for cache tensor.

        """
        self.cache = [
            torch.zeros(
                (1, left_context, self.block_size),
                device=device,
            ),
            torch.zeros(
                (
                    1,
                    self.linear_size // 2,
                    self.conv_mod.kernel_size - 1,
                ),
                device=device,
            ),
        ]

    def forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        chunk_mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Encode input sequences.

        Args:
            x: Branchformer input sequences. (B, T, D_block)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
            mask: Source mask. (B, T)
            chunk_mask: Chunk mask. (T_2, T_2)

        Returns:
            x: Branchformer output sequences. (B, T, D_block)
            mask: Source mask. (B, T)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)

        """
        # Both branches start from the same input (pre-norm style).
        x1 = x
        x2 = x
        # Branch 1: global self-attention.
        x1 = self.norm_self_att(x1)
        x1 = self.dropout(
            self.self_att(x1, x1, x1, pos_enc, mask=mask, chunk_mask=chunk_mask)
        )
        # Branch 2: cgMLP (projection -> spatial gating -> projection).
        x2 = self.norm_mlp(x2)
        x2 = self.channel_proj1(x2)
        x2, _ = self.conv_mod(x2, mask)
        x2 = self.channel_proj2(x2)
        x2 = self.dropout(x2)
        # Merge branches by concatenation + projection, with residual.
        x = x + self.dropout(self.merge_proj(torch.cat([x1, x2], dim=-1)))
        x = self.norm_final(x)
        return x, mask, pos_enc

    def chunk_forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        left_context: int = 0,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode chunk of input sequence.

        Args:
            x: Branchformer input sequences. (B, T, D_block)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
            mask: Source mask. (B, T_2)
            left_context: Number of previous frames the attention module can see
                in current chunk.

        Returns:
            x: Branchformer output sequences. (B, T, D_block)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)

        """
        x1 = x
        x2 = x
        # Branch 1: self-attention over [cached context ; current chunk].
        x1 = self.norm_self_att(x1)
        if left_context > 0:
            key = torch.cat([self.cache[0], x1], dim=1)
        else:
            key = x1
        val = key
        # NOTE(review): when left_context == 0, key[:, -0:, :] slices the
        # whole tensor, so the stored cache is the full key — confirm this is
        # intended (the cache is unused in that configuration).
        att_cache = key[:, -left_context:, :]
        x1 = self.self_att(x1, key, val, pos_enc, mask=mask, left_context=left_context)
        # Branch 2: cgMLP with convolution cache.
        x2 = self.norm_mlp(x2)
        x2 = self.channel_proj1(x2)
        x2, conv_cache = self.conv_mod(x2, cache=self.cache[1])
        x2 = self.channel_proj2(x2)
        # No dropout here: this is the inference/streaming path.
        x = x + self.merge_proj(torch.cat([x1, x2], dim=-1))
        x = self.norm_final(x)
        self.cache = [att_cache, conv_cache]
        return x, pos_enc
| 5,017 | 28.00578 | 87 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/blocks/conv1d.py | """Conv1d block for Transducer encoder."""
from typing import Optional, Tuple, Union
import torch
class Conv1d(torch.nn.Module):
    """Conv1d module definition.

    A Conv1d encoder block that also keeps the source mask and the relative
    positional embedding consistent with the (possibly subsampled) output.

    Args:
        input_size: Input dimension.
        output_size: Output dimension.
        kernel_size: Size of the convolving kernel.
        stride: Stride of the convolution.
        dilation: Spacing between the kernel points.
        groups: Number of blocked connections from input channels to output channels.
        bias: Whether to add a learnable bias to the output.
        batch_norm: Whether to use batch normalization after convolution.
        relu: Whether to use a ReLU activation after convolution.
        causal: Whether to use causal convolution (set to True if streaming).
        dropout_rate: Dropout rate.

    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        kernel_size: Union[int, Tuple],
        stride: Union[int, Tuple] = 1,
        dilation: Union[int, Tuple] = 1,
        groups: Union[int, Tuple] = 1,
        bias: bool = True,
        batch_norm: bool = False,
        relu: bool = True,
        causal: bool = False,
        dropout_rate: float = 0.0,
    ) -> None:
        """Construct a Conv1d object."""
        super().__init__()
        if causal:
            # Causal mode: left-pad (kernel_size - 1) frames and force
            # stride 1 so the sequence length is preserved.
            self.lorder = kernel_size - 1
            stride = 1
        else:
            self.lorder = 0
            stride = stride
        # No 'padding' is given to Conv1d: in the non-causal path the output
        # shrinks by dilation * (kernel_size - 1) frames (see self.padding).
        self.conv = torch.nn.Conv1d(
            input_size,
            output_size,
            kernel_size,
            stride=stride,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        if relu:
            self.relu_func = torch.nn.ReLU()
        if batch_norm:
            self.bn = torch.nn.BatchNorm1d(output_size)
        # Projects the positional embedding to the new feature size.
        self.out_pos = torch.nn.Linear(input_size, output_size)
        self.input_size = input_size
        self.output_size = output_size
        self.relu = relu
        self.batch_norm = batch_norm
        self.causal = causal
        self.kernel_size = kernel_size
        # Number of frames lost to the (unpadded) convolution; used to update
        # the mask and the positional embedding in the non-causal path.
        self.padding = dilation * (kernel_size - 1)
        self.stride = stride
        self.cache = None

    def reset_streaming_cache(self, left_context: int, device: torch.device) -> None:
        """Initialize/Reset Conv1d cache for streaming.

        Args:
            left_context: Number of previous frames the attention module can see
                in current chunk (not used here).
            device: Device to use for cache tensor.

        """
        self.cache = torch.zeros(
            (1, self.input_size, self.kernel_size - 1), device=device
        )

    def forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        chunk_mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Encode input sequences.

        Args:
            x: Conv1d input sequences. (B, T, D_in)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_in)
            mask: Source mask. (B, T)
            chunk_mask: Chunk mask. (T_2, T_2)

        Returns:
            x: Conv1d output sequences. (B, sub(T), D_out)
            mask: Source mask. (B, T) or (B, sub(T))
            pos_enc: Positional embedding sequences.
                     (B, 2 * (T - 1), D_att) or (B, 2 * (sub(T) - 1), D_out)

        """
        # Channel-first layout for Conv1d: (B, T, D) -> (B, D, T).
        x = x.transpose(1, 2)
        if self.lorder > 0:
            # Causal path: length is preserved, so mask/pos_enc stay valid.
            x = torch.nn.functional.pad(x, (self.lorder, 0), "constant", 0.0)
        else:
            # Non-causal path: output is shorter/subsampled, so the mask and
            # positional embedding must be resampled accordingly.
            mask = self.create_new_mask(mask)
            pos_enc = self.create_new_pos_enc(pos_enc)
        x = self.conv(x)
        if self.batch_norm:
            x = self.bn(x)
        x = self.dropout(x)
        if self.relu:
            x = self.relu_func(x)
        x = x.transpose(1, 2)
        return x, mask, self.out_pos(pos_enc)

    def chunk_forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        left_context: int = 0,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode chunk of input sequence.

        Args:
            x: Conv1d input sequences. (B, T, D_in)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_in)
            mask: Source mask. (B, T)
            left_context: Number of previous frames the attention module can see
                in current chunk (not used here).

        Returns:
            x: Conv1d output sequences. (B, T, D_out)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_out)

        """
        # Streaming path: prepend cached frames instead of zero-padding, and
        # keep the last (kernel_size - 1) input frames for the next chunk.
        x = torch.cat([self.cache, x.transpose(1, 2)], dim=2)
        self.cache = x[:, :, -self.lorder :]
        x = self.conv(x)
        if self.batch_norm:
            x = self.bn(x)
        x = self.dropout(x)
        if self.relu:
            x = self.relu_func(x)
        x = x.transpose(1, 2)
        return x, self.out_pos(pos_enc)

    def create_new_mask(self, mask: torch.Tensor) -> torch.Tensor:
        """Create new mask for output sequences.

        Drops the frames lost to the unpadded convolution, then subsamples by
        the stride, mirroring the convolution's output length.

        Args:
            mask: Mask of input sequences. (B, T)

        Returns:
            mask: Mask of output sequences. (B, sub(T))

        """
        if self.padding != 0:
            mask = mask[:, : -self.padding]
        return mask[:, :: self.stride]

    def create_new_pos_enc(self, pos_enc: torch.Tensor) -> torch.Tensor:
        """Create new positional embedding vector.

        Args:
            pos_enc: Input sequences positional embedding.
                     (B, 2 * (T - 1), D_in)

        Returns:
            pos_enc: Output sequences positional embedding.
                     (B, 2 * (sub(T) - 1), D_in)

        """
        # Split into positive and negative relative offsets; the two slices
        # deliberately overlap at the center (offset 0).
        pos_enc_positive = pos_enc[:, : pos_enc.size(1) // 2 + 1, :]
        pos_enc_negative = pos_enc[:, pos_enc.size(1) // 2 :, :]
        # Trim and subsample each half like the frame axis.
        if self.padding != 0:
            pos_enc_positive = pos_enc_positive[:, : -self.padding, :]
            pos_enc_negative = pos_enc_negative[:, : -self.padding, :]
        pos_enc_positive = pos_enc_positive[:, :: self.stride, :]
        pos_enc_negative = pos_enc_negative[:, :: self.stride, :]
        # Drop the duplicated center element from the negative half.
        pos_enc = torch.cat([pos_enc_positive, pos_enc_negative[:, 1:, :]], dim=1)
        return pos_enc
| 6,398 | 28.353211 | 85 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/blocks/conformer.py | """Conformer block for Transducer encoder."""
from typing import Dict, Optional, Tuple
import torch
class Conformer(torch.nn.Module):
    """Conformer module definition.

    A single Conformer encoder block: macaron feed-forward, self-attention,
    convolution module and a closing feed-forward, each wrapped in a
    pre-norm residual connection.

    Reference: https://arxiv.org/abs/2005.08100

    Args:
        block_size: Input/output size.
        self_att: Self-attention module instance.
        feed_forward: Feed-forward module instance.
        feed_forward_macaron: Feed-forward module instance for macaron network.
        conv_mod: Convolution module instance.
        norm_class: Normalization module class.
        norm_args: Normalization module arguments.
        dropout_rate: Dropout rate.
    """
    def __init__(
        self,
        block_size: int,
        self_att: torch.nn.Module,
        feed_forward: torch.nn.Module,
        feed_forward_macaron: torch.nn.Module,
        conv_mod: torch.nn.Module,
        norm_class: torch.nn.Module = torch.nn.LayerNorm,
        norm_args: Dict = {},
        dropout_rate: float = 0.0,
    ) -> None:
        """Construct a Conformer object."""
        super().__init__()
        self.self_att = self_att
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        # Macaron-style half-step scaling applied to both feed-forward branches.
        self.feed_forward_scale = 0.5
        self.conv_mod = conv_mod
        self.norm_feed_forward = norm_class(block_size, **norm_args)
        self.norm_self_att = norm_class(block_size, **norm_args)
        self.norm_macaron = norm_class(block_size, **norm_args)
        self.norm_conv = norm_class(block_size, **norm_args)
        self.norm_final = norm_class(block_size, **norm_args)
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.block_size = block_size
        # [attention cache, convolution cache]; set by reset_streaming_cache().
        self.cache = None
    def reset_streaming_cache(self, left_context: int, device: torch.device) -> None:
        """Initialize/Reset self-attention and convolution modules cache for streaming.

        Args:
            left_context: Number of previous frames the attention module can see
                          in current chunk.
            device: Device to use for cache tensor.
        """
        self.cache = [
            # Attention cache: last ``left_context`` frames of the previous chunk.
            torch.zeros(
                (1, left_context, self.block_size),
                device=device,
            ),
            # Convolution cache: (kernel_size - 1) frames of left context.
            torch.zeros(
                (
                    1,
                    self.block_size,
                    self.conv_mod.kernel_size - 1,
                ),
                device=device,
            ),
        ]
    def forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        chunk_mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Encode input sequences.

        Args:
            x: Conformer input sequences. (B, T, D_block)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
            mask: Source mask. (B, T)
            chunk_mask: Chunk mask. (T_2, T_2)

        Returns:
            x: Conformer output sequences. (B, T, D_block)
            mask: Source mask. (B, T)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
        """
        # 1. Macaron feed-forward (half-step residual).
        residual = x
        x = self.norm_macaron(x)
        x = residual + self.feed_forward_scale * self.dropout(
            self.feed_forward_macaron(x)
        )
        # 2. Self-attention (query, key and value are all x).
        residual = x
        x = self.norm_self_att(x)
        x = residual + self.dropout(
            self.self_att(
                x,
                x,
                x,
                pos_enc,
                mask,
                chunk_mask=chunk_mask,
            )
        )
        # 3. Convolution module.
        residual = x
        x = self.norm_conv(x)
        x, _ = self.conv_mod(x, mask=mask)
        x = residual + self.dropout(x)
        # 4. Closing feed-forward (half-step residual) and final normalization.
        residual = x
        x = self.norm_feed_forward(x)
        x = residual + self.feed_forward_scale * self.dropout(self.feed_forward(x))
        x = self.norm_final(x)
        return x, mask, pos_enc
    def chunk_forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        left_context: int = 0,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode chunk of input sequence.

        Streaming variant of ``forward``: no dropout is applied, and the
        attention/convolution caches carry context between chunks.

        Args:
            x: Conformer input sequences. (B, T, D_block)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
            mask: Source mask. (B, T_2)
            left_context: Number of previous frames the attention module can see
                          in current chunk.

        Returns:
            x: Conformer output sequences. (B, T, D_block)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
        """
        residual = x
        x = self.norm_macaron(x)
        x = residual + self.feed_forward_scale * self.feed_forward_macaron(x)
        residual = x
        x = self.norm_self_att(x)
        if left_context > 0:
            # Attend over cached previous frames in addition to current chunk.
            key = torch.cat([self.cache[0], x], dim=1)
        else:
            key = x
        # NOTE(review): with left_context == 0, ``-0`` slicing keeps the whole
        # key as the new cache — confirm this is intended for that setting.
        att_cache = key[:, -left_context:, :]
        x = residual + self.self_att(
            x,
            key,
            key,
            pos_enc,
            mask,
            left_context=left_context,
        )
        residual = x
        x = self.norm_conv(x)
        x, conv_cache = self.conv_mod(x, cache=self.cache[1])
        x = residual + x
        residual = x
        x = self.norm_feed_forward(x)
        x = residual + self.feed_forward_scale * self.feed_forward(x)
        x = self.norm_final(x)
        self.cache = [att_cache, conv_cache]
        return x, pos_enc
| 5,560 | 27.085859 | 87 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/blocks/conv_input.py | """ConvInput block for Transducer encoder."""
from typing import Optional, Tuple, Union
import torch
from espnet2.asr_transducer.utils import get_convinput_module_parameters
class ConvInput(torch.nn.Module):
    """ConvInput module definition.

    Frontend that subsamples the input features in time through 2D
    convolutions, either with a VGG-like stack (conv-conv-pool twice) or a
    two-layer strided convolution stack.

    Args:
        input_size: Input size.
        conv_size: Convolution size. A (size1, size2) tuple when ``vgg_like``
                   is True, a single int otherwise.
        subsampling_factor: Subsampling factor.
        vgg_like: Whether to use a VGG-like network.
        output_size: Block output dimension.
    """
    def __init__(
        self,
        input_size: int,
        conv_size: Union[int, Tuple],
        subsampling_factor: int = 4,
        vgg_like: bool = True,
        output_size: Optional[int] = None,
    ) -> None:
        """Construct a ConvInput object."""
        super().__init__()
        self.subsampling_factor = subsampling_factor
        self.vgg_like = vgg_like
        if vgg_like:
            conv_size1, conv_size2 = conv_size
            # Helper computes the first max-pool kernel and the flattened
            # (channels * frequency) size feeding the output projection.
            self.maxpool_kernel1, output_proj = get_convinput_module_parameters(
                input_size, conv_size2, subsampling_factor, is_vgg=True
            )
            self.conv = torch.nn.Sequential(
                torch.nn.Conv2d(1, conv_size1, 3, stride=1, padding=1),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size1, conv_size1, 3, stride=1, padding=0),
                torch.nn.ReLU(),
                torch.nn.MaxPool2d(
                    self.maxpool_kernel1, stride=2, padding=0, ceil_mode=True
                ),
                torch.nn.Conv2d(conv_size1, conv_size2, 3, stride=1, padding=1),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size2, conv_size2, 3, stride=1, padding=0),
                torch.nn.ReLU(),
                torch.nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True),
            )
        else:
            # Helper computes the second conv's kernel/stride and the
            # flattened size feeding the output projection.
            (
                self.conv_kernel2,
                self.conv_stride2,
            ), output_proj = get_convinput_module_parameters(
                input_size, conv_size, subsampling_factor, is_vgg=False
            )
            self.conv = torch.nn.Sequential(
                torch.nn.Conv2d(1, conv_size, 3, 2),
                torch.nn.ReLU(),
                torch.nn.Conv2d(
                    conv_size, conv_size, self.conv_kernel2, self.conv_stride2
                ),
                torch.nn.ReLU(),
            )
        # Minimum number of input frames required by the conv stack.
        self.min_frame_length = 7 if subsampling_factor < 6 else 11
        if output_size is not None:
            self.output = torch.nn.Linear(output_proj, output_size)
            self.output_size = output_size
        else:
            self.output = None
            self.output_size = output_proj
    def forward(
        self, x: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode input sequences.

        Args:
            x: ConvInput input sequences. (B, T, D_feats)
            mask: Mask of input sequences. (B, 1, T)

        Returns:
            x: ConvInput output sequences. (B, sub(T), D_out)
            mask: Mask of output sequences. (B, 1, sub(T))
        """
        # Add a channel dimension for Conv2d: (B, 1, T, D_feats).
        x = self.conv(x.unsqueeze(1))
        # Flatten channels and frequency into a single feature dimension.
        b, c, t, f = x.size()
        x = x.transpose(1, 2).contiguous().view(b, t, c * f)
        if self.output is not None:
            x = self.output(x)
        if mask is not None:
            # NOTE(review): the mask is truncated to the subsampled length, not
            # strided — presumably the caller provides a mask already matching
            # the subsampled rate; confirm against call sites.
            mask = mask[:, : x.size(1)]
        return x, mask
| 3,412 | 30.311927 | 80 | py |
espnet | espnet-master/espnet2/asr_transducer/encoder/blocks/ebranchformer.py | """E-Branchformer block for Transducer encoder."""
from typing import Dict, Optional, Tuple
import torch
class EBranchformer(torch.nn.Module):
    """E-Branchformer module definition.

    A single E-Branchformer encoder block: macaron feed-forward, two
    parallel branches (global self-attention and a local cgMLP-style
    branch), merged through a depthwise convolution and a projection,
    followed by a closing feed-forward.

    Reference: https://arxiv.org/pdf/2210.00077.pdf

    Args:
        block_size: Input/output size.
        linear_size: Linear layers' hidden size.
        self_att: Self-attention module instance.
        feed_forward: Feed-forward module instance.
        feed_forward_macaron: Feed-forward module instance for macaron network.
        conv_mod: ConvolutionalSpatialGatingUnit module instance.
        depthwise_conv_mod: DepthwiseConvolution module instance.
        norm_class: Normalization class.
        norm_args: Normalization module arguments.
        dropout_rate: Dropout rate.
    """
    def __init__(
        self,
        block_size: int,
        linear_size: int,
        self_att: torch.nn.Module,
        feed_forward: torch.nn.Module,
        feed_forward_macaron: torch.nn.Module,
        conv_mod: torch.nn.Module,
        depthwise_conv_mod: torch.nn.Module,
        norm_class: torch.nn.Module = torch.nn.LayerNorm,
        norm_args: Dict = {},
        dropout_rate: float = 0.0,
    ) -> None:
        """Construct a E-Branchformer object."""
        super().__init__()
        self.self_att = self_att
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        # Macaron-style half-step scaling applied to both feed-forward branches.
        self.feed_forward_scale = 0.5
        self.conv_mod = conv_mod
        self.depthwise_conv_mod = depthwise_conv_mod
        # Up-projection into the local branch; conv_mod halves the dimension,
        # hence channel_proj2 maps linear_size // 2 back to block_size.
        self.channel_proj1 = torch.nn.Sequential(
            torch.nn.Linear(block_size, linear_size), torch.nn.GELU()
        )
        self.channel_proj2 = torch.nn.Linear(linear_size // 2, block_size)
        # Projects the concatenated two-branch output back to block_size.
        self.merge_proj = torch.nn.Linear((block_size + block_size), block_size)
        self.norm_self_att = norm_class(block_size, **norm_args)
        self.norm_feed_forward = norm_class(block_size, **norm_args)
        self.norm_feed_forward_macaron = norm_class(block_size, **norm_args)
        self.norm_mlp = norm_class(block_size, **norm_args)
        self.norm_final = norm_class(block_size, **norm_args)
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.block_size = block_size
        self.linear_size = linear_size
        # [attention cache, conv cache, merge conv cache]; set by
        # reset_streaming_cache().
        self.cache = None
    def reset_streaming_cache(self, left_context: int, device: torch.device) -> None:
        """Initialize/Reset self-attention and convolution modules cache for streaming.

        Args:
            left_context: Number of previous frames the attention module can see
                          in current chunk.
            device: Device to use for cache tensor.
        """
        self.cache = [
            # Attention cache: last ``left_context`` frames of the previous chunk.
            torch.zeros(
                (1, left_context, self.block_size),
                device=device,
            ),
            # Local-branch convolution cache.
            torch.zeros(
                (
                    1,
                    self.linear_size // 2,
                    self.conv_mod.kernel_size - 1,
                ),
                device=device,
            ),
            # Merge depthwise-convolution cache.
            torch.zeros(
                (
                    1,
                    self.block_size + self.block_size,
                    self.depthwise_conv_mod.kernel_size - 1,
                ),
                device=device,
            ),
        ]
    def forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        chunk_mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Encode input sequences.

        Args:
            x: E-Branchformer input sequences. (B, T, D_block)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
            mask: Source mask. (B, T)
            chunk_mask: Chunk mask. (T_2, T_2)

        Returns:
            x: E-Branchformer output sequences. (B, T, D_block)
            mask: Source mask. (B, T)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
        """
        # 1. Macaron feed-forward (half-step residual).
        residual = x
        x = self.norm_feed_forward_macaron(x)
        x = residual + self.feed_forward_scale * self.dropout(
            self.feed_forward_macaron(x)
        )
        # 2. Two parallel branches: x1 (global, attention), x2 (local, conv).
        x1 = x
        x2 = x
        x1 = self.norm_self_att(x1)
        x1 = self.dropout(
            self.self_att(x1, x1, x1, pos_enc, mask=mask, chunk_mask=chunk_mask)
        )
        x2 = self.norm_mlp(x2)
        x2 = self.channel_proj1(x2)
        x2, _ = self.conv_mod(x2, mask=mask)
        x2 = self.dropout(self.channel_proj2(x2))
        # 3. Merge branches with a depthwise conv and a residual projection.
        x_concat = torch.cat([x1, x2], dim=-1)
        x_depth, _ = self.depthwise_conv_mod(x_concat, mask=mask)
        x = x + self.merge_proj(x_concat + x_depth)
        # 4. Closing feed-forward (half-step residual) and final normalization.
        residual = x
        x = self.norm_feed_forward(x)
        x = residual + self.feed_forward_scale * self.dropout(self.feed_forward(x))
        x = self.norm_final(x)
        return x, mask, pos_enc
    def chunk_forward(
        self,
        x: torch.Tensor,
        pos_enc: torch.Tensor,
        mask: torch.Tensor,
        left_context: int = 0,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode chunk of input sequence.

        Streaming variant of ``forward``: no dropout is applied, and the
        attention/convolution caches carry context between chunks.

        Args:
            x: E-Branchformer input sequences. (B, T, D_block)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
            mask: Source mask. (B, T_2)
            left_context: Number of previous frames the attention module can see
                          in current chunk.

        Returns:
            x: E-Branchformer output sequences. (B, T, D_block)
            pos_enc: Positional embedding sequences. (B, 2 * (T - 1), D_block)
        """
        residual = x
        x = self.norm_feed_forward_macaron(x)
        x = residual + self.feed_forward_scale * self.feed_forward_macaron(x)
        x1 = x
        x2 = x
        x1 = self.norm_self_att(x1)
        if left_context > 0:
            # Attend over cached previous frames in addition to current chunk.
            key = torch.cat([self.cache[0], x1], dim=1)
        else:
            key = x1
        # NOTE(review): with left_context == 0, ``-0`` slicing keeps the whole
        # key as the new cache — confirm this is intended for that setting.
        att_cache = key[:, -left_context:, :]
        x1 = self.self_att(x1, key, key, pos_enc, mask=mask, left_context=left_context)
        x2 = self.norm_mlp(x2)
        x2 = self.channel_proj1(x2)
        x2, conv_cache = self.conv_mod(x2, cache=self.cache[1])
        x2 = self.channel_proj2(x2)
        x_concat = torch.cat([x1, x2], dim=-1)
        x_depth, merge_cache = self.depthwise_conv_mod(x_concat, cache=self.cache[2])
        x = x + self.merge_proj(x_concat + x_depth)
        residual = x
        x = self.norm_feed_forward(x)
        x = residual + self.feed_forward_scale * self.feed_forward(x)
        x = self.norm_final(x)
        self.cache = [att_cache, conv_cache, merge_cache]
        return x, pos_enc
| 6,771 | 29.781818 | 87 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/rnn_decoder.py | """RNN decoder definition for Transducer models."""
from typing import List, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr_transducer.beam_search_transducer import Hypothesis
from espnet2.asr_transducer.decoder.abs_decoder import AbsDecoder
class RNNDecoder(AbsDecoder):
    """RNN decoder module.

    Label-history decoder for Transducer models built from stacked
    single-layer LSTM/GRU cells, with a per-hypothesis score cache for
    beam search.

    Args:
        vocab_size: Vocabulary size.
        embed_size: Embedding size.
        hidden_size: Hidden size.
        rnn_type: Decoder layers type.
        num_layers: Number of decoder layers.
        dropout_rate: Dropout rate for decoder layers.
        embed_dropout_rate: Dropout rate for embedding layer.
        embed_pad: Embedding padding symbol ID.
    """
    def __init__(
        self,
        vocab_size: int,
        embed_size: int = 256,
        hidden_size: int = 256,
        rnn_type: str = "lstm",
        num_layers: int = 1,
        dropout_rate: float = 0.0,
        embed_dropout_rate: float = 0.0,
        embed_pad: int = 0,
    ) -> None:
        """Construct a RNNDecoder object."""
        super().__init__()
        assert check_argument_types()
        if rnn_type not in ("lstm", "gru"):
            raise ValueError(f"Not supported: rnn_type={rnn_type}")
        self.embed = torch.nn.Embedding(vocab_size, embed_size, padding_idx=embed_pad)
        self.dropout_embed = torch.nn.Dropout(p=embed_dropout_rate)
        rnn_class = torch.nn.LSTM if rnn_type == "lstm" else torch.nn.GRU
        # Stack of single-layer RNNs so inter-layer dropout can be applied
        # manually in rnn_forward.
        self.rnn = torch.nn.ModuleList(
            [rnn_class(embed_size, hidden_size, 1, batch_first=True)]
        )
        for _ in range(1, num_layers):
            self.rnn += [rnn_class(hidden_size, hidden_size, 1, batch_first=True)]
        self.dropout_rnn = torch.nn.ModuleList(
            [torch.nn.Dropout(p=dropout_rate) for _ in range(num_layers)]
        )
        self.dlayers = num_layers
        self.dtype = rnn_type
        self.output_size = hidden_size
        self.vocab_size = vocab_size
        self.device = next(self.parameters()).device
        # Maps a label-sequence string to its (output, states) pair.
        self.score_cache = {}
    def forward(self, labels: torch.Tensor) -> torch.Tensor:
        """Encode source label sequences.

        Args:
            labels: Label ID sequences. (B, L)

        Returns:
            out: Decoder output sequences. (B, U, D_dec)
        """
        states = self.init_state(labels.size(0))
        embed = self.dropout_embed(self.embed(labels))
        out, _ = self.rnn_forward(embed, states)
        return out
    def rnn_forward(
        self,
        x: torch.Tensor,
        state: Tuple[torch.Tensor, Optional[torch.Tensor]],
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]:
        """Encode source label sequences.

        Args:
            x: RNN input sequences. (B, D_emb)
            state: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec) or None)

        Returns:
            x: RNN output sequences. (B, D_dec)
            (h_next, c_next): Decoder hidden states.
                              (N, B, D_dec), (N, B, D_dec) or None)
        """
        h_prev, c_prev = state
        h_next, c_next = self.init_state(x.size(0))
        # Run layers one at a time; GRU has no cell state (c is None).
        for layer in range(self.dlayers):
            if self.dtype == "lstm":
                x, (h_next[layer : layer + 1], c_next[layer : layer + 1]) = self.rnn[
                    layer
                ](x, hx=(h_prev[layer : layer + 1], c_prev[layer : layer + 1]))
            else:
                x, h_next[layer : layer + 1] = self.rnn[layer](
                    x, hx=h_prev[layer : layer + 1]
                )
            x = self.dropout_rnn[layer](x)
        return x, (h_next, c_next)
    def score(
        self,
        label_sequence: List[int],
        states: Tuple[torch.Tensor, Optional[torch.Tensor]],
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]:
        """One-step forward hypothesis.

        Args:
            label_sequence: Current label sequence.
            states: Decoder hidden states.
                        ((N, 1, D_dec), (N, 1, D_dec) or None)

        Returns:
            out: Decoder output sequence. (1, D_dec)
            states: Decoder hidden states.
                        ((N, 1, D_dec), (N, 1, D_dec) or None)
        """
        # Cache key is the full label history, so identical prefixes across
        # hypotheses reuse the same computation.
        str_labels = "_".join(map(str, label_sequence))
        if str_labels in self.score_cache:
            out, states = self.score_cache[str_labels]
        else:
            label = torch.full(
                (1, 1),
                label_sequence[-1],
                dtype=torch.long,
                device=self.device,
            )
            embed = self.embed(label)
            out, states = self.rnn_forward(embed, states)
            self.score_cache[str_labels] = (out, states)
        return out[0], states
    def batch_score(
        self,
        hyps: List[Hypothesis],
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]:
        """One-step forward hypotheses.

        Args:
            hyps: Hypotheses.

        Returns:
            out: Decoder output sequences. (B, D_dec)
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec) or None)
        """
        labels = torch.tensor(
            [[h.yseq[-1]] for h in hyps], dtype=torch.long, device=self.device
        )
        embed = self.embed(labels)
        states = self.create_batch_states([h.dec_state for h in hyps])
        out, states = self.rnn_forward(embed, states)
        return out.squeeze(1), states
    def set_device(self, device: torch.device) -> None:
        """Set GPU device to use.

        Args:
            device: Device ID.
        """
        self.device = device
    def init_state(
        self, batch_size: int
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Initialize decoder states.

        Args:
            batch_size: Batch size.

        Returns:
            : Initial decoder hidden states. ((N, B, D_dec), (N, B, D_dec) or None)
        """
        h_n = torch.zeros(
            self.dlayers,
            batch_size,
            self.output_size,
            device=self.device,
        )
        if self.dtype == "lstm":
            c_n = torch.zeros(
                self.dlayers,
                batch_size,
                self.output_size,
                device=self.device,
            )
            return (h_n, c_n)
        return (h_n, None)
    def select_state(
        self, states: Tuple[torch.Tensor, Optional[torch.Tensor]], idx: int
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Get specified ID state from decoder hidden states.

        Args:
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec) or None)
            idx: State ID to extract.

        Returns:
            : Decoder hidden state for given ID. ((N, 1, D_dec), (N, 1, D_dec) or None)
        """
        return (
            states[0][:, idx : idx + 1, :],
            states[1][:, idx : idx + 1, :] if self.dtype == "lstm" else None,
        )
    def create_batch_states(
        self,
        new_states: List[Tuple[torch.Tensor, Optional[torch.Tensor]]],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Create decoder hidden states.

        Args:
            new_states: Decoder hidden states.
                            [B x ((N, 1, D_dec), (N, 1, D_dec) or None)]

        Returns:
            states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec) or None)
        """
        return (
            torch.cat([s[0] for s in new_states], dim=1),
            torch.cat([s[1] for s in new_states], dim=1)
            if self.dtype == "lstm"
            else None,
        )
| 7,707 | 28.532567 | 87 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/stateless_decoder.py | """Stateless decoder definition for Transducer models."""
from typing import Any, List, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr_transducer.beam_search_transducer import Hypothesis
from espnet2.asr_transducer.decoder.abs_decoder import AbsDecoder
class StatelessDecoder(AbsDecoder):
    """Stateless Transducer decoder module.

    The decoder output for a step depends only on the most recent label,
    obtained through a single embedding lookup: no recurrent state is kept.

    Args:
        vocab_size: Output size.
        embed_size: Embedding size.
        embed_dropout_rate: Dropout rate for embedding layer.
        embed_pad: Embed/Blank symbol ID.

    """

    def __init__(
        self,
        vocab_size: int,
        embed_size: int = 256,
        embed_dropout_rate: float = 0.0,
        embed_pad: int = 0,
    ) -> None:
        """Construct a StatelessDecoder object."""
        super().__init__()

        assert check_argument_types()

        self.embed = torch.nn.Embedding(vocab_size, embed_size, padding_idx=embed_pad)
        # NOTE: despite the name, this attribute holds the dropout *module*;
        # the name is kept so the public attribute surface stays unchanged.
        self.embed_dropout_rate = torch.nn.Dropout(p=embed_dropout_rate)

        self.output_size = embed_size
        self.vocab_size = vocab_size

        self.device = next(self.parameters()).device
        # Maps a label-sequence string to its embedding output.
        self.score_cache = {}

    def forward(
        self,
        labels: torch.Tensor,
        states: Optional[Any] = None,
    ) -> torch.Tensor:
        """Encode source label sequences.

        Args:
            labels: Label ID sequences. (B, L)
            states: Decoder hidden states. None

        Returns:
            embed: Decoder output sequences. (B, U, D_emb)

        """
        return self.embed_dropout_rate(self.embed(labels))

    def score(
        self,
        label_sequence: List[int],
        states: Optional[Any] = None,
    ) -> Tuple[torch.Tensor, None]:
        """One-step forward hypothesis.

        Args:
            label_sequence: Current label sequence.
            states: Decoder hidden states. None

        Returns:
            : Decoder output sequence. (1, D_emb)
            state: Decoder hidden states. None

        """
        cache_key = "_".join(map(str, label_sequence))

        embed = self.score_cache.get(cache_key)

        if embed is None:
            last_label = torch.full(
                (1, 1),
                label_sequence[-1],
                dtype=torch.long,
                device=self.device,
            )
            embed = self.embed(last_label)

            self.score_cache[cache_key] = embed

        return embed[0], None

    def batch_score(self, hyps: List[Hypothesis]) -> Tuple[torch.Tensor, None]:
        """One-step forward hypotheses.

        Args:
            hyps: Hypotheses.

        Returns:
            out: Decoder output sequences. (B, D_dec)
            states: Decoder hidden states. None

        """
        last_labels = [[h.yseq[-1]] for h in hyps]
        labels = torch.tensor(last_labels, dtype=torch.long, device=self.device)

        return self.embed(labels).squeeze(1), None

    def set_device(self, device: torch.device) -> None:
        """Set GPU device to use.

        Args:
            device: Device ID.

        """
        self.device = device

    def init_state(self, batch_size: int) -> None:
        """Initialize decoder states.

        Args:
            batch_size: Batch size.

        Returns:
            : Initial decoder hidden states. None

        """
        return None

    def select_state(self, states: Optional[torch.Tensor], idx: int) -> None:
        """Get specified ID state from decoder hidden states.

        Args:
            states: Decoder hidden states. None
            idx: State ID to extract.

        Returns:
            : Decoder hidden state for given ID. None

        """
        return None

    def create_batch_states(
        self,
        new_states: List[Optional[torch.Tensor]],
    ) -> None:
        """Create decoder hidden states.

        Args:
            new_states: Decoder hidden states. [N x None]

        Returns:
            states: Decoder hidden states. None

        """
        return None
| 4,095 | 24.128834 | 86 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/abs_decoder.py | """Abstract decoder definition for Transducer models."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
class AbsDecoder(torch.nn.Module, ABC):
    """Abstract decoder module.

    Defines the interface every Transducer decoder implementation must
    provide: batched encoding of label sequences plus single-step scoring
    and hidden-state handling for beam search.

    """

    @abstractmethod
    def forward(self, labels: torch.Tensor) -> torch.Tensor:
        """Encode source label sequences.

        Args:
            labels: Label ID sequences.

        Returns:
            : Decoder output sequences.

        """
        raise NotImplementedError

    @abstractmethod
    def score(
        self,
        label_sequence: List[int],
        states: Union[
            List[Dict[str, torch.Tensor]],
            List[torch.Tensor],
            Tuple[torch.Tensor, Optional[torch.Tensor]],
        ],
    ) -> Tuple[
        torch.Tensor,
        Union[
            List[Dict[str, torch.Tensor]],
            List[torch.Tensor],
            Tuple[torch.Tensor, Optional[torch.Tensor]],
        ],
    ]:
        """One-step forward hypothesis.

        Args:
            label_sequence: Current label sequence.
            states: Decoder hidden states.

        Returns:
            out: Decoder output sequence.
            states: Decoder hidden states.

        """
        raise NotImplementedError

    @abstractmethod
    def batch_score(
        self,
        hyps: List[Any],
    ) -> Tuple[
        torch.Tensor,
        Union[
            List[Dict[str, torch.Tensor]],
            List[torch.Tensor],
            Tuple[torch.Tensor, Optional[torch.Tensor]],
        ],
    ]:
        """One-step forward hypotheses.

        Args:
            hyps: Hypotheses.

        Returns:
            out: Decoder output sequences.
            states: Decoder hidden states.

        """
        raise NotImplementedError

    @abstractmethod
    def set_device(self, device: torch.device) -> None:
        # Fixed annotation: concrete decoders (RNNDecoder, StatelessDecoder)
        # receive a torch.device here, not a tensor.
        """Set GPU device to use.

        Args:
            device: Device ID.

        """
        raise NotImplementedError

    @abstractmethod
    def init_state(
        self, batch_size: int
    ) -> Union[
        List[Dict[str, torch.Tensor]],
        List[torch.Tensor],
        Tuple[torch.Tensor, Optional[torch.Tensor]],
    ]:
        # Fixed annotation: ``torch.Tensor`` is the type; ``torch.tensor`` is
        # the factory function and is not valid in a type position.
        """Initialize decoder states.

        Args:
            batch_size: Batch size.

        Returns:
            : Decoder hidden states.

        """
        raise NotImplementedError

    @abstractmethod
    def select_state(
        self,
        states: Union[
            List[Dict[str, torch.Tensor]],
            List[torch.Tensor],
            Tuple[torch.Tensor, Optional[torch.Tensor]],
        ],
        idx: int = 0,
    ) -> Union[
        List[Dict[str, torch.Tensor]],
        List[torch.Tensor],
        Tuple[torch.Tensor, Optional[torch.Tensor]],
    ]:
        """Get specified ID state from batch of states, if provided.

        Args:
            states: Decoder hidden states.
            idx: State ID to extract.

        Returns:
            : Decoder hidden state for given ID.

        """
        raise NotImplementedError

    @abstractmethod
    def create_batch_states(
        self,
        new_states: List[
            Union[
                List[Dict[str, Optional[torch.Tensor]]],
                List[List[torch.Tensor]],
                Tuple[torch.Tensor, Optional[torch.Tensor]],
            ],
        ],
    ) -> Union[
        List[Dict[str, torch.Tensor]],
        List[torch.Tensor],
        Tuple[torch.Tensor, Optional[torch.Tensor]],
    ]:
        """Create batch of decoder hidden states given a list of new states.

        Args:
            new_states: Decoder hidden states.

        Returns:
            : Decoder hidden states.

        """
        raise NotImplementedError
| 3,755 | 22.622642 | 76 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/mega_decoder.py | """MEGA decoder definition for Transducer models."""
import math
from typing import Dict, List, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr_transducer.activation import get_activation
from espnet2.asr_transducer.beam_search_transducer import Hypothesis
from espnet2.asr_transducer.decoder.abs_decoder import AbsDecoder
from espnet2.asr_transducer.decoder.blocks.mega import MEGA
from espnet2.asr_transducer.decoder.modules.mega.feed_forward import (
NormalizedPositionwiseFeedForward,
)
from espnet2.asr_transducer.normalization import get_normalization
class MEGADecoder(AbsDecoder):
    """MEGA decoder module.

    Based on https://arxiv.org/pdf/2209.10655.pdf.

    Args:
        vocab_size: Vocabulary size.
        block_size: Input/Output size.
        linear_size: NormalizedPositionwiseFeedForward hidden size.
        qk_size: Shared query and key size for attention module.
        v_size: Value size for attention module.
        num_heads: Number of EMA heads.
        rel_pos_bias_type: Type of relative position bias in attention module.
        max_positions: Maximum number of position for RelativePositionBias.
        truncation_length: Maximum length for truncation in EMA module.
        normalization_type: Normalization layer type.
        normalization_args: Normalization layer arguments.
        activation_type: Activation function type.
        activation_args: Activation function arguments.
        chunk_size: Chunk size for attention computation (-1 = full context).
        num_blocks: Number of MEGA blocks.
        dropout_rate: Dropout rate for MEGA internal modules.
        embed_dropout_rate: Dropout rate for embedding layer.
        att_dropout_rate: Dropout rate for the attention module.
        ema_dropout_rate: Dropout rate for the EMA module.
        ffn_dropout_rate: Dropout rate for the feed-forward module.
        embed_pad: Embedding padding symbol ID.
    """
    def __init__(
        self,
        vocab_size: int,
        block_size: int = 512,
        linear_size: int = 1024,
        qk_size: int = 128,
        v_size: int = 1024,
        num_heads: int = 4,
        rel_pos_bias_type: str = "simple",
        max_positions: int = 2048,
        truncation_length: Optional[int] = None,
        normalization_type: str = "layer_norm",
        normalization_args: Dict = {},
        activation_type: str = "swish",
        activation_args: Dict = {},
        chunk_size: int = -1,
        num_blocks: int = 4,
        dropout_rate: float = 0.0,
        embed_dropout_rate: float = 0.0,
        att_dropout_rate: float = 0.0,
        ema_dropout_rate: float = 0.0,
        ffn_dropout_rate: float = 0.0,
        embed_pad: int = 0,
    ) -> None:
        """Construct a MEGADecoder object."""
        super().__init__()
        assert check_argument_types()
        self.embed = torch.nn.Embedding(vocab_size, block_size, padding_idx=embed_pad)
        self.dropout_embed = torch.nn.Dropout(p=embed_dropout_rate)
        activation = get_activation(activation_type, **activation_args)
        norm_class, norm_args = get_normalization(
            normalization_type, **normalization_args
        )
        # Each block is a (MEGA attention, normalized FFN) pair.
        self.mega_blocks = torch.nn.ModuleList(
            [
                torch.nn.ModuleList(
                    [
                        MEGA(
                            block_size,
                            num_heads=num_heads,
                            qk_size=qk_size,
                            v_size=v_size,
                            activation=activation,
                            normalization=norm_class(block_size, **norm_args),
                            rel_pos_bias_type=rel_pos_bias_type,
                            max_positions=max_positions,
                            truncation_length=truncation_length,
                            chunk_size=chunk_size,
                            dropout_rate=dropout_rate,
                            att_dropout_rate=att_dropout_rate,
                            ema_dropout_rate=ema_dropout_rate,
                        ),
                        NormalizedPositionwiseFeedForward(
                            block_size,
                            linear_size,
                            normalization=norm_class(block_size, **norm_args),
                            activation=activation,
                            dropout_rate=ffn_dropout_rate,
                        ),
                    ]
                )
                for _ in range(num_blocks)
            ]
        )
        self.final_norm = norm_class(block_size, **norm_args)
        self.vocab_size = vocab_size
        self.output_size = block_size
        self.chunk_size = chunk_size
        self.mega_num_heads = num_heads
        self.mega_att_k_size = qk_size
        self.mega_att_v_size = v_size
        self.mega_ema_size = block_size
        self.mega_ema_num_heads = num_heads
        self.pad_idx = embed_pad
        self.num_blocks = num_blocks
        # Maps a label-sequence string to its (output, states) pair.
        self.score_cache = {}
        self.device = next(self.parameters()).device
    def forward(self, labels: torch.Tensor) -> torch.Tensor:
        """Encode source label sequences.

        Args:
            labels: Decoder input sequences. (B, L)

        Returns:
            out: Decoder output sequences. (B, U, D_dec)
        """
        batch, length = labels.size()
        # Right-pad the labels so the length is a multiple of chunk_size;
        # chunked attention requires evenly tiled chunks.
        if 0 < self.chunk_size < length and length % self.chunk_size != 0:
            num_paddings = (
                math.ceil(length / self.chunk_size) * self.chunk_size - length
            )
            labels = torch.nn.functional.pad(
                labels, (0, num_paddings), value=self.pad_idx
            )
        else:
            num_paddings = 0
        # Mask out padding positions; position 0 is always kept.
        mask = (labels == self.pad_idx).unsqueeze(1)
        mask[..., 0] = False
        mask = mask.to(device=labels.device, dtype=torch.bool)
        # Upper-triangular attention mask enforces left-to-right decoding
        # within a chunk (or over the full sequence when not chunked).
        _length = self.chunk_size if 0 < self.chunk_size < length else length
        attn_mask = torch.ones(
            (_length, _length), device=labels.device, dtype=torch.bool
        )
        attn_mask = torch.triu(attn_mask, 1, out=attn_mask).unsqueeze(0)
        # MEGA blocks operate on time-major tensors: (L, B, D).
        x = self.dropout_embed(self.embed(labels)).transpose(0, 1)
        for idx, (mega_block, nffn) in enumerate(self.mega_blocks):
            x, _ = mega_block(x, mask=mask, attn_mask=attn_mask)
            x = nffn(x)
        out = self.final_norm(x).transpose(0, 1)
        if num_paddings > 0:
            # Strip the frames introduced by chunk padding.
            out = out[:, :length, :]
        return out
    def inference(
        self,
        labels: torch.Tensor,
        states: List[Dict[str, torch.Tensor]],
    ) -> Tuple[torch.Tensor, List[Dict[str, torch.Tensor]]]:
        """Encode source label sequences.

        Args:
            labels: Decoder input sequences. (B, L)
            states: Decoder hidden states. [B x Dict]

        Returns:
            out: Decoder output sequences. (B, U, D_dec)
            new_states: Decoder hidden states. [B x Dict]
        """
        x = self.embed(labels).transpose(0, 1)
        new_states = []
        for idx, (mega_block, nffn) in enumerate(self.mega_blocks):
            x, new_state = mega_block(x, state=states[idx])
            x = nffn(x)
            new_states.append(new_state)
        out = self.final_norm(x).transpose(0, 1)
        return out, new_states
    def set_device(self, device: torch.device) -> None:
        """Set GPU device to use.

        Args:
            device: Device ID.
        """
        self.device = device
    def score(
        self,
        label_sequence: List[int],
        states: List[Dict[str, torch.Tensor]],
    ) -> Tuple[torch.Tensor, List[Dict[str, torch.Tensor]]]:
        """One-step forward hypothesis.

        Args:
            label_sequence: Current label sequence.
            states: Decoder hidden states. [N x Dict]

        Returns:
            : Decoder output sequence. (D_dec)
            states: Decoder hidden states. [N x Dict]
        """
        str_labels = "_".join(map(str, label_sequence))
        if str_labels in self.score_cache:
            out, states = self.score_cache[str_labels]
        else:
            label = torch.full(
                (1, 1), label_sequence[-1], dtype=torch.long, device=self.device
            )
            out, states = self.inference(label, states=states)
            self.score_cache[str_labels] = (out, states)
        return out[0], states
    def batch_score(
        self, hyps: List[Hypothesis]
    ) -> Tuple[torch.Tensor, List[Dict[str, torch.Tensor]]]:
        """One-step forward hypotheses.

        Args:
            hyps: Hypotheses.

        Returns:
            out: Decoder output sequences. (B, D_dec)
            states: Decoder hidden states. [N x Dict]
        """
        labels = torch.tensor(
            [[h.yseq[-1]] for h in hyps], dtype=torch.long, device=self.device
        )
        states = self.create_batch_states([h.dec_state for h in hyps])
        out, states = self.inference(labels, states=states)
        return out.squeeze(1), states
    def init_state(self, batch_size: int = 0) -> List[Dict[str, torch.Tensor]]:
        """Initialize MEGADecoder states.

        Args:
            batch_size: Batch size.

        Returns:
            states: Decoder hidden states. [N x Dict]
        """
        return [
            {
                "ema_state": torch.zeros(
                    (self.output_size, self.mega_ema_num_heads), device=self.device
                ),
                "prev_key": torch.zeros(
                    (1, 1, self.mega_att_k_size), device=self.device
                ),
                "prev_value": torch.zeros(
                    (1, 1, self.mega_att_v_size), device=self.device
                ),
            }
            for _ in range(self.num_blocks)
        ]
    def select_state(
        self,
        states: List[Dict[str, torch.Tensor]],
        idx: int,
    ) -> List[Dict[str, torch.Tensor]]:
        """Select ID state from batch of decoder hidden states.

        Args:
            states: Decoder hidden states. [N x Dict]
            idx: State ID to extract.

        Returns:
            : Decoder hidden states for given ID. [N x Dict]
        """
        return [
            {
                "ema_state": states[n_b]["ema_state"][idx],
                "prev_key": states[n_b]["prev_key"][idx],
                "prev_value": states[n_b]["prev_value"][idx],
            }
            for n_b in range(self.num_blocks)
        ]
    def stack_qk_states(
        self, state_list: List[torch.Tensor], dim: int
    ) -> List[torch.Tensor]:
        """Stack query or key states with different lengths.

        Shorter states are left-padded with zeros so every state ends at the
        same (most recent) time position.

        Args:
            state_list: List of query or key states.
            dim: Feature dimension of the states.

        Returns:
            new_state: Query/Key state.
        """
        max_len = max([(state.size(0)) for state in state_list])
        new_state = torch.zeros((len(state_list), max_len, dim))
        for idx, state in enumerate(state_list):
            new_state[idx, -state.size(0) :, :] = state
        return new_state
    def create_batch_states(
        self,
        new_states: List[List[Dict[str, torch.Tensor]]],
    ) -> List[Dict[str, torch.Tensor]]:
        """Create batch of decoder hidden states given a list of new states.

        Args:
            new_states: Decoder hidden states. [B x [N x Dict]]

        Returns:
            : Decoder hidden states. [N x Dict]
        """
        return [
            {
                "ema_state": torch.stack(
                    [state[n_b]["ema_state"] for state in new_states]
                ),
                "prev_key": self.stack_qk_states(
                    [state[n_b]["prev_key"] for state in new_states],
                    self.mega_att_k_size,
                ),
                "prev_value": self.stack_qk_states(
                    [state[n_b]["prev_value"] for state in new_states],
                    self.mega_att_v_size,
                ),
            }
            for n_b in range(self.num_blocks)
        ]
| 11,982 | 31.040107 | 86 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/rwkv_decoder.py | """RWKV decoder definition for Transducer models."""
import math
from typing import Dict, List, Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr_transducer.beam_search_transducer import Hypothesis
from espnet2.asr_transducer.decoder.abs_decoder import AbsDecoder
from espnet2.asr_transducer.decoder.blocks.rwkv import RWKV
from espnet2.asr_transducer.normalization import get_normalization
class RWKVDecoder(AbsDecoder):
    """RWKV decoder module.

    Stack of RWKV blocks over an embedding layer, used as the label decoder
    of a Transducer model. Based on https://arxiv.org/pdf/2305.13048.pdf.

    Args:
        vocab_size: Vocabulary size.
        block_size: Input/Output size.
        context_size: Context size for WKV computation.
        linear_size: FeedForward hidden size. (default: 4 * block_size)
        attention_size: SelfAttention hidden size. (default: block_size)
        normalization_type: Normalization layer type.
        normalization_args: Normalization layer arguments.
        num_blocks: Number of RWKV blocks.
        rescale_every: Whether to rescale input every N blocks (inference only).
        embed_dropout_rate: Dropout rate for embedding layer.
        att_dropout_rate: Dropout rate for the attention module.
        ffn_dropout_rate: Dropout rate for the feed-forward module.
        embed_pad: Embedding padding symbol ID.

    """

    def __init__(
        self,
        vocab_size: int,
        block_size: int = 512,
        context_size: int = 1024,
        linear_size: Optional[int] = None,
        attention_size: Optional[int] = None,
        normalization_type: str = "layer_norm",
        normalization_args: Dict = {},
        num_blocks: int = 4,
        rescale_every: int = 0,
        embed_dropout_rate: float = 0.0,
        att_dropout_rate: float = 0.0,
        ffn_dropout_rate: float = 0.0,
        embed_pad: int = 0,
    ) -> None:
        """Construct a RWKVDecoder object."""
        super().__init__()

        assert check_argument_types()

        norm_class, norm_args = get_normalization(
            normalization_type, **normalization_args
        )

        # Fall back to the RWKV paper defaults when sizes are not given.
        linear_size = block_size * 4 if linear_size is None else linear_size
        attention_size = block_size if attention_size is None else attention_size

        self.embed = torch.nn.Embedding(vocab_size, block_size, padding_idx=embed_pad)
        self.dropout_embed = torch.nn.Dropout(p=embed_dropout_rate)

        self.rwkv_blocks = torch.nn.ModuleList(
            [
                RWKV(
                    block_size,
                    linear_size,
                    attention_size,
                    context_size,
                    block_id,
                    num_blocks,
                    normalization_class=norm_class,
                    normalization_args=norm_args,
                    att_dropout_rate=att_dropout_rate,
                    ffn_dropout_rate=ffn_dropout_rate,
                )
                for block_id in range(num_blocks)
            ]
        )

        self.embed_norm = norm_class(block_size, **norm_args)
        self.final_norm = norm_class(block_size, **norm_args)

        self.block_size = block_size
        self.attention_size = attention_size
        self.output_size = block_size
        self.vocab_size = vocab_size
        self.context_size = context_size

        # rescaled_layers is False at construction; rescaling (if any) is
        # presumably applied elsewhere before inference — see self.rescale_every.
        self.rescale_every = rescale_every
        self.rescaled_layers = False

        self.pad_idx = embed_pad
        self.num_blocks = num_blocks

        # Cache for one-step scoring, keyed by the label-sequence string.
        self.score_cache = {}

        self.device = next(self.parameters()).device

    def forward(self, labels: torch.Tensor) -> torch.Tensor:
        """Encode source label sequences.

        Args:
            labels: Decoder input sequences. (B, L)

        Returns:
            x: Decoder output sequences. (B, U, D_dec)

        """
        batch, length = labels.size()

        # The WKV CUDA kernel is compiled for a fixed maximum context size.
        assert (
            length <= self.context_size
        ), "Context size is too short for current length: %d versus %d" % (
            length,
            self.context_size,
        )

        x = self.embed_norm(self.embed(labels))
        x = self.dropout_embed(x)

        for block in self.rwkv_blocks:
            x, _ = block(x)

        x = self.final_norm(x)

        return x

    def inference(
        self,
        labels: torch.Tensor,
        states: List[torch.Tensor],
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """Encode source label sequences.

        Args:
            labels: Decoder input sequences. (B, L)
            states: Decoder hidden states. [5 x (B, D_att/D_dec, N)]

        Returns:
            x: Decoder output sequences. (B, U, D_dec)
            states: Decoder hidden states. [5 x (B, D_att/D_dec, N)]

        """
        x = self.embed_norm(self.embed(labels))

        for idx, block in enumerate(self.rwkv_blocks):
            x, states = block(x, state=states)

            # Compensate for weight rescaling every `rescale_every` blocks
            # (only active when layers were rescaled for inference).
            if self.rescaled_layers and (idx + 1) % self.rescale_every == 0:
                x = x / 2

        x = self.final_norm(x)

        return x, states

    def set_device(self, device: torch.device) -> None:
        """Set GPU device to use.

        Args:
            device: Device ID.

        """
        self.device = device

    def score(
        self,
        label_sequence: List[int],
        states: List[torch.Tensor],
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """One-step forward hypothesis.

        Args:
            label_sequence: Current label sequence.
            states: Decoder hidden states. [5 x (1, 1, D_att/D_dec, N)]

        Returns:
            : Decoder output sequence. (D_dec)
            states: Decoder hidden states. [5 x (1, 1, D_att/D_dec, N)]

        """
        label = torch.full(
            (1, 1), label_sequence[-1], dtype=torch.long, device=self.device
        )

        # (b-flo): FIX ME. Monkey patched for now.
        # Wraps the single-hypothesis state into a batch of one so that
        # inference() can consume it.
        states = self.create_batch_states([states])

        out, states = self.inference(label, states)

        return out[0], states

    def batch_score(
        self, hyps: List[Hypothesis]
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """One-step forward hypotheses.

        Args:
            hyps: Hypotheses.

        Returns:
            out: Decoder output sequence. (B, D_dec)
            states: Decoder hidden states. [5 x (B, 1, D_att/D_dec, N)]

        """
        labels = torch.tensor(
            [[h.yseq[-1]] for h in hyps], dtype=torch.long, device=self.device
        )
        states = self.create_batch_states([h.dec_state for h in hyps])

        out, states = self.inference(labels, states)

        return out.squeeze(1), states

    def init_state(self, batch_size: int = 1) -> List[torch.Tensor]:
        """Initialize RWKVDecoder states.

        Args:
            batch_size: Batch size.

        Returns:
            state: Decoder hidden states. [5 x (B, 1, D_att/D_dec, N)]

        """
        # States 0-1 are the FFN/attention time-shift caches (block size);
        # states 2-4 are the WKV numerator/denominator/max accumulators
        # (attention size).
        hidden_sizes = [
            self.attention_size if i > 1 else self.block_size for i in range(5)
        ]

        state = [
            torch.zeros(
                (batch_size, 1, hidden_sizes[i], self.num_blocks),
                dtype=torch.float32,
                device=self.device,
            )
            for i in range(5)
        ]

        # The max-state accumulator starts at a tiny negative value.
        state[4] -= 1e-30

        return state

    def select_state(
        self,
        states: List[torch.Tensor],
        idx: int,
    ) -> List[torch.Tensor]:
        """Select ID state from batch of decoder hidden states.

        Args:
            states: Decoder hidden states. [5 x (B, 1, D_att/D_dec, N)]
            idx: Hypothesis index within the batch.

        Returns:
            : Decoder hidden states for given ID. [5 x (1, 1, D_att/D_dec, N)]

        """
        return [states[i][idx : idx + 1, ...] for i in range(5)]

    def create_batch_states(
        self,
        new_states: List[List[torch.Tensor]],
    ) -> List[torch.Tensor]:
        """Create batch of decoder hidden states given a list of new states.

        Args:
            new_states: Decoder hidden states. [B x [5 x (1, 1, D_att/D_dec, N)]

        Returns:
            : Decoder hidden states. [5 x (B, 1, D_att/D_dec, N)]

        """
        batch_size = len(new_states)

        return [
            torch.cat([new_states[j][i] for j in range(batch_size)], dim=0)
            for i in range(5)
        ]
| 8,252 | 28.370107 | 86 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/modules/mega/feed_forward.py | """Normalized position-wise feed-forward module for MEGA block."""
import torch
class NormalizedPositionwiseFeedForward(torch.nn.Module):
    """Normalized position-wise feed-forward module for the MEGA block.

    Two linear projections around an activation, with dropout, a residual
    connection, and a trailing normalization.

    Args:
        size: Input/Output size.
        hidden_size: Hidden size.
        normalization: Normalization module.
        activation: Activation function.
        dropout_rate: Dropout rate.

    """

    def __init__(
        self,
        size: int,
        hidden_size: int,
        normalization: torch.nn.Module = torch.nn.LayerNorm,
        activation: torch.nn.Module = torch.nn.ReLU,
        dropout_rate: float = 0.0,
    ) -> None:
        """Construct a NormalizedPositionwiseFeedForward object."""
        super().__init__()

        self.linear1 = torch.nn.Linear(size, hidden_size)
        self.linear2 = torch.nn.Linear(hidden_size, size)

        self.normalization = normalization
        self.activation = activation

        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.hidden_dropout = torch.nn.Dropout(p=dropout_rate)

        self.reset_parameters()

    def reset_parameters(self, val: float = 0.0, std: float = 0.02) -> None:
        """Reset module parameters.

        Args:
            val: Initialization value (mean for weights, constant for biases).
            std: Standard deviation for the weight initialization.

        """
        for linear in (self.linear1, self.linear2):
            torch.nn.init.normal_(linear.weight, mean=val, std=std)
            torch.nn.init.constant_(linear.bias, val)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute feed-forward module.

        Args:
            x: Input sequences. (B, L, size)

        Returns:
            : Output sequences. (B, L, size)

        """
        residual = x

        hidden = self.hidden_dropout(self.activation(self.linear1(x)))
        out = self.dropout(self.linear2(hidden)) + residual

        return self.normalization(out)
| 2,076 | 27.452055 | 79 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/modules/mega/multi_head_damped_ema.py | """Multi-head Damped Exponential Moving Average (EMA) module for MEGA block.
Based/modified from https://github.com/facebookresearch/mega/blob/main/fairseq/modules/moving_average_gated_attention.py
Most variables are renamed according to https://github.com/huggingface/transformers/blob/main/src/transformers/models/mega/modeling_mega.py.
""" # noqa
import math
from typing import Dict, Optional, Tuple, Union
import torch
class MultiHeadDampedEMA(torch.nn.Module):
    """MultiHeadDampedEMA module definition.

    Multi-head damped exponential moving average over the time axis,
    computed either via FFT convolution (training, full sequence) or
    one step at a time with an explicit state (inference).

    Args:
        size: Module size.
        num_heads: Number of attention heads.
        activation: Activation function type.
        truncation_length: Maximum length for truncation.

    """

    def __init__(
        self,
        size: int,
        num_heads: int = 4,
        activation: torch.nn.Module = torch.nn.ReLU(),
        truncation_length: Optional[int] = None,
    ) -> None:
        """Construct an MultiHeadDampedEMA object."""
        super().__init__()

        # EMA parameters: per-dimension, per-head damping/decay factors and
        # the expansion/projection matrices mapping size <-> (size, num_heads).
        self.damping_factor = torch.nn.Parameter(torch.Tensor(size, num_heads, 1))
        self.decay_factor = torch.nn.Parameter(torch.Tensor(size, num_heads, 1))
        self.ema_expansion_matrix = torch.nn.Parameter(torch.Tensor(size, num_heads, 1))
        self.kernel_projection_matrix = torch.nn.Parameter(
            torch.Tensor(size, num_heads)
        )
        self.residual_weight = torch.nn.Parameter(torch.Tensor(size))

        self.scaling = math.sqrt(1.0 / num_heads)
        self.truncation_length = truncation_length
        self.activation = activation

        # Lazily computed caches (invalidated on recomputation).
        self._kernel = None
        self._coeffs = None

        self.num_heads = num_heads

        self.reset_parameters()

    def reset_parameters(
        self, val: float = 0.0, std1: float = 0.2, std2: float = 1.0
    ) -> None:
        """Reset module parameters.

        Args:
            val: Initialization value.
            std1: Main standard deviation.
            std2: Secondary standard deviation.

        """
        with torch.no_grad():
            torch.nn.init.normal_(self.damping_factor, mean=val, std=std1)
            torch.nn.init.normal_(self.decay_factor, mean=val, std=std1)

            # Alternate +1/-1 signs across heads for the expansion matrix.
            ema_exp_val = torch.ones(self.num_heads, 1)
            if self.num_heads > 1:
                idx = torch.tensor(list(range(1, self.num_heads, 2)))
                ema_exp_val.index_fill_(0, idx, -1.0)

            self.ema_expansion_matrix.normal_(mean=val, std=0.02).add_(ema_exp_val)

            torch.nn.init.normal_(self.kernel_projection_matrix, mean=val, std=std2)
            torch.nn.init.normal_(self.residual_weight, mean=val, std=std2)

    def compute_ema_coefficients(self) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute EMA coefficients.

        Args:
            None

        Returns:
            damping_factor: Damping factor / P-th order coefficient.
                (size, num_heads, 1)
            prev_timestep_weight: Previous timestep weight / Q-th order coefficient.
                (size, num_heads, 1)

        """
        self._coeffs = None

        # Sigmoid keeps both factors in (0, 1) so the EMA stays stable.
        damping_factor = torch.sigmoid(self.damping_factor)
        decay_factor = torch.sigmoid(self.decay_factor)

        prev_timestep_weight = 1.0 - damping_factor * decay_factor

        return damping_factor, prev_timestep_weight

    def compute_ema_kernel(self, length: int) -> torch.Tensor:
        """Compute EMA kernel / vandermonde product.

        Args:
            length: Sequence length.

        Returns:
            : EMA kernel / Vandermonde product. (size, L)

        """
        self._kernel = None

        damping_factor, prev_timestep_weight = self.compute_ema_coefficients()

        # Powers of the previous-timestep weight, computed in log space:
        # exp(t * log(q)) == q ** t for t = 0 .. length-1.
        vander = torch.arange(length).to(damping_factor).view(1, 1, length) * torch.log(
            prev_timestep_weight
        )
        kernel = (damping_factor * self.ema_expansion_matrix) * torch.exp(vander)

        # Project heads back to the output dimension.
        return torch.einsum(
            "dnl, dn -> dl", kernel, self.kernel_projection_matrix * self.scaling
        )

    def get_ema_coefficients(self) -> Tuple[torch.Tensor, torch.Tensor]:
        """Get EMA coefficients.

        Args:
            None

        Returns:
            : Damping factor / P-th order coefficient. (size, num_heads, 1)
            : Previous timestep weight / Q-th order coefficient. (size, num_heads, 1)

        """
        # Cached between calls until explicitly recomputed.
        if self._coeffs is None:
            self._coeffs = self.compute_ema_coefficients()

        return self._coeffs

    def ema_one_step(
        self, x: torch.Tensor, state: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Perform exponential moving average for a single step.

        Args:
            x: MultiHeadDampedEMA input sequences. (B, D, 1)
            state: MultiHeadDampedEMA state. (B, D, num_heads)

        Returns:
            out: MultiHeadDamped output sequences. (1, B, D)
            new_state: MultiHeadDampedEMA state. (B, D, num_heads)

        """
        damping_factor, prev_timestep_weight = self.get_ema_coefficients()

        # Recurrence: s_t = p * beta * x_t + q * s_{t-1}
        new_state = (damping_factor * self.ema_expansion_matrix).squeeze(-1) * x

        if state is not None:
            new_state = new_state + prev_timestep_weight.squeeze(-1) * state

        out = torch.einsum(
            "bdn, dn -> bd", new_state, self.kernel_projection_matrix * self.scaling
        )

        return out.unsqueeze(0), new_state

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        state: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Compute multi-dimensional damped EMA.

        Args:
            x: MultiHeadDampedEMA input sequence. (L, B, D)
            mask: Sequence mask. (B, 1, L)
            state: MultiHeadDampedEMA state. (B, D, num_heads)

        Returns:
            x: MultiHeadDampedEMA output sequence. (B, L, D)
            new_state: MultiHeadDampedEMA state (None when no input
                state was given). (B, D, num_heads)

        """
        length = x.size(0)

        residual = x * self.residual_weight

        x = x.permute(1, 2, 0)

        if mask is not None:
            x = x.masked_fill(mask, 0.0)

        # Stateful path (inference): one recurrence step.
        if state is not None:
            ema_output, new_state = self.ema_one_step(x, state=state["ema_state"])
            ema_output = self.activation(ema_output + residual)

            return ema_output, new_state

        # Stateless path (training): convolution with the EMA kernel via FFT.
        kernel = self.compute_ema_kernel(
            length
            if self.truncation_length is None
            else min(self.truncation_length, length)
        )

        # Zero-pad to 2L so the circular FFT convolution equals linear convolution.
        input_fft = torch.fft.rfft(x.float(), n=(2 * length))
        kernel_fft = torch.fft.rfft(kernel.float(), n=(2 * length))

        ema_output = torch.fft.irfft((input_fft * kernel_fft), n=(2 * length))[
            ..., :length
        ]
        ema_output = ema_output.type_as(x)

        ema_output = self.activation(ema_output.permute(2, 0, 1) + residual)

        return ema_output, None
| 7,009 | 30.576577 | 140 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/modules/mega/positional_bias.py | """Positional bias related modules.
Based/modified from https://github.com/facebookresearch/mega/blob/main/fairseq/modules/relative_positional_bias.py
""" # noqa
import math
from typing import Tuple
import torch
class RelativePositionBias(torch.nn.Module):
    """RelativePositionBias module definition.

    Learns one scalar bias for every relative offset in
    ``[-(max_positions - 1), max_positions - 1]``.

    Args:
        max_positions: Maximum number of relative positions.

    """

    def __init__(self, max_positions: int) -> None:
        """Construct a RelativePositionBias object."""
        super().__init__()

        self.max_positions = max_positions

        self.relative_position_bias = torch.nn.Parameter(
            torch.Tensor(2 * self.max_positions - 1)
        )

        self.reset_parameters()

    def reset_parameters(self, val: float = 0.0, std: float = 0.02) -> None:
        """Reset module parameters.

        Args:
            val: Initialization value.
            std: Standard deviation.

        """
        torch.nn.init.normal_(self.relative_position_bias, mean=val, std=std)

    def forward(self, length: int) -> torch.Tensor:
        """Compute relative position bias for a sequence of the given length.

        Args:
            length: Sequence length.

        Returns:
            : Relative position bias matrix. (L, L)

        """
        if length > self.max_positions:
            raise ValueError(
                f"Length {length} is too long for the maximum number of "
                f"allowed positions {self.max_positions}."
            )

        # Biases for the (2L - 1) offsets relevant to the current length.
        window = self.relative_position_bias[
            (self.max_positions - length) : (self.max_positions + length - 1)
        ]

        # Pad-tile-reshape trick: produce an (L, 3L - 2) matrix whose rows are
        # the padded bias vector shifted by one position per row, then crop the
        # central columns to get the (L, L) relative-position matrix.
        padded = torch.nn.functional.pad(window, (0, length))
        strided = torch.tile(padded, (length,))[:-length]
        strided = strided.view(length, (3 * length - 2))

        first = (2 * length - 1) // 2
        last = strided.size(1) - first

        return strided[:, first:last]
class RotaryRelativePositionBias(torch.nn.Module):
    """RotaryRelativePositionBias module definition.

    Produces an (L, L) bias matrix from two learned vectors embedded with
    rotary positional encodings. The sinusoid tables are cached and grown
    lazily when a longer sequence is seen.

    Args:
        size: Module embedding size.
        max_positions: Maximum number of relative positions.

    """

    def __init__(self, size: int, max_positions: int = 2048) -> None:
        """Construct a RotaryRelativePositionBias object."""
        super().__init__()

        # Plain (non-buffer) sinusoid tables; moved to the right device
        # lazily in rotary() via the `_pe` buffer below.
        self.sine, self.cosine = RotaryRelativePositionBias.get_sinusoid_embeddings(
            max_positions, size
        )

        self.alpha = torch.nn.Parameter(torch.Tensor(1, size))
        self.beta = torch.nn.Parameter(torch.Tensor(1, size))

        # Dummy buffer used only to track the module's device/dtype.
        self.register_buffer("_pe", torch.FloatTensor(1))

        self.size = size
        self.max_positions = max_positions

        self.reset_parameters()

    def reset_parameters(self, val: float = 0.0, std: float = 0.02) -> None:
        """Reset module parameters.

        Args:
            val: Initialization value.
            std: Standard deviation.

        """
        torch.nn.init.normal_(self.alpha, mean=val, std=std)
        torch.nn.init.normal_(self.beta, mean=val, std=std)

    @staticmethod
    def get_sinusoid_embeddings(
        max_positions: int,
        size: int,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute sinusoidal positional embeddings.

        Args:
            max_positions: Maximum number of positions.
            size: Input size.

        Returns:
            : Sine elements. (max_positions, size // 2)
            : Cos elements. (max_positions, size // 2)

        """
        half_size = size // 2

        # Standard transformer sinusoid frequencies: 10000^(-i / half_size).
        emb = math.log(10000) / half_size
        emb = torch.exp(torch.arange(half_size, dtype=torch.float) * -emb)
        emb = torch.arange(max_positions, dtype=torch.float).unsqueeze(
            1
        ) * emb.unsqueeze(0)

        return torch.sin(emb), torch.cos(emb)

    def rotary(self, x: torch.Tensor) -> torch.Tensor:
        """Compute rotary positional embeddings.

        Args:
            x: Input sequence. (L, size)

        Returns:
            x: Rotary positional embeddings. (L, size)

        """
        length, dim = x.size()

        x1, x2 = torch.chunk(x, 2, dim=-1)

        # Grow the cached sinusoid tables when a longer sequence shows up.
        if self.sine is None or length > self.sine.size(0):
            self.sine, self.cosine = RotaryRelativePositionBias.get_sinusoid_embeddings(
                length, dim
            )
            self.max_positions = length

        # Keep the tables on the module's current device/dtype.
        self.sine = self.sine.to(self._pe)
        self.cosine = self.cosine.to(self._pe)

        sin = self.sine[:length]
        cos = self.cosine[:length]

        # Rotation of each (x1, x2) pair by the position-dependent angle.
        x = torch.cat([x1 * cos - x2 * sin, x2 * cos + x1 * sin], dim=1)

        return x

    def forward(self, length: int) -> torch.Tensor:
        """Compute rotary relative position bias.

        Args:
            length: Sequence length.

        Returns:
            bias: Rotary relative position bias. (L, L)

        """
        alpha = self.rotary(self.alpha.expand(length, self.size))
        beta = self.rotary(self.beta.expand(length, self.size))

        # Pairwise dot products of the rotated vectors give the bias matrix.
        bias = torch.einsum("mk, nk -> mn", alpha, beta)

        return bias
| 5,037 | 26.232432 | 114 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/modules/rwkv/feed_forward.py | """Feed-forward (channel mixing) module for RWKV block.
Based/Modified from https://github.com/BlinkDL/RWKV-LM/blob/main/RWKV-v4/src/model.py
Some variables are renamed according to https://github.com/huggingface/transformers/blob/main/src/transformers/models/rwkv/modeling_rwkv.py.
""" # noqa
from typing import List, Optional, Tuple
import torch
class FeedForward(torch.nn.Module):
    """FeedForward (channel mixing) module for the RWKV block.

    Mixes the current and previous timestep per channel, then applies a
    squared-ReLU key projection gated by a sigmoid receptance.

    Args:
        size: Input/Output size.
        hidden_size: Hidden size.
        block_id: Block index.
        num_blocks: Number of blocks in the architecture.

    """

    def __init__(
        self, size: int, hidden_size: int, block_id: int, num_blocks: int
    ) -> None:
        """Construct a FeedForward object."""
        super().__init__()

        # Shift the sequence by one timestep (zero-padded at the start).
        self.time_shift = torch.nn.ZeroPad2d((0, 0, 1, -1))

        self.time_mix_key = torch.nn.Parameter(torch.empty(1, 1, size))
        self.time_mix_receptance = torch.nn.Parameter(torch.empty(1, 1, size))

        self.proj_key = torch.nn.Linear(size, hidden_size, bias=True)
        self.proj_value = torch.nn.Linear(hidden_size, size, bias=True)
        self.proj_receptance = torch.nn.Linear(size, size, bias=True)

        self.block_id = block_id

        self.reset_parameters(size, block_id, num_blocks)

    def reset_parameters(self, size: int, block_id: int, num_blocks: int) -> None:
        """Reset module parameters.

        Args:
            size: Block size.
            block_id: Block index.
            num_blocks: Number of blocks in the architecture.

        """
        # Deeper blocks (larger block_id) get exponents closer to zero.
        ratio_1_to_almost0 = 1.0 - (block_id / num_blocks)

        # Channel-wise ramp in [0, 1), raised to the per-block exponent.
        ramp = torch.ones(1, 1, size)
        for channel in range(size):
            ramp[0, 0, channel] = channel / size

        with torch.no_grad():
            self.time_mix_key.data = torch.pow(ramp, ratio_1_to_almost0)
            self.time_mix_receptance.data = torch.pow(ramp, ratio_1_to_almost0)

    def forward(
        self, x: torch.Tensor, state: Optional[List[torch.Tensor]] = None
    ) -> Tuple[torch.Tensor, Optional[List[torch.Tensor]]]:
        """Compute channel mixing.

        Args:
            x: FeedForward input sequences. (B, U, size)
            state: Decoder hidden state. [5 x (B, 1, size, N)]

        Returns:
            : FeedForward output sequences. (B, U, size)
            state: Decoder hidden state. [5 x (B, 1, size, N)]

        """
        if state is None:
            # Training / full-sequence mode: previous timestep via zero-pad shift.
            shifted_x = self.time_shift(x)
        else:
            # Inference mode: previous timestep comes from the cached state.
            shifted_x = state[0][..., self.block_id]

        key = x * self.time_mix_key + shifted_x * (1 - self.time_mix_key)
        receptance = x * self.time_mix_receptance + shifted_x * (
            1 - self.time_mix_receptance
        )

        key = torch.square(torch.relu(self.proj_key(key)))
        value = self.proj_value(key)
        receptance = torch.sigmoid(self.proj_receptance(receptance))

        if state is not None:
            # Remember the current input as next step's "previous timestep".
            state[0][..., self.block_id] = x

        return receptance * value, state
| 3,038 | 30.329897 | 140 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/modules/rwkv/attention.py | """Attention (time mixing) modules for RWKV block.
Based/Modified from https://github.com/BlinkDL/RWKV-LM/blob/main/RWKV-v4/src/model.py.
Some variables are renamed according to https://github.com/huggingface/transformers/blob/main/src/transformers/models/rwkv/modeling_rwkv.py.
""" # noqa
import math
from importlib.util import find_spec
from pathlib import Path
from typing import List, Optional, Tuple, Union
import torch
wkv_kernel = None
class WKVLinearAttention(torch.autograd.Function):
    """WKVLinearAttention function definition.

    Custom autograd Function dispatching the WKV (weighted key-value)
    recurrence to the compiled CUDA kernel (module-level ``wkv_kernel``,
    set up by ``load_wkv_kernel``).
    """

    @staticmethod
    def forward(
        ctx,
        time_decay: torch.Tensor,
        time_first: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
    ) -> torch.Tensor:
        """WKVLinearAttention function forward pass.

        Args:
            time_decay: Channel-wise time decay vector. (D_att)
            time_first: Channel-wise time first vector. (D_att)
            key: Key tensor. (B, U, D_att)
            value: Value tensor. (B, U, D_att)

        Returns:
            out: Weighted Key-Value tensor. (B, U, D_att)

        """
        batch, length, dim = key.size()

        # The kernel is compiled for a fixed maximum context (Tmax).
        assert length <= wkv_kernel.context_size, (
            f"Cannot process key of length {length} while context_size "
            f"is ({wkv_kernel.context_size}). Limit should be increased."
        )

        # Kernel launch constraint on the per-block thread count.
        assert batch * dim % min(dim, 32) == 0, (
            f"batch size ({batch}) by dimension ({dim}) should be a multiple of "
            f"{min(dim, 32)}"
        )

        ctx.input_dtype = key.dtype

        # The kernel expects the (negated, exponentiated) decay and
        # contiguous inputs.
        time_decay = -torch.exp(time_decay.contiguous())
        time_first = time_first.contiguous()
        key = key.contiguous()
        value = value.contiguous()

        out = torch.empty_like(key, memory_format=torch.contiguous_format)

        wkv_kernel.forward(time_decay, time_first, key, value, out)
        ctx.save_for_backward(time_decay, time_first, key, value, out)

        return out

    @staticmethod
    def backward(
        ctx, grad_output: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """WKVLinearAttention function backward pass.

        Args:
            grad_output: Output gradient. (B, U, D_att)

        Returns:
            grad_time_decay: Gradient for channel-wise time decay vector. (D_att)
            grad_time_first: Gradient for channel-wise time first vector. (D_att)
            grad_key: Gradient for key tensor. (B, U, D_att)
            grad_value: Gradient for value tensor. (B, U, D_att)

        """
        time_decay, time_first, key, value, output = ctx.saved_tensors
        grad_dtype = ctx.input_dtype

        batch, _, dim = key.size()

        # The kernel produces per-batch gradients for the shared time
        # parameters; they are summed over the batch below.
        grad_time_decay = torch.empty(
            (batch, dim),
            memory_format=torch.contiguous_format,
            dtype=time_decay.dtype,
            device=time_decay.device,
        )

        grad_time_first = torch.empty(
            (batch, dim),
            memory_format=torch.contiguous_format,
            dtype=time_decay.dtype,
            device=time_decay.device,
        )

        grad_key = torch.empty_like(key, memory_format=torch.contiguous_format)
        grad_value = torch.empty_like(value, memory_format=torch.contiguous_format)

        wkv_kernel.backward(
            time_decay,
            time_first,
            key,
            value,
            output,
            grad_output.contiguous(),
            grad_time_decay,
            grad_time_first,
            grad_key,
            grad_value,
        )

        # Reduce the per-batch parameter gradients to a single vector each.
        grad_time_decay = torch.sum(grad_time_decay, dim=0)
        grad_time_first = torch.sum(grad_time_first, dim=0)

        return (
            grad_time_decay,
            grad_time_first,
            grad_key,
            grad_value,
        )
def load_wkv_kernel(context_size: int) -> None:
    """Load WKV CUDA kernel.

    JIT-compiles the CUDA sources under ``cuda/`` for the given maximum
    context size and stores the resulting module in the module-level
    ``wkv_kernel`` global. Requires CUDA and the ``ninja`` build tool.

    Args:
        context_size: Context size (compiled into the kernel as ``Tmax``).

    Raises:
        ImportError: If ``ninja`` is not installed or CUDA is unavailable.

    """
    from torch.utils.cpp_extension import load

    global wkv_kernel

    # Reuse an already-compiled kernel for the same context size.
    if wkv_kernel is not None and wkv_kernel.context_size == context_size:
        return

    if find_spec("ninja") is None:
        raise ImportError(
            "Ninja package was not found. WKV kernel module can't be loaded "
            "for training. Please, 'pip install ninja' in your environment."
        )

    if not torch.cuda.is_available():
        raise ImportError(
            "CUDA is currently a requirement for WKV kernel loading. "
            "Please set your devices properly and launch again."
        )

    kernel_folder = Path(__file__).resolve().parent / "cuda"
    kernel_files = [kernel_folder / f for f in ["wkv_op.cpp", "wkv_cuda.cu"]]

    # Tmax bakes the maximum sequence length into the compiled kernel.
    kernel_cflags = [
        "-t 4",
        "-std=c++17",
        "-res-usage",
        "--maxrregcount 60",
        "--use_fast_math",
        "-O3",
        "-Xptxas -O3",
        "--extra-device-vectorization",
        f"-DTmax={context_size}",
    ]

    wkv_kernel = load(
        name=f"wkv_{context_size}",
        sources=kernel_files,
        verbose=False,
        extra_cuda_cflags=kernel_cflags,
    )
    wkv_kernel.context_size = context_size
class SelfAttention(torch.nn.Module):
    """SelfAttention (time mixing) module definition.

    Args:
        size: Input/Output size.
        attention_size: Attention hidden size.
        context_size: Context size for WKV kernel.
        block_id: Block index.
        num_blocks: Number of blocks in the architecture.

    """

    def __init__(
        self,
        size: int,
        attention_size: int,
        context_size: int,
        block_id: int,
        num_blocks: int,
    ) -> None:
        """Construct a SelfAttention object."""
        super().__init__()

        # Compile (or reuse) the WKV CUDA kernel for this context size.
        load_wkv_kernel(context_size)

        # Shift the sequence by one timestep (zero-padded at the start).
        self.time_shift = torch.nn.ZeroPad2d((0, 0, 1, -1))

        self.time_decay = torch.nn.Parameter(torch.empty(attention_size))
        self.time_first = torch.nn.Parameter(torch.empty(attention_size))

        self.time_mix_key = torch.nn.Parameter(torch.empty(1, 1, size))
        self.time_mix_value = torch.nn.Parameter(torch.empty(1, 1, size))
        self.time_mix_receptance = torch.nn.Parameter(torch.empty(1, 1, size))

        self.proj_key = torch.nn.Linear(size, attention_size, bias=True)
        self.proj_value = torch.nn.Linear(size, attention_size, bias=True)
        self.proj_receptance = torch.nn.Linear(size, attention_size, bias=True)

        self.proj_output = torch.nn.Linear(attention_size, size, bias=True)

        self.block_id = block_id

        self.reset_parameters(size, attention_size, block_id, num_blocks)

    def reset_parameters(
        self, size: int, attention_size: int, block_id: int, num_blocks: int
    ) -> None:
        """Reset module parameters.

        Args:
            size: Block size.
            attention_size: Attention hidden size.
            block_id: Block index.
            num_blocks: Number of blocks in the architecture.

        """
        ratio_0_to_1 = block_id / (num_blocks - 1)
        ratio_1_to_almost0 = 1.0 - (block_id / num_blocks)

        # Channel-wise ramp in [0, 1) for the time-mix exponents.
        time_weight = torch.ones(1, 1, size)
        for i in range(size):
            time_weight[0, 0, i] = i / size

        # Per-channel decay speeds spread between -5 and 3.
        decay_speed = [
            -5 + 8 * (h / (attention_size - 1)) ** (0.7 + 1.3 * ratio_0_to_1)
            for h in range(attention_size)
        ]
        decay_speed = torch.tensor(
            decay_speed, dtype=self.time_decay.dtype, device=self.time_decay.device
        )

        # Alternating (+0.5, -0.5, 0) pattern added to time_first.
        zigzag = (
            torch.tensor(
                [(i + 1) % 3 - 1 for i in range(attention_size)],
                dtype=self.time_first.dtype,
                device=self.time_first.device,
            )
            * 0.5
        )

        with torch.no_grad():
            self.time_decay.data = decay_speed

            # Bug fix: the previous code wrapped the whole expression in
            # torch.ones_like(...), which produced an all-ones tensor and
            # discarded the intended log(0.3) + zigzag initialization
            # (cf. the RWKV reference implementations).
            self.time_first.data = (
                torch.ones_like(self.time_first) * math.log(0.3) + zigzag
            )

            self.time_mix_key.data = torch.pow(time_weight, ratio_1_to_almost0)
            self.time_mix_value.data = (
                torch.pow(time_weight, ratio_1_to_almost0) + 0.3 * ratio_0_to_1
            )
            self.time_mix_receptance.data = torch.pow(
                time_weight, 0.5 * ratio_1_to_almost0
            )

    @torch.no_grad()
    def wkv_linear_attention(
        self,
        time_decay: torch.Tensor,
        time_first: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        state: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
        """Compute WKV with state (i.e.: for inference).

        Numerically stable streaming formulation: numerator/denominator are
        tracked together with a running maximum exponent.

        Args:
            time_decay: Channel-wise time decay vector. (D_att)
            time_first: Channel-wise time first vector. (D_att)
            key: Key tensor. (B, 1, D_att)
            value: Value tensor. (B, 1, D_att)
            state: Decoder hidden states. [3 x (B, D_att)]

        Returns:
            wkv: Weighted Key-Value. (B, 1, D_att)
            state: Decoder hidden states. [3 x (B, 1, D_att)]

        """
        num_state, den_state, max_state = state

        # Output uses the "time_first" bonus for the current timestep.
        max_for_output = torch.maximum(max_state, (time_first + key))

        e1 = torch.exp(max_state - max_for_output)
        e2 = torch.exp((time_first + key) - max_for_output)

        numerator = e1 * num_state + e2 * value
        denominator = e1 * den_state + e2

        # State update applies the per-channel decay instead of the bonus.
        max_for_state = torch.maximum(key, (max_state + time_decay))

        e1 = torch.exp((max_state + time_decay) - max_for_state)
        e2 = torch.exp(key - max_for_state)

        wkv = numerator / denominator
        state = [e1 * num_state + e2 * value, e1 * den_state + e2, max_for_state]

        return wkv, state

    def forward(
        self,
        x: torch.Tensor,
        state: Optional[List[torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Optional[List[torch.Tensor]]]:
        """Compute time mixing.

        Args:
            x: SelfAttention input sequences. (B, U, size)
            state: Decoder hidden states. [5 x (B, 1, D_att, N)]

        Returns:
            x: SelfAttention output sequences. (B, U, size)
            state: Updated decoder hidden states. [5 x (B, 1, D_att, N)]

        """
        shifted_x = (
            self.time_shift(x) if state is None else state[1][..., self.block_id]
        )

        key = x * self.time_mix_key + shifted_x * (1 - self.time_mix_key)
        value = x * self.time_mix_value + shifted_x * (1 - self.time_mix_value)
        receptance = x * self.time_mix_receptance + shifted_x * (
            1 - self.time_mix_receptance
        )

        key = self.proj_key(key)
        value = self.proj_value(value)
        receptance = torch.sigmoid(self.proj_receptance(receptance))

        if state is not None:
            # Streaming path: one WKV step, updating the cached accumulators.
            state[1][..., self.block_id] = x

            wkv, att_state = self.wkv_linear_attention(
                self.time_decay,
                self.time_first,
                key,
                value,
                tuple(s[..., self.block_id] for s in state[2:]),
            )

            state[2][..., self.block_id] = att_state[0]
            state[3][..., self.block_id] = att_state[1]
            state[4][..., self.block_id] = att_state[2]
        else:
            # Full-sequence path: WKV recurrence via the CUDA kernel.
            wkv = WKVLinearAttention.apply(self.time_decay, self.time_first, key, value)

        x = self.proj_output(receptance * wkv)

        return x, state
| 11,396 | 29.886179 | 140 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/blocks/rwkv.py | """Receptance Weighted Key Value (RWKV) block definition.
Based/modified from https://github.com/BlinkDL/RWKV-LM/blob/main/RWKV-v4/src/model.py
"""
from typing import Dict, Optional, Tuple
import torch
from espnet2.asr_transducer.decoder.modules.rwkv.attention import SelfAttention
from espnet2.asr_transducer.decoder.modules.rwkv.feed_forward import FeedForward
class RWKV(torch.nn.Module):
    """Receptance Weighted Key Value (RWKV) block.

    Pre-norm residual composition of a time-mixing (SelfAttention) module
    followed by a channel-mixing (FeedForward) module.

    Args:
        size: Input/Output size.
        linear_size: Feed-forward hidden size.
        attention_size: SelfAttention hidden size.
        context_size: Context size for WKV computation.
        block_id: Block index.
        num_blocks: Number of blocks in the architecture.
        normalization_class: Normalization layer class.
        normalization_args: Normalization layer arguments.
        att_dropout_rate: Dropout rate for the attention module.
        ffn_dropout_rate: Dropout rate for the feed-forward module.

    """

    def __init__(
        self,
        size: int,
        linear_size: int,
        attention_size: int,
        context_size: int,
        block_id: int,
        num_blocks: int,
        normalization_class: torch.nn.Module = torch.nn.LayerNorm,
        normalization_args: Dict = {},
        att_dropout_rate: float = 0.0,
        ffn_dropout_rate: float = 0.0,
    ) -> None:
        """Construct a RWKV object."""
        super().__init__()

        self.layer_norm_att = normalization_class(size, **normalization_args)
        self.layer_norm_ffn = normalization_class(size, **normalization_args)

        self.att = SelfAttention(
            size, attention_size, context_size, block_id, num_blocks
        )
        self.dropout_att = torch.nn.Dropout(p=att_dropout_rate)

        self.ffn = FeedForward(size, linear_size, block_id, num_blocks)
        self.dropout_ffn = torch.nn.Dropout(p=ffn_dropout_rate)

    def forward(
        self,
        x: torch.Tensor,
        state: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Compute receptance weighted key value.

        Args:
            x: RWKV input sequences. (B, L, size)
            state: Decoder hidden states. [5 x (B, D_att/size, N)]

        Returns:
            x: RWKV output sequences. (B, L, size)
            state: Decoder hidden states. [5 x (B, D_att/size, N)]

        """
        # Time mixing with a pre-norm residual connection.
        att_out, state = self.att(self.layer_norm_att(x), state=state)
        x = x + self.dropout_att(att_out)

        # Channel mixing with a pre-norm residual connection.
        ffn_out, state = self.ffn(self.layer_norm_ffn(x), state=state)
        x = x + self.dropout_ffn(ffn_out)

        return x, state
| 2,602 | 30.743902 | 85 | py |
espnet | espnet-master/espnet2/asr_transducer/decoder/blocks/mega.py | """Moving Average Equipped Gated Attention (MEGA) block definition.
Based/modified from https://github.com/facebookresearch/mega/blob/main/fairseq/modules/moving_average_gated_attention.py
Most variables are renamed according to https://github.com/huggingface/transformers/blob/main/src/transformers/models/mega/modeling_mega.py.
""" # noqa
from typing import Dict, Optional, Tuple
import torch
from espnet2.asr_transducer.decoder.modules.mega.multi_head_damped_ema import (
MultiHeadDampedEMA,
)
from espnet2.asr_transducer.decoder.modules.mega.positional_bias import (
RelativePositionBias,
RotaryRelativePositionBias,
)
class MEGA(torch.nn.Module):
    """MEGA module.
    Args:
        size: Input/Output size.
        num_heads: Number of EMA heads.
        qk_size: Shared query and key size for attention module.
        v_size: Value size for attention module.
        qk_v_size: (QK, V) sizes for attention module.
        activation: Activation function type.
        normalization: Normalization module.
        rel_pos_bias_type: Type of relative position bias in attention module.
        max_positions: Maximum number of position for RelativePositionBias.
        truncation_length: Maximum length for truncation in EMA module.
        chunk_size: Chunk size for attention computation (-1 = full context).
        dropout_rate: Dropout rate for inner modules.
        att_dropout_rate: Dropout rate for the attention module.
        ema_dropout_rate: Dropout rate for the EMA module.
    """
    def __init__(
        self,
        size: int = 512,
        num_heads: int = 4,
        qk_size: int = 128,
        v_size: int = 1024,
        activation: torch.nn.Module = torch.nn.ReLU(),
        normalization: torch.nn.Module = torch.nn.LayerNorm,
        rel_pos_bias_type: str = "simple",
        max_positions: int = 2048,
        truncation_length: Optional[int] = None,
        chunk_size: int = -1,
        dropout_rate: float = 0.0,
        att_dropout_rate: float = 0.0,
        ema_dropout_rate: float = 0.0,
    ) -> None:
        """Construct a MEGA object."""
        super().__init__()
        self.multihead_damped_ema = MultiHeadDampedEMA(
            size,
            num_heads=num_heads,
            activation=activation,
            truncation_length=truncation_length,
        )
        # With chunk-wise attention, relative positions never exceed the chunk.
        if chunk_size > 0:
            max_positions = chunk_size
        if rel_pos_bias_type == "rotary":
            self.rel_pos_bias = RotaryRelativePositionBias(qk_size, max_positions)
        elif rel_pos_bias_type == "simple":
            self.rel_pos_bias = RelativePositionBias(max_positions)
        else:
            raise ValueError(
                "Only 'rotary' and 'simple' are valid values for rel_pos_bias_type"
            )
        self.proj_v = torch.nn.Linear(size, v_size)
        # proj_mx jointly produces the residual weights, the QK/gate features
        # and the intermediate state in one projection (split in forward()).
        self.proj_mx = torch.nn.Linear(size, qk_size + v_size + 2 * size)
        self.proj_h = torch.nn.Linear(v_size, size)
        # Row 0 / row 1 of qk_weight and qk_bias produce queries / keys
        # respectively (see torch.unbind in forward()).
        self.qk_weight = torch.nn.Parameter(torch.Tensor(2, qk_size))
        self.qk_bias = torch.nn.Parameter(torch.Tensor(2, qk_size))
        self.scaling = qk_size**-0.5
        self.activation = activation
        self.normalization = normalization
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.dropout_attn = torch.nn.Dropout(p=att_dropout_rate)
        self.dropout_ema = torch.nn.Dropout(p=ema_dropout_rate)
        self.qk_size = qk_size
        self.v_size = v_size
        self.size = size
        self.chunk_size = chunk_size
        self.reset_parameters()
    def reset_parameters(self, val: float = 0.0, std: float = 0.02) -> None:
        """Reset module parameters.
        Args:
            val: Initialization value (mean of the normal init / bias value).
            std: Standard deviation.
        """
        torch.nn.init.normal_(self.proj_v.weight, mean=val, std=std)
        torch.nn.init.constant_(self.proj_v.bias, val)
        torch.nn.init.normal_(self.proj_mx.weight, mean=val, std=std)
        torch.nn.init.constant_(self.proj_mx.bias, val)
        torch.nn.init.normal_(self.proj_h.weight, mean=val, std=std)
        torch.nn.init.constant_(self.proj_h.bias, val)
        torch.nn.init.normal_(self.qk_weight, mean=val, std=std)
        torch.nn.init.constant_(self.qk_bias, val)
    def softmax_attention(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Compute attention weights with softmax.
        Args:
            query: Query tensor. (B, 1, L, D)
            key: Key tensor. (B, 1, L, D)
            mask: Sequence mask. (B, 1, L)
            attn_mask: Attention mask. (1, L, L)
        Returns:
            attn_weights: Attention weights. (B, 1, L, L)
        """
        length = key.size(2)
        bias = self.rel_pos_bias(length)
        if length != query.size(2):
            # Query is shorter than key (incremental decoding): keep only the
            # bias row of the last position.
            bias = bias[-1:]
        query = query * self.scaling
        qk = torch.matmul(query, key.transpose(2, 3)) + bias
        if attn_mask is not None:
            qk = qk.masked_fill(attn_mask.unsqueeze(1), float("-inf"))
        if mask is not None:
            # Rows that are entirely padded are left unmasked so that softmax
            # never operates on an all -inf row (which would produce NaN).
            mask_all = mask.all(dim=-1, keepdim=True)
            mask = torch.logical_and(mask, ~mask_all)
            qk = qk.masked_fill(mask.unsqueeze(2), float("-inf"))
        # Softmax in float32 for numerical stability, then cast back.
        attn_weights = torch.softmax(qk, dim=-1, dtype=torch.float32).type_as(qk)
        return attn_weights
    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        attn_mask: Optional[torch.Tensor] = None,
        state: Optional[Dict[str, Optional[torch.Tensor]]] = None,
    ) -> Tuple[torch.Tensor, Optional[Dict[str, Optional[torch.Tensor]]]]:
        """Compute moving average equipped gated attention.
        Args:
            x: MEGA input sequences. (L, B, size)
            mask: MEGA input sequence masks. (B, 1, L)
            attn_mask: MEGA attention mask. (1, L, L)
            state: Decoder hidden states.
        Returns:
            x: MEGA output sequences. (L, B, size)
            state: Decoder hidden states.
        """
        length, batch, size = x.size()
        residual = x
        value = self.activation(self.proj_v(x))
        ema_output, ema_state = self.multihead_damped_ema(x, mask=mask, state=state)
        ema_output = self.dropout_ema(ema_output)
        base = self.proj_mx(ema_output)
        # Split the joint projection: residual gate / QK+attention gate /
        # intermediate state (sizes set up in __init__).
        residual_weight, qk_gates, intermediate_state = torch.split(
            base, [self.size, self.qk_size + self.v_size, self.size], dim=-1
        )
        residual_weight = torch.sigmoid(residual_weight)
        qk, att_gate = torch.split(
            self.activation(qk_gates), [self.qk_size, self.v_size], dim=-1
        )
        # Shared QK features are mapped to separate query/key via the two rows
        # of qk_weight / qk_bias.
        qk = qk.unsqueeze(2) * self.qk_weight + self.qk_bias
        query, key = torch.unbind(qk, dim=2)
        # (L, B, D) -> (B, L, D) for attention computation.
        query = query.transpose(0, 1)
        key = key.transpose(0, 1)
        value = value.transpose(0, 1)
        if state is not None:
            if state["prev_key"] is not None:
                key = torch.cat([state["prev_key"], key], dim=1)
            if state["prev_value"] is not None:
                value = torch.cat([state["prev_value"], value], dim=1)
            if self.chunk_size > 0 and (key.size(1) % self.chunk_size) == 0:
                # (b-flo): In the original version, the Q and K states are deleted when
                # reaching chunk_size (i.e. set to None). It's an issue for beam-batched
                # decoding algorithms where we stack states of different lengths/paths.
                # Until revision, we keep the last predicted Q and K instead.
                state = {
                    "prev_key": key[:, -1:, :],
                    "prev_value": value[:, -1:, :],
                    "ema_state": ema_state,
                }
            else:
                state = {"prev_key": key, "prev_value": value, "ema_state": ema_state}
        if self.chunk_size <= 0:
            # Full-context attention: single "chunk" dimension.
            query = query.unsqueeze(1)
            key = key.unsqueeze(1)
            value = value.unsqueeze(1)
        else:
            # Chunk-wise attention: fold the time axis into
            # (num_chunks, chunk_size) when long enough.
            ctx_size = key.size(1)
            if length < self.chunk_size:
                query = query.unsqueeze(1)
            else:
                num_chunks = length // self.chunk_size
                query = query.reshape(batch, num_chunks, self.chunk_size, self.qk_size)
            if ctx_size < self.chunk_size:
                key = key.unsqueeze(1)
                value = value.unsqueeze(1)
            else:
                num_chunks = ctx_size // self.chunk_size
                key = key.reshape(batch, num_chunks, self.chunk_size, self.qk_size)
                value = value.reshape(batch, num_chunks, self.chunk_size, self.v_size)
                if mask is not None:
                    mask = mask.view(batch, num_chunks, self.chunk_size)
        attn_weights = self.softmax_attention(
            query, key, mask=mask, attn_mask=attn_mask
        )
        value = self.dropout(value)
        kernel = self.dropout_attn(attn_weights)
        # Back to (L, B, v_size) before gating and projection.
        weighted_self_out = (
            torch.matmul(kernel, value).view(batch, length, self.v_size).transpose(0, 1)
        )
        weighted_self_out = self.dropout(
            self.activation(
                intermediate_state + self.proj_h(weighted_self_out * att_gate)
            )
        )
        # Gated residual: residual + residual_weight * (out - residual).
        x = torch.addcmul(residual, residual_weight, weighted_self_out - residual)
        # NOTE(review): `self.normalization` is called here as a module; the
        # default value `torch.nn.LayerNorm` is a *class*, which suggests
        # callers are expected to pass an instantiated module — confirm.
        x = self.normalization(x)
        return x, state
| 9,571 | 32.704225 | 140 | py |
espnet | espnet-master/espnet2/schedulers/noam_lr.py | """Noam learning rate scheduler module."""
import warnings
from typing import Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
from typeguard import check_argument_types
from espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler
class NoamLR(_LRScheduler, AbsBatchStepScheduler):
    """Noam learning rate scheduler.

    Ref:
        "Attention Is All You Need", https://arxiv.org/pdf/1706.03762.pdf

    FIXME(kamo): PyTorch doesn't provide _LRScheduler as public class,
    thus the behaviour isn't guaranteed at forward PyTorch version.

    NOTE(kamo): Unlike the original implementation, "model_size" here is a
    plain constant, not derived from the model; update it when the model
    dimension changes.
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        model_size: Union[int, float] = 320,
        warmup_steps: Union[int, float] = 25000,
        last_epoch: int = -1,
    ):
        assert check_argument_types()
        self.model_size = model_size
        self.warmup_steps = warmup_steps

        # Tell users the equivalent WarmupLR configuration, since NoamLR
        # is deprecated in favor of it.
        current_lr = list(optimizer.param_groups)[0]["lr"]
        new_lr = self.lr_for_WarmupLR(current_lr)
        warnings.warn(
            f"NoamLR is deprecated. "
            f"Use WarmupLR(warmup_steps={warmup_steps}) with Optimizer(lr={new_lr})",
        )

        # super().__init__() invokes step() once, so every field read by
        # get_lr() must already be set at this point.
        super().__init__(optimizer, last_epoch)

    def lr_for_WarmupLR(self, lr: float) -> float:
        """Return the WarmupLR base lr equivalent to this Noam configuration."""
        return lr / self.model_size**0.5 / self.warmup_steps**0.5

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(model_size={self.model_size}, "
            f"warmup_steps={self.warmup_steps})"
        )

    def get_lr(self):
        step_num = self.last_epoch + 1
        # Linear warmup up to warmup_steps, then inverse-sqrt decay.
        decay = min(step_num**-0.5, step_num * self.warmup_steps**-1.5)
        return [lr * self.model_size**-0.5 * decay for lr in self.base_lrs]
| 2,068 | 30.348485 | 85 | py |
espnet | espnet-master/espnet2/schedulers/abs_scheduler.py | from abc import ABC, abstractmethod
import torch.optim.lr_scheduler as L
class AbsScheduler(ABC):
    """Common interface for LR schedulers: step/state_dict/load_state_dict."""
    @abstractmethod
    def step(self, epoch: int = None):
        pass
    @abstractmethod
    def state_dict(self):
        pass
    @abstractmethod
    def load_state_dict(self, state):
        pass
# If you need to define custom scheduler, please inherit these classes
class AbsBatchStepScheduler(AbsScheduler):
    """Scheduler whose step() is invoked after every training batch."""
    @abstractmethod
    def step(self, epoch: int = None):
        pass
    @abstractmethod
    def state_dict(self):
        pass
    @abstractmethod
    def load_state_dict(self, state):
        pass
class AbsEpochStepScheduler(AbsScheduler):
    """Scheduler whose step() is invoked once per epoch."""
    @abstractmethod
    def step(self, epoch: int = None):
        pass
    @abstractmethod
    def state_dict(self):
        pass
    @abstractmethod
    def load_state_dict(self, state):
        pass
class AbsValEpochStepScheduler(AbsEpochStepScheduler):
    """Epoch scheduler whose step() also receives a validation metric."""
    @abstractmethod
    def step(self, val, epoch: int = None):
        pass
    @abstractmethod
    def state_dict(self):
        pass
    @abstractmethod
    def load_state_dict(self, state):
        pass
# Create alias type to check the type
# Note(kamo): Currently PyTorch doesn't provide the base class
# to judge these classes.
AbsValEpochStepScheduler.register(L.ReduceLROnPlateau)
# Register torch's built-in epoch-level schedulers as virtual subclasses.
# (Fix: L.MultiStepLR was previously listed twice; registration is
# idempotent, so the duplicate entry was redundant.)
for s in [
    L.ReduceLROnPlateau,
    L.LambdaLR,
    L.StepLR,
    L.MultiStepLR,
    L.ExponentialLR,
    L.CosineAnnealingLR,
]:
    AbsEpochStepScheduler.register(s)
AbsBatchStepScheduler.register(L.CyclicLR)
for s in [
    L.OneCycleLR,
    L.CosineAnnealingWarmRestarts,
]:
    AbsBatchStepScheduler.register(s)
| 1,664 | 18.821429 | 70 | py |
espnet | espnet-master/espnet2/schedulers/warmup_reducelronplateau.py | """ReduceLROnPlateau (with Warm up) learning rate scheduler module."""
from typing import Union
import torch
from torch import inf
from typeguard import check_argument_types
from espnet2.schedulers.abs_scheduler import (
AbsBatchStepScheduler,
AbsValEpochStepScheduler,
)
class WarmupReduceLROnPlateau(AbsBatchStepScheduler, AbsValEpochStepScheduler):
    """The WarmupReduceLROnPlateau scheduler.
    This scheduler is the combination of WarmupLR and ReduceLROnPlateau:
    WarmupLR:
        lr = optimizer.lr * warmup_step ** 0.5
             * min(step ** -0.5, step * warmup_step ** -1.5)
    WarmupReduceLROnPlateau:
        if step <= warmup_step:
            lr = optimizer.lr * warmup_step ** 0.5
                 * min(step ** -0.5, step * warmup_step ** -1.5)
        else:
            lr = (
                optimizer.lr * factor
                if no improvement for a 'patience' number of epochs
                else optimizer.lr
            )
    Note that the maximum lr equals to optimizer.lr in this scheduler.
    """
    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        # for WarmupLR
        warmup_steps: Union[int, float] = 25000,
        # for ReduceLROnPlateau
        mode="min",
        factor=0.1,
        patience=10,
        threshold=1e-4,
        threshold_mode="rel",
        cooldown=0,
        min_lr=0,
        eps=1e-8,
        verbose=False,
    ):
        assert check_argument_types()
        self.warmup_steps = warmup_steps
        self.step_num = 0
        # lr scale grows linearly with step_num, reaching 1.0 at warmup_steps.
        self.lr_scale = warmup_steps**-1
        # Initialize base learning rates
        for group in optimizer.param_groups:
            if "initial_lr" not in group:
                group.setdefault("initial_lr", group["lr"])
        self.base_lrs = [group["initial_lr"] for group in optimizer.param_groups]
        if factor >= 1.0:
            raise ValueError("Factor should be < 1.0.")
        self.factor = factor
        # Attach optimizer
        self.optimizer = optimizer
        if isinstance(min_lr, list) or isinstance(min_lr, tuple):
            if len(min_lr) != len(optimizer.param_groups):
                raise ValueError(
                    "expected {} min_lrs, got {}".format(
                        len(optimizer.param_groups), len(min_lr)
                    )
                )
            self.min_lrs = list(min_lr)
        else:
            self.min_lrs = [min_lr] * len(optimizer.param_groups)
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0
        self.mode = mode
        self.threshold = threshold
        self.threshold_mode = threshold_mode
        self.best = None
        self.num_bad_epochs = None
        self.mode_worse = None  # the worse value for the chosen mode
        self.eps = eps
        self.last_epoch = 0
        self._init_is_better(
            mode=mode, threshold=threshold, threshold_mode=threshold_mode
        )
        self._reset()
    def __repr__(self):
        return (
            f"{self.__class__.__name__}(warmup_steps={self.warmup_steps}, "
            f"mode={self.mode}, factor={self.factor}, patience={self.patience}"
        )
    def step(self, metrics=None, epoch=None):
        """Advance the scheduler.

        Call without `metrics` once per batch (warmup phase) and with
        `metrics` once per epoch (plateau phase).
        """
        if metrics is None:
            # WarmupLR
            self.step_num += 1
            if self.step_num <= self.warmup_steps:
                for param_group, lr in zip(self.optimizer.param_groups, self.base_lrs):
                    param_group["lr"] = lr * self.lr_scale * self.step_num
        else:
            # ReduceLROnPlateau
            self._step_reducelronplateau(metrics, epoch=epoch)
    def _reset(self):
        """Resets num_bad_epochs counter and cooldown counter."""
        self.best = self.mode_worse
        self.cooldown_counter = 0
        self.num_bad_epochs = 0
    def _step_reducelronplateau(self, metrics=None, epoch=None):
        """Per-epoch plateau logic: reduce lr after `patience` bad epochs."""
        # convert `metrics` to float, in case it's a zero-dim Tensor
        current = float(metrics)
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch
        if self.is_better(current, self.best):
            self.best = current
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
        if self.in_cooldown:
            self.cooldown_counter -= 1
            self.num_bad_epochs = 0  # ignore any bad epochs in cooldown
        if self.num_bad_epochs > self.patience:
            self._reduce_lr(epoch)
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0
        self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
    def _reduce_lr(self, epoch):
        """Multiply each group's lr by `factor`, clipped at its min_lr."""
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group["lr"])
            new_lr = max(old_lr * self.factor, self.min_lrs[i])
            # Skip negligible updates (smaller than eps).
            if old_lr - new_lr > self.eps:
                param_group["lr"] = new_lr
                if self.verbose:
                    epoch_str = ("%.2f" if isinstance(epoch, float) else "%.5d") % epoch
                    print(
                        "Epoch {}: reducing learning rate"
                        " of group {} to {:.4e}.".format(epoch_str, i, new_lr)
                    )
    @property
    def in_cooldown(self):
        # True while recent lr reductions suppress the bad-epoch counter.
        return self.cooldown_counter > 0
    def is_better(self, a, best):
        """Return True if metric `a` improves on `best` per mode/threshold."""
        if self.mode == "min" and self.threshold_mode == "rel":
            rel_epsilon = 1.0 - self.threshold
            return a < best * rel_epsilon
        elif self.mode == "min" and self.threshold_mode == "abs":
            return a < best - self.threshold
        elif self.mode == "max" and self.threshold_mode == "rel":
            rel_epsilon = self.threshold + 1.0
            return a > best * rel_epsilon
        else:  # mode == 'max' and epsilon_mode == 'abs':
            return a > best + self.threshold
    def _init_is_better(self, mode, threshold, threshold_mode):
        """Validate mode settings and set the sentinel worst value."""
        if mode not in {"min", "max"}:
            raise ValueError("mode " + mode + " is unknown!")
        if threshold_mode not in {"rel", "abs"}:
            raise ValueError("threshold mode " + threshold_mode + " is unknown!")
        if mode == "min":
            self.mode_worse = inf
        else:  # mode == 'max':
            self.mode_worse = -inf
        self.mode = mode
        self.threshold = threshold
        self.threshold_mode = threshold_mode
    def state_dict(self):
        # Exclude the optimizer itself from checkpoints.
        return {
            key: value for key, value in self.__dict__.items() if key != "optimizer"
        }
    def load_state_dict(self, state_dict):
        self.__dict__.update(state_dict)
        self._init_is_better(
            mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode
        )
| 6,832 | 32.826733 | 88 | py |
espnet | espnet-master/espnet2/schedulers/warmup_lr.py | """Warm up learning rate scheduler module."""
from typing import Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
from typeguard import check_argument_types
from espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler
class WarmupLR(_LRScheduler, AbsBatchStepScheduler):
    """Warmup learning rate scheduler.

    Almost identical to NoamLR except that the peak lr equals optimizer.lr:

        lr = optimizer.lr * warmup_step ** 0.5
             * min(step ** -0.5, step * warmup_step ** -1.5)
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        warmup_steps: Union[int, float] = 25000,
        last_epoch: int = -1,
    ):
        assert check_argument_types()
        self.warmup_steps = warmup_steps

        # super().__init__() invokes step() once, so every field read by
        # get_lr() must already be set at this point.
        super().__init__(optimizer, last_epoch)

    def __repr__(self):
        return f"{self.__class__.__name__}(warmup_steps={self.warmup_steps})"

    def get_lr(self):
        step_num = self.last_epoch + 1
        # Linear warmup up to warmup_steps, then inverse-sqrt decay.
        decay = min(step_num**-0.5, step_num * self.warmup_steps**-1.5)
        return [lr * self.warmup_steps**0.5 * decay for lr in self.base_lrs]
| 1,495 | 28.333333 | 86 | py |
espnet | espnet-master/espnet2/schedulers/warmup_step_lr.py | """Step (with Warm up) learning rate scheduler module."""
from typing import Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
from typeguard import check_argument_types
from espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler
class WarmupStepLR(_LRScheduler, AbsBatchStepScheduler):
    """Warmup + step-decay learning rate scheduler.

    Combines WarmupLR and StepLR:

        if step <= warmup_step:
            lr = optimizer.lr * warmup_step ** 0.5
                 * min(step ** -0.5, step * warmup_step ** -1.5)
        else:
            lr = optimizer.lr * (gamma ** (epoch // step_size))

    The maximum lr equals optimizer.lr.
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        # for WarmupLR
        warmup_steps: Union[int, float] = 25000,
        # for StepLR
        steps_per_epoch: int = 10000,
        step_size: int = 1,
        gamma: float = 0.1,
        last_epoch: int = -1,
    ):
        assert check_argument_types()
        self.warmup_steps = warmup_steps
        self.step_num = 0
        self.epoch_num = 0
        # NOTE: This number should be adjusted accordingly
        # once batch_size/ngpu/num_nodes is changed.
        # To get the exact number of iterations per epoch, refer to
        # https://github.com/espnet/espnet/discussions/4404
        self.steps_per_epoch = steps_per_epoch
        self.warmup_epoch = warmup_steps // steps_per_epoch
        # lr scale grows linearly with step_num, reaching 1.0 at warmup_steps.
        self.lr_scale = warmup_steps**-1
        # After warmup, decrease lr by `gamma` every `step_size` epochs.
        self.step_size = step_size
        self.gamma = gamma

        # super().__init__() invokes step() once, so every field read by
        # get_lr() must already be set at this point.
        super().__init__(optimizer, last_epoch)

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(warmup_steps={self.warmup_steps}, "
            f"steps_per_epoch={self.steps_per_epoch},"
            f" step_size={self.step_size}, gamma={self.gamma})"
        )

    def get_lr(self):
        self.step_num += 1
        if self.step_num % self.steps_per_epoch == 0:
            self.epoch_num += 1
        if self.step_num <= self.warmup_steps:
            # Linear warmup towards the base learning rates.
            return [lr * self.lr_scale * self.step_num for lr in self.base_lrs]
        # Step decay every `step_size` epochs after warmup.
        decay = self.gamma ** ((self.epoch_num - self.warmup_epoch) // self.step_size)
        return [lr * decay for lr in self.base_lrs]
| 2,699 | 31.53012 | 88 | py |
espnet | espnet-master/espnet2/utils/sized_dict.py | import collections
import sys
from torch import multiprocessing
def get_size(obj, seen=None):
    """Recursively compute the total memory footprint of ``obj`` in bytes.

    Adapted from https://github.com/bosswissam/pysize

    Args:
        obj: Object to measure.
        seen: Set of already-counted object ids (used internally for
            recursion; each object is counted at most once).

    Returns:
        Approximate size of ``obj`` plus its reachable contents, in bytes.
    """
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        return 0
    # Mark as seen *before* recursing so self-referential objects terminate.
    seen.add(obj_id)
    total = sys.getsizeof(obj)
    if isinstance(obj, dict):
        total += sum(
            get_size(key, seen) + get_size(val, seen) for key, val in obj.items()
        )
    elif hasattr(obj, "__dict__"):
        total += get_size(obj.__dict__, seen)
    elif isinstance(obj, (list, set, tuple)):
        total += sum(get_size(elem, seen) for elem in obj)
    return total
class SizedDict(collections.abc.MutableMapping):
    """Mapping that tracks the approximate total byte size of its contents.

    Args:
        shared: If True, back the mapping with a multiprocessing Manager dict
            so it can be shared between processes.
        data: Optional initial key/value pairs.
    """
    def __init__(self, shared: bool = False, data: dict = None):
        if data is None:
            data = {}
        if shared:
            # NOTE(kamo): Don't set manager as a field because Manager, which includes
            # weakref object, causes following error with method="spawn",
            # "TypeError: can't pickle weakref objects"
            self.cache = multiprocessing.Manager().dict(**data)
        else:
            # NOTE(review): `manager` only exists on this non-shared path, so
            # `self.manager` is missing when shared=True — confirm intended.
            self.manager = None
            self.cache = dict(**data)
        # Running total in bytes, maintained by __setitem__/__delitem__.
        # NOTE(review): entries passed via `data` are NOT reflected here
        # (size starts at 0 regardless) — confirm intended.
        self.size = 0
    def __setitem__(self, key, value):
        # Key size is counted only on first insertion; on overwrite, only the
        # old value's size is subtracted.
        if key in self.cache:
            self.size -= get_size(self.cache[key])
        else:
            self.size += sys.getsizeof(key)
        self.size += get_size(value)
        self.cache[key] = value
    def __getitem__(self, key):
        return self.cache[key]
    def __delitem__(self, key):
        # Remove both value and key contributions from the running total.
        self.size -= get_size(self.cache[key])
        self.size -= sys.getsizeof(key)
        del self.cache[key]
    def __iter__(self):
        return iter(self.cache)
    def __contains__(self, key):
        return key in self.cache
    def __len__(self):
        return len(self.cache)
| 2,027 | 25.684211 | 86 | py |
espnet | espnet-master/espnet2/utils/griffin_lim.py | #!/usr/bin/env python3
"""Griffin-Lim related modules."""
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import logging
from functools import partial
from typing import Optional
import librosa
import numpy as np
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
EPS = 1e-10
def logmel2linear(
    lmspc: np.ndarray,
    fs: int,
    n_fft: int,
    n_mels: int,
    fmin: int = None,
    fmax: int = None,
) -> np.ndarray:
    """Convert a log Mel filterbank into a linear spectrogram.

    Args:
        lmspc: Log Mel filterbank (T, n_mels).
        fs: Sampling frequency.
        n_fft: The number of FFT points.
        n_mels: The number of mel basis.
        fmin: Minimum frequency to analyze (defaults to 0).
        fmax: Maximum frequency to analyze (defaults to fs / 2).

    Returns:
        Linear spectrogram (T, n_fft // 2 + 1).
    """
    assert lmspc.shape[1] == n_mels
    if fmin is None:
        fmin = 0
    if fmax is None:
        fmax = fs / 2
    # Undo the log10 compression, then pseudo-invert the mel filterbank.
    mel_spec = np.power(10.0, lmspc)
    mel_basis = librosa.filters.mel(
        sr=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax
    )
    inv_basis = np.linalg.pinv(mel_basis)
    # Clip at EPS to keep the resulting spectrogram strictly positive.
    return np.maximum(EPS, np.dot(inv_basis, mel_spec.T).T)
def griffin_lim(
    spc: np.ndarray,
    n_fft: int,
    n_shift: int,
    win_length: int = None,
    window: Optional[str] = "hann",
    n_iter: Optional[int] = 32,
) -> np.ndarray:
    """Convert linear spectrogram into waveform using Griffin-Lim.

    Args:
        spc: Linear spectrogram (T, n_fft // 2 + 1).
        n_fft: The number of FFT points.
        n_shift: Shift size in points.
        win_length: Window length in points.
        window: Window function type.
        n_iter: The number of iterations.

    Returns:
        Reconstructed waveform (N,).
    """
    # assert the size of input linear spectrogram
    assert spc.shape[1] == n_fft // 2 + 1
    if V(librosa.__version__) >= V("0.7.0"):
        # use librosa's fast Griffin-Lim algorithm
        spc = np.abs(spc.T)
        y = librosa.griffinlim(
            S=spc,
            n_iter=n_iter,
            hop_length=n_shift,
            win_length=win_length,
            window=window,
            center=True if spc.shape[1] > 1 else False,
        )
    else:
        # use slower version of Griffin-Lim algorithm
        logging.warning(
            "librosa version is old. use slow version of Grriffin-Lim algorithm."
            "if you want to use fast Griffin-Lim, please update librosa via "
            "`source ./path.sh && pip install librosa==0.7.0`."
        )
        # FIX: the `np.complex` alias was deprecated in NumPy 1.20 and removed
        # in 1.24; use the explicit np.complex128 dtype instead.
        cspc = np.abs(spc).astype(np.complex128).T
        # Start from random phases and refine them iteratively.
        angles = np.exp(2j * np.pi * np.random.rand(*cspc.shape))
        y = librosa.istft(cspc * angles, n_shift, win_length, window=window)
        for i in range(n_iter):
            angles = np.exp(
                1j
                * np.angle(librosa.stft(y, n_fft, n_shift, win_length, window=window))
            )
            y = librosa.istft(cspc * angles, n_shift, win_length, window=window)
    return y
# TODO(kan-bayashi): write as torch.nn.Module
class Spectrogram2Waveform(object):
    """Convert a (log-Mel or linear) spectrogram into a waveform."""

    def __init__(
        self,
        n_fft: int,
        n_shift: int,
        fs: int = None,
        n_mels: int = None,
        win_length: int = None,
        window: Optional[str] = "hann",
        fmin: int = None,
        fmax: int = None,
        griffin_lim_iters: Optional[int] = 8,
    ):
        """Initialize module.

        Args:
            fs: Sampling frequency.
            n_fft: The number of FFT points.
            n_shift: Shift size in points.
            n_mels: The number of mel basis (None = input is already linear).
            win_length: Window length in points.
            window: Window function type.
            fmin: Minimum frequency to analyze.
            fmax: Maximum frequency to analyze.
            griffin_lim_iters: The number of Griffin-Lim iterations.
        """
        assert check_argument_types()
        self.fs = fs
        # Mel -> linear conversion is only needed when n_mels is given.
        if n_mels is None:
            self.logmel2linear = None
        else:
            self.logmel2linear = partial(
                logmel2linear, fs=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax
            )
        self.griffin_lim = partial(
            griffin_lim,
            n_fft=n_fft,
            n_shift=n_shift,
            win_length=win_length,
            window=window,
            n_iter=griffin_lim_iters,
        )
        # Keep the configuration around for __repr__.
        self.params = dict(
            n_fft=n_fft,
            n_shift=n_shift,
            win_length=win_length,
            window=window,
            n_iter=griffin_lim_iters,
        )
        if n_mels is not None:
            self.params.update(fs=fs, n_mels=n_mels, fmin=fmin, fmax=fmax)

    def __repr__(self):
        body = "".join(f"{key}={value}, " for key, value in self.params.items())
        return f"{self.__class__.__name__}({body})"

    def __call__(self, spc: torch.Tensor) -> torch.Tensor:
        """Convert spectrogram to waveform.

        Args:
            spc: Log Mel filterbank (T_feats, n_mels)
                or linear spectrogram (T_feats, n_fft // 2 + 1).

        Returns:
            Tensor: Reconstructed waveform (T_wav,).
        """
        device = spc.device
        dtype = spc.dtype
        spec = spc.cpu().numpy()
        if self.logmel2linear is not None:
            spec = self.logmel2linear(spec)
        wav = self.griffin_lim(spec)
        # Restore the input tensor's device and dtype.
        return torch.tensor(wav).to(device=device, dtype=dtype)
| 5,607 | 28.208333 | 86 | py |
espnet | espnet-master/espnet2/tasks/enh_tse.py | import argparse
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.enh.espnet_model_tse import ESPnetExtractionModel
from espnet2.enh.extractor.abs_extractor import AbsExtractor
from espnet2.enh.extractor.td_speakerbeam_extractor import TDSpeakerBeamExtractor
from espnet2.tasks.abs_task import AbsTask
from espnet2.tasks.enh import (
criterion_choices,
decoder_choices,
encoder_choices,
loss_wrapper_choices,
)
from espnet2.torch_utils.initialize import initialize
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import AbsPreprocessor, TSEPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
# Available target-speaker extractor modules, selectable via --extractor.
extractor_choices = ClassChoices(
    name="extractor",
    classes=dict(
        td_speakerbeam=TDSpeakerBeamExtractor,
    ),
    type_check=AbsExtractor,
    default="td_speakerbeam",
)
# Available preprocessors, selectable via --preprocessor.
preprocessor_choices = ClassChoices(
    name="preprocessor",
    classes=dict(
        tse=TSEPreprocessor,
    ),
    type_check=AbsPreprocessor,
    default="tse",
)
# Upper bound on the number of enrollment/reference entries per sample.
MAX_REFERENCE_NUM = 100
class TargetSpeakerExtractionTask(AbsTask):
# If you need more than one optimizers, change this value
num_optimizers: int = 1
class_choices_list = [
# --encoder and --encoder_conf
encoder_choices,
# --extractor and --extractor_conf
extractor_choices,
# --decoder and --decoder_conf
decoder_choices,
# --preprocessor and --preprocessor_conf
preprocessor_choices,
]
# If you need to modify train() or eval() procedures, change Trainer class here
trainer = Trainer
    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add task-specific CLI arguments (model, criterions, preprocessing)."""
        group = parser.add_argument_group(description="Task related")
        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        # required = parser.get_default("required")
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetExtractionModel),
            help="The keyword arguments for model class.",
        )
        group.add_argument(
            "--criterions",
            action=NestedDictAction,
            default=[
                {
                    "name": "si_snr",
                    "conf": {},
                    "wrapper": "fixed_order",
                    "wrapper_conf": {},
                },
            ],
            help="The criterions binded with the loss wrappers.",
        )
        group = parser.add_argument_group(description="Preprocess related")
        # TSE-specific preprocessing options.
        group.add_argument(
            "--train_spk2enroll",
            type=str_or_none,
            default=None,
            help="The scp file containing the mapping from speakerID to enrollment\n"
            "(This is used to sample the target-speaker enrollment signal)",
        )
        group.add_argument(
            "--enroll_segment",
            type=int_or_none,
            default=None,
            help="Truncate the enrollment audio to the specified length if not None",
        )
        group.add_argument(
            "--load_spk_embedding",
            type=str2bool,
            default=False,
            help="Whether to load speaker embeddings instead of enrollments",
        )
        group.add_argument(
            "--load_all_speakers",
            type=str2bool,
            default=False,
            help="Whether to load target-speaker for all speakers in each sample",
        )
        # inherited from EnhPreprocessor
        group.add_argument(
            "--rir_scp",
            type=str_or_none,
            default=None,
            help="The file path of rir scp file.",
        )
        group.add_argument(
            "--rir_apply_prob",
            type=float,
            default=1.0,
            help="THe probability for applying RIR convolution.",
        )
        group.add_argument(
            "--noise_scp",
            type=str_or_none,
            default=None,
            help="The file path of noise scp file.",
        )
        group.add_argument(
            "--noise_apply_prob",
            type=float,
            default=1.0,
            help="The probability applying Noise adding.",
        )
        group.add_argument(
            "--noise_db_range",
            type=str,
            default="13_15",
            help="The range of signal-to-noise ratio (SNR) level in decibel.",
        )
        group.add_argument(
            "--short_noise_thres",
            type=float,
            default=0.5,
            help="If len(noise) / len(speech) is smaller than this threshold during "
            "dynamic mixing, a warning will be displayed.",
        )
        group.add_argument(
            "--speech_volume_normalize",
            type=str_or_none,
            default=None,
            help="Scale the maximum amplitude to the given value or range. "
            "e.g. --speech_volume_normalize 1.0 scales it to 1.0.\n"
            "--speech_volume_normalize 0.5_1.0 scales it to a random number in "
            "the range [0.5, 1.0)",
        )
        group.add_argument(
            "--use_reverberant_ref",
            type=str2bool,
            default=False,
            help="Whether to use reverberant speech references "
            "instead of anechoic ones",
        )
        group.add_argument(
            "--num_spk",
            type=int,
            default=1,
            help="Number of speakers in the input signal.",
        )
        group.add_argument(
            "--num_noise_type",
            type=int,
            default=1,
            help="Number of noise types.",
        )
        group.add_argument(
            "--sample_rate",
            type=int,
            default=8000,
            help="Sampling rate of the data (in Hz).",
        )
        group.add_argument(
            "--force_single_channel",
            type=str2bool,
            default=False,
            help="Whether to force all data to be single-channel.",
        )
        group.add_argument(
            "--channel_reordering",
            type=str2bool,
            default=False,
            help="Whether to randomly reorder the channels of the "
            "multi-channel signals.",
        )
        group.add_argument(
            "--categories",
            nargs="+",
            default=[],
            type=str,
            help="The set of all possible categories in the dataset. Used to add the "
            "category information to each sample",
        )
        # Register --<name> / --<name>_conf options for each module choice.
        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
@classmethod
def build_collate_fn(
cls, args: argparse.Namespace, train: bool
) -> Callable[
[Collection[Tuple[str, Dict[str, np.ndarray]]]],
Tuple[List[str], Dict[str, torch.Tensor]],
]:
assert check_argument_types()
return CommonCollateFn(float_pad_value=0.0, int_pad_value=0)
@classmethod
def build_preprocess_fn(
cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
assert check_argument_types()
retval = TSEPreprocessor(
train=train,
train_spk2enroll=args.train_spk2enroll,
enroll_segment=getattr(args, "enroll_segment", None),
load_spk_embedding=getattr(args, "load_spk_embedding", False),
load_all_speakers=getattr(args, "load_all_speakers", False),
# inherited from EnhPreprocessor
rir_scp=getattr(args, "rir_scp", None),
rir_apply_prob=getattr(args, "rir_apply_prob", 1.0),
noise_scp=getattr(args, "noise_scp", None),
noise_apply_prob=getattr(args, "noise_apply_prob", 1.0),
noise_db_range=getattr(args, "noise_db_range", "13_15"),
short_noise_thres=getattr(args, "short_noise_thres", 0.5),
speech_volume_normalize=getattr(args, "speech_volume_normalize", None),
use_reverberant_ref=getattr(args, "use_reverberant_ref", None),
num_spk=getattr(args, "num_spk", 1),
num_noise_type=getattr(args, "num_noise_type", 1),
sample_rate=getattr(args, "sample_rate", 8000),
force_single_channel=getattr(args, "force_single_channel", False),
channel_reordering=getattr(args, "channel_reordering", False),
categories=getattr(args, "categories", None),
)
assert check_return_type(retval)
return retval
@classmethod
def required_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
if not inference:
retval = ("speech_mix", "enroll_ref1", "speech_ref1")
else:
# Inference mode
retval = ("speech_mix", "enroll_ref1")
return retval
@classmethod
def optional_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
retval = ["enroll_ref{}".format(n) for n in range(2, MAX_REFERENCE_NUM + 1)]
if "speech_ref1" in retval:
retval += [
"speech_ref{}".format(n) for n in range(2, MAX_REFERENCE_NUM + 1)
]
else:
retval += [
"speech_ref{}".format(n) for n in range(1, MAX_REFERENCE_NUM + 1)
]
retval += ["category"]
retval = tuple(retval)
assert check_return_type(retval)
return retval
@classmethod
def build_model(cls, args: argparse.Namespace) -> ESPnetExtractionModel:
assert check_argument_types()
encoder = encoder_choices.get_class(args.encoder)(**args.encoder_conf)
extractor = extractor_choices.get_class(args.extractor)(
encoder.output_dim, **args.extractor_conf
)
decoder = decoder_choices.get_class(args.decoder)(**args.decoder_conf)
loss_wrappers = []
if getattr(args, "criterions", None) is not None:
# This check is for the compatibility when load models
# that packed by older version
for ctr in args.criterions:
criterion_conf = ctr.get("conf", {})
criterion = criterion_choices.get_class(ctr["name"])(**criterion_conf)
loss_wrapper = loss_wrapper_choices.get_class(ctr["wrapper"])(
criterion=criterion, **ctr["wrapper_conf"]
)
loss_wrappers.append(loss_wrapper)
# 1. Build model
model = ESPnetExtractionModel(
encoder=encoder,
extractor=extractor,
decoder=decoder,
loss_wrappers=loss_wrappers,
**args.model_conf
)
# FIXME(kamo): Should be done in model?
# 2. Initialize
if args.init is not None:
initialize(model, args.init)
assert check_return_type(model)
return model
| 11,924 | 33.665698 | 86 | py |
espnet | espnet-master/espnet2/tasks/enh_s2t.py | import argparse
import copy
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.ctc import CTC
from espnet2.asr.espnet_model import ESPnetASRModel
from espnet2.diar.espnet_model import ESPnetDiarizationModel
from espnet2.enh.espnet_enh_s2t_model import ESPnetEnhS2TModel
from espnet2.enh.espnet_model import ESPnetEnhancementModel
from espnet2.tasks.abs_task import AbsTask
from espnet2.tasks.asr import ASRTask
from espnet2.tasks.asr import decoder_choices as asr_decoder_choices_
from espnet2.tasks.asr import encoder_choices as asr_encoder_choices_
from espnet2.tasks.asr import frontend_choices, normalize_choices
from espnet2.tasks.asr import postencoder_choices as asr_postencoder_choices_
from espnet2.tasks.asr import preencoder_choices as asr_preencoder_choices_
from espnet2.tasks.asr import specaug_choices
from espnet2.tasks.diar import DiarizationTask
from espnet2.tasks.diar import attractor_choices as diar_attractor_choices_
from espnet2.tasks.diar import decoder_choices as diar_decoder_choices_
from espnet2.tasks.diar import encoder_choices as diar_encoder_choices_
from espnet2.tasks.diar import frontend_choices as diar_front_end_choices_
from espnet2.tasks.diar import label_aggregator_choices
from espnet2.tasks.diar import normalize_choices as diar_normalize_choices_
from espnet2.tasks.diar import specaug_choices as diar_specaug_choices_
from espnet2.tasks.enh import EnhancementTask
from espnet2.tasks.enh import decoder_choices as enh_decoder_choices_
from espnet2.tasks.enh import encoder_choices as enh_encoder_choices_
from espnet2.tasks.enh import mask_module_choices as enh_mask_module_choices_
from espnet2.tasks.enh import separator_choices as enh_separator_choices_
from espnet2.tasks.st import STTask
from espnet2.tasks.st import decoder_choices as st_decoder_choices_
from espnet2.tasks.st import encoder_choices as st_encoder_choices_
from espnet2.tasks.st import extra_asr_decoder_choices as st_extra_asr_decoder_choices_
from espnet2.tasks.st import extra_mt_decoder_choices as st_extra_mt_decoder_choices_
from espnet2.tasks.st import postencoder_choices as st_postencoder_choices_
from espnet2.tasks.st import preencoder_choices as st_preencoder_choices_
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.torch_utils.initialize import initialize
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import (
CommonPreprocessor,
CommonPreprocessor_multi,
MutliTokenizerCommonPreprocessor,
)
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
def _clone_class_choices(choices, new_name):
    """Deep-copy a shared ClassChoices registry under a subtask-specific name."""
    clone = copy.deepcopy(choices)
    clone.name = new_name
    return clone


# Enhancement
enh_encoder_choices = _clone_class_choices(enh_encoder_choices_, "enh_encoder")
enh_decoder_choices = _clone_class_choices(enh_decoder_choices_, "enh_decoder")
enh_separator_choices = _clone_class_choices(enh_separator_choices_, "enh_separator")
enh_mask_module_choices = _clone_class_choices(
    enh_mask_module_choices_, "enh_mask_module"
)
# ASR (also SLU)
asr_preencoder_choices = _clone_class_choices(asr_preencoder_choices_, "asr_preencoder")
asr_encoder_choices = _clone_class_choices(asr_encoder_choices_, "asr_encoder")
asr_postencoder_choices = _clone_class_choices(
    asr_postencoder_choices_, "asr_postencoder"
)
asr_decoder_choices = _clone_class_choices(asr_decoder_choices_, "asr_decoder")
# ST
st_preencoder_choices = _clone_class_choices(st_preencoder_choices_, "st_preencoder")
st_encoder_choices = _clone_class_choices(st_encoder_choices_, "st_encoder")
st_postencoder_choices = _clone_class_choices(st_postencoder_choices_, "st_postencoder")
st_decoder_choices = _clone_class_choices(st_decoder_choices_, "st_decoder")
st_extra_asr_decoder_choices = _clone_class_choices(
    st_extra_asr_decoder_choices_, "st_extra_asr_decoder"
)
st_extra_mt_decoder_choices = _clone_class_choices(
    st_extra_mt_decoder_choices_, "st_extra_mt_decoder"
)
# DIAR
diar_frontend_choices = _clone_class_choices(diar_front_end_choices_, "diar_frontend")
diar_specaug_choices = _clone_class_choices(diar_specaug_choices_, "diar_specaug")
diar_normalize_choices = _clone_class_choices(diar_normalize_choices_, "diar_normalize")
diar_encoder_choices = _clone_class_choices(diar_encoder_choices_, "diar_encoder")
diar_decoder_choices = _clone_class_choices(diar_decoder_choices_, "diar_decoder")
diar_attractor_choices = _clone_class_choices(diar_attractor_choices_, "diar_attractor")
# Upper bound on the number of enumerated data keys
# (speech_ref*, noise_ref*, text_spk*).
MAX_REFERENCE_NUM = 100
# Maps each subtask name to the task class that builds its submodel.
name2task = dict(
    enh=EnhancementTask,
    asr=ASRTask,
    st=STTask,
    diar=DiarizationTask,
)
# More can be added to the following attributes.
# Per-subtask lists of argparse attribute names forwarded to each submodel's
# build_model(); see EnhS2TTask.build_model, which looks them up by
# "<subtask>_<attr>" first and plain "<attr>" as a fallback.
enh_attributes = [
    "encoder", "encoder_conf",
    "separator", "separator_conf",
    "mask_module", "mask_module_conf",
    "decoder", "decoder_conf",
    "criterions",
]
asr_attributes = [
    "token_list", "input_size",
    "frontend", "frontend_conf",
    "specaug", "specaug_conf",
    "normalize", "normalize_conf",
    "preencoder", "preencoder_conf",
    "encoder", "encoder_conf",
    "postencoder", "postencoder_conf",
    "decoder", "decoder_conf",
    "ctc_conf",
]
st_attributes = [
    "token_list", "src_token_list", "input_size",
    "frontend", "frontend_conf",
    "specaug", "specaug_conf",
    "normalize", "normalize_conf",
    "preencoder", "preencoder_conf",
    "encoder", "encoder_conf",
    "postencoder", "postencoder_conf",
    "decoder", "decoder_conf",
    "ctc_conf",
    "extra_asr_decoder", "extra_asr_decoder_conf",
    "extra_mt_decoder", "extra_mt_decoder_conf",
]
diar_attributes = [
    "input_size", "num_spk",
    "frontend", "frontend_conf",
    "specaug", "specaug_conf",
    "normalize", "normalize_conf",
    "encoder", "encoder_conf",
    "decoder", "decoder_conf",
    "attractor", "attractor_conf",
    "label_aggregator", "label_aggregator_conf",
]
class EnhS2TTask(AbsTask):
    """Joint speech enhancement + speech-to-something task.

    Builds the submodels listed in ``--subtask_series`` (enh, asr, st, diar)
    via their own task classes and wraps them in ``ESPnetEnhS2TModel``.
    """

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1
    # Add variable objects configurations
    class_choices_list = [
        # --enh_encoder and --enh_encoder_conf
        enh_encoder_choices,
        # --enh_separator and --enh_separator_conf
        enh_separator_choices,
        # --enh_decoder and --enh_decoder_conf
        enh_decoder_choices,
        # --enh_mask_module and --enh_mask_module_conf
        enh_mask_module_choices,
        # --frontend and --frontend_conf
        frontend_choices,
        # --specaug and --specaug_conf
        specaug_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --asr_preencoder and --asr_preencoder_conf
        asr_preencoder_choices,
        # --asr_encoder and --asr_encoder_conf
        asr_encoder_choices,
        # --asr_postencoder and --asr_postencoder_conf
        asr_postencoder_choices,
        # --asr_decoder and --asr_decoder_conf
        asr_decoder_choices,
        # --st_preencoder and --st_preencoder_conf
        st_preencoder_choices,
        # --st_encoder and --st_encoder_conf
        st_encoder_choices,
        # --st_postencoder and --st_postencoder_conf
        st_postencoder_choices,
        # --st_decoder and --st_decoder_conf
        st_decoder_choices,
        # --st_extra_asr_decoder and --st_extra_asr_decoder_conf
        st_extra_asr_decoder_choices,
        # --st_extra_mt_decoder and --st_extra_mt_decoder_conf
        st_extra_mt_decoder_choices,
        # --diar_frontend and --diar_frontend_conf
        diar_frontend_choices,
        # --diar_specaug and --diar_specaug_conf
        diar_specaug_choices,
        # --diar_normalize and --diar_normalize_conf
        diar_normalize_choices,
        # --diar_encoder and --diar_encoder_conf
        diar_encoder_choices,
        # --diar_decoder and --diar_decoder_conf
        diar_decoder_choices,
        # --label_aggregator and --label_aggregator_conf
        label_aggregator_choices,
        # --diar_attractor and --diar_attractor_conf
        diar_attractor_choices,
    ]
    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer
    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add enh_s2t-specific CLI options, plus one --<name>/--<name>_conf
        pair per entry in ``class_choices_list``."""
        group = parser.add_argument_group(description="Task related")
        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--src_token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token (for source language)",
        )
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )
        group.add_argument(
            "--ctc_conf",
            action=NestedDictAction,
            default=get_default_kwargs(CTC),
            help="The keyword arguments for CTC class.",
        )
        group.add_argument(
            "--enh_criterions",
            action=NestedDictAction,
            default=[
                {
                    "name": "si_snr",
                    "conf": {},
                    "wrapper": "fixed_order",
                    "wrapper_conf": {},
                },
            ],
            help="The criterions binded with the loss wrappers.",
        )
        group.add_argument(
            "--diar_num_spk",
            type=int_or_none,
            default=None,
            help="The number of speakers (for each recording) for diar submodel class",
        )
        group.add_argument(
            "--diar_input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )
        group.add_argument(
            "--enh_model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetEnhancementModel),
            help="The keyword arguments for enh submodel class.",
        )
        group.add_argument(
            "--asr_model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetASRModel),
            help="The keyword arguments for asr submodel class.",
        )
        # NOTE(review): the default kwargs below are taken from
        # ESPnetEnhancementModel although this option configures the ST
        # submodel -- looks like a copy-paste slip; confirm against the
        # intended ST model class before changing it (would need a new
        # project import).
        group.add_argument(
            "--st_model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetEnhancementModel),
            help="The keyword arguments for st submodel class.",
        )
        group.add_argument(
            "--diar_model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetDiarizationModel),
            help="The keyword arguments for diar submodel class.",
        )
        group.add_argument(
            "--subtask_series",
            type=str,
            nargs="+",
            default=("enh", "asr"),
            choices=["enh", "asr", "st", "diar"],
            help="The series of subtasks in the pipeline.",
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetEnhS2TModel),
            help="The keyword arguments for model class.",
        )
        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=False,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        group.add_argument(
            "--src_token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The source text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--src_bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece (for source language)",
        )
        group.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        group.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        group.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        group.add_argument(
            "--text_name",
            nargs="+",
            default=["text"],
            type=str,
            help="Specify the text_name attribute used in the preprocessor",
        )
        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Build the mini-batch collation callable (float pad 0.0, int pad -1)."""
        assert check_argument_types()
        # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
        return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.ndarray]], Dict[str, np.ndarray]]]:
        """Build the per-sample preprocessor matching the active subtasks.

        ST uses a dual-tokenizer preprocessor (target + source text), diar a
        plain one, everything else the multi-text ASR preprocessor.  Returns
        None when --use_preprocessor is False.
        """
        assert check_argument_types()
        if args.use_preprocessor:
            if "st" in args.subtask_series:
                retval = MutliTokenizerCommonPreprocessor(
                    train=train,
                    token_type=[args.token_type, args.src_token_type],
                    token_list=[args.token_list, args.src_token_list],
                    bpemodel=[args.bpemodel, args.src_bpemodel],
                    non_linguistic_symbols=args.non_linguistic_symbols,
                    text_cleaner=args.cleaner,
                    g2p_type=args.g2p,
                    # NOTE(kamo): Check attribute existence for backward compatibility
                    rir_scp=args.rir_scp if hasattr(args, "rir_scp") else None,
                    rir_apply_prob=args.rir_apply_prob
                    if hasattr(args, "rir_apply_prob")
                    else 1.0,
                    noise_scp=args.noise_scp if hasattr(args, "noise_scp") else None,
                    noise_apply_prob=args.noise_apply_prob
                    if hasattr(args, "noise_apply_prob")
                    else 1.0,
                    noise_db_range=args.noise_db_range
                    if hasattr(args, "noise_db_range")
                    else "13_15",
                    short_noise_thres=args.short_noise_thres
                    if hasattr(args, "short_noise_thres")
                    else 0.5,
                    speech_volume_normalize=args.speech_volume_normalize
                    if hasattr(args, "speech_volume_normalize")
                    else None,
                    speech_name="speech",
                    text_name=["text", "src_text"],
                )
            elif "diar" in args.subtask_series:
                retval = CommonPreprocessor(train=train)
            else:
                retval = CommonPreprocessor_multi(
                    train=train,
                    token_type=args.token_type,
                    token_list=args.token_list,
                    bpemodel=args.bpemodel,
                    non_linguistic_symbols=args.non_linguistic_symbols,
                    text_name=getattr(args, "text_name", ["text"]),
                    text_cleaner=args.cleaner,
                    g2p_type=args.g2p,
                )
        else:
            retval = None
        assert check_return_type(retval)
        return retval
    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Data keys every sample must provide (no reference at inference)."""
        if not inference:
            retval = ("speech", "speech_ref1")
        else:
            # Recognition mode
            retval = ("speech",)
        return retval
    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Data keys a sample may additionally provide."""
        retval = ["text", "dereverb_ref1"]
        # NOTE(review): "speech_ref1" can never be in ["text", "dereverb_ref1"],
        # so ``st`` is always 1 and speech_ref1 is always listed as optional
        # even though required_data_names() also requires it during training
        # -- confirm whether the check was meant to target the required list.
        st = 2 if "speech_ref1" in retval else 1
        retval += ["speech_ref{}".format(n) for n in range(st, MAX_REFERENCE_NUM + 1)]
        retval += ["noise_ref{}".format(n) for n in range(1, MAX_REFERENCE_NUM + 1)]
        retval += ["text_spk{}".format(n) for n in range(1, MAX_REFERENCE_NUM + 1)]
        retval += ["src_text"]
        retval = tuple(retval)
        assert check_return_type(retval)
        return retval
    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetEnhS2TModel:
        """Build every submodel in ``args.subtask_series`` and wrap them in
        an ``ESPnetEnhS2TModel``."""
        assert check_argument_types()
        # Build submodels in the order of subtask_series
        model_conf = args.model_conf.copy()
        for _, subtask in enumerate(args.subtask_series):
            # ``eval`` selects args.<subtask>_model_conf and the module-level
            # <subtask>_attributes list by name; ``subtask`` is restricted to
            # the argparse choices ["enh", "asr", "st", "diar"], so this is
            # not evaluating user-controlled text.
            subtask_conf = dict(
                init=None, model_conf=eval(f"args.{subtask}_model_conf")
            )
            # Prefer the subtask-prefixed option (e.g. asr_encoder) and fall
            # back to the unprefixed one shared across subtasks.
            for attr in eval(f"{subtask}_attributes"):
                subtask_conf[attr] = (
                    getattr(args, subtask + "_" + attr, None)
                    if getattr(args, subtask + "_" + attr, None) is not None
                    else getattr(args, attr, None)
                )
            if subtask in ["asr", "st", "diar"]:
                m_subtask = "s2t"
            elif subtask in ["enh"]:
                m_subtask = subtask
            else:
                raise ValueError(f"{subtask} not supported.")
            logging.info(f"Building {subtask} task model, using config: {subtask_conf}")
            model_conf[f"{m_subtask}_model"] = name2task[subtask].build_model(
                argparse.Namespace(**subtask_conf)
            )
        # 8. Build model
        model = ESPnetEnhS2TModel(**model_conf)
        # FIXME(kamo): Should be done in model?
        # 9. Initialize
        if args.init is not None:
            initialize(model, args.init)
        assert check_return_type(model)
        return model
| 19,866 | 34.225177 | 88 | py |
espnet | espnet-master/espnet2/tasks/slu.py | import argparse
import logging
from typing import Callable, Dict, Optional, Tuple
import numpy as np
from typeguard import check_argument_types, check_return_type
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.decoder.mlm_decoder import MLMDecoder
from espnet2.asr.decoder.rnn_decoder import RNNDecoder
from espnet2.asr.decoder.transducer_decoder import TransducerDecoder
from espnet2.asr.decoder.transformer_decoder import (
DynamicConvolution2DTransformerDecoder,
DynamicConvolutionTransformerDecoder,
LightweightConvolution2DTransformerDecoder,
LightweightConvolutionTransformerDecoder,
TransformerDecoder,
)
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.encoder.branchformer_encoder import BranchformerEncoder
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.encoder.contextual_block_conformer_encoder import (
ContextualBlockConformerEncoder,
)
from espnet2.asr.encoder.contextual_block_transformer_encoder import (
ContextualBlockTransformerEncoder,
)
from espnet2.asr.encoder.hubert_encoder import (
FairseqHubertEncoder,
FairseqHubertPretrainEncoder,
)
from espnet2.asr.encoder.longformer_encoder import LongformerEncoder
from espnet2.asr.encoder.rnn_encoder import RNNEncoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.encoder.vgg_rnn_encoder import VGGRNNEncoder
from espnet2.asr.encoder.wav2vec2_encoder import FairSeqWav2Vec2Encoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.fused import FusedFrontends
from espnet2.asr.frontend.s3prl import S3prlFrontend
from espnet2.asr.frontend.windowing import SlidingWindow
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.postencoder.hugging_face_transformers_postencoder import (
HuggingFaceTransformersPostEncoder,
)
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.preencoder.linear import LinearProjection
from espnet2.asr.preencoder.sinc import LightweightSincConvs
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.asr_transducer.joint_network import JointNetwork
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.slu.espnet_model import ESPnetSLUModel
from espnet2.slu.postdecoder.abs_postdecoder import AbsPostDecoder
from espnet2.slu.postdecoder.hugging_face_transformers_postdecoder import (
HuggingFaceTransformersPostDecoder,
)
from espnet2.slu.postencoder.conformer_postencoder import ConformerPostEncoder
from espnet2.slu.postencoder.transformer_postencoder import TransformerPostEncoder
from espnet2.tasks.asr import ASRTask
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.torch_utils.initialize import initialize
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.class_choices import ClassChoices
from espnet2.train.preprocessor import SLUPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import float_or_none, int_or_none, str2bool, str_or_none
# Feature-extraction frontends selectable via --frontend.
frontend_choices = ClassChoices(
    name="frontend",
    classes=dict(
        default=DefaultFrontend,
        sliding_window=SlidingWindow,
        s3prl=S3prlFrontend,
        fused=FusedFrontends,
    ),
    type_check=AbsFrontend,
    default="default",
)
# Spectrogram augmentation (--specaug); optional, disabled by default.
specaug_choices = ClassChoices(
    name="specaug",
    classes=dict(specaug=SpecAug),
    type_check=AbsSpecAug,
    default=None,
    optional=True,
)
# Feature normalization layers (--normalize).
normalize_choices = ClassChoices(
    "normalize",
    classes=dict(
        global_mvn=GlobalMVN,
        utterance_mvn=UtteranceMVN,
    ),
    type_check=AbsNormalize,
    default="utterance_mvn",
    optional=True,
)
# Top-level SLU model implementation (--model).
model_choices = ClassChoices(
    "model",
    classes=dict(
        espnet=ESPnetSLUModel,
    ),
    type_check=AbsESPnetModel,
    default="espnet",
)
# Raw-input pre-encoders applied before the main encoder (--preencoder).
preencoder_choices = ClassChoices(
    name="preencoder",
    classes=dict(
        sinc=LightweightSincConvs,
        linear=LinearProjection,
    ),
    type_check=AbsPreEncoder,
    default=None,
    optional=True,
)
# Main acoustic encoders (--encoder).
encoder_choices = ClassChoices(
    "encoder",
    classes=dict(
        conformer=ConformerEncoder,
        transformer=TransformerEncoder,
        contextual_block_transformer=ContextualBlockTransformerEncoder,
        contextual_block_conformer=ContextualBlockConformerEncoder,
        vgg_rnn=VGGRNNEncoder,
        rnn=RNNEncoder,
        wav2vec2=FairSeqWav2Vec2Encoder,
        hubert=FairseqHubertEncoder,
        hubert_pretrain=FairseqHubertPretrainEncoder,
        longformer=LongformerEncoder,
        branchformer=BranchformerEncoder,
    ),
    type_check=AbsEncoder,
    default="rnn",
)
# Post-encoders stacked on top of the encoder output (--postencoder).
postencoder_choices = ClassChoices(
    name="postencoder",
    classes=dict(
        hugging_face_transformers=HuggingFaceTransformersPostEncoder,
        conformer=ConformerPostEncoder,
        transformer=TransformerPostEncoder,
    ),
    type_check=AbsPostEncoder,
    default=None,
    optional=True,
)
# Deliberation encoders for two-pass SLU (--deliberationencoder);
# same candidate classes as the post-encoder.
deliberationencoder_choices = ClassChoices(
    name="deliberationencoder",
    classes=dict(
        hugging_face_transformers=HuggingFaceTransformersPostEncoder,
        conformer=ConformerPostEncoder,
        transformer=TransformerPostEncoder,
    ),
    type_check=AbsPostEncoder,
    default=None,
    optional=True,
)
# Decoders producing the output token sequence (--decoder).
decoder_choices = ClassChoices(
    "decoder",
    classes=dict(
        transformer=TransformerDecoder,
        lightweight_conv=LightweightConvolutionTransformerDecoder,
        lightweight_conv2d=LightweightConvolution2DTransformerDecoder,
        dynamic_conv=DynamicConvolutionTransformerDecoder,
        dynamic_conv2d=DynamicConvolution2DTransformerDecoder,
        rnn=RNNDecoder,
        transducer=TransducerDecoder,
        mlm=MLMDecoder,
    ),
    type_check=AbsDecoder,
    default="rnn",
)
# Post-decoders refining decoder output (--postdecoder).
postdecoder_choices = ClassChoices(
    name="postdecoder",
    classes=dict(
        hugging_face_transformers=HuggingFaceTransformersPostDecoder,
    ),
    type_check=AbsPostDecoder,
    default=None,
    optional=True,
)
class SLUTask(ASRTask):
    """Spoken language understanding task.

    Extends ASRTask with SLU-specific components (deliberation encoder,
    post-decoder, optional transcript supervision).
    """

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1
    # Add variable objects configurations
    class_choices_list = [
        # --frontend and --frontend_conf
        frontend_choices,
        # --specaug and --specaug_conf
        specaug_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --model and --model_conf
        model_choices,
        # --preencoder and --preencoder_conf
        preencoder_choices,
        # --encoder and --encoder_conf
        encoder_choices,
        # --postencoder and --postencoder_conf
        postencoder_choices,
        # --deliberationencoder and --deliberationencoder_conf
        deliberationencoder_choices,
        # --decoder and --decoder_conf
        decoder_choices,
        # --postdecoder and --postdecoder_conf
        postdecoder_choices,
    ]
    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer
@classmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
group = parser.add_argument_group(description="Task related")
# NOTE(kamo): add_arguments(..., required=True) can't be used
# to provide --print_config mode. Instead of it, do as
required = parser.get_default("required")
required += ["token_list"]
group.add_argument(
"--token_list",
type=str_or_none,
default=None,
help="A text mapping int-id to token",
)
group.add_argument(
"--transcript_token_list",
type=str_or_none,
default=None,
help="A text mapping int-id to token for transcripts",
)
group.add_argument(
"--two_pass",
type=str2bool,
default=False,
help="Run 2-pass SLU",
)
group.add_argument(
"--pre_postencoder_norm",
type=str2bool,
default=False,
help="pre_postencoder_norm",
)
group.add_argument(
"--init",
type=lambda x: str_or_none(x.lower()),
default=None,
help="The initialization method",
choices=[
"chainer",
"xavier_uniform",
"xavier_normal",
"kaiming_uniform",
"kaiming_normal",
None,
],
)
group.add_argument(
"--input_size",
type=int_or_none,
default=None,
help="The number of input dimension of the feature",
)
group.add_argument(
"--ctc_conf",
action=NestedDictAction,
default=get_default_kwargs(CTC),
help="The keyword arguments for CTC class.",
)
group.add_argument(
"--joint_net_conf",
action=NestedDictAction,
default=None,
help="The keyword arguments for joint network class.",
)
group = parser.add_argument_group(description="Preprocess related")
group.add_argument(
"--use_preprocessor",
type=str2bool,
default=True,
help="Apply preprocessing to data or not",
)
group.add_argument(
"--token_type",
type=str,
default="bpe",
choices=["bpe", "char", "word", "phn"],
help="The text will be tokenized " "in the specified level token",
)
group.add_argument(
"--bpemodel",
type=str_or_none,
default=None,
help="The model file of sentencepiece",
)
parser.add_argument(
"--non_linguistic_symbols",
type=str_or_none,
help="non_linguistic_symbols file path",
)
group.add_argument(
"--cleaner",
type=str_or_none,
choices=[None, "tacotron", "jaconv", "vietnamese"],
default=None,
help="Apply text cleaning",
)
group.add_argument(
"--g2p",
type=str_or_none,
choices=g2p_choices,
default=None,
help="Specify g2p method if --token_type=phn",
)
group.add_argument(
"--speech_volume_normalize",
type=float_or_none,
default=None,
help="Scale the maximum amplitude to the given value.",
)
group.add_argument(
"--rir_scp",
type=str_or_none,
default=None,
help="The file path of rir scp file.",
)
group.add_argument(
"--rir_apply_prob",
type=float,
default=1.0,
help="THe probability for applying RIR convolution.",
)
group.add_argument(
"--noise_scp",
type=str_or_none,
default=None,
help="The file path of noise scp file.",
)
group.add_argument(
"--noise_apply_prob",
type=float,
default=1.0,
help="The probability applying Noise adding.",
)
group.add_argument(
"--noise_db_range",
type=str,
default="13_15",
help="The range of noise decibel level.",
)
group.add_argument(
"--short_noise_thres",
type=float,
default=0.5,
help="If len(noise) / len(speech) is smaller than this threshold during "
"dynamic mixing, a warning will be displayed.",
)
for class_choices in cls.class_choices_list:
# Append --<name> and --<name>_conf.
# e.g. --encoder and --encoder_conf
class_choices.add_arguments(group)
@classmethod
def build_preprocess_fn(
cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
assert check_argument_types()
if args.use_preprocessor:
retval = SLUPreprocessor(
train=train,
token_type=args.token_type,
token_list=args.token_list,
transcript_token_list=None
if "transcript_token_list" not in args
else args.transcript_token_list,
bpemodel=args.bpemodel,
non_linguistic_symbols=args.non_linguistic_symbols,
text_cleaner=args.cleaner,
g2p_type=args.g2p,
# NOTE(kamo): Check attribute existence for backward compatibility
rir_scp=args.rir_scp if hasattr(args, "rir_scp") else None,
rir_apply_prob=args.rir_apply_prob
if hasattr(args, "rir_apply_prob")
else 1.0,
noise_scp=args.noise_scp if hasattr(args, "noise_scp") else None,
noise_apply_prob=args.noise_apply_prob
if hasattr(args, "noise_apply_prob")
else 1.0,
noise_db_range=args.noise_db_range
if hasattr(args, "noise_db_range")
else "13_15",
short_noise_thres=args.short_noise_thres
if hasattr(args, "short_noise_thres")
else 0.5,
speech_volume_normalize=args.speech_volume_normalize
if hasattr(args, "rir_scp")
else None,
)
else:
retval = None
assert check_return_type(retval)
return retval
@classmethod
def required_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
if not inference:
retval = ("speech", "text")
else:
# Recognition mode
retval = ("speech",)
return retval
@classmethod
def optional_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
retval = ("transcript",)
assert check_return_type(retval)
return retval
@classmethod
def build_model(cls, args: argparse.Namespace) -> ESPnetSLUModel:
assert check_argument_types()
if isinstance(args.token_list, str):
with open(args.token_list, encoding="utf-8") as f:
token_list = [line.rstrip() for line in f]
# Overwriting token_list to keep it as "portable".
args.token_list = list(token_list)
elif isinstance(args.token_list, (tuple, list)):
token_list = list(args.token_list)
else:
raise RuntimeError("token_list must be str or list")
if "transcript_token_list" in args:
if args.transcript_token_list is not None:
if isinstance(args.transcript_token_list, str):
with open(args.transcript_token_list, encoding="utf-8") as f:
transcript_token_list = [line.rstrip() for line in f]
# Overwriting token_list to keep it as "portable".
args.transcript_token_list = list(transcript_token_list)
elif isinstance(args.token_list, (tuple, list)):
transcript_token_list = list(args.transcript_token_list)
else:
raise RuntimeError(" Transcript token_list must be str or list")
vocab_size = len(token_list)
logging.info(f"Vocabulary size: {vocab_size }")
# 1. frontend
if args.input_size is None:
# Extract features in the model
frontend_class = frontend_choices.get_class(args.frontend)
frontend = frontend_class(**args.frontend_conf)
input_size = frontend.output_size()
else:
# Give features from data-loader
args.frontend = None
args.frontend_conf = {}
frontend = None
input_size = args.input_size
# 2. Data augmentation for spectrogram
if args.specaug is not None:
specaug_class = specaug_choices.get_class(args.specaug)
specaug = specaug_class(**args.specaug_conf)
else:
specaug = None
# 3. Normalization layer
if args.normalize is not None:
normalize_class = normalize_choices.get_class(args.normalize)
normalize = normalize_class(**args.normalize_conf)
else:
normalize = None
# 4. Pre-encoder input block
# NOTE(kan-bayashi): Use getattr to keep the compatibility
if getattr(args, "preencoder", None) is not None:
preencoder_class = preencoder_choices.get_class(args.preencoder)
preencoder = preencoder_class(**args.preencoder_conf)
input_size = preencoder.output_size()
else:
preencoder = None
# 4. Encoder
encoder_class = encoder_choices.get_class(args.encoder)
encoder = encoder_class(input_size=input_size, **args.encoder_conf)
# 5. Post-encoder block
# NOTE(kan-bayashi): Use getattr to keep the compatibility
encoder_output_size = encoder.output_size()
if getattr(args, "postencoder", None) is not None:
postencoder_class = postencoder_choices.get_class(args.postencoder)
postencoder = postencoder_class(
input_size=encoder_output_size, **args.postencoder_conf
)
encoder_output_size = postencoder.output_size()
else:
postencoder = None
if getattr(args, "deliberationencoder", None) is not None:
deliberationencoder_class = deliberationencoder_choices.get_class(
args.deliberationencoder
)
deliberationencoder = deliberationencoder_class(
input_size=encoder_output_size, **args.deliberationencoder_conf
)
encoder_output_size = deliberationencoder.output_size()
else:
deliberationencoder = None
if getattr(args, "postdecoder", None) is not None:
postdecoder_class = postdecoder_choices.get_class(args.postdecoder)
postdecoder = postdecoder_class(**args.postdecoder_conf)
encoder_output_size = encoder_output_size
else:
postdecoder = None
# 5. Decoder
decoder_class = decoder_choices.get_class(args.decoder)
if args.decoder == "transducer":
decoder = decoder_class(
vocab_size,
embed_pad=0,
**args.decoder_conf,
)
joint_network = JointNetwork(
vocab_size,
encoder.output_size(),
decoder.dunits,
**args.joint_net_conf,
)
else:
decoder = decoder_class(
vocab_size=vocab_size,
encoder_output_size=encoder_output_size,
**args.decoder_conf,
)
joint_network = None
# 6. CTC
ctc = CTC(
odim=vocab_size, encoder_output_size=encoder_output_size, **args.ctc_conf
)
# 7. Build model
try:
model_class = model_choices.get_class(args.model)
except AttributeError:
model_class = model_choices.get_class("espnet")
if "transcript_token_list" in args:
if args.transcript_token_list is not None:
args.model_conf["transcript_token_list"] = transcript_token_list
args.model_conf["two_pass"] = args.two_pass
args.model_conf["pre_postencoder_norm"] = args.pre_postencoder_norm
model = model_class(
vocab_size=vocab_size,
frontend=frontend,
specaug=specaug,
normalize=normalize,
preencoder=preencoder,
encoder=encoder,
postencoder=postencoder,
deliberationencoder=deliberationencoder,
decoder=decoder,
postdecoder=postdecoder,
ctc=ctc,
joint_network=joint_network,
token_list=token_list,
**args.model_conf,
)
# FIXME(kamo): Should be done in model?
# 8. Initialize
if args.init is not None:
initialize(model, args.init)
assert check_return_type(model)
return model
| 20,979 | 34.260504 | 85 | py |
espnet | espnet-master/espnet2/tasks/st.py | import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.decoder.rnn_decoder import RNNDecoder
from espnet2.asr.decoder.transformer_decoder import (
DynamicConvolution2DTransformerDecoder,
DynamicConvolutionTransformerDecoder,
LightweightConvolution2DTransformerDecoder,
LightweightConvolutionTransformerDecoder,
TransformerDecoder,
)
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.encoder.contextual_block_transformer_encoder import (
ContextualBlockTransformerEncoder,
)
from espnet2.asr.encoder.hubert_encoder import (
FairseqHubertEncoder,
FairseqHubertPretrainEncoder,
)
from espnet2.asr.encoder.rnn_encoder import RNNEncoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.encoder.vgg_rnn_encoder import VGGRNNEncoder
from espnet2.asr.encoder.wav2vec2_encoder import FairSeqWav2Vec2Encoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.s3prl import S3prlFrontend
from espnet2.asr.frontend.windowing import SlidingWindow
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.postencoder.hugging_face_transformers_postencoder import (
HuggingFaceTransformersPostEncoder,
)
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.preencoder.linear import LinearProjection
from espnet2.asr.preencoder.sinc import LightweightSincConvs
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.st.espnet_model import ESPnetSTModel
from espnet2.tasks.abs_task import AbsTask
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.torch_utils.initialize import initialize
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import MutliTokenizerCommonPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import float_or_none, int_or_none, str2bool, str_or_none
# Plug-in tables for the ST task: each ClassChoices maps a --<name> CLI value
# to a concrete class and registers --<name> / --<name>_conf options.
frontend_choices = ClassChoices(
    name="frontend",
    classes=dict(
        default=DefaultFrontend,
        sliding_window=SlidingWindow,
        s3prl=S3prlFrontend,
    ),
    type_check=AbsFrontend,
    default="default",
)
# Spectrogram-level data augmentation (disabled unless selected).
specaug_choices = ClassChoices(
    name="specaug",
    classes=dict(specaug=SpecAug),
    type_check=AbsSpecAug,
    default=None,
    optional=True,
)
# Feature normalization layer.
normalize_choices = ClassChoices(
    "normalize",
    classes=dict(
        global_mvn=GlobalMVN,
        utterance_mvn=UtteranceMVN,
    ),
    type_check=AbsNormalize,
    default="utterance_mvn",
    optional=True,
)
# Optional block applied to features before the encoder.
preencoder_choices = ClassChoices(
    name="preencoder",
    classes=dict(
        sinc=LightweightSincConvs,
        linear=LinearProjection,
    ),
    type_check=AbsPreEncoder,
    default=None,
    optional=True,
)
# Main speech encoder.
encoder_choices = ClassChoices(
    "encoder",
    classes=dict(
        conformer=ConformerEncoder,
        transformer=TransformerEncoder,
        contextual_block_transformer=ContextualBlockTransformerEncoder,
        vgg_rnn=VGGRNNEncoder,
        rnn=RNNEncoder,
        wav2vec2=FairSeqWav2Vec2Encoder,
        hubert=FairseqHubertEncoder,
        hubert_pretrain=FairseqHubertPretrainEncoder,
    ),
    type_check=AbsEncoder,
    default="rnn",
)
# Optional block applied after the encoder (e.g. pretrained LM adapter).
postencoder_choices = ClassChoices(
    name="postencoder",
    classes=dict(
        hugging_face_transformers=HuggingFaceTransformersPostEncoder,
    ),
    type_check=AbsPostEncoder,
    default=None,
    optional=True,
)
# Main translation decoder.
decoder_choices = ClassChoices(
    "decoder",
    classes=dict(
        transformer=TransformerDecoder,
        lightweight_conv=LightweightConvolutionTransformerDecoder,
        lightweight_conv2d=LightweightConvolution2DTransformerDecoder,
        dynamic_conv=DynamicConvolutionTransformerDecoder,
        dynamic_conv2d=DynamicConvolution2DTransformerDecoder,
        rnn=RNNDecoder,
    ),
    type_check=AbsDecoder,
    default="rnn",
)
# Auxiliary decoder over the source language (ASR multitask objective).
extra_asr_decoder_choices = ClassChoices(
    "extra_asr_decoder",
    classes=dict(
        transformer=TransformerDecoder,
        lightweight_conv=LightweightConvolutionTransformerDecoder,
        lightweight_conv2d=LightweightConvolution2DTransformerDecoder,
        dynamic_conv=DynamicConvolutionTransformerDecoder,
        dynamic_conv2d=DynamicConvolution2DTransformerDecoder,
        rnn=RNNDecoder,
    ),
    type_check=AbsDecoder,
    default="rnn",
)
# Auxiliary decoder over the target language (MT multitask objective).
extra_mt_decoder_choices = ClassChoices(
    "extra_mt_decoder",
    classes=dict(
        transformer=TransformerDecoder,
        lightweight_conv=LightweightConvolutionTransformerDecoder,
        lightweight_conv2d=LightweightConvolution2DTransformerDecoder,
        dynamic_conv=DynamicConvolutionTransformerDecoder,
        dynamic_conv2d=DynamicConvolution2DTransformerDecoder,
        rnn=RNNDecoder,
    ),
    type_check=AbsDecoder,
    default="rnn",
)
class STTask(AbsTask):
    """Speech translation (ST) task definition for the ESPnet2 trainer."""

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1
    # Add variable objects configurations
    class_choices_list = [
        # --frontend and --frontend_conf
        frontend_choices,
        # --specaug and --specaug_conf
        specaug_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --preencoder and --preencoder_conf
        preencoder_choices,
        # --encoder and --encoder_conf
        encoder_choices,
        # --postencoder and --postencoder_conf
        postencoder_choices,
        # --decoder and --decoder_conf
        decoder_choices,
        # --extra_asr_decoder and --extra_asr_decoder_conf
        extra_asr_decoder_choices,
        # --extra_mt_decoder and --extra_mt_decoder_conf
        extra_mt_decoder_choices,
    ]
    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer
    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register ST-task command-line options.

        Adds token-list/model options, preprocessor options (tokenization,
        RIR/noise augmentation), and one --<name>/--<name>_conf pair per
        entry of ``class_choices_list``.
        """
        group = parser.add_argument_group(description="Task related")
        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        required = parser.get_default("required")
        required += ["token_list"]
        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token (for target language)",
        )
        group.add_argument(
            "--src_token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token (for source language)",
        )
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )
        group.add_argument(
            "--ctc_conf",
            action=NestedDictAction,
            default=get_default_kwargs(CTC),
            help="The keyword arguments for CTC class.",
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetSTModel),
            help="The keyword arguments for model class.",
        )
        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The target text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--src_token_type",
            type=str,
            default="bpe",
            # "none" disables tokenization of the source text entirely.
            choices=["bpe", "char", "word", "phn", "none"],
            help="The source text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece (for target language)",
        )
        group.add_argument(
            "--src_bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece (for source language)",
        )
        group.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        group.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        group.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        group.add_argument(
            "--speech_volume_normalize",
            type=float_or_none,
            default=None,
            help="Scale the maximum amplitude to the given value.",
        )
        group.add_argument(
            "--rir_scp",
            type=str_or_none,
            default=None,
            help="The file path of rir scp file.",
        )
        group.add_argument(
            "--rir_apply_prob",
            type=float,
            default=1.0,
            help="THe probability for applying RIR convolution.",
        )
        group.add_argument(
            "--noise_scp",
            type=str_or_none,
            default=None,
            help="The file path of noise scp file.",
        )
        group.add_argument(
            "--noise_apply_prob",
            type=float,
            default=1.0,
            help="The probability applying Noise adding.",
        )
        group.add_argument(
            "--noise_db_range",
            type=str,
            default="13_15",
            help="The range of noise decibel level.",
        )
        group.add_argument(
            "--short_noise_thres",
            type=float,
            default=0.5,
            help="If len(noise) / len(speech) is smaller than this threshold during "
            "dynamic mixing, a warning will be displayed.",
        )
        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
@classmethod
def build_collate_fn(
cls, args: argparse.Namespace, train: bool
) -> Callable[
[Collection[Tuple[str, Dict[str, np.ndarray]]]],
Tuple[List[str], Dict[str, torch.Tensor]],
]:
assert check_argument_types()
# NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
@classmethod
def build_preprocess_fn(
cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
assert check_argument_types()
if args.src_token_type == "none":
args.src_token_type = None
if args.use_preprocessor:
retval = MutliTokenizerCommonPreprocessor(
train=train,
token_type=[args.token_type, args.src_token_type],
token_list=[args.token_list, args.src_token_list],
bpemodel=[args.bpemodel, args.src_bpemodel],
non_linguistic_symbols=args.non_linguistic_symbols,
text_cleaner=args.cleaner,
g2p_type=args.g2p,
# NOTE(kamo): Check attribute existence for backward compatibility
rir_scp=args.rir_scp if hasattr(args, "rir_scp") else None,
rir_apply_prob=args.rir_apply_prob
if hasattr(args, "rir_apply_prob")
else 1.0,
noise_scp=args.noise_scp if hasattr(args, "noise_scp") else None,
noise_apply_prob=args.noise_apply_prob
if hasattr(args, "noise_apply_prob")
else 1.0,
noise_db_range=args.noise_db_range
if hasattr(args, "noise_db_range")
else "13_15",
short_noise_thres=args.short_noise_thres
if hasattr(args, "short_noise_thres")
else 0.5,
speech_volume_normalize=args.speech_volume_normalize
if hasattr(args, "speech_volume_normalize")
else None,
speech_name="speech",
text_name=["text", "src_text"],
)
else:
retval = None
assert check_return_type(retval)
return retval
@classmethod
def required_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
if not inference:
retval = ("speech", "text")
else:
# Recognition mode
retval = ("speech",)
return retval
@classmethod
def optional_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
if not inference:
retval = ("src_text",)
else:
retval = ()
assert check_return_type(retval)
return retval
@classmethod
def build_model(cls, args: argparse.Namespace) -> ESPnetSTModel:
assert check_argument_types()
if isinstance(args.token_list, str):
with open(args.token_list, encoding="utf-8") as f:
token_list = [line.rstrip() for line in f]
# Overwriting token_list to keep it as "portable".
args.token_list = list(token_list)
elif isinstance(args.token_list, (tuple, list)):
token_list = list(args.token_list)
else:
raise RuntimeError("token_list must be str or list")
vocab_size = len(token_list)
logging.info(f"Vocabulary size: {vocab_size }")
if args.src_token_list is not None:
if isinstance(args.src_token_list, str):
with open(args.src_token_list, encoding="utf-8") as f:
src_token_list = [line.rstrip() for line in f]
# Overwriting src_token_list to keep it as "portable".
args.src_token_list = list(src_token_list)
elif isinstance(args.src_token_list, (tuple, list)):
src_token_list = list(args.src_token_list)
else:
raise RuntimeError("token_list must be str or list")
src_vocab_size = len(src_token_list)
logging.info(f"Source vocabulary size: {src_vocab_size }")
else:
src_token_list, src_vocab_size = None, None
# 1. frontend
if args.input_size is None:
# Extract features in the model
frontend_class = frontend_choices.get_class(args.frontend)
frontend = frontend_class(**args.frontend_conf)
input_size = frontend.output_size()
else:
# Give features from data-loader
args.frontend = None
args.frontend_conf = {}
frontend = None
input_size = args.input_size
# 2. Data augmentation for spectrogram
if args.specaug is not None:
specaug_class = specaug_choices.get_class(args.specaug)
specaug = specaug_class(**args.specaug_conf)
else:
specaug = None
# 3. Normalization layer
if args.normalize is not None:
normalize_class = normalize_choices.get_class(args.normalize)
normalize = normalize_class(**args.normalize_conf)
else:
normalize = None
# 4. Pre-encoder input block
# NOTE(kan-bayashi): Use getattr to keep the compatibility
if getattr(args, "preencoder", None) is not None:
preencoder_class = preencoder_choices.get_class(args.preencoder)
preencoder = preencoder_class(**args.preencoder_conf)
input_size = preencoder.output_size()
else:
preencoder = None
# 4. Encoder
encoder_class = encoder_choices.get_class(args.encoder)
encoder = encoder_class(input_size=input_size, **args.encoder_conf)
# 5. Post-encoder block
# NOTE(kan-bayashi): Use getattr to keep the compatibility
encoder_output_size = encoder.output_size()
if getattr(args, "postencoder", None) is not None:
postencoder_class = postencoder_choices.get_class(args.postencoder)
postencoder = postencoder_class(
input_size=encoder_output_size, **args.postencoder_conf
)
encoder_output_size = postencoder.output_size()
else:
postencoder = None
# 5. Decoder
decoder_class = decoder_choices.get_class(args.decoder)
decoder = decoder_class(
vocab_size=vocab_size,
encoder_output_size=encoder_output_size,
**args.decoder_conf,
)
# 6. CTC
if src_token_list is not None:
ctc = CTC(
odim=src_vocab_size,
encoder_output_size=encoder_output_size,
**args.ctc_conf,
)
else:
ctc = None
# 7. ASR extra decoder
if (
getattr(args, "extra_asr_decoder", None) is not None
and src_token_list is not None
):
extra_asr_decoder_class = extra_asr_decoder_choices.get_class(
args.extra_asr_decoder
)
extra_asr_decoder = extra_asr_decoder_class(
vocab_size=src_vocab_size,
encoder_output_size=encoder_output_size,
**args.extra_asr_decoder_conf,
)
else:
extra_asr_decoder = None
# 8. MT extra decoder
if getattr(args, "extra_mt_decoder", None) is not None:
extra_mt_decoder_class = extra_mt_decoder_choices.get_class(
args.extra_mt_decoder
)
extra_mt_decoder = extra_mt_decoder_class(
vocab_size=vocab_size,
encoder_output_size=encoder_output_size,
**args.extra_mt_decoder_conf,
)
else:
extra_asr_decoder = None
# 8. Build model
model = ESPnetSTModel(
vocab_size=vocab_size,
src_vocab_size=src_vocab_size,
frontend=frontend,
specaug=specaug,
normalize=normalize,
preencoder=preencoder,
encoder=encoder,
postencoder=postencoder,
decoder=decoder,
ctc=ctc,
extra_asr_decoder=extra_asr_decoder,
extra_mt_decoder=extra_mt_decoder,
token_list=token_list,
src_token_list=src_token_list,
**args.model_conf,
)
# FIXME(kamo): Should be done in model?
# 9. Initialize
if args.init is not None:
initialize(model, args.init)
assert check_return_type(model)
return model
| 20,374 | 34.068847 | 85 | py |
espnet | espnet-master/espnet2/tasks/hubert.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Thanks to Abdelrahman Mohamed and Wei-Ning Hsu's help in this implementation,
# Their origial Hubert work is in:
# Paper: https://arxiv.org/pdf/2106.07447.pdf
# Code in Fairseq: https://github.com/pytorch/fairseq/tree/master/examples/hubert
import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.encoder.hubert_encoder import ( # noqa: H301
FairseqHubertPretrainEncoder,
TorchAudioHuBERTPretrainEncoder,
)
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.windowing import SlidingWindow
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.preencoder.sinc import LightweightSincConvs
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.hubert.espnet_model import (
HubertPretrainModel,
TorchAudioHubertPretrainModel,
)
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.tasks.abs_task import AbsTask
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.torch_utils.initialize import initialize
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import HuBERTCollateFn
from espnet2.train.preprocessor import CommonPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import float_or_none, int_or_none, str2bool, str_or_none
# Plug-in tables for HuBERT pretraining: each ClassChoices maps a --<name>
# CLI value to a concrete class and registers --<name> / --<name>_conf.
frontend_choices = ClassChoices(
    name="frontend",
    classes=dict(default=DefaultFrontend, sliding_window=SlidingWindow),
    type_check=AbsFrontend,
    default="default",
)
# Spectrogram-level data augmentation (disabled unless selected).
specaug_choices = ClassChoices(
    name="specaug",
    classes=dict(specaug=SpecAug),
    type_check=AbsSpecAug,
    default=None,
    optional=True,
)
# Feature normalization layer.
normalize_choices = ClassChoices(
    "normalize",
    classes=dict(
        global_mvn=GlobalMVN,
        utterance_mvn=UtteranceMVN,
    ),
    type_check=AbsNormalize,
    default="utterance_mvn",
    optional=True,
)
# Optional block applied to features before the encoder.
preencoder_choices = ClassChoices(
    name="preencoder",
    classes=dict(
        sinc=LightweightSincConvs,
    ),
    type_check=AbsPreEncoder,
    default=None,
    optional=True,
)
# Pretraining encoder: fairseq-based or torchaudio-based HuBERT.
encoder_choices = ClassChoices(
    "encoder",
    classes=dict(
        hubert_pretrain=FairseqHubertPretrainEncoder,
        torchaudio_hubert=TorchAudioHuBERTPretrainEncoder,
    ),
    type_check=AbsEncoder,
    default="hubert_pretrain",
)
# Model wrapper matching the chosen encoder implementation.
model_choices = ClassChoices(
    "model",
    classes=dict(
        fairseq=HubertPretrainModel,
        torchaudio=TorchAudioHubertPretrainModel,
    ),
    type_check=AbsESPnetModel,
    default="fairseq",
)
class HubertTask(AbsTask):
    """HuBERT self-supervised pretraining task for the ESPnet2 trainer."""

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1
    # Add variable objects configurations
    class_choices_list = [
        # --frontend and --frontend_conf
        frontend_choices,
        # --specaug and --specaug_conf
        specaug_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --preencoder and --preencoder_conf
        preencoder_choices,
        # --encoder and --encoder_conf
        encoder_choices,
        # --model and --model_conf
        model_choices,
    ]
    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer
    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register HuBERT-pretraining command-line options.

        Adds token-list/collate-fn options, preprocessor options, the HuBERT
        loss weights, and one --<name>/--<name>_conf pair per entry of
        ``class_choices_list``.
        """
        group = parser.add_argument_group(description="Task related")
        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        required = parser.get_default("required")
        required += ["token_list"]
        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        group.add_argument(
            "--collate_fn_conf",
            action=NestedDictAction,
            default=dict(),
            help="The keyword arguments for collate_fn class.",
        )
        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )
        group.add_argument(
            "--num_classes",
            type=int,
            default=None,
            help="The number of classes in hubert",
        )
        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        group.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        group.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        group.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        group.add_argument(
            "--speech_volume_normalize",
            type=float_or_none,
            default=None,
            help="Scale the maximum amplitude to the given value.",
        )
        group.add_argument(
            "--rir_scp",
            type=str_or_none,
            default=None,
            help="The file path of rir scp file.",
        )
        group.add_argument(
            "--rir_apply_prob",
            type=float,
            default=1.0,
            help="THe probability for applying RIR convolution.",
        )
        group.add_argument(
            "--noise_scp",
            type=str_or_none,
            default=None,
            help="The file path of noise scp file.",
        )
        group.add_argument(
            "--noise_apply_prob",
            type=float,
            default=1.0,
            help="The probability applying Noise adding.",
        )
        group.add_argument(
            "--noise_db_range",
            type=str,
            default="13_15",
            help="The range of noise decibel level.",
        )
        # NOTE(review): the three loss-weight options below are added to the
        # bare parser rather than the "Preprocess related" group, unlike every
        # other option in this method — confirm whether that is intended.
        parser.add_argument(
            "--pred_masked_weight",
            type=float,
            default=1.0,
            help="weight for predictive loss for masked frames",
        )
        parser.add_argument(
            "--pred_nomask_weight",
            type=float,
            default=0.0,
            help="weight for predictive loss for unmasked frames",
        )
        parser.add_argument(
            "--loss_weights",
            type=float,
            default=0.0,
            help="weights for additional loss terms (not first one)",
        )
        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
@classmethod
def build_collate_fn(
cls, args: argparse.Namespace, train: bool
) -> Callable[
[Collection[Tuple[str, Dict[str, np.ndarray]]]],
Tuple[List[str], Dict[str, torch.Tensor]],
]:
assert check_argument_types()
return HuBERTCollateFn(
float_pad_value=0.0,
int_pad_value=-1,
label_downsampling=args.collate_fn_conf.get("label_downsampling", 1),
pad=args.collate_fn_conf.get("pad", False),
rand_crop=args.collate_fn_conf.get("rand_crop", True),
crop_audio=not args.collect_stats,
)
@classmethod
def build_preprocess_fn(
cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
assert check_argument_types()
if args.use_preprocessor:
retval = CommonPreprocessor(
train=train,
token_type=args.token_type,
token_list=args.token_list,
bpemodel=args.bpemodel,
non_linguistic_symbols=args.non_linguistic_symbols,
text_cleaner=args.cleaner,
g2p_type=args.g2p,
# NOTE(kamo): Check attribute existence for backward compatibility
rir_scp=args.rir_scp if hasattr(args, "rir_scp") else None,
rir_apply_prob=args.rir_apply_prob
if hasattr(args, "rir_apply_prob")
else 1.0,
noise_scp=args.noise_scp if hasattr(args, "noise_scp") else None,
noise_apply_prob=args.noise_apply_prob
if hasattr(args, "noise_apply_prob")
else 1.0,
noise_db_range=args.noise_db_range
if hasattr(args, "noise_db_range")
else "13_15",
short_noise_thres=args.short_noise_thres
if hasattr(args, "short_noise_thres")
else 0.5,
speech_volume_normalize=args.speech_volume_normalize
if hasattr(args, "rir_scp")
else None,
)
else:
retval = None
assert check_return_type(retval)
return retval
@classmethod
def required_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
if not inference:
retval = ("speech", "text")
else:
# Recognition mode
retval = ("speech",)
return retval
@classmethod
def optional_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
retval = ()
assert check_return_type(retval)
return retval
    @classmethod
    def build_model(
        cls, args: argparse.Namespace
    ) -> Union[HubertPretrainModel, TorchAudioHubertPretrainModel]:
        """Assemble the HuBERT pretraining model from the parsed configuration.

        Pipeline: frontend -> specaug -> normalize -> preencoder -> encoder,
        wrapped by the model class selected via --model (fairseq/torchaudio).
        """
        assert check_argument_types()
        if isinstance(args.token_list, str):
            with open(args.token_list, encoding="utf-8") as f:
                token_list = [line.rstrip() for line in f]
            # Overwriting token_list to keep it as "portable".
            args.token_list = list(token_list)
        elif isinstance(args.token_list, (tuple, list)):
            token_list = list(args.token_list)
        else:
            raise RuntimeError("token_list must be str or list")
        vocab_size = len(token_list)
        logging.info(f"Vocabulary size: {vocab_size }")
        # 1. frontend
        if args.input_size is None:
            # Extract features in the model
            frontend_class = frontend_choices.get_class(args.frontend)
            frontend = frontend_class(**args.frontend_conf)
            input_size = frontend.output_size()
        else:
            # Give features from data-loader
            args.frontend = None
            args.frontend_conf = {}
            frontend = None
            input_size = args.input_size
        # 2. Data augmentation for spectrogram
        if args.specaug is not None:
            specaug_class = specaug_choices.get_class(args.specaug)
            specaug = specaug_class(**args.specaug_conf)
        else:
            specaug = None
        # 3. Normalization layer
        if args.normalize is not None:
            normalize_class = normalize_choices.get_class(args.normalize)
            normalize = normalize_class(**args.normalize_conf)
        else:
            normalize = None
        # 4. Pre-encoder input block
        # NOTE(kan-bayashi): Use getattr to keep the compatibility
        if getattr(args, "preencoder", None) is not None:
            preencoder_class = preencoder_choices.get_class(args.preencoder)
            preencoder = preencoder_class(**args.preencoder_conf)
            input_size = preencoder.output_size()
        else:
            preencoder = None
        # 4. Encoder
        encoder_class = encoder_choices.get_class(args.encoder)
        encoder = encoder_class(
            input_size=input_size,
            num_classes=args.num_classes,
            **args.encoder_conf,
        )
        # 8. Build model
        # Older configs have no --model attribute; fall back to "fairseq".
        try:
            model_class = model_choices.get_class(args.model)
        except AttributeError:
            model_class = model_choices.get_class("fairseq")
        model = model_class(
            vocab_size=vocab_size,
            frontend=frontend,
            specaug=specaug,
            normalize=normalize,
            preencoder=preencoder,
            encoder=encoder,
            token_list=token_list,
            **args.model_conf,
        )
        # 9. Initialize
        if args.init is not None:
            initialize(model, args.init)
        assert check_return_type(model)
        return model
| 14,204 | 32.266979 | 85 | py |
espnet | espnet-master/espnet2/tasks/svs.py | """Singing-voice-synthesis task."""
import argparse
import logging
from pathlib import Path
from typing import Callable, Collection, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import yaml
from typeguard import check_argument_types, check_return_type
from espnet2.gan_svs.joint import JointScore2Wav
from espnet2.gan_svs.vits import VITS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.svs.abs_svs import AbsSVS
from espnet2.svs.espnet_model import ESPnetSVSModel
from espnet2.svs.feats_extract.score_feats_extract import (
FrameScoreFeats,
SyllableScoreFeats,
)
from espnet2.svs.naive_rnn.naive_rnn import NaiveRNN
from espnet2.svs.naive_rnn.naive_rnn_dp import NaiveRNNDP
# TODO(Yuning): Models to be added
from espnet2.svs.singing_tacotron.singing_tacotron import singing_tacotron
from espnet2.svs.xiaoice.XiaoiceSing import XiaoiceSing
# from espnet2.svs.encoder_decoder.transformer.transformer import Transformer
# from espnet2.svs.mlp_singer.mlp_singer import MLPSinger
# from espnet2.svs.glu_transformer.glu_transformer import GLU_Transformer
from espnet2.tasks.abs_task import AbsTask
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import SVSPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
from espnet2.tts.feats_extract.dio import Dio
from espnet2.tts.feats_extract.energy import Energy
from espnet2.tts.feats_extract.linear_spectrogram import LinearSpectrogram
from espnet2.tts.feats_extract.log_mel_fbank import LogMelFbank
from espnet2.tts.feats_extract.log_spectrogram import LogSpectrogram
from espnet2.tts.feats_extract.ying import Ying
# from espnet2.svs.xiaoice.XiaoiceSing import XiaoiceSing_noDP
# from espnet2.svs.bytesing.bytesing import ByteSing
from espnet2.tts.utils import ParallelWaveGANPretrainedVocoder
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.griffin_lim import Spectrogram2Waveform
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
# TODO(Yuning): Add singing augmentation
feats_extractor_choices = ClassChoices(
"feats_extract",
classes=dict(
fbank=LogMelFbank,
spectrogram=LogSpectrogram,
linear_spectrogram=LinearSpectrogram,
),
type_check=AbsFeatsExtract,
default="fbank",
)
score_feats_extractor_choices = ClassChoices(
"score_feats_extract",
classes=dict(
frame_score_feats=FrameScoreFeats, syllable_score_feats=SyllableScoreFeats
),
type_check=AbsFeatsExtract,
default="frame_score_feats",
)
pitch_extractor_choices = ClassChoices(
"pitch_extract",
classes=dict(dio=Dio),
type_check=AbsFeatsExtract,
default=None,
optional=True,
)
energy_extractor_choices = ClassChoices(
"energy_extract",
classes=dict(energy=Energy),
type_check=AbsFeatsExtract,
default=None,
optional=True,
)
normalize_choices = ClassChoices(
"normalize",
classes=dict(global_mvn=GlobalMVN),
type_check=AbsNormalize,
default="global_mvn",
optional=True,
)
pitch_normalize_choices = ClassChoices(
"pitch_normalize",
classes=dict(global_mvn=GlobalMVN),
type_check=AbsNormalize,
default=None,
optional=True,
)
ying_extractor_choices = ClassChoices(
"ying_extract",
classes=dict(ying=Ying),
type_check=AbsFeatsExtract,
default=None,
optional=True,
)
energy_normalize_choices = ClassChoices(
"energy_normalize",
classes=dict(global_mvn=GlobalMVN),
type_check=AbsNormalize,
default=None,
optional=True,
)
svs_choices = ClassChoices(
"svs",
classes=dict(
# transformer=Transformer,
# glu_transformer=GLU_Transformer,
# bytesing=ByteSing,
naive_rnn=NaiveRNN,
naive_rnn_dp=NaiveRNNDP,
xiaoice=XiaoiceSing,
# xiaoice_noDP=XiaoiceSing_noDP,
vits=VITS,
joint_score2wav=JointScore2Wav,
# mlp=MLPSinger,
singing_tacotron=singing_tacotron,
),
type_check=AbsSVS,
default="naive_rnn",
)
class SVSTask(AbsTask):
    """Singing-voice-synthesis task.

    Defines the command-line interface of the SVS task and assembles the
    score/acoustic feature extractors, normalization layers, and the SVS
    module into an ``ESPnetSVSModel``.
    """

    # SVS training uses a single optimizer.
    num_optimizers: int = 1

    # Add variable objects configurations
    class_choices_list = [
        # --score_extractor and --score_extractor_conf
        score_feats_extractor_choices,
        # --feats_extractor and --feats_extractor_conf
        feats_extractor_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --svs and --svs_conf
        svs_choices,
        # --pitch_extract and --pitch_extract_conf
        pitch_extractor_choices,
        # --pitch_normalize and --pitch_normalize_conf
        pitch_normalize_choices,
        # --ying_extract and --ying_extract_conf
        ying_extractor_choices,
        # --energy_extract and --energy_extract_conf
        energy_extractor_choices,
        # --energy_normalize and --energy_normalize_conf
        energy_normalize_choices,
    ]

    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer

    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add SVS task and preprocessing arguments to ``parser``."""
        # NOTE(kamo): Use '_' instead of '-' to avoid confusion
        assert check_argument_types()
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--odim",
            type=int_or_none,
            default=None,
            help="The number of dimension of output feature",
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetSVSModel),
            help="The keyword arguments for model class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="phn",
            choices=["bpe", "char", "word", "phn"],
            help="The text will be tokenized in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=[
                None,
                "g2p_en",
                "g2p_en_no_space",
                "pyopenjtalk",
                "pyopenjtalk_kana",
                "pyopenjtalk_accent",
                "pyopenjtalk_accent_with_pause",
                "pypinyin_g2p",
                "pypinyin_g2p_phone",
                "pypinyin_g2p_phone_without_prosody",
                "espeak_ng_arabic",
            ],
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        parser.add_argument(
            "--fs",
            type=int,
            default=24000,  # BUG: another fs in feats_extract_conf
            help="sample rate",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)

    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Return the batch collate function for SVS data."""
        assert check_argument_types()
        # "spembs", "sids", "lids" are fixed-size vectors, not sequences.
        return CommonCollateFn(
            float_pad_value=0.0,
            int_pad_value=0,
            not_sequence=["spembs", "sids", "lids"],
        )

    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.array], float], Dict[str, np.ndarray]]]:
        """Return the SVS preprocessor, or None if preprocessing is disabled."""
        assert check_argument_types()
        if args.use_preprocessor:
            retval = SVSPreprocessor(
                train=train,
                token_type=args.token_type,
                token_list=args.token_list,
                bpemodel=args.bpemodel,
                non_linguistic_symbols=args.non_linguistic_symbols,
                text_cleaner=args.cleaner,
                g2p_type=args.g2p,
                fs=args.fs,
                hop_length=args.feats_extract_conf["hop_length"],
            )
        else:
            retval = None
        # FIXME (jiatong): sometimes checking is not working here
        # assert check_return_type(retval)
        return retval

    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the data names required for training or inference."""
        if not inference:
            retval = ("text", "singing", "score", "label")
        else:
            # Inference mode
            retval = ("text", "score", "label")
        return retval

    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the optional data names for training or inference."""
        if not inference:
            retval = (
                "spembs",
                "durations",
                "pitch",
                "energy",
                "sids",
                "lids",
                "feats",
                "ying",
            )
        else:
            # Inference mode
            retval = ("spembs", "singing", "pitch", "durations", "sids", "lids")
        return retval

    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetSVSModel:
        """Build an ``ESPnetSVSModel`` from parsed command-line arguments."""
        assert check_argument_types()
        if isinstance(args.token_list, str):
            with open(args.token_list, encoding="utf-8") as f:
                token_list = [line.rstrip() for line in f]

            # "args" is saved as it is in a yaml file by BaseTask.main().
            # Overwriting token_list to keep it as "portable".
            args.token_list = token_list.copy()
        elif isinstance(args.token_list, (tuple, list)):
            token_list = args.token_list.copy()
        else:
            # Accepted types are str / tuple / list (the previous message
            # wrongly said "dict").
            raise RuntimeError("token_list must be str or list")
        vocab_size = len(token_list)
        logging.info(f"Vocabulary size: {vocab_size }")

        # 1. feats_extract
        if args.odim is None:
            # Extract features in the model
            feats_extract_class = feats_extractor_choices.get_class(args.feats_extract)
            feats_extract = feats_extract_class(**args.feats_extract_conf)
            odim = feats_extract.output_size()
        else:
            # Give features from data-loader
            args.feats_extract = None
            args.feats_extract_conf = None
            feats_extract = None
            odim = args.odim

        # 2. Normalization layer
        if args.normalize is not None:
            normalize_class = normalize_choices.get_class(args.normalize)
            normalize = normalize_class(**args.normalize_conf)
        else:
            normalize = None

        # 3. SVS
        svs_class = svs_choices.get_class(args.svs)
        svs = svs_class(idim=vocab_size, odim=odim, **args.svs_conf)

        # 4. Extra components
        score_feats_extract = None
        pitch_extract = None
        ying_extract = None
        energy_extract = None
        pitch_normalize = None
        energy_normalize = None
        logging.info(f"args:{args}")
        if getattr(args, "score_feats_extract", None) is not None:
            score_feats_extract_class = score_feats_extractor_choices.get_class(
                args.score_feats_extract
            )
            score_feats_extract = score_feats_extract_class(
                **args.score_feats_extract_conf
            )
        if getattr(args, "pitch_extract", None) is not None:
            pitch_extract_class = pitch_extractor_choices.get_class(args.pitch_extract)
            # The pitch extractor must share the SVS reduction factor; set it
            # from svs_conf when absent, otherwise require consistency.
            if args.pitch_extract_conf.get("reduction_factor", None) is not None:
                assert args.pitch_extract_conf.get(
                    "reduction_factor", None
                ) == args.svs_conf.get("reduction_factor", 1)
            else:
                args.pitch_extract_conf["reduction_factor"] = args.svs_conf.get(
                    "reduction_factor", 1
                )
            pitch_extract = pitch_extract_class(**args.pitch_extract_conf)
        if getattr(args, "ying_extract", None) is not None:
            ying_extract_class = ying_extractor_choices.get_class(
                args.ying_extract,
            )
            ying_extract = ying_extract_class(
                **args.ying_extract_conf,
            )
        if getattr(args, "energy_extract", None) is not None:
            # Same reduction-factor synchronization as for pitch above.
            if args.energy_extract_conf.get("reduction_factor", None) is not None:
                assert args.energy_extract_conf.get(
                    "reduction_factor", None
                ) == args.svs_conf.get("reduction_factor", 1)
            else:
                args.energy_extract_conf["reduction_factor"] = args.svs_conf.get(
                    "reduction_factor", 1
                )
            energy_extract_class = energy_extractor_choices.get_class(
                args.energy_extract
            )
            energy_extract = energy_extract_class(**args.energy_extract_conf)
        if getattr(args, "pitch_normalize", None) is not None:
            pitch_normalize_class = pitch_normalize_choices.get_class(
                args.pitch_normalize
            )
            pitch_normalize = pitch_normalize_class(**args.pitch_normalize_conf)
        if getattr(args, "energy_normalize", None) is not None:
            energy_normalize_class = energy_normalize_choices.get_class(
                args.energy_normalize
            )
            energy_normalize = energy_normalize_class(**args.energy_normalize_conf)

        # 5. Build model
        # NOTE: text/score/label/duration extraction all share the same
        # score-feature extractor instance.
        model = ESPnetSVSModel(
            text_extract=score_feats_extract,
            feats_extract=feats_extract,
            score_feats_extract=score_feats_extract,
            label_extract=score_feats_extract,
            pitch_extract=pitch_extract,
            ying_extract=ying_extract,
            duration_extract=score_feats_extract,
            energy_extract=energy_extract,
            normalize=normalize,
            pitch_normalize=pitch_normalize,
            energy_normalize=energy_normalize,
            svs=svs,
            **args.model_conf,
        )
        assert check_return_type(model)
        return model

    @classmethod
    def build_vocoder_from_file(
        cls,
        vocoder_config_file: Union[Path, str] = None,
        vocoder_file: Union[Path, str] = None,
        model: Optional[ESPnetSVSModel] = None,
        device: str = "cpu",
    ):
        """Build a vocoder (Griffin-Lim fallback or ParallelWaveGAN).

        Returns None when not enough information is available to build one.
        """
        logging.info(f"vocoder_config_file: {vocoder_config_file}")
        logging.info(f"vocoder_file: {vocoder_file}")
        # Build vocoder
        if vocoder_file is None:
            # If vocoder file is not provided, use griffin-lim as a vocoder
            vocoder_conf = {}
            if vocoder_config_file is not None:
                vocoder_config_file = Path(vocoder_config_file)
                with vocoder_config_file.open("r", encoding="utf-8") as f:
                    vocoder_conf = yaml.safe_load(f)
            if model.feats_extract is not None:
                vocoder_conf.update(model.feats_extract.get_parameters())
            if (
                "n_fft" in vocoder_conf
                and "n_shift" in vocoder_conf
                and "fs" in vocoder_conf
            ):
                return Spectrogram2Waveform(**vocoder_conf)
            else:
                logging.warning("Vocoder is not available. Skipped its building.")
                return None

        elif str(vocoder_file).endswith(".pkl"):
            # If the extension is ".pkl", the model is trained with parallel_wavegan
            vocoder = ParallelWaveGANPretrainedVocoder(
                vocoder_file, vocoder_config_file
            )
            return vocoder.to(device)

        else:
            raise ValueError(f"{vocoder_file} is not supported format.")
| 17,151 | 34.219713 | 87 | py |
espnet | espnet-master/espnet2/tasks/diar.py | import argparse
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.encoder.rnn_encoder import RNNEncoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.s3prl import S3prlFrontend
from espnet2.asr.frontend.windowing import SlidingWindow
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.diar.attractor.abs_attractor import AbsAttractor
from espnet2.diar.attractor.rnn_attractor import RnnAttractor
from espnet2.diar.decoder.abs_decoder import AbsDecoder
from espnet2.diar.decoder.linear_decoder import LinearDecoder
from espnet2.diar.espnet_model import ESPnetDiarizationModel
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.label_aggregation import LabelAggregate
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.tasks.abs_task import AbsTask
from espnet2.torch_utils.initialize import initialize
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import CommonPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
# Choices for --frontend: acoustic front-end producing input features.
frontend_choices = ClassChoices(
    name="frontend",
    classes=dict(
        default=DefaultFrontend,
        sliding_window=SlidingWindow,
        s3prl=S3prlFrontend,
    ),
    type_check=AbsFrontend,
    default="default",
    optional=True,
)
# Choices for --specaug: optional spectrogram augmentation (off by default).
specaug_choices = ClassChoices(
    name="specaug",
    classes=dict(specaug=SpecAug),
    type_check=AbsSpecAug,
    default=None,
    optional=True,
)
# Choices for --normalize: feature normalization (utterance-level MVN by default).
normalize_choices = ClassChoices(
    "normalize",
    classes=dict(
        global_mvn=GlobalMVN,
        utterance_mvn=UtteranceMVN,
    ),
    type_check=AbsNormalize,
    default="utterance_mvn",
    optional=True,
)
# Choices for --label_aggregator: aggregates frame-level speaker labels.
label_aggregator_choices = ClassChoices(
    "label_aggregator",
    classes=dict(label_aggregator=LabelAggregate),
    default="label_aggregator",
)
# Choices for --encoder: the diarization encoder backbone.
encoder_choices = ClassChoices(
    "encoder",
    classes=dict(
        conformer=ConformerEncoder,
        transformer=TransformerEncoder,
        rnn=RNNEncoder,
    ),
    type_check=AbsEncoder,
    default="transformer",
)
# Choices for --decoder: maps encoder output to speaker activities.
decoder_choices = ClassChoices(
    "decoder",
    classes=dict(linear=LinearDecoder),
    type_check=AbsDecoder,
    default="linear",
)
# Choices for --attractor: optional EEND-EDA-style attractor module.
attractor_choices = ClassChoices(
    "attractor",
    classes=dict(
        rnn=RnnAttractor,
    ),
    type_check=AbsAttractor,
    default=None,
    optional=True,
)
class DiarizationTask(AbsTask):
    """Speaker diarization task.

    Defines the command-line interface of the diarization task and builds
    an ``ESPnetDiarizationModel`` from the configured components.
    """

    # If you need more than one optimizer, change this value
    num_optimizers: int = 1

    # Add variable objects configurations
    class_choices_list = [
        # --frontend and --frontend_conf
        frontend_choices,
        # --specaug and --specaug_conf
        specaug_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --encoder and --encoder_conf
        encoder_choices,
        # --decoder and --decoder_conf
        decoder_choices,
        # --label_aggregator and --label_aggregator_conf
        label_aggregator_choices,
        # --attractor and --attractor_conf
        attractor_choices,
    ]

    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer

    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add diarization task and preprocessing arguments to ``parser``."""
        group = parser.add_argument_group(description="Task related")

        group.add_argument(
            "--num_spk",
            type=int_or_none,
            default=None,
            help="The number fo speakers (for each recording) used in system training",
        )

        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )

        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )

        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetDiarizationModel),
            help="The keyword arguments for model class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)

    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Return the batch collate function for diarization data."""
        assert check_argument_types()
        # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
        return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)

    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
        """Return the preprocessor, or None if preprocessing is disabled."""
        assert check_argument_types()
        if args.use_preprocessor:
            # FIXME (jiatong): add more argument here
            retval = CommonPreprocessor(train=train)
        else:
            retval = None
        assert check_return_type(retval)
        return retval

    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the data names required for training or inference."""
        if not inference:
            retval = ("speech", "spk_labels")
        else:
            # Recognition mode
            retval = ("speech",)
        return retval

    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the optional data names (currently none)."""
        # (Note: jiatong): no optional data names for now
        retval = ()
        assert check_return_type(retval)
        return retval

    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetDiarizationModel:
        """Build an ``ESPnetDiarizationModel`` from parsed arguments."""
        assert check_argument_types()

        # 1. frontend
        if args.input_size is None:
            # Extract features in the model
            frontend_class = frontend_choices.get_class(args.frontend)
            frontend = frontend_class(**args.frontend_conf)
            input_size = frontend.output_size()
        elif args.frontend is not None:
            # args.input_size is not None here (the original additionally
            # re-checked it, which was always true in this branch):
            # concatenate data-loader features with frontend features.
            frontend_class = frontend_choices.get_class(args.frontend)
            frontend = frontend_class(**args.frontend_conf)
            input_size = args.input_size + frontend.output_size()
        else:
            # Give features from data-loader
            args.frontend = None
            args.frontend_conf = {}
            frontend = None
            input_size = args.input_size

        # 2. Data augmentation for spectrogram
        if args.specaug is not None:
            specaug_class = specaug_choices.get_class(args.specaug)
            specaug = specaug_class(**args.specaug_conf)
        else:
            specaug = None

        # 3. Normalization layer
        if args.normalize is not None:
            normalize_class = normalize_choices.get_class(args.normalize)
            normalize = normalize_class(**args.normalize_conf)
        else:
            normalize = None

        # 4. Label Aggregator layer
        label_aggregator_class = label_aggregator_choices.get_class(
            args.label_aggregator
        )
        label_aggregator = label_aggregator_class(**args.label_aggregator_conf)

        # 5. Encoder
        encoder_class = encoder_choices.get_class(args.encoder)
        # Note(jiatong): Diarization may not use subsampling when processing
        encoder = encoder_class(input_size=input_size, **args.encoder_conf)

        # 6a. Decoder
        decoder_class = decoder_choices.get_class(args.decoder)
        decoder = decoder_class(
            num_spk=args.num_spk,
            encoder_output_size=encoder.output_size(),
            **args.decoder_conf,
        )

        # 6b. Attractor
        if getattr(args, "attractor", None) is not None:
            attractor_class = attractor_choices.get_class(args.attractor)
            attractor = attractor_class(
                encoder_output_size=encoder.output_size(),
                **args.attractor_conf,
            )
        else:
            attractor = None

        # 7. Build model
        model = ESPnetDiarizationModel(
            frontend=frontend,
            specaug=specaug,
            normalize=normalize,
            label_aggregator=label_aggregator,
            encoder=encoder,
            decoder=decoder,
            attractor=attractor,
            **args.model_conf,
        )

        # FIXME(kamo): Should be done in model?
        # 8. Initialize
        if args.init is not None:
            initialize(model, args.init)

        assert check_return_type(model)
        return model
| 9,963 | 31.993377 | 87 | py |
espnet | espnet-master/espnet2/tasks/mt.py | import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.decoder.rnn_decoder import RNNDecoder
from espnet2.asr.decoder.transformer_decoder import (
DynamicConvolution2DTransformerDecoder,
DynamicConvolutionTransformerDecoder,
LightweightConvolution2DTransformerDecoder,
LightweightConvolutionTransformerDecoder,
TransformerDecoder,
)
from espnet2.asr.discrete_asr_espnet_model import ESPnetDiscreteASRModel
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.encoder.branchformer_encoder import BranchformerEncoder
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.encoder.contextual_block_transformer_encoder import (
ContextualBlockTransformerEncoder,
)
from espnet2.asr.encoder.e_branchformer_encoder import EBranchformerEncoder
from espnet2.asr.encoder.rnn_encoder import RNNEncoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.encoder.vgg_rnn_encoder import VGGRNNEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.postencoder.hugging_face_transformers_postencoder import (
HuggingFaceTransformersPostEncoder,
)
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.preencoder.linear import LinearProjection
from espnet2.asr.preencoder.sinc import LightweightSincConvs
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.mt.espnet_model import ESPnetMTModel
from espnet2.mt.frontend.embedding import Embedding
from espnet2.tasks.abs_task import AbsTask
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.torch_utils.initialize import initialize
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import MutliTokenizerCommonPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
# Choices for --frontend: for MT the front-end is a token embedding layer.
frontend_choices = ClassChoices(
    name="frontend",
    classes=dict(
        embed=Embedding,
    ),
    type_check=AbsFrontend,
    default="embed",
)
# Choices for --specaug: optional augmentation (used by discrete-ASR variant).
specaug_choices = ClassChoices(
    name="specaug",
    classes=dict(
        specaug=SpecAug,
    ),
    type_check=AbsSpecAug,
    default=None,
    optional=True,
)
# Choices for --preencoder: optional block applied before the encoder.
preencoder_choices = ClassChoices(
    name="preencoder",
    classes=dict(
        sinc=LightweightSincConvs,
        linear=LinearProjection,
    ),
    type_check=AbsPreEncoder,
    default=None,
    optional=True,
)
# Choices for --encoder: the MT encoder backbone.
encoder_choices = ClassChoices(
    "encoder",
    classes=dict(
        conformer=ConformerEncoder,
        transformer=TransformerEncoder,
        contextual_block_transformer=ContextualBlockTransformerEncoder,
        vgg_rnn=VGGRNNEncoder,
        rnn=RNNEncoder,
        branchformer=BranchformerEncoder,
        e_branchformer=EBranchformerEncoder,
    ),
    type_check=AbsEncoder,
    default="rnn",
)
# Choices for --postencoder: optional block applied after the encoder.
postencoder_choices = ClassChoices(
    name="postencoder",
    classes=dict(
        hugging_face_transformers=HuggingFaceTransformersPostEncoder,
    ),
    type_check=AbsPostEncoder,
    default=None,
    optional=True,
)
# Choices for --decoder: the MT decoder.
decoder_choices = ClassChoices(
    "decoder",
    classes=dict(
        transformer=TransformerDecoder,
        lightweight_conv=LightweightConvolutionTransformerDecoder,
        lightweight_conv2d=LightweightConvolution2DTransformerDecoder,
        dynamic_conv=DynamicConvolutionTransformerDecoder,
        dynamic_conv2d=DynamicConvolution2DTransformerDecoder,
        rnn=RNNDecoder,
    ),
    type_check=AbsDecoder,
    default="rnn",
)
# Choices for --model: plain MT model, or discrete-token ASR-as-MT model.
model_choices = ClassChoices(
    "model",
    classes=dict(
        mt=ESPnetMTModel,
        discrete_asr=ESPnetDiscreteASRModel,
    ),
    type_check=AbsESPnetModel,
    default="mt",
)
class MTTask(AbsTask):
    """Machine-translation task.

    Defines the command-line interface of the MT task and builds an
    ``ESPnetMTModel`` (or ``ESPnetDiscreteASRModel``) from the configured
    components.
    """

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1

    # Add variable objects configurations
    class_choices_list = [
        # --frontend and --frontend_conf
        frontend_choices,
        # --specaug and --specaug_conf
        specaug_choices,
        # --preencoder and --preencoder_conf
        preencoder_choices,
        # --encoder and --encoder_conf
        encoder_choices,
        # --postencoder and --postencoder_conf
        postencoder_choices,
        # --decoder and --decoder_conf
        decoder_choices,
        # --model and --model_conf
        model_choices,
    ]

    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer

    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add MT task and preprocessing arguments to ``parser``."""
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        required = parser.get_default("required")
        required += ["src_token_list", "token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token (for target language)",
        )
        group.add_argument(
            "--src_token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token (for source language)",
        )
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )

        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )

        group.add_argument(
            "--ctc_conf",
            action=NestedDictAction,
            default=get_default_kwargs(CTC),
            help="The keyword arguments for CTC class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The target text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--src_token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The source text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece (for target language)",
        )
        group.add_argument(
            "--src_bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece (for source language)",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        parser.add_argument(
            "--tokenizer_encode_conf",
            type=dict,
            default=None,
            help="Tokenization encoder conf, "
            "e.g. BPE dropout: enable_sampling=True, alpha=0.1, nbest_size=-1",
        )
        parser.add_argument(
            "--src_tokenizer_encode_conf",
            type=dict,
            default=None,
            help="Src tokenization encoder conf, "
            "e.g. BPE dropout: enable_sampling=True, alpha=0.1, nbest_size=-1",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)

    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Return the batch collate function for MT data."""
        assert check_argument_types()
        # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
        return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)

    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
        """Return the dual (source/target) tokenizer preprocessor, or None."""
        assert check_argument_types()
        if args.use_preprocessor:
            retval = MutliTokenizerCommonPreprocessor(
                train=train,
                token_type=[args.token_type, args.src_token_type],
                token_list=[args.token_list, args.src_token_list],
                bpemodel=[args.bpemodel, args.src_bpemodel],
                non_linguistic_symbols=args.non_linguistic_symbols,
                text_cleaner=args.cleaner,
                g2p_type=args.g2p,
                text_name=["text", "src_text"],
                # Stochastic tokenization (e.g. BPE dropout) only in training.
                tokenizer_encode_conf=[
                    args.tokenizer_encode_conf,
                    args.src_tokenizer_encode_conf,
                ]
                if train
                else [dict(), dict()],
            )
        else:
            retval = None
        assert check_return_type(retval)
        return retval

    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the data names required for training or inference."""
        if not inference:
            retval = ("src_text", "text")
        else:
            # Recognition mode
            retval = ("src_text",)
        return retval

    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the optional data names (currently none)."""
        # No optional data in either mode (the original had identical
        # if/else branches).
        retval = ()
        assert check_return_type(retval)
        return retval

    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetMTModel:
        """Build an MT (or discrete-ASR) model from parsed arguments."""
        assert check_argument_types()
        if isinstance(args.token_list, str):
            with open(args.token_list, encoding="utf-8") as f:
                token_list = [line.rstrip() for line in f]

            # Overwriting token_list to keep it as "portable".
            args.token_list = list(token_list)
        elif isinstance(args.token_list, (tuple, list)):
            token_list = list(args.token_list)
        else:
            raise RuntimeError("token_list must be str or list")
        vocab_size = len(token_list)
        logging.info(f"Vocabulary size: {vocab_size }")

        if args.src_token_list is not None:
            if isinstance(args.src_token_list, str):
                with open(args.src_token_list, encoding="utf-8") as f:
                    src_token_list = [line.rstrip() for line in f]

                # Overwriting src_token_list to keep it as "portable".
                args.src_token_list = list(src_token_list)
            elif isinstance(args.src_token_list, (tuple, list)):
                src_token_list = list(args.src_token_list)
            else:
                # Name the actual offending option (was "token_list").
                raise RuntimeError("src_token_list must be str or list")
            src_vocab_size = len(src_token_list)
            logging.info(f"Source vocabulary size: {src_vocab_size }")
        else:
            src_token_list, src_vocab_size = None, None

        # 1. frontend
        if args.input_size is None:
            # Extract features in the model
            frontend_class = frontend_choices.get_class(args.frontend)
            frontend = frontend_class(input_size=src_vocab_size, **args.frontend_conf)
            input_size = frontend.output_size()
        else:
            # Give features from data-loader
            args.frontend = None
            args.frontend_conf = {}
            frontend = None
            input_size = args.input_size

        # 2. Data augmentation for spectrogram
        if getattr(args, "specaug", None) is not None:
            specaug_class = specaug_choices.get_class(args.specaug)
            specaug = specaug_class(**args.specaug_conf)
        else:
            specaug = None

        # 3. Pre-encoder input block
        # NOTE(kan-bayashi): Use getattr to keep the compatibility
        if getattr(args, "preencoder", None) is not None:
            preencoder_class = preencoder_choices.get_class(args.preencoder)
            preencoder = preencoder_class(**args.preencoder_conf)
            input_size = preencoder.output_size()
        else:
            preencoder = None

        # 4. Encoder
        encoder_class = encoder_choices.get_class(args.encoder)
        encoder = encoder_class(input_size=input_size, **args.encoder_conf)

        # 5. Post-encoder block
        # NOTE(kan-bayashi): Use getattr to keep the compatibility
        encoder_output_size = encoder.output_size()
        if getattr(args, "postencoder", None) is not None:
            postencoder_class = postencoder_choices.get_class(args.postencoder)
            postencoder = postencoder_class(
                input_size=encoder_output_size, **args.postencoder_conf
            )
            encoder_output_size = postencoder.output_size()
        else:
            postencoder = None

        # 5. Decoder
        decoder_class = decoder_choices.get_class(args.decoder)

        decoder = decoder_class(
            vocab_size=vocab_size,
            encoder_output_size=encoder_output_size,
            **args.decoder_conf,
        )

        # 6. CTC
        ctc = CTC(
            odim=vocab_size, encoder_output_size=encoder_output_size, **args.ctc_conf
        )

        # 8. Build model
        try:
            model_class = model_choices.get_class(args.model)
            if args.model == "discrete_asr":
                # Only the discrete-ASR variant consumes CTC and SpecAug.
                extra_model_conf = dict(ctc=ctc, specaug=specaug)
            else:
                extra_model_conf = dict()
        except AttributeError:
            # Fall back for old configs that have no --model option.
            model_class = model_choices.get_class("mt")
            extra_model_conf = dict()
        model = model_class(
            vocab_size=vocab_size,
            src_vocab_size=src_vocab_size,
            frontend=frontend,
            preencoder=preencoder,
            encoder=encoder,
            postencoder=postencoder,
            decoder=decoder,
            token_list=token_list,
            src_token_list=src_token_list,
            **args.model_conf,
            **extra_model_conf,
        )

        # FIXME(kamo): Should be done in model?
        # 9. Initialize
        if args.init is not None:
            initialize(model, args.init)

        assert check_return_type(model)
        return model
| 15,892 | 33.85307 | 86 | py |
espnet | espnet-master/espnet2/tasks/asr_transducer.py | """ASR Transducer Task."""
import argparse
import logging
import os
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.windowing import SlidingWindow
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.asr_transducer.decoder.abs_decoder import AbsDecoder
from espnet2.asr_transducer.decoder.mega_decoder import MEGADecoder
from espnet2.asr_transducer.decoder.rnn_decoder import RNNDecoder
from espnet2.asr_transducer.decoder.rwkv_decoder import RWKVDecoder
from espnet2.asr_transducer.decoder.stateless_decoder import StatelessDecoder
from espnet2.asr_transducer.encoder.encoder import Encoder
from espnet2.asr_transducer.espnet_transducer_model import ESPnetASRTransducerModel
from espnet2.asr_transducer.joint_network import JointNetwork
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.tasks.abs_task import AbsTask
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import CommonPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import float_or_none, int_or_none, str2bool, str_or_none
# Frontend feature extractors, exposed as --frontend / --frontend_conf.
frontend_choices = ClassChoices(
    name="frontend",
    classes={
        "default": DefaultFrontend,
        "sliding_window": SlidingWindow,
    },
    type_check=AbsFrontend,
    default="default",
)
# Optional spectrogram augmentation, exposed as --specaug / --specaug_conf.
specaug_choices = ClassChoices(
    name="specaug",
    classes={"specaug": SpecAug},
    type_check=AbsSpecAug,
    default=None,
    optional=True,
)
# Feature normalization, exposed as --normalize / --normalize_conf.
normalize_choices = ClassChoices(
    name="normalize",
    classes={
        "global_mvn": GlobalMVN,
        "utterance_mvn": UtteranceMVN,
    },
    type_check=AbsNormalize,
    default="utterance_mvn",
    optional=True,
)
# Transducer label decoders, exposed as --decoder / --decoder_conf.
decoder_choices = ClassChoices(
    name="decoder",
    classes={
        "mega": MEGADecoder,
        "rnn": RNNDecoder,
        "rwkv": RWKVDecoder,
        "stateless": StatelessDecoder,
    },
    type_check=AbsDecoder,
    default="rnn",
)
class ASRTransducerTask(AbsTask):
    """ASR Transducer Task definition.

    Assembles a Transducer ASR model (frontend, specaug, normalization,
    encoder, decoder, joint network) from command-line/config arguments.
    """

    # Transducer training uses a single optimizer.
    num_optimizers: int = 1

    # Variable-object configurations; each adds --<name> and --<name>_conf.
    class_choices_list = [
        frontend_choices,
        specaug_choices,
        normalize_choices,
        decoder_choices,
    ]

    trainer = Trainer

    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add Transducer task arguments.

        Args:
            cls: ASRTransducerTask object.
            parser: Transducer arguments parser.

        """
        group = parser.add_argument_group(description="Task related.")

        # NOTE: add_argument(..., required=True) cannot be used because it
        # would break --print_config mode; append to "required" instead.
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="Integer-string mapper for tokens.",
        )
        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of dimensions for input features.",
        )
        group.add_argument(
            "--init",
            type=str_or_none,
            default=None,
            help="Type of model initialization to use.",
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetASRTransducerModel),
            help="The keyword arguments for the model class.",
        )
        group.add_argument(
            "--encoder_conf",
            action=NestedDictAction,
            default={},
            help="The keyword arguments for the encoder class.",
        )
        group.add_argument(
            "--joint_network_conf",
            action=NestedDictAction,
            default={},
            help="The keyword arguments for the joint network class.",
        )

        group = parser.add_argument_group(description="Preprocess related.")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Whether to apply preprocessing to input data.",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The type of tokens to use during tokenization.",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The path of the sentencepiece model.",
        )
        group.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="The 'non_linguistic_symbols' file path.",
        )
        group.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Text cleaner to use.",
        )
        group.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="g2p method to use if --token_type=phn.",
        )
        group.add_argument(
            "--speech_volume_normalize",
            type=float_or_none,
            default=None,
            help="Normalization value for maximum amplitude scaling.",
        )
        group.add_argument(
            "--rir_scp",
            type=str_or_none,
            default=None,
            help="The RIR SCP file path.",
        )
        group.add_argument(
            "--rir_apply_prob",
            type=float,
            default=1.0,
            help="The probability of the applied RIR convolution.",
        )
        group.add_argument(
            "--noise_scp",
            type=str_or_none,
            default=None,
            help="The path of noise SCP file.",
        )
        group.add_argument(
            "--noise_apply_prob",
            type=float,
            default=1.0,
            help="The probability of the applied noise addition.",
        )
        group.add_argument(
            "--noise_db_range",
            type=str,
            default="13_15",
            help="The range of the noise decibel level.",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --decoder and --decoder_conf
            class_choices.add_arguments(group)

    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Build collate function.

        Args:
            cls: ASRTransducerTask object.
            args: Task arguments.
            train: Training mode.

        Return:
            : Callable collate function.

        """
        assert check_argument_types()

        # int_pad_value=-1 marks padded label positions.
        return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)

    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
        """Build pre-processing function.

        Args:
            cls: ASRTransducerTask object.
            args: Task arguments.
            train: Training mode.

        Return:
            : Callable pre-processing function (None if disabled).

        """
        assert check_argument_types()

        if args.use_preprocessor:
            # getattr(..., default) keeps compatibility with configs saved
            # before these options existed.
            retval = CommonPreprocessor(
                train=train,
                token_type=args.token_type,
                token_list=args.token_list,
                bpemodel=args.bpemodel,
                non_linguistic_symbols=args.non_linguistic_symbols,
                text_cleaner=args.cleaner,
                g2p_type=args.g2p,
                rir_scp=getattr(args, "rir_scp", None),
                rir_apply_prob=getattr(args, "rir_apply_prob", 1.0),
                noise_scp=getattr(args, "noise_scp", None),
                noise_apply_prob=getattr(args, "noise_apply_prob", 1.0),
                noise_db_range=getattr(args, "noise_db_range", "13_15"),
                # Bug fix: this previously tested hasattr(args, "rir_scp"),
                # which silently dropped the user's speech_volume_normalize
                # setting whenever rir_scp was absent from the config.
                speech_volume_normalize=getattr(
                    args, "speech_volume_normalize", None
                ),
            )
        else:
            retval = None

        assert check_return_type(retval)

        return retval

    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Required data depending on task mode.

        Args:
            cls: ASRTransducerTask object.
            train: Training mode.
            inference: Inference mode.

        Return:
            retval: Required task data.

        """
        if not inference:
            retval = ("speech", "text")
        else:
            retval = ("speech",)

        return retval

    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Optional data depending on task mode.

        Args:
            cls: ASRTransducerTask object.
            train: Training mode.
            inference: Inference mode.

        Return:
            retval: Optional task data (none for this task).

        """
        retval = ()
        assert check_return_type(retval)

        return retval

    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetASRTransducerModel:
        """Build the ASR Transducer model from the task arguments.

        Args:
            cls: ASRTransducerTask object.
            args: Task arguments.

        Return:
            model: ASR Transducer model.

        """
        assert check_argument_types()

        if isinstance(args.token_list, str):
            with open(args.token_list, encoding="utf-8") as f:
                token_list = [line.rstrip() for line in f]

            # Overwriting token_list to keep it as "portable".
            args.token_list = list(token_list)
        elif isinstance(args.token_list, (tuple, list)):
            token_list = list(args.token_list)
        else:
            raise RuntimeError("token_list must be str or list")
        vocab_size = len(token_list)

        # Propagate the scheduler warm-up into model_conf
        # (25000 steps when unspecified).
        if hasattr(args, "scheduler_conf"):
            args.model_conf["warmup_steps"] = args.scheduler_conf.get(
                "warmup_steps", 25000
            )

        logging.info(f"Vocabulary size: {vocab_size}")

        # 1. frontend
        if args.input_size is None:
            # Extract features in the model
            frontend_class = frontend_choices.get_class(args.frontend)
            frontend = frontend_class(**args.frontend_conf)
            input_size = frontend.output_size()
        else:
            # Give features from data-loader
            frontend = None
            input_size = args.input_size

        # 2. Data augmentation for spectrogram
        if args.specaug is not None:
            specaug_class = specaug_choices.get_class(args.specaug)
            specaug = specaug_class(**args.specaug_conf)
        else:
            specaug = None

        # 3. Normalization layer
        if args.normalize is not None:
            normalize_class = normalize_choices.get_class(args.normalize)
            normalize = normalize_class(**args.normalize_conf)
        else:
            normalize = None

        # 4. Encoder
        encoder = Encoder(input_size, **args.encoder_conf)
        encoder_output_size = encoder.output_size

        # 5. Decoder
        decoder_class = decoder_choices.get_class(args.decoder)
        decoder = decoder_class(
            vocab_size,
            **args.decoder_conf,
        )
        decoder_output_size = decoder.output_size

        # 6. Joint Network
        joint_network = JointNetwork(
            vocab_size,
            encoder_output_size,
            decoder_output_size,
            **args.joint_network_conf,
        )

        # 7. Build model
        model = ESPnetASRTransducerModel(
            vocab_size=vocab_size,
            token_list=token_list,
            frontend=frontend,
            specaug=specaug,
            normalize=normalize,
            encoder=encoder,
            decoder=decoder,
            joint_network=joint_network,
            **args.model_conf,
        )

        # 8. Initialize model
        if args.init is not None:
            raise NotImplementedError(
                "Currently not supported.",
                "Initialization part will be reworked in a short future.",
            )

        assert check_return_type(model)

        return model
| 13,389 | 29.501139 | 83 | py |
espnet | espnet-master/espnet2/tasks/gan_svs.py | # Copyright 2021 Tomoki Hayashi
# Copyright 2022 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based Singing-voice-synthesis task."""
import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.gan_svs.abs_gan_svs import AbsGANSVS
from espnet2.gan_svs.espnet_model import ESPnetGANSVSModel
from espnet2.gan_svs.joint import JointScore2Wav
from espnet2.gan_svs.vits import VITS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.svs.feats_extract.score_feats_extract import (
FrameScoreFeats,
SyllableScoreFeats,
)
from espnet2.tasks.abs_task import AbsTask, optim_classes
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.gan_trainer import GANTrainer
from espnet2.train.preprocessor import SVSPreprocessor
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
from espnet2.tts.feats_extract.dio import Dio
from espnet2.tts.feats_extract.energy import Energy
from espnet2.tts.feats_extract.linear_spectrogram import LinearSpectrogram
from espnet2.tts.feats_extract.log_mel_fbank import LogMelFbank
from espnet2.tts.feats_extract.log_spectrogram import LogSpectrogram
from espnet2.tts.feats_extract.ying import Ying
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
# Acoustic feature extractors, exposed as --feats_extract / --feats_extract_conf.
feats_extractor_choices = ClassChoices(
    name="feats_extract",
    classes={
        "fbank": LogMelFbank,
        "log_spectrogram": LogSpectrogram,
        "linear_spectrogram": LinearSpectrogram,
    },
    type_check=AbsFeatsExtract,
    default="linear_spectrogram",
)
# Musical-score feature extractors, exposed as --score_feats_extract.
score_feats_extractor_choices = ClassChoices(
    name="score_feats_extract",
    classes={
        "frame_score_feats": FrameScoreFeats,
        "syllable_score_feats": SyllableScoreFeats,
    },
    type_check=AbsFeatsExtract,
    default="frame_score_feats",
)
# Optional pitch (F0) extractor, exposed as --pitch_extract.
pitch_extractor_choices = ClassChoices(
    name="pitch_extract",
    classes={"dio": Dio},
    type_check=AbsFeatsExtract,
    default=None,
    optional=True,
)
# Optional YIN-based feature extractor, exposed as --ying_extract.
ying_extractor_choices = ClassChoices(
    name="ying_extract",
    classes={"ying": Ying},
    type_check=AbsFeatsExtract,
    default=None,
    optional=True,
)
# Optional energy extractor, exposed as --energy_extract.
energy_extractor_choices = ClassChoices(
    name="energy_extract",
    classes={"energy": Energy},
    type_check=AbsFeatsExtract,
    default=None,
    optional=True,
)
# Feature normalization, exposed as --normalize.
normalize_choices = ClassChoices(
    name="normalize",
    classes={
        "global_mvn": GlobalMVN,
        "utterance_mvn": UtteranceMVN,
    },
    type_check=AbsNormalize,
    default=None,
    optional=True,
)
# Pitch normalization, exposed as --pitch_normalize.
pitch_normalize_choices = ClassChoices(
    name="pitch_normalize",
    classes={
        "global_mvn": GlobalMVN,
        "utterance_mvn": UtteranceMVN,
    },
    type_check=AbsNormalize,
    default=None,
    optional=True,
)
# Energy normalization, exposed as --energy_normalize.
energy_normalize_choices = ClassChoices(
    name="energy_normalize",
    classes={
        "global_mvn": GlobalMVN,
        "utterance_mvn": UtteranceMVN,
    },
    type_check=AbsNormalize,
    default=None,
    optional=True,
)
# GAN-based SVS architectures, exposed as --svs / --svs_conf.
svs_choices = ClassChoices(
    name="svs",
    classes={
        "vits": VITS,
        "joint_score2wav": JointScore2Wav,
    },
    type_check=AbsGANSVS,
    default="vits",
)
class GANSVSTask(AbsTask):
    """GAN-based Singing-voice-synthesis task."""

    # GAN requires two optimizers (generator + discriminator)
    num_optimizers: int = 2

    # Add variable objects configurations
    class_choices_list = [
        # --score_extractor and --score_extractor_conf
        score_feats_extractor_choices,
        # --feats_extractor and --feats_extractor_conf
        feats_extractor_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --svs and --svs_conf
        svs_choices,
        # --pitch_extract and --pitch_extract_conf
        pitch_extractor_choices,
        # --pitch_normalize and --pitch_normalize_conf
        pitch_normalize_choices,
        # --ying_extract and --ying_extract_conf
        ying_extractor_choices,
        # --energy_extract and --energy_extract_conf
        energy_extractor_choices,
        # --energy_normalize and --energy_normalize_conf
        energy_normalize_choices,
    ]

    # Use GANTrainer instead of Trainer (alternates generator/discriminator steps)
    trainer = GANTrainer

    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add GAN-SVS task arguments to the argument parser."""
        # NOTE(kamo): Use '_' instead of '-' to avoid confusion
        assert check_argument_types()
        group = parser.add_argument_group(description="Task related")
        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        required = parser.get_default("required")
        required += ["token_list"]
        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--odim",
            type=int_or_none,
            default=None,
            help="The number of dimension of output feature",
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetGANSVSModel),
            help="The keyword arguments for model class.",
        )
        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="phn",
            choices=["bpe", "char", "word", "phn"],
            help="The text will be tokenized in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese", "korean_cleaner"],
            default=None,
            help="Apply text cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        parser.add_argument(
            "--fs",
            type=int,
            default=24000,  # BUG: another fs in feats_extract_conf
            help="sample rate",
        )
        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)

    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Build the batch collate function.

        Pads float features with 0.0 and integer sequences with 0;
        spembs/sids/lids are treated as fixed-size (non-sequence) items.
        """
        assert check_argument_types()
        return CommonCollateFn(
            float_pad_value=0.0,
            int_pad_value=0,
            not_sequence=["spembs", "sids", "lids"],
        )

    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.array], float], Dict[str, np.ndarray]]]:
        """Build the SVS pre-processing function (None if disabled).

        NOTE(review): requires args.feats_extract_conf to contain
        "hop_length" — raises KeyError otherwise; confirm configs always
        set it when use_preprocessor is True.
        """
        assert check_argument_types()
        if args.use_preprocessor:
            retval = SVSPreprocessor(
                train=train,
                token_type=args.token_type,
                token_list=args.token_list,
                bpemodel=args.bpemodel,
                non_linguistic_symbols=args.non_linguistic_symbols,
                text_cleaner=args.cleaner,
                g2p_type=args.g2p,
                fs=args.fs,
                hop_length=args.feats_extract_conf["hop_length"],
            )
        else:
            retval = None
        # FIXME (jiatong): sometimes checking is not working here
        # assert check_return_type(retval)
        return retval

    # TODO(Yuning): check new names
    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the data names that must be present for this task mode."""
        if not inference:
            retval = ("text", "singing", "score", "label")
        else:
            # Inference mode
            retval = ("text", "score", "label")
        return retval

    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the data names that may optionally be present."""
        if not inference:
            retval = (
                "spembs",
                "durations",
                "pitch",
                "energy",
                "sids",
                "lids",
                "feats",
                "ying",
            )
        else:
            # Inference mode
            retval = ("spembs", "singing", "pitch", "durations", "sids", "lids")
        return retval

    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetGANSVSModel:
        """Build the GAN-SVS model (extractors, normalizers, SVS generator)."""
        assert check_argument_types()
        if isinstance(args.token_list, str):
            with open(args.token_list, encoding="utf-8") as f:
                token_list = [line.rstrip() for line in f]
            # "args" is saved as it is in a yaml file by BaseTask.main().
            # Overwriting token_list to keep it as "portable".
            args.token_list = token_list.copy()
        elif isinstance(args.token_list, (tuple, list)):
            token_list = args.token_list.copy()
        else:
            raise RuntimeError("token_list must be str or dict")
        vocab_size = len(token_list)
        logging.info(f"Vocabulary size: {vocab_size }")
        # 1. feats_extract
        if args.odim is None:
            # Extract features in the model
            feats_extract_class = feats_extractor_choices.get_class(args.feats_extract)
            feats_extract = feats_extract_class(**args.feats_extract_conf)
            odim = feats_extract.output_size()
        else:
            # Give features from data-loader
            # NOTE: args is mutated here so the saved config reflects that
            # feature extraction happens outside the model.
            args.feats_extract = None
            args.feats_extract_conf = None
            feats_extract = None
            odim = args.odim
        # 2. Normalization layer
        if args.normalize is not None:
            normalize_class = normalize_choices.get_class(args.normalize)
            normalize = normalize_class(**args.normalize_conf)
        else:
            normalize = None
        # 3. SVS
        svs_class = svs_choices.get_class(args.svs)
        svs = svs_class(idim=vocab_size, odim=odim, **args.svs_conf)
        # 4. Extra components (all optional; getattr keeps compatibility
        # with configs saved before these options existed)
        score_feats_extract = None
        pitch_extract = None
        ying_extract = None
        energy_extract = None
        pitch_normalize = None
        energy_normalize = None
        logging.info(f"args:{args}")
        if getattr(args, "score_feats_extract", None) is not None:
            score_feats_extract_class = score_feats_extractor_choices.get_class(
                args.score_feats_extract
            )
            score_feats_extract = score_feats_extract_class(
                **args.score_feats_extract_conf
            )
        if getattr(args, "pitch_extract", None) is not None:
            pitch_extract_class = pitch_extractor_choices.get_class(
                args.pitch_extract,
            )
            pitch_extract = pitch_extract_class(
                **args.pitch_extract_conf,
            )
        if getattr(args, "ying_extract", None) is not None:
            ying_extract_class = ying_extractor_choices.get_class(
                args.ying_extract,
            )
            ying_extract = ying_extract_class(
                **args.ying_extract_conf,
            )
        if getattr(args, "energy_extract", None) is not None:
            energy_extract_class = energy_extractor_choices.get_class(
                args.energy_extract,
            )
            energy_extract = energy_extract_class(
                **args.energy_extract_conf,
            )
        if getattr(args, "pitch_normalize", None) is not None:
            pitch_normalize_class = pitch_normalize_choices.get_class(
                args.pitch_normalize,
            )
            pitch_normalize = pitch_normalize_class(
                **args.pitch_normalize_conf,
            )
        if getattr(args, "energy_normalize", None) is not None:
            energy_normalize_class = energy_normalize_choices.get_class(
                args.energy_normalize,
            )
            energy_normalize = energy_normalize_class(
                **args.energy_normalize_conf,
            )
        # 5. Build model
        # NOTE: text/score/label/duration extraction all share the same
        # score_feats_extract instance.
        model = ESPnetGANSVSModel(
            text_extract=score_feats_extract,
            feats_extract=feats_extract,
            score_feats_extract=score_feats_extract,
            label_extract=score_feats_extract,
            pitch_extract=pitch_extract,
            ying_extract=ying_extract,
            duration_extract=score_feats_extract,
            energy_extract=energy_extract,
            normalize=normalize,
            pitch_normalize=pitch_normalize,
            energy_normalize=energy_normalize,
            svs=svs,
            **args.model_conf,
        )
        assert check_return_type(model)
        return model

    @classmethod
    def build_optimizers(
        cls,
        args: argparse.Namespace,
        model: ESPnetGANSVSModel,
    ) -> List[torch.optim.Optimizer]:
        """Build [generator_optimizer, discriminator_optimizer] for GAN training.

        Uses --optim/--optim_conf for the generator and --optim2/--optim2_conf
        for the discriminator; wraps both in fairscale OSS when sharded_ddp
        is enabled.
        """
        # check that the SVS module exposes the two GAN sub-networks
        assert hasattr(model.svs, "generator")
        assert hasattr(model.svs, "discriminator")
        # define generator optimizer
        optim_g_class = optim_classes.get(args.optim)
        if optim_g_class is None:
            raise ValueError(f"must be one of {list(optim_classes)}: {args.optim}")
        if args.sharded_ddp:
            try:
                import fairscale
            except ImportError:
                raise RuntimeError("Requiring fairscale. Do 'pip install fairscale'")
            optim_g = fairscale.optim.oss.OSS(
                params=model.svs.generator.parameters(),
                optim=optim_g_class,
                **args.optim_conf,
            )
        else:
            optim_g = optim_g_class(
                model.svs.generator.parameters(),
                **args.optim_conf,
            )
        optimizers = [optim_g]
        # define discriminator optimizer
        optim_d_class = optim_classes.get(args.optim2)
        if optim_d_class is None:
            raise ValueError(f"must be one of {list(optim_classes)}: {args.optim2}")
        if args.sharded_ddp:
            try:
                import fairscale
            except ImportError:
                raise RuntimeError("Requiring fairscale. Do 'pip install fairscale'")
            optim_d = fairscale.optim.oss.OSS(
                params=model.svs.discriminator.parameters(),
                optim=optim_d_class,
                **args.optim2_conf,
            )
        else:
            optim_d = optim_d_class(
                model.svs.discriminator.parameters(),
                **args.optim2_conf,
            )
        optimizers += [optim_d]
        return optimizers
| 15,786 | 32.305907 | 87 | py |
espnet | espnet-master/espnet2/tasks/asvspoof.py | import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.encoder.abs_encoder import AbsEncoder
# TODO1 (checkpoint 2): import conformer class class
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.fused import FusedFrontends
from espnet2.asr.frontend.s3prl import S3prlFrontend
from espnet2.asr.frontend.windowing import SlidingWindow
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.preencoder.linear import LinearProjection
from espnet2.asr.preencoder.sinc import LightweightSincConvs
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.asvspoof.decoder.abs_decoder import AbsDecoder
from espnet2.asvspoof.decoder.linear_decoder import LinearDecoder
from espnet2.asvspoof.espnet_model import ESPnetASVSpoofModel
from espnet2.asvspoof.loss.abs_loss import AbsASVSpoofLoss
from espnet2.asvspoof.loss.am_softmax_loss import ASVSpoofAMSoftmaxLoss
from espnet2.asvspoof.loss.binary_loss import ASVSpoofBinaryLoss
from espnet2.asvspoof.loss.oc_softmax_loss import ASVSpoofOCSoftmaxLoss
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.tasks.abs_task import AbsTask
from espnet2.torch_utils.initialize import initialize
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import CommonPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
# Frontend feature extractors, exposed as --frontend / --frontend_conf.
frontend_choices = ClassChoices(
    name="frontend",
    classes={
        "default": DefaultFrontend,
        "sliding_window": SlidingWindow,
        "s3prl": S3prlFrontend,
        "fused": FusedFrontends,
    },
    type_check=AbsFrontend,
    default="default",
)
# Optional spectrogram augmentation, exposed as --specaug / --specaug_conf.
specaug_choices = ClassChoices(
    name="specaug",
    classes={"specaug": SpecAug},
    type_check=AbsSpecAug,
    default=None,
    optional=True,
)
# Feature normalization, exposed as --normalize / --normalize_conf.
normalize_choices = ClassChoices(
    name="normalize",
    classes={
        "global_mvn": GlobalMVN,
        "utterance_mvn": UtteranceMVN,
    },
    type_check=AbsNormalize,
    default="utterance_mvn",
    optional=True,
)
# Optional pre-encoder input blocks, exposed as --preencoder / --preencoder_conf.
preencoder_choices = ClassChoices(
    name="preencoder",
    classes={
        "sinc": LightweightSincConvs,
        "linear": LinearProjection,
    },
    type_check=AbsPreEncoder,
    default=None,
    optional=True,
)
# Encoder architectures, exposed as --encoder / --encoder_conf.
encoder_choices = ClassChoices(
    name="encoder",
    classes={
        # TODO2 (checkpoint 2): add conformer option in encoder
        "transformer": TransformerEncoder,
    },
    type_check=AbsEncoder,
    default="transformer",
)
# Decoder heads, exposed as --decoder / --decoder_conf.
decoder_choices = ClassChoices(
    name="decoder",
    classes={"linear": LinearDecoder},
    type_check=AbsDecoder,
    default="linear",
)
# Loss functions referenced by name from the --losses configuration.
losses_choices = ClassChoices(
    name="losses",
    classes={
        "binary_loss": ASVSpoofBinaryLoss,
        "am_softmax_loss": ASVSpoofAMSoftmaxLoss,
        "oc_softmax_loss": ASVSpoofOCSoftmaxLoss,
    },
    type_check=AbsASVSpoofLoss,
    default=None,
)
class ASVSpoofTask(AbsTask):
    """Anti-spoofing (ASVSpoof) task definition.

    Builds a binary spoof-detection model: frontend -> specaug ->
    normalization -> (pre)encoder -> decoder, trained with one or more
    configurable losses.
    """

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1

    # Variable-object configurations; each adds --<name> and --<name>_conf.
    class_choices_list = [
        # --frontend and --frontend_conf
        frontend_choices,
        # --specaug and --specaug_conf
        specaug_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --preencoder and --preencoder_conf
        preencoder_choices,
        # --encoder and --encoder_conf
        encoder_choices,
        # --decoder and --decoder_conf
        decoder_choices,
    ]

    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer

    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add ASVSpoof task arguments to the argument parser."""
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )

        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--losses",
            action=NestedDictAction,
            default=[
                {
                    # Bug fix: the former default name "sigmoid_loss" is not
                    # a registered key of losses_choices, so the default
                    # configuration crashed in build_model(). "binary_loss"
                    # (ASVSpoofBinaryLoss) is the registered binary loss.
                    "name": "binary_loss",
                    "conf": {},
                },
            ],
            help="The criterions binded with the loss wrappers.",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)

    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Build the batch collate function (0.0 float pad, -1 int pad)."""
        assert check_argument_types()
        # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
        return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)

    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
        """Build the pre-processing function (None if disabled)."""
        assert check_argument_types()
        if args.use_preprocessor:
            retval = CommonPreprocessor(
                train=train,
            )
        else:
            retval = None
        assert check_return_type(retval)
        return retval

    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the data names required for this task mode."""
        if not inference:
            retval = ("speech", "label")
        else:
            # Recognition mode
            retval = ("speech",)
        return retval

    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return the optional data names (none for this task)."""
        retval = ()
        assert check_return_type(retval)
        return retval

    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetASVSpoofModel:
        """Build the ASVSpoof model from the task arguments.

        Args:
            args: Task arguments.

        Return:
            Initialized ESPnetASVSpoofModel.

        """
        assert check_argument_types()

        # 1. frontend
        if args.input_size is None:
            # Extract features in the model
            frontend_class = frontend_choices.get_class(args.frontend)
            frontend = frontend_class(**args.frontend_conf)
            input_size = frontend.output_size()
        else:
            # Give features from data-loader
            args.frontend = None
            args.frontend_conf = {}
            frontend = None
            input_size = args.input_size

        # 2. Data augmentation for spectrogram
        if args.specaug is not None:
            specaug_class = specaug_choices.get_class(args.specaug)
            specaug = specaug_class(**args.specaug_conf)
        else:
            specaug = None

        # 3. Normalization layer
        if args.normalize is not None:
            normalize_class = normalize_choices.get_class(args.normalize)
            normalize = normalize_class(**args.normalize_conf)
        else:
            normalize = None

        # 4. Pre-encoder input block
        # NOTE(kan-bayashi): Use getattr to keep the compatibility
        if getattr(args, "preencoder", None) is not None:
            preencoder_class = preencoder_choices.get_class(args.preencoder)
            preencoder = preencoder_class(**args.preencoder_conf)
            input_size = preencoder.output_size()
        else:
            preencoder = None

        # 5. Encoder
        encoder_class = encoder_choices.get_class(args.encoder)
        encoder = encoder_class(input_size=input_size, **args.encoder_conf)
        encoder_output_size = encoder.output_size()

        # 6. Decoder
        decoder_class = decoder_choices.get_class(args.decoder)
        decoder = decoder_class(
            encoder_output_size=encoder_output_size,
            **args.decoder_conf,
        )

        # 7. Loss definition
        losses = {}
        if getattr(args, "losses", None) is not None:
            # This check is for the compatibility when load models
            # that packed by older version
            for ctr in args.losses:
                if "softmax" in ctr["name"]:
                    # margin-based losses need the encoder dimension
                    loss = losses_choices.get_class(ctr["name"])(
                        enc_dim=encoder_output_size, **ctr["conf"]
                    )
                else:
                    loss = losses_choices.get_class(ctr["name"])(**ctr["conf"])
                losses[ctr["name"]] = loss

        # 8. Build model
        model = ESPnetASVSpoofModel(
            frontend=frontend,
            specaug=specaug,
            normalize=normalize,
            preencoder=preencoder,
            encoder=encoder,
            decoder=decoder,
            losses=losses,
        )

        # 9. Initialize
        if args.init is not None:
            initialize(model, args.init)

        assert check_return_type(model)
        return model
| 10,200 | 31.487261 | 83 | py |
espnet | espnet-master/espnet2/tasks/asr.py | import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.decoder.hugging_face_transformers_decoder import ( # noqa: H301
HuggingFaceTransformersDecoder,
)
from espnet2.asr.decoder.mlm_decoder import MLMDecoder
from espnet2.asr.decoder.rnn_decoder import RNNDecoder
from espnet2.asr.decoder.s4_decoder import S4Decoder
from espnet2.asr.decoder.transducer_decoder import TransducerDecoder
from espnet2.asr.decoder.transformer_decoder import (
DynamicConvolution2DTransformerDecoder,
DynamicConvolutionTransformerDecoder,
LightweightConvolution2DTransformerDecoder,
LightweightConvolutionTransformerDecoder,
TransformerDecoder,
)
from espnet2.asr.decoder.whisper_decoder import OpenAIWhisperDecoder
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.encoder.branchformer_encoder import BranchformerEncoder
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.encoder.contextual_block_conformer_encoder import (
ContextualBlockConformerEncoder,
)
from espnet2.asr.encoder.contextual_block_transformer_encoder import (
ContextualBlockTransformerEncoder,
)
from espnet2.asr.encoder.e_branchformer_encoder import EBranchformerEncoder
from espnet2.asr.encoder.hubert_encoder import (
FairseqHubertEncoder,
FairseqHubertPretrainEncoder,
TorchAudioHuBERTPretrainEncoder,
)
from espnet2.asr.encoder.longformer_encoder import LongformerEncoder
from espnet2.asr.encoder.rnn_encoder import RNNEncoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.encoder.transformer_encoder_multispkr import (
TransformerEncoder as TransformerEncoderMultiSpkr,
)
from espnet2.asr.encoder.vgg_rnn_encoder import VGGRNNEncoder
from espnet2.asr.encoder.wav2vec2_encoder import FairSeqWav2Vec2Encoder
from espnet2.asr.encoder.whisper_encoder import OpenAIWhisperEncoder
from espnet2.asr.espnet_model import ESPnetASRModel
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.fused import FusedFrontends
from espnet2.asr.frontend.s3prl import S3prlFrontend
from espnet2.asr.frontend.whisper import WhisperFrontend
from espnet2.asr.frontend.windowing import SlidingWindow
from espnet2.asr.maskctc_model import MaskCTCModel
from espnet2.asr.pit_espnet_model import ESPnetASRModel as PITESPnetModel
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.postencoder.hugging_face_transformers_postencoder import (
HuggingFaceTransformersPostEncoder,
)
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.preencoder.linear import LinearProjection
from espnet2.asr.preencoder.sinc import LightweightSincConvs
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.asr_transducer.joint_network import JointNetwork
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.tasks.abs_task import AbsTask
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.torch_utils.initialize import initialize
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import (
AbsPreprocessor,
CommonPreprocessor,
CommonPreprocessor_multi,
)
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import float_or_none, int_or_none, str2bool, str_or_none
# ---------------------------------------------------------------------------
# Module-level ClassChoices registries for the ASR task.
# Each registry exposes a pair of CLI options (--<name> / --<name>_conf) that
# select one of the registered component classes and pass keyword arguments
# to its constructor.  Registries with optional=True accept --<name>=none.
# ---------------------------------------------------------------------------

# Feature-extraction frontend (--frontend / --frontend_conf).
frontend_choices = ClassChoices(
    name="frontend",
    classes=dict(
        default=DefaultFrontend,
        sliding_window=SlidingWindow,
        s3prl=S3prlFrontend,
        fused=FusedFrontends,
        whisper=WhisperFrontend,
    ),
    type_check=AbsFrontend,
    default="default",
)
# Spectrogram augmentation (--specaug / --specaug_conf); disabled by default.
specaug_choices = ClassChoices(
    name="specaug",
    classes=dict(
        specaug=SpecAug,
    ),
    type_check=AbsSpecAug,
    default=None,
    optional=True,
)
# Feature normalization layer (--normalize / --normalize_conf).
normalize_choices = ClassChoices(
    "normalize",
    classes=dict(
        global_mvn=GlobalMVN,
        utterance_mvn=UtteranceMVN,
    ),
    type_check=AbsNormalize,
    default="utterance_mvn",
    optional=True,
)
# Top-level ESPnet model variant (--model / --model_conf).
model_choices = ClassChoices(
    "model",
    classes=dict(
        espnet=ESPnetASRModel,
        maskctc=MaskCTCModel,
        pit_espnet=PITESPnetModel,
    ),
    type_check=AbsESPnetModel,
    default="espnet",
)
# Block applied before the encoder (--preencoder / --preencoder_conf).
preencoder_choices = ClassChoices(
    name="preencoder",
    classes=dict(
        sinc=LightweightSincConvs,
        linear=LinearProjection,
    ),
    type_check=AbsPreEncoder,
    default=None,
    optional=True,
)
# Main speech encoder (--encoder / --encoder_conf).
encoder_choices = ClassChoices(
    "encoder",
    classes=dict(
        conformer=ConformerEncoder,
        transformer=TransformerEncoder,
        transformer_multispkr=TransformerEncoderMultiSpkr,
        contextual_block_transformer=ContextualBlockTransformerEncoder,
        contextual_block_conformer=ContextualBlockConformerEncoder,
        vgg_rnn=VGGRNNEncoder,
        rnn=RNNEncoder,
        wav2vec2=FairSeqWav2Vec2Encoder,
        hubert=FairseqHubertEncoder,
        hubert_pretrain=FairseqHubertPretrainEncoder,
        torchaudiohubert=TorchAudioHuBERTPretrainEncoder,
        longformer=LongformerEncoder,
        branchformer=BranchformerEncoder,
        whisper=OpenAIWhisperEncoder,
        e_branchformer=EBranchformerEncoder,
    ),
    type_check=AbsEncoder,
    default="rnn",
)
# Block applied after the encoder (--postencoder / --postencoder_conf).
postencoder_choices = ClassChoices(
    name="postencoder",
    classes=dict(
        hugging_face_transformers=HuggingFaceTransformersPostEncoder,
    ),
    type_check=AbsPostEncoder,
    default=None,
    optional=True,
)
# Attention/transducer decoder (--decoder / --decoder_conf); CTC-only if none.
decoder_choices = ClassChoices(
    "decoder",
    classes=dict(
        transformer=TransformerDecoder,
        lightweight_conv=LightweightConvolutionTransformerDecoder,
        lightweight_conv2d=LightweightConvolution2DTransformerDecoder,
        dynamic_conv=DynamicConvolutionTransformerDecoder,
        dynamic_conv2d=DynamicConvolution2DTransformerDecoder,
        rnn=RNNDecoder,
        transducer=TransducerDecoder,
        mlm=MLMDecoder,
        whisper=OpenAIWhisperDecoder,
        hugging_face_transformers=HuggingFaceTransformersDecoder,
        s4=S4Decoder,
    ),
    type_check=AbsDecoder,
    default=None,
    optional=True,
)
# Data preprocessor applied on-the-fly (--preprocessor / --preprocessor_conf).
preprocessor_choices = ClassChoices(
    "preprocessor",
    classes=dict(
        default=CommonPreprocessor,
        multi=CommonPreprocessor_multi,
    ),
    type_check=AbsPreprocessor,
    default="default",
)
class ASRTask(AbsTask):
    """Task definition for automatic speech recognition (ASR).

    Assembles the configurable components registered in the module-level
    ``ClassChoices`` objects (frontend, specaug, normalize, pre/post-encoder,
    encoder, decoder, preprocessor) and builds an ``ESPnetASRModel`` (or a
    variant selected via ``--model``).
    """

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1

    # Add variable objects configurations
    class_choices_list = [
        # --frontend and --frontend_conf
        frontend_choices,
        # --specaug and --specaug_conf
        specaug_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --model and --model_conf
        model_choices,
        # --preencoder and --preencoder_conf
        preencoder_choices,
        # --encoder and --encoder_conf
        encoder_choices,
        # --postencoder and --postencoder_conf
        postencoder_choices,
        # --decoder and --decoder_conf
        decoder_choices,
        # --preprocessor and --preprocessor_conf
        preprocessor_choices,
    ]

    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer

    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add ASR-specific command-line options to ``parser``."""
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )
        group.add_argument(
            "--ctc_conf",
            action=NestedDictAction,
            default=get_default_kwargs(CTC),
            help="The keyword arguments for CTC class.",
        )
        group.add_argument(
            "--joint_net_conf",
            action=NestedDictAction,
            default=None,
            help="The keyword arguments for joint network class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=[
                "bpe",
                "char",
                "word",
                "phn",
                "hugging_face",
                "whisper_en",
                "whisper_multilingual",
            ],
            help="The text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        group.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[
                None,
                "tacotron",
                "jaconv",
                "vietnamese",
                "whisper_en",
                "whisper_basic",
            ],
            default=None,
            help="Apply text cleaning",
        )
        group.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        group.add_argument(
            "--speech_volume_normalize",
            type=float_or_none,
            default=None,
            help="Scale the maximum amplitude to the given value.",
        )
        group.add_argument(
            "--rir_scp",
            type=str_or_none,
            default=None,
            help="The file path of rir scp file.",
        )
        group.add_argument(
            "--rir_apply_prob",
            type=float,
            default=1.0,
            help="The probability for applying RIR convolution.",
        )
        group.add_argument(
            "--noise_scp",
            type=str_or_none,
            default=None,
            help="The file path of noise scp file.",
        )
        group.add_argument(
            "--noise_apply_prob",
            type=float,
            default=1.0,
            help="The probability applying Noise adding.",
        )
        group.add_argument(
            "--noise_db_range",
            type=str,
            default="13_15",
            help="The range of noise decibel level.",
        )
        group.add_argument(
            "--short_noise_thres",
            type=float,
            default=0.5,
            help="If len(noise) / len(speech) is smaller than this threshold during "
            "dynamic mixing, a warning will be displayed.",
        )
        group.add_argument(
            "--aux_ctc_tasks",
            type=str,
            nargs="+",
            default=[],
            help="Auxillary tasks to train on using CTC loss. ",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)

    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Return the batch collate function used by the data loader."""
        assert check_argument_types()
        # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
        return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)

    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
        """Build the per-utterance preprocessing function (or None)."""
        assert check_argument_types()
        if args.use_preprocessor:
            try:
                _ = getattr(args, "preprocessor")
            except AttributeError:
                # Configs from older versions lack --preprocessor; fall back
                # to the default preprocessor.
                setattr(args, "preprocessor", "default")
                setattr(args, "preprocessor_conf", dict())
            except Exception as e:
                raise e

            preprocessor_class = preprocessor_choices.get_class(args.preprocessor)
            retval = preprocessor_class(
                train=train,
                token_type=args.token_type,
                token_list=args.token_list,
                bpemodel=args.bpemodel,
                non_linguistic_symbols=args.non_linguistic_symbols,
                text_cleaner=args.cleaner,
                g2p_type=args.g2p,
                # NOTE(kamo): Check attribute existence for backward compatibility
                rir_scp=args.rir_scp if hasattr(args, "rir_scp") else None,
                rir_apply_prob=args.rir_apply_prob
                if hasattr(args, "rir_apply_prob")
                else 1.0,
                noise_scp=args.noise_scp if hasattr(args, "noise_scp") else None,
                noise_apply_prob=args.noise_apply_prob
                if hasattr(args, "noise_apply_prob")
                else 1.0,
                noise_db_range=args.noise_db_range
                if hasattr(args, "noise_db_range")
                else "13_15",
                short_noise_thres=args.short_noise_thres
                if hasattr(args, "short_noise_thres")
                else 0.5,
                # BUGFIX: previously gated on hasattr(args, "rir_scp"), which
                # made volume normalization depend on an unrelated option.
                speech_volume_normalize=args.speech_volume_normalize
                if hasattr(args, "speech_volume_normalize")
                else None,
                aux_task_names=args.aux_ctc_tasks
                if hasattr(args, "aux_ctc_tasks")
                else None,
                **args.preprocessor_conf,
            )
        else:
            retval = None
        assert check_return_type(retval)
        return retval

    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return data keys that must be present for this task."""
        if not inference:
            retval = ("speech", "text")
        else:
            # Recognition mode
            retval = ("speech",)
        return retval

    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return optional data keys (extra speaker transcripts for PIT)."""
        MAX_REFERENCE_NUM = 4

        retval = ["text_spk{}".format(n) for n in range(2, MAX_REFERENCE_NUM + 1)]
        retval = tuple(retval)
        logging.info(f"Optional Data Names: {retval}")
        assert check_return_type(retval)
        return retval

    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetASRModel:
        """Construct the full ASR model from parsed command-line arguments."""
        assert check_argument_types()
        if isinstance(args.token_list, str):
            with open(args.token_list, encoding="utf-8") as f:
                token_list = [line.rstrip() for line in f]

            # Overwriting token_list to keep it as "portable".
            args.token_list = list(token_list)
        elif isinstance(args.token_list, (tuple, list)):
            token_list = list(args.token_list)
        else:
            raise RuntimeError("token_list must be str or list")

        # If use multi-blank transducer criterion,
        # big blank symbols are added just before the standard blank
        if args.model_conf.get("transducer_multi_blank_durations", None) is not None:
            sym_blank = args.model_conf.get("sym_blank", "<blank>")
            blank_idx = token_list.index(sym_blank)
            for dur in args.model_conf.get("transducer_multi_blank_durations"):
                if f"<blank{dur}>" not in token_list:  # avoid this during inference
                    token_list.insert(blank_idx, f"<blank{dur}>")
            args.token_list = token_list

        vocab_size = len(token_list)
        logging.info(f"Vocabulary size: {vocab_size}")

        # 1. frontend
        if args.input_size is None:
            # Extract features in the model
            frontend_class = frontend_choices.get_class(args.frontend)
            frontend = frontend_class(**args.frontend_conf)
            input_size = frontend.output_size()
        else:
            # Give features from data-loader
            args.frontend = None
            args.frontend_conf = {}
            frontend = None
            input_size = args.input_size

        # 2. Data augmentation for spectrogram
        if args.specaug is not None:
            specaug_class = specaug_choices.get_class(args.specaug)
            specaug = specaug_class(**args.specaug_conf)
        else:
            specaug = None

        # 3. Normalization layer
        if args.normalize is not None:
            normalize_class = normalize_choices.get_class(args.normalize)
            normalize = normalize_class(**args.normalize_conf)
        else:
            normalize = None

        # 4. Pre-encoder input block
        # NOTE(kan-bayashi): Use getattr to keep the compatibility
        if getattr(args, "preencoder", None) is not None:
            preencoder_class = preencoder_choices.get_class(args.preencoder)
            preencoder = preencoder_class(**args.preencoder_conf)
            input_size = preencoder.output_size()
        else:
            preencoder = None

        # 4. Encoder
        encoder_class = encoder_choices.get_class(args.encoder)
        encoder = encoder_class(input_size=input_size, **args.encoder_conf)

        # 5. Post-encoder block
        # NOTE(kan-bayashi): Use getattr to keep the compatibility
        encoder_output_size = encoder.output_size()
        if getattr(args, "postencoder", None) is not None:
            postencoder_class = postencoder_choices.get_class(args.postencoder)
            postencoder = postencoder_class(
                input_size=encoder_output_size, **args.postencoder_conf
            )
            encoder_output_size = postencoder.output_size()
        else:
            postencoder = None

        # 5. Decoder
        if getattr(args, "decoder", None) is not None:
            decoder_class = decoder_choices.get_class(args.decoder)

            if args.decoder == "transducer":
                decoder = decoder_class(
                    vocab_size,
                    embed_pad=0,
                    **args.decoder_conf,
                )

                joint_network = JointNetwork(
                    vocab_size,
                    encoder.output_size(),
                    decoder.dunits,
                    **args.joint_net_conf,
                )
            else:
                decoder = decoder_class(
                    vocab_size=vocab_size,
                    encoder_output_size=encoder_output_size,
                    **args.decoder_conf,
                )
                joint_network = None
        else:
            decoder = None
            joint_network = None

        # 6. CTC
        ctc = CTC(
            odim=vocab_size, encoder_output_size=encoder_output_size, **args.ctc_conf
        )

        # 7. Build model
        try:
            model_class = model_choices.get_class(args.model)
        except AttributeError:
            # Configs from older versions lack --model; use the default.
            model_class = model_choices.get_class("espnet")
        model = model_class(
            vocab_size=vocab_size,
            frontend=frontend,
            specaug=specaug,
            normalize=normalize,
            preencoder=preencoder,
            encoder=encoder,
            postencoder=postencoder,
            decoder=decoder,
            ctc=ctc,
            joint_network=joint_network,
            token_list=token_list,
            **args.model_conf,
        )

        # FIXME(kamo): Should be done in model?
        # 8. Initialize
        if args.init is not None:
            initialize(model, args.init)

        assert check_return_type(model)
        return model
| 21,301 | 33.693811 | 85 | py |
espnet | espnet-master/espnet2/tasks/lm.py | import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.lm.abs_model import AbsLM
from espnet2.lm.espnet_model import ESPnetLanguageModel
from espnet2.lm.seq_rnn_lm import SequentialRNNLM
from espnet2.lm.transformer_lm import TransformerLM
from espnet2.tasks.abs_task import AbsTask
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.torch_utils.initialize import initialize
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import CommonPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import str2bool, str_or_none
# Selectable language-model architectures (--lm / --lm_conf).
lm_choices = ClassChoices(
    "lm",
    classes=dict(
        seq_rnn=SequentialRNNLM,
        transformer=TransformerLM,
    ),
    type_check=AbsLM,
    default="seq_rnn",
)
class LMTask(AbsTask):
    """Task definition for language-model training.

    Builds an ``ESPnetLanguageModel`` wrapping one of the LM architectures
    registered in ``lm_choices``.
    """

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1

    # Add variable objects configurations
    class_choices_list = [lm_choices]

    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer

    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add LM-specific command-line options to ``parser``."""
        # NOTE(kamo): Use '_' instead of '-' to avoid confusion
        assert check_argument_types()
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetLanguageModel),
            help="The keyword arguments for model class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word"],
            help="",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="Specify g2p method if --token_type=phn",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)

        assert check_return_type(parser)
        return parser

    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Return the batch collate function used by the data loader."""
        assert check_argument_types()
        return CommonCollateFn(int_pad_value=0)

    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
        """Build the per-utterance preprocessing function (or None)."""
        assert check_argument_types()
        if args.use_preprocessor:
            retval = CommonPreprocessor(
                train=train,
                token_type=args.token_type,
                token_list=args.token_list,
                bpemodel=args.bpemodel,
                text_cleaner=args.cleaner,
                g2p_type=args.g2p,
                non_linguistic_symbols=args.non_linguistic_symbols,
            )
        else:
            retval = None
        assert check_return_type(retval)
        return retval

    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return data keys that must be present for this task."""
        retval = ("text",)
        return retval

    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return optional data keys (none for LM training)."""
        retval = ()
        return retval

    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetLanguageModel:
        """Construct the language model from parsed command-line arguments."""
        assert check_argument_types()
        if isinstance(args.token_list, str):
            with open(args.token_list, encoding="utf-8") as f:
                token_list = [line.rstrip() for line in f]

            # "args" is saved as it is in a yaml file by BaseTask.main().
            # Overwriting token_list to keep it as "portable".
            args.token_list = token_list.copy()
        elif isinstance(args.token_list, (tuple, list)):
            token_list = args.token_list.copy()
        else:
            # BUGFIX: message previously said "str or dict" although the
            # accepted types are str, tuple, and list.
            raise RuntimeError("token_list must be str or list")

        vocab_size = len(token_list)
        logging.info(f"Vocabulary size: {vocab_size}")

        # 1. Build LM model
        lm_class = lm_choices.get_class(args.lm)
        lm = lm_class(vocab_size=vocab_size, **args.lm_conf)

        # 2. Build ESPnetModel
        # Assume the last-id is sos_and_eos
        model = ESPnetLanguageModel(lm=lm, vocab_size=vocab_size, **args.model_conf)

        # FIXME(kamo): Should be done in model?
        # 3. Initialize
        if args.init is not None:
            initialize(model, args.init)

        assert check_return_type(model)
        return model
| 6,804 | 31.716346 | 84 | py |
espnet | espnet-master/espnet2/tasks/uasr.py | import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.asr.frontend.fused import FusedFrontends
from espnet2.asr.frontend.s3prl import S3prlFrontend
from espnet2.asr.frontend.windowing import SlidingWindow
from espnet2.tasks.abs_task import AbsTask, optim_classes
from espnet2.torch_utils.initialize import initialize
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import CommonPreprocessor
from espnet2.train.uasr_trainer import UASRTrainer
from espnet2.uasr.discriminator.abs_discriminator import AbsDiscriminator
from espnet2.uasr.discriminator.conv_discriminator import ConvDiscriminator
from espnet2.uasr.espnet_model import ESPnetUASRModel
from espnet2.uasr.generator.abs_generator import AbsGenerator
from espnet2.uasr.generator.conv_generator import ConvGenerator
from espnet2.uasr.loss.abs_loss import AbsUASRLoss
from espnet2.uasr.loss.discriminator_loss import UASRDiscriminatorLoss
from espnet2.uasr.loss.gradient_penalty import UASRGradientPenalty
from espnet2.uasr.loss.phoneme_diversity_loss import UASRPhonemeDiversityLoss
from espnet2.uasr.loss.pseudo_label_loss import UASRPseudoLabelLoss
from espnet2.uasr.loss.smoothness_penalty import UASRSmoothnessPenalty
from espnet2.uasr.segmenter.abs_segmenter import AbsSegmenter
from espnet2.uasr.segmenter.join_segmenter import JoinSegmenter
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
# ---------------------------------------------------------------------------
# Module-level ClassChoices registries for the unsupervised ASR (UASR) task.
# Each registry exposes --<name> / --<name>_conf CLI options selecting one of
# the registered component classes.
# ---------------------------------------------------------------------------

# Feature-extraction frontend (--frontend / --frontend_conf).
frontend_choices = ClassChoices(
    name="frontend",
    classes=dict(
        default=DefaultFrontend,
        sliding_window=SlidingWindow,
        s3prl=S3prlFrontend,
        fused=FusedFrontends,
    ),
    type_check=AbsFrontend,
    default="default",
)
# Optional segmenter applied before the generator (--segmenter).
segmenter_choices = ClassChoices(
    name="segmenter",
    classes=dict(
        join=JoinSegmenter,
    ),
    type_check=AbsSegmenter,
    default=None,
    optional=True,
)
# GAN discriminator (--discriminator / --discriminator_conf).
discriminator_choices = ClassChoices(
    name="discriminator",
    classes=dict(
        conv=ConvDiscriminator,
    ),
    type_check=AbsDiscriminator,
    default="conv",
)
# GAN generator (--generator / --generator_conf).
generator_choices = ClassChoices(
    name="generator",
    classes=dict(
        conv=ConvGenerator,
    ),
    type_check=AbsGenerator,
    default="conv",
)
# Loss components combined in ESPnetUASRModel (--loss / --loss_conf).
loss_choices = ClassChoices(
    name="loss",
    classes=dict(
        discriminator_loss=UASRDiscriminatorLoss,
        gradient_penalty=UASRGradientPenalty,
        smoothness_penalty=UASRSmoothnessPenalty,
        phoneme_diversity_loss=UASRPhonemeDiversityLoss,
        pseudo_label_loss=UASRPseudoLabelLoss,
    ),
    type_check=AbsUASRLoss,
    default="discriminator_loss",
)
class UASRTask(AbsTask):
    """Task definition for unsupervised ASR (wav2vec-U style GAN training).

    Uses two optimizers (generator and discriminator) driven by
    ``UASRTrainer`` and builds an ``ESPnetUASRModel``.
    """

    # If you need more than one optimizers, change this value
    num_optimizers: int = 2

    # Add variable objects configurations
    class_choices_list = [
        # --frontend and --frontend_conf
        frontend_choices,
        # --segmenter and --segmenter_conf
        segmenter_choices,
        # --discriminator and --discriminator_conf
        discriminator_choices,
        # --generator and --generator_conf
        generator_choices,
        loss_choices,
    ]

    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = UASRTrainer

    @classmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add UASR-specific command-line options to ``parser``."""
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="phn",
            choices=["phn"],
            help="The text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        group.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        group.add_argument(
            "--losses",
            action=NestedDictAction,
            default=[
                {
                    "name": "discriminator_loss",
                    "conf": {},
                },
            ],
            help="The criterions binded with the loss wrappers.",
            # Loss format would be like:
            # losses:
            #   - name: loss1
            #     conf:
            #       weight: 1.0
            #       smoothed: false
            #   - name: loss2
            #     conf:
            #       weight: 0.1
            #       smoothed: false
        )

        group = parser.add_argument_group(description="Task related")
        group.add_argument(
            "--kenlm_path",
            type=str,
            help="path of n-gram kenlm for validation",
        )
        parser.add_argument(
            "--int_pad_value",
            type=int,
            default=0,
            help="Integer padding value for real token sequence",
        )
        parser.add_argument(
            "--fairseq_checkpoint",
            type=str,
            help="Fairseq checkpoint to initialize model",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)

    @classmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[
        [Collection[Tuple[str, Dict[str, np.ndarray]]]],
        Tuple[List[str], Dict[str, torch.Tensor]],
    ]:
        """Return the batch collate function used by the data loader."""
        assert check_argument_types()
        # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
        return CommonCollateFn(float_pad_value=0.0, int_pad_value=args.int_pad_value)

    @classmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
        """Build the per-utterance preprocessing function (or None)."""
        assert check_argument_types()
        if args.use_preprocessor:
            retval = CommonPreprocessor(
                train=train,
                token_type=args.token_type,
                token_list=args.token_list,
                bpemodel=args.bpemodel,
                non_linguistic_symbols=args.non_linguistic_symbols,
                text_cleaner=args.cleaner,
            )
        else:
            retval = None
        assert check_return_type(retval)
        return retval

    @classmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return data keys that must be present for this task."""
        if not inference:
            retval = ("speech", "text")
        else:
            # Recognition mode
            retval = ("speech",)
        return retval

    @classmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Return optional data keys used for UASR training."""
        retval = ("pseudo_labels", "input_cluster_id")
        assert check_return_type(retval)
        return retval

    @classmethod
    def build_model(cls, args: argparse.Namespace) -> ESPnetUASRModel:
        """Construct the UASR model from parsed command-line arguments."""
        assert check_argument_types()
        if isinstance(args.token_list, str):
            with open(args.token_list, encoding="utf-8") as f:
                token_list = [line.rstrip() for line in f]

            # Overwriting token_list to keep it as "portable".
            args.token_list = list(token_list)
        elif isinstance(args.token_list, (tuple, list)):
            token_list = list(args.token_list)
        else:
            raise RuntimeError("token_list must be str or list")
        vocab_size = len(token_list)
        logging.info(f"Vocabulary size: {vocab_size}")

        # load from fairseq checkpoint
        load_fairseq_model = False
        cfg = None
        if args.fairseq_checkpoint is not None:
            load_fairseq_model = True
            ckpt = args.fairseq_checkpoint
            logging.info(f"Loading parameters from fairseq: {ckpt}")
            state_dict = torch.load(ckpt)
            if "cfg" in state_dict and state_dict["cfg"] is not None:
                model_cfg = state_dict["cfg"]["model"]
                # NOTE(review): model_cfg is only logged here and never
                # assigned to cfg, so the components below are still built
                # with cfg=None -- confirm whether cfg = model_cfg was
                # intended.
                logging.info(f"Building model from {model_cfg}")
            else:
                raise RuntimeError(f"Bad 'cfg' in state_dict of {ckpt}")

        # 1. frontend
        if args.write_collected_feats:
            # Extract features in the model
            # Note(jiatong): if we use write_collected_feats=True (we use
            #                pre-extracted feature for training): we still initial
            #                frontend to allow inference with raw speech signal
            #                but the frontend is not used in training
            frontend_class = frontend_choices.get_class(args.frontend)
            frontend = frontend_class(**args.frontend_conf)
            if args.input_size is None:
                input_size = frontend.output_size()
            else:
                input_size = args.input_size
        else:
            # Give features from data-loader
            args.frontend = None
            args.frontend_conf = {}
            frontend = None
            input_size = args.input_size

        # 2. Segmenter
        if args.segmenter is not None:
            segmenter_class = segmenter_choices.get_class(args.segmenter)
            segmenter = segmenter_class(cfg=cfg, **args.segmenter_conf)
        else:
            segmenter = None

        # 3. Discriminator
        discriminator_class = discriminator_choices.get_class(args.discriminator)
        discriminator = discriminator_class(
            cfg=cfg, input_dim=vocab_size, **args.discriminator_conf
        )

        # 4. Generator
        generator_class = generator_choices.get_class(args.generator)
        generator = generator_class(
            cfg=cfg, input_dim=input_size, output_dim=vocab_size, **args.generator_conf
        )

        # 5. Loss definition
        losses = {}
        if getattr(args, "losses", None) is not None:
            # This check is for the compatibility when load models
            # that packed by older version
            for ctr in args.losses:
                logging.info("initialize loss: {}".format(ctr["name"]))
                if ctr["name"] == "gradient_penalty":
                    # Gradient penalty needs a handle to the discriminator.
                    loss = loss_choices.get_class(ctr["name"])(
                        discriminator=discriminator, **ctr["conf"]
                    )
                else:
                    loss = loss_choices.get_class(ctr["name"])(**ctr["conf"])
                losses[ctr["name"]] = loss

        # 6. Build model
        logging.info(f"kenlm_path is: {args.kenlm_path}")
        model = ESPnetUASRModel(
            cfg=cfg,
            frontend=frontend,
            segmenter=segmenter,
            discriminator=discriminator,
            generator=generator,
            losses=losses,
            kenlm_path=args.kenlm_path,
            token_list=args.token_list,
            max_epoch=args.max_epoch,
            vocab_size=vocab_size,
            use_collected_training_feats=args.write_collected_feats,
        )

        # FIXME(kamo): Should be done in model?
        # 7. Initialize
        if load_fairseq_model:
            logging.info(f"Initializing model from {ckpt}")
            model.load_state_dict(state_dict["model"], strict=False)
        else:
            if args.init is not None:
                initialize(model, args.init)

        assert check_return_type(model)
        return model

    @classmethod
    def build_optimizers(
        cls,
        args: argparse.Namespace,
        model: ESPnetUASRModel,
    ) -> List[torch.optim.Optimizer]:
        """Build one optimizer for the generator and one for the discriminator."""
        # check
        assert hasattr(model, "generator")
        assert hasattr(model, "discriminator")

        generator_param_list = list(model.generator.parameters())
        discriminator_param_list = list(model.discriminator.parameters())

        # Add optional sets of model parameters
        # BUGFIX: use_segmenter is a flag; the old "is not None" test was
        # always true and crashed when no segmenter was configured.
        if model.use_segmenter:
            generator_param_list += list(model.segmenter.parameters())
        if (
            "pseudo_label_loss" in model.losses.keys()
            and model.losses["pseudo_label_loss"].weight > 0
        ):
            generator_param_list += list(
                model.losses["pseudo_label_loss"].decoder.parameters()
            )

        # define generator optimizer
        optim_generator_class = optim_classes.get(args.optim)
        if optim_generator_class is None:
            # BUGFIX: message previously referenced the non-existent
            # args.optim_generator, which raised AttributeError instead of
            # the intended ValueError.
            raise ValueError(f"must be one of {list(optim_classes)}: {args.optim}")
        optim_generator = optim_generator_class(
            generator_param_list,
            **args.optim_conf,
        )
        optimizers = [optim_generator]

        # define discriminator optimizer
        optim_discriminator_class = optim_classes.get(args.optim2)
        if optim_discriminator_class is None:
            # BUGFIX: same as above, for the second optimizer option.
            raise ValueError(f"must be one of {list(optim_classes)}: {args.optim2}")
        optim_discriminator = optim_discriminator_class(
            discriminator_param_list,
            **args.optim2_conf,
        )
        optimizers += [optim_discriminator]

        return optimizers
| 15,031 | 33.796296 | 87 | py |
espnet | espnet-master/espnet2/tasks/tts.py | """Text-to-speech task."""
import argparse
import logging
from pathlib import Path
from typing import Callable, Collection, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import yaml
from typeguard import check_argument_types, check_return_type
from espnet2.gan_tts.jets import JETS
from espnet2.gan_tts.joint import JointText2Wav
from espnet2.gan_tts.vits import VITS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.tasks.abs_task import AbsTask
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.preprocessor import CommonPreprocessor
from espnet2.train.trainer import Trainer
from espnet2.tts.abs_tts import AbsTTS
from espnet2.tts.espnet_model import ESPnetTTSModel
from espnet2.tts.fastspeech import FastSpeech
from espnet2.tts.fastspeech2 import FastSpeech2
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
from espnet2.tts.feats_extract.dio import Dio
from espnet2.tts.feats_extract.energy import Energy
from espnet2.tts.feats_extract.linear_spectrogram import LinearSpectrogram
from espnet2.tts.feats_extract.log_mel_fbank import LogMelFbank
from espnet2.tts.feats_extract.log_spectrogram import LogSpectrogram
from espnet2.tts.prodiff import ProDiff
from espnet2.tts.tacotron2 import Tacotron2
from espnet2.tts.transformer import Transformer
from espnet2.tts.utils import ParallelWaveGANPretrainedVocoder
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.griffin_lim import Spectrogram2Waveform
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
feats_extractor_choices = ClassChoices(
"feats_extract",
classes=dict(
fbank=LogMelFbank,
spectrogram=LogSpectrogram,
linear_spectrogram=LinearSpectrogram,
),
type_check=AbsFeatsExtract,
default="fbank",
)
pitch_extractor_choices = ClassChoices(
"pitch_extract",
classes=dict(dio=Dio),
type_check=AbsFeatsExtract,
default=None,
optional=True,
)
energy_extractor_choices = ClassChoices(
"energy_extract",
classes=dict(energy=Energy),
type_check=AbsFeatsExtract,
default=None,
optional=True,
)
normalize_choices = ClassChoices(
"normalize",
classes=dict(global_mvn=GlobalMVN),
type_check=AbsNormalize,
default="global_mvn",
optional=True,
)
pitch_normalize_choices = ClassChoices(
"pitch_normalize",
classes=dict(global_mvn=GlobalMVN),
type_check=AbsNormalize,
default=None,
optional=True,
)
energy_normalize_choices = ClassChoices(
"energy_normalize",
classes=dict(global_mvn=GlobalMVN),
type_check=AbsNormalize,
default=None,
optional=True,
)
tts_choices = ClassChoices(
"tts",
classes=dict(
tacotron2=Tacotron2,
transformer=Transformer,
fastspeech=FastSpeech,
fastspeech2=FastSpeech2,
prodiff=ProDiff,
# NOTE(kan-bayashi): available only for inference
vits=VITS,
joint_text2wav=JointText2Wav,
jets=JETS,
),
type_check=AbsTTS,
default="tacotron2",
)
class TTSTask(AbsTask):
    """Text-to-speech task definition (feature extractor + normalizer + TTS)."""

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1

    # Add variable objects configurations
    # Each entry contributes a --<name> / --<name>_conf CLI option pair.
    class_choices_list = [
        # --feats_extractor and --feats_extractor_conf
        feats_extractor_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --tts and --tts_conf
        tts_choices,
        # --pitch_extract and --pitch_extract_conf
        pitch_extractor_choices,
        # --pitch_normalize and --pitch_normalize_conf
        pitch_normalize_choices,
        # --energy_extract and --energy_extract_conf
        energy_extractor_choices,
        # --energy_normalize and --energy_normalize_conf
        energy_normalize_choices,
    ]

    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer
@classmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
    """Register TTS-task-specific command-line options on ``parser``.

    Adds "Task related" options (token list, output dim, model conf),
    "Preprocess related" options (tokenization / text cleaning), and one
    ``--<name>`` / ``--<name>_conf`` pair per entry of
    ``cls.class_choices_list``.
    """
    # NOTE(kamo): Use '_' instead of '-' to avoid confusion
    assert check_argument_types()
    group = parser.add_argument_group(description="Task related")

    # NOTE(kamo): add_arguments(..., required=True) can't be used
    # to provide --print_config mode. Instead of it, do as
    required = parser.get_default("required")
    required += ["token_list"]

    group.add_argument(
        "--token_list",
        type=str_or_none,
        default=None,
        help="A text mapping int-id to token",
    )
    group.add_argument(
        "--odim",
        type=int_or_none,
        default=None,
        help="The number of dimension of output feature",
    )
    group.add_argument(
        "--model_conf",
        action=NestedDictAction,
        default=get_default_kwargs(ESPnetTTSModel),
        help="The keyword arguments for model class.",
    )

    group = parser.add_argument_group(description="Preprocess related")
    group.add_argument(
        "--use_preprocessor",
        type=str2bool,
        default=True,
        help="Apply preprocessing to data or not",
    )
    group.add_argument(
        "--token_type",
        type=str,
        default="phn",
        choices=["bpe", "char", "word", "phn"],
        help="The text will be tokenized in the specified level token",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model file of sentencepiece",
    )
    # CONSISTENCY FIX: the next three options were added via
    # ``parser.add_argument`` while every sibling option goes through the
    # "Preprocess related" group; registering them on the group keeps
    # --help output organized and does not change parsing behavior.
    group.add_argument(
        "--non_linguistic_symbols",
        type=str_or_none,
        default=None,
        help="non_linguistic_symbols file path",
    )
    group.add_argument(
        "--cleaner",
        type=str_or_none,
        choices=[None, "tacotron", "jaconv", "vietnamese", "korean_cleaner"],
        default=None,
        help="Apply text cleaning",
    )
    group.add_argument(
        "--g2p",
        type=str_or_none,
        choices=g2p_choices,
        default=None,
        help="Specify g2p method if --token_type=phn",
    )

    for class_choices in cls.class_choices_list:
        # Append --<name> and --<name>_conf.
        # e.g. --encoder and --encoder_conf
        class_choices.add_arguments(group)
@classmethod
def build_collate_fn(
    cls, args: argparse.Namespace, train: bool
) -> Callable[
    [Collection[Tuple[str, Dict[str, np.ndarray]]]],
    Tuple[List[str], Dict[str, torch.Tensor]],
]:
    """Return the batch-collation callable used by the data loader."""
    assert check_argument_types()
    # Speaker embeddings and speaker/language IDs are fixed-size items,
    # so they are excluded from sequence padding.
    fixed_size_keys = ["spembs", "sids", "lids"]
    collate = CommonCollateFn(
        float_pad_value=0.0,
        int_pad_value=0,
        not_sequence=fixed_size_keys,
    )
    return collate
@classmethod
def build_preprocess_fn(
    cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
    """Build the per-utterance text preprocessing callable (or None)."""
    assert check_argument_types()
    if not args.use_preprocessor:
        retval = None
    else:
        # Tokenization / cleaning / g2p are all delegated to the common
        # preprocessor, configured from the parsed CLI options.
        retval = CommonPreprocessor(
            train=train,
            token_type=args.token_type,
            token_list=args.token_list,
            bpemodel=args.bpemodel,
            non_linguistic_symbols=args.non_linguistic_symbols,
            text_cleaner=args.cleaner,
            g2p_type=args.g2p,
        )
    assert check_return_type(retval)
    return retval
@classmethod
def required_data_names(
    cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
    """Name the data entries every example must provide.

    Ground-truth speech is only required outside inference mode.
    """
    return ("text",) if inference else ("text", "speech")
@classmethod
def optional_data_names(
    cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
    """Name the data entries an example may optionally provide."""
    names = ["spembs"]
    if inference:
        # Reference speech becomes optional at inference time
        # (e.g. for teacher-forced synthesis).
        names.append("speech")
    names += ["durations", "pitch", "energy", "sids", "lids"]
    return tuple(names)
@classmethod
def build_model(cls, args: argparse.Namespace) -> ESPnetTTSModel:
    """Build the complete TTS model from parsed arguments.

    Args:
        args: Namespace holding ``token_list``, ``odim`` and the
            per-component ``*_conf`` dictionaries registered by
            ``add_task_arguments``.

    Returns:
        The assembled ``ESPnetTTSModel``.

    Raises:
        RuntimeError: If ``args.token_list`` is neither a path string nor
            a list/tuple of tokens.
    """
    assert check_argument_types()
    if isinstance(args.token_list, str):
        with open(args.token_list, encoding="utf-8") as f:
            # Keep the first character verbatim (it may be a meaningful
            # whitespace token) and strip only the trailing newline.
            token_list = [line[0] + line[1:].rstrip() for line in f]

        # "args" is saved as it is in a yaml file by BaseTask.main().
        # Overwriting token_list to keep it as "portable".
        args.token_list = token_list.copy()
    elif isinstance(args.token_list, (tuple, list)):
        token_list = args.token_list.copy()
    else:
        # BUG FIX: the message previously said "str or dict" although the
        # accepted types are str / tuple / list.
        raise RuntimeError("token_list must be str or list")
    vocab_size = len(token_list)
    logging.info(f"Vocabulary size: {vocab_size }")

    # 1. feats_extract
    if args.odim is None:
        # Extract features in the model
        feats_extract_class = feats_extractor_choices.get_class(args.feats_extract)
        feats_extract = feats_extract_class(**args.feats_extract_conf)
        odim = feats_extract.output_size()
    else:
        # Give features from data-loader; drop extractor config so the
        # dumped yaml reflects that extraction is external.
        args.feats_extract = None
        args.feats_extract_conf = None
        feats_extract = None
        odim = args.odim

    # 2. Normalization layer
    if args.normalize is not None:
        normalize_class = normalize_choices.get_class(args.normalize)
        normalize = normalize_class(**args.normalize_conf)
    else:
        normalize = None

    # 3. TTS
    tts_class = tts_choices.get_class(args.tts)
    tts = tts_class(idim=vocab_size, odim=odim, **args.tts_conf)

    # 4. Extra components (pitch/energy extraction + normalization)
    pitch_extract = None
    energy_extract = None
    pitch_normalize = None
    energy_normalize = None
    if getattr(args, "pitch_extract", None) is not None:
        pitch_extract_class = pitch_extractor_choices.get_class(args.pitch_extract)
        # The pitch sequence must be downsampled consistently with the
        # model's reduction factor, so inherit it from tts_conf unless the
        # extractor config pins (and matches) it explicitly.
        if args.pitch_extract_conf.get("reduction_factor", None) is not None:
            assert args.pitch_extract_conf.get(
                "reduction_factor", None
            ) == args.tts_conf.get("reduction_factor", 1)
        else:
            args.pitch_extract_conf["reduction_factor"] = args.tts_conf.get(
                "reduction_factor", 1
            )
        pitch_extract = pitch_extract_class(**args.pitch_extract_conf)
    if getattr(args, "energy_extract", None) is not None:
        # Same reduction-factor consistency rule as for pitch above.
        if args.energy_extract_conf.get("reduction_factor", None) is not None:
            assert args.energy_extract_conf.get(
                "reduction_factor", None
            ) == args.tts_conf.get("reduction_factor", 1)
        else:
            args.energy_extract_conf["reduction_factor"] = args.tts_conf.get(
                "reduction_factor", 1
            )
        energy_extract_class = energy_extractor_choices.get_class(
            args.energy_extract
        )
        energy_extract = energy_extract_class(**args.energy_extract_conf)
    if getattr(args, "pitch_normalize", None) is not None:
        pitch_normalize_class = pitch_normalize_choices.get_class(
            args.pitch_normalize
        )
        pitch_normalize = pitch_normalize_class(**args.pitch_normalize_conf)
    if getattr(args, "energy_normalize", None) is not None:
        energy_normalize_class = energy_normalize_choices.get_class(
            args.energy_normalize
        )
        energy_normalize = energy_normalize_class(**args.energy_normalize_conf)

    # 5. Build model
    model = ESPnetTTSModel(
        feats_extract=feats_extract,
        pitch_extract=pitch_extract,
        energy_extract=energy_extract,
        normalize=normalize,
        pitch_normalize=pitch_normalize,
        energy_normalize=energy_normalize,
        tts=tts,
        **args.model_conf,
    )
    assert check_return_type(model)
    return model
@classmethod
def build_vocoder_from_file(
    cls,
    vocoder_config_file: Union[Path, str] = None,
    vocoder_file: Union[Path, str] = None,
    model: Optional[ESPnetTTSModel] = None,
    device: str = "cpu",
):
    """Build a vocoder for waveform generation.

    Args:
        vocoder_config_file: Optional yaml config for the vocoder.
        vocoder_file: Optional vocoder checkpoint. ``None`` selects a
            Griffin-Lim vocoder; a ``.pkl`` path selects a pretrained
            parallel_wavegan model.
        model: Optional TTS model whose feature-extractor parameters seed
            the Griffin-Lim configuration.
        device: Device the parallel_wavegan vocoder is moved to.

    Returns:
        A vocoder object, or ``None`` when Griffin-Lim cannot be
        configured from the available information.

    Raises:
        ValueError: If ``vocoder_file`` has an unsupported format.
    """
    # Build vocoder
    if vocoder_file is None:
        # If vocoder file is not provided, use griffin-lim as a vocoder
        vocoder_conf = {}
        if vocoder_config_file is not None:
            vocoder_config_file = Path(vocoder_config_file)
            with vocoder_config_file.open("r", encoding="utf-8") as f:
                vocoder_conf = yaml.safe_load(f)
        # BUG FIX: ``model`` is Optional and defaults to None; previously
        # this line raised AttributeError when no model was supplied.
        if model is not None and model.feats_extract is not None:
            vocoder_conf.update(model.feats_extract.get_parameters())
        if (
            "n_fft" in vocoder_conf
            and "n_shift" in vocoder_conf
            and "fs" in vocoder_conf
        ):
            return Spectrogram2Waveform(**vocoder_conf)
        else:
            logging.warning("Vocoder is not available. Skipped its building.")
            return None

    elif str(vocoder_file).endswith(".pkl"):
        # If the extension is ".pkl", the model is trained with parallel_wavegan
        vocoder = ParallelWaveGANPretrainedVocoder(
            vocoder_file, vocoder_config_file
        )
        return vocoder.to(device)

    else:
        raise ValueError(f"{vocoder_file} is not supported format.")
| 14,328 | 33.94878 | 87 | py |
espnet | espnet-master/espnet2/tasks/enh.py | import argparse
import copy
import os
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.diar.layers.abs_mask import AbsMask
from espnet2.diar.layers.multi_mask import MultiMask
from espnet2.diar.separator.tcn_separator_nomask import TCNSeparatorNomask
from espnet2.enh.decoder.abs_decoder import AbsDecoder
from espnet2.enh.decoder.conv_decoder import ConvDecoder
from espnet2.enh.decoder.null_decoder import NullDecoder
from espnet2.enh.decoder.stft_decoder import STFTDecoder
from espnet2.enh.encoder.abs_encoder import AbsEncoder
from espnet2.enh.encoder.conv_encoder import ConvEncoder
from espnet2.enh.encoder.null_encoder import NullEncoder
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.enh.espnet_model import ESPnetEnhancementModel
from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss
from espnet2.enh.loss.criterions.tf_domain import (
FrequencyDomainAbsCoherence,
FrequencyDomainDPCL,
FrequencyDomainL1,
FrequencyDomainMSE,
)
from espnet2.enh.loss.criterions.time_domain import (
CISDRLoss,
MultiResL1SpecLoss,
SDRLoss,
SISNRLoss,
SNRLoss,
TimeDomainL1,
TimeDomainMSE,
)
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper
from espnet2.enh.loss.wrappers.dpcl_solver import DPCLSolver
from espnet2.enh.loss.wrappers.fixed_order import FixedOrderSolver
from espnet2.enh.loss.wrappers.mixit_solver import MixITSolver
from espnet2.enh.loss.wrappers.multilayer_pit_solver import MultiLayerPITSolver
from espnet2.enh.loss.wrappers.pit_solver import PITSolver
from espnet2.enh.separator.abs_separator import AbsSeparator
from espnet2.enh.separator.asteroid_models import AsteroidModel_Converter
from espnet2.enh.separator.conformer_separator import ConformerSeparator
from espnet2.enh.separator.dan_separator import DANSeparator
from espnet2.enh.separator.dc_crn_separator import DC_CRNSeparator
from espnet2.enh.separator.dccrn_separator import DCCRNSeparator
from espnet2.enh.separator.dpcl_e2e_separator import DPCLE2ESeparator
from espnet2.enh.separator.dpcl_separator import DPCLSeparator
from espnet2.enh.separator.dprnn_separator import DPRNNSeparator
from espnet2.enh.separator.dptnet_separator import DPTNetSeparator
from espnet2.enh.separator.fasnet_separator import FaSNetSeparator
from espnet2.enh.separator.ineube_separator import iNeuBe
from espnet2.enh.separator.neural_beamformer import NeuralBeamformer
from espnet2.enh.separator.rnn_separator import RNNSeparator
from espnet2.enh.separator.skim_separator import SkiMSeparator
from espnet2.enh.separator.svoice_separator import SVoiceSeparator
from espnet2.enh.separator.tcn_separator import TCNSeparator
from espnet2.enh.separator.tfgridnet_separator import TFGridNet
from espnet2.enh.separator.transformer_separator import TransformerSeparator
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.tasks.abs_task import AbsTask
from espnet2.torch_utils.initialize import initialize
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.distributed_utils import DistributedOption
from espnet2.train.preprocessor import (
AbsPreprocessor,
DynamicMixingPreprocessor,
EnhPreprocessor,
)
from espnet2.train.trainer import Trainer
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import str2bool, str_or_none
encoder_choices = ClassChoices(
name="encoder",
classes=dict(stft=STFTEncoder, conv=ConvEncoder, same=NullEncoder),
type_check=AbsEncoder,
default="stft",
)
separator_choices = ClassChoices(
name="separator",
classes=dict(
asteroid=AsteroidModel_Converter,
conformer=ConformerSeparator,
dan=DANSeparator,
dc_crn=DC_CRNSeparator,
dccrn=DCCRNSeparator,
dpcl=DPCLSeparator,
dpcl_e2e=DPCLE2ESeparator,
dprnn=DPRNNSeparator,
dptnet=DPTNetSeparator,
fasnet=FaSNetSeparator,
rnn=RNNSeparator,
skim=SkiMSeparator,
svoice=SVoiceSeparator,
tcn=TCNSeparator,
transformer=TransformerSeparator,
wpe_beamformer=NeuralBeamformer,
tcn_nomask=TCNSeparatorNomask,
ineube=iNeuBe,
tfgridnet=TFGridNet,
),
type_check=AbsSeparator,
default="rnn",
)
mask_module_choices = ClassChoices(
name="mask_module",
classes=dict(multi_mask=MultiMask),
type_check=AbsMask,
default="multi_mask",
)
decoder_choices = ClassChoices(
name="decoder",
classes=dict(stft=STFTDecoder, conv=ConvDecoder, same=NullDecoder),
type_check=AbsDecoder,
default="stft",
)
loss_wrapper_choices = ClassChoices(
name="loss_wrappers",
classes=dict(
pit=PITSolver,
fixed_order=FixedOrderSolver,
multilayer_pit=MultiLayerPITSolver,
dpcl=DPCLSolver,
mixit=MixITSolver,
),
type_check=AbsLossWrapper,
default=None,
)
criterion_choices = ClassChoices(
name="criterions",
classes=dict(
ci_sdr=CISDRLoss,
coh=FrequencyDomainAbsCoherence,
sdr=SDRLoss,
si_snr=SISNRLoss,
snr=SNRLoss,
l1=FrequencyDomainL1,
dpcl=FrequencyDomainDPCL,
l1_fd=FrequencyDomainL1,
l1_td=TimeDomainL1,
mse=FrequencyDomainMSE,
mse_fd=FrequencyDomainMSE,
mse_td=TimeDomainMSE,
mr_l1_tfd=MultiResL1SpecLoss,
),
type_check=AbsEnhLoss,
default=None,
)
preprocessor_choices = ClassChoices(
name="preprocessor",
classes=dict(
dynamic_mixing=DynamicMixingPreprocessor,
enh=EnhPreprocessor,
),
type_check=AbsPreprocessor,
default=None,
)
MAX_REFERENCE_NUM = 100
class EnhancementTask(AbsTask):
    """Speech enhancement / separation task (encoder -> separator -> decoder)."""

    # If you need more than one optimizers, change this value
    num_optimizers: int = 1

    # Each entry contributes a --<name> / --<name>_conf CLI option pair.
    class_choices_list = [
        # --encoder and --encoder_conf
        encoder_choices,
        # --separator and --separator_conf
        separator_choices,
        # --decoder and --decoder_conf
        decoder_choices,
        # --mask_module and --mask_module_conf
        mask_module_choices,
        # --preprocessor and --preprocessor_conf
        preprocessor_choices,
    ]

    # If you need to modify train() or eval() procedures, change Trainer class here
    trainer = Trainer
@classmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
    """Register enhancement-task-specific command-line options on ``parser``.

    Adds "Task related" options (init, model conf, criterions),
    "Preprocess related" options (augmentation / dynamic mixing), and one
    ``--<name>`` / ``--<name>_conf`` pair per entry of
    ``cls.class_choices_list``.
    """
    group = parser.add_argument_group(description="Task related")

    # NOTE(kamo): add_arguments(..., required=True) can't be used
    # to provide --print_config mode. Instead of it, do as
    # required = parser.get_default("required")

    group.add_argument(
        "--init",
        type=lambda x: str_or_none(x.lower()),
        default=None,
        help="The initialization method",
        choices=[
            "chainer",
            "xavier_uniform",
            "xavier_normal",
            "kaiming_uniform",
            "kaiming_normal",
            None,
        ],
    )

    group.add_argument(
        "--model_conf",
        action=NestedDictAction,
        default=get_default_kwargs(ESPnetEnhancementModel),
        help="The keyword arguments for model class.",
    )

    group.add_argument(
        "--criterions",
        action=NestedDictAction,
        default=[
            {
                "name": "si_snr",
                "conf": {},
                "wrapper": "fixed_order",
                "wrapper_conf": {},
            },
        ],
        help="The criterions binded with the loss wrappers.",
    )

    group = parser.add_argument_group(description="Preprocess related")
    group.add_argument(
        "--speech_volume_normalize",
        type=str_or_none,
        default=None,
        help="Scale the maximum amplitude to the given value or range. "
        "e.g. --speech_volume_normalize 1.0 scales it to 1.0.\n"
        "--speech_volume_normalize 0.5_1.0 scales it to a random number in "
        "the range [0.5, 1.0)",
    )
    group.add_argument(
        "--rir_scp",
        type=str_or_none,
        default=None,
        help="The file path of rir scp file.",
    )
    group.add_argument(
        "--rir_apply_prob",
        type=float,
        default=1.0,
        # TYPO FIX: "THe" -> "The"
        help="The probability for applying RIR convolution.",
    )
    group.add_argument(
        "--noise_scp",
        type=str_or_none,
        default=None,
        help="The file path of noise scp file.",
    )
    group.add_argument(
        "--noise_apply_prob",
        type=float,
        default=1.0,
        # GRAMMAR FIX: was "The probability applying Noise adding."
        help="The probability for applying noise adding.",
    )
    group.add_argument(
        "--noise_db_range",
        type=str,
        default="13_15",
        help="The range of signal-to-noise ratio (SNR) level in decibel.",
    )
    group.add_argument(
        "--short_noise_thres",
        type=float,
        default=0.5,
        help="If len(noise) / len(speech) is smaller than this threshold during "
        "dynamic mixing, a warning will be displayed.",
    )
    group.add_argument(
        "--use_reverberant_ref",
        type=str2bool,
        default=False,
        help="Whether to use reverberant speech references "
        "instead of anechoic ones",
    )
    group.add_argument(
        "--num_spk",
        type=int,
        default=1,
        help="Number of speakers in the input signal.",
    )
    group.add_argument(
        "--num_noise_type",
        type=int,
        default=1,
        help="Number of noise types.",
    )
    group.add_argument(
        "--sample_rate",
        type=int,
        default=8000,
        help="Sampling rate of the data (in Hz).",
    )
    group.add_argument(
        "--force_single_channel",
        type=str2bool,
        default=False,
        help="Whether to force all data to be single-channel.",
    )
    group.add_argument(
        "--channel_reordering",
        type=str2bool,
        default=False,
        help="Whether to randomly reorder the channels of the "
        "multi-channel signals.",
    )
    group.add_argument(
        "--categories",
        nargs="+",
        default=[],
        type=str,
        help="The set of all possible categories in the dataset. Used to add the "
        "category information to each sample",
    )
    group.add_argument(
        "--dynamic_mixing",
        type=str2bool,
        default=False,
        help="Apply dynamic mixing",
    )
    group.add_argument(
        "--utt2spk",
        type=str_or_none,
        default=None,
        help="The file path of utt2spk file. Only used in dynamic_mixing mode.",
    )
    group.add_argument(
        "--dynamic_mixing_gain_db",
        type=float,
        default=0.0,
        help="Random gain (in dB) for dynamic mixing sources",
    )

    for class_choices in cls.class_choices_list:
        # Append --<name> and --<name>_conf.
        # e.g. --encoder and --encoder_conf
        class_choices.add_arguments(group)
@classmethod
def build_collate_fn(
    cls, args: argparse.Namespace, train: bool
) -> Callable[
    [Collection[Tuple[str, Dict[str, np.ndarray]]]],
    Tuple[List[str], Dict[str, torch.Tensor]],
]:
    """Return the batch-collation callable used by the data loader."""
    assert check_argument_types()
    # Variable-length float/int sequences are zero-padded when batching.
    return CommonCollateFn(int_pad_value=0, float_pad_value=0.0)
@classmethod
def build_preprocess_fn(
    cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
    # Build the per-utterance preprocessing callable selected by
    # --preprocessor ("dynamic_mixing" or "enh"); returns None when no
    # preprocessor is configured.
    assert check_argument_types()
    use_preprocessor = getattr(args, "preprocessor", None) is not None
    if use_preprocessor:
        # TODO(simpleoier): To make this as simple as model parts, e.g. encoder
        if args.preprocessor == "dynamic_mixing":
            # Mixtures are synthesized on the fly from single-speaker sources.
            retval = preprocessor_choices.get_class(args.preprocessor)(
                train=train,
                # The source scp is looked up next to the first training
                # data file (default name "spk1.scp").
                source_scp=os.path.join(
                    os.path.dirname(args.train_data_path_and_name_and_type[0][0]),
                    args.preprocessor_conf.get("source_scp_name", "spk1.scp"),
                ),
                # Number of mixed references; defaults to the separator's
                # configured number of speakers.
                ref_num=args.preprocessor_conf.get(
                    "ref_num", args.separator_conf["num_spk"]
                ),
                dynamic_mixing_gain_db=args.preprocessor_conf.get(
                    "dynamic_mixing_gain_db", 0.0
                ),
                speech_name=args.preprocessor_conf.get("speech_name", "speech_mix"),
                speech_ref_name_prefix=args.preprocessor_conf.get(
                    "speech_ref_name_prefix", "speech_ref"
                ),
                mixture_source_name=args.preprocessor_conf.get(
                    "mixture_source_name", None
                ),
                utt2spk=getattr(args, "utt2spk", None),
                categories=args.preprocessor_conf.get("categories", None),
            )
        elif args.preprocessor == "enh":
            # Data-augmentation preprocessor (RIR / noise / volume).
            retval = preprocessor_choices.get_class(args.preprocessor)(
                train=train,
                # NOTE(kamo): Check attribute existence for backward compatibility
                rir_scp=getattr(args, "rir_scp", None),
                rir_apply_prob=getattr(args, "rir_apply_prob", 1.0),
                noise_scp=getattr(args, "noise_scp", None),
                noise_apply_prob=getattr(args, "noise_apply_prob", 1.0),
                noise_db_range=getattr(args, "noise_db_range", "13_15"),
                short_noise_thres=getattr(args, "short_noise_thres", 0.5),
                speech_volume_normalize=getattr(
                    args, "speech_volume_normalize", None
                ),
                # NOTE(review): fallback is None here although the CLI
                # default is False — presumably both are treated as falsy
                # downstream; confirm against EnhPreprocessor.
                use_reverberant_ref=getattr(args, "use_reverberant_ref", None),
                num_spk=getattr(args, "num_spk", 1),
                num_noise_type=getattr(args, "num_noise_type", 1),
                sample_rate=getattr(args, "sample_rate", 8000),
                force_single_channel=getattr(args, "force_single_channel", False),
                channel_reordering=getattr(args, "channel_reordering", False),
                categories=getattr(args, "categories", None),
            )
        else:
            raise ValueError(
                f"Preprocessor type {args.preprocessor} is not supported."
            )
    else:
        retval = None
    assert check_return_type(retval)
    return retval
@classmethod
def required_data_names(
    cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
    """Name the data entries every example must provide.

    Training needs the first reference source; inference only the mixture.
    """
    return ("speech_mix",) if inference else ("speech_ref1",)
@classmethod
def optional_data_names(
    cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
    """Name the data entries an example may optionally provide.

    Reference names are enumerated up to ``MAX_REFERENCE_NUM``; the first
    speech reference is already required, so its enumeration starts at 2.
    """
    names = ["speech_mix"]
    names.extend(f"dereverb_ref{n}" for n in range(1, MAX_REFERENCE_NUM + 1))
    names.extend(f"speech_ref{n}" for n in range(2, MAX_REFERENCE_NUM + 1))
    names.extend(f"noise_ref{n}" for n in range(1, MAX_REFERENCE_NUM + 1))
    names.append("category")
    retval = tuple(names)
    assert check_return_type(retval)
    return retval
@classmethod
def build_model(cls, args: argparse.Namespace) -> ESPnetEnhancementModel:
    # Assemble encoder -> separator -> decoder plus the configured loss
    # wrappers into a full enhancement model.
    assert check_argument_types()

    encoder = encoder_choices.get_class(args.encoder)(**args.encoder_conf)
    # The separator consumes the encoder's output feature dimension.
    separator = separator_choices.get_class(args.separator)(
        encoder.output_dim, **args.separator_conf
    )
    decoder = decoder_choices.get_class(args.decoder)(**args.decoder_conf)
    # "*nomask" separators emit embeddings only, so an external mask
    # module is attached; otherwise masking is internal to the separator.
    if args.separator.endswith("nomask"):
        mask_module = mask_module_choices.get_class(args.mask_module)(
            input_dim=encoder.output_dim,
            **args.mask_module_conf,
        )
    else:
        mask_module = None

    # Instantiate each criterion and wrap it in its loss wrapper
    # (e.g. PIT) as configured by --criterions.
    loss_wrappers = []
    if getattr(args, "criterions", None) is not None:
        # This check is for the compatibility when load models
        # that packed by older version
        for ctr in args.criterions:
            criterion_conf = ctr.get("conf", {})
            criterion = criterion_choices.get_class(ctr["name"])(**criterion_conf)
            loss_wrapper = loss_wrapper_choices.get_class(ctr["wrapper"])(
                criterion=criterion, **ctr["wrapper_conf"]
            )
            loss_wrappers.append(loss_wrapper)

    # 1. Build model
    model = ESPnetEnhancementModel(
        encoder=encoder,
        separator=separator,
        decoder=decoder,
        loss_wrappers=loss_wrappers,
        mask_module=mask_module,
        **args.model_conf,
    )

    # FIXME(kamo): Should be done in model?
    # 2. Initialize
    if args.init is not None:
        initialize(model, args.init)

    assert check_return_type(model)
    return model
@classmethod
def build_iter_factory(
    cls,
    args: argparse.Namespace,
    distributed_option: DistributedOption,
    mode: str,
    kwargs: dict = None,
) -> AbsIterFactory:
    """Build the batch-iterator factory, adjusting fold lengths for dynamic mixing."""
    if mode == "train" and getattr(args, "dynamic_mixing", False):
        # Work on a copy so the caller's args stay untouched; only the
        # first fold length applies when mixtures are generated on the fly.
        args = copy.deepcopy(args)
        args.fold_length = args.fold_length[0:1]
    return super().build_iter_factory(args, distributed_option, mode, kwargs)
| 18,429 | 34.717054 | 88 | py |
espnet | espnet-master/espnet2/tasks/gan_tts.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based text-to-speech task."""
import argparse
import logging
from typing import Callable, Collection, Dict, List, Optional, Tuple
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.gan_tts.espnet_model import ESPnetGANTTSModel
from espnet2.gan_tts.jets import JETS
from espnet2.gan_tts.joint import JointText2Wav
from espnet2.gan_tts.vits import VITS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
from espnet2.tasks.abs_task import AbsTask, optim_classes
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.train.class_choices import ClassChoices
from espnet2.train.collate_fn import CommonCollateFn
from espnet2.train.gan_trainer import GANTrainer
from espnet2.train.preprocessor import CommonPreprocessor
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
from espnet2.tts.feats_extract.dio import Dio
from espnet2.tts.feats_extract.energy import Energy
from espnet2.tts.feats_extract.linear_spectrogram import LinearSpectrogram
from espnet2.tts.feats_extract.log_mel_fbank import LogMelFbank
from espnet2.tts.feats_extract.log_spectrogram import LogSpectrogram
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import int_or_none, str2bool, str_or_none
feats_extractor_choices = ClassChoices(
"feats_extract",
classes=dict(
fbank=LogMelFbank,
log_spectrogram=LogSpectrogram,
linear_spectrogram=LinearSpectrogram,
),
type_check=AbsFeatsExtract,
default="linear_spectrogram",
)
normalize_choices = ClassChoices(
"normalize",
classes=dict(
global_mvn=GlobalMVN,
utterance_mvn=UtteranceMVN,
),
type_check=AbsNormalize,
default=None,
optional=True,
)
tts_choices = ClassChoices(
"tts",
classes=dict(
vits=VITS,
joint_text2wav=JointText2Wav,
jets=JETS,
),
type_check=AbsGANTTS,
default="vits",
)
pitch_extractor_choices = ClassChoices(
"pitch_extract",
classes=dict(dio=Dio),
type_check=AbsFeatsExtract,
default=None,
optional=True,
)
energy_extractor_choices = ClassChoices(
"energy_extract",
classes=dict(energy=Energy),
type_check=AbsFeatsExtract,
default=None,
optional=True,
)
pitch_normalize_choices = ClassChoices(
"pitch_normalize",
classes=dict(
global_mvn=GlobalMVN,
utterance_mvn=UtteranceMVN,
),
type_check=AbsNormalize,
default=None,
optional=True,
)
energy_normalize_choices = ClassChoices(
"energy_normalize",
classes=dict(
global_mvn=GlobalMVN,
utterance_mvn=UtteranceMVN,
),
type_check=AbsNormalize,
default=None,
optional=True,
)
class GANTTSTask(AbsTask):
    """GAN-based text-to-speech task (adversarially trained, e.g. VITS/JETS)."""

    # GAN requires two optimizers
    num_optimizers: int = 2

    # Add variable objects configurations
    # Each entry contributes a --<name> / --<name>_conf CLI option pair.
    class_choices_list = [
        # --feats_extractor and --feats_extractor_conf
        feats_extractor_choices,
        # --normalize and --normalize_conf
        normalize_choices,
        # --tts and --tts_conf
        tts_choices,
        # --pitch_extract and --pitch_extract_conf
        pitch_extractor_choices,
        # --pitch_normalize and --pitch_normalize_conf
        pitch_normalize_choices,
        # --energy_extract and --energy_extract_conf
        energy_extractor_choices,
        # --energy_normalize and --energy_normalize_conf
        energy_normalize_choices,
    ]

    # Use GANTrainer instead of Trainer
    trainer = GANTrainer
@classmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
    """Register GAN-TTS-task-specific command-line options on ``parser``.

    Mirrors ``TTSTask.add_task_arguments`` with the GAN model's defaults.
    """
    # NOTE(kamo): Use '_' instead of '-' to avoid confusion
    assert check_argument_types()
    group = parser.add_argument_group(description="Task related")

    # NOTE(kamo): add_arguments(..., required=True) can't be used
    # to provide --print_config mode. Instead of it, do as
    required = parser.get_default("required")
    required += ["token_list"]

    group.add_argument(
        "--token_list",
        type=str_or_none,
        default=None,
        help="A text mapping int-id to token",
    )
    group.add_argument(
        "--odim",
        type=int_or_none,
        default=None,
        help="The number of dimension of output feature",
    )
    group.add_argument(
        "--model_conf",
        action=NestedDictAction,
        default=get_default_kwargs(ESPnetGANTTSModel),
        help="The keyword arguments for model class.",
    )

    group = parser.add_argument_group(description="Preprocess related")
    group.add_argument(
        "--use_preprocessor",
        type=str2bool,
        default=True,
        help="Apply preprocessing to data or not",
    )
    group.add_argument(
        "--token_type",
        type=str,
        default="phn",
        choices=["bpe", "char", "word", "phn"],
        help="The text will be tokenized in the specified level token",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model file of sentencepiece",
    )
    # CONSISTENCY FIX: the next three options were added via
    # ``parser.add_argument`` while every sibling option goes through the
    # "Preprocess related" group; registering them on the group keeps
    # --help output organized and does not change parsing behavior.
    group.add_argument(
        "--non_linguistic_symbols",
        type=str_or_none,
        default=None,
        help="non_linguistic_symbols file path",
    )
    group.add_argument(
        "--cleaner",
        type=str_or_none,
        choices=[None, "tacotron", "jaconv", "vietnamese", "korean_cleaner"],
        default=None,
        help="Apply text cleaning",
    )
    group.add_argument(
        "--g2p",
        type=str_or_none,
        choices=g2p_choices,
        default=None,
        help="Specify g2p method if --token_type=phn",
    )

    for class_choices in cls.class_choices_list:
        # Append --<name> and --<name>_conf.
        # e.g. --encoder and --encoder_conf
        class_choices.add_arguments(group)
@classmethod
def build_collate_fn(
    cls, args: argparse.Namespace, train: bool
) -> Callable[
    [Collection[Tuple[str, Dict[str, np.ndarray]]]],
    Tuple[List[str], Dict[str, torch.Tensor]],
]:
    """Return the batch-collation callable for GAN-TTS training/evaluation."""
    assert check_argument_types()
    # Fixed-size per-utterance items (x-vectors, speaker/language IDs)
    # must not be sequence-padded.
    return CommonCollateFn(
        int_pad_value=0,
        float_pad_value=0.0,
        not_sequence=["spembs", "sids", "lids"],
    )
@classmethod
def build_preprocess_fn(
    cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
    """Build the per-utterance text preprocessing callable (or None)."""
    assert check_argument_types()
    preprocessor = None
    if args.use_preprocessor:
        # Tokenization / cleaning / g2p are delegated to the common
        # preprocessor, configured from the parsed CLI options.
        preprocessor = CommonPreprocessor(
            train=train,
            token_type=args.token_type,
            token_list=args.token_list,
            bpemodel=args.bpemodel,
            non_linguistic_symbols=args.non_linguistic_symbols,
            text_cleaner=args.cleaner,
            g2p_type=args.g2p,
        )
    assert check_return_type(preprocessor)
    return preprocessor
@classmethod
def required_data_names(
    cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
    """Name the data entries every example must provide."""
    if inference:
        # Only the input text is needed for synthesis.
        return ("text",)
    return ("text", "speech")
@classmethod
def optional_data_names(
    cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
    """Name the data entries an example may optionally provide."""
    head = ("spembs",)
    # Reference speech is optional only at inference time.
    middle = ("speech",) if inference else ()
    tail = ("durations", "pitch", "energy", "sids", "lids")
    return head + middle + tail
@classmethod
def build_model(cls, args: argparse.Namespace) -> ESPnetGANTTSModel:
    """Build the GAN-based TTS model from the parsed arguments.

    Args:
        args: Parsed command-line arguments. ``args.token_list`` is either
            a path to a token-list file or the token list itself.

    Returns:
        ESPnetGANTTSModel: Model assembled from the feature extractors,
        normalization layers, and the GAN-based TTS module.

    Raises:
        RuntimeError: If ``args.token_list`` is neither str, tuple nor list.
    """
    assert check_argument_types()
    if isinstance(args.token_list, str):
        with open(args.token_list, encoding="utf-8") as f:
            # Keep the first character untouched so that tokens which are
            # themselves whitespace survive; only the tail is rstripped.
            token_list = [line[0] + line[1:].rstrip() for line in f]

        # "args" is saved as it is in a yaml file by BaseTask.main().
        # Overwriting token_list to keep it as "portable".
        args.token_list = token_list.copy()
    elif isinstance(args.token_list, (tuple, list)):
        token_list = args.token_list.copy()
    else:
        # Fixed: the message previously said "dict", which did not match
        # the types actually accepted above.
        raise RuntimeError("token_list must be str, tuple or list")

    vocab_size = len(token_list)
    logging.info(f"Vocabulary size: {vocab_size}")

    # 1. feats_extract
    if args.odim is None:
        # Extract features in the model
        feats_extract_class = feats_extractor_choices.get_class(args.feats_extract)
        feats_extract = feats_extract_class(**args.feats_extract_conf)
        odim = feats_extract.output_size()
    else:
        # Give features from data-loader
        args.feats_extract = None
        args.feats_extract_conf = None
        feats_extract = None
        odim = args.odim

    # 2. Normalization layer
    if args.normalize is not None:
        normalize_class = normalize_choices.get_class(args.normalize)
        normalize = normalize_class(**args.normalize_conf)
    else:
        normalize = None

    # 3. TTS
    tts_class = tts_choices.get_class(args.tts)
    tts = tts_class(idim=vocab_size, odim=odim, **args.tts_conf)

    # 4. Extra components (optional pitch/energy extractors and normalizers)
    pitch_extract = None
    energy_extract = None
    pitch_normalize = None
    energy_normalize = None
    if getattr(args, "pitch_extract", None) is not None:
        pitch_extract_class = pitch_extractor_choices.get_class(
            args.pitch_extract,
        )
        pitch_extract = pitch_extract_class(
            **args.pitch_extract_conf,
        )
    if getattr(args, "energy_extract", None) is not None:
        energy_extract_class = energy_extractor_choices.get_class(
            args.energy_extract,
        )
        energy_extract = energy_extract_class(
            **args.energy_extract_conf,
        )
    if getattr(args, "pitch_normalize", None) is not None:
        pitch_normalize_class = pitch_normalize_choices.get_class(
            args.pitch_normalize,
        )
        pitch_normalize = pitch_normalize_class(
            **args.pitch_normalize_conf,
        )
    if getattr(args, "energy_normalize", None) is not None:
        energy_normalize_class = energy_normalize_choices.get_class(
            args.energy_normalize,
        )
        energy_normalize = energy_normalize_class(
            **args.energy_normalize_conf,
        )

    # 5. Build model
    model = ESPnetGANTTSModel(
        feats_extract=feats_extract,
        normalize=normalize,
        pitch_extract=pitch_extract,
        pitch_normalize=pitch_normalize,
        energy_extract=energy_extract,
        energy_normalize=energy_normalize,
        tts=tts,
        **args.model_conf,
    )
    assert check_return_type(model)
    return model
@classmethod
def build_optimizers(
    cls,
    args: argparse.Namespace,
    model: ESPnetGANTTSModel,
) -> List[torch.optim.Optimizer]:
    """Build one optimizer each for the generator and the discriminator.

    ``args.optim``/``args.optim_conf`` configure the generator optimizer;
    ``args.optim2``/``args.optim2_conf`` the discriminator optimizer.

    Args:
        args: Parsed command-line arguments.
        model: GAN-TTS model whose ``tts`` module must expose
            ``generator`` and ``discriminator`` submodules.

    Returns:
        ``[generator_optimizer, discriminator_optimizer]``.

    Raises:
        ValueError: If an unknown optimizer name is given.
        RuntimeError: If --sharded_ddp is set but fairscale is unavailable.
    """
    # Both sub-modules are required for adversarial training.
    assert hasattr(model.tts, "generator")
    assert hasattr(model.tts, "discriminator")

    def _build(optim_name, optim_conf, params):
        # Build one optimizer over the given parameter set; wrapped by
        # fairscale's OSS when sharded DDP is enabled.
        optim_class = optim_classes.get(optim_name)
        if optim_class is None:
            raise ValueError(f"must be one of {list(optim_classes)}: {optim_name}")
        if args.sharded_ddp:
            try:
                import fairscale
            except ImportError:
                raise RuntimeError("Requiring fairscale. Do 'pip install fairscale'")
            return fairscale.optim.oss.OSS(
                params=params,
                optim=optim_class,
                **optim_conf,
            )
        return optim_class(params, **optim_conf)

    # The two branches previously duplicated the same logic; factored out.
    optim_g = _build(args.optim, args.optim_conf, model.tts.generator.parameters())
    optim_d = _build(
        args.optim2, args.optim2_conf, model.tts.discriminator.parameters()
    )
    return [optim_g, optim_d]
| 13,798 | 31.854762 | 87 | py |
espnet | espnet-master/espnet2/tasks/abs_task.py | """Abstract task module."""
import argparse
import functools
import logging
import os
import sys
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import humanfriendly
import numpy as np
import torch
import torch.multiprocessing
import torch.nn
import torch.optim
import yaml
from packaging.version import parse as V
from torch.utils.data import DataLoader
from typeguard import check_argument_types, check_return_type
from espnet import __version__
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.iterators.chunk_iter_factory import ChunkIterFactory
from espnet2.iterators.multiple_iter_factory import MultipleIterFactory
from espnet2.iterators.sequence_iter_factory import SequenceIterFactory
from espnet2.main_funcs.collect_stats import collect_stats
from espnet2.optimizers.optim_groups import configure_optimizer
from espnet2.optimizers.sgd import SGD
from espnet2.samplers.build_batch_sampler import BATCH_TYPES, build_batch_sampler
from espnet2.samplers.unsorted_batch_sampler import UnsortedBatchSampler
from espnet2.schedulers.noam_lr import NoamLR
from espnet2.schedulers.warmup_lr import WarmupLR
from espnet2.schedulers.warmup_reducelronplateau import WarmupReduceLROnPlateau
from espnet2.schedulers.warmup_step_lr import WarmupStepLR
from espnet2.torch_utils.load_pretrained_model import load_pretrained_model
from espnet2.torch_utils.model_summary import model_summary
from espnet2.torch_utils.pytorch_version import pytorch_cudnn_version
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.class_choices import ClassChoices
from espnet2.train.dataset import DATA_TYPES, AbsDataset, ESPnetDataset
from espnet2.train.distributed_utils import (
DistributedOption,
free_port,
get_master_port,
get_node_rank,
get_num_nodes,
resolve_distributed_mode,
)
from espnet2.train.iterable_dataset import IterableESPnetDataset
from espnet2.train.trainer import Trainer
from espnet2.utils import config_argparse
from espnet2.utils.build_dataclass import build_dataclass
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import (
humanfriendly_parse_size_or_none,
int_or_none,
str2bool,
str2triple_str,
str_or_int,
str_or_none,
)
from espnet2.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
from espnet.utils.cli_utils import get_commandline_args
try:
import wandb
except Exception:
wandb = None
if V(torch.__version__) >= V("1.5.0"):
from torch.multiprocessing.spawn import ProcessContext
else:
from torch.multiprocessing.spawn import SpawnContext as ProcessContext
# Registry of optimizer names (lower-cased below) -> optimizer classes.
# Entries from optional third-party packages are added only when importable.
optim_classes = dict(
    adam=torch.optim.Adam,
    adamw=torch.optim.AdamW,
    sgd=SGD,
    adadelta=torch.optim.Adadelta,
    adagrad=torch.optim.Adagrad,
    adamax=torch.optim.Adamax,
    asgd=torch.optim.ASGD,
    lbfgs=torch.optim.LBFGS,
    rmsprop=torch.optim.RMSprop,
    rprop=torch.optim.Rprop,
)
if V(torch.__version__) >= V("1.10.0"):
    # From 1.10.0, RAdam is officially supported
    optim_classes.update(
        radam=torch.optim.RAdam,
    )
try:
    import torch_optimizer

    optim_classes.update(
        # NOTE(review): key "accagd" maps to AccSGD — looks like a typo for
        # "accsgd", but it is kept as-is since existing configs may use it.
        accagd=torch_optimizer.AccSGD,
        adabound=torch_optimizer.AdaBound,
        adamod=torch_optimizer.AdaMod,
        diffgrad=torch_optimizer.DiffGrad,
        lamb=torch_optimizer.Lamb,
        novograd=torch_optimizer.NovoGrad,
        pid=torch_optimizer.PID,
        # torch_optimizer<=0.0.1a10 doesn't support
        # qhadam=torch_optimizer.QHAdam,
        qhm=torch_optimizer.QHM,
        sgdw=torch_optimizer.SGDW,
        yogi=torch_optimizer.Yogi,
    )
    if V(torch_optimizer.__version__) < V("0.2.0"):
        # From 0.2.0, RAdam is dropped
        optim_classes.update(
            radam=torch_optimizer.RAdam,
        )
    # Drop the module reference; only the registered classes are kept.
    del torch_optimizer
except ImportError:
    pass
try:
    import apex

    optim_classes.update(
        fusedadam=apex.optimizers.FusedAdam,
        fusedlamb=apex.optimizers.FusedLAMB,
        fusednovograd=apex.optimizers.FusedNovoGrad,
        fusedsgd=apex.optimizers.FusedSGD,
    )
    del apex
except ImportError:
    pass
try:
    import fairscale
except ImportError:
    # fairscale is optional; build_optimizers() checks for None at use time.
    fairscale = None

# Registry of LR-scheduler names (lower-cased below) -> scheduler classes.
scheduler_classes = dict(
    ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau,
    lambdalr=torch.optim.lr_scheduler.LambdaLR,
    steplr=torch.optim.lr_scheduler.StepLR,
    multisteplr=torch.optim.lr_scheduler.MultiStepLR,
    exponentiallr=torch.optim.lr_scheduler.ExponentialLR,
    CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR,
    noamlr=NoamLR,
    warmuplr=WarmupLR,
    warmupsteplr=WarmupStepLR,
    warmupReducelronplateau=WarmupReduceLROnPlateau,
    cycliclr=torch.optim.lr_scheduler.CyclicLR,
    onecyclelr=torch.optim.lr_scheduler.OneCycleLR,
    CosineAnnealingWarmRestarts=torch.optim.lr_scheduler.CosineAnnealingWarmRestarts,
)
# To lower keys
optim_classes = {k.lower(): v for k, v in optim_classes.items()}
scheduler_classes = {k.lower(): v for k, v in scheduler_classes.items()}
@dataclass
class IteratorOptions:
    """Bundle of options for constructing a data-iterator factory.

    NOTE(review): the consumer of this dataclass is outside this chunk;
    field meanings below are taken from the matching CLI help texts.
    """

    preprocess_fn: callable  # per-sample preprocessing function (may be None)
    collate_fn: callable  # mini-batch collation function given to DataLoader
    data_path_and_name_and_type: list  # (path, key, type) triples of data files
    shape_files: list  # shape/length files used by the batch sampler
    batch_size: int  # used for batch_type 'unsorted', 'sorted', or 'folded'
    batch_bins: int  # used for batch_type 'length' or 'numel'
    batch_type: str  # one of BATCH_TYPES
    max_cache_size: float  # maximum cache size for the data loader (bytes)
    max_cache_fd: int  # max number of kept-open ark file descriptors
    distributed: bool  # whether distributed training is enabled
    num_batches: Optional[int]  # optional cap on the number of batches
    num_iters_per_epoch: Optional[int]  # optional cap on iterations per epoch
    train: bool  # True for the training iterator, False otherwise
class AbsTask(ABC):
# Use @staticmethod, or @classmethod,
# instead of instance method to avoid God classes
# If you need more than one optimizers, change this value in inheritance
num_optimizers: int = 1
trainer = Trainer
class_choices_list: List[ClassChoices] = []
def __init__(self):
    # Tasks are namespaces of classmethods/staticmethods only (see the
    # class-level comment); instantiating them is forbidden by design.
    raise RuntimeError("This class can't be instantiated.")
@classmethod
@abstractmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
    """Add task-specific command-line arguments to the given parser.

    Invoked at the end of get_parser(); subclasses register their own
    options here.
    """
    pass
@classmethod
@abstractmethod
def build_collate_fn(
    cls, args: argparse.Namespace, train: bool
) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:
    """Return "collate_fn", which is a callable object and given to DataLoader.

    >>> from torch.utils.data import DataLoader
    >>> loader = DataLoader(collate_fn=cls.build_collate_fn(args, train=True), ...)

    In many cases, you can use our common collate_fn
    (see espnet2's CommonCollateFn) instead of writing your own.
    """
    raise NotImplementedError
@classmethod
@abstractmethod
def build_preprocess_fn(
    cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
    """Return the per-sample preprocessing function, or None to disable it."""
    raise NotImplementedError
@classmethod
@abstractmethod
def required_data_names(
    cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
    """Define the names of the data required by the Task.

    This function is used by
    >>> cls.check_task_requirements()
    If your model is defined as follows,

    >>> from espnet2.train.abs_espnet_model import AbsESPnetModel
    >>> class Model(AbsESPnetModel):
    ...     def forward(self, input, output, opt=None):  pass

    then "required_data_names" should be as

    >>> required_data_names = ('input', 'output')
    """
    raise NotImplementedError
@classmethod
@abstractmethod
def optional_data_names(
    cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
    """Define the names of the data optionally accepted by the Task.

    This function is used by
    >>> cls.check_task_requirements()
    If your model is defined as follows,

    >>> from espnet2.train.abs_espnet_model import AbsESPnetModel
    >>> class Model(AbsESPnetModel):
    ...     def forward(self, input, output, opt=None):  pass

    then "optional_data_names" should be as

    >>> optional_data_names = ('opt',)
    """
    raise NotImplementedError
@classmethod
@abstractmethod
def build_model(cls, args: argparse.Namespace) -> AbsESPnetModel:
    """Build and return the task model from the parsed arguments."""
    raise NotImplementedError
@classmethod
def get_parser(cls) -> config_argparse.ArgumentParser:
    """Build the common command-line parser shared by all tasks.

    Registers the common/training/distributed/iterator/dataset/optimizer
    option groups, then delegates to cls.trainer.add_arguments() and
    cls.add_task_arguments() for trainer- and task-specific options.
    """
    assert check_argument_types()

    class ArgumentDefaultsRawTextHelpFormatter(
        argparse.RawTextHelpFormatter,
        argparse.ArgumentDefaultsHelpFormatter,
    ):
        pass

    parser = config_argparse.ArgumentParser(
        description="base parser",
        formatter_class=ArgumentDefaultsRawTextHelpFormatter,
    )

    # NOTE(kamo): Use '_' instead of '-' to avoid confusion.
    #  I think '-' looks really confusing if it's written in yaml.

    # NOTE(kamo): add_arguments(..., required=True) can't be used
    #  to provide --print_config mode. Instead of it, do as
    parser.set_defaults(required=["output_dir"])

    group = parser.add_argument_group("Common configuration")

    group.add_argument(
        "--print_config",
        action="store_true",
        help="Print the config file and exit",
    )
    group.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    group.add_argument(
        "--dry_run",
        type=str2bool,
        default=False,
        help="Perform process without training",
    )
    group.add_argument(
        "--iterator_type",
        type=str,
        choices=["sequence", "chunk", "task", "none"],
        default="sequence",
        help="Specify iterator type",
    )

    group.add_argument("--output_dir", type=str_or_none, default=None)
    group.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    group.add_argument("--seed", type=int, default=0, help="Random seed")
    group.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group.add_argument(
        "--num_att_plot",
        type=int,
        default=3,
        help="The number images to plot the outputs from attention. "
        "This option makes sense only when attention-based model. "
        "We can also disable the attention plot by setting it 0",
    )

    group = parser.add_argument_group("distributed training related")
    group.add_argument(
        "--dist_backend",
        default="nccl",
        type=str,
        help="distributed backend",
    )
    group.add_argument(
        "--dist_init_method",
        type=str,
        default="env://",
        help='if init_method="env://", env values of "MASTER_PORT", "MASTER_ADDR", '
        '"WORLD_SIZE", and "RANK" are referred.',
    )
    group.add_argument(
        "--dist_world_size",
        default=None,
        type=int_or_none,
        help="number of nodes for distributed training",
    )
    group.add_argument(
        "--dist_rank",
        type=int_or_none,
        default=None,
        help="node rank for distributed training",
    )
    group.add_argument(
        # Not starting with "dist_" for compatibility to launch.py
        "--local_rank",
        type=int_or_none,
        default=None,
        help="local rank for distributed training. This option is used if "
        "--multiprocessing_distributed=false",
    )
    group.add_argument(
        "--dist_master_addr",
        default=None,
        type=str_or_none,
        help="The master address for distributed training. "
        "This value is used when dist_init_method == 'env://'",
    )
    group.add_argument(
        "--dist_master_port",
        default=None,
        type=int_or_none,
        help="The master port for distributed training"
        "This value is used when dist_init_method == 'env://'",
    )
    group.add_argument(
        "--dist_launcher",
        default=None,
        type=str_or_none,
        choices=["slurm", "mpi", None],
        help="The launcher type for distributed training",
    )
    group.add_argument(
        "--multiprocessing_distributed",
        default=False,
        type=str2bool,
        help="Use multi-processing distributed training to launch "
        "N processes per node, which has N GPUs. This is the "
        "fastest way to use PyTorch for either single node or "
        "multi node data parallel training",
    )
    group.add_argument(
        "--unused_parameters",
        type=str2bool,
        default=False,
        help="Whether to use the find_unused_parameters in "
        "torch.nn.parallel.DistributedDataParallel ",
    )
    group.add_argument(
        "--sharded_ddp",
        default=False,
        type=str2bool,
        help="Enable sharded training provided by fairscale",
    )

    group = parser.add_argument_group("cudnn mode related")
    group.add_argument(
        "--cudnn_enabled",
        type=str2bool,
        default=torch.backends.cudnn.enabled,
        help="Enable CUDNN",
    )
    group.add_argument(
        "--cudnn_benchmark",
        type=str2bool,
        default=torch.backends.cudnn.benchmark,
        help="Enable cudnn-benchmark mode",
    )
    group.add_argument(
        "--cudnn_deterministic",
        type=str2bool,
        default=True,
        help="Enable cudnn-deterministic mode",
    )

    group = parser.add_argument_group("collect stats mode related")
    group.add_argument(
        "--collect_stats",
        type=str2bool,
        default=False,
        help='Perform on "collect stats" mode',
    )
    group.add_argument(
        "--write_collected_feats",
        type=str2bool,
        default=False,
        help='Write the output features from the model when "collect stats" mode',
    )

    group = parser.add_argument_group("Trainer related")
    group.add_argument(
        "--max_epoch",
        type=int,
        default=40,
        help="The maximum number epoch to train",
    )
    group.add_argument(
        "--patience",
        type=int_or_none,
        default=None,
        help="Number of epochs to wait without improvement "
        "before stopping the training",
    )
    group.add_argument(
        "--val_scheduler_criterion",
        type=str,
        nargs=2,
        default=("valid", "loss"),
        help="The criterion used for the value given to the lr scheduler. "
        'Give a pair referring the phase, "train" or "valid",'
        'and the criterion name. The mode specifying "min" or "max" can '
        "be changed by --scheduler_conf",
    )
    group.add_argument(
        "--early_stopping_criterion",
        type=str,
        nargs=3,
        default=("valid", "loss", "min"),
        help="The criterion used for judging of early stopping. "
        'Give a pair referring the phase, "train" or "valid",'
        'the criterion name and the mode, "min" or "max", e.g. "acc,max".',
    )
    group.add_argument(
        "--best_model_criterion",
        type=str2triple_str,
        nargs="+",
        default=[
            ("train", "loss", "min"),
            ("valid", "loss", "min"),
            ("train", "acc", "max"),
            ("valid", "acc", "max"),
        ],
        help="The criterion used for judging of the best model. "
        'Give a pair referring the phase, "train" or "valid",'
        'the criterion name, and the mode, "min" or "max", e.g. "acc,max".',
    )
    group.add_argument(
        "--keep_nbest_models",
        type=int,
        nargs="+",
        default=[10],
        help="Remove previous snapshots excluding the n-best scored epochs",
    )
    group.add_argument(
        "--nbest_averaging_interval",
        type=int,
        default=0,
        help="The epoch interval to apply model averaging and save nbest models",
    )
    group.add_argument(
        "--grad_clip",
        type=float,
        default=5.0,
        help="Gradient norm threshold to clip",
    )
    group.add_argument(
        "--grad_clip_type",
        type=float,
        default=2.0,
        help="The type of the used p-norm for gradient clip. Can be inf",
    )
    group.add_argument(
        "--grad_noise",
        type=str2bool,
        default=False,
        help="The flag to switch to use noise injection to "
        "gradients during training",
    )
    group.add_argument(
        "--accum_grad",
        type=int,
        default=1,
        help="The number of gradient accumulation",
    )
    group.add_argument(
        "--no_forward_run",
        type=str2bool,
        default=False,
        help="Just only iterating data loading without "
        "model forwarding and training",
    )
    group.add_argument(
        "--resume",
        type=str2bool,
        default=False,
        help="Enable resuming if checkpoint is existing",
    )
    group.add_argument(
        "--train_dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type for training.",
    )
    group.add_argument(
        "--use_amp",
        type=str2bool,
        default=False,
        help="Enable Automatic Mixed Precision. This feature requires pytorch>=1.6",
    )
    group.add_argument(
        "--log_interval",
        type=int_or_none,
        default=None,
        help="Show the logs every the number iterations in each epochs at the "
        "training phase. If None is given, it is decided according the number "
        "of training samples automatically .",
    )
    group.add_argument(
        "--use_matplotlib",
        type=str2bool,
        default=True,
        help="Enable matplotlib logging",
    )
    group.add_argument(
        "--use_tensorboard",
        type=str2bool,
        default=True,
        help="Enable tensorboard logging",
    )
    group.add_argument(
        "--create_graph_in_tensorboard",
        type=str2bool,
        default=False,
        help="Whether to create graph in tensorboard",
    )
    group.add_argument(
        "--use_wandb",
        type=str2bool,
        default=False,
        help="Enable wandb logging",
    )
    group.add_argument(
        "--wandb_project",
        type=str,
        default=None,
        help="Specify wandb project",
    )
    group.add_argument(
        "--wandb_id",
        type=str,
        default=None,
        help="Specify wandb id",
    )
    group.add_argument(
        "--wandb_entity",
        type=str,
        default=None,
        help="Specify wandb entity",
    )
    group.add_argument(
        "--wandb_name",
        type=str,
        default=None,
        help="Specify wandb run name",
    )
    group.add_argument(
        "--wandb_model_log_interval",
        type=int,
        default=-1,
        help="Set the model log period",
    )
    group.add_argument(
        "--detect_anomaly",
        type=str2bool,
        default=False,
        help="Set torch.autograd.set_detect_anomaly",
    )

    group = parser.add_argument_group("Pretraining model related")
    group.add_argument("--pretrain_path", help="This option is obsoleted")
    # NOTE(review): the last example line in the help below is duplicated
    # verbatim in the original source; kept as-is (runtime string).
    group.add_argument(
        "--init_param",
        type=str,
        default=[],
        nargs="*",
        help="Specify the file path used for initialization of parameters. "
        "The format is '<file_path>:<src_key>:<dst_key>:<exclude_keys>', "
        "where file_path is the model file path, "
        "src_key specifies the key of model states to be used in the model file, "
        "dst_key specifies the attribute of the model to be initialized, "
        "and exclude_keys excludes keys of model states for the initialization."
        "e.g.\n"
        "  # Load all parameters"
        "  --init_param some/where/model.pth\n"
        "  # Load only decoder parameters"
        "  --init_param some/where/model.pth:decoder:decoder\n"
        "  # Load only decoder parameters excluding decoder.embed"
        "  --init_param some/where/model.pth:decoder:decoder:decoder.embed\n"
        "  --init_param some/where/model.pth:decoder:decoder:decoder.embed\n",
    )
    group.add_argument(
        "--ignore_init_mismatch",
        type=str2bool,
        default=False,
        help="Ignore size mismatch when loading pre-trained model",
    )
    group.add_argument(
        "--freeze_param",
        type=str,
        default=[],
        nargs="*",
        help="Freeze parameters",
    )

    group = parser.add_argument_group("BatchSampler related")
    group.add_argument(
        "--num_iters_per_epoch",
        type=int_or_none,
        default=None,
        help="Restrict the number of iterations for training per epoch",
    )
    group.add_argument(
        "--batch_size",
        type=int,
        default=20,
        help="The mini-batch size used for training. Used if batch_type='unsorted',"
        " 'sorted', or 'folded'.",
    )
    group.add_argument(
        "--valid_batch_size",
        type=int_or_none,
        default=None,
        help="If not given, the value of --batch_size is used",
    )
    group.add_argument(
        "--batch_bins",
        type=int,
        default=1000000,
        help="The number of batch bins. Used if batch_type='length' or 'numel'",
    )
    group.add_argument(
        "--valid_batch_bins",
        type=int_or_none,
        default=None,
        help="If not given, the value of --batch_bins is used",
    )

    group.add_argument("--train_shape_file", type=str, action="append", default=[])
    group.add_argument("--valid_shape_file", type=str, action="append", default=[])

    group = parser.add_argument_group("Sequence iterator related")
    _batch_type_help = ""
    for key, value in BATCH_TYPES.items():
        _batch_type_help += f'"{key}":\n{value}\n'
    group.add_argument(
        "--batch_type",
        type=str,
        default="folded",
        choices=list(BATCH_TYPES),
        help=_batch_type_help,
    )
    group.add_argument(
        "--valid_batch_type",
        type=str_or_none,
        default=None,
        choices=list(BATCH_TYPES) + [None],
        help="If not given, the value of --batch_type is used",
    )
    group.add_argument("--fold_length", type=int, action="append", default=[])
    group.add_argument(
        "--sort_in_batch",
        type=str,
        default="descending",
        choices=["descending", "ascending"],
        help="Sort the samples in each mini-batches by the sample "
        'lengths. To enable this, "shape_file" must have the length information.',
    )
    group.add_argument(
        "--sort_batch",
        type=str,
        default="descending",
        choices=["descending", "ascending"],
        help="Sort mini-batches by the sample lengths",
    )
    group.add_argument(
        "--multiple_iterator",
        type=str2bool,
        default=False,
        help="Use multiple iterator mode",
    )

    group = parser.add_argument_group("Chunk iterator related")
    group.add_argument(
        "--chunk_length",
        type=str_or_int,
        default=500,
        help="Specify chunk length. e.g. '300', '300,400,500', or '300-400'."
        "If multiple numbers separated by command are given, "
        "one of them is selected randomly for each samples. "
        "If two numbers are given with '-', it indicates the range of the choices. "
        "Note that if the sequence length is shorter than the all chunk_lengths, "
        "the sample is discarded. ",
    )
    group.add_argument(
        "--chunk_shift_ratio",
        type=float,
        default=0.5,
        help="Specify the shift width of chunks. If it's less than 1, "
        "allows the overlapping and if bigger than 1, there are some gaps "
        "between each chunk.",
    )
    group.add_argument(
        "--num_cache_chunks",
        type=int,
        default=1024,
        help="Shuffle in the specified number of chunks and generate mini-batches "
        "More larger this value, more randomness can be obtained.",
    )
    group.add_argument(
        "--chunk_excluded_key_prefixes",
        type=str,
        nargs="+",
        default=[],
        help="List of key prefixes. Keys that satisfy either condition below "
        "will be excluded from the length consistency check in ChunkIterFactory:\n"
        "  - exactly match one of the prefixes in `chunk_excluded_key_prefixes`\n"
        "  - have one of the prefixes in `chunk_excluded_key_prefixes` and "
        "end with numbers",
    )

    group = parser.add_argument_group("Dataset related")
    _data_path_and_name_and_type_help = (
        "Give three words splitted by comma. It's used for the training data. "
        "e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. "
        "The first value, some/path/a.scp, indicates the file path, "
        "and the second, foo, is the key name used for the mini-batch data, "
        "and the last, sound, decides the file type. "
        "This option is repeatable, so you can input any number of features "
        "for your task. Supported file types are as follows:\n\n"
    )
    for key, dic in DATA_TYPES.items():
        _data_path_and_name_and_type_help += f'"{key}":\n{dic["help"]}\n\n'
    group.add_argument(
        "--train_data_path_and_name_and_type",
        type=str2triple_str,
        action="append",
        default=[],
        help=_data_path_and_name_and_type_help,
    )
    group.add_argument(
        "--valid_data_path_and_name_and_type",
        type=str2triple_str,
        action="append",
        default=[],
    )
    group.add_argument(
        "--allow_variable_data_keys",
        type=str2bool,
        default=False,
        help="Allow the arbitrary keys for mini-batch with ignoring "
        "the task requirements",
    )
    group.add_argument(
        "--max_cache_size",
        type=humanfriendly.parse_size,
        default=0.0,
        help="The maximum cache size for data loader. e.g. 10MB, 20GB.",
    )
    group.add_argument(
        "--max_cache_fd",
        type=int,
        default=32,
        help="The maximum number of file descriptors to be kept "
        "as opened for ark files. "
        "This feature is only valid when data type is 'kaldi_ark'.",
    )
    group.add_argument(
        "--valid_max_cache_size",
        type=humanfriendly_parse_size_or_none,
        default=None,
        help="The maximum cache size for validation data loader. e.g. 10MB, 20GB. "
        "If None, the 5 percent size of --max_cache_size",
    )

    group = parser.add_argument_group("Optimizer related")
    group.add_argument(
        "--exclude_weight_decay",
        type=str2bool,
        default=False,
        help="Exclude weight decay in optimizer for model bias, normalization, "
        "or other special parameters",
    )
    group.add_argument(
        "--exclude_weight_decay_conf",
        action=NestedDictAction,
        default=dict(),
        help="The keyword arguments for configuring weight decay in optimizer. "
        "e.g., 'bias_weight_decay': False will set zero weight decay for bias "
        "params. See also espnet2.optimizers.optim_groups.configure_optimizer.",
    )
    # One optimizer/scheduler option set per optimizer: --optim, --optim2, ...
    for i in range(1, cls.num_optimizers + 1):
        suf = "" if i == 1 else str(i)
        group.add_argument(
            f"--optim{suf}",
            type=lambda x: x.lower(),
            default="adadelta",
            choices=list(optim_classes),
            help="The optimizer type",
        )
        group.add_argument(
            f"--optim{suf}_conf",
            action=NestedDictAction,
            default=dict(),
            help="The keyword arguments for optimizer",
        )
        group.add_argument(
            f"--scheduler{suf}",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            choices=list(scheduler_classes) + [None],
            help="The lr scheduler type",
        )
        group.add_argument(
            f"--scheduler{suf}_conf",
            action=NestedDictAction,
            default=dict(),
            help="The keyword arguments for lr scheduler",
        )

    cls.trainer.add_arguments(parser)
    cls.add_task_arguments(parser)

    assert check_return_type(parser)
    return parser
@classmethod
def build_optimizers(
    cls,
    args: argparse.Namespace,
    model: torch.nn.Module,
) -> List[torch.optim.Optimizer]:
    """Build the single optimizer for the model.

    Tasks that need more than one optimizer must override this method.
    """
    if cls.num_optimizers != 1:
        raise RuntimeError(
            "build_optimizers() must be overridden if num_optimizers != 1"
        )

    optimizer_class = optim_classes.get(args.optim)
    if optimizer_class is None:
        raise ValueError(f"must be one of {list(optim_classes)}: {args.optim}")

    if args.sharded_ddp:
        # Sharded data-parallel training wraps the optimizer in fairscale OSS.
        if fairscale is None:
            raise RuntimeError("Requiring fairscale. Do 'pip install fairscale'")
        optimizer = fairscale.optim.oss.OSS(
            params=model.parameters(), optim=optimizer_class, **args.optim_conf
        )
    elif args.exclude_weight_decay:
        # Exclude bias/normalization (etc.) parameters from weight decay.
        optimizer = configure_optimizer(
            model,
            optimizer_class,
            args.optim_conf,
            args.exclude_weight_decay_conf,
        )
    else:
        optimizer = optimizer_class(model.parameters(), **args.optim_conf)
    return [optimizer]
@classmethod
def exclude_opts(cls) -> Tuple[str, ...]:
    """Return the option names hidden from --print_config output."""
    return ("required", "print_config", "config", "ngpu")
@classmethod
def get_default_config(cls) -> Dict[str, Any]:
    """Return the configuration as dict.

    This method is used by print_config(). The returned dict mirrors
    the parser defaults, with optimizer/scheduler/class-choice "_conf"
    entries expanded to include each class's default keyword arguments.
    """

    def get_class_type(name: str, classes: dict):
        # Look up a registered class by name, failing loudly on typos.
        _cls = classes.get(name)
        if _cls is None:
            raise ValueError(f"must be one of {list(classes)}: {name}")
        return _cls

    # This method is used only for --print_config
    assert check_argument_types()
    parser = cls.get_parser()
    args, _ = parser.parse_known_args()
    config = vars(args)
    # Excludes the options not to be shown
    for k in AbsTask.exclude_opts():
        config.pop(k)

    for i in range(1, cls.num_optimizers + 1):
        suf = "" if i == 1 else str(i)
        name = config[f"optim{suf}"]
        optim_class = get_class_type(name, optim_classes)
        conf = get_default_kwargs(optim_class)
        # Overwrite the default by the arguments,
        conf.update(config[f"optim{suf}_conf"])
        # and set it again
        config[f"optim{suf}_conf"] = conf

        name = config[f"scheduler{suf}"]
        if name is not None:
            scheduler_class = get_class_type(name, scheduler_classes)
            conf = get_default_kwargs(scheduler_class)
            # Overwrite the default by the arguments,
            conf.update(config[f"scheduler{suf}_conf"])
            # and set it again
            config[f"scheduler{suf}_conf"] = conf

    for class_choices in cls.class_choices_list:
        if getattr(args, class_choices.name) is not None:
            class_obj = class_choices.get_class(getattr(args, class_choices.name))
            conf = get_default_kwargs(class_obj)
            name = class_choices.name
            # Overwrite the default by the arguments,
            conf.update(config[f"{name}_conf"])
            # and set it again
            config[f"{name}_conf"] = conf
    return config
@classmethod
def check_required_command_args(cls, args: argparse.Namespace):
    """Validate parsed arguments.

    Rejects option names containing "-" and, when any option listed in
    ``args.required`` is still None, prints the help text plus an error
    message to stderr and exits with status 2.
    """
    assert check_argument_types()
    for key in vars(args):
        if "-" in key:
            raise RuntimeError(f'Use "_" instead of "-": parser.get_parser("{key}")')

    missing = [f"--{a}" for a in args.required if getattr(args, a) is None]
    if missing:
        required = ", ".join(missing)
        parser = cls.get_parser()
        parser.print_help(file=sys.stderr)
        prog = Path(sys.argv[0]).name
        print(file=sys.stderr)
        print(
            f"{prog}: error: the following arguments are required: {required}",
            file=sys.stderr,
        )
        sys.exit(2)
@classmethod
def check_task_requirements(
    cls,
    dataset: Union[AbsDataset, IterableESPnetDataset],
    allow_variable_data_keys: bool,
    train: bool,
    inference: bool = False,
) -> None:
    """Check if the dataset satisfies the requirements of the current Task.

    Raises:
        RuntimeError: If a required data name is missing from the dataset,
            or (unless allow_variable_data_keys is True) if the dataset
            contains a name that is neither required nor optional.
    """
    assert check_argument_types()
    mes = (
        f"If you intend to use an additional input, modify "
        f'"{cls.__name__}.required_data_names()" or '
        f'"{cls.__name__}.optional_data_names()". '
        f"Otherwise you need to set --allow_variable_data_keys true "
    )

    # Every required data name must be present.
    for k in cls.required_data_names(train, inference):
        if not dataset.has_name(k):
            raise RuntimeError(
                f'"{cls.required_data_names(train, inference)}" are required for'
                f' {cls.__name__}. but "{dataset.names()}" are input.\n{mes}'
            )
    if not allow_variable_data_keys:
        # Without the escape hatch, only required+optional names are allowed.
        task_keys = cls.required_data_names(
            train, inference
        ) + cls.optional_data_names(train, inference)
        for k in dataset.names():
            if k not in task_keys:
                raise RuntimeError(
                    f"The data-name must be one of {task_keys} "
                    f'for {cls.__name__}: "{k}" is not allowed.\n{mes}'
                )
@classmethod
def print_config(cls, file=sys.stdout) -> None:
    """Dump the default configuration as YAML to *file*.

    Backs the --print_config mode,
    e.g. ``python train.py asr --print_config``.
    """
    assert check_argument_types()
    dumped = yaml_no_alias_safe_dump(
        cls.get_default_config(), indent=4, sort_keys=False
    )
    file.write(dumped)
    @classmethod
    def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):
        """Entry point of the task.

        Parses the command line (unless ``args`` is given), validates the
        options, then either runs ``main_worker`` in this process or, in
        multiprocessing-distributed mode, spawns one child process per GPU
        and waits for all of them.

        Args:
            args: Pre-parsed namespace; when None, parsed from ``cmd``.
            cmd: Command-line tokens used only when ``args`` is None.
        """
        assert check_argument_types()
        print(get_commandline_args(), file=sys.stderr)
        if args is None:
            parser = cls.get_parser()
            args = parser.parse_args(cmd)
        args.version = __version__
        if args.pretrain_path is not None:
            raise RuntimeError("--pretrain_path is deprecated. Use --init_param")
        if args.print_config:
            cls.print_config()
            sys.exit(0)
        cls.check_required_command_args(args)
        # "distributed" is decided using the other command args
        resolve_distributed_mode(args)
        if not args.distributed or not args.multiprocessing_distributed:
            cls.main_worker(args)
        else:
            assert args.ngpu > 1, args.ngpu
            # Multi-processing distributed mode: e.g. 2node-4process-4GPU
            # | Host1 | Host2 |
            # | Process1 | Process2 | <= Spawn processes
            # |Child1|Child2|Child1|Child2|
            # |GPU1 |GPU2 |GPU1 |GPU2 |
            # See also the following usage of --multiprocessing-distributed:
            # https://github.com/pytorch/examples/blob/master/imagenet/main.py
            num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher)
            if num_nodes == 1:
                args.dist_master_addr = "localhost"
                args.dist_rank = 0
                # Single node distributed training with multi-GPUs
                if (
                    args.dist_init_method == "env://"
                    and get_master_port(args.dist_master_port) is None
                ):
                    # Get the unused port
                    args.dist_master_port = free_port()
            # Assume that nodes use same number of GPUs each other
            args.dist_world_size = args.ngpu * num_nodes
            node_rank = get_node_rank(args.dist_rank, args.dist_launcher)
            # The following block is copied from:
            # https://github.com/pytorch/pytorch/blob/master/torch/multiprocessing/spawn.py
            error_queues = []
            processes = []
            mp = torch.multiprocessing.get_context("spawn")
            # Each child sees ngpu=1 and a globally unique dist_rank.
            for i in range(args.ngpu):
                # Copy args
                local_args = argparse.Namespace(**vars(args))
                local_args.local_rank = i
                local_args.dist_rank = args.ngpu * node_rank + i
                local_args.ngpu = 1
                process = mp.Process(
                    target=cls.main_worker,
                    args=(local_args,),
                    daemon=False,
                )
                process.start()
                processes.append(process)
                error_queues.append(mp.SimpleQueue())
            # Loop on join until it returns True or raises an exception.
            while not ProcessContext(processes, error_queues).join():
                pass
    @classmethod
    def main_worker(cls, args: argparse.Namespace):
        """Run one training worker process.

        Performs, in order: distributed initialization, logging setup
        (rank 0 logs at ``args.log_level``, other ranks only at ERROR),
        seeding, model/optimizer/scheduler construction, dumping the
        resolved ``args`` to ``config.yaml``, and then exactly one of:
        nothing (``--dry_run``), feature-statistics collection
        (``--collect_stats``), or the actual training loop.
        """
        assert check_argument_types()
        # 0. Init distributed process
        distributed_option = build_dataclass(DistributedOption, args)
        # Setting distributed_option.dist_rank, etc.
        distributed_option.init_options()
        # NOTE(kamo): Don't use logging before invoking logging.basicConfig()
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            if not distributed_option.distributed:
                _rank = ""
            else:
                _rank = (
                    f":{distributed_option.dist_rank}/"
                    f"{distributed_option.dist_world_size}"
                )
            # NOTE(kamo):
            # logging.basicConfig() is invoked in main_worker() instead of main()
            # because it can be invoked only once in a process.
            # FIXME(kamo): Should we use logging.getLogger()?
            logging.basicConfig(
                level=args.log_level,
                format=f"[{os.uname()[1].split('.')[0]}{_rank}]"
                f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
            )
        else:
            # Suppress logging if RANK != 0
            logging.basicConfig(
                level="ERROR",
                format=f"[{os.uname()[1].split('.')[0]}"
                f":{distributed_option.dist_rank}/{distributed_option.dist_world_size}]"
                f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
            )
        # Invoking torch.distributed.init_process_group
        distributed_option.init_torch_distributed()
        # 1. Set random-seed
        set_all_random_seed(args.seed)
        torch.backends.cudnn.enabled = args.cudnn_enabled
        torch.backends.cudnn.benchmark = args.cudnn_benchmark
        torch.backends.cudnn.deterministic = args.cudnn_deterministic
        if args.detect_anomaly:
            logging.info("Invoking torch.autograd.set_detect_anomaly(True)")
            torch.autograd.set_detect_anomaly(args.detect_anomaly)
        # Model building can be skipped entirely in collect_stats mode when
        # the model itself is not needed to extract features.
        if (
            args.collect_stats
            and getattr(args, "model_conf", None) is not None
            and not args.model_conf.get("extract_feats_in_collect_stats", True)
        ):
            model = None
            logging.info("Skipping model building in collect_stats stage.")
        else:
            # 2. Build model
            model = cls.build_model(args=args)
            if not isinstance(model, AbsESPnetModel):
                raise RuntimeError(
                    f"model must inherit {AbsESPnetModel.__name__},"
                    f" but got {type(model)}"
                )
            model = model.to(
                dtype=getattr(torch, args.train_dtype),
                device="cuda" if args.ngpu > 0 else "cpu",
            )
            # Freeze every parameter whose name equals, or is prefixed by,
            # an entry of --freeze_param.
            for t in args.freeze_param:
                for k, p in model.named_parameters():
                    if k.startswith(t + ".") or k == t:
                        logging.info(f"Setting {k}.requires_grad = False")
                        p.requires_grad = False
        # 3. Build optimizer
        optimizers = cls.build_optimizers(args, model=model)
        # 4. Build schedulers
        # One (possibly None) scheduler per optimizer; options come from
        # --scheduler/--scheduler_conf, --scheduler2/--scheduler2_conf, ...
        schedulers = []
        for i, optim in enumerate(optimizers, 1):
            suf = "" if i == 1 else str(i)
            name = getattr(args, f"scheduler{suf}")
            conf = getattr(args, f"scheduler{suf}_conf")
            if name is not None:
                cls_ = scheduler_classes.get(name)
                if cls_ is None:
                    raise ValueError(
                        f"must be one of {list(scheduler_classes)}: {name}"
                    )
                scheduler = cls_(optim, **conf)
            else:
                scheduler = None
            schedulers.append(scheduler)
        logging.info(pytorch_cudnn_version())
        logging.info(model_summary(model))
        for i, (o, s) in enumerate(zip(optimizers, schedulers), 1):
            suf = "" if i == 1 else str(i)
            logging.info(f"Optimizer{suf}:\n{o}")
            logging.info(f"Scheduler{suf}: {s}")
        # 5. Dump "args" to config.yaml
        # NOTE(kamo): "args" should be saved after object-buildings are done
        # because they are allowed to modify "args".
        output_dir = Path(args.output_dir)
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            output_dir.mkdir(parents=True, exist_ok=True)
            with (output_dir / "config.yaml").open("w", encoding="utf-8") as f:
                logging.info(
                    f'Saving the configuration in {output_dir / "config.yaml"}'
                )
                yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)
        if args.dry_run:
            pass
        elif args.collect_stats:
            # Perform on collect_stats mode. This mode has two roles
            # - Derive the length and dimension of all input data
            # - Accumulate feats, square values, and the length for whitening
            logging.info(args)
            if args.valid_batch_size is None:
                args.valid_batch_size = args.batch_size
            # Key files (first shape file) define the set of utterance ids.
            if len(args.train_shape_file) != 0:
                train_key_file = args.train_shape_file[0]
            else:
                train_key_file = None
            if len(args.valid_shape_file) != 0:
                valid_key_file = args.valid_shape_file[0]
            else:
                valid_key_file = None
            if model and not getattr(model, "extract_feats_in_collect_stats", True):
                model = None
                logging.info("Skipping collect_feats in collect_stats stage.")
            collect_stats(
                model=model,
                train_iter=cls.build_streaming_iterator(
                    data_path_and_name_and_type=args.train_data_path_and_name_and_type,
                    key_file=train_key_file,
                    batch_size=args.batch_size,
                    dtype=args.train_dtype,
                    num_workers=args.num_workers,
                    allow_variable_data_keys=args.allow_variable_data_keys,
                    ngpu=args.ngpu,
                    preprocess_fn=cls.build_preprocess_fn(args, train=False),
                    collate_fn=cls.build_collate_fn(args, train=False),
                ),
                valid_iter=cls.build_streaming_iterator(
                    data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
                    key_file=valid_key_file,
                    batch_size=args.valid_batch_size,
                    dtype=args.train_dtype,
                    num_workers=args.num_workers,
                    allow_variable_data_keys=args.allow_variable_data_keys,
                    ngpu=args.ngpu,
                    preprocess_fn=cls.build_preprocess_fn(args, train=False),
                    collate_fn=cls.build_collate_fn(args, train=False),
                ),
                output_dir=output_dir,
                ngpu=args.ngpu,
                log_interval=args.log_interval,
                write_collected_feats=args.write_collected_feats,
            )
        else:
            # 6. Loads pre-trained model
            for p in args.init_param:
                logging.info(f"Loading pretrained params from {p}")
                load_pretrained_model(
                    model=model,
                    init_param=p,
                    ignore_init_mismatch=args.ignore_init_mismatch,
                    # NOTE(kamo): "cuda" for torch.load always indicates cuda:0
                    # in PyTorch<=1.4
                    map_location=f"cuda:{torch.cuda.current_device()}"
                    if args.ngpu > 0
                    else "cpu",
                )
            # 7. Build iterator factories
            if args.multiple_iterator:
                train_iter_factory = cls.build_multiple_iter_factory(
                    args=args,
                    distributed_option=distributed_option,
                    mode="train",
                )
            else:
                train_iter_factory = cls.build_iter_factory(
                    args=args,
                    distributed_option=distributed_option,
                    mode="train",
                )
            valid_iter_factory = cls.build_iter_factory(
                args=args,
                distributed_option=distributed_option,
                mode="valid",
            )
            if not args.use_matplotlib and args.num_att_plot != 0:
                args.num_att_plot = 0
                logging.info("--use_matplotlib false => Changing --num_att_plot to 0")
            if args.num_att_plot != 0:
                plot_attention_iter_factory = cls.build_iter_factory(
                    args=args,
                    distributed_option=distributed_option,
                    mode="plot_att",
                )
            else:
                plot_attention_iter_factory = None
            # 8. Start training
            if args.use_wandb:
                if wandb is None:
                    raise RuntimeError("Please install wandb")
                try:
                    wandb.login()
                except wandb.errors.UsageError:
                    logging.info("wandb not configured! run `wandb login` to enable")
                    args.use_wandb = False
            if args.use_wandb:
                if (
                    not distributed_option.distributed
                    or distributed_option.dist_rank == 0
                ):
                    if args.wandb_project is None:
                        project = "ESPnet_" + cls.__name__
                    else:
                        project = args.wandb_project
                    if args.wandb_name is None:
                        name = str(Path(".").resolve()).replace("/", "_")
                    else:
                        name = args.wandb_name
                    wandb.init(
                        entity=args.wandb_entity,
                        project=project,
                        name=name,
                        dir=str(output_dir),
                        id=args.wandb_id,
                        resume=args.resume,
                    )
                    wandb.config.update(args)
                else:
                    # wandb also supports grouping for distributed training,
                    # but we only logs aggregated data,
                    # so it's enough to perform on rank0 node.
                    args.use_wandb = False
            # Don't give args to trainer.run() directly!!!
            # Instead of it, define "Options" object and build here.
            trainer_options = cls.trainer.build_options(args)
            cls.trainer.run(
                model=model,
                optimizers=optimizers,
                schedulers=schedulers,
                train_iter_factory=train_iter_factory,
                valid_iter_factory=valid_iter_factory,
                plot_attention_iter_factory=plot_attention_iter_factory,
                trainer_options=trainer_options,
                distributed_option=distributed_option,
            )
            if args.use_wandb and wandb.run:
                wandb.finish()
    @classmethod
    def build_iter_options(
        cls,
        args: argparse.Namespace,
        distributed_option: DistributedOption,
        mode: str,
    ):
        """Gather mode-dependent iterator settings into an IteratorOptions.

        Args:
            args: Parsed command-line options.
            distributed_option: Distributed-training settings.
            mode: One of "train", "valid", or "plot_att".
        Raises:
            NotImplementedError: If ``mode`` is not one of the above.
        """
        if mode == "train":
            preprocess_fn = cls.build_preprocess_fn(args, train=True)
            collate_fn = cls.build_collate_fn(args, train=True)
            data_path_and_name_and_type = args.train_data_path_and_name_and_type
            shape_files = args.train_shape_file
            batch_size = args.batch_size
            batch_bins = args.batch_bins
            batch_type = args.batch_type
            max_cache_size = args.max_cache_size
            max_cache_fd = args.max_cache_fd
            distributed = distributed_option.distributed
            num_batches = None
            num_iters_per_epoch = args.num_iters_per_epoch
            train = True
        elif mode == "valid":
            # Each --valid_* option falls back to its training counterpart
            # when left unspecified.
            preprocess_fn = cls.build_preprocess_fn(args, train=False)
            collate_fn = cls.build_collate_fn(args, train=False)
            data_path_and_name_and_type = args.valid_data_path_and_name_and_type
            shape_files = args.valid_shape_file
            if args.valid_batch_type is None:
                batch_type = args.batch_type
            else:
                batch_type = args.valid_batch_type
            if args.valid_batch_size is None:
                batch_size = args.batch_size
            else:
                batch_size = args.valid_batch_size
            if args.valid_batch_bins is None:
                batch_bins = args.batch_bins
            else:
                batch_bins = args.valid_batch_bins
            if args.valid_max_cache_size is None:
                # Cache 5% of maximum size for validation loader
                max_cache_size = 0.05 * args.max_cache_size
            else:
                max_cache_size = args.valid_max_cache_size
            max_cache_fd = args.max_cache_fd
            distributed = distributed_option.distributed
            num_batches = None
            num_iters_per_epoch = None
            train = False
        elif mode == "plot_att":
            # Attention plotting iterates the validation data one sample at
            # a time, at most --num_att_plot samples.
            preprocess_fn = cls.build_preprocess_fn(args, train=False)
            collate_fn = cls.build_collate_fn(args, train=False)
            data_path_and_name_and_type = args.valid_data_path_and_name_and_type
            shape_files = args.valid_shape_file
            batch_type = "unsorted"
            batch_size = 1
            batch_bins = 0
            num_batches = args.num_att_plot
            max_cache_fd = args.max_cache_fd
            # num_att_plot should be a few sample ~ 3, so cache all data.
            max_cache_size = np.inf if args.max_cache_size != 0.0 else 0.0
            # always False because plot_attention performs on RANK0
            distributed = False
            num_iters_per_epoch = None
            train = False
        else:
            raise NotImplementedError(f"mode={mode}")
        return IteratorOptions(
            preprocess_fn=preprocess_fn,
            collate_fn=collate_fn,
            data_path_and_name_and_type=data_path_and_name_and_type,
            shape_files=shape_files,
            batch_type=batch_type,
            batch_size=batch_size,
            batch_bins=batch_bins,
            num_batches=num_batches,
            max_cache_size=max_cache_size,
            max_cache_fd=max_cache_fd,
            distributed=distributed,
            num_iters_per_epoch=num_iters_per_epoch,
            train=train,
        )
@classmethod
def build_iter_factory(
cls,
args: argparse.Namespace,
distributed_option: DistributedOption,
mode: str,
kwargs: dict = None,
) -> AbsIterFactory:
"""Build a factory object of mini-batch iterator.
This object is invoked at every epochs to build the iterator for each epoch
as following:
>>> iter_factory = cls.build_iter_factory(...)
>>> for epoch in range(1, max_epoch):
... for keys, batch in iter_fatory.build_iter(epoch):
... model(**batch)
The mini-batches for each epochs are fully controlled by this class.
Note that the random seed used for shuffling is decided as "seed + epoch" and
the generated mini-batches can be reproduces when resuming.
Note that the definition of "epoch" doesn't always indicate
to run out of the whole training corpus.
"--num_iters_per_epoch" option restricts the number of iterations for each epoch
and the rest of samples for the originally epoch are left for the next epoch.
e.g. If The number of mini-batches equals to 4, the following two are same:
- 1 epoch without "--num_iters_per_epoch"
- 4 epoch with "--num_iters_per_epoch" == 4
"""
assert check_argument_types()
iter_options = cls.build_iter_options(args, distributed_option, mode)
# Overwrite iter_options if any kwargs is given
if kwargs is not None:
for k, v in kwargs.items():
setattr(iter_options, k, v)
if args.iterator_type == "sequence":
return cls.build_sequence_iter_factory(
args=args,
iter_options=iter_options,
mode=mode,
)
elif args.iterator_type == "chunk":
return cls.build_chunk_iter_factory(
args=args,
iter_options=iter_options,
mode=mode,
)
elif args.iterator_type == "task":
return cls.build_task_iter_factory(
args=args,
iter_options=iter_options,
mode=mode,
)
else:
raise RuntimeError(f"Not supported: iterator_type={args.iterator_type}")
    @classmethod
    def build_sequence_iter_factory(
        cls, args: argparse.Namespace, iter_options: IteratorOptions, mode: str
    ) -> AbsIterFactory:
        """Build a SequenceIterFactory over a map-style ESPnetDataset.

        Batches are pre-computed by a batch sampler; in distributed mode
        each mini-batch is additionally sharded across the ranks.
        """
        assert check_argument_types()
        dataset = ESPnetDataset(
            iter_options.data_path_and_name_and_type,
            float_dtype=args.train_dtype,
            preprocess=iter_options.preprocess_fn,
            max_cache_size=iter_options.max_cache_size,
            max_cache_fd=iter_options.max_cache_fd,
        )
        cls.check_task_requirements(
            dataset, args.allow_variable_data_keys, train=iter_options.train
        )
        # An optional "utt2category" file next to the first data file
        # groups mini-batches by category.
        if Path(
            Path(iter_options.data_path_and_name_and_type[0][0]).parent, "utt2category"
        ).exists():
            utt2category_file = str(
                Path(
                    Path(iter_options.data_path_and_name_and_type[0][0]).parent,
                    "utt2category",
                )
            )
            logging.warning("Reading " + utt2category_file)
        else:
            utt2category_file = None
        batch_sampler = build_batch_sampler(
            type=iter_options.batch_type,
            shape_files=iter_options.shape_files,
            fold_lengths=args.fold_length,
            batch_size=iter_options.batch_size,
            batch_bins=iter_options.batch_bins,
            sort_in_batch=args.sort_in_batch,
            sort_batch=args.sort_batch,
            drop_last=False,
            min_batch_size=torch.distributed.get_world_size()
            if iter_options.distributed
            else 1,
            utt2category_file=utt2category_file,
        )
        batches = list(batch_sampler)
        if iter_options.num_batches is not None:
            batches = batches[: iter_options.num_batches]
        bs_list = [len(batch) for batch in batches]
        logging.info(f"[{mode}] dataset:\n{dataset}")
        logging.info(f"[{mode}] Batch sampler: {batch_sampler}")
        logging.info(
            f"[{mode}] mini-batch sizes summary: N-batch={len(bs_list)}, "
            f"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}"
        )
        if iter_options.distributed:
            # Shard every mini-batch across ranks; each rank must receive
            # at least one sample per batch.
            world_size = torch.distributed.get_world_size()
            rank = torch.distributed.get_rank()
            for batch in batches:
                if len(batch) < world_size:
                    raise RuntimeError(
                        f"The batch-size must be equal or more than world_size: "
                        f"{len(batch)} < {world_size}"
                    )
            batches = [batch[rank::world_size] for batch in batches]
        return SequenceIterFactory(
            dataset=dataset,
            batches=batches,
            seed=args.seed,
            num_iters_per_epoch=iter_options.num_iters_per_epoch,
            shuffle=iter_options.train,
            num_workers=args.num_workers,
            collate_fn=iter_options.collate_fn,
            pin_memory=args.ngpu > 0,
        )
    @classmethod
    def build_chunk_iter_factory(
        cls,
        args: argparse.Namespace,
        iter_options: IteratorOptions,
        mode: str,
    ) -> AbsIterFactory:
        """Build a ChunkIterFactory, which slices utterances into chunks.

        Samples are enumerated one per batch; the chunk iterator then
        re-batches fixed-length chunks up to ``batch_size``.
        """
        assert check_argument_types()
        dataset = ESPnetDataset(
            iter_options.data_path_and_name_and_type,
            float_dtype=args.train_dtype,
            preprocess=iter_options.preprocess_fn,
            max_cache_size=iter_options.max_cache_size,
            max_cache_fd=iter_options.max_cache_fd,
        )
        cls.check_task_requirements(
            dataset, args.allow_variable_data_keys, train=iter_options.train
        )
        # The key file (utterance-id list) is the first shape file, or the
        # first data file when no shape file is given.
        if len(iter_options.shape_files) == 0:
            key_file = iter_options.data_path_and_name_and_type[0][0]
        else:
            key_file = iter_options.shape_files[0]
        batch_sampler = UnsortedBatchSampler(batch_size=1, key_file=key_file)
        batches = list(batch_sampler)
        if iter_options.num_batches is not None:
            batches = batches[: iter_options.num_batches]
        logging.info(f"[{mode}] dataset:\n{dataset}")
        if iter_options.distributed:
            world_size = torch.distributed.get_world_size()
            rank = torch.distributed.get_rank()
            if len(batches) < world_size:
                raise RuntimeError("Number of samples is smaller than world_size")
            if iter_options.batch_size < world_size:
                raise RuntimeError("batch_size must be equal or more than world_size")
            # Distribute batch_size across ranks, giving the remainder to
            # the lowest ranks.
            if rank < iter_options.batch_size % world_size:
                batch_size = iter_options.batch_size // world_size + 1
            else:
                batch_size = iter_options.batch_size // world_size
            num_cache_chunks = args.num_cache_chunks // world_size
            # NOTE(kamo): Split whole corpus by sample numbers without considering
            # each of the lengths, therefore the number of iteration counts are not
            # always equal to each other and the iterations are limitted
            # by the fewest iterations.
            # i.e. the samples over the counts are discarded.
            batches = batches[rank::world_size]
        else:
            batch_size = iter_options.batch_size
            num_cache_chunks = args.num_cache_chunks
        return ChunkIterFactory(
            dataset=dataset,
            batches=batches,
            seed=args.seed,
            batch_size=batch_size,
            # For chunk iterator,
            # --num_iters_per_epoch doesn't indicate the number of iterations,
            # but indicates the number of samples.
            num_samples_per_epoch=iter_options.num_iters_per_epoch,
            shuffle=iter_options.train,
            num_workers=args.num_workers,
            collate_fn=iter_options.collate_fn,
            pin_memory=args.ngpu > 0,
            chunk_length=args.chunk_length,
            chunk_shift_ratio=args.chunk_shift_ratio,
            num_cache_chunks=num_cache_chunks,
            excluded_key_prefixes=args.chunk_excluded_key_prefixes,
        )
# NOTE(kamo): Not abstract class
@classmethod
def build_task_iter_factory(
cls,
args: argparse.Namespace,
iter_options: IteratorOptions,
mode: str,
) -> AbsIterFactory:
"""Build task specific iterator factory
Example:
>>> class YourTask(AbsTask):
... @classmethod
... def add_task_arguments(cls, parser: argparse.ArgumentParser):
... parser.set_defaults(iterator_type="task")
...
... @classmethod
... def build_task_iter_factory(
... cls,
... args: argparse.Namespace,
... iter_options: IteratorOptions,
... mode: str,
... ):
... return FooIterFactory(...)
...
... @classmethod
... def build_iter_options(
.... args: argparse.Namespace,
... distributed_option: DistributedOption,
... mode: str
... ):
... # if you need to customize options object
"""
raise NotImplementedError
    @classmethod
    def build_multiple_iter_factory(
        cls, args: argparse.Namespace, distributed_option: DistributedOption, mode: str
    ):
        """Build a MultipleIterFactory over pre-split data directories.

        Each data path must be a directory containing a "num_splits" file
        and "split.{i}" sub-entries; one lazily-built sub-factory is
        created per split.
        """
        assert check_argument_types()
        iter_options = cls.build_iter_options(args, distributed_option, mode)
        assert len(iter_options.data_path_and_name_and_type) > 0, len(
            iter_options.data_path_and_name_and_type
        )
        # 1. Sanity check
        # All data/shape paths must be split directories agreeing on the
        # number of splits, each providing every "split.{i}" entry.
        num_splits = None
        for path in [
            path for path, _, _ in iter_options.data_path_and_name_and_type
        ] + list(iter_options.shape_files):
            if not Path(path).is_dir():
                raise RuntimeError(f"{path} is not a directory")
            p = Path(path) / "num_splits"
            if not p.exists():
                raise FileNotFoundError(f"{p} is not found")
            with p.open() as f:
                _num_splits = int(f.read())
                if num_splits is not None and num_splits != _num_splits:
                    raise RuntimeError(
                        f"Number of splits are mismathed: "
                        f"{iter_options.data_path_and_name_and_type[0][0]} and {path}"
                    )
                num_splits = _num_splits
            for i in range(num_splits):
                p = Path(path) / f"split.{i}"
                if not p.exists():
                    raise FileNotFoundError(f"{p} is not found")
        # 2. Create functions to build an iter factory for each splits
        data_path_and_name_and_type_list = [
            [
                (str(Path(p) / f"split.{i}"), n, t)
                for p, n, t in iter_options.data_path_and_name_and_type
            ]
            for i in range(num_splits)
        ]
        shape_files_list = [
            [str(Path(s) / f"split.{i}") for s in iter_options.shape_files]
            for i in range(num_splits)
        ]
        # Distribute --num_iters_per_epoch across splits (remainder spread
        # over the first splits); the cache budget is divided evenly.
        num_iters_per_epoch_list = [
            (iter_options.num_iters_per_epoch + i) // num_splits
            if iter_options.num_iters_per_epoch is not None
            else None
            for i in range(num_splits)
        ]
        max_cache_size = iter_options.max_cache_size / num_splits
        # Note that iter-factories are built for each epoch at runtime lazily.
        build_funcs = [
            functools.partial(
                cls.build_iter_factory,
                args,
                distributed_option,
                mode,
                kwargs=dict(
                    data_path_and_name_and_type=_data_path_and_name_and_type,
                    shape_files=_shape_files,
                    num_iters_per_epoch=_num_iters_per_epoch,
                    max_cache_size=max_cache_size,
                ),
            )
            for (
                _data_path_and_name_and_type,
                _shape_files,
                _num_iters_per_epoch,
            ) in zip(
                data_path_and_name_and_type_list,
                shape_files_list,
                num_iters_per_epoch_list,
            )
        ]
        # 3. Build MultipleIterFactory
        return MultipleIterFactory(
            build_funcs=build_funcs, shuffle=iter_options.train, seed=args.seed
        )
@classmethod
def build_streaming_iterator(
cls,
data_path_and_name_and_type,
preprocess_fn,
collate_fn,
key_file: str = None,
batch_size: int = 1,
dtype: str = np.float32,
num_workers: int = 1,
allow_variable_data_keys: bool = False,
ngpu: int = 0,
inference: bool = False,
) -> DataLoader:
"""Build DataLoader using iterable dataset"""
assert check_argument_types()
# For backward compatibility for pytorch DataLoader
if collate_fn is not None:
kwargs = dict(collate_fn=collate_fn)
else:
kwargs = {}
dataset = IterableESPnetDataset(
data_path_and_name_and_type,
float_dtype=dtype,
preprocess=preprocess_fn,
key_file=key_file,
)
if dataset.apply_utt2category:
kwargs.update(batch_size=1)
else:
kwargs.update(batch_size=batch_size)
cls.check_task_requirements(
dataset, allow_variable_data_keys, train=False, inference=inference
)
return DataLoader(
dataset=dataset,
pin_memory=ngpu > 0,
num_workers=num_workers,
**kwargs,
)
    # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
    @classmethod
    def build_model_from_file(
        cls,
        config_file: Union[Path, str] = None,
        model_file: Union[Path, str] = None,
        device: str = "cpu",
    ) -> Tuple[AbsESPnetModel, argparse.Namespace]:
        """Build model from the files.
        This method is used for inference or fine-tuning.
        Args:
            config_file: The yaml file saved when training.
            model_file: The model file saved when training.
            device: Device type, "cpu", "cuda", or "cuda:N".
        Returns:
            The built model moved to ``device``, and the training
            arguments restored from the config file.
        """
        assert check_argument_types()
        if config_file is None:
            assert model_file is not None, (
                "The argument 'model_file' must be provided "
                "if the argument 'config_file' is not specified."
            )
            # Fall back to the config.yaml stored next to the model file.
            config_file = Path(model_file).parent / "config.yaml"
        else:
            config_file = Path(config_file)
        with config_file.open("r", encoding="utf-8") as f:
            args = yaml.safe_load(f)
        args = argparse.Namespace(**args)
        model = cls.build_model(args)
        if not isinstance(model, AbsESPnetModel):
            raise RuntimeError(
                f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
            )
        model.to(device)
        if model_file is not None:
            if device == "cuda":
                # NOTE(kamo): "cuda" for torch.load always indicates cuda:0
                # in PyTorch<=1.4
                device = f"cuda:{torch.cuda.current_device()}"
            try:
                model.load_state_dict(torch.load(model_file, map_location=device))
            except RuntimeError:
                # Note(simpleoier): the following part is to be compatible with
                # pretrained model using earlier versions before `0a625088`
                state_dict = torch.load(model_file, map_location=device)
                if any(["frontend.upstream.model" in k for k in state_dict.keys()]):
                    if any(
                        [
                            "frontend.upstream.upstream.model" in k
                            for k in dict(model.named_parameters())
                        ]
                    ):
                        # Remap old s3prl frontend keys to the current layout.
                        state_dict = {
                            k.replace(
                                "frontend.upstream.model",
                                "frontend.upstream.upstream.model",
                            ): v
                            for k, v in state_dict.items()
                        }
                        model.load_state_dict(state_dict)
                    else:
                        raise
                else:
                    raise
        return model, args
| 71,532 | 36.432234 | 91 | py |
espnet | espnet-master/espnet2/asvspoof/espnet_model.py | # Copyright 2022 Jiatong Shi (Carnegie Mellon University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import logging
from contextlib import contextmanager
from itertools import permutations
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asvspoof.decoder.abs_decoder import AbsDecoder
from espnet2.asvspoof.loss.abs_loss import AbsASVSpoofLoss
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.pytorch_backend.nets_utils import to_device
if V(torch.__version__) >= V("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch<1.6.0
@contextmanager
def autocast(enabled=True):
yield
class ESPnetASVSpoofModel(AbsESPnetModel):
    """ASV Spoofing model
    A simple ASV Spoofing model
    """

    def __init__(
        self,
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        encoder: AbsEncoder,
        preencoder: Optional[AbsPreEncoder],
        decoder: AbsDecoder,
        losses: Dict[str, AbsASVSpoofLoss],
    ):
        """Initialize the model.

        Args:
            frontend: Optional feature frontend (e.g. STFT-based).
            specaug: Optional SpecAugment module (applied in training only).
            normalize: Optional feature normalization (e.g. global CMVN).
            encoder: Speech encoder producing frame-level embeddings.
            preencoder: Optional pre-encoder applied before the encoder.
            decoder: Prediction layer on top of the encoder output.
            losses: Mapping from loss name ("binary_loss",
                "am_softmax_loss", "oc_softmax_loss") to its loss module.
        """
        assert check_argument_types()
        super().__init__()
        self.preencoder = preencoder
        self.encoder = encoder
        self.normalize = normalize
        self.frontend = frontend
        self.specaug = specaug
        self.decoder = decoder
        self.losses = losses

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor = None,
        label: torch.Tensor = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss
        Args:
            speech: (Batch, samples)
            speech_lengths: (Batch,)
            label: (Batch,) spoof/bona-fide ground-truth labels
            kwargs: "utt_id" is among the input.
        """
        assert speech.shape[0] == label.shape[0], (speech.shape, label.shape)
        batch_size = speech.shape[0]
        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        # 2. Decoder (basically a prediction layer after encoder_out)
        pred = self.decoder(encoder_out, encoder_out_lens)
        if "oc_softmax_loss" in self.losses:
            loss = (
                self.losses["oc_softmax_loss"](label, encoder_out)
                * self.losses["oc_softmax_loss"].weight
            )
            # BUG FIX: previously scored via "am_softmax_loss" here, which
            # raises KeyError when only the OC-softmax loss is configured.
            pred = self.losses["oc_softmax_loss"].score(encoder_out)
        elif "am_softmax_loss" in self.losses:
            loss = (
                self.losses["am_softmax_loss"](label, encoder_out)
                * self.losses["am_softmax_loss"].weight
            )
            pred = self.losses["am_softmax_loss"].score(encoder_out)
        else:
            loss = (
                self.losses["binary_loss"](pred, label)
                * self.losses["binary_loss"].weight
            )
        # Accuracy with the decision threshold at 0.5.
        acc = torch.sum(((pred.view(-1) > 0.5) == (label.view(-1) > 0.5))) / batch_size
        stats = dict(
            loss=loss.detach(),
            acc=acc.detach(),
        )
        # force_gatherable: gather loss/stats for DataParallel workers.
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight

    def collect_feats(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Extract features for the collect-stats stage."""
        feats, feats_lengths = self._extract_feats(speech, speech_lengths)
        return {"feats": feats, "feats_lengths": feats_lengths}

    def encode(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Frontend + Encoder
        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch,)
        """
        with autocast(False):
            # 1. Extract feats
            feats, feats_lengths = self._extract_feats(speech, speech_lengths)
            # 2. Data augmentation (training mode only)
            if self.specaug is not None and self.training:
                feats, feats_lengths = self.specaug(feats, feats_lengths)
            # 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)
            # Pre-encoder, e.g. used for raw input data
            if self.preencoder is not None:
                feats, feats_lengths = self.preencoder(feats, feats_lengths)
        # 4. Forward encoder
        # feats: (Batch, Length, Dim)
        # -> encoder_out: (Batch, Length2, Dim)
        encoder_out, encoder_out_lens, _ = self.encoder(feats, feats_lengths)
        assert encoder_out.size(0) == speech.size(0), (
            encoder_out.size(),
            speech.size(0),
        )
        assert encoder_out.size(1) <= encoder_out_lens.max(), (
            encoder_out.size(),
            encoder_out_lens.max(),
        )
        return encoder_out, encoder_out_lens

    def _extract_feats(
        self, speech: torch.Tensor, speech_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply the frontend (if any) to raw speech.

        When ``speech_lengths`` is None, all utterances are assumed to
        span the full padded length.
        """
        batch_size = speech.shape[0]
        speech_lengths = (
            speech_lengths
            if speech_lengths is not None
            else torch.ones(batch_size).int() * speech.shape[1]
        )
        assert speech_lengths.dim() == 1, speech_lengths.shape
        # for data-parallel
        speech = speech[:, : speech_lengths.max()]
        if self.frontend is not None:
            # Frontend
            # e.g. STFT and Feature extract
            # data_loader may send time-domain signal in this case
            # speech (Batch, NSamples) -> feats: (Batch, NFrames, Dim)
            feats, feats_lengths = self.frontend(speech, speech_lengths)
        else:
            # No frontend and no feature extract
            feats, feats_lengths = speech, speech_lengths
        return feats, feats_lengths
| 6,487 | 33.510638 | 87 | py |
espnet | espnet-master/espnet2/asvspoof/loss/binary_loss.py | import torch
from espnet2.asvspoof.loss.abs_loss import AbsASVSpoofLoss
from espnet.nets.pytorch_backend.nets_utils import to_device
class ASVSpoofBinaryLoss(AbsASVSpoofLoss):
    """Binary loss for ASV Spoofing."""

    def __init__(
        self,
        weight: float = 1.0,
    ):
        """Initialize.

        Args:
            weight: scaling weight applied to this loss by the caller.
        """
        super().__init__()
        self.weight = weight
        self.sigmoid = torch.nn.Sigmoid()
        self.loss = torch.nn.BCELoss(reduction="mean")

    def forward(self, pred: torch.Tensor, label: torch.Tensor, **kwargs):
        """Forward.
        Args:
            pred (torch.Tensor): prediction probability [Batch, 2]
            label (torch.Tensor): ground truth label [Batch, 2]
        """
        # Flatten, squash predictions to (0, 1), and compare against the
        # float-cast labels with mean-reduced binary cross-entropy.
        probs = self.sigmoid(pred.view(-1))
        targets = label.view(-1).float()
        return self.loss(probs, targets)

    def score(self, pred: torch.Tensor):
        """Return the raw prediction as the score."""
        return pred
| 854 | 27.5 | 77 | py |
espnet | espnet-master/espnet2/asvspoof/loss/abs_loss.py | from abc import ABC, abstractmethod
import torch
EPS = torch.finfo(torch.get_default_dtype()).eps
class AbsASVSpoofLoss(torch.nn.Module, ABC):
    """Base class for all ASV Spoofing loss modules.

    Subclasses must implement ``forward`` (the loss) and ``score``
    (per-utterance prediction used for evaluation).
    """

    # the name will be the key that appears in the reporter
    @property
    def name(self) -> str:
        # BUG FIX: previously *returned* the NotImplementedError class
        # instead of raising it.
        raise NotImplementedError

    @abstractmethod
    def forward(
        self,
        ref,
        inf,
    ) -> torch.Tensor:
        # the return tensor should be shape of (batch)
        raise NotImplementedError

    @abstractmethod
    def score(
        self,
        pred,
    ) -> torch.Tensor:
        # BUG FIX: ``raise NotImplemented`` raised a TypeError (NotImplemented
        # is not an exception); raise NotImplementedError instead.
        raise NotImplementedError
| 646 | 19.870968 | 59 | py |
espnet | espnet-master/espnet2/asvspoof/loss/am_softmax_loss.py | import torch
from espnet2.asvspoof.loss.abs_loss import AbsASVSpoofLoss
from espnet.nets.pytorch_backend.nets_utils import to_device
class ASVSpoofAMSoftmaxLoss(AbsASVSpoofLoss):
    """Additive-margin softmax (AM-softmax) loss for ASV spoofing.

    Embeddings are mean-pooled over time and L2-normalized, then compared
    by cosine similarity against learnable per-class centers; a margin
    ``m`` is subtracted from the true-class logit and the result is scaled
    by ``s`` before a binary cross-entropy on the bona-fide logit.
    """

    def __init__(
        self,
        weight: float = 1.0,
        enc_dim: int = 128,
        s: float = 20,
        m: float = 0.5,
    ):
        """Initialize.

        Args:
            weight: scaling weight applied to this loss by the caller.
            enc_dim: dimension of the encoder embedding.
            s: logit scale factor.
            m: additive margin on the target-class logit.
        """
        # BUG FIX: ``super(ASVSpoofAMSoftmaxLoss).__init__()`` created an
        # *unbound* super object, so nn.Module.__init__ never ran and the
        # Parameter below could not be registered.
        super().__init__()
        self.weight = weight
        self.enc_dim = enc_dim
        self.s = s
        self.m = m
        # One learnable center per class (row 0 is scored as bona fide,
        # judging from score() using logits[:, 0] -- TODO confirm).
        self.centers = torch.nn.Parameter(torch.randn(2, enc_dim))
        self.sigmoid = torch.nn.Sigmoid()
        self.loss = torch.nn.BCELoss(reduction="mean")

    def forward(self, label: torch.Tensor, emb: torch.Tensor, **kwargs):
        """Forward.

        Args:
            label (torch.Tensor): ground truth label [Batch, 1]
            emb (torch.Tensor): encoder embedding output [Batch, T, enc_dim]
        """
        batch_size = emb.shape[0]
        # Mean-pool over time, then L2-normalize embeddings and centers.
        emb = torch.mean(emb, dim=1)
        norms = torch.norm(emb, p=2, dim=-1, keepdim=True)
        nfeat = torch.div(emb, norms)
        norms_c = torch.norm(self.centers, p=2, dim=-1, keepdim=True)
        ncenters = torch.div(self.centers, norms_c)
        # Cosine similarity to each class center: [Batch, 2]
        logits = torch.matmul(nfeat, torch.transpose(ncenters, 0, 1))
        # Build the margin matrix on emb's device (the original used a CPU
        # FloatTensor, failing on GPU) and cast the scatter index to int64;
        # the deprecated torch.autograd.Variable wrapper is dropped.
        y_onehot = torch.zeros(batch_size, 2, dtype=logits.dtype, device=emb.device)
        y_onehot.scatter_(1, torch.unsqueeze(label, dim=-1).long(), self.m)
        margin_logits = self.s * (logits - y_onehot)
        loss = self.loss(self.sigmoid(margin_logits[:, 0]), label.view(-1).float())
        return loss

    def score(self, emb: torch.Tensor):
        """Prediction.

        Args:
            emb (torch.Tensor): encoder embedding output [Batch, T, enc_dim]

        Returns:
            torch.Tensor: cosine logit of class 0 per utterance [Batch]
        """
        emb = torch.mean(emb, dim=1)
        norms = torch.norm(emb, p=2, dim=-1, keepdim=True)
        nfeat = torch.div(emb, norms)
        norms_c = torch.norm(self.centers, p=2, dim=-1, keepdim=True)
        ncenters = torch.div(self.centers, norms_c)
        logits = torch.matmul(nfeat, torch.transpose(ncenters, 0, 1))
        return logits[:, 0]
| 2,180 | 33.619048 | 83 | py |
espnet | espnet-master/espnet2/asvspoof/loss/oc_softmax_loss.py | import torch
from espnet2.asvspoof.loss.abs_loss import AbsASVSpoofLoss
from espnet.nets.pytorch_backend.nets_utils import to_device
class ASVSpoofOCSoftmaxLoss(AbsASVSpoofLoss):
    """One-class softmax (OC-softmax) loss for ASV spoofing.

    NOTE: this module is an exercise template -- the score/loss computations
    are intentionally left as TODOs for exercise 2.

    Args:
        weight: loss weight (stored for use by the wrapping model).
        enc_dim: dimension of the encoder embedding.
        m_real: margin for bona-fide (real) samples.
        m_fake: margin for spoofed (fake) samples.
        alpha: softplus scale factor.
    """

    def __init__(
        self,
        weight: float = 1.0,
        enc_dim: int = 128,
        m_real: float = 0.5,
        m_fake: float = 0.2,
        alpha: float = 20.0,
    ):
        # BUG FIX: the original called ``super(ASVSpoofOCSoftmaxLoss).__init__()``,
        # which never runs ``torch.nn.Module.__init__`` on ``self``; registering
        # ``self.center`` as a Parameter below then raises
        # "cannot assign parameters before Module.__init__() call".
        super().__init__()
        self.weight = weight
        self.feat_dim = enc_dim
        self.m_real = m_real
        self.m_fake = m_fake
        self.alpha = alpha
        self.center = torch.nn.Parameter(torch.randn(1, self.feat_dim))
        torch.nn.init.kaiming_uniform_(self.center, 0.25)
        self.softplus = torch.nn.Softplus()

    def forward(self, label: torch.Tensor, emb: torch.Tensor, **kwargs):
        """Forward.

        Args:
            label (torch.Tensor): ground truth label [Batch, 1]
            emb (torch.Tensor): encoder embedding output [Batch, T, enc_dim]
        """
        # Pool over time, then L2-normalize the center and the embeddings.
        emb = torch.mean(emb, dim=1)
        w = torch.nn.functional.normalize(self.center, p=2, dim=1)
        x = torch.nn.functional.normalize(emb, p=2, dim=1)
        # TODO1 (exercise 2): compute scores based on w and x
        # TODO2 (exercise 2): calculate the score bias based on m_real and m_fake
        # TODO3 (exercise 2): apply alpha and softplus
        # TODO4 (exercise 2): return the final loss
        return None

    def score(self, emb: torch.Tensor):
        """Prediction.

        Args:
            emb (torch.Tensor): encoder embedding output [Batch, T, enc_dim]
        """
        emb = torch.mean(emb, dim=1)
        w = torch.nn.functional.normalize(self.center, p=2, dim=1)
        x = torch.nn.functional.normalize(emb, p=2, dim=1)
        # TODO5 (exercise 2): compute scores
| 1,850 | 31.473684 | 81 | py |
espnet | espnet-master/espnet2/asvspoof/decoder/linear_decoder.py | from typing import Optional
import torch
from espnet2.asvspoof.decoder.abs_decoder import AbsDecoder
class LinearDecoder(AbsDecoder):
    """Linear decoder (exercise template).

    NOTE(review): the original docstring said "speaker diarization", but this
    module lives under ``espnet2/asvspoof`` -- presumably it should say
    "ASV spoofing"; confirm against the exercise materials.

    The projection layer and the forward computation are deliberately left as
    TODOs (checkpoint3).
    """

    def __init__(
        self,
        encoder_output_size: int,
    ):
        super().__init__()
        # TODO1 (checkpoint3): initialize a linear projection layer
        # (expected to map ``encoder_output_size`` to the output dimension)

    def forward(self, input: torch.Tensor, ilens: Optional[torch.Tensor]):
        """Forward.

        Args:
            input (torch.Tensor): hidden_space [Batch, T, F]
            ilens (torch.Tensor): input lengths [Batch]
        """
        # TODO2 (checkpoint3): compute mean over time-domain (dimension 1)
        # TODO3 (checkpoint3): apply the projection layer
        # TODO4 (checkpoint3): change the return value
        return None
| 805 | 25.866667 | 74 | py |
espnet | espnet-master/espnet2/asvspoof/decoder/abs_decoder.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class AbsDecoder(torch.nn.Module, ABC):
    """Abstract decoder interface; concrete decoders implement :meth:`forward`."""

    @abstractmethod
    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Map encoder output to decoder output; must be overridden."""
        raise NotImplementedError
| 304 | 19.333333 | 43 | py |
espnet | espnet-master/utils/generate_wav_from_fbank.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This code is based on https://github.com/kan-bayashi/PytorchWaveNetVocoder."""
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import os
import time
import h5py
import numpy as np
import pysptk
import torch
from scipy.io.wavfile import write
from sklearn.preprocessing import StandardScaler
from espnet.nets.pytorch_backend.wavenet import WaveNet, decode_mu_law, encode_mu_law
from espnet.utils.cli_readers import file_reader_helper
from espnet.utils.cli_utils import get_commandline_args
class TimeInvariantMLSAFilter(object):
    """Time invariant MLSA filter.

    This module is used to perform noise shaping described in
    `An investigation of noise shaping with perceptual
    weighting for WaveNet-based speech generation`_.

    Args:
        coef (ndaaray): MLSA filter coefficient (D,).
        alpha (float): All pass constant value.
        n_shift (int): Shift length in points.

    .. _`An investigation of noise shaping with perceptual
        weighting for WaveNet-based speech generation`:
        https://ieeexplore.ieee.org/abstract/document/8461332

    """

    def __init__(self, coef, alpha, n_shift):
        self.coef = coef
        self.n_shift = n_shift
        # pysptk synthesizer configured once; the same coefficient vector is
        # replicated per frame in __call__ (hence "time invariant").
        self.mlsa_filter = pysptk.synthesis.Synthesizer(
            pysptk.synthesis.MLSADF(order=coef.shape[0] - 1, alpha=alpha),
            hopsize=n_shift,
        )

    def __call__(self, y):
        """Apply time invariant MLSA filter.

        Args:
            y (ndarray): Waveform signal normalized from -1 to 1 (N,).

        Returns:
            y (ndarray): Filtered waveform signal normalized from -1 to 1 (N,).

        """
        # the filter operates on one-dimensional float64 signals only
        assert y.ndim == 1
        signal = np.float64(y)
        # replicate the single coefficient vector for every analysis frame
        n_frames = len(signal) // self.n_shift + 1
        frame_coef = np.tile(self.coef, [n_frames, 1])
        return self.mlsa_filter.synthesis(signal, frame_coef)
def get_parser():
    """Build the argument parser for WaveNet-vocoder waveform generation."""
    p = argparse.ArgumentParser(
        description="generate wav from FBANK using wavenet vocoder",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    p.add_argument("--fs", type=int, default=22050, help="Sampling frequency")
    p.add_argument("--n_fft", type=int, default=1024, help="FFT length in point")
    p.add_argument("--n_shift", type=int, default=256, help="Shift length in point")
    p.add_argument("--model", type=str, default=None, help="WaveNet model")
    p.add_argument(
        "--filetype",
        type=str,
        default="mat",
        choices=["mat", "hdf5"],
        help="Specify the file format for the rspecifier. "
        '"mat" is the matrix format in kaldi',
    )
    p.add_argument("rspecifier", type=str, help="Input feature e.g. scp:feat.scp")
    p.add_argument("outdir", type=str, help="Output directory")
    return p
def main():
    """Generate waveforms from FBANK features with a trained WaveNet vocoder.

    Reads features via a Kaldi-style rspecifier, normalizes them with the
    stored scaler statistics, autoregressively generates mu-law samples, then
    applies the MLSA noise-shaping filter and writes 16-bit PCM wav files.
    """
    parser = get_parser()
    args = parser.parse_args()
    # logging info
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    logging.info(get_commandline_args())
    # create the output directory if needed
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # load model config (training-time args pickled next to the model)
    model_dir = os.path.dirname(args.model)
    train_args = torch.load(os.path.join(model_dir, "model.conf"))
    # load feature normalization statistics and MLSA filter parameters
    scaler = StandardScaler()
    with h5py.File(os.path.join(model_dir, "stats.h5")) as f:
        scaler.mean_ = f["/melspc/mean"][()]
        scaler.scale_ = f["/melspc/scale"][()]
        # TODO(kan-bayashi): include following info as default
        coef = f["/mlsa/coef"][()]
        alpha = f["/mlsa/alpha"][()]
    # define MLSA filter for noise shaping
    mlsa_filter = TimeInvariantMLSAFilter(
        coef=coef,
        alpha=alpha,
        n_shift=args.n_shift,
    )
    # define model and load parameters
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = WaveNet(
        n_quantize=train_args.n_quantize,
        n_aux=train_args.n_aux,
        n_resch=train_args.n_resch,
        n_skipch=train_args.n_skipch,
        dilation_depth=train_args.dilation_depth,
        dilation_repeat=train_args.dilation_repeat,
        kernel_size=train_args.kernel_size,
        upsampling_factor=train_args.upsampling_factor,
    )
    model.load_state_dict(torch.load(args.model, map_location="cpu")["model"])
    model.eval()
    model.to(device)
    for idx, (utt_id, lmspc) in enumerate(
        file_reader_helper(args.rspecifier, args.filetype), 1
    ):
        logging.info("(%d) %s" % (idx, utt_id))
        # perform preprocessing
        x = encode_mu_law(
            np.zeros((1)), mu=train_args.n_quantize
        )  # quantize initial seed waveform
        h = scaler.transform(lmspc)  # normalize features
        # convert to tensor
        x = torch.tensor(x, dtype=torch.long, device=device)  # (1,)
        h = torch.tensor(h, dtype=torch.float, device=device)  # (T, n_aux)
        # number of samples implied by the frame count, shift and FFT size
        n_samples = (h.shape[0] - 1) * args.n_shift + args.n_fft
        # autoregressive generation (slow; logged every `interval` samples)
        start_time = time.time()
        with torch.no_grad():
            y = model.generate(x, h, n_samples, interval=100)
        logging.info(
            "generation speed = %s (sec / sample)"
            % ((time.time() - start_time) / (len(y) - 1))
        )
        y = decode_mu_law(y, mu=train_args.n_quantize)
        # apply mlsa filter for noise shaping
        y = mlsa_filter(y)
        # save as 16-bit PCM .wav file
        write(
            os.path.join(args.outdir, "%s.wav" % utt_id),
            args.fs,
            (y * np.iinfo(np.int16).max).astype(np.int16),
        )
if __name__ == "__main__":
    main()
| 5,966 | 30.571429 | 87 | py |
espnet | espnet-master/utils/average_checkpoints.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
import numpy as np
def main():
    """Average model parameters over selected training snapshots.

    Snapshot selection: when ``--log`` is given, the best ``--num`` epochs by
    the chosen validation metric are used; otherwise the ``--num`` most
    recently modified snapshot files are used.

    NOTE(review): this function reads the module-level ``args`` namespace
    assigned in the ``__main__`` guard; it is not usable as a library call.
    """
    if args.log is not None:
        with open(args.log) as f:
            logs = json.load(f)
        # Collect [epoch, score] pairs; scores are signed so that "larger is
        # better" holds for every metric (losses/CERs are negated).
        val_scores = []
        for log in logs:
            if log["epoch"] > args.max_epoch:
                continue
            if args.metric == "acc":
                if "validation/main/acc" in log.keys():
                    val_scores += [[log["epoch"], log["validation/main/acc"]]]
            elif args.metric == "perplexity":
                if "val_perplexity" in log.keys():
                    val_scores += [[log["epoch"], 1 / log["val_perplexity"]]]
            elif args.metric == "loss":
                if "validation/main/loss" in log.keys():
                    val_scores += [[log["epoch"], -log["validation/main/loss"]]]
            elif args.metric == "bleu":
                if "validation/main/bleu" in log.keys():
                    val_scores += [[log["epoch"], log["validation/main/bleu"]]]
            elif args.metric == "cer":
                if "validation/main/cer" in log.keys():
                    val_scores += [[log["epoch"], -log["validation/main/cer"]]]
            elif args.metric == "cer_ctc":
                if "validation/main/cer_ctc" in log.keys():
                    val_scores += [[log["epoch"], -log["validation/main/cer_ctc"]]]
            else:
                # Keep original order for compatibility
                if "validation/main/acc" in log.keys():
                    val_scores += [[log["epoch"], log["validation/main/acc"]]]
                elif "val_perplexity" in log.keys():
                    val_scores += [[log["epoch"], 1 / log["val_perplexity"]]]
                elif "validation/main/loss" in log.keys():
                    val_scores += [[log["epoch"], -log["validation/main/loss"]]]
        if len(val_scores) == 0:
            raise ValueError("%s is not found in log." % args.metric)
        # Sort descending by score and keep the top ``args.num`` epochs.
        val_scores = np.array(val_scores)
        sort_idx = np.argsort(val_scores[:, -1])
        sorted_val_scores = val_scores[sort_idx][::-1]
        print("metric: %s" % args.metric)
        print("best val scores = " + str(sorted_val_scores[: args.num, 1]))
        print(
            "selected epochs = "
            + str(sorted_val_scores[: args.num, 0].astype(np.int64))
        )
        last = [
            os.path.dirname(args.snapshots[0]) + "/snapshot.ep.%d" % (int(epoch))
            for epoch in sorted_val_scores[: args.num, 0]
        ]
    else:
        # No log: fall back to the most recently modified snapshot files.
        last = sorted(args.snapshots, key=os.path.getmtime)
        last = last[-args.num :]
    print("average over", last)
    avg = None
    if args.backend == "pytorch":
        import torch
        # sum parameters over all selected snapshots
        for path in last:
            states = torch.load(path, map_location=torch.device("cpu"))["model"]
            if avg is None:
                avg = states
            else:
                for k in avg.keys():
                    avg[k] += states[k]
        # average (integer tensors, e.g. counters, use floor division)
        for k in avg.keys():
            if avg[k] is not None:
                if avg[k].is_floating_point():
                    avg[k] /= args.num
                else:
                    avg[k] //= args.num
        torch.save(avg, args.out)
    elif args.backend == "chainer":
        # sum
        # NOTE(review): ``keys`` is first bound inside this loop; an empty
        # ``last`` would leave it undefined in the averaging step below.
        for path in last:
            states = np.load(path)
            if avg is None:
                keys = [x.split("main/")[1] for x in states if "model" in x]
                avg = dict()
                for k in keys:
                    avg[k] = states["updater/model:main/{}".format(k)]
            else:
                for k in keys:
                    avg[k] += states["updater/model:main/{}".format(k)]
        # average
        for k in keys:
            if avg[k] is not None:
                avg[k] /= args.num
        np.savez_compressed(args.out, **avg)
        os.rename("{}.npz".format(args.out), args.out)  # numpy save with .npz extension
    else:
        raise ValueError("Incorrect type of backend")
def get_parser():
    """Build the argument parser for snapshot averaging."""
    p = argparse.ArgumentParser(description="average models from snapshot")
    p.add_argument("--snapshots", required=True, type=str, nargs="+")
    p.add_argument("--out", required=True, type=str)
    p.add_argument("--num", default=10, type=int)
    p.add_argument("--backend", default="chainer", type=str)
    p.add_argument("--log", default=None, type=str, nargs="?")
    p.add_argument(
        "--metric",
        default="",
        type=str,
        nargs="?",
        choices=["acc", "bleu", "cer", "cer_ctc", "loss", "perplexity"],
    )
    p.add_argument(
        "--max-epoch",
        default=10000000,
        type=int,
        nargs="?",
    )
    return p
if __name__ == "__main__":
    # ``args`` is deliberately module-level: main() reads it as a global.
    args = get_parser().parse_args()
    main()
| 4,882 | 34.384058 | 88 | py |
espnet | espnet-master/egs/wsj/asr1/local/filtering_samples.py | #!/usr/bin/env python3
# Copyright 2020 Shanghai Jiao Tong University (Wangyou Zhang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import json
import sys
from functools import reduce
from operator import mul
from espnet.bin.asr_train import get_parser
from espnet.nets.pytorch_backend.nets_utils import get_subsample
from espnet.utils.dynamic_import import dynamic_import
# Remove utterances whose input is too short for the model's subsampling
# pipeline (i.e. input length minus output length times the minimum
# input/output ratio falls below --min-io-delta), and write a filtered json.
if __name__ == "__main__":
    cmd_args = sys.argv[1:]
    parser = get_parser(required=False)
    parser.add_argument("--data-json", type=str, help="data.json")
    parser.add_argument(
        "--mode-subsample", type=str, required=True, help='One of ("asr", "mt", "st")'
    )
    parser.add_argument(
        "--min-io-delta",
        type=float,
        help="An additional parameter "
        "for controlling the input-output length difference",
        default=0.0,
    )
    parser.add_argument(
        "--output-json-path",
        type=str,
        required=True,
        help="Output path of the filtered json file",
    )
    # First pass: only the generic args, so the model class can be resolved.
    args, _ = parser.parse_known_args(cmd_args)
    if args.model_module is None:
        model_module = "espnet.nets." + args.backend + "_backend.e2e_asr:E2E"
    else:
        model_module = args.model_module
    module_name = model_module.split(":")[0].split(".")[-1]
    # One of ("rnn", "rnn-t", "rnn_mix", "rnn_mulenc", "transformer")
    if module_name == "e2e_asr":
        arch_subsample = "rnn"
    elif module_name == "e2e_asr_transducer":
        arch_subsample = "rnn-t"
    elif module_name == "e2e_asr_mix":
        arch_subsample = "rnn_mix"
    elif module_name == "e2e_asr_mulenc":
        arch_subsample = "rnn_mulenc"
    elif "transformer" in module_name:
        arch_subsample = "transformer"
    else:
        raise ValueError("Unsupported model module: %s" % model_module)
    # Second pass: the model class contributes its own CLI arguments.
    model_class = dynamic_import(model_module)
    model_class.add_arguments(parser)
    args = parser.parse_args(cmd_args)
    # subsampling info
    if hasattr(args, "etype") and args.etype.startswith("vgg"):
        # Subsampling is not performed for vgg*.
        # It is performed in max pooling layers at CNN.
        min_io_ratio = 4
    else:
        subsample = get_subsample(args, mode=args.mode_subsample, arch=arch_subsample)
        # the minimum input-output length ratio for all samples
        min_io_ratio = reduce(mul, subsample)
    # load dictionary
    with open(args.data_json, "rb") as f:
        j = json.load(f)["utts"]
    # remove samples with IO ratio smaller than `min_io_ratio`
    for key in list(j.keys()):
        ilen = j[key]["input"][0]["shape"][0]
        olen = min(x["shape"][0] for x in j[key]["output"])
        if float(ilen) - float(olen) * min_io_ratio < args.min_io_delta:
            j.pop(key)
            print("'{}' removed".format(key))
    jsonstring = json.dumps({"utts": j}, indent=4, ensure_ascii=False, sort_keys=True)
    with open(args.output_json_path, "w") as f:
        f.write(jsonstring)
| 2,983 | 33.298851 | 86 | py |
espnet | espnet-master/doc/conf.py | # -*- coding: utf-8 -*-
# flake8: noqa
#
# ESPnet documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 7 15:46:00 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make project sources importable so autodoc can resolve them.
sys.path.insert(0, os.path.abspath("../espnet/nets"))
sys.path.insert(0, os.path.abspath("../utils"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "nbsphinx",
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
    "sphinx.ext.mathjax",
    "sphinx.ext.todo",
    "sphinxarg.ext",
    "sphinx_markdown_tables",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = '.rst'
source_suffix = [".rst", ".md"]
# Enable Markdown sources via recommonmark.
from recommonmark.parser import CommonMarkParser
source_parsers = {
    ".md": CommonMarkParser,
}
# AutoStructify setting ref: https://qiita.com/pashango2/items/d1b379b699af85b529ce
from recommonmark.transform import AutoStructify
github_doc_root = "https://github.com/rtfd/recommonmark/tree/master/doc/"
def setup(app):
    """Sphinx extension hook: register recommonmark's AutoStructify transform."""
    recommonmark_conf = {
        "url_resolver": lambda url: github_doc_root + url,
        "auto_toc_tree_section": "Contents",
    }
    app.add_config_value("recommonmark_config", recommonmark_conf, True)
    app.add_transform(AutoStructify)
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"ESPnet"
copyright = u"2017, Shinji Watanabe"
author = u"Shinji Watanabe"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import espnet
version = espnet.__version__
# The full version, including alpha/beta/rc tags.
release = espnet.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = [
    "_build",
    "Thumbs.db",
    ".DS_Store",
    "README.md",
    # NOTE: because these generated files are directly included
    # from the other files, we should exclude these files manually.
    "_gen/modules.rst",
    "_gen/utils_sh.rst",
    "_gen/utils_py.rst",
    "_gen/espnet_bin.rst",
    "_gen/espnet-bin.rst",
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'nature'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    "**": [
        "relations.html",  # needs 'show_related': True theme option to display
        "searchbox.html",
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "ESPnetdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "ESPnet.tex", u"ESPnet Documentation", u"Shinji Watanabe", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "espnet", u"ESPnet Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "ESPnet",
        u"ESPnet Documentation",
        author,
        "ESPnet",
        "One line description of project.",
        "Miscellaneous",
    ),
]
# Include both the class docstring and __init__ docstring in autodoc output.
autoclass_content = "both"
# NOTE(kan-bayashi): Do not update outputs in notebook automatically.
nbsphinx_execute = "never"
| 6,528 | 28.677273 | 86 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/feats/feats_cluster_faiss.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
# Dongji Gao (2022)
#
# Adapted from fairseq/examples/wav2vec/unsupervised/\
# scripts/wav2vec_cluster_faiss.py
# to fit the scp data format
# This source code is licensed under the MIT license in
# https://github.com/facebookresearch/fairseq
import argparse
import gc
import os
import os.path as osp
import random
from collections import namedtuple
import faiss
import numpy as np
import soundfile as sf
import torch
import tqdm
def get_parser():
    """Build the argument parser for faiss k-means codebook computation."""
    p = argparse.ArgumentParser(
        description="compute kmeans codebook from kaldi-computed feats"
    )
    p.add_argument("feats_scp", help="location of feature scp files")
    p.add_argument("--save_dir", required=True, help="where to save the output")
    p.add_argument(
        "--sample_pct",
        "-r",
        type=float,
        default=0,
        help="percentage of timesteps to sample",
    )
    p.add_argument(
        "--faiss_specs",
        "-f",
        type=str,
        default="l2",
        help="faiss index specs; separated by space "
        "format is: PCAx_NORM_CLUSx_SPHERICAL -> "
        "PCAx if exists first apply PCA "
        "NORM if exists, normalize the vector by L2 norm "
        "CLUSx must exist, cluster to x clusters "
        "SPEHRICAL if exists, apply spherical kmeans",
    )
    return p
faiss_spec = namedtuple("faiss_spec", ["pca", "norm", "n_clus", "sphere", "spec_str"])


def parse_faiss_specs(specs_str):
    """Parse whitespace-separated faiss index spec strings.

    Each spec is an underscore-joined list of components: ``PCAx`` (apply PCA
    to x dims), ``NORM`` (L2-normalize), ``CLUSx`` (cluster into x clusters,
    mandatory) and ``SPHERICAL`` (use spherical k-means).

    Returns:
        list of ``faiss_spec`` named tuples, one per spec string.
    """
    parsed = []
    for spec_str in specs_str.split():
        pca = n_clus = 0
        norm = sphere = False
        for comp in spec_str.split("_"):
            if comp.startswith("PCA"):
                pca = int(comp[len("PCA"):])
            elif comp.startswith("CLUS"):
                n_clus = int(comp[len("CLUS"):])
            elif comp == "NORM":
                norm = True
            elif comp == "SPHERICAL":
                sphere = True
        # every spec must request a positive number of clusters
        assert n_clus > 0
        parsed.append(
            faiss_spec(
                pca=pca, norm=norm, n_clus=n_clus, sphere=sphere, spec_str=spec_str
            )
        )
    return parsed
def main():
    """Train faiss k-means codebooks from features listed in an scp file.

    For every requested spec, optionally applies PCA and/or L2 normalization,
    runs faiss k-means, and saves the resulting centroids (and PCA matrices)
    under ``save_dir/<spec_str>/``.
    """
    parser = get_parser()
    args = parser.parse_args()
    faiss_specs = parse_faiss_specs(args.faiss_specs)
    print("Faiss Specs:", faiss_specs)
    # Concatenate all per-utterance feature matrices into one big array.
    feats_list = []
    with open(args.feats_scp, "r") as f_scp:
        for line in f_scp.readlines():
            _, feat_file = line.split()
            feat = np.load(feat_file)
            feats_list.append(feat)
    feats = np.concatenate(feats_list)
    os.makedirs(args.save_dir, exist_ok=True)
    # np.save(feat_path, feats)
    gc.collect()
    # ``reload`` forces re-reading the raw features after an in-place
    # normalization (faiss.normalize_L2 mutates ``x``, which aliases ``feats``
    # when no PCA was applied).
    # NOTE(review): ``feat_path`` is never defined in this adaptation (the
    # np.save above is commented out), so the reload branch would raise
    # NameError if a NORM-without-PCA spec precedes another spec -- confirm
    # against the upstream fairseq script.
    reload = False
    for spec in faiss_specs:
        print("Processing spec", spec)
        if reload:
            print("Reloading...")
            del feats
            gc.collect()
            feats = np.load(feat_path + ".npy")
        save_path = osp.join(args.save_dir, spec.spec_str)
        os.makedirs(save_path, exist_ok=True)
        d = feats.shape[-1]
        x = feats
        if spec.pca > 0:
            print("Computing PCA")
            pca = faiss.PCAMatrix(d, spec.pca)
            pca.train(x)
            d = spec.pca
            b = faiss.vector_to_array(pca.b)
            A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
            np.save(osp.join(save_path, "pca_A"), A.T)
            np.save(osp.join(save_path, "pca_b"), b)
            print("Applying PCA")
            x = pca.apply_py(x)
        if spec.norm:
            reload = spec.pca <= 0
            print("Normalizing")
            faiss.normalize_L2(x)
        print("Computing kmeans")
        kmeans = faiss.Kmeans(
            d,
            spec.n_clus,
            niter=50,
            verbose=True,
            spherical=spec.sphere,
            max_points_per_centroid=feats.shape[0],
            gpu=True,
            nredo=3,
        )
        kmeans.train(x)
        np.save(osp.join(save_path, "centroids"), kmeans.centroids)
        del kmeans
        del x
        gc.collect()
if __name__ == "__main__":
    main()
| 4,160 | 26.019481 | 86 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/feats/merge_clusters.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
# Dongji Gao (2022)
#
# Adapted from fairseq/examples/wav2vec/unsupervised/\
# scripts/merge_clusters.py
# to fit the scp data format
# This source code is licensed under the MIT license in
# https://github.com/facebookresearch/fairseq
import argparse
import os
import os.path as osp
import random
from shutil import copyfile
import numpy as np
import torch
import tqdm
from npy_append_array import NpyAppendArray
def get_parser():
    """Build the argument parser for cluster-based feature pooling."""
    p = argparse.ArgumentParser(
        description="transforms features via a given pca and stored them in target dir"
    )
    p.add_argument("source", help="directory with features")
    p.add_argument("--split", help="which split to read", required=True)
    p.add_argument("--save_dir", help="where to save the output", required=True)
    p.add_argument("--cluster_dir", help="where the clusters are")
    p.add_argument(
        "--pooling",
        type=str,
        default="mean",
        choices=["mean", "sample"],
        help="how to pool",
    )
    return p
def main():
    """Pool frame features over consecutive runs of identical cluster IDs.

    Reads ``<source>/<split>.npy`` (+ ``.lengths``) and the per-utterance
    cluster sequences, merges each run of equal cluster labels into a single
    vector (mean or random sample), and appends the result to
    ``<save_dir>/<split>.npy`` while writing the new lengths.
    """
    parser = get_parser()
    args = parser.parse_args()
    source_path = osp.join(args.source, args.split)
    cluster_path = osp.join(args.cluster_dir, args.split + ".cluster")
    print(f"data path: {source_path}")
    # Memory-map the big feature matrix; utterances are addressed by offsets.
    features = np.load(source_path + ".npy", mmap_mode="r")
    sizes = []
    offsets = []
    offset = 0
    with open(source_path + ".lengths", "r") as len_f:
        for line in len_f.readlines():
            utt_id, length = line.split()
            length = int(length)
            sizes.append(length)
            offsets.append(offset)
            offset += length
    # One list of integer cluster IDs per utterance (first token is the id).
    clusters = []
    with open(cluster_path, "r") as cf:
        for line in cf.readlines():
            line_list = line.split()
            items = line_list[1:]
            items = list(map(int, items))
            clusters.append(items)
    os.makedirs(args.save_dir, exist_ok=True)
    save_path = osp.join(args.save_dir, args.split)
    if osp.exists(save_path + ".npy"):
        os.remove(save_path + ".npy")
    npaa = NpyAppendArray(save_path + ".npy")
    def merge(feats, clust):
        # Pool each run of consecutive identical cluster labels into one
        # vector; pooling strategy comes from --pooling.
        feats = torch.from_numpy(feats.copy())
        clust = torch.LongTensor(clust)
        _, counts = clust.unique_consecutive(return_counts=True)
        curr = 0
        merged = []
        for c in counts:
            c = c.item()
            start = curr
            end = curr + c
            curr += c
            if args.pooling == "mean":
                new_x = feats[start:end].mean(dim=0)
            elif args.pooling == "sample":
                new_x = feats[start + int(random.random() * c)]
            else:
                raise NotImplementedError()
            merged.append(new_x)
        return torch.stack(merged, dim=0).numpy()
    with open(save_path + ".lengths_pure", "w") as l_f:
        for size, offset, clust in tqdm.tqdm(
            zip(sizes, offsets, clusters), total=len(sizes)
        ):
            end = size + offset
            feats = features[offset:end]
            feats = merge(feats, clust)
            print(len(feats), file=l_f)
            npaa.append(feats)
if __name__ == "__main__":
    main()
| 3,325 | 28.433628 | 87 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/feats/ssl_feature_utils.py | import json
import logging
import os
import re
import sys
from typing import Optional, Union
import numpy as np
import soundfile as sf
import torch
import torchaudio
from espnet2.asr.frontend.s3prl import S3prlFrontend
from espnet.utils.cli_readers import file_reader_helper
from espnet.utils.cli_utils import is_scipy_wav_style
from espnet.utils.cli_writers import file_writer_helper
# Configure root logging once at import time; verbosity comes from the
# LOGLEVEL environment variable (default: INFO) and output goes to stdout.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
# Module-level logger shared by the feature readers below.
logger = logging.getLogger("s3prl_feature_loader")
def format_feature_conf_str(feature_conf: str):
    """Normalize a ``{key=value, ...}`` style config string into a dict.

    Strips all whitespace, wraps every bare word/path token in double quotes,
    rewrites ``=`` as ``:`` and parses the result as JSON.
    """
    # 1. removing any extraneous white spaces
    compact = re.sub(r"\s", "", feature_conf)
    # Surrounding any word/path with "
    quoted = re.sub(r"([\w\.\-/]+)", r'"\1"', compact)
    # Replacing = with :
    jsonish = re.sub(r"=", ": ", quoted)
    try:
        return json.loads(jsonish)
    except Exception as e:
        logger.warning(f"Failure in parsing feature_conf {jsonish}")
        raise e
def dump_feature(
    reader, in_filetype, rspecifier, out_filetype, wspecifier, write_num_frames=None
):
    """Extract features for every utterance in ``rspecifier`` and dump them.

    Args:
        reader: a feature reader exposing ``get_feats(data, ref_len)``.
        in_filetype: input format for ``file_reader_helper`` (e.g. "sound").
        rspecifier: Kaldi-style read specifier (e.g. "scp:wav.scp").
        out_filetype: output format for ``file_writer_helper``.
        wspecifier: Kaldi-style write specifier.
        write_num_frames: optional "ark,t:..." target for frame counts.
    """
    with file_writer_helper(
        wspecifier,
        filetype=out_filetype,
        write_num_frames=write_num_frames,
    ) as writer:
        for utt, mat in file_reader_helper(rspecifier, in_filetype):
            if is_scipy_wav_style(mat):
                # If data is sound file, then got as Tuple[int, ndarray]
                rate, mat = mat
                # scale int16 PCM samples to floats in [-1, 1]
                mat = mat.astype(np.float64, order="C") / 32768.0
            nsample = len(mat)
            feat = reader.get_feats(mat, nsample).numpy()
            writer[utt] = feat
    logger.info("finished successfully")
class BaseFeatureReader(object):
    """Common interface for the feature readers below.

    Subclasses must set ``self.sample_rate`` and implement ``get_feats``.
    """

    def __init__(self):
        raise NotImplementedError

    def load_audio(self, path, ref_len=None):
        """Load a mono waveform, checking sample rate and expected length."""
        waveform, rate = sf.read(path)
        assert rate == self.sample_rate, rate
        if waveform.ndim == 2:
            # average channels down to mono
            waveform = waveform.mean(-1)
        if ref_len is not None and abs(ref_len - len(waveform)) > 160:
            logging.warning(f"ref {ref_len} != read {len(waveform)} ({path})")
        return waveform

    def get_feats(self, data, ref_len=None):
        raise NotImplementedError
class MfccFeatureReader(BaseFeatureReader):
    """Compute MFCC + delta + delta-delta features with torchaudio."""

    def __init__(
        self,
        sample_rate=16000,
        **kwargs,  # placeholder for unused arguments
    ):
        self.sample_rate = sample_rate

    def get_feats(self, data, ref_len=None):
        """Return (time, 39) MFCC features for a path or a waveform array."""
        if isinstance(data, np.ndarray):
            wav = data
        elif isinstance(data, str):
            wav = self.load_audio(data, ref_len=ref_len)
        else:
            raise TypeError(f"Unexpected data type of argument 1: {type(data)}.")
        with torch.no_grad():
            wav_t = torch.from_numpy(wav).view(1, -1).float()
            mfcc = torchaudio.compliance.kaldi.mfcc(
                waveform=wav_t,
                sample_frequency=self.sample_rate,
                use_energy=False,
            ).transpose(0, 1)  # (freq, time)
            delta = torchaudio.functional.compute_deltas(mfcc)
            ddelta = torchaudio.functional.compute_deltas(delta)
            # stack static + dynamic features, back to (time, freq)
            return torch.cat([mfcc, delta, ddelta], dim=0).transpose(0, 1).contiguous()
class HubertFeatureReader(BaseFeatureReader):
    """Extract intermediate-layer features from a fairseq HuBERT encoder."""

    def __init__(
        self,
        hubert_url,
        hubert_dir_path,
        layer,
        sample_rate=16000,
        max_chunk=1600000,
        use_gpu=True,
    ):
        self.sample_rate = sample_rate
        self.device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
        # imported lazily so this module can be loaded without fairseq
        from espnet2.asr.encoder.hubert_encoder import FairseqHubertEncoder

        e = FairseqHubertEncoder(0, hubert_url, hubert_dir_path)
        self.model = e.encoders.to(self.device).eval()
        self.layer = layer
        self.max_chunk = max_chunk
        logger.info(f" max_chunk = {self.max_chunk}")

    def get_feats(self, data, ref_len=None):
        """Return (time, feat_dim) features from layer ``self.layer``."""
        if isinstance(data, np.ndarray):
            wav = data
        elif isinstance(data, str):
            wav = self.load_audio(data, ref_len=ref_len)
        else:
            raise TypeError(f"Unexpected data type of argument 1: {type(data)}.")
        with torch.no_grad():
            wav_t = torch.from_numpy(wav).float().to(self.device).view(1, -1)
            chunks = []
            # process long inputs in max_chunk-sized pieces to bound memory
            for start in range(0, wav_t.size(1), self.max_chunk):
                chunk_feat, _ = self.model.extract_features(
                    source=wav_t[:, start : start + self.max_chunk],
                    padding_mask=None,
                    mask=False,
                    output_layer=self.layer,
                )
                chunks.append(chunk_feat)
        return torch.cat(chunks, 1).squeeze(0).cpu()
class ESPnetHubertFeatureReader(BaseFeatureReader):
    """Extract intermediate-layer features from an ESPnet-trained HuBERT model."""

    def __init__(
        self,
        hubert_model_path,
        layer,
        sample_rate=16000,
        max_chunk=1600000,
        use_gpu=True,
    ):
        self.sample_rate = sample_rate
        self.device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
        # imported lazily so this module can be loaded without the task deps
        from espnet2.tasks.hubert import HubertTask

        hubert_model, hubert_train_args = HubertTask.build_model_from_file(
            None,
            hubert_model_path,
            self.device,
        )
        self.model = hubert_model.encoder.hubert_pretrain_model.eval()
        self.layer = layer
        self.max_chunk = max_chunk
        logger.info(f" max_chunk = {self.max_chunk}")

    def get_feats(self, data, ref_len=None):
        """Return (time, feat_dim) features from layer ``self.layer``."""
        if isinstance(data, np.ndarray):
            wav = data
        elif isinstance(data, str):
            wav = self.load_audio(data, ref_len=ref_len)
        else:
            raise TypeError(f"Unexpected data type of argument 1: {type(data)}.")
        with torch.inference_mode():
            wav_t = torch.from_numpy(wav).float().to(self.device).view(1, -1)
            # extract_features returns per-layer outputs; take the last
            # requested layer of the first (only) batch element.
            hidden = self.model.wav2vec2.extract_features(
                wav_t,
                num_layers=self.layer,
            )[0][-1][0]  # (time, feat_dim)
        return hidden.cpu()
class S3PRLFeatureReader(BaseFeatureReader):
    """Extract features from an s3prl upstream via ESPnet's S3prlFrontend."""

    def __init__(
        self,
        fs: Union[int, str] = 16000,
        s3prl_conf: Optional[dict] = None,
        download_dir: str = None,
        multilayer_feature: bool = False,
        layer: int = -1,
        use_gpu: bool = True,
    ):
        self.model = S3prlFrontend(
            fs=fs,
            frontend_conf=s3prl_conf,
            download_dir=download_dir,
            multilayer_feature=multilayer_feature,
            layer=layer,
        )
        self.device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
        self.model = self.model.to(self.device)

    def get_feats(self, data: Union[str, np.ndarray], ref_len=None):
        """Return (time, feat_dim) features for a wav path or waveform array.

        NOTE(review): ``ref_len`` is wrapped in ``torch.LongTensor([ref_len])``
        below, so callers apparently must pass the sample count; the default
        ``None`` would fail there — confirm against callers.
        """
        if isinstance(data, str):
            x = self.load_audio(data, ref_len=ref_len)
        elif isinstance(data, np.ndarray):
            x = data
        else:
            raise TypeError(f"Unexpected data type of argument 1: {type(data)}.")
        with torch.no_grad():
            x = torch.from_numpy(x).float().to(self.device)
            x = x.view(1, -1)
            feat, _ = self.model(x, torch.LongTensor([ref_len]))
        return feat.squeeze(0).cpu()
| 7,609 | 30.97479 | 84 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/feats/dump_km_label.py | # The learn_kmeans.py uses code from Fairseq:
# https://github.com/pytorch/fairseq/blob/master/examples/hubert/simple_kmeans/dump_km_label.py
#
# Thanks to Abdelrahman Mohamed and Wei-Ning Hsu's help in this implementation,
# Their origial Hubert work is in:
# Paper: https://arxiv.org/pdf/2106.07447.pdf
# Code in Fairseq: https://github.com/pytorch/fairseq/tree/master/examples/hubert
import argparse
import logging
import os
import sys
import joblib
import numpy as np
import torch
from ssl_feature_utils import (
ESPnetHubertFeatureReader,
HubertFeatureReader,
MfccFeatureReader,
S3PRLFeatureReader,
format_feature_conf_str,
)
from espnet2.utils.types import str2bool
from espnet.utils.cli_readers import file_reader_helper
from espnet.utils.cli_utils import is_scipy_wav_style
from espnet.utils.cli_writers import file_writer_helper
# Log to stdout (level overridable via the LOGLEVEL environment variable).
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("dump_km_label")

# Maps the "type" field of --feature_conf to the reader class used for
# on-the-fly feature extraction in dump_label().
feature_reader_choice = dict(
    mfcc=MfccFeatureReader,
    fairseq_hubert=HubertFeatureReader,
    espnet_hubert=ESPnetHubertFeatureReader,
    s3prl=S3PRLFeatureReader,
)
def get_parser():
    """Build the argument parser for the k-means label dumping script."""
    filetype_choices = ["mat", "hdf5", "sound.hdf5", "sound"]
    filetype_help = (
        "Specify the file format for the rspecifier. "
        '"mat" is the matrix format in kaldi'
    )

    parser = argparse.ArgumentParser()
    parser.add_argument("--km_path", type=str, required=True)
    parser.add_argument("--use_gpu", type=str2bool, default=False)
    parser.add_argument("--online_feature_extract", type=str2bool, default=False)
    parser.add_argument("--feature_conf", type=str, default=None)
    parser.add_argument(
        "--in_filetype",
        type=str,
        default="sound",
        choices=filetype_choices,
        help=filetype_help,
    )
    parser.add_argument(
        "--out_filetype",
        type=str,
        default="mat",
        choices=filetype_choices,
        help=filetype_help,
    )
    parser.add_argument(
        "rspecifier", type=str, help="Read specifier for feats. e.g. ark:some.ark"
    )
    parser.add_argument(
        "wspecifier", type=str, help="Write specifier for labels. e.g. ark,t:some.txt"
    )
    return parser
class ApplyKmeans(object):
    """Assign each input frame to its nearest k-means centroid.

    Loads a scikit-learn k-means model and precomputes the centroid matrix
    (feat_dim, n_clusters) plus its squared column norms, so the nearest
    centroid reduces to argmin of ||x||^2 - 2 x.C + ||C||^2 per frame.
    """

    def __init__(self, km_path, use_gpu):
        self.km_model = joblib.load(km_path)
        self.C_np = self.km_model.cluster_centers_.transpose()
        self.Cnorm_np = (self.C_np**2).sum(0, keepdims=True)

        self.C = torch.from_numpy(self.C_np)
        self.Cnorm = torch.from_numpy(self.Cnorm_np)
        if use_gpu and torch.cuda.is_available():
            self.C, self.Cnorm = self.C.cuda(), self.Cnorm.cuda()

    def __call__(self, x):
        """Return a numpy vector of cluster ids, one per row of ``x``."""
        if isinstance(x, torch.Tensor):
            x = x.to(self.C.device)
            sq = x.pow(2).sum(1, keepdim=True)
            dist = sq - 2 * torch.matmul(x, self.C) + self.Cnorm
            return dist.argmin(dim=1).cpu().numpy()
        # numpy fall-back path
        sq = (x**2).sum(1, keepdims=True)
        dist = sq - 2 * np.matmul(x, self.C_np) + self.Cnorm_np
        return np.argmin(dist, axis=1)
def dump_label(
    rspecifier,
    in_filetype,
    wspecifier,
    out_filetype,
    km_path,
    use_gpu,
    online_feature_extract,
    **kwargs
):
    """Assign k-means cluster labels to every utterance in ``rspecifier``.

    Two modes:
      * offline (``online_feature_extract=False``): ``rspecifier`` already
        holds dumped SSL features; labels are computed directly from them.
      * online: ``rspecifier`` holds audio; features are extracted on the fly
        with the reader class selected by ``kwargs["feature_conf"]``.

    Labels are written through ``file_writer_helper`` to ``wspecifier``.
    """
    if online_feature_extract:
        assert "feature_conf" in kwargs
        # need to wrap arguments with double-quotes for json string
        feature_conf = format_feature_conf_str(kwargs["feature_conf"])
    else:
        feature_conf = None

    apply_kmeans = ApplyKmeans(km_path, use_gpu=use_gpu)
    if not online_feature_extract:
        # dumped ssl feature in kaldi ark format
        with file_writer_helper(
            wspecifier,
            filetype=out_filetype,
        ) as writer:
            for utt, feat in file_reader_helper(rspecifier, in_filetype):
                lab = apply_kmeans(feat)
                writer[utt] = lab
    else:
        assert feature_conf["type"] in feature_reader_choice
        reader_class = feature_reader_choice[feature_conf["type"]]
        reader_conf = feature_conf.get("conf", dict())

        # values parsed from the JSON conf string arrive as strings;
        # coerce the typed reader options before instantiation
        if reader_conf.get("multilayer_feature", None):
            reader_conf["multilayer_feature"] = str2bool(
                reader_conf["multilayer_feature"]
            )
        if reader_conf.get("layer", None):
            reader_conf["layer"] = int(reader_conf["layer"])

        reader = reader_class(**reader_conf)
        with file_writer_helper(
            wspecifier,
            filetype=out_filetype,
        ) as writer:
            for utt, mat in file_reader_helper(rspecifier, in_filetype):
                if is_scipy_wav_style(mat):
                    # If data is sound file, then got as Tuple[int, ndarray]
                    rate, mat = mat
                    # normalize 16-bit PCM to [-1, 1]
                    mat = mat.astype(np.float64, order="C") / 32768.0
                nsample = len(mat)
                feat = reader.get_feats(mat, nsample).numpy()
                lab = apply_kmeans(feat)
                writer[utt] = lab
    logger.info("finished successfully")


if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    logging.info(str(args))
    dump_label(**vars(args))
| 5,471 | 30.448276 | 99 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/feats/mean_pool_scp.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
# Dongji Gao (2022)
#
# Adapted from fairseq/examples/wav2vec/unsupervised/\
# scripts/mean_pool.py
# to fit the scp data format
# This source code is licensed under the MIT license in
# https://github.com/facebookresearch/fairseq
import argparse
import math
import os
import os.path as osp
from shutil import copyfile
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from npy_append_array import NpyAppendArray
def get_parser():
    """Build the argument parser for the mean-pooling script."""
    p = argparse.ArgumentParser(description="mean pooling of the representation")
    p.add_argument("source", help="directory with features")
    # required run configuration
    for flag, help_text in (
        ("--split", "which split to read"),
        ("--root", "root of espnet"),
        ("--save_dir", "where to save the output"),
    ):
        p.add_argument(flag, help=help_text, required=True)
    # optional tuning knobs
    p.add_argument(
        "--subsample_rate", type=float, default=0.5, help="size to subsample data to"
    )
    p.add_argument("--utt_id", type=str)
    p.add_argument(
        "--remove_extra",
        action="store_true",
        help="if true, removes extra states, otherwise pads with 0s",
    )
    return p
def main():
    """Mean-pool dumped features and write them as per-utterance npy + scp.

    Reads ``<source>/<split>.npy`` (all frames concatenated) and the matching
    ``.lengths`` file, pools every utterance down to
    ``ceil(len * subsample_rate)`` frames, and writes one npy per utterance
    plus a kaldi-style ``feats.scp`` under ``<save_dir>/<split>/``.
    """
    parser = get_parser()
    args = parser.parse_args()

    source_path = osp.join(args.source, args.split)
    print(f"data path: {source_path}")
    # mmap keeps the (potentially huge) concatenated feature matrix on disk
    features = np.load(source_path + ".npy", mmap_mode="r")

    os.makedirs(args.save_dir, exist_ok=True)
    save_path = osp.join(args.save_dir, args.split)

    if osp.exists(save_path + ".npy"):
        os.remove(save_path + ".npy")

    utt_ids = []
    lengths = []
    # one "<utt_id> <n_frames>" line per utterance
    with open(source_path + ".lengths", "r") as lf:
        for line in lf.readlines():
            utt_id, length = line.split()
            utt_ids.append(utt_id)
            lengths.append(length)

    fsz = features.shape[-1]
    start = 0
    output_dir = f"{save_path}/"
    os.makedirs(output_dir, exist_ok=True)
    scp_file = f"{save_path}/feats.scp"
    prefix = args.root  # NOTE(review): assigned but never used below
    with torch.no_grad():
        with open(save_path + ".lengths", "w") as lengths_out, open(
            scp_file, "w"
        ) as sf:
            for length, utt_id in tqdm.tqdm(zip(lengths, utt_ids)):
                utt_id = utt_id.rstrip()
                length = int(length)
                end = start + length
                feats = features[start:end]
                start += length
                x = torch.from_numpy(feats).cuda()
                target_num = math.ceil(length * args.subsample_rate)
                rem = length % target_num

                # Make the frame count divisible by target_num: either trim
                # the tail or pad with copies of the last real frame.
                if rem > 0:
                    if args.remove_extra:
                        to_rem = target_num - rem
                        target_num -= 1
                        x = x[:-to_rem]
                    else:
                        to_add = target_num - rem
                        x = F.pad(x, [0, 0, 0, to_add])
                        x[-to_add:] = x[-to_add - 1]

                # (target_num, group, fsz) -> mean over each group of frames
                x = x.view(target_num, -1, fsz)
                x = x.mean(dim=-2)
                print(target_num, file=lengths_out)
                feat_file = f"{output_dir}/{utt_id}.npy"
                np.save(feat_file, x.cpu().numpy())
                sf.write(f"{utt_id} {feat_file}\n")


if __name__ == "__main__":
    main()
| 3,406 | 30.841121 | 86 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/feats/apply_pca.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Adapted from fairseq/examples/wav2vec/unsupervised/scripts/apply_pca.py
# to fit the scp data format
# This source code is licensed under the MIT license in
# https://github.com/facebookresearch/fairseq
import argparse
import math
import os
import os.path as osp
from shutil import copyfile
import numpy as np
import torch
import tqdm
from npy_append_array import NpyAppendArray
def get_parser():
    """Build the argument parser for the PCA-application script."""
    description = "transforms features via a given pca and stored them in target dir"
    p = argparse.ArgumentParser(description=description)
    p.add_argument("feats_scp", help="features scp file")
    for flag, help_text in (
        ("--split", "which split to read"),
        ("--save_dir", "where to save the output"),
    ):
        p.add_argument(flag, help=help_text, required=True)
    p.add_argument(
        "--pca_path",
        required=True,
        type=str,
        help="pca location. will append _A.npy and _b.npy",
    )
    p.add_argument("--batch_size", type=int, default=2048000, help="batch size")
    return p
def main():
    """Project dumped features with a precomputed affine PCA (x @ A + b).

    Reads per-utterance npy paths from ``feats_scp``, stacks all frames,
    applies the PCA transform on GPU in batches, and appends the result to
    ``<save_dir>/<split>.npy`` (per-utterance lengths go to ``<split>.lengths``).
    """
    parser = get_parser()
    args = parser.parse_args()

    print(f"data path: {args.feats_scp}")
    os.makedirs(args.save_dir, exist_ok=True)

    x = []
    length_file = f"{args.save_dir}/{args.split}.lengths"
    with open(args.feats_scp, "r") as f_scp, open(length_file, "w") as lf:
        for line in f_scp.readlines():
            utt_id, feats_path = line.split()
            feats = np.load(feats_path)
            x.append(feats)
            # record frame counts so the flat output npy can be re-split
            lf.write(f"{utt_id} {feats.shape[0]}\n")
    features = np.vstack(x)

    pca_A = torch.from_numpy(np.load(args.pca_path + "_A.npy")).cuda()
    pca_b = torch.from_numpy(np.load(args.pca_path + "_b.npy")).cuda()

    save_path = osp.join(args.save_dir, args.split)
    # appends batch results without holding the whole output in memory
    npaa = NpyAppendArray(save_path + ".npy")

    batches = math.ceil(features.shape[0] / args.batch_size)

    with torch.no_grad():
        for b in tqdm.trange(batches):
            start = b * args.batch_size
            end = start + args.batch_size
            x = torch.from_numpy(features[start:end]).cuda()
            x = torch.matmul(x, pca_A) + pca_b
            npaa.append(x.cpu().numpy())


if __name__ == "__main__":
    main()
| 2,291 | 28.384615 | 87 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/feats/feats_apply_cluster_faiss.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
# Dongji Gao (2022)
#
# Adapted from fairseq/examples/wav2vec/unsupervised/\
# scripts/wav2vec_apply_cluster_faiss.py
# to fit the scp data format
# This source code is licensed under the MIT license in
# https://github.com/facebookresearch/fairseq
import argparse
import logging
import os
import os.path as osp
import sys
import faiss
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from feats_cluster_faiss import parse_faiss_specs
def get_parser():
    """Build the argument parser for applying faiss clusters to features."""
    p = argparse.ArgumentParser(description="apply clusters")
    p.add_argument("data", help="location of feature scp files")
    p.add_argument("--split", help="split to process", required=True)
    p.add_argument("--labels", help="split to process", default="phn")
    p.add_argument("--output_path", help="output_path", required=True)
    p.add_argument("--model_path", help="model_path", required=True)
    p.add_argument("--layer", "-l", type=int, help="which layer to read", default=14)
    p.add_argument(
        "--max_tsz", type=int, help="batch kmeans up to this much", default=14
    )
    p.add_argument(
        "--delimiter", type=str, help="delimiter for output cluster list", default=" "
    )
    p.add_argument(
        "--faiss_specs",
        "-f",
        type=str,
        help="faiss index specs; separated by space "
        "format is: PCAx_NORM_CLUSx_SPHERICAL -> "
        "PCAx if exists first apply PCA "
        "NORM if exists, normalize the vector by L2 norm "
        "CLUSx must exist, cluster to x clusters "
        "SPEHRICAL if exists, apply spherical kmeans",
        default="l2",
    )
    return p
def get_iterator(args):
    """Build a generator over (features, utt_id, label) triples.

    ``args.data`` is an scp file of "<utt_id> <npy_path>" lines; when a label
    file ``<data>/<split>.<labels>`` exists next to it, one label line is
    yielded per utterance, otherwise the label is ``None``.  Returns
    ``(iterate, num)``: a zero-argument generator factory and the utterance
    count.
    """
    label_path = osp.join(args.data, f"{args.split}.{args.labels}")
    lp = open(label_path, "r") if osp.exists(label_path) else None

    with open(args.data, "r") as fp:
        files = [ln.rstrip() for ln in fp.read().split("\n") if len(ln) > 0]

    if lp is not None:
        lbls = [ln.rstrip() for ln in lp]
    else:
        lbls = [None] * len(files)
    num = len(files)

    def iterate():
        for scp_line, lbl in zip(files, lbls):
            utt_id, feats_path = scp_line.split()
            yield np.load(feats_path), utt_id, lbl

    return iterate, num
def main():
    """Assign faiss k-means cluster ids to every utterance's features.

    Reads "<utt_id> <npy_path>" pairs via ``get_iterator``, optionally applies
    PCA / L2 normalization according to the faiss spec string, searches the
    nearest centroid for every frame on GPU, and writes
    "<utt_id> <cluster ids>" lines to ``<output_path>/<split>.cluster``.
    """
    parser = get_parser()
    args = parser.parse_args()

    try:
        faiss_spec = parse_faiss_specs(args.faiss_specs.rstrip("/"))[0]
    except Exception:
        # BUG FIX: this used to be `print(spec)` with `spec` undefined, so a
        # NameError masked the real parsing error.  Report the offending
        # spec string instead.
        print(args.faiss_specs)
        raise

    print("Faiss Spec:", faiss_spec, file=sys.stderr)

    if faiss_spec.pca:
        A = torch.from_numpy(np.load(osp.join(args.model_path, "pca_A.npy"))).cuda()
        b = torch.from_numpy(np.load(osp.join(args.model_path, "pca_b.npy"))).cuda()
        print("Loaded PCA", file=sys.stderr)

    centroids = np.load(osp.join(args.model_path, "centroids.npy"))
    print("Loaded centroids", centroids.shape, file=sys.stderr)

    # GPU nearest-centroid search: L2 index normally, inner-product index for
    # spherical k-means.
    res = faiss.StandardGpuResources()
    index_flat = (
        faiss.IndexFlatL2(centroids.shape[1])
        if not faiss_spec.sphere
        else faiss.IndexFlatIP(centroids.shape[1])
    )
    faiss_index = faiss.index_cpu_to_gpu(res, 0, index_flat)
    faiss_index.add(centroids)

    generator, num = get_iterator(args)
    iterator = generator()

    if not osp.exists(args.output_path):
        os.makedirs(args.output_path)

    had_labels = False
    label_path = osp.join(args.output_path, f"{args.split}.{args.labels}")
    output_cluster = f"{args.output_path}/{args.split}.cluster"
    with torch.no_grad():
        with open(output_cluster, "w") as oc, open(label_path, "w") as lp:
            for f, fname, lbl in tqdm.tqdm(iterator, total=num):
                if faiss_spec.pca:
                    # NOTE(review): `f` is a numpy array here (np.load in
                    # get_iterator); torch.mm expects a tensor — confirm
                    # whether PCA specs are actually exercised on this path.
                    f = torch.mm(f, A) + b
                if faiss_spec.norm:
                    f = F.normalize(f, p=2, dim=-1)

                _, z = faiss_index.search(f, 1)
                cluster = f"{args.delimiter}".join(str(x.item()) for x in z)
                oc.write(f"{fname} {cluster}\n")

                if lbl is not None:
                    print(lbl, file=lp)
                    had_labels = True
    # no parallel transcript existed; drop the empty label file
    if not had_labels:
        os.remove(label_path)


if __name__ == "__main__":
    main()
| 4,575 | 28.522581 | 84 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/k2/compile_hlg.py | #!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
# 2022 Johns Hopkins University (author: Dongji Gao)
#
# This script is adapted from \
# k2-fsa/icefall/blob/master/egs/librispeech/ASR/local/compile_hlg.py
#
# See https://github.com/k2-fsa/icefall/blob/master/LICENSE
# for clarification regarding multiple authors
"""
This script takes as input lang_dir and generates HLG from
- H, the ctc topology, built from tokens contained in lang_dir/lexicon.txt
(Dongji: Add a H topology with no blank symbol for UASR decoding)
- L, the lexicon, built from lang_dir/L_disambig.pt
Caution: We use a lexicon that contains disambiguation symbols
- G, the LM, built from data/lm/G_3_gram.fst.txt
The generated HLG is saved in $lang_dir/HLG.pt
"""
import argparse
import logging
from pathlib import Path
import k2
import torch
from icefall.lexicon import Lexicon
def get_args():
    """Parse lang/graph directories and the max language-model order."""
    parser = argparse.ArgumentParser()
    # string-valued directory options share one shape
    dir_opts = {
        "--lang_dir": """Lang directory.
        """,
        "--graph_dir": """Graph directory.
        """,
    }
    for flag, help_text in dir_opts.items():
        parser.add_argument(flag, type=str, help=help_text)
    parser.add_argument(
        "--ngram_num",
        type=int,
        default=4,
        help="""Max order in language model.
        """,
    )
    return parser.parse_args()
def make_h_no_blank(max_token_id, self_loop_penalty=-3):
    """Build a blank-free H topology FSA for UASR decoding.

    Each state 0..max_token_id has a penalized self-loop that emits epsilon
    (aux label 0) plus arcs to every other token state emitting that token,
    and every state may terminate via a final arc.  Returns an arc-sorted
    k2 FSA with one aux label per arc.
    """
    num_states = max_token_id + 1
    final_state = num_states
    lines = []
    for src in range(num_states):
        for tok in range(1, num_states):
            if src == tok:
                lines.append(f"{src} {src} {src} 0 {self_loop_penalty}")
            else:
                lines.append(f"{src} {tok} {tok} {tok} 0.0")
        lines.append(f"{src} {final_state} -1 -1 0.0")
    arcs = "\n".join(lines) + f"\n{final_state}"

    fsa = k2.arc_sort(k2.Fsa.from_str(arcs, num_aux_labels=1))
    return fsa
def compile_HLG(lang_dir: str, graph_dir: str, ngram_num: int) -> k2.Fsa:
    """
    Args:
      lang_dir:
        The language directory contains lexicon fst
      graph_dir:
        The graph and output directory contains grammar fst
      ngram_num:
        Max order of n-gram language model

    Return:
        An FSA representing HLG.
    """
    lexicon = Lexicon(lang_dir)
    max_token_id = max(lexicon.tokens)
    logging.info(f"Building ctc_topo. max_token_id: {max_token_id}")
    # blank-free H topology (see make_h_no_blank) instead of the usual CTC topo
    H = make_h_no_blank(max_token_id)

    L = k2.Fsa.from_dict(torch.load(f"{lang_dir}/L_disambig.pt"))

    logging.info(f"Loading G_{ngram_num}_gram.fst.txt")
    with open(f"{graph_dir}/G_{ngram_num}_gram.fst.txt") as f:
        G = k2.Fsa.from_openfst(f.read(), acceptor=False)
        # cache the parsed grammar so later runs can skip the text parse
        torch.save(G.as_dict(), f"{graph_dir}/G_{ngram_num}_gram.pt")

    # "#0" is the first disambiguation symbol; everything at or above its id
    # is a disambiguation symbol to be removed after determinization
    first_token_disambig_id = lexicon.token_table["#0"]
    first_word_disambig_id = lexicon.word_table["#0"]

    L = k2.arc_sort(L)
    G = k2.arc_sort(G)

    logging.info("Intersecting L and G")
    LG = k2.compose(L, G)
    logging.info(f"LG shape: {LG.shape}")
    logging.info("Connecting LG")
    LG = k2.connect(LG)
    logging.info(f"LG shape after k2.connect: {LG.shape}")
    logging.info(type(LG.aux_labels))
    logging.info("Determinizing LG")
    LG = k2.determinize(LG)
    logging.info(type(LG.aux_labels))
    logging.info("Connecting LG after k2.determinize")
    LG = k2.connect(LG)
    logging.info("Removing disambiguation symbols on LG")
    # turn disambiguation symbols into epsilon on both label sides
    LG.labels[LG.labels >= first_token_disambig_id] = 0
    # See https://github.com/k2-fsa/k2/issues/874
    # for why we need to set LG.properties to None
    LG.__dict__["_properties"] = None
    assert isinstance(LG.aux_labels, k2.RaggedTensor)
    LG.aux_labels.values[LG.aux_labels.values >= first_word_disambig_id] = 0
    LG = k2.remove_epsilon(LG)
    logging.info(f"LG shape after k2.remove_epsilon: {LG.shape}")
    LG = k2.connect(LG)
    LG.aux_labels = LG.aux_labels.remove_values_eq(0)
    logging.info("Arc sorting LG")
    LG = k2.arc_sort(LG)
    logging.info("Composing H and LG")
    # CAUTION: The name of the inner_labels is fixed
    # to `tokens`. If you want to change it, please
    # also change other places in icefall that are using
    # it.
    HLG = k2.compose(H, LG, inner_labels="tokens")
    logging.info("Connecting HLG")
    HLG = k2.connect(HLG)
    logging.info("Arc sorting HLG")
    HLG = k2.arc_sort(HLG)
    logging.info(f"HLG.shape: {HLG.shape}")
    return HLG
def main():
    """Compile HLG for the given lang/graph directories and save HLG.pt."""
    args = get_args()
    lang_dir = Path(args.lang_dir)
    graph_dir = Path(args.graph_dir)
    logging.info(f"Processing {lang_dir} and {graph_dir}")

    HLG = compile_HLG(lang_dir, graph_dir, args.ngram_num)
    logging.info(f"Saving HLG.pt to {graph_dir}")
    torch.save(HLG.as_dict(), f"{graph_dir}/HLG.pt")


if __name__ == "__main__":
    formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    logging.basicConfig(format=formatter, level=logging.INFO)
    main()
| 4,986 | 26.860335 | 81 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/k2/prepare_lang.py | #!/usrilbin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
# 2022 Johns Hopkins University (author: Dongji Gao)
#
# This script is adapted from \
# k2-fsa/icefall/blob/master/egs/librispeech/ASR/local/prepare_lang.py
#
# See https://github.com/k2-fsa/icefall/blob/master/LICENSE
# for clarification regarding multiple authors
"""
This script takes as input a lexicon file "data/lang_phone/lexicon.txt"
consisting of words and tokens (i.e., phones) and does the following:
1. Add disambiguation symbols to the lexicon and generate lexicon_disambig.txt
2. Generate tokens.txt, the token table mapping a token to a unique integer.
(Dongji: Add option to use existing tokens.txt (--token_list YOUR_TOKEN_LIST))
3. Generate words.txt, the word table mapping a word to a unique integer.
4. Generate L.pt, in k2 format. It can be loaded by
d = torch.load("L.pt")
lexicon = k2.Fsa.from_dict(d)
5. Generate L_disambig.pt, in k2 format.
"""
import argparse
import math
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Tuple
import k2
import torch
from icefall.lexicon import read_lexicon, write_lexicon
from espnet2.utils.types import str2bool
Lexicon = List[Tuple[str, List[str]]]
def get_args():
    """Parse options controlling lexicon / L-FST preparation."""
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "--lang_dir",
        type=str,
        help="""Input and output directory.
        It should contain a file lexicon.txt.
        Generated files by this script are saved into this directory.
        """,
    )
    ap.add_argument(
        "--debug",
        type=str2bool,
        default=False,
        help="""True for debugging, which will generate
        a visualization of the lexicon FST.
        Caution: If your lexicon contains hundreds of thousands
        of lines, please set it to False!
        """,
    )
    # optional fixed token inventory and silence modelling knobs
    ap.add_argument("--token_list", type=str, default="")
    ap.add_argument("--sil_token", type=str, default="sil")
    ap.add_argument("--sil_prob", type=float, default=0.5)
    return ap.parse_args()
def write_mapping(filename: str, sym2id: Dict[str, int]) -> None:
    """Persist a symbol-to-ID table, one ``<sym> <id>`` pair per line.

    Note:
      No read counterpart is needed; ``k2.SymbolTable.from_file`` consumes
      this format directly.

    Args:
      filename:
        Filename to save the mapping.
      sym2id:
        A dict mapping symbols to IDs.

    Returns:
      Return None.
    """
    with open(filename, "w", encoding="utf-8") as f:
        f.writelines(f"{sym} {i}\n" for sym, i in sym2id.items())
def get_tokens(lexicon: List[Tuple[str, List[str]]]) -> List[str]:
    """Collect the sorted list of unique tokens appearing in *lexicon*.

    Args:
      lexicon:
        A list of (word, token sequence) pairs, as returned by
        ``read_lexicon``.

    Returns:
      A sorted list of the distinct tokens.
    """
    uniq = set()
    for _, tokens in lexicon:
        uniq.update(tokens)
    return sorted(uniq)
def get_words(lexicon: List[Tuple[str, List[str]]]) -> List[str]:
    """Collect the sorted list of unique words appearing in *lexicon*.

    Args:
      lexicon:
        A list of (word, token sequence) pairs, as returned by
        ``read_lexicon``.

    Returns:
      A sorted list of the distinct words.
    """
    return sorted({word for word, _ in lexicon})
def add_disambig_symbols(
    lexicon: List[Tuple[str, List[str]]]
) -> Tuple[List[Tuple[str, List[str]]], int]:
    """Append disambiguation symbols #1, #2, ... to ambiguous pronunciations.

    A pronunciation gets a disambiguation symbol when it is shared by several
    words or is a proper prefix of another pronunciation, so that no two
    entries end up identical and none is a prefix of another (cf. Kaldi's
    add_lex_disambig.pl).

    Args:
      lexicon:
        A list of (word, token sequence) pairs.

    Returns:
      A tuple of:
        - the lexicon with disambiguation symbols appended where needed;
        - the largest disambiguation index used.
    """
    # (1) multiplicity of each full pronunciation
    count = defaultdict(int)
    for _, tokens in lexicon:
        count[" ".join(tokens)] += 1

    # (2) mark every proper prefix of any pronunciation
    issubseq = defaultdict(int)
    for _, tokens in lexicon:
        prefix = tokens[:-1]
        while prefix:
            issubseq[" ".join(prefix)] = 1
            prefix = prefix[:-1]

    # (3) unique, non-prefix pronunciations pass through unchanged; the rest
    # receive #1, #2, ... — reusing the counter per pronunciation so repeats
    # of the same token sequence get successive symbols.  #0 is reserved.
    ans = []
    first_allowed_disambig = 1
    max_disambig = first_allowed_disambig - 1
    last_used = defaultdict(int)
    for word, tokens in lexicon:
        key = " ".join(tokens)
        assert key != ""
        if issubseq[key] == 0 and count[key] == 1:
            ans.append((word, tokens))
            continue

        cur = last_used[key]
        cur = first_allowed_disambig if cur == 0 else cur + 1
        max_disambig = max(max_disambig, cur)
        last_used[key] = cur
        ans.append((word, tokens + [f"#{cur}"]))
    return ans, max_disambig
def generate_id_map(symbols: List[str]) -> Dict[str, int]:
    """Map each symbol to its position in *symbols*.

    Args:
      symbols:
        A list of unique symbols.

    Returns:
      A dict from symbol to its 0-based index.
    """
    mapping = {}
    for idx, sym in enumerate(symbols):
        mapping[sym] = idx
    return mapping
def add_self_loops(
    arcs: List[List[Any]], disambig_token: int, disambig_word: int
) -> List[List[Any]]:
    """Append #0:#0 self-loops that propagate disambiguation symbols.

    A self-loop (input ``disambig_token``, output ``disambig_word``, score 0)
    is added on every state that has at least one outgoing arc with a
    non-epsilon output label.  Unlike Kaldi's fstaddselfloops.pl this works
    on k2-style FSTs, whose single final state needs no loop.

    Args:
      arcs:
        A list of [src_state, dest_state, label, aux_label, score] lists.
      disambig_token:
        Token ID of the symbol `#0`.
      disambig_word:
        Word ID of the symbol `#0`.

    Return:
      A new arc list: the original arcs followed by the self-loops.
    """
    loop_states = {arc[0] for arc in arcs if arc[3] != 0}
    loops = [[s, s, disambig_token, disambig_word, 0] for s in loop_states]
    return arcs + loops
def lexicon_to_fst_nosil(
    lexicon: Lexicon,
    token2id: Dict[str, int],
    word2id: Dict[str, int],
    need_self_loops: bool = False,
):
    """Convert a lexicon to a k2 FST without optional silence.

    The word label is emitted on the first token's arc; the last token of
    each word returns to the start state so words can be concatenated.

    Args:
      lexicon:
        A list of (word, token sequence) pairs.
      token2id:
        Token -> integer ID map; must map "<eps>" to 0.
      word2id:
        Word -> integer ID map; must map "<eps>" to 0.
      need_self_loops:
        If True, add #0:#0 self-loops to propagate disambiguation symbols
        (see :func:`add_self_loops`).

    Returns:
      A k2.Fsa built from the generated arc list.
    """
    start_state = 0
    next_state = 1  # the next un-allocated state, incremented as we go
    arcs = []

    assert token2id["<eps>"] == 0
    assert word2id["<eps>"] == 0

    eps = 0
    for word, tokens in lexicon:
        assert len(tokens) > 0, f"{word} has no pronunciations"
        cur_state = start_state

        word = word2id[word]
        tokens = [token2id[i] for i in tokens]

        for i in range(len(tokens) - 1):
            w = word if i == 0 else eps
            arcs.append([cur_state, next_state, tokens[i], w, 0])
            cur_state = next_state
            next_state += 1
        # the last token loops back to the start state
        i = len(tokens) - 1
        w = word if i == 0 else eps
        arcs.append([cur_state, start_state, tokens[i], w, 0])

    if need_self_loops:
        disambig_token = token2id["#0"]
        disambig_word = word2id["#0"]
        arcs = add_self_loops(
            arcs,
            disambig_token=disambig_token,
            disambig_word=disambig_word,
        )

    final_state = next_state
    arcs.append([start_state, final_state, -1, -1, 0])
    arcs.append([final_state])

    # sort by source state (k2.Fsa.from_str presumably requires this
    # ordering, with the final state on the last line)
    arcs = sorted(arcs, key=lambda arcs: arcs[0])
    arcs = [[str(i) for i in arc] for arc in arcs]
    arcs = [" ".join(arc) for arc in arcs]

    arcs = "\n".join(arcs)
    fsa = k2.Fsa.from_str(arcs, acceptor=False)
    return fsa
def lexicon_to_fst(
    lexicon: Lexicon,
    token2id: Dict[str, int],
    word2id: Dict[str, int],
    sil_token: str = "SIL",
    sil_prob: float = 0.5,
    need_self_loops: bool = False,
) -> k2.Fsa:
    """Convert a lexicon to an FST (in k2 format) with optional silence at
    the beginning and end of each word.

    Args:
      lexicon:
        The input lexicon. See also :func:`read_lexicon`
      token2id:
        A dict mapping tokens to IDs.
      word2id:
        A dict mapping words to IDs.
      sil_token:
        The silence token.
      sil_prob:
        The probability for adding a silence at the beginning and end
        of the word.
      need_self_loops:
        If True, add self-loop to states with non-epsilon output symbols
        on at least one arc out of the state. The input label for this
        self loop is `token2id["#0"]` and the output label is `word2id["#0"]`.

    Returns:
      Return an instance of `k2.Fsa` representing the given lexicon.
    """
    assert sil_prob > 0.0 and sil_prob < 1.0
    # CAUTION: we use score, i.e, negative cost.
    sil_score = math.log(sil_prob)
    no_sil_score = math.log(1.0 - sil_prob)

    start_state = 0
    loop_state = 1  # words enter and leave from here
    sil_state = 2  # words terminate here when followed by silence; this state
    # has a silence transition to loop_state.
    next_state = 3  # the next un-allocated state, will be incremented as we go.
    arcs = []

    assert token2id["<eps>"] == 0
    assert word2id["<eps>"] == 0

    eps = 0

    sil_token = token2id[sil_token]

    # entry: either skip silence or consume one silence token first
    arcs.append([start_state, loop_state, eps, eps, no_sil_score])
    arcs.append([start_state, sil_state, eps, eps, sil_score])
    arcs.append([sil_state, loop_state, sil_token, eps, 0])

    for word, tokens in lexicon:
        assert len(tokens) > 0, f"{word} has no pronunciations"
        cur_state = loop_state

        word = word2id[word]
        tokens = [token2id[i] for i in tokens]

        for i in range(len(tokens) - 1):
            w = word if i == 0 else eps
            arcs.append([cur_state, next_state, tokens[i], w, 0])

            cur_state = next_state
            next_state += 1

        # now for the last token of this word
        # It has two out-going arcs, one to the loop state,
        # the other one to the sil_state.
        i = len(tokens) - 1
        w = word if i == 0 else eps
        arcs.append([cur_state, loop_state, tokens[i], w, no_sil_score])
        arcs.append([cur_state, sil_state, tokens[i], w, sil_score])

    if need_self_loops:
        disambig_token = token2id["#0"]
        disambig_word = word2id["#0"]
        arcs = add_self_loops(
            arcs,
            disambig_token=disambig_token,
            disambig_word=disambig_word,
        )

    final_state = next_state
    arcs.append([loop_state, final_state, -1, -1, 0])
    arcs.append([final_state])

    # sort by source state (final state ends up on the last line) and render
    # the textual arc format consumed by k2.Fsa.from_str
    arcs = sorted(arcs, key=lambda arc: arc[0])
    arcs = [[str(i) for i in arc] for arc in arcs]
    arcs = [" ".join(arc) for arc in arcs]

    arcs = "\n".join(arcs)

    fsa = k2.Fsa.from_str(arcs, acceptor=False)
    return fsa
def main():
    """Build token/word tables and L / L_disambig FSTs from a lexicon.

    Reads ``<lang_dir>/lexicon.txt`` (and optionally a fixed token list),
    adds disambiguation symbols, writes tokens.txt / words.txt /
    lexicon_disambig.txt, and saves L.pt and L_disambig.pt (k2 FSTs).
    """
    args = get_args()
    lang_dir = Path(args.lang_dir)
    lexicon_filename = lang_dir / "lexicon.txt"
    sil_token = args.sil_token
    sil_prob = args.sil_prob

    lexicon = read_lexicon(lexicon_filename)
    # BUG FIX: --token_list defaults to "" (not None), so the original
    # `args.token_list is None` check could never select this branch and the
    # default invocation crashed on open("").  Treat any falsy value as
    # "derive the tokens from the lexicon".
    if not args.token_list:
        tokens = get_tokens(lexicon)
    else:
        tokens = []
        with open(args.token_list, "r") as tl:
            for line in tl.readlines():
                token = line.split()[0]
                assert token not in tokens
                tokens.append(token)
    words = get_words(lexicon)

    lexicon_disambig, max_disambig = add_disambig_symbols(lexicon)

    # reserve the disambiguation symbols #0..#max_disambig as extra tokens
    for i in range(max_disambig + 1):
        disambig = f"#{i}"
        assert disambig not in tokens
        tokens.append(f"#{i}")

    if "<eps>" not in tokens:
        tokens = ["<eps>"] + tokens

    # special word symbols must not collide with real words
    assert "<eps>" not in words
    assert "#0" not in words
    assert "<s>" not in words
    assert "</s>" not in words

    words = ["<eps>", "<unk>"] + words + ["#0", "<s>", "</s>"]

    token2id = generate_id_map(tokens)
    word2id = generate_id_map(words)

    write_mapping(lang_dir / "tokens.txt", token2id)
    write_mapping(lang_dir / "words.txt", word2id)
    write_lexicon(lang_dir / "lexicon_disambig.txt", lexicon_disambig)

    if sil_prob == 0:
        # no optional silence: use the simpler loop topology
        L = lexicon_to_fst_nosil(
            lexicon,
            token2id=token2id,
            word2id=word2id,
        )
        L_disambig = lexicon_to_fst_nosil(
            lexicon_disambig,
            token2id=token2id,
            word2id=word2id,
            need_self_loops=True,
        )
    else:
        L = lexicon_to_fst(
            lexicon,
            token2id=token2id,
            word2id=word2id,
            sil_token=sil_token,
            sil_prob=sil_prob,
        )
        L_disambig = lexicon_to_fst(
            lexicon_disambig,
            token2id=token2id,
            word2id=word2id,
            sil_token=sil_token,
            sil_prob=sil_prob,
            need_self_loops=True,
        )
    torch.save(L.as_dict(), lang_dir / "L.pt")
    torch.save(L_disambig.as_dict(), lang_dir / "L_disambig.pt")

    if args.debug:
        # dump SVG visualizations of both FSTs for inspection
        labels_sym = k2.SymbolTable.from_file(lang_dir / "tokens.txt")
        aux_labels_sym = k2.SymbolTable.from_file(lang_dir / "words.txt")

        L.labels_sym = labels_sym
        L.aux_labels_sym = aux_labels_sym
        L.draw(f"{lang_dir / 'L.svg'}", title="L.pt")

        L_disambig.labels_sym = labels_sym
        L_disambig.aux_labels_sym = aux_labels_sym
        L_disambig.draw(f"{lang_dir / 'L_disambig.svg'}", title="L_disambig.pt")


if __name__ == "__main__":
    main()
| 14,465 | 28.704312 | 81 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/utils/evaluate_cfsd.py | #!/usr/bin/env python3
# Copyright 2020 Wen-Chin Huang and Tomoki Hayashi
# Copyright 2023 Dan Lim
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Evaluate Conditional Frechet Speech Distance
between generated and groundtruth audios
using the s3prl pretrained models."""
import argparse
import fnmatch
import logging
import os
from typing import List
import librosa
import numpy as np
import soundfile as sf
import torch
from scipy import linalg
from espnet2.asr.frontend.s3prl import S3prlFrontend
# from https://github.com/bioinf-jku/TTUR
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.
    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.
    Params:
    -- mu1   : Mean vector of the first Gaussian (e.g. embeddings of
               generated samples).
    -- sigma1: Covariance matrix of the first Gaussian.
    -- mu2   : Mean vector of the second (reference) Gaussian.
    -- sigma2: Covariance matrix of the second (reference) Gaussian.
    -- eps   : Diagonal jitter added when the covariance product is
               (nearly) singular.
    Returns:
    -- : The Frechet Distance.
    """
    # BUG FIX: ``warnings`` was never imported in this file, so reaching the
    # singular-product branch below raised NameError instead of warning.
    import warnings

    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)
    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)
    assert (
        mu1.shape == mu2.shape
    ), "Training and test mean vectors have different lengths"
    assert (
        sigma1.shape == sigma2.shape
    ), "Training and test covariances have different dimensions"
    diff = mu1 - mu2
    # product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = (
            "fid calculation produces singular product;"
            "adding %s to diagonal of cov estimates" % eps
        )
        warnings.warn(msg)
        # Regularize both covariances and retry the matrix square root.
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    # numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError("Imaginary component {}".format(m))
        covmean = covmean.real
    tr_covmean = np.trace(covmean)
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
def find_files(
    root_dir: str, query: List[str] = ["*.flac", "*.wav"], include_root_dir: bool = True
) -> List[str]:
    """Recursively collect files under ``root_dir`` matching any pattern.

    Args:
        root_dir (str): Directory to search (symlinks are followed).
        query (List[str]): fnmatch-style filename patterns.
        include_root_dir (bool): If False, strip the ``root_dir`` prefix.

    Returns:
        List[str]: Paths of every matching file.
    """
    matched: List[str] = []
    for cur_dir, _subdirs, names in os.walk(root_dir, followlinks=True):
        for pattern in query:
            matched.extend(
                os.path.join(cur_dir, name) for name in fnmatch.filter(names, pattern)
            )
    if include_root_dir:
        return matched
    prefix = root_dir + "/"
    return [path.replace(prefix, "") for path in matched]
def _get_basename(path: str) -> str:
return os.path.splitext(os.path.split(path)[-1])[0]
def get_parser() -> argparse.Namespace:
    """Build the command-line parser for CFSD evaluation."""
    p = argparse.ArgumentParser(
        description="Evaluate Conditional Frechet Speech Distance."
    )
    # Positional inputs: generated and ground-truth audio locations.
    p.add_argument(
        "gen_wavdir_or_wavscp",
        type=str,
        help="Path of directory or wav.scp for generated waveforms.",
    )
    p.add_argument(
        "gt_wavdir_or_wavscp",
        type=str,
        help="Path of directory or wav.scp for ground truth waveforms.",
    )
    p.add_argument(
        "--outdir",
        type=str,
        help="Path of directory to write the results.",
    )
    # analysis related
    p.add_argument(
        "--pretrained_model",
        default="wav2vec2",
        type=str,
        help="S3prl pretrained upstream model.",
    )
    p.add_argument("--device", type=str, default="cuda:0", help="Inference device")
    p.add_argument(
        "--verbose",
        default=1,
        type=int,
        help="Verbosity level. Higher is more logging.",
    )
    return p
def main():
    """Run CFSD calculation.

    Reads generated and ground-truth audio (from directories or wav.scp
    files), embeds each pair with an s3prl upstream model, computes the
    Frechet distance between the per-utterance embedding distributions, and
    writes per-utterance and averaged scores to the output directory.
    """
    args = get_parser().parse_args()
    # logging info
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # find files
    # Each input is either a directory (scanned recursively for audio) or a
    # wav.scp file of "<utt_id> <path>" lines; piped commands ("...|") are
    # not supported.
    if os.path.isdir(args.gen_wavdir_or_wavscp):
        gen_files = sorted(find_files(args.gen_wavdir_or_wavscp))
    else:
        with open(args.gen_wavdir_or_wavscp) as f:
            gen_files = [line.strip().split(None, 1)[1] for line in f.readlines()]
        if gen_files[0].endswith("|"):
            raise ValueError("Not supported wav.scp format.")
    if os.path.isdir(args.gt_wavdir_or_wavscp):
        gt_files = sorted(find_files(args.gt_wavdir_or_wavscp))
    else:
        with open(args.gt_wavdir_or_wavscp) as f:
            gt_files = [line.strip().split(None, 1)[1] for line in f.readlines()]
        if gt_files[0].endswith("|"):
            raise ValueError("Not supported wav.scp format.")
    if len(gen_files) == 0:
        raise FileNotFoundError("Not found any generated audio files.")
    if len(gen_files) > len(gt_files):
        raise ValueError(
            "#groundtruth files are less than #generated files "
            f"(#gen={len(gen_files)} vs. #gt={len(gt_files)}). "
            "Please check the groundtruth directory."
        )
    logging.info("The number of utterances = %d" % len(gen_files))
    if torch.cuda.is_available() and ("cuda" in args.device):
        device = args.device
    else:
        device = "cpu"
    s3prl_frontend = S3prlFrontend(
        download_dir="./hub",
        frontend_conf={"upstream": args.pretrained_model},
    )
    s3prl_frontend.to(device)
    # calculate CFSD
    cfsd_dict = dict()
    for i, gen_path in enumerate(gen_files):
        # Match the ground-truth file whose basename occurs in the generated
        # path; exactly one match is required.
        corresponding_list = list(
            filter(lambda gt_path: _get_basename(gt_path) in gen_path, gt_files)
        )
        assert len(corresponding_list) == 1
        gt_path = corresponding_list[0]
        gt_basename = _get_basename(gt_path)
        # load wav file as float64
        gen_x, gen_fs = sf.read(gen_path, dtype="float64")
        gt_x, gt_fs = sf.read(gt_path, dtype="float64")
        # NOTE: resample because s3prl models support only 16kHz audio currently.
        gen_x = librosa.resample(gen_x, orig_sr=gen_fs, target_sr=16000)
        gt_x = librosa.resample(gt_x, orig_sr=gt_fs, target_sr=16000)
        # prepare input: (1, num_samples) float tensors plus lengths
        gen_x = torch.FloatTensor(gen_x).unsqueeze(0).to(device)
        gen_x_length = torch.LongTensor([gen_x.shape[1]]).to(device)
        gt_x = torch.FloatTensor(gt_x).unsqueeze(0).to(device)
        gt_x_length = torch.LongTensor([gt_x.shape[1]]).to(device)
        # speech embedding
        gen_embeds, gen_embeds_len = s3prl_frontend(gen_x, gen_x_length)  # (B,H)
        gt_embeds, gt_embeds_len = s3prl_frontend(gt_x, gt_x_length)  # (B,H)
        gen_embeds = gen_embeds.detach().cpu().numpy()[0]
        gt_embeds = gt_embeds.detach().cpu().numpy()[0]
        # speech distance: Gaussian statistics over the frame axis
        gen_mu = np.mean(gen_embeds, axis=0)
        gt_mu = np.mean(gt_embeds, axis=0)
        gen_sigma = np.cov(gen_embeds, rowvar=False)
        gt_sigma = np.cov(gt_embeds, rowvar=False)
        cfsd = calculate_frechet_distance(gen_mu, gen_sigma, gt_mu, gt_sigma)
        logging.info(f"{gt_basename} {cfsd:.4f}")
        cfsd_dict[gt_basename] = cfsd
    # calculate statistics
    mean_cfsd = np.mean(np.array([v for v in cfsd_dict.values()]))
    std_cfsd = np.std(np.array([v for v in cfsd_dict.values()]))
    logging.info(f"Average: {mean_cfsd:.4f} ± {std_cfsd:.4f}")
    # write results
    if args.outdir is None:
        if os.path.isdir(args.gen_wavdir_or_wavscp):
            args.outdir = args.gen_wavdir_or_wavscp
        else:
            args.outdir = os.path.dirname(args.gen_wavdir_or_wavscp)
    os.makedirs(args.outdir, exist_ok=True)
    with open(f"{args.outdir}/utt2cfsd", "w") as f:
        for utt_id in sorted(cfsd_dict.keys()):
            cfsd = cfsd_dict[utt_id]
            f.write(f"{utt_id} {cfsd:.4f}\n")
    with open(f"{args.outdir}/cfsd_avg_result.txt", "w") as f:
        f.write(f"#utterances: {len(gen_files)}\n")
        f.write(f"Average: {mean_cfsd:.4f} ± {std_cfsd:.4f}")
    logging.info("Successfully finished CFSD evaluation.")
if __name__ == "__main__":
    main()
| 9,455 | 32.892473 | 88 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/utils/evaluate_secs.py | #!/usr/bin/env python3
# Copyright 2020 Wen-Chin Huang and Tomoki Hayashi
# Copyright 2023 Dan Lim
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Evaluate Speaker Embedding Cosine Similarity
between generated and groundtruth audios using X-vector
of the speechbrain pretrained models"""
import argparse
import fnmatch
import logging
import os
from typing import List
import librosa
import numpy as np
import soundfile as sf
import torch
from scipy import spatial
from speechbrain.dataio.preprocess import AudioNormalizer
from speechbrain.pretrained import EncoderClassifier
class XVExtractor:
    """Extract X-vector from speechbrain pretrained models"""
    def __init__(self, args, device):
        # Target device string, e.g. "cuda:0" or "cpu".
        self.device = device
        # Resamples/normalizes raw audio into the format the encoder expects.
        self.audio_norm = AudioNormalizer()
        # Pretrained speaker-embedding encoder; ``args.pretrained_model`` is a
        # speechbrain source id (default elsewhere in this script:
        # "speechbrain/spkrec-ecapa-voxceleb").
        self.model = EncoderClassifier.from_hparams(
            source=args.pretrained_model, run_opts={"device": device}
        )
    def __call__(self, wav, in_sr):
        # wav: 1-D numpy waveform; in_sr: its sampling rate.
        wav = self.audio_norm(torch.from_numpy(wav), in_sr).to(self.device)
        # NOTE(review): ``encode_batch`` presumably returns a batched tensor
        # and ``[0]`` strips the batch axis — confirm against speechbrain docs.
        embeds = self.model.encode_batch(wav).detach().cpu().numpy()[0]
        return embeds
def find_files(
    root_dir: str, query: List[str] = ["*.flac", "*.wav"], include_root_dir: bool = True
) -> List[str]:
    """Walk ``root_dir`` and return every file matching a query pattern.

    Args:
        root_dir (str): Root directory to search; symlinks are followed.
        query (List[str]): fnmatch patterns tested against filenames.
        include_root_dir (bool): If False, remove the ``root_dir`` prefix.

    Returns:
        List[str]: Matching file paths.
    """
    hits: List[str] = []
    for dirpath, _dirs, filenames in os.walk(root_dir, followlinks=True):
        for pattern in query:
            hits += [os.path.join(dirpath, n) for n in fnmatch.filter(filenames, pattern)]
    if not include_root_dir:
        hits = [h.replace(root_dir + "/", "") for h in hits]
    return hits
def _get_basename(path: str) -> str:
return os.path.splitext(os.path.split(path)[-1])[0]
def get_parser() -> argparse.Namespace:
    """Build the command-line parser for SECS evaluation."""
    p = argparse.ArgumentParser(
        description="Evaluate Speaker Embedding Cosine Similarity."
    )
    # Positional inputs: generated and ground-truth audio locations.
    p.add_argument(
        "gen_wavdir_or_wavscp",
        type=str,
        help="Path of directory or wav.scp for generated waveforms.",
    )
    p.add_argument(
        "gt_wavdir_or_wavscp",
        type=str,
        help="Path of directory or wav.scp for ground truth waveforms.",
    )
    p.add_argument(
        "--outdir",
        type=str,
        help="Path of directory to write the results.",
    )
    # analysis related
    p.add_argument(
        "--pretrained_model",
        default="speechbrain/spkrec-ecapa-voxceleb",
        type=str,
        help="Speechbrain pretrained model.",
    )
    p.add_argument("--device", type=str, default="cuda:0", help="Inference device")
    p.add_argument(
        "--verbose",
        default=1,
        type=int,
        help="Verbosity level. Higher is more logging.",
    )
    return p
def main():
    """Run SECS calculation.

    Reads generated and ground-truth audio (from directories or wav.scp
    files), extracts a speaker embedding for each with a speechbrain
    pretrained model, and writes per-utterance and averaged cosine
    similarities to the output directory.
    """
    args = get_parser().parse_args()
    # logging info
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # find files
    # Each input is either a directory (scanned recursively for audio) or a
    # wav.scp file of "<utt_id> <path>" lines; piped commands ("...|") are
    # not supported.
    if os.path.isdir(args.gen_wavdir_or_wavscp):
        gen_files = sorted(find_files(args.gen_wavdir_or_wavscp))
    else:
        with open(args.gen_wavdir_or_wavscp) as f:
            gen_files = [line.strip().split(None, 1)[1] for line in f.readlines()]
        if gen_files[0].endswith("|"):
            raise ValueError("Not supported wav.scp format.")
    if os.path.isdir(args.gt_wavdir_or_wavscp):
        gt_files = sorted(find_files(args.gt_wavdir_or_wavscp))
    else:
        with open(args.gt_wavdir_or_wavscp) as f:
            gt_files = [line.strip().split(None, 1)[1] for line in f.readlines()]
        if gt_files[0].endswith("|"):
            raise ValueError("Not supported wav.scp format.")
    if len(gen_files) == 0:
        raise FileNotFoundError("Not found any generated audio files.")
    if len(gen_files) > len(gt_files):
        raise ValueError(
            "#groundtruth files are less than #generated files "
            f"(#gen={len(gen_files)} vs. #gt={len(gt_files)}). "
            "Please check the groundtruth directory."
        )
    logging.info("The number of utterances = %d" % len(gen_files))
    if torch.cuda.is_available() and ("cuda" in args.device):
        device = args.device
    else:
        device = "cpu"
    xv_extractor = XVExtractor(args, device)
    # calculate SECS
    secs_dict = dict()
    for i, gen_path in enumerate(gen_files):
        # Match the ground-truth file whose basename occurs in the generated
        # path; exactly one match is required.
        corresponding_list = list(
            filter(lambda gt_path: _get_basename(gt_path) in gen_path, gt_files)
        )
        assert len(corresponding_list) == 1
        gt_path = corresponding_list[0]
        gt_basename = _get_basename(gt_path)
        # load wav file as int16
        gen_x, gen_fs = sf.read(gen_path, dtype="int16")
        gt_x, gt_fs = sf.read(gt_path, dtype="int16")
        fs = gen_fs
        if gen_fs != gt_fs:
            # BUG FIX: ``np.float`` was removed in NumPy 1.24 and positional
            # sample-rate arguments were removed in librosa 0.10; use
            # ``np.float64`` and keyword arguments.
            gt_x = librosa.resample(
                gt_x.astype(np.float64), orig_sr=gt_fs, target_sr=gen_fs
            )
        # Amp Normalization -1 ~ 1
        # NOTE: all-zero (silent) audio would make amax zero and yield NaNs.
        gen_amax = np.amax(np.absolute(gen_x))
        gen_x = gen_x.astype(np.float32) / gen_amax
        gt_amax = np.amax(np.absolute(gt_x))
        gt_x = gt_x.astype(np.float32) / gt_amax
        # X-vector embedding
        gen_embeds = xv_extractor(gen_x, fs)
        gt_embeds = xv_extractor(gt_x, fs)
        # Cosine Similarity
        secs = 1 - spatial.distance.cosine(gen_embeds[0], gt_embeds[0])
        logging.info(f"{gt_basename} {secs:.4f}")
        secs_dict[gt_basename] = secs
    # calculate statistics
    mean_secs = np.mean(np.array([v for v in secs_dict.values()]))
    std_secs = np.std(np.array([v for v in secs_dict.values()]))
    logging.info(f"Average: {mean_secs:.4f} ± {std_secs:.4f}")
    # write results
    if args.outdir is None:
        if os.path.isdir(args.gen_wavdir_or_wavscp):
            args.outdir = args.gen_wavdir_or_wavscp
        else:
            args.outdir = os.path.dirname(args.gen_wavdir_or_wavscp)
    os.makedirs(args.outdir, exist_ok=True)
    with open(f"{args.outdir}/utt2secs", "w") as f:
        for utt_id in sorted(secs_dict.keys()):
            secs = secs_dict[utt_id]
            f.write(f"{utt_id} {secs:.4f}\n")
    with open(f"{args.outdir}/secs_avg_result.txt", "w") as f:
        f.write(f"#utterances: {len(gen_files)}\n")
        f.write(f"Average: {mean_secs:.4f} ± {std_secs:.4f}")
    logging.info("Successfully finished SECS evaluation.")
if __name__ == "__main__":
    main()
| 7,171 | 31.6 | 88 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/utils/extract_xvectors.py | #!/usr/bin/env python3
# 2022, Hitachi LTD.; Nelson Yalta
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import os
import sys
from pathlib import Path
import kaldiio
import librosa
import numpy as np
import torch
from tqdm.contrib import tqdm
from espnet2.fileio.sound_scp import SoundScpReader
def get_parser():
    """Construct the parser."""
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    p.add_argument("--pretrained_model", type=str, help="Pretrained model.")
    p.add_argument(
        "--toolkit",
        type=str,
        help="Toolkit for Extracting X-vectors.",
        choices=["espnet", "speechbrain", "rawnet"],
    )
    p.add_argument("--verbose", type=int, default=1, help="Verbosity level.")
    p.add_argument("--device", type=str, default="cuda:0", help="Inference device")
    # Positional in/out directories.
    p.add_argument(
        "in_folder", type=Path, help="Path to the input kaldi data directory."
    )
    p.add_argument(
        "out_folder",
        type=Path,
        help="Output folder to save the xvectors.",
    )
    return p
class XVExtractor:
    """Speaker-embedding extractor backed by speechbrain or RawNet3.

    The backend is chosen by ``args.toolkit`` ("speechbrain" or "rawnet");
    the heavyweight third-party imports happen lazily inside ``__init__`` so
    only the selected toolkit needs to be installed.
    """
    def __init__(self, args, device):
        self.toolkit = args.toolkit
        self.device = device
        if self.toolkit == "speechbrain":
            from speechbrain.dataio.preprocess import AudioNormalizer
            from speechbrain.pretrained import EncoderClassifier
            self.audio_norm = AudioNormalizer()
            self.model = EncoderClassifier.from_hparams(
                source=args.pretrained_model, run_opts={"device": device}
            )
        elif self.toolkit == "rawnet":
            from RawNet3 import RawNet3
            from RawNetBasicBlock import Bottle2neck
            self.model = RawNet3(
                Bottle2neck,
                model_scale=8,
                context=True,
                summed=True,
                encoder_type="ECA",
                nOut=256,
                out_bn=False,
                sinc_stride=10,
                log_sinc=True,
                norm_sinc="mean",
                grad_mult=1,
            )
            # Pretrained weights are expected under the repo's tools/ dir,
            # resolved relative to the current working directory.
            tools_dir = Path(os.getcwd()).parent.parent.parent / "tools"
            self.model.load_state_dict(
                torch.load(
                    tools_dir / "RawNet/python/RawNet3/models/weights/model.pt",
                    map_location=lambda storage, loc: storage,
                )["model"]
            )
            self.model.to(device).eval()
    def rawnet_extract_embd(self, audio, n_samples=48000, n_segments=10):
        """Average RawNet3 embeddings over ``n_segments`` fixed-size crops.

        Args:
            audio: 1-D numpy waveform (mono only).
            n_samples: Crop length in samples (48000 = 3 s at 16 kHz).
            n_segments: Number of evenly spaced crops to average over.
        """
        if len(audio.shape) > 1:
            raise ValueError(
                "RawNet3 supports mono input only."
                f"Input data has a shape of {audio.shape}."
            )
        if len(audio) < n_samples:  # RawNet3 was trained using utterances of 3 seconds
            # Pad short audio by wrapping it around until one crop fits.
            shortage = n_samples - len(audio) + 1
            audio = np.pad(audio, (0, shortage), "wrap")
        audios = []
        # Evenly spaced crop start positions covering the whole utterance.
        startframe = np.linspace(0, len(audio) - n_samples, num=n_segments)
        for asf in startframe:
            audios.append(audio[int(asf) : int(asf) + n_samples])
        audios = torch.from_numpy(np.stack(audios, axis=0).astype(np.float32)).to(
            self.device
        )
        with torch.no_grad():
            output = self.model(audios)
        # Mean over the segment axis -> a single embedding vector.
        return output.mean(0).detach().cpu().numpy()
    def __call__(self, wav, in_sr):
        """Return the speaker embedding of ``wav`` sampled at ``in_sr``."""
        if self.toolkit == "speechbrain":
            wav = self.audio_norm(torch.from_numpy(wav), in_sr).to(self.device)
            embeds = self.model.encode_batch(wav).detach().cpu().numpy()[0]
        elif self.toolkit == "rawnet":
            # RawNet3 expects 16 kHz input.
            wav = librosa.resample(wav, orig_sr=in_sr, target_sr=16000)
            embeds = self.rawnet_extract_embd(wav)
        return embeds
def main(argv):
    """Extract an X-vector per utterance and a mean X-vector per speaker.

    Reads a Kaldi data directory (spk2utt + wav.scp), embeds every utterance
    with the selected toolkit, and writes per-utterance and per-speaker
    xvector ark/scp pairs to the output folder.
    """
    parser = get_parser()
    args = parser.parse_args(argv)
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    if torch.cuda.is_available() and ("cuda" in args.device):
        device = args.device
    else:
        device = "cpu"
    if args.toolkit in ("speechbrain", "rawnet"):
        # Prepare spk2utt for mean x-vector
        spk2utt = dict()
        with open(os.path.join(args.in_folder, "spk2utt"), "r") as reader:
            for line in reader:
                details = line.split()
                spk2utt[details[0]] = details[1:]
        wav_scp = SoundScpReader(os.path.join(args.in_folder, "wav.scp"), np.float32)
        os.makedirs(args.out_folder, exist_ok=True)
        # Kaldi-style ark/scp writers for utterance- and speaker-level vectors.
        writer_utt = kaldiio.WriteHelper(
            "ark,scp:{0}/xvector.ark,{0}/xvector.scp".format(args.out_folder)
        )
        writer_spk = kaldiio.WriteHelper(
            "ark,scp:{0}/spk_xvector.ark,{0}/spk_xvector.scp".format(args.out_folder)
        )
        xv_extractor = XVExtractor(args, device)
        for speaker in tqdm(spk2utt):
            xvectors = list()
            for utt in spk2utt[speaker]:
                in_sr, wav = wav_scp[utt]
                # X-vector Embedding
                embeds = xv_extractor(wav, in_sr)
                writer_utt[utt] = np.squeeze(embeds)
                xvectors.append(embeds)
            # Speaker Normalization: mean of the speaker's utterance vectors
            embeds = np.mean(np.stack(xvectors, 0), 0)
            writer_spk[speaker] = embeds
        writer_utt.close()
        writer_spk.close()
    elif args.toolkit == "espnet":
        raise NotImplementedError(
            "Follow details at: https://github.com/espnet/espnet/issues/3040"
        )
    else:
        raise ValueError(
            "Unkown type of toolkit. Only supported: speechbrain, rawnet, espnet, kaldi"
        )
if __name__ == "__main__":
    main(sys.argv[1:])
| 6,207 | 32.923497 | 88 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/utils/sklearn_km.py | # The sklearn_km.py uses code from Fairseq:
# https://github.com/pytorch/fairseq/blob/master/examples/hubert/simple_kmeans/learn_kmeans.py
#
# Thanks to Abdelrahman Mohamed and Wei-Ning Hsu's help in this implementation,
# Their origial Hubert work is in:
# Paper: https://arxiv.org/pdf/2106.07447.pdf
# Code in Fairseq: https://github.com/pytorch/fairseq/tree/master/examples/hubert
import argparse
import logging
import math
import os
import sys
import warnings
from random import sample
import fairseq
import joblib
import numpy as np
import soundfile as sf
import torch
import torchaudio
import tqdm
from feature_loader import HubertFeatureReader, MfccFeatureReader
from sklearn.cluster import MiniBatchKMeans
from espnet2.asr.encoder.hubert_encoder import FairseqHubertEncoder
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logger = logging.getLogger("sklearn_kmeans")
def get_parser():
    """Build the command-line parser for k-means training."""
    p = argparse.ArgumentParser()
    p.add_argument(
        "--feats-dir", type=str, help="folder contains wav.scp for training"
    )
    p.add_argument(
        "--n-clusters", default=100, type=int, help="number of clusters for K-Means"
    )
    p.add_argument("--nj", default=1, type=int, help="only support mfcc")
    p.add_argument("--seed", default=0, type=int)
    p.add_argument("--fs", type=int, default=16000)
    p.add_argument("--feature-type", type=str, default="mfcc")
    p.add_argument("--hubert-model-url", type=str, default=None)
    p.add_argument("--hubert-model-path", type=str, default=None)
    p.add_argument(
        "--portion", type=float, default=1.0, help="Using a subset of the data."
    )
    # Hyperparameters forwarded to sklearn's MiniBatchKMeans.
    km_group = p.add_argument_group(description="K-means model.")
    km_group.add_argument("--km-path", type=str, help="path for k-means model.")
    km_group.add_argument("--init", default="k-means++")
    km_group.add_argument("--max-iter", default=100, type=int)
    km_group.add_argument("--batch-size", default=10000, type=int)
    km_group.add_argument("--tol", default=0.0, type=float)
    km_group.add_argument("--max-no-improvement", default=100, type=int)
    km_group.add_argument("--n-init", default=20, type=int)
    km_group.add_argument("--reassignment-ratio", default=0.0, type=float)
    return p
def get_path_iterator(wav, portion=0.1):
    """Build an iterator over (utt_id, path) pairs from a wav.scp file.

    Args:
        wav: Path to a Kaldi-style wav.scp file ("<utt_id> <path>" per line).
        portion: Fraction (0, 1] of lines to keep, sampled uniformly at
            random (ordering of the result is randomized by the sampling).

    Returns:
        Tuple of (iterator factory, number of selected lines).
    """
    # Use a context manager so the file handle is closed promptly.
    with open(wav, "r") as f:
        lines = [line.rstrip() for line in f]
    lines = sample(lines, int(portion * len(lines)))

    def iterate():
        for line in lines:
            # BUG FIX: split on any whitespace at most once, so tab-separated
            # entries and runs of spaces are handled (the previous
            # ``split(" ")`` broke on anything but a single space).
            utt_id, path = line.split(None, 1)
            yield utt_id, path

    return iterate, len(lines)
def get_mfcc_feature(feats_dir, fs, nj, portion):
    """Extract MFCC features for a sampled portion of ``feats_dir``/wav.scp.

    Args:
        feats_dir: Directory containing ``wav.scp``.
        fs: Sampling rate passed to the MFCC reader.
        nj: Number of parallel jobs (>1 uses joblib).
        portion: Fraction of utterances to sample.

    Returns:
        numpy array of stacked per-frame features across all sampled
        utterances.
    """
    reader = MfccFeatureReader(fs)
    print(f"{feats_dir}/wav.scp")
    generator, num = get_path_iterator(f"{feats_dir}/wav.scp", portion)
    iterator = generator()
    if nj > 1:
        # NOTE(review): this branch collects the raw outputs of
        # ``reader.get_feats`` (presumably torch tensors, given the serial
        # branch calls .cpu().numpy()) without conversion, and skips the
        # per-utterance frame shuffle done below — confirm this asymmetry
        # is intended.
        feats = joblib.Parallel(n_jobs=nj)(
            joblib.delayed(reader.get_feats)(path)
            for utt_id, path in tqdm.tqdm(iterator, total=num)
        )
    else:
        feats = []
        for utt_id, path in tqdm.tqdm(iterator, total=num):
            feat = reader.get_feats(path)
            feats.append(feat.cpu().numpy())
            # NOTE(review): torch's .numpy() shares memory with the tensor,
            # so this in-place shuffle also permutes the array appended
            # above — presumably intended to randomize frame order per
            # utterance before k-means; confirm.
            np.random.shuffle(feat)
    logger.info("Getting MFCC feature successfully")
    return np.vstack(feats)
def get_hubert_feature(feats_dir, fs, portion, url, dir, layer):
    """Extract HuBERT hidden features for a sampled portion of wav.scp.

    Args:
        feats_dir: Directory containing ``wav.scp``.
        fs: Sampling rate passed to the HuBERT reader.
        portion: Fraction of utterances to sample.
        url: Download URL for the pretrained HuBERT model.
        dir: Local directory holding the pretrained HuBERT model.
        layer: Transformer layer index to take features from.

    Returns:
        numpy array of stacked per-frame features across all sampled
        utterances.
    """
    reader = HubertFeatureReader(fs, url, dir, layer)
    generator, num = get_path_iterator(f"{feats_dir}/wav.scp", portion)
    iterator = generator()
    feats = []
    for utt_id, path in tqdm.tqdm(iterator, total=num):
        feat = reader.get_feats(path)
        feats.append(feat.cpu().numpy())
        # NOTE(review): torch's .numpy() shares memory with the tensor, so
        # this in-place shuffle also permutes the array appended above —
        # presumably intended to randomize frame order per utterance; confirm.
        np.random.shuffle(feat)
    logger.info("Getting HuBERT feature successfully")
    return np.vstack(feats)
def load_feature(
    feats_dir,
    fs,
    nj,
    portion,
    feature_type,
    hubert_model_url,
    hubert_model_path,
):
    """Dispatch feature extraction by ``feature_type``.

    "mfcc" extracts MFCC features; "hubertN" extracts hidden features from
    layer N of a pretrained HuBERT model. Any other value raises ValueError.
    """
    if feature_type == "mfcc":
        return get_mfcc_feature(feats_dir, fs, nj, portion)
    if "hubert" in feature_type:
        # e.g. "hubert6" -> layer 6
        layer = int(feature_type.replace("hubert", ""))
        return get_hubert_feature(
            feats_dir, fs, portion, hubert_model_url, hubert_model_path, layer
        )
    raise ValueError(f"feature_type: {feature_type}")
def train_km_model(
    n_clusters,
    init,
    max_iter,
    batch_size,
    tol,
    max_no_improvement,
    n_init,
    reassignment_ratio,
):
    """Construct an (unfitted) MiniBatchKMeans with the given hyperparameters."""
    km_config = {
        "n_clusters": n_clusters,
        "init": init,
        "max_iter": max_iter,
        "batch_size": batch_size,
        "verbose": 1,
        "compute_labels": False,
        "tol": tol,
        "max_no_improvement": max_no_improvement,
        "init_size": None,
        "n_init": n_init,
        "reassignment_ratio": reassignment_ratio,
    }
    return MiniBatchKMeans(**km_config)
def learn_kmeans(
    feats,
    km_path,
    n_clusters,
    init,
    max_iter,
    batch_size,
    tol,
    max_no_improvement,
    n_init,
    reassignment_ratio,
):
    """Fit a MiniBatchKMeans model on ``feats`` and dump it to ``km_path``."""
    model = train_km_model(
        n_clusters,
        init,
        max_iter,
        batch_size,
        tol,
        max_no_improvement,
        n_init,
        reassignment_ratio,
    )
    model.fit(feats)
    joblib.dump(model, f"{km_path}")
    # Mean squared distance to the closest centroid (score is its negation).
    inertia = -model.score(feats) / len(feats)
    logger.info("total intertia: %.5f", inertia)
    logger.info("K-means training successfully")
def main(args):
    """Entry point: extract features, then fit and save the k-means model."""
    np.random.seed(args.seed)
    print("Loading Features")
    feats = load_feature(
        feats_dir=args.feats_dir,
        fs=args.fs,
        nj=args.nj,
        portion=args.portion,
        feature_type=args.feature_type.lower(),
        hubert_model_path=args.hubert_model_path,
        hubert_model_url=args.hubert_model_url,
    )
    print("Learning kmeans")
    learn_kmeans(
        feats,
        km_path=args.km_path,
        n_clusters=args.n_clusters,
        init=args.init,
        max_iter=args.max_iter,
        batch_size=args.batch_size,
        tol=args.tol,
        max_no_improvement=args.max_no_improvement,
        n_init=args.n_init,
        reassignment_ratio=args.reassignment_ratio,
    )
if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    logging.info(str(args))
    main(args)
| 6,389 | 27.526786 | 98 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/utils/evaluate_whisper_inference.py | #!/usr/bin/env python3
import argparse
import logging
import os
import sys
from distutils.version import LooseVersion
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.quantization
import whisper
from typeguard import check_argument_types, check_return_type
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
class Speech2Text:
    """Thin wrapper around an OpenAI Whisper model for transcription."""
    def __init__(
        self,
        model_tag: str = "base",
        device: str = "cpu",
    ):
        assert check_argument_types()
        # Load the pretrained Whisper model and move it to the target device.
        self.model = whisper.load_model(model_tag).to(device)
        self.device = device
    @torch.no_grad()
    def __call__(self, speech: str) -> Optional[str]:
        """Inference
        Args:
            speech: Path to an input audio file.
        Returns:
            The transcribed text.
        """
        assert check_argument_types()
        # Input as audio signal
        result = self.model.transcribe(speech)
        return result["text"]
def inference(
    output_dir: str,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: str,
    key_file: Optional[str],
    model_tag: Optional[str],
    allow_variable_data_keys: bool,
):
    """Run Whisper transcription over a wav.scp and write 1-best results.

    Args:
        output_dir: Directory for the Kaldi-style "text" output.
        ngpu: Number of GPUs (0 = CPU; only single-GPU decoding supported).
        seed: Random seed.
        num_workers: Unused; kept for interface compatibility.
        log_level: Logging level.
        data_path_and_name_and_type: Path to a wav.scp ("<key> <audio path>").
        key_file: Unused; kept for interface compatibility.
        model_tag: Whisper model tag (e.g. "base").
        allow_variable_data_keys: Unused; kept for interface compatibility.
    """
    assert check_argument_types()
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build speech2text
    speech2text = Speech2Text(
        model_tag=model_tag,
        device=device,
    )
    # 3. Build data-iterator
    # BUG FIX: the wav.scp handle was previously opened and never closed;
    # use a context manager, and skip blank lines so they cannot break the
    # (key, path) unpacking below.
    info_list = []
    with open(data_path_and_name_and_type, "r", encoding="utf-8") as wavscp:
        for line in wavscp:
            if not line.strip():
                continue
            info_list.append(line.split(maxsplit=1))
    # 4. Start for-loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for key, audio_file in info_list:
            # N-best list of (text, token, token_int, hyp_object)
            results = speech2text(os.path.abspath(audio_file.strip()))
            # Normal ASR
            ibest_writer = writer["1best_recog"]
            # Write the result to each file
            ibest_writer["text"][key] = results
def get_parser():
    """Build the command-line parser for Whisper-based ASR decoding."""
    p = config_argparse.ArgumentParser(
        description="ASR Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    p.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    p.add_argument("--output_dir", type=str, required=True)
    p.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    p.add_argument("--seed", type=int, default=0, help="Random seed")
    p.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    data_group = p.add_argument_group("Input data related")
    data_group.add_argument(
        "--data_path_and_name_and_type",
        type=str,
        required=True,
    )
    data_group.add_argument("--key_file", type=str_or_none)
    data_group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    model_group = p.add_argument_group("The model configuration related")
    model_group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )
    return p
def main(cmd=None):
    """Command-line entry point: parse arguments and run inference."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    # Drop the "--config" bookkeeping entry; inference() does not accept it.
    options = vars(args)
    options.pop("config", None)
    inference(**options)
if __name__ == "__main__":
    main()
| 4,560 | 25.829412 | 82 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/utils/plot_sinc_filters.py | #!/usr/bin/env python3
# 2020, Technische Universität München; Nicolas Lindae, Ludwig Kürzinger
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Visualize Sinc convolution filters.
Description:
This program loads a pretrained Sinc convolution of an ESPnet2 ASR model and
plots filters, as well as the bandpass frequencies. The learned filter values
are automatically read out from a trained model file (`*.pth`). Plots are
saved to the specified output directory.
"""
import argparse
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
def get_parser():
    """Construct the parser."""
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    p.add_argument("--sample_rate", type=int, default=16000, help="Sampling rate.")
    p.add_argument(
        "--all", action="store_true", help="Plot every filter in its own plot."
    )
    p.add_argument(
        "--filetype", type=str, default="png", help="Filetype (svg, png)."
    )
    p.add_argument(
        "--filter-key",
        type=str,
        default="preencoder.filters.f",
        help="Name of the torch module the Sinc filter parameters are stored"
        " within the model file.",
    )
    p.add_argument(
        "--scale",
        type=str,
        default="mel",
        choices=["mel", "bark"],
        help="Filter bank initialization values.",
    )
    # Positional arguments: the trained model and an optional output folder.
    p.add_argument(
        "model_path", type=str, help="Path to the trained model file (*.pth)."
    )
    p.add_argument(
        "out_folder",
        type=Path,
        nargs="?",
        default=Path("plot_sinc_filters").absolute(),
        help="Output folder to save the plots in.",
    )
    return p
def convert_parameter_to_frequencies(f1, f2, sample_rate, sorted):
    """Convert normalized Sinc filter parameters to absolute frequencies.

    ``f1`` and ``f2`` are frequencies normalized to the sampling rate; the
    band edges are recovered, clipped to [0, Nyquist], and optionally sorted
    by center frequency.

    Args:
        f1: Lower frequency relative to sample rate.
        f2: Higher frequency relative to sample rate.
        sample_rate: Sample rate in Hz.
        sorted: If truthy, order all outputs by center frequency.

    Returns:
        Tuple of (lower band edges, upper band edges, center frequencies),
        all in Hz.
    """
    nyquist = sample_rate / 2
    lower = np.clip(np.abs(f1) * sample_rate, 0, nyquist)
    upper = np.clip((np.abs(f1) + np.abs(f2 - f1)) * sample_rate, 0, nyquist)
    center = (upper + lower) / 2
    if sorted:
        order = np.argsort(center)
        lower, center, upper = lower[order], center[order], upper[order]
    return lower, upper, center
def plot_filtergraph(
    filters: torch.Tensor,
    sample_rate: int,
    img_path: str,
    sorted: bool = True,
    logscale: bool = False,
    scale: str = "mel",
):
    """Plot the Sinc filter bandpass frequencies.

    Overlays the learned filter bands against the corresponding
    initialization filter bank for comparison.

    Args:
        filters: Filter parameters.
        sample_rate: Sample rate of signal.
        img_path: Output plot file.
        sorted: Sort bandpasses by center frequency.
        logscale: Set Y axis to logarithmic scale.
        scale: Reference filter bank to plot ("mel" or "bark").
    """
    if scale == "mel":
        from espnet2.layers.sinc_conv import MelScale
        f = MelScale.bank(128, sample_rate).detach().cpu().numpy()
    elif scale == "bark":
        from espnet2.layers.sinc_conv import BarkScale
        f = BarkScale.bank(128, sample_rate).detach().cpu().numpy()
    else:
        raise NotImplementedError
    # Normalize the reference bank to the sample rate, matching the learned
    # parameters' normalization.
    f = f / sample_rate
    f1, f2 = f[:, 0], f[:, 1]
    f_mins, f_maxs, f_mids = convert_parameter_to_frequencies(
        filters[:, 0], filters[:, 1], sample_rate, sorted
    )
    mel_mins, mel_maxs, mel_mids = convert_parameter_to_frequencies(
        f1, f2, sample_rate, sorted
    )
    x = np.arange(len(f_mids))
    plt.clf()
    if logscale:
        plt.yscale("log")
    plt.xlabel("filter index")
    plt.ylabel("f [Hz]")
    ax = plt.gca()
    # Reference (initialization) bank in blue, learned filters in green;
    # dashed lines mark center frequencies, shaded areas the passbands.
    ax.plot(x, mel_mins, color="blue", label="mel filters")
    ax.plot(x, mel_maxs, color="blue")
    ax.plot(x, mel_mids, "--", color="darkblue")
    ax.fill_between(x, mel_mins, mel_maxs, color="blue", alpha=0.3)
    ax.plot(x, f_mins, color="green", label="learned filters")
    ax.plot(x, f_maxs, color="green")
    ax.plot(x, f_mids, "--", color="darkgreen")
    ax.fill_between(x, f_mins, f_maxs, color="green", alpha=0.3)
    ax.legend(loc="upper left", prop={"size": 15})
    plt.savefig(img_path, bbox_inches="tight")
    print("Plotted %s" % img_path)
def plot_filter_kernels(filters: torch.Tensor, sample_rate: int, args):
    """Plot the Sinc filter kernels.

    Compares the learned filter kernels and bandpasses against the
    (unlearned) initialization of a SincConv layer with the same
    (currently hard-coded) settings.

    Args:
        filters (torch.Tensor): Filter parameters.
        sample_rate (int): Sample rate of Signal.
        args (dict): Dictionary with output options.
    """
    from espnet2.layers.sinc_conv import SincConv

    print(
        "When plotting filter kernels, make sure the script has the"
        " correct SincConv settings (currently hard-coded)."
    )
    convs = SincConv(1, 128, 101)
    # unlearned (initialization) kernels and band parameters
    convs._create_filters(convs.f.device)
    pre_kernels = convs.sinc_filters.detach().numpy()
    pre_filters = convs.f.detach().numpy()
    f_mins = np.abs(pre_filters[:, 0])
    f_maxs = np.abs(pre_filters[:, 0]) + np.abs(pre_filters[:, 1] - pre_filters[:, 0])
    F_mins, F_maxs = f_mins * sample_rate, f_maxs * sample_rate
    pre_F_mins, pre_F_maxs = (
        np.round(F_mins).astype(np.int64),
        np.round(F_maxs).astype(np.int64),
    )
    # learned kernels and band parameters
    convs.f = torch.nn.Parameter(torch.Tensor(filters))
    convs._create_filters(convs.f.device)
    kernels = convs.sinc_filters.detach().numpy()
    f_mins = np.abs(filters[:, 0])
    f_maxs = np.abs(filters[:, 0]) + np.abs(filters[:, 1] - filters[:, 0])
    F_mins, F_maxs = f_mins * sample_rate, f_maxs * sample_rate
    # Round to integer frequency bins and clip to the Nyquist frequency.
    # Fixed: cast back to int64 *after* clipping -- np.clip with a float
    # bound promotes the array to float64, and float arrays cannot be used
    # as slice indices when painting the bandpass plots below.
    F_mins = np.clip(np.round(F_mins), 0, sample_rate / 2.0).astype(np.int64)
    F_maxs = np.clip(np.round(F_maxs), 0, sample_rate / 2.0).astype(np.int64)
    x_f = np.linspace(0.0, np.max(F_maxs), int(np.max(F_maxs)) + 1)
    x = np.arange(kernels.shape[2])
    if args.all:
        # One plot per filter: initial kernel, learned kernel, an overlay of
        # both, and the corresponding (idealized) bandpasses over frequency.
        for i in range(len(kernels)):
            pre_kernel = pre_kernels[i][0]
            plt.clf()
            plt.xticks([])
            plt.yticks([])
            plt.plot(x, pre_kernel)
            img_name = "filter_pre_kernel_%s.%s" % (str(i).zfill(2), args.filetype)
            img_path = str(args.out_folder / img_name)
            plt.savefig(img_path, bbox_inches="tight")
            print("Plotted %s" % img_path)
            kernel = kernels[i][0]
            plt.clf()
            plt.xticks([])
            plt.yticks([])
            plt.plot(x, kernel)
            img_name = "filter_kernel_%s.%s" % (str(i).zfill(2), args.filetype)
            img_path = str(args.out_folder / img_name)
            plt.savefig(img_path, bbox_inches="tight")
            print("Plotted %s" % img_path)
            plt.clf()
            plt.xlabel("kernel index")
            plt.plot(x, kernel)
            plt.plot(x, pre_kernel, "--", alpha=0.5)
            img_name = "filter_kernel_both_%s.%s" % (str(i).zfill(2), args.filetype)
            img_path = str(args.out_folder / img_name)
            plt.savefig(img_path, bbox_inches="tight")
            print("Plotted %s" % img_path)
            y = np.zeros_like(x_f)
            y[F_mins[i] : F_maxs[i]] = 1.0
            plt.clf()
            plt.plot(x_f, y)
            img_name = "filter_freq_%s.%s" % (str(i).zfill(2), args.filetype)
            img_path = str(args.out_folder / img_name)
            plt.savefig(img_path, bbox_inches="tight")
            print("Plotted %s" % img_path)
            pre_y = np.zeros_like(x_f)
            pre_y[pre_F_mins[i] : pre_F_maxs[i]] = 1.0
            plt.clf()
            plt.plot(x_f, y)
            plt.plot(x_f, pre_y)
            img_name = "filter_freq_both_%s.%s" % (str(i).zfill(2), args.filetype)
            img_path = args.out_folder / img_name
            plt.savefig(img_path, bbox_inches="tight")
            print("Plotted %s" % img_path)
    plt.clf()
    # Hand-picked filter indices for the 2x2 ensemble plot (renamed from
    # 'filters', which shadowed the function argument).
    ensemble_ids = [32, 71, 113, 126]
    fig, axs = plt.subplots(2, 2, sharex=True, sharey="row")
    axs[0, 0].plot(x, kernels[ensemble_ids[0]][0])
    axs[0, 0].plot(x, pre_kernels[ensemble_ids[0]][0], "--", alpha=0.5)
    axs[0, 1].plot(x, kernels[ensemble_ids[1]][0])
    axs[0, 1].plot(x, pre_kernels[ensemble_ids[1]][0], "--", alpha=0.5)
    axs[1, 0].plot(x, kernels[ensemble_ids[2]][0])
    axs[1, 0].plot(x, pre_kernels[ensemble_ids[2]][0], "--", alpha=0.5)
    axs[1, 1].plot(x, kernels[ensemble_ids[3]][0])
    axs[1, 1].plot(x, pre_kernels[ensemble_ids[3]][0], "--", alpha=0.5)
    img_name = "filter_kernel_ensemble2.%s" % (args.filetype)
    img_path = str(args.out_folder / img_name)
    plt.savefig(img_path, bbox_inches="tight")
    plt.close(fig)
    print("Plotted %s" % img_path)
def plot_filters(indices, filename, F_mins, F_maxs, output_folder):
    """Plot filters bandwidths.

    Each filter is drawn as a rectangular band over frequency, stacked at
    increasing heights in the order given by `indices`.

    Args:
        indices: Sorted indices of filters.
        filename: Output filename (png or svg).
        F_mins: Minimum frequencies.
        F_maxs: Maximum frequencies.
        output_folder: Output folder.
    """
    grid = np.linspace(0, np.max(F_maxs), np.max(F_maxs) + 1)
    plt.clf()
    for level, idx in enumerate(indices, start=1):
        band = np.zeros_like(grid)
        band[F_mins[idx] : F_maxs[idx]] = level
        plt.plot(grid, band)
    img_path = str(output_folder / filename)
    plt.savefig(img_path, bbox_inches="tight")
    print("Plotted %s" % img_path)
def main(argv):
    """Load the model, generate kernel and bandpass plots."""
    args = get_parser().parse_args(argv)
    sample_rate = args.sample_rate
    # Snapshot files store the parameters under a "model" sub-dict, while
    # model.acc.best files store them directly.
    state = torch.load(args.model_path, map_location="cpu")
    if "model" in state:
        state = state["model"]
    if args.filter_key not in state:
        raise ValueError(
            f"The loaded model file does not contain the learned"
            f" filters in {args.filter_key}"
        )
    filters = state[args.filter_key]
    if filters.type() != "torch.FloatTensor":
        raise TypeError("The loaded filter values are not of type torch.FloatTensor")
    filters = filters.detach().cpu().numpy()
    # Convert the filter parameters to integer band edges in Hz.
    lower = np.abs(filters[:, 0])
    upper = lower + np.abs(filters[:, 1] - filters[:, 0])
    F_mins = np.round(lower * sample_rate).astype(np.int64)
    F_maxs = np.round(upper * sample_rate).astype(np.int64)
    # Create output folder if it does not yet exist
    args.out_folder.mkdir(parents=True, exist_ok=True)
    plot_filter_kernels(filters, sample_rate, args)
    plot_filters(
        range(len(F_mins)),
        "filters.%s" % args.filetype,
        F_mins,
        F_maxs,
        args.out_folder,
    )
    plot_filters(
        np.argsort(F_maxs - F_mins),
        "filters_len_sort.%s" % args.filetype,
        F_mins,
        F_maxs,
        args.out_folder,
    )
    plot_filters(
        np.argsort(F_mins),
        "filters_min_sort.%s" % args.filetype,
        F_mins,
        F_maxs,
        args.out_folder,
    )
    img_path = str(args.out_folder / f"filtergraph.{args.filetype}")
    plot_filtergraph(
        filters, sample_rate=sample_rate, img_path=img_path, scale=args.scale
    )
    img_path = str(args.out_folder / f"filtergraph_unsorted.{args.filetype}")
    plot_filtergraph(
        filters,
        sample_rate=sample_rate,
        img_path=img_path,
        sorted=False,
        scale=args.scale,
    )
# Script entry point: forward command-line arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| 11,828 | 32.041899 | 88 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/utils/learn_kmeans.py | # The learn_kmeans.py uses code from Fairseq:
# https://github.com/pytorch/fairseq/blob/master/examples/hubert/simple_kmeans/learn_kmeans.py
#
# Thanks to Abdelrahman Mohamed and Wei-Ning Hsu's help in this implementation,
# Their original Hubert work is in:
# Paper: https://arxiv.org/pdf/2106.07447.pdf
# Code in Fairseq: https://github.com/pytorch/fairseq/tree/master/examples/hubert
import argparse
import logging
import os
import random
import sys
import joblib
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from espnet.utils.cli_readers import file_reader_helper
# Configure root logging once at import time; the level comes from the
# LOGLEVEL environment variable (default INFO) and records go to stdout.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
# Module-level logger used throughout this script.
logger = logging.getLogger("learn_kmeans")
def get_parser():
    """Build the command-line parser for the k-means training options."""
    p = argparse.ArgumentParser()
    # Required output/model options.
    p.add_argument("--km_path", type=str, required=True)
    p.add_argument("--n_clusters", type=int, required=True)
    p.add_argument("--seed", default=0, type=int)
    p.add_argument(
        "--percent", default=-1, type=float, help="sample a subset; -1 for all"
    )
    # MiniBatchKMeans hyper-parameters.
    p.add_argument("--init", default="k-means++")
    p.add_argument("--max_iter", default=100, type=int)
    p.add_argument("--batch_size", default=10000, type=int)
    p.add_argument("--tol", default=0.0, type=float)
    p.add_argument("--max_no_improvement", default=100, type=int)
    p.add_argument("--n_init", default=20, type=int)
    p.add_argument("--reassignment_ratio", default=0.0, type=float)
    p.add_argument(
        "--in_filetype",
        type=str,
        default="sound",
        choices=["mat", "hdf5", "sound.hdf5", "sound"],
        help="Specify the file format for the rspecifier. "
        '"mat" is the matrix format in kaldi',
    )
    p.add_argument(
        "rspecifier",
        type=str,
        nargs="+",
        help="Read specifier for feats. e.g. ark:some.ark",
    )
    return p
def get_km_model(
    n_clusters,
    init,
    max_iter,
    batch_size,
    tol,
    max_no_improvement,
    n_init,
    reassignment_ratio,
):
    """Build an (unfitted) MiniBatchKMeans estimator with the given settings."""
    # Labels are not needed during training (compute_labels=False); verbose=1
    # makes sklearn report per-batch progress.
    opts = dict(
        n_clusters=n_clusters,
        init=init,
        max_iter=max_iter,
        batch_size=batch_size,
        verbose=1,
        compute_labels=False,
        tol=tol,
        max_no_improvement=max_no_improvement,
        init_size=None,
        n_init=n_init,
        reassignment_ratio=reassignment_ratio,
    )
    return MiniBatchKMeans(**opts)
def load_feature_shard(rspecifier, in_filetype, percent):
    """Load all features from one rspecifier, optionally subsampling utterances.

    Args:
        rspecifier: Kaldi-style read specifier, e.g. "ark:some.ark".
        in_filetype: File format of the rspecifier (e.g. "mat", "sound").
        percent: Fraction of utterances to sample; negative means use all.

    Returns:
        np.ndarray: Concatenated feature frames, shape (num_frames, feat_dim).
    """
    feats = [feat for _, feat in file_reader_helper(rspecifier, in_filetype)]
    if percent < 0:
        return np.concatenate(feats, axis=0)
    nsample = int(np.ceil(len(feats) * percent))
    # Fixed: sample with numpy's RNG instead of the stdlib `random` module,
    # so that the np.random.seed(seed) call in learn_kmeans() actually makes
    # the subsampling reproducible.
    chosen = np.random.choice(len(feats), size=nsample, replace=False)
    sampled_feat = np.concatenate([feats[i] for i in chosen], axis=0)
    logger.info(
        (
            f"sampled {nsample} utterances, {len(sampled_feat)} frames "
            f"from rspecifier {rspecifier}"
        )
    )
    return sampled_feat
def load_feature(rspecifiers, in_filetype, percent):
    """Load and concatenate features from one or several rspecifiers.

    Args:
        rspecifiers: A single rspecifier string or a list of them.
        in_filetype: File format of the rspecifiers.
        percent: Fraction of utterances to sample per shard; negative uses all.

    Returns:
        np.ndarray: Concatenated feature frames from all shards.
    """
    assert percent <= 1.0, f"percent must be <= 1.0, got {percent}"
    if not isinstance(rspecifiers, list):
        rspecifiers = [rspecifiers]
    feat = np.concatenate(
        [
            load_feature_shard(rspecifier, in_filetype, percent)
            for rspecifier in rspecifiers
        ],
        axis=0,
    )
    # Use the module logger (not the root logger) for consistent formatting
    # with the rest of this script.
    logger.info(f"loaded feature with dimension {feat.shape}")
    return feat
def learn_kmeans(
    rspecifier,
    in_filetype,
    km_path,
    n_clusters,
    seed,
    percent,
    init,
    max_iter,
    batch_size,
    tol,
    n_init,
    reassignment_ratio,
    max_no_improvement,
):
    """Train a MiniBatchKMeans model on features and save it to disk.

    Args:
        rspecifier: Read specifier(s) for the features.
        in_filetype: File format of the rspecifier.
        km_path: Output path for the serialized k-means model (joblib).
        n_clusters: Number of clusters.
        seed: Random seed (controls numpy's RNG, incl. feature subsampling).
        percent: Fraction of utterances to sample; negative means use all.
        init, max_iter, batch_size, tol, n_init, reassignment_ratio,
        max_no_improvement: MiniBatchKMeans hyper-parameters.
    """
    np.random.seed(seed)
    feat = load_feature(rspecifier, in_filetype, percent)
    km_model = get_km_model(
        n_clusters,
        init,
        max_iter,
        batch_size,
        tol,
        max_no_improvement,
        n_init,
        reassignment_ratio,
    )
    km_model.fit(feat)
    joblib.dump(km_model, km_path)
    # score() returns the negative inertia of the fitted model on `feat`;
    # report the (positive) average inertia per frame.
    inertia = -km_model.score(feat) / len(feat)
    # Fixed typo in the log message ("intertia" -> "inertia").
    logger.info("total inertia: %.5f", inertia)
    logger.info("finished successfully")
# Script entry point: parse CLI options, log them, and train the model.
if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    logging.info(str(args))
    learn_kmeans(**vars(args))
| 4,472 | 25.945783 | 98 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/pyscripts/utils/calculate_speech_metrics.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from typing import List, Union
import numpy as np
import torch
from mir_eval.separation import bss_eval_sources
from pystoi import stoi
from typeguard import check_argument_types
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.enh.espnet_model import ESPnetEnhancementModel
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.fileio.sound_scp import SoundScpReader
from espnet2.utils import config_argparse
from espnet.utils.cli_utils import get_commandline_args
def scoring(
    output_dir: str,
    dtype: str,
    log_level: Union[int, str],
    key_file: str,
    ref_scp: List[str],
    inf_scp: List[str],
    ref_channel: int,
    metrics: List[str],
    frame_size: int = 512,
    frame_hop: int = 256,
):
    """Compute speech-enhancement metrics for enhanced vs. reference audio.

    For every utterance key listed in ``key_file``, the requested metrics are
    computed per speaker and written as text files under ``output_dir`` via
    DatadirWriter (one file per metric/speaker, one line per utterance).

    Args:
        output_dir: Directory where per-metric score files are written.
        dtype: Data type used when reading audio (e.g. "float32").
        log_level: Logging verbosity level.
        key_file: File whose first column lists the utterance keys to score.
        ref_scp: One reference scp file per speaker.
        inf_scp: One enhanced/inferred scp file per speaker.
        ref_channel: Channel to select when signals are multi-channel.
        metrics: Metric names to compute (must be from the asserted set below).
        frame_size: STFT frame size in samples (framewise-* metrics only).
        frame_hop: STFT hop size in samples (framewise-* metrics only).
    """
    assert check_argument_types()
    for metric in metrics:
        assert metric in (
            "STOI",
            "ESTOI",
            "SNR",
            "SI_SNR",
            "SDR",
            "SAR",
            "SIR",
            "framewise-SNR",
        ), metric
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    assert len(ref_scp) == len(inf_scp), ref_scp
    num_spk = len(ref_scp)
    keys = [
        line.rstrip().split(maxsplit=1)[0] for line in open(key_file, encoding="utf-8")
    ]
    ref_readers = [SoundScpReader(f, dtype=dtype) for f in ref_scp]
    inf_readers = [SoundScpReader(f, dtype=dtype) for f in inf_scp]
    # get sample rate
    fs, _ = ref_readers[0][keys[0]]
    # check keys
    for inf_reader, ref_reader in zip(inf_readers, ref_readers):
        assert inf_reader.keys() == ref_reader.keys()
    # STFT is only needed for the framewise-* metrics, but is built once here.
    stft = STFTEncoder(n_fft=frame_size, hop_length=frame_hop)
    do_bss_eval = "SDR" in metrics or "SAR" in metrics or "SIR" in metrics
    with DatadirWriter(output_dir) as writer:
        for key in keys:
            ref_audios = [ref_reader[key][1] for ref_reader in ref_readers]
            inf_audios = [inf_reader[key][1] for inf_reader in inf_readers]
            ref = np.array(ref_audios)
            inf = np.array(inf_audios)
            if ref.ndim > inf.ndim:
                # multi-channel reference and single-channel output
                ref = ref[..., ref_channel]
                assert ref.shape == inf.shape, (ref.shape, inf.shape)
            elif ref.ndim < inf.ndim:
                # single-channel reference and multi-channel output
                raise ValueError(
                    "Reference must be multi-channel when the "
                    "network output is multi-channel."
                )
            elif ref.ndim == inf.ndim == 3:
                # multi-channel reference and output
                ref = ref[..., ref_channel]
                inf = inf[..., ref_channel]
            # BSS eval yields SDR/SIR/SAR and the best ref<->est speaker
            # permutation; with a single speaker and no BSS metrics requested,
            # the permutation is trivial.
            if do_bss_eval or num_spk > 1:
                sdr, sir, sar, perm = bss_eval_sources(
                    ref, inf, compute_permutation=True
                )
            else:
                perm = [0]
            ilens = torch.LongTensor([ref.shape[1]])
            # (num_spk, T, F)
            ref_spec, flens = stft(torch.from_numpy(ref), ilens)
            inf_spec, _ = stft(torch.from_numpy(inf), ilens)
            for i in range(num_spk):
                # p: index of the estimated source matched to reference i.
                p = int(perm[i])
                for metric in metrics:
                    name = f"{metric}_spk{i + 1}"
                    if metric == "STOI":
                        writer[name][key] = str(
                            stoi(ref[i], inf[p], fs_sig=fs, extended=False)
                        )
                    elif metric == "ESTOI":
                        writer[name][key] = str(
                            stoi(ref[i], inf[p], fs_sig=fs, extended=True)
                        )
                    elif metric == "SNR":
                        si_snr_score = -float(
                            ESPnetEnhancementModel.snr_loss(
                                torch.from_numpy(ref[i][None, ...]),
                                torch.from_numpy(inf[p][None, ...]),
                            )
                        )
                        writer[name][key] = str(si_snr_score)
                    elif metric == "SI_SNR":
                        si_snr_score = -float(
                            ESPnetEnhancementModel.si_snr_loss(
                                torch.from_numpy(ref[i][None, ...]),
                                torch.from_numpy(inf[p][None, ...]),
                            )
                        )
                        writer[name][key] = str(si_snr_score)
                    elif metric == "SDR":
                        writer[name][key] = str(sdr[i])
                    elif metric == "SAR":
                        writer[name][key] = str(sar[i])
                    elif metric == "SIR":
                        writer[name][key] = str(sir[i])
                    elif metric == "framewise-SNR":
                        framewise_snr = -ESPnetEnhancementModel.snr_loss(
                            ref_spec[i].abs(), inf_spec[i].abs()
                        )
                        writer[name][key] = " ".join(map(str, framewise_snr.tolist()))
                    else:
                        raise ValueError("Unsupported metric: %s" % metric)
                # save permutation assigned script file
                writer[f"wav_spk{i + 1}"][key] = inf_readers[perm[i]].data[key]
def get_parser():
    """Build the argument parser for the scoring script."""
    p = config_argparse.ArgumentParser(
        description="Frontend inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    p.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    p.add_argument("--output_dir", type=str, required=True)
    p.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    data_group = p.add_argument_group("Input data related")
    data_group.add_argument("--ref_scp", type=str, required=True, action="append")
    data_group.add_argument("--inf_scp", type=str, required=True, action="append")
    data_group.add_argument("--key_file", type=str)
    data_group.add_argument("--metrics", type=str, action="append")
    data_group.add_argument("--ref_channel", type=int, default=0)
    data_group.add_argument(
        "--frame_size",
        type=int,
        default=512,
        help="STFT frame size in samples, for calculating framewise-* metrics",
    )
    data_group.add_argument(
        "--frame_hop",
        type=int,
        default=256,
        help="STFT frame hop in samples, for calculating framewise-* metrics",
    )
    return p
def main(cmd=None):
    """Parse command-line arguments and run the scoring routine."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    options = vars(args)
    # 'config' is consumed by config_argparse itself; scoring() does not
    # accept it as a keyword argument.
    options.pop("config", None)
    scoring(**options)
# Script entry point.
if __name__ == "__main__":
    main()
| 7,320 | 32.582569 | 87 | py |
espnet | espnet-master/egs2/TEMPLATE/asr1/steps/libs/nnet3/xconfig/convolution.py | # Copyright 2018 Johns Hopkins University (Author: Dan Povey)
# 2016 Vijayaditya Peddinti
# Apache 2.0.
""" This module has the implementation of convolutional layers.
"""
from __future__ import print_function
from __future__ import division
import math
import re
import sys
from libs.nnet3.xconfig.basic_layers import XconfigLayerBase
# This class is for lines like the following:
#
# conv-batchnorm-layer name=conv2 height-in=40 height-out=40 \
# num-filters-out=64 height-offsets=-1,0,1 time-offsets=-1,0,1 \
# required-time-offsets=0
# or (with NormalizeLayer instead of batch-norm, and with subsampling on the height axis):
# conv-renorm-layer name=conv3 height-in=40 height-out=20 \
# height-subsample-out=2 num-filters-out=128 height-offsets=-1,0,1 \
# time-offsets=-1,0,1 required-time-offsets=0
#
# You don't specify subsampling on the time axis explicitly, it's implicit
# in the 'time-offsets' which are the same as the splicing indexes in a TDNN,
# and which, unlike the height offsets, operate relative to a fixed clock,
# so that after subsampling by a factor of 2, we'd expect all time-offsets
# of subsequent layers to be a factor of 2. You don't specify the input
# num-filters either; it's worked out from the input height and the input dim.
#
# The layer-name encodes the use (or not) of batch normalization, so that if you
# want to skip batch normalization you could just call it 'conv-layer'.
#
# If batch-normalization is used, it's *spatial* batch-normalization, meaning
# that the offset and scale is specific to the output filter, but shared across
# all time and height offsets.
#
# Most of the configuration values mirror same-named values in class
# TimeHeightConvolutionComponent, and for a deeper understanding of what's going
# on you should look at the comment by its declaration, in
# src/nnet3/nnet-convolutional-component.h.
#
# Parameters of the class, and their defaults if they have defaults:
#
# input='[-1]' Descriptor giving the input of the layer.
# height-in The height of the input image, e.g. 40 if the input
# is MFCCs. The num-filters-in is worked out as
# (dimension of input) / height-in. If the preceding
# layer is a convolutional layer, height-in should be
# the same as the height-out of the preceding layer.
# height-subsample-out=1 The height subsampling factor, will be e.g. 2 if you
# want to subsample by a factor of 2 on the height
# axis.
# height-out The height of the output image. This will normally
# be <= (height-in / height-subsample-out).
# Zero-padding on the height axis may be implied by a
# combination of this and height-offsets-in, e.g. if
# height-out==height-in and height-subsample-out=1
# and height-offsets=-2,-1,0,1 then we'd be padding
# by 2 pixels on the bottom and 1 on the top; see
# comments in nnet-convolutional-layers.h for more
# details.
# height-offsets The offsets on the height axis that define what
# inputs require for each output pixel; will
# often be something like -1,0,1 (if zero-padding
# on height axis) or 0,1,2 otherwise. These are
# comparable to TDNN splicing offsets; e.g. if
# height-offsets=-1,0,1 then height 10 at the output
# would take input from heights 9,10,11 at the input.
# num-filters-out The number of output filters. The output dimension
# of this layer is num-filters-out * height-out; the
# filter dim varies the fastest (filter-stride == 1).
# time-offsets The input offsets on the time axis; these are
# interpreted just like the splicing indexes in TDNNs.
# E.g. if time-offsets=-2,0,2 then time 100 at the
# output would require times 98,100,102 at the input.
# required-time-offsets The subset of 'time-offsets' that are required in
# order to produce an output; if the set has fewer
# elements than 'time-offsets' then it implies some
# kind of zero-padding on the time axis is allowed.
# Defaults to the same as 'time-offsets'. For speech
# tasks we recommend not to set this, as the normal
# padding approach is to pad with copies of the
# first/last frame, which is handled automatically in
# the calling code.
# target-rms=1.0 Only applicable if the layer type is
# conv-batchnorm-layer or
# conv-normalize-layer. This will affect the
# scaling of the output features (larger -> larger),
# and sometimes we set target-rms=0.5 for the layer
# prior to the final layer to make the final layer
# train more slowly.
# self-repair-scale=2.0e-05 This affects the ReLu's. It is a scale on the
# 'self-repair' mechanism that nudges the inputs to the
# ReLUs into the appropriate range in cases where
# the unit is active either too little of the time
# (<10%) or too much of the time (>90%).
#
# The following initialization and natural-gradient related options are, if
# provided, passed through to the config file; if not, they are left at the
# defaults in the code. See nnet-convolutional-component.h for more information.
#
# param-stddev, bias-stddev, max-change, learning-rate-factor (float)
# use-natural-gradient (bool)
# rank-in, rank-out (int)
# num-minibatches-history (float)
# alpha-in, alpha-out (float)
# the following is also passed into the convolution components, if specified:
# l2-regularize (float)
class XconfigConvLayer(XconfigLayerBase):
    """xconfig layer for time-height convolution stacks.

    Handles layer types such as 'conv-relu-batchnorm-layer': the hyphen-
    separated operations before the final '-layer' are applied in order.
    'noconv' may appear as the last operation to indicate a layer without a
    convolution component.  See the long comment above this class for the
    meaning of all configuration values.
    """

    def __init__(self, first_token, key_to_value, prev_names=None):
        # Validate that every operation encoded in the layer name is known.
        for operation in first_token.split('-')[:-1]:
            assert operation in ['conv', 'renorm', 'batchnorm', 'relu',
                                 'noconv', 'dropout', 'so']
        XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)

    def set_default_configs(self):
        # Defaults for all accepted config values.  -1 means "unset"; values
        # left at '' are not passed through to the component config.
        self.config = {'input': '[-1]',
                       'height-in': -1,
                       'height-subsample-out': 1,
                       'height-out': -1,
                       'height-offsets': '',
                       'num-filters-out': -1,
                       'time-offsets': '',
                       'required-time-offsets': '',
                       'target-rms': 1.0,
                       'self-repair-scale': 2.0e-05,
                       'self-repair-lower-threshold': 0.05,
                       # the following are not really inspected by this level of
                       # code, just passed through (but not if left at '').
                       'param-stddev': '', 'bias-stddev': '',
                       'max-change': 0.75, 'learning-rate-factor': '',
                       'use-natural-gradient': '',
                       'rank-in': '', 'rank-out': '', 'num-minibatches-history': '',
                       'alpha-in': '', 'alpha-out': '', 'l2-regularize': '',
                       'dropout-proportion': 0.5}

    def set_derived_configs(self):
        # Works out 'num-filters-in' from the input dim and 'height-in'.
        input_dim = self.descriptors['input']['dim']
        height_in = self.config['height-in']
        if height_in <= 0:
            raise RuntimeError("height-in must be specified")
        if input_dim % height_in != 0:
            raise RuntimeError("Input dimension {0} is not a multiple of height-in={1}".format(
                input_dim, height_in))
        self.config['num-filters-in'] = input_dim // height_in

    def check_offsets_var(self, offsets_str):
        # Returns true if 'offsets_str' is a sorted, unique, nonempty
        # comma-separated list of integers, like '-1,0,1'.
        # (Parameter renamed from 'str', which shadowed the builtin.)
        try:
            offsets = [int(x) for x in offsets_str.split(",")]
            if len(offsets) == 0:
                return False
            for i in range(len(offsets) - 1):
                # must be strictly increasing.
                if offsets[i] >= offsets[i + 1]:
                    return False
            return True
        except:
            # any parse failure means the value is not a valid offsets list.
            return False

    def check_configs(self):
        # Do some basic checking of the configs.  The component-level code does
        # some more thorough checking, but if you set the height-out too small
        # it prints it as a warning, which the user may not see, so at a
        # minimum we want to check for that here.
        height_subsample_out = self.config['height-subsample-out']
        height_in = self.config['height-in']
        height_out = self.config['height-out']
        if height_subsample_out <= 0:
            raise RuntimeError("height-subsample-out has invalid value {0}.".format(
                height_subsample_out))
        # we already checked height-in in set_derived_configs.
        if height_out <= 0:
            raise RuntimeError("height-out has invalid value {0}.".format(
                height_out))
        if height_out * height_subsample_out > height_in:
            raise RuntimeError("The combination height-in={0}, height-out={1} and "
                               "height-subsample-out={2} does not look right "
                               "(height-out too large).".format(
                                   height_in, height_out, height_subsample_out))
        height_offsets = self.config['height-offsets']
        time_offsets = self.config['time-offsets']
        required_time_offsets = self.config['required-time-offsets']
        if 'noconv' not in self.layer_type.split('-'):
            # only check height-offsets, time-offsets and required-time-offsets
            # if there is actually a convolution in this layer.
            if not self.check_offsets_var(height_offsets):
                raise RuntimeError("height-offsets={0} is not valid".format(height_offsets))
            if not self.check_offsets_var(time_offsets):
                raise RuntimeError("time-offsets={0} is not valid".format(time_offsets))
            if required_time_offsets != "" and not self.check_offsets_var(required_time_offsets):
                raise RuntimeError("required-time-offsets={0} is not valid".format(
                    required_time_offsets))
            if height_out * height_subsample_out < \
               height_in - len(height_offsets.split(',')):
                # Fixed: .format() was previously never called with arguments,
                # so the message contained literal '{0}' placeholders.
                raise RuntimeError("The combination height-in={0}, height-out={1} and "
                                   "height-subsample-out={2} and height-offsets={3} "
                                   "does not look right (height-out too small).".format(
                                       height_in, height_out, height_subsample_out,
                                       height_offsets))
        if self.config['target-rms'] <= 0.0:
            # Fixed: the message previously read the nonexistent key
            # 'target_rms', which raised KeyError instead of this error.
            raise RuntimeError("Config value target-rms={0} is not valid".format(
                self.config['target-rms']))

    def auxiliary_outputs(self):
        # This layer exposes no auxiliary outputs.
        return []

    def output_name(self, auxiliary_output=None):
        assert auxiliary_output is None
        # note: the [:-1] is to remove the '-layer'.
        operations = self.layer_type.split('-')[:-1]
        if operations[-1] == 'noconv':
            operations = operations[:-1]
        assert len(operations) >= 1
        last_operation = operations[-1]
        assert last_operation in ['relu', 'conv', 'renorm', 'batchnorm',
                                  'dropout', 'so']
        # we'll return something like 'layer1.batchnorm'.
        return '{0}.{1}'.format(self.name, last_operation)

    def output_dim(self, auxiliary_output=None):
        assert auxiliary_output is None
        return self.config['num-filters-out'] * self.config['height-out']

    def get_full_config(self):
        ans = []
        config_lines = self._generate_cnn_config()
        for line in config_lines:
            for config_name in ['ref', 'final']:
                # we do not support user specified matrices in CNN initialization
                # so 'ref' and 'final' configs are the same.
                ans.append((config_name, line))
        return ans

    def _generate_cnn_config(self):
        # convenience function to generate the CNN config lines.
        configs = []
        name = self.name
        # These 3 variables will be updated as we add components.
        cur_num_filters = self.config['num-filters-in']
        cur_height = self.config['height-in']
        cur_descriptor = self.descriptors['input']['final-string']
        # note: the [:-1] is to remove the '-layer'.
        operations = self.layer_type.split('-')[:-1]
        if operations[-1] == 'noconv':
            operations = operations[:-1]
        # e.g.:
        # operations = [ 'conv', 'relu', 'batchnorm' ]
        # or:
        # operations = [ 'relu', 'conv', 'renorm' ]
        for operation in operations:
            if operation == 'conv':
                a = []
                for opt_name in [
                        'param-stddev', 'bias-stddev', 'use-natural-gradient',
                        'max-change', 'rank-in', 'rank-out',
                        'num-minibatches-history', 'alpha-in', 'alpha-out',
                        'num-filters-in', 'num-filters-out',
                        'height-in', 'height-out', 'height-subsample-out',
                        'height-offsets', 'time-offsets', 'required-time-offsets',
                        'learning-rate-factor', 'l2-regularize']:
                    value = self.config[opt_name]
                    if value != '':
                        a.append('{0}={1}'.format(opt_name, value))
                conv_opts = ' '.join(a)
                configs.append('component name={0}.conv type=TimeHeightConvolutionComponent '
                               '{1}'.format(name, conv_opts))
                configs.append('component-node name={0}.conv component={0}.conv '
                               'input={1}'.format(name, cur_descriptor))
                cur_num_filters = self.config['num-filters-out']
                cur_height = self.config['height-out']
            elif operation == 'batchnorm':
                configs.append('component name={0}.batchnorm type=BatchNormComponent dim={1} '
                               'block-dim={2} target-rms={3}'.format(
                                   name, cur_num_filters * cur_height, cur_num_filters,
                                   self.config['target-rms']))
                configs.append('component-node name={0}.batchnorm component={0}.batchnorm '
                               'input={1}'.format(name, cur_descriptor))
            elif operation == 'renorm':
                configs.append('component name={0}.renorm type=NormalizeComponent '
                               'dim={1} target-rms={2}'.format(
                                   name, cur_num_filters * cur_height,
                                   self.config['target-rms']))
                configs.append('component-node name={0}.renorm component={0}.renorm '
                               'input={1}'.format(name, cur_descriptor))
            elif operation == 'relu':
                configs.append('component name={0}.relu type=RectifiedLinearComponent '
                               'dim={1} block-dim={2} self-repair-scale={3} '
                               'self-repair-lower-threshold={4}'.format(
                                   name, cur_num_filters * cur_height, cur_num_filters,
                                   self.config['self-repair-scale'],
                                   self.config['self-repair-lower-threshold']))
                configs.append('component-node name={0}.relu component={0}.relu '
                               'input={1}'.format(name, cur_descriptor))
            elif operation == 'dropout':
                configs.append('component name={0}.dropout type=DropoutComponent '
                               'dim={1} dropout-proportion={2}'.format(
                                   name, cur_num_filters * cur_height,
                                   self.config['dropout-proportion']))
                configs.append('component-node name={0}.dropout component={0}.dropout '
                               'input={1}'.format(name, cur_descriptor))
            elif operation == 'so':
                configs.append('component name={0}.so type=ScaleAndOffsetComponent '
                               'dim={1} block-dim={2}'.format(
                                   name, cur_num_filters * cur_height, cur_num_filters))
                configs.append('component-node name={0}.so component={0}.so '
                               'input={1}'.format(name, cur_descriptor))
            else:
                raise RuntimeError("Un-handled operation type: " + operation)
            # the output of this operation becomes the input of the next one.
            cur_descriptor = '{0}.{1}'.format(name, operation)
        return configs
# This class is for lines like the following:
#
# res-block name=res1 num-filters=64 height=32 time-period=1
#
# It implements a residual block as in ResNets, with pre-activation, and with
# some small differences-- basically, instead of adding the input to the output,
# we put a convolutional layer in there but initialize it to the unit matrix and
# if you want you can give it a relatively small (or even zero) learning rate
# and max-change. And there is batch-norm in that path also.
#
# The number of filters is the same on the input and output; it is actually
# redundant to write it in the config file, because given that we know the
# height, we can work it out from the dimension of the input (as dimension =
# height * num-filters). But we allow it to be specified anyway, for clarity.
#
# Note: the res-block does not support subsampling or changing the number of
# filters. If you want to do that, we recommend that you should do it with a
# single relu-batchnorm-conv-layer.
#
# Here are the most important configuration values, with defaults shown if
# defaults exist:
#
# input='[-1]' Descriptor giving the input of the layer.
# height The input and output height of the image, e.g. 40. Note: the width
# is associated with the time dimension and is dealt with
# implicitly, so it's not specified here.
# num-filters The number of filters on the input and output, e.g. 64.
# It does not have to be specified; if it is not specified,
# we work it out from the input dimension.
# num-bottleneck-filters If specified then this will be a 'bottleneck'
# ResBlock, in which there is a 1x1 convolution from
# num-filters->num-bottleneck-filters, a 3x3 convolution
# from num-bottleneck-filters->num-bottleneck-filters, and
# a 1x1 convolution from num-bottleneck-filters->num-filters.
#
# time-period=1 Think of this as the stride in the time dimension. At the
# input of the network will always have time-period=1; then
# after subsampling once in time we'd have time-period=2; then
# after subsampling again we'd have time-period=4. Because of
# the way nnet3 works, subsampling on the time axis is an
# implicit, not explicit, operation.
# height-period=1 This will almost always be left at the default (1). It is
# analogous to time-period, but because the height, unlike the
# time, is explicitly subsampled, in normal topologies this should
# be left at 1.
#
# bypass-source=noop
# The output of this component is Sum(convolution, x), and
# this option controls what 'x' is. There are 3 options
# here: 'noop', 'input', 'relu' or 'batchnorm'. 'noop' is
# equivalent to 'input' in what it computes; it just
# inserts a 'noop' component in order to make the
# computation more efficient. For both 'noop' and
# 'input', x is the input to this component. If
# bypass-source=relu then we use the relu of the
# input; if 'batchnorm', then we use the relu+batchnorm of
# the input.
# allow-zero-padding=true By default this will allow zero-padding in the time
# dimension, meaning that you don't need extra frames at
# the input to compute the output. There may be ASR
# applications where you want to pad in the time dimension
# with repeats of the first or last frame (as we do for
# TDNNs), where it would be appropriate to write
# allow-zero-padding=false. Note: the way we have
# set it up, it does zero-padding on the height axis
# regardless
#
# Less important config variables:
# self-repair-scale=2.0e-05 This affects the ReLu's. It is a scale on the
# 'self-repair' mechanism that nudges the inputs to the
# ReLUs into the appropriate range in cases where
# the unit is active either too little of the time
# (<10%) or too much of the time (>90%).
# max-change=0.75 Max-parameter-change constant (per minibatch)
# used for convolutional components.
#
#
# The following natural-gradient-related configuration variables are passed in
# to the convolution components, if specified:
# use-natural-gradient (bool)
# rank-in, rank-out (int)
# num-minibatches-history (float)
# alpha-in, alpha-out (float)
# the following is also passed into the convolution components, if specified:
# l2-regularize (float)
#
class XconfigResBlock(XconfigLayerBase):
    """Residual block ('res-block') xconfig layer with pre-activations.

    The main path is relu -> batchnorm -> conv, repeated two or three times
    (three when num-bottleneck-filters is set), and the output is the sum of
    the main path and a bypass branch selected by 'bypass-source'.  See the
    block comment above this class for the full list of config values.
    """
    def __init__(self, first_token, key_to_value, prev_names = None):
        assert first_token == 'res-block'
        XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)

    def set_default_configs(self):
        """Install default config values (documented above the class)."""
        self.config = {'input':'[-1]',
                       'height':-1,
                       'num-filters':-1,
                       'num-bottleneck-filters':-1,
                       'time-period':1,
                       'height-period':1,
                       'self-repair-scale': 2.0e-05,
                       'self-repair-lower-threshold1': 0.05,
                       'self-repair-lower-threshold2': 0.05,
                       'self-repair-lower-threshold3': 0.05,
                       'max-change': 0.75,
                       'allow-zero-padding': True,
                       'bypass-source' : 'noop',
                       # the following are not really inspected by this level of
                       # code, just passed through (but not if left at '').
                       'param-stddev':'', 'bias-stddev':'',
                       'use-natural-gradient':'',
                       'rank-in':'', 'rank-out':'',
                       'num-minibatches-history':'',
                       'alpha-in':'', 'alpha-out':'', 'l2-regularize':'' }

    def set_derived_configs(self):
        # set 'num-filters' (if unspecified) from the input dim, or check
        # that input-dim == num-filters * height.
        input_dim = self.descriptors['input']['dim']
        height = self.config['height']
        cur_num_filters = self.config['num-filters']
        if cur_num_filters == -1:
            if input_dim % height != 0:
                raise RuntimeError("Specified image height {0} does not "
                                   "divide the input dim {1}".format(
                                       height, input_dim))
            # Integer division (was '/'): num-filters is used in dimension
            # arithmetic and substituted into config strings, so it must
            # remain an int under Python 3.
            self.config['num-filters'] = input_dim // height
        elif input_dim != cur_num_filters * height:
            raise RuntimeError("Expected the input-dim to equal "
                               "height={0} * num-filters={1} = {2}, but "
                               "it is {3}".format(
                                   height, cur_num_filters,
                                   height * cur_num_filters,
                                   input_dim))

    def check_configs(self):
        # we checked the dimensions in set_derived_configs.
        # Bug fix: the original read the nonexistent config key
        # 'direct-convolution-source' (KeyError) and used format index {1}
        # with a single argument (IndexError), so the intended error message
        # could never actually be produced.
        if self.config['bypass-source'] not in [
                'input', 'noop', 'relu', 'batchnorm' ]:
            raise RuntimeError("Expected bypass-source to be input, noop, "
                               "relu or batchnorm, got: {0}".format(
                                   self.config['bypass-source']))

    def auxiliary_outputs(self):
        return []

    def output_name(self, auxiliary_output = None):
        """Return the descriptor of the block's output: Sum(last-conv, bypass)."""
        bypass_source = self.config['bypass-source']
        b = self.config['num-bottleneck-filters']
        conv = ('{0}.conv2' if b <= 0 else '{0}.conv3').format(self.name)
        if bypass_source == 'input':
            residual = self.descriptors['input']['final-string']
        elif bypass_source == 'noop':
            # we let the noop be the sum of the convolutional part and the
            # input, so just return the output of the no-op component.
            return '{0}.noop'.format(self.name)
        elif bypass_source == 'relu':
            residual = '{0}.relu1'.format(self.name)
        else:
            assert bypass_source == 'batchnorm'
            residual = '{0}.batchnorm1'.format(self.name)
        return 'Sum({0}, {1})'.format(conv, residual)

    def output_dim(self, auxiliary_output = None):
        # output dim equals input dim: res-blocks do not change the shape.
        assert auxiliary_output is None
        input_dim = self.descriptors['input']['dim']
        return input_dim

    def get_full_config(self):
        """Return a list of (config-name, config-line) pairs for this layer."""
        ans = []
        b = self.config['num-bottleneck-filters']
        if b <= 0:
            config_lines = self._generate_normal_resblock_config()
        else:
            config_lines = self._generate_bottleneck_resblock_config()
        for line in config_lines:
            for config_name in ['ref', 'final']:
                # we do not support user specified matrices in CNN initialization
                # so 'ref' and 'final' configs are the same.
                ans.append((config_name, line))
        return ans

    def _generate_normal_resblock_config(self):
        """Generate configs for the non-bottleneck res-block.

        The main path is:
          input -> relu1 -> batchnorm1 -> conv1 -> relu2 -> batchnorm2 -> conv2

        We put the relu before the batchnorm because we think it makes more
        sense; because the Torch people seemed to find that this works better
        (https://github.com/gcr/torch-residual-networks/issues/5); and because
        in our batchnorm component we haven't implemented the beta and gamma;
        these would be essential to having it work before relu, but when used
        before a convolution or linear component they add no extra modeling
        power.

        The output of the res-block is the sum of the last convolutional
        component (conv2) with the bypass branch chosen by 'bypass-source'.
        If the bypass term is the raw input, we add a NoOpComponent to cache
        the sum -- due to how nnet3 works, without it redundant summing
        operations would take place.
        """
        configs = []
        name = self.name
        num_filters = self.config['num-filters']
        assert self.config['num-bottleneck-filters'] == -1
        height = self.config['height']
        input_descriptor = self.descriptors['input']['final-string']
        allow_zero_padding = self.config['allow-zero-padding']
        height_period = self.config['height-period']
        time_period = self.config['time-period']

        cur_descriptor = input_descriptor
        for n in [1, 2]:
            # the ReLU
            configs.append('component name={0}.relu{1} type=RectifiedLinearComponent '
                           'dim={2} block-dim={3} self-repair-scale={4} '
                           'self-repair-lower-threshold={5}'.format(
                               name, n, num_filters * height, num_filters,
                               self.config['self-repair-scale'],
                               self.config['self-repair-lower-threshold{0}'.format(n)]))
            configs.append('component-node name={0}.relu{1} component={0}.relu{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.relu{1}'.format(name, n)

            # the batch-norm
            configs.append('component name={0}.batchnorm{1} type=BatchNormComponent dim={2} '
                           'block-dim={3}'.format(
                               name, n, num_filters * height,
                               num_filters))
            configs.append('component-node name={0}.batchnorm{1} component={0}.batchnorm{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.batchnorm{1}'.format(name, n)

            # the convolution; forward any natural-gradient / initialization
            # options the user supplied.
            a = []
            for opt_name in [
                    'param-stddev', 'bias-stddev', 'use-natural-gradient',
                    'max-change', 'rank-in', 'rank-out', 'num-minibatches-history',
                    'alpha-in', 'alpha-out', 'l2-regularize' ]:
                value = self.config[opt_name]
                if value != '':
                    a.append('{0}={1}'.format(opt_name, value))
            conv_opts = ('height-in={h} height-out={h} height-offsets=-{hp},0,{hp} '
                         'time-offsets=-{p},0,{p} '
                         'num-filters-in={f} num-filters-out={f} {r} {o}'.format(
                             h=height, hp=height_period, p=time_period, f=num_filters,
                             r=('required-time-offsets=0' if allow_zero_padding else ''),
                             o=' '.join(a)))
            configs.append('component name={0}.conv{1} type=TimeHeightConvolutionComponent '
                           '{2}'.format(name, n, conv_opts))
            configs.append('component-node name={0}.conv{1} component={0}.conv{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.conv{1}'.format(name, n)

        if self.config['bypass-source'] == 'noop':
            dim = self.descriptors['input']['dim']
            configs.append('component name={0}.noop dim={1} type=NoOpComponent'.format(
                name, dim))
            configs.append('component-node name={0}.noop component={0}.noop '
                           'input=Sum({1}, {0}.conv2)'.format(name,
                                                              input_descriptor))

        # Note: the function 'output_name' is responsible for returning the
        # descriptor corresponding to the output of the network.
        return configs

    def _generate_bottleneck_resblock_config(self):
        """Generate configs for the bottleneck res-block.

        A 3x3 kernel with num-bottleneck-filters is sandwiched between two
        1x1 kernels; the main path is:
          input -> relu1 -> batchnorm1 -> conv1 -> relu2 -> batchnorm2 ->
          conv2 -> relu3 -> batchnorm3 -> conv3

        The output of the res-block is the sum of conv3 with the bypass
        branch chosen by 'bypass-source'; as in the non-bottleneck case, a
        NoOpComponent caches the sum when bypassing the raw input.
        """
        configs = []
        name = self.name
        num_filters = self.config['num-filters']
        num_bottleneck_filters = self.config['num-bottleneck-filters']
        assert num_bottleneck_filters > 0
        height = self.config['height']
        input_descriptor = self.descriptors['input']['final-string']
        allow_zero_padding = self.config['allow-zero-padding']
        height_period = self.config['height-period']
        time_period = self.config['time-period']

        cur_descriptor = input_descriptor
        cur_num_filters = num_filters
        for n in [1, 2, 3]:
            # the ReLU
            configs.append('component name={0}.relu{1} type=RectifiedLinearComponent '
                           'dim={2} block-dim={3} self-repair-scale={4} '
                           'self-repair-lower-threshold={5}'.format(
                               name, n, cur_num_filters * height, cur_num_filters,
                               self.config['self-repair-scale'],
                               self.config['self-repair-lower-threshold{0}'.format(n)]))
            configs.append('component-node name={0}.relu{1} component={0}.relu{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.relu{1}'.format(name, n)

            # the batch-norm
            configs.append('component name={0}.batchnorm{1} type=BatchNormComponent dim={2} '
                           'block-dim={3}'.format(
                               name, n, cur_num_filters * height,
                               cur_num_filters))
            configs.append('component-node name={0}.batchnorm{1} component={0}.batchnorm{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.batchnorm{1}'.format(name, n)

            # the convolution.  Only the middle (n == 2) convolution has
            # nontrivial height- and time-offsets: it is the 3x3 kernel.
            a = []
            for opt_name in [
                    'param-stddev', 'bias-stddev', 'use-natural-gradient',
                    'max-change', 'rank-in', 'rank-out', 'num-minibatches-history',
                    'alpha-in', 'alpha-out', 'l2-regularize' ]:
                value = self.config[opt_name]
                if value != '':
                    a.append('{0}={1}'.format(opt_name, value))
            height_offsets = ('-{hp},0,{hp}'.format(hp=height_period) if n == 2 else '0')
            time_offsets = ('-{t},0,{t}'.format(t=time_period) if n == 2 else '0')
            next_num_filters = (num_filters if n == 3 else num_bottleneck_filters)
            conv_opts = ('height-in={h} height-out={h} height-offsets={ho} time-offsets={to} '
                         'num-filters-in={fi} num-filters-out={fo} {r} {o}'.format(
                             h=height, ho=height_offsets, to=time_offsets,
                             fi=cur_num_filters, fo=next_num_filters,
                             r=('required-time-offsets=0' if allow_zero_padding else ''),
                             o=' '.join(a)))
            configs.append('component name={0}.conv{1} type=TimeHeightConvolutionComponent '
                           '{2}'.format(name, n, conv_opts))
            configs.append('component-node name={0}.conv{1} component={0}.conv{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.conv{1}'.format(name, n)
            cur_num_filters = next_num_filters

        if self.config['bypass-source'] == 'noop':
            dim = self.descriptors['input']['dim']
            configs.append('component name={0}.noop dim={1} type=NoOpComponent'.format(
                name, dim))
            configs.append('component-node name={0}.noop component={0}.noop '
                           'input=Sum({1}, {0}.conv3)'.format(name,
                                                              input_descriptor))

        # Note: the function 'output_name' is responsible for returning the
        # descriptor corresponding to the output of the network.
        return configs
# This class is for lines like the following:
#
# res2-block name=res1 num-filters=64 height=32 time-period=1
#
# It is a residual block with post-activations.  (An older version of this
# comment said it does not support downsampling or changing the number of
# filters; the implementation below does support both, via
# height-in/height-out and num-filters, adding a 1x1 'conv_bypass'
# convolution on the bypass path when needed.)
# It's a pretty standard res-block, more standard than "res-block" (XconfigResBlock).
#
# The number of filters is the same on the input and output; it is actually
# redundant to write it in the config file, because given that we know the
# height, we can work it out from the dimension of the input (as dimension =
# height * num-filters). But we allow it to be specified anyway, for clarity.
#
# Here are the most important configuration values, with defaults shown if
# defaults exist:
#
# input='[-1]' Descriptor giving the input of the layer.
# height The input and output height of the image, e.g. 40. Note: the width
# is associated with the time dimension and is dealt with
# implicitly, so it's not specified here.
# num-filters The number of filters on the input and output, e.g. 64.
# It does not have to be specified; if it is not specified,
# we work it out from the input dimension.
# num-bottleneck-filters If specified then this will be a 'bottleneck'
# ResBlock, in which there is a 1x1 convolution from
# num-filters->num-bottleneck-filters, a 3x3 convolution
# from num-bottleneck-filters->num-bottleneck-filters, and
# a 1x1 convolution from num-bottleneck-filters->num-filters.
# time-period=1 Think of this as the stride in the time dimension. At the
# input of the network will always have time-period=1; then
# after subsampling once in time we'd have time-period=2; then
# after subsampling again we'd have time-period=4. Because of
# the way nnet3 works, subsampling on the time axis is an
# implicit, not explicit, operation.
# allow-zero-padding=true By default this will allow zero-padding in the time
# dimension, meaning that you don't need extra frames at
# the input to compute the output. There may be ASR
# applications where you want to pad in the time dimension
# with repeats of the first or last frame (as we do for
# TDNNs), where it would be appropriate to write
# allow-zero-padding=false. Note: the way we have
# set it up, it does zero-padding on the height axis
# regardless
#
# Less important config variables:
# self-repair-scale=2.0e-05 This affects the ReLu's. It is a scale on the
# 'self-repair' mechanism that nudges the inputs to the
# ReLUs into the appropriate range in cases where
# the unit is active either too little of the time
# (<10%) or too much of the time (>90%).
# max-change=0.75 Max-parameter-change constant (per minibatch)
# used for convolutional components.
#
#
# The following natural-gradient-related configuration variables are passed in
# to the convolution components, if specified:
# use-natural-gradient (bool)
# rank-in, rank-out (int)
# num-minibatches-history (float)
# alpha-in, alpha-out (float)
# the following is also passed into the convolution components, if specified:
# l2-regularize (float)
class XconfigRes2Block(XconfigLayerBase):
    """Residual block ('res2-block') xconfig layer with post-activations.

    The main path is conv -> batchnorm -> scale-and-offset -> relu, repeated
    two or three times (three when num-bottleneck-filters is set); the bypass
    branch is summed in just before the final relu.  When the height is
    subsampled or the number of filters changes, a 1x1 'conv_bypass'
    convolution is inserted on the bypass path.  See the block comment above
    this class for the full list of config values.
    """
    def __init__(self, first_token, key_to_value, prev_names = None):
        assert first_token == 'res2-block'
        XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)

    def set_default_configs(self):
        """Install default config values (documented above the class)."""
        self.config = {'input':'[-1]',
                       'height':-1,  # sets height-in and height-out
                       'height-in':-1,
                       'height-out':-1,
                       'num-filters':-1,  # interpreted as num-filters-out.
                       'num-bottleneck-filters':-1,
                       'time-period':1,
                       'self-repair-scale': 2.0e-05,
                       'self-repair-lower-threshold1': 0.05,
                       'self-repair-lower-threshold2': 0.05,
                       'self-repair-lower-threshold3': 0.05,
                       'max-change': 0.75,
                       'allow-zero-padding': True,
                       # the following are not really inspected by this level of
                       # code, just passed through (but not if left at '').
                       'param-stddev':'', 'bias-stddev':'',
                       'use-natural-gradient':'',
                       'rank-in':'', 'rank-out':'',
                       'num-minibatches-history':'',
                       'alpha-in':'', 'alpha-out':'',
                       'l2-regularize':'' }

    def set_derived_configs(self):
        # Resolve height-in/height-out from 'height' if necessary, and derive
        # num-filters from the input dim when it was not specified.
        input_dim = self.descriptors['input']['dim']
        if not ((self.config['height'] > 0 and self.config['height-in'] == -1 and
                 self.config['height-out'] == -1) or
                (self.config['height-out'] > 0 and self.config['height-in'] > 0)):
            raise RuntimeError("You must specify height, or height-in and height-out, for res2-block.")

        if not (self.config['height-in'] > 0 and self.config['height-out'] > 0):
            height = self.config['height']
            if not height > 0:
                raise RuntimeError("You must specify either height, or height-in and height-out, for "
                                   "res2-block.")
            self.config['height-in'] = height
            self.config['height-out'] = height

        height_in = self.config['height-in']
        if input_dim % height_in != 0:
            raise RuntimeError("Specified input image height {0} does not "
                               "divide the input dim {1}".format(
                                   height_in, input_dim))
        # Bug fixes versus the original code, which unconditionally did
        # "self.config['num-filters'] = input_dim / height":
        #  (a) 'height' is only assigned in the branch above, so a NameError
        #      occurred when height-in/height-out were given directly;
        #  (b) '/' produces a float under Python 3, and the value is used in
        #      dimension arithmetic and config strings;
        #  (c) a user-specified num-filters (documented as num-filters-out,
        #      which may differ from input_dim // height_in) was clobbered.
        if self.config['num-filters'] == -1:
            self.config['num-filters'] = input_dim // height_in

    def check_configs(self):
        if self.config['num-filters'] == -1:
            raise RuntimeError("You must specify num-filters for res2-block.")

    def auxiliary_outputs(self):
        return []

    def output_name(self, auxiliary_output = None):
        # the output of the block is its final relu (relu2, or relu3 in the
        # bottleneck case), which already includes the bypass sum.
        b = self.config['num-bottleneck-filters']
        return ('{0}.relu2' if b <= 0 else '{0}.relu3').format(self.name)

    def output_dim(self, auxiliary_output = None):
        assert auxiliary_output is None
        return self.config['height-out'] * self.config['num-filters']

    def get_full_config(self):
        """Return a list of (config-name, config-line) pairs for this layer."""
        ans = []
        b = self.config['num-bottleneck-filters']
        if b <= 0:
            config_lines = self._generate_normal_resblock_config()
        else:
            config_lines = self._generate_bottleneck_resblock_config()
        for line in config_lines:
            for config_name in ['ref', 'final']:
                # we do not support user specified matrices in CNN initialization
                # so 'ref' and 'final' configs are the same.
                ans.append((config_name, line))
        return ans

    def _generate_normal_resblock_config(self):
        """Generate configs for the non-bottleneck res2-block.

        The main path is:
          input -> conv1 -> batchnorm1 -> scaleoffset1 -> relu1 ->
                   conv2 -> batchnorm2 -> scaleoffset2 -> relu2
        where the 'scaleoffsetN' are ScaleAndOffsetComponents, which
        conventionally would be considered part of the batchnorm.  relu2
        actually sees the sum of the bypass branch and scaleoffset2 -- that
        sum is the residual connection.
        """
        configs = []
        name = self.name
        assert self.config['num-bottleneck-filters'] == -1
        input_dim = self.descriptors['input']['dim']
        height_in = self.config['height-in']
        height_out = self.config['height-out']
        time_period_out = self.config['time-period']
        if not input_dim % height_in == 0:
            raise RuntimeError("input-dim {0} does not divide height-in {1}".format(
                input_dim, height_in))
        # integer division (was '/'): the filter count feeds config strings.
        num_filters_in = input_dim // height_in
        num_filters_out = self.config['num-filters']

        if height_out != height_in:
            if height_out < height_in / 2 - 1 or height_out > height_in / 2 + 1:
                raise RuntimeError("Expected height-out to be about half height-in, or the same: "
                                   "height-in={0} height-out={1}".format(height_in, height_out))
            if not time_period_out % 2 == 0:
                raise RuntimeError("Expected time-period to be a multiple of 2 if you are subsampling "
                                   "on height.")
            # integer division (was '/'): time-period is substituted into
            # 'time-offsets=...' config strings and must stay an int.
            time_period_in = time_period_out // 2
            height_subsample = 2
        else:
            time_period_in = time_period_out
            height_subsample = 1

        cur_time_period = time_period_in
        cur_num_filters = num_filters_in
        cur_height = height_in
        input_descriptor = self.descriptors['input']['final-string']
        allow_zero_padding = self.config['allow-zero-padding']
        if height_subsample == 1 and num_filters_in == num_filters_out:
            bypass_descriptor = input_descriptor
        else:
            # a 1x1 convolution is needed on the bypass path.
            bypass_descriptor = '{0}.conv_bypass'.format(name)
        cur_descriptor = input_descriptor

        # get miscellaneous convolution options passed in from the xconfig line
        a = []
        for opt_name in [
                'param-stddev', 'bias-stddev', 'use-natural-gradient',
                'max-change', 'rank-in', 'rank-out', 'num-minibatches-history',
                'alpha-in', 'alpha-out', 'l2-regularize' ]:
            value = self.config[opt_name]
            if value != '':
                a.append('{0}={1}'.format(opt_name, value))
        misc_conv_opts = ' '.join(a)

        for n in [1, 2]:
            # the convolution; only conv1 (n == 1) does height subsampling.
            conv_opts = ('height-in={hi} height-out={ho} height-offsets=-1,0,1 '
                         'height-subsample-out={hs} '
                         'time-offsets=-{p},0,{p} '
                         'num-filters-in={fi} num-filters-out={fo} {r} {o}'.format(
                             hi=cur_height, ho=height_out,
                             p=cur_time_period,
                             hs=(height_subsample if n == 1 else 1),
                             fi=cur_num_filters,
                             fo=num_filters_out,
                             r=('required-time-offsets=0' if allow_zero_padding else ''),
                             o=misc_conv_opts))
            configs.append('component name={0}.conv{1} type=TimeHeightConvolutionComponent '
                           '{2}'.format(name, n, conv_opts))
            configs.append('component-node name={0}.conv{1} component={0}.conv{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.conv{1}'.format(name, n)
            cur_num_filters = num_filters_out
            cur_height = height_out
            cur_time_period = time_period_out

            # the batch-norm
            configs.append('component name={0}.batchnorm{1} type=BatchNormComponent dim={2} '
                           'block-dim={3}'.format(
                               name, n, cur_num_filters * cur_height,
                               cur_num_filters))
            configs.append('component-node name={0}.batchnorm{1} component={0}.batchnorm{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.batchnorm{1}'.format(name, n)

            # the scale-and-offset
            configs.append('component name={0}.scaleoffset{1} type=ScaleAndOffsetComponent dim={2} '
                           'block-dim={3}'.format(
                               name, n, cur_num_filters * cur_height,
                               cur_num_filters))
            configs.append('component-node name={0}.scaleoffset{1} component={0}.scaleoffset{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.scaleoffset{1}'.format(name, n)

            if n == 2:
                # the bypass connection
                cur_descriptor = 'Sum({0}, {1})'.format(cur_descriptor, bypass_descriptor)

            # the ReLU
            configs.append('component name={0}.relu{1} type=RectifiedLinearComponent '
                           'dim={2} block-dim={3} self-repair-scale={4} '
                           'self-repair-lower-threshold={5}'.format(
                               name, n, cur_num_filters * cur_height, cur_num_filters,
                               self.config['self-repair-scale'],
                               self.config['self-repair-lower-threshold{0}'.format(n)]))
            configs.append('component-node name={0}.relu{1} component={0}.relu{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.relu{1}'.format(name, n)

        if bypass_descriptor != input_descriptor:
            # We need to add the 1x1 bypass convolution because we're either doing height
            # subsampling or changing the number of filters.
            conv_opts = ('height-in={hi} height-out={ho} height-offsets=0 '
                         'time-offsets=0 height-subsample-out={hs} '
                         'num-filters-in={fi} num-filters-out={fo} {o}'.format(
                             hi=height_in, ho=height_out, hs=height_subsample,
                             fi=num_filters_in, fo=num_filters_out, o=misc_conv_opts))
            configs.append('component name={0}.conv_bypass type=TimeHeightConvolutionComponent '
                           '{1}'.format(name, conv_opts))
            configs.append('component-node name={0}.conv_bypass component={0}.conv_bypass '
                           'input={1}'.format(name, input_descriptor))

        # Note: the function 'output_name' is responsible for returning the
        # descriptor corresponding to the output of the network, which in
        # this case would be '{0}.relu2'.format(name).
        return configs

    def _generate_bottleneck_resblock_config(self):
        """Generate configs for the bottleneck res2-block.

        A 3x3 kernel with num-bottleneck-filters is sandwiched between two
        1x1 kernels; the main path is:
          input -> conv1 -> batchnorm1 -> scaleoffset1 -> relu1 ->
                   conv2 -> batchnorm2 -> scaleoffset2 -> relu2 ->
                   conv3 -> batchnorm3 -> scaleoffset3 -> relu3
        and relu3 takes as its input the sum of the bypass branch and
        scaleoffset3.
        """
        configs = []
        name = self.name
        num_bottleneck_filters = self.config['num-bottleneck-filters']
        assert num_bottleneck_filters > 0
        input_dim = self.descriptors['input']['dim']
        height_in = self.config['height-in']
        height_out = self.config['height-out']
        input_descriptor = self.descriptors['input']['final-string']
        allow_zero_padding = self.config['allow-zero-padding']
        time_period_out = self.config['time-period']
        if not input_dim % height_in == 0:
            raise RuntimeError("input-dim={0} does not divide height-in={1}".format(
                input_dim, height_in))
        # integer division (was '/'): the filter count feeds config strings.
        num_filters_in = input_dim // height_in
        num_filters_out = self.config['num-filters']

        if height_out != height_in:
            if height_out < height_in / 2 - 1 or height_out > height_in / 2 + 1:
                raise RuntimeError("Expected height-out to be about half height-in, or the same: "
                                   "height-in={0} height-out={1}".format(height_in, height_out))
            height_subsample = 2
        else:
            height_subsample = 1

        cur_descriptor = input_descriptor
        cur_num_filters = num_filters_in
        cur_height = height_in
        if height_subsample == 1 and num_filters_in == num_filters_out:
            bypass_descriptor = input_descriptor
        else:
            # a 1x1 convolution is needed on the bypass path.
            bypass_descriptor = '{0}.conv_bypass'.format(name)

        # get miscellaneous convolution options passed in from the xconfig line
        a = []
        for opt_name in [
                'param-stddev', 'bias-stddev', 'use-natural-gradient',
                'max-change', 'rank-in', 'rank-out', 'num-minibatches-history',
                'alpha-in', 'alpha-out', 'l2-regularize' ]:
            value = self.config[opt_name]
            if value != '':
                a.append('{0}={1}'.format(opt_name, value))
        misc_conv_opts = ' '.join(a)

        for n in [1, 2, 3]:
            # the convolution.  Only the middle (n == 2) convolution is 3x3;
            # the others are 1x1, and only conv1 does the height subsampling.
            height_offsets = ('-1,0,1' if n == 2 else '0')
            this_height_subsample = height_subsample if n == 1 else 1
            time_offsets = ('-{t},0,{t}'.format(t=time_period_out) if n == 2 else '0')
            next_num_filters = (num_filters_out if n == 3 else num_bottleneck_filters)
            conv_opts = ('height-in={h_in} height-out={h_out} height-offsets={ho} time-offsets={to} '
                         'num-filters-in={fi} num-filters-out={fo} height-subsample-out={hs} '
                         '{r} {o}'.format(
                             h_in=cur_height, h_out=height_out,
                             to=time_offsets, ho=height_offsets,
                             hs=this_height_subsample,
                             fi=cur_num_filters, fo=next_num_filters,
                             r=('required-time-offsets=0' if allow_zero_padding else ''),
                             o=misc_conv_opts))
            configs.append('component name={0}.conv{1} type=TimeHeightConvolutionComponent '
                           '{2}'.format(name, n, conv_opts))
            configs.append('component-node name={0}.conv{1} component={0}.conv{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_num_filters = next_num_filters
            cur_height = height_out
            cur_descriptor = '{0}.conv{1}'.format(name, n)

            # the batch-norm
            configs.append('component name={0}.batchnorm{1} type=BatchNormComponent dim={2} '
                           'block-dim={3}'.format(
                               name, n, cur_num_filters * cur_height,
                               cur_num_filters))
            configs.append('component-node name={0}.batchnorm{1} component={0}.batchnorm{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.batchnorm{1}'.format(name, n)

            # the scale and offset
            configs.append('component name={0}.scaleoffset{1} type=ScaleAndOffsetComponent dim={2} '
                           'block-dim={3}'.format(
                               name, n, cur_num_filters * cur_height,
                               cur_num_filters))
            configs.append('component-node name={0}.scaleoffset{1} component={0}.scaleoffset{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.scaleoffset{1}'.format(name, n)

            if n == 3:
                # the bypass connection
                cur_descriptor = 'Sum({0}, {1})'.format(cur_descriptor, bypass_descriptor)

            # the ReLU
            configs.append('component name={0}.relu{1} type=RectifiedLinearComponent '
                           'dim={2} block-dim={3} self-repair-scale={4} '
                           'self-repair-lower-threshold={5}'.format(
                               name, n, cur_num_filters * cur_height, cur_num_filters,
                               self.config['self-repair-scale'],
                               self.config['self-repair-lower-threshold{0}'.format(n)]))
            configs.append('component-node name={0}.relu{1} component={0}.relu{1} '
                           'input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.relu{1}'.format(name, n)

        if bypass_descriptor != input_descriptor:
            # We need to add the 1x1 bypass convolution because we're either doing height
            # subsampling or changing the number of filters.
            conv_opts = ('height-in={hi} height-out={ho} height-offsets=0 '
                         'time-offsets=0 height-subsample-out={hs} '
                         'num-filters-in={fi} num-filters-out={fo} {o}'.format(
                             hi=height_in, ho=height_out, hs=height_subsample,
                             fi=num_filters_in, fo=num_filters_out, o=misc_conv_opts))
            configs.append('component name={0}.conv_bypass type=TimeHeightConvolutionComponent '
                           '{1}'.format(name, conv_opts))
            configs.append('component-node name={0}.conv_bypass component={0}.conv_bypass '
                           'input={1}'.format(name, input_descriptor))

        # Note: the function 'output_name' is responsible for returning the
        # descriptor corresponding to the output of the network, which
        # in this case will be '{0}.relu3'.format(name).
        return configs
# This layer just maps to a single component, a SumBlockComponent. It's for
# doing channel averaging at the end of neural networks. See scripts for
# examples of how to use it.
# An example line using this layer is:
# channel-average-layer name=channel-average input=Append(2, 4, 6, 8) dim=64
# the configuration value 'dim' is the output dimension of this layer.
# The input dimension is expected to be a multiple of 'dim'. The output
# will be the average of 'dim'-sized blocks of the input.
class ChannelAverageLayer(XconfigLayerBase):
    """xconfig layer that maps to a single SumBlockComponent.

    It is used for channel averaging at the end of neural networks, e.g.:
      channel-average-layer name=channel-average input=Append(2, 4, 6, 8) dim=64
    'dim' is the output dimension of the layer; the input dimension must be a
    multiple of it, and the output is the average of the 'dim'-sized blocks
    of the input.
    """
    def __init__(self, first_token, key_to_value, prev_names = None):
        assert first_token == "channel-average-layer"
        XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)

    def set_default_configs(self):
        self.config = {'input':'[-1]',
                       'dim': -1 }

    def set_derived_configs(self):
        pass

    def check_configs(self):
        # 'dim' must be positive and must exactly divide the input dimension.
        out_dim = self.config['dim']
        if out_dim <= 0:
            raise RuntimeError("dim must be specified and > 0.")
        in_dim = self.descriptors['input']['dim']
        if in_dim % out_dim != 0:
            raise RuntimeError("input-dim={0} is not a multiple of dim={1}".format(
                in_dim, out_dim))

    def auxiliary_outputs(self):
        return []

    def output_name(self, auxiliary_output = None):
        assert auxiliary_output is None
        return self.name

    def output_dim(self, auxiliary_output = None):
        assert auxiliary_output is None
        return self.config['dim']

    def get_full_config(self):
        # 'ref' and 'final' configs are identical for this layer.
        return [(config_name, line)
                for line in self._generate_channel_average_config()
                for config_name in ['ref', 'final']]

    def _generate_channel_average_config(self):
        in_dim = self.descriptors['input']['dim']
        in_descriptor = self.descriptors['input']['final-string']
        out_dim = self.config['dim']
        # choosing scale = out_dim / in_dim turns the block sum into an average.
        avg_scale = out_dim * 1.0 / in_dim
        return [
            'component name={0} type=SumBlockComponent input-dim={1} '
            'output-dim={2} scale={3}'.format(self.name, in_dim,
                                              out_dim, avg_scale),
            'component-node name={0} component={0} input={1}'.format(
                self.name, in_descriptor),
        ]
| 61,163 | 49.800664 | 107 | py |
espnet | espnet-master/egs2/lrs2/lipreading1/local/feature_extract/video_processing.py | import cvtransforms
import face_alignment
import numpy as np
import skimage.transform
import skvideo.io
import torch
from models import pretrained
def reload_model(model, path=""):
    """Load size-compatible parameters from a checkpoint into ``model``.

    Args:
        model: a torch module whose state dict will be partially updated.
        path: checkpoint path; when empty, ``model`` is returned untouched.

    Returns:
        The same model, with every checkpoint tensor whose name and shape
        match the model's state dict copied in.
    """
    if not path:
        return model
    state = model.state_dict()
    checkpoint = torch.load(path, map_location="cpu")
    # keep only entries that exist in the model with an identical shape
    compatible = {
        key: tensor
        for key, tensor in checkpoint.items()
        if key in state and tensor.size() == state[key].size()
    }
    state.update(compatible)
    print("load {} parameters".format(len(compatible)))
    model.load_state_dict(state)
    return model
class BoundingBox(object):
    """
    A 2D bounding box around a sequence of (x, y) points.

    Coordinates are truncated to ints.  The min/max seeds (255 and 0) and the
    64..192 fallback assume coordinates within a 256x256 frame -- TODO confirm
    against the landmark detector's output range.
    """

    def __init__(self, points):
        if len(points) == 0:
            raise ValueError("Can't compute bounding box of empty list")
        self.minx, self.miny = 255, 255
        self.maxx, self.maxy = 0, 0
        for x, y in points:
            # Set min coords
            if x < self.minx:
                self.minx = int(x)
            if y < self.miny:
                self.miny = int(y)
            # Set max coords
            if x > self.maxx:
                self.maxx = int(x)
            if y > self.maxy:
                self.maxy = int(y)
        if self.maxx <= self.minx or self.maxy <= self.miny:
            # Degenerate box (e.g. a single point): fall back to a fixed
            # central 128x128 region.  Bug fix: the original assigned
            # min=(192,192) and max=(64,64), i.e. an inverted box with
            # negative width/height that crops to an empty region.
            print("Box failed, return center box")
            self.minx, self.miny = 64, 64
            self.maxx, self.maxy = 192, 192

    @property
    def width(self):
        return self.maxx - self.minx

    @property
    def height(self):
        return self.maxy - self.miny

    def __repr__(self):
        return "BoundingBox({}, {}, {}, {})".format(
            self.minx, self.maxx, self.miny, self.maxy
        )
def parse_scripts(scp_path, value_processor=lambda x: x, num_tokens=2):
    """Parse a kaldi script (.scp) file into a key -> value dict.

    Args:
        scp_path: path to the script file.
        value_processor: transform applied to each value before storing it.
        num_tokens: expected tokens per line; when >= 2 the count is checked.

    Raises:
        RuntimeError: on a line with the wrong number of tokens.
        ValueError: on a duplicated key.
    """
    table = dict()
    with open(scp_path, "r") as scp_file:
        for line_no, raw_line in enumerate(scp_file, 1):
            tokens = raw_line.strip().split()
            if num_tokens >= 2 and len(tokens) != num_tokens or len(tokens) < 2:
                raise RuntimeError(
                    "For {}, format error in line[{:d}]: {}".format(
                        scp_path, line_no, raw_line
                    )
                )
            key = tokens[0]
            # with exactly two tokens the value is a scalar, otherwise a list
            value = tokens[1] if num_tokens == 2 else tokens[1:]
            if key in table:
                raise ValueError(
                    "Duplicated key '{0}' exists in {1}".format(key, scp_path)
                )
            table[key] = value_processor(value)
    return table
# Run inference on GPU 0 when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Lipreading backbone; nClasses=500 presumably matches the 500-word LRW
# classification head of the checkpoint loaded below -- TODO confirm.
model = pretrained.Lipreading(mode="temporalConv", nClasses=500)
model = reload_model(model, "./local/feature_extract/lipread_lrw_pretrain.pt")
model = model.float()
# feature extraction only: switch to eval mode and move to the device.
model.eval()
model.to(device)
class VideoReader(object):
    """
    Basic Reader Class

    Reads videos listed in a kaldi-style scp file ("<uid> <video_path>"),
    crops each video around the speaker's lower face using 2D facial
    landmarks, converts frames to grayscale, and extracts features with the
    module-level pretrained lipreading model.
    """

    def __init__(self, scp_path, value_processor=lambda x: x):
        # uid -> video path mapping parsed from the scp file.
        self.index_dict = parse_scripts(
            scp_path, value_processor=value_processor, num_tokens=2
        )
        self.index_keys = list(self.index_dict.keys())
        # 2D facial landmark detector used to locate the face region.
        self.face_align_model = face_alignment.FaceAlignment(
            face_alignment.LandmarksType._2D,
            flip_input=False,
            device="cuda:0" if torch.cuda.is_available() else "cpu",
        )

    def video_face_crop(self, input_video):
        """Crop the face region of a video and resize frames to 112x112.

        :param input_video: frames as an array of shape (T, H, W, C)
        :return: uint8 array of shape (T, 112, 112, C)
        """
        video = input_video
        preds = []
        # Run the (expensive) landmark detector on every 3rd frame only.
        for i in range(0, len(video), 3):
            pred = self.face_align_model.get_landmarks(video[i])
            if pred:
                preds.append(pred[0])
        preds = np.array(preds)
        # Median landmark positions across sampled frames -> robust to
        # per-frame detection noise.
        heatmap = np.median(preds, axis=0)
        # Landmarks 2..14 — presumably the jaw/lower-face contour of the
        # 68-point layout; TODO confirm against face_alignment's indexing.
        bounding_box = BoundingBox(heatmap[2:15])
        croped = video[
            :,
            bounding_box.miny : bounding_box.maxy,
            bounding_box.minx : bounding_box.maxx,
            :,
        ]
        crop_resize = np.zeros((np.shape(video)[0], 112, 112, np.shape(video)[-1]))
        for i in range(len(croped)):
            try:
                crop_resize[i] = skimage.transform.resize(
                    croped[i], (112, 112), preserve_range=True
                )
            except Exception:
                # Keep going on a bad frame; its slot stays all-zero.
                print(croped)
                print("frame fails")
        crop_resize = crop_resize.astype(np.uint8)
        return crop_resize

    def transform_to_gray(self, data):
        """Convert RGB frames to [0, 1] grayscale (BT.601 luma weights)."""
        r, g, b = data[..., 0], data[..., 1], data[..., 2]
        data = (0.2989 * r + 0.5870 * g + 0.1140 * b) / 255
        return data

    def extract_feature(self, inputs):
        """Run the pretrained lipreading model; return features as numpy."""
        inputs = cvtransforms.ColorNormalize(inputs)
        inputs = torch.from_numpy(inputs)
        # (T, H, W) -> (1, 1, T, H, W): add batch and channel dimensions.
        inputs = inputs.unsqueeze(0).float()
        inputs = inputs.unsqueeze(1)
        with torch.no_grad():
            outputs = model(inputs.to(device))
        return outputs.cpu().numpy()

    def _load(self, key):
        # return path
        # Full pipeline for one utterance: read -> crop -> gray -> features.
        video = skvideo.io.vread(self.index_dict[key])
        v = self.video_face_crop(video)
        v = self.transform_to_gray(v)
        v = self.extract_feature(v)
        return v
        # return self.index_dict[key]

    # number of utterance
    def __len__(self):
        return len(self.index_dict)

    # avoid key error
    def __contains__(self, key):
        return key in self.index_dict

    # sequential index
    def __iter__(self):
        for key in self.index_keys:
            yield key, self._load(key)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    # random index, support str/int as index
    def __getitem__(self, index):
        if type(index) not in [int, str]:
            raise IndexError("Unsupported index type: {}".format(type(index)))
        if type(index) == int:
            # from int index to key
            num_utts = len(self.index_keys)
            if index >= num_utts or index < 0:
                raise KeyError(
                    "Interger index out of range, {:d} vs {:d}".format(index, num_utts)
                )
            index = self.index_keys[index]
        if index not in self.index_dict:
            raise KeyError("Missing utterance {}!".format(index))
        return self._load(index)
| 6,492 | 29.483568 | 88 | py |
espnet | espnet-master/egs2/lrs2/lipreading1/local/feature_extract/models/pretrained.py | # coding: utf-8
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 2D convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-convolution residual block (ResNet-18/34 style).

    Attribute names (conv1/bn1/conv2/bn2/downsample) are kept identical to
    the original so pretrained state dicts still load by name.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Main path: conv-bn-relu-conv-bn.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Shortcut path, projected when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(out + shortcut)
class ResNet(nn.Module):
    """ResNet trunk used as a frame-level feature extractor.

    Unlike a full torchvision-style ResNet there is no input stem
    (conv1/maxpool): inputs must already have 64 channels. ``forward``
    returns the pooled ``512 * block.expansion``-dim feature and skips the
    classification head; ``fc``/``bnfc`` are still constructed so pretrained
    checkpoints containing their weights load without key mismatches.
    """

    def __init__(self, block, layers, num_classes=1000):
        """
        :param block: residual block class (e.g. ``BasicBlock``)
        :param layers: number of blocks per stage, e.g. ``[2, 2, 2, 2]``
        :param num_classes: output size of the (bypassed in forward) fc head
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Head modules: kept for checkpoint compatibility, not used in forward.
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.bnfc = nn.BatchNorm1d(num_classes)
        # He-style (fan-out) init for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; 1x1-conv downsample when the
        shape changes (stride != 1 or channel mismatch)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # Return pooled features; the fc/bnfc head is intentionally not
        # applied. (The statements after the original early return were
        # unreachable dead code and have been removed.)
        return x
class GRU(nn.Module):
    """Bidirectional GRU back-end with a linear classification head.

    Args:
        input_size: feature dimension of each time step.
        hidden_size: GRU hidden size per direction.
        num_layers: number of stacked GRU layers.
        num_classes: output dimension of the final linear layer.
        every_frame: if True, classify every time step; otherwise only
            the last one.
    """

    def __init__(
        self, input_size, hidden_size, num_layers, num_classes, every_frame=True
    ):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.every_frame = every_frame
        self.gru = nn.GRU(
            input_size, hidden_size, num_layers, batch_first=True, bidirectional=True
        )
        self.fc = nn.Linear(hidden_size * 2, num_classes)

    def forward(self, x):
        # Zero initial hidden state; 2 * num_layers because the GRU is
        # bidirectional. BUG FIX: created on x's device/dtype — the original
        # built a CPU tensor via the deprecated ``Variable``, which fails
        # when the module runs on GPU.
        h0 = torch.zeros(
            self.num_layers * 2,
            x.size(0),
            self.hidden_size,
            device=x.device,
            dtype=x.dtype,
        )
        out, _ = self.gru(x, h0)
        if self.every_frame:
            out = self.fc(out)  # predictions based on every time step
        else:
            out = self.fc(out[:, -1, :])  # predictions based on last time-step
        return out
class Lipreading(nn.Module):
    """Lipreading network: 3D conv front-end + 2D ResNet-18 trunk.

    As written, ``forward`` returns per-frame ResNet features; the temporal
    back-ends (backend_conv1/backend_conv2 and the GRU) are constructed but
    their application is commented out, so the model acts as a feature
    extractor. The head modules are still built so pretrained checkpoints
    load by name.
    """

    def __init__(
        self,
        mode,
        inputDim=256,
        hiddenDim=512,
        nClasses=500,
        frameLen=29,
        every_frame=True,
    ):
        super(Lipreading, self).__init__()
        self.mode = mode
        self.inputDim = inputDim
        self.hiddenDim = hiddenDim
        self.nClasses = nClasses
        self.frameLen = frameLen
        self.every_frame = every_frame
        self.nLayers = 2
        # frontend3D: spatio-temporal stem. Stride/pooling keep the time
        # axis length (stride 1 in time) while downsampling space.
        self.frontend3D = nn.Sequential(
            nn.Conv3d(
                1,
                64,
                kernel_size=(5, 7, 7),
                stride=(1, 2, 2),
                padding=(2, 3, 3),
                bias=False,
            ),
            nn.BatchNorm3d(64),
            nn.ReLU(True),
            nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),
        )
        # resnet: 2D trunk applied per frame, producing inputDim features.
        # self.resnet34 = ResNet(BasicBlock, [3, 4, 6, 3], num_classes=self.inputDim)
        self.resnet18 = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=self.inputDim)
        # backend_conv: temporal 1D-conv classifier (unused in forward below).
        self.backend_conv1 = nn.Sequential(
            nn.Conv1d(self.inputDim, 2 * self.inputDim, 5, 2, 0, bias=False),
            nn.BatchNorm1d(2 * self.inputDim),
            nn.ReLU(True),
            nn.MaxPool1d(2, 2),
            nn.Conv1d(2 * self.inputDim, 4 * self.inputDim, 5, 2, 0, bias=False),
            nn.BatchNorm1d(4 * self.inputDim),
            nn.ReLU(True),
        )
        self.backend_conv2 = nn.Sequential(
            nn.Linear(4 * self.inputDim, self.inputDim),
            nn.BatchNorm1d(self.inputDim),
            nn.ReLU(True),
            nn.Linear(self.inputDim, self.nClasses),
        )
        # backend_gru: recurrent classifier alternative (unused in forward).
        self.gru = GRU(
            self.inputDim, self.hiddenDim, self.nLayers, self.nClasses, self.every_frame
        )
        # initialize
        self._initialize_weights()

    def forward(self, x):
        # x: (B, 1, T, H, W) -> (B, 64, T, H', W') after the 3D stem.
        x = self.frontend3D(x)
        # Fold time into the batch axis so the 2D ResNet sees (B*T, 64, H', W').
        x = x.transpose(1, 2)
        x = x.contiguous()
        x = x.view(-1, 64, x.size(3), x.size(4))
        # x = self.resnet34(x)
        x = self.resnet18(x)
        # NOTE: intentionally returns per-frame features here; the
        # mode-dependent back-ends below are disabled (commented out).
        return x
        # if self.mode == 'temporalConv':
        #     x = x.view(-1, self.frameLen, self.inputDim)
        #     x = x.transpose(1, 2)
        #     x = self.backend_conv1(x)
        #     x = torch.mean(x, 2)
        #     x = self.backend_conv2(x)
        # elif self.mode == 'backendGRU' or self.mode == 'finetuneGRU':
        #     x = x.view(-1, self.frameLen, self.inputDim)
        #     x = self.gru(x)
        # else:
        #     raise Exception('No model is selected')
        # return x

    def _initialize_weights(self):
        # He-style (fan-out) normal init for all convs; BatchNorm layers
        # start as identity (weight 1, bias 0).
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                n = (
                    m.kernel_size[0]
                    * m.kernel_size[1]
                    * m.kernel_size[2]
                    * m.out_channels
                )
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Conv1d):
                n = m.kernel_size[0] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def lipreading(
    mode, inputDim=256, hiddenDim=512, nClasses=500, frameLen=29, every_frame=True
):
    """Factory returning a :class:`Lipreading` model with the given config."""
    return Lipreading(
        mode,
        inputDim=inputDim,
        hiddenDim=hiddenDim,
        nClasses=nClasses,
        frameLen=frameLen,
        every_frame=every_frame,
    )
| 8,286 | 31.498039 | 88 | py |
espnet | espnet-master/egs2/wsj0_2mix/tse1/local/prepare_spk_embs_scp.py | from functools import partial
from pathlib import Path
import numpy as np
import onnxruntime as ort
import torch
import torchaudio
import torchaudio.compliance.kaldi as kaldi
from tqdm.contrib.concurrent import thread_map
def compute_fbank(
    wav_path, num_mel_bins=80, frame_length=25, frame_shift=10, dither=0.0
):
    """Load a wav file and return mean-normalized Kaldi fbank features.

    Similar to the one in wespeaker.dataset.processor, while integrating
    the wave reading, resampling to 16 kHz and CMN.

    :param wav_path: path to the input audio file
    :param num_mel_bins: number of mel filterbank bins
    :param frame_length: frame length in milliseconds
    :param frame_shift: frame shift in milliseconds
    :param dither: dithering constant (0.0 disables dithering)
    :return: (num_frames, num_mel_bins) tensor, mean-normalized over time
        (CMN, without CVN)
    """
    waveform, sample_rate = torchaudio.load(wav_path)
    if sample_rate != 16000:
        waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)
        # BUG FIX: after resampling the effective rate is 16 kHz. The
        # original kept passing the *source* rate as sample_frequency to
        # kaldi.fbank, which mis-sized the frames for non-16kHz inputs.
        sample_rate = 16000
    # Scale to the int16 range expected by Kaldi-style feature extraction.
    waveform = waveform * (1 << 15)
    mat = kaldi.fbank(
        waveform,
        num_mel_bins=num_mel_bins,
        frame_length=frame_length,
        frame_shift=frame_shift,
        dither=dither,
        sample_frequency=sample_rate,
        window_type="hamming",
        use_energy=False,
    )
    # CMN, without CVN
    mat = mat - torch.mean(mat, dim=0)
    return mat
def worker(uid_path, session, outdir):
    """Compute the speaker embedding for one utterance and save it as .npy.

    :param uid_path: (uid, wav_path) tuple
    :param session: ONNX inference session exposing an "embs" output
    :param outdir: directory where the .npy file is written
    :return: the "<uid> <npy_path>\\n" line for embs.scp
    """
    uid, wav_path = uid_path
    out_root = Path(outdir).absolute()
    # (1, num_frames, num_mel_bins) input for the ONNX embedding model.
    feats = compute_fbank(wav_path).unsqueeze(0).numpy()
    embeddings = session.run(output_names=["embs"], input_feed={"feats": feats})
    npy_path = str(out_root / f"{Path(wav_path).stem}.npy")
    np.save(npy_path, np.squeeze(embeddings[0]))
    return f"{uid} {npy_path}\n"
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("scp", type=str, help="scp file containing paths to utterances")
    parser.add_argument(
        "--onnx_path",
        type=str,
        required=True,
        help="Path to the pretrained model in ONNX format",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="Path to the output directory"
    )
    parser.add_argument(
        "--max_workers",
        type=int,
        default=8,
        help="Maximum number of workers to process audio files in parallel",
    )
    parser.add_argument(
        "--max_chunksize",
        type=int,
        default=1000,
        help="Maximum size of chunks sent to worker processes",
    )
    args = parser.parse_args()

    # Single-threaded ONNX session; parallelism comes from the thread pool
    # (thread_map) below instead.
    so = ort.SessionOptions()
    so.inter_op_num_threads = 1
    so.intra_op_num_threads = 1
    session = ort.InferenceSession(args.onnx_path, sess_options=so)

    Path(args.outdir).mkdir(parents=True, exist_ok=True)

    # Parse "<uid> <wav_path>" lines from the scp file, skipping blank lines.
    tup = []
    with open(args.scp, "r") as f:
        for line in f:
            if not line.strip():
                continue
            uid, path = line.strip().split(maxsplit=1)
            tup.append((uid, path))

    # List[str]
    # Each worker writes one .npy embedding and returns its embs.scp line.
    ret = thread_map(
        partial(worker, session=session, outdir=args.outdir),
        tup,
        max_workers=args.max_workers,
        chunksize=args.max_chunksize,
    )

    with open(f"{args.outdir}/embs.scp", "w") as f:
        for line in ret:
            f.write(line)
| 2,982 | 27.961165 | 88 | py |
espnet | espnet-master/egs2/l3das22/enh1/local/metric.py | # The implementation of the metric for L3DAS22 in
# Guizzo. et al. "L3DAS22 Challenge: Learning 3D Audio
# Sources in a Real Office Environment"
# The code is based on:
# https://github.com/l3das/L3DAS22/blob/main/metrics.py
import argparse
import os
import sys
import warnings
import jiwer
import numpy as np
import soundfile as sf
import torch
import torchaudio
import transformers
from pystoi import stoi
from tqdm import tqdm
from transformers import Wav2Vec2ForMaskedLM, Wav2Vec2Tokenizer
# TASK 1 METRICS
# Silence transformers' FutureWarnings and info-level logging at import time.
warnings.filterwarnings("ignore", category=FutureWarning)
transformers.logging.set_verbosity_error()
# Module-level wav2vec2 ASR model/tokenizer used for WER scoring
# (downloaded from the Hugging Face hub on first use).
wer_tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
wer_model = Wav2Vec2ForMaskedLM.from_pretrained("facebook/wav2vec2-base-960h")
def wer(clean_speech, denoised_speech):
    """
    computes the word error rate(WER) score for 1 single data point

    Both signals are transcribed with the module-level wav2vec2 model and
    the WER between the two transcripts is returned. Returns None when
    jiwer cannot compute a score (e.g. empty reference transcript).
    """

    def _asr(waveform):
        # Greedy CTC decoding with the module-level wav2vec2 model.
        input_values = wer_tokenizer(waveform, return_tensors="pt").input_values
        logits = wer_model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        return wer_tokenizer.batch_decode(predicted_ids)[0]

    # Transcribe the clean reference first, then the denoised estimate.
    transcript_clean = _asr(clean_speech)
    transcript_estimate = _asr(denoised_speech)
    try:  # if no words are predicted
        return jiwer.wer(transcript_clean, transcript_estimate)
    except ValueError:
        return None
def task1_metric(clean_speech, denoised_speech, sr=16000):
    """
    Compute evaluation metric for task 1 as (stoi+(1-word error rate)/2)
    This function computes such measure for 1 single datapoint

    :return: (metric, wer, stoi) -- all None if no speech was detected
    """
    word_err = wer(clean_speech, denoised_speech)
    if word_err is None:  # no speech in the segment
        return None, None, None
    intel = stoi(clean_speech, denoised_speech, sr, extended=False)
    # Clamp both components to [0, 1] before combining.
    word_err = np.clip(word_err, 0.0, 1.0)
    intel = np.clip(intel, 0.0, 1.0)
    metric = (intel + (1.0 - word_err)) / 2.0
    return metric, word_err, intel
def main(args):
    """Score enhanced speech against references with the Task-1 metric.

    Both inputs are scp-style text files of "<uid> <wav_path>" lines; the
    two lists must be in the same order (checked per line by uid).
    """
    # LOAD DATASET
    enh = []
    with open(args.predicted_path, "r") as f:
        for line in f.readlines():
            enh.append(line.split())
    ref = []
    with open(args.target_path, "r") as f:
        for line in f.readlines():
            ref.append(line.split())
    print("COMPUTING TASK 1 METRICS")
    print("M: Final Task 1 metric")
    print("W: Word Error Rate")
    print("S: Stoi")
    # Running means, updated incrementally below.
    WER = 0.0
    STOI = 0.0
    METRIC = 0.0
    count = 0
    with tqdm(total=len(ref)) as pbar:
        for example_num, (key, ref_wav) in enumerate(ref):
            assert key == enh[example_num][0]
            target, sr = torchaudio.load(ref_wav)
            outputs, sr = torchaudio.load(enh[example_num][1])
            metric, wer, stoi = task1_metric(target.squeeze(0), outputs.squeeze(0), sr)
            if metric is not None:
                # Incremental running mean: m += (x - m) / (i + 1).
                # NOTE(review): the denominator uses example_num + 1 even
                # though frames with no voice activity are skipped, so the
                # averages are biased whenever metric is None for some frame
                # (``count`` tracks processed frames but is never used) —
                # confirm against the upstream L3DAS22 reference code.
                METRIC += (1.0 / float(example_num + 1)) * (metric - METRIC)
                WER += (1.0 / float(example_num + 1)) * (wer - WER)
                STOI += (1.0 / float(example_num + 1)) * (stoi - STOI)
            else:
                print("No voice activity on this frame")
            pbar.set_description(
                "M:"
                + str(np.round(METRIC, decimals=3))
                + ", W:"
                + str(np.round(WER, decimals=3))
                + ", S: "
                + str(np.round(STOI, decimals=3))
            )
            pbar.update(1)
            count += 1
    # print the results
    results = {"word error rate": WER, "stoi": STOI, "task 1 metric": METRIC}
    print("*******************************")
    print("RESULTS")
    for i in results:
        print(i, results[i])
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # dataset parameters
    # --predicted_path: scp list of enhanced wavs; --target_path: scp list
    # of the matching clean references (same order, same uids).
    parser.add_argument("--predicted_path", type=str, default="")
    parser.add_argument("--target_path", type=str, default="")
    args = parser.parse_args()
    main(args)
| 4,378 | 31.198529 | 87 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.