repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
espnet | espnet-master/espnet2/enh/layers/beamformer.py | """Beamformer module."""
from typing import List, Union
import torch
from packaging.version import parse as V
from torch_complex import functional as FC
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import (
cat,
complex_norm,
einsum,
inverse,
is_complex,
is_torch_complex_tensor,
matmul,
reverse,
solve,
to_double,
)
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
EPS = torch.finfo(torch.double).eps
def prepare_beamformer_stats(
    signal,
    masks_speech,
    mask_noise,
    powers=None,
    beamformer_type="mvdr",
    bdelay=3,
    btaps=5,
    eps=1e-6,
):
    """Prepare necessary statistics for constructing the specified beamformer.

    Args:
        signal (torch.complex64/ComplexTensor): (..., F, C, T)
        masks_speech (List[torch.Tensor]): (..., F, C, T) masks for all speech sources
        mask_noise (torch.Tensor): (..., F, C, T) noise mask
        powers (List[torch.Tensor]): powers for all speech sources (..., F, T)
            used for wMPDR or WPD beamformers
        beamformer_type (str): one of the pre-defined beamformer types
        bdelay (int): delay factor, used for WPD beamformer
        btaps (int): number of filter taps, used for WPD beamformer
        eps (float): tiny constant
    Returns:
        beamformer_stats (dict): a dictionary containing all necessary statistics
            e.g. "psd_n", "psd_speech", "psd_distortion"
    Note:
        * When `masks_speech` is a tensor or a single-element list, all returned
          statistics are tensors;
        * When `masks_speech` is a multi-element list, some returned statistics
          can be a list, e.g., "psd_n" for MVDR, "psd_speech" and "psd_distortion".
    """
    # Imported lazily to avoid a circular import with dnn_beamformer.
    from espnet2.enh.layers.dnn_beamformer import BEAMFORMER_TYPES

    # FIX: the original message ("%s is not supported yet") was never
    # interpolated, so the offending type was not shown on failure.
    assert beamformer_type in BEAMFORMER_TYPES, (
        "%s is not supported yet" % beamformer_type
    )

    if isinstance(masks_speech, (list, tuple)):
        masks_speech = [to_double(m) for m in masks_speech]
    else:
        masks_speech = [to_double(masks_speech)]
    num_spk = len(masks_speech)

    if (
        beamformer_type.startswith("wmpdr")
        or beamformer_type.startswith("wpd")
        or beamformer_type == "wlcmp"
        or beamformer_type == "wmwf"
    ):
        # Power-weighted beamformers need per-source time-varying powers.
        if powers is None:
            power_input = signal.real**2 + signal.imag**2
            # Averaging along the channel axis: (..., C, T) -> (..., T)
            powers = [(power_input * m).mean(dim=-2) for m in masks_speech]
        else:
            assert len(powers) == num_spk, (len(powers), num_spk)
        inverse_powers = [1 / torch.clamp(p, min=eps) for p in powers]

    psd_speeches = [get_power_spectral_density_matrix(signal, m) for m in masks_speech]
    if (
        beamformer_type == "mvdr_souden"
        or beamformer_type == "sdw_mwf"
        or beamformer_type == "r1mwf"
        or beamformer_type.startswith("mvdr_tfs")
        or not beamformer_type.endswith("_souden")
    ):
        # MVDR or other RTF-based formulas
        if mask_noise is not None:
            psd_bg = get_power_spectral_density_matrix(signal, to_double(mask_noise))
        if num_spk == 1:
            assert mask_noise is not None
            psd_noise = psd_bg
        else:
            # For each speaker, treat all other speakers (plus the background
            # noise, if a noise mask is given) as interference.
            psd_noise = []
            for i in range(num_spk):
                if beamformer_type.startswith("mvdr_tfs"):
                    # NOTE: psd_noise is a list only for this beamformer
                    psd_noise_i = [psd for j, psd in enumerate(psd_speeches) if j != i]
                else:
                    psd_sum = sum(psd for j, psd in enumerate(psd_speeches) if j != i)
                    psd_noise_i = (
                        psd_bg + psd_sum if mask_noise is not None else psd_sum
                    )
                psd_noise.append(psd_noise_i)

    if beamformer_type in (
        "mvdr",
        "mvdr_souden",
        "mvdr_tfs_souden",
        "sdw_mwf",
        "r1mwf",
        "lcmv",
        "gev",
        "gev_ban",
    ):
        psd_n = psd_noise
    elif beamformer_type == "mvdr_tfs":
        psd_n = psd_noise
        psd_noise = [sum(psd_noise_i) for psd_noise_i in psd_noise]
    elif beamformer_type in ("mpdr", "mpdr_souden", "lcmp", "mwf"):
        # Observation covariance matrix: (..., F, C, C)
        psd_n = einsum("...ct,...et->...ce", signal, signal.conj())
    elif beamformer_type in ("wmpdr", "wmpdr_souden", "wlcmp", "wmwf"):
        # Power-normalized observation covariance, one per speaker.
        psd_n = [
            einsum(
                "...ct,...et->...ce",
                signal * inv_p[..., None, :],
                signal.conj(),
            )
            for inv_p in inverse_powers
        ]
    elif beamformer_type in ("wpd", "wpd_souden"):
        # Spatio-temporal covariance of the framed signal, one per speaker.
        psd_n = [
            get_covariances(signal, inv_p, bdelay, btaps, get_vector=False)
            for inv_p in inverse_powers
        ]

    # Collapse single-speaker lists to plain tensors.
    if num_spk == 1:
        psd_speeches = psd_speeches[0]
        if isinstance(psd_n, (list, tuple)):
            psd_n = psd_n[0]

    if beamformer_type in (
        "mvdr",
        "mpdr",
        "wmpdr",
        "wpd",
        "lcmp",
        "wlcmp",
        "lcmv",
        "mvdr_tfs",
    ):
        return {"psd_n": psd_n, "psd_speech": psd_speeches, "psd_distortion": psd_noise}
    elif (
        beamformer_type.endswith("_souden")
        or beamformer_type.startswith("gev")
        or beamformer_type == "mwf"
        or beamformer_type == "wmwf"
        or beamformer_type == "sdw_mwf"
        or beamformer_type == "r1mwf"
    ):
        return {"psd_n": psd_n, "psd_speech": psd_speeches}
def get_power_spectral_density_matrix(
    xs, mask, normalization=True, reduction="mean", eps: float = 1e-15
):
    """Return cross-channel power spectral density (PSD) matrix

    Args:
        xs (torch.complex64/ComplexTensor): (..., F, C, T)
        mask (torch.Tensor): (..., F, C, T)
        normalization (bool): whether to normalize the mask along the time axis
        reduction (str): "mean" or "median"
        eps (float): tiny constant to avoid division by zero
    Returns
        psd (torch.complex64/ComplexTensor): (..., F, C, C)
    """
    if reduction == "mean":
        # Averaging mask along C: (..., C, T) -> (..., 1, T)
        mask = mask.mean(dim=-2, keepdim=True)
    elif reduction == "median":
        # FIX: torch.Tensor.median(dim=...) returns a (values, indices)
        # namedtuple; the original code assigned the namedtuple itself,
        # which crashes in the arithmetic below. Keep only the values.
        mask = mask.median(dim=-2, keepdim=True).values
    else:
        raise ValueError("Unknown reduction mode: %s" % reduction)
    # Normalized mask along T: (..., T)
    if normalization:
        # If assuming the tensor is padded with zero, the summation along
        # the time axis is same regardless of the padding length.
        mask = mask / (mask.sum(dim=-1, keepdim=True) + eps)
    # outer product: (..., C_1, T) x (..., C_2, T) -> (..., C, C_2)
    psd = einsum("...ct,...et->...ce", xs * mask, xs.conj())
    return psd
def get_rtf(
    psd_speech,
    psd_noise,
    mode="power",
    reference_vector: Union[int, torch.Tensor] = 0,
    iterations: int = 3,
):
    """Estimate the relative transfer function (RTF).

    Power-method algorithm:
        1) rtf = reference_vector
        2) for i in range(iterations):
            rtf = (psd_noise^-1 @ psd_speech) @ rtf
            rtf = rtf / ||rtf||_2  # this normalization can be skipped
        3) rtf = psd_noise @ rtf
        4) rtf = rtf / rtf[..., ref_channel, :]
    Note: step 4) (normalization at the reference channel) is NOT performed here.

    Args:
        psd_speech (torch.complex64/ComplexTensor):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64/ComplexTensor):
            noise covariance matrix (..., F, C, C)
        mode (str): one of ("power", "evd")
            "power": power method
            "evd": eigenvalue decomposition
        reference_vector (torch.Tensor or int): (..., C) or scalar
        iterations (int): number of iterations in power method
    Returns:
        rtf (torch.complex64/ComplexTensor): (..., F, C, 1)
    """
    if mode == "power":
        # phi = psd_noise^-1 @ psd_speech
        phi = solve(psd_speech, psd_noise)
        if isinstance(reference_vector, int):
            rtf = phi[..., reference_vector, None]
        else:
            rtf = matmul(phi, reference_vector[..., None, :, None])
        for _ in range(iterations - 2):
            rtf = matmul(phi, rtf)
            # optional normalization (skipped):
            # rtf = rtf / complex_norm(rtf, dim=-1, keepdim=True)
        # final step: psd_speech @ rtf == psd_noise @ phi @ rtf
        return matmul(psd_speech, rtf)
    if mode == "evd":
        # Exact solution via generalized EVD; only for native complex tensors.
        assert (
            is_torch_1_9_plus
            and is_torch_complex_tensor(psd_speech)
            and is_torch_complex_tensor(psd_noise)
        )
        e_vec = generalized_eigenvalue_decomposition(psd_speech, psd_noise)[1]
        return matmul(psd_noise, e_vec[..., -1, None])
    raise ValueError("Unknown mode: %s" % mode)
def get_mvdr_vector(
    psd_s,
    psd_n,
    reference_vector: torch.Tensor,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Return the MVDR (Minimum Variance Distortionless Response) vector:

        h = (Npsd^-1 @ Spsd) / (Tr(Npsd^-1 @ Spsd)) @ u

    Reference:
        On optimal frequency-domain multichannel linear filtering
        for noise reduction; M. Souden et al., 2010;
        https://ieeexplore.ieee.org/document/5089420

    Args:
        psd_s (torch.complex64/ComplexTensor):
            speech covariance matrix (..., F, C, C)
        psd_n (torch.complex64/ComplexTensor):
            observation/noise covariance matrix (..., F, C, C)
        reference_vector (torch.Tensor): (..., C)
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64/ComplexTensor): (..., F, C)
    """  # noqa: D400
    if diagonal_loading:
        psd_n = tik_reg(psd_n, reg=diag_eps, eps=eps)
    # num = psd_n^-1 @ psd_s: (..., C, C)
    num = solve(psd_s, psd_n)
    # Trace-normalize; FC.trace supports batched matrices, unlike
    # torch.trace before PyTorch 1.9.0.
    trace_norm = FC.trace(num)[..., None, None] + eps
    ws = num / trace_norm
    # (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
    return einsum("...fec,...c->...fe", ws, reference_vector)
def get_mvdr_vector_with_rtf(
    psd_n: Union[torch.Tensor, ComplexTensor],
    psd_speech: Union[torch.Tensor, ComplexTensor],
    psd_noise: Union[torch.Tensor, ComplexTensor],
    iterations: int = 3,
    reference_vector: Union[int, torch.Tensor, None] = None,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> Union[torch.Tensor, ComplexTensor]:
    """Return the MVDR vector computed from an estimated RTF:

        h = (Npsd^-1 @ rtf) / (rtf^H @ Npsd^-1 @ rtf)

    Reference:
        On optimal frequency-domain multichannel linear filtering
        for noise reduction; M. Souden et al., 2010;
        https://ieeexplore.ieee.org/document/5089420

    Args:
        psd_n (torch.complex64/ComplexTensor):
            observation/noise covariance matrix (..., F, C, C)
        psd_speech (torch.complex64/ComplexTensor):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64/ComplexTensor):
            noise covariance matrix (..., F, C, C)
        iterations (int): number of iterations in power method
        reference_vector (torch.Tensor or int): (..., C) or scalar
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64/ComplexTensor): (..., F, C)
    """  # noqa: H405, D205, D400
    if diagonal_loading:
        psd_noise = tik_reg(psd_noise, reg=diag_eps, eps=eps)

    # Estimate the RTF via the power method: (B, F, C, 1)
    rtf = get_rtf(
        psd_speech,
        psd_noise,
        mode="power",
        reference_vector=reference_vector,
        iterations=iterations,
    )
    # numerator = psd_n^-1 @ rtf: (..., C, 1) -> (..., C)
    numerator = solve(rtf, psd_n).squeeze(-1)
    rtf_vec = rtf.squeeze(-1)
    # denominator = rtf^H @ psd_n^-1 @ rtf: (...,)
    denominator = einsum("...d,...d->...", rtf_vec.conj(), numerator)
    denom = denominator.real.unsqueeze(-1) + eps
    if reference_vector is None:
        return numerator / denom
    # Rescale so that the response at the reference channel is distortionless.
    if isinstance(reference_vector, int):
        scale = rtf_vec[..., reference_vector, None].conj()
    else:
        scale = (rtf_vec.conj() * reference_vector[..., None, :]).sum(
            dim=-1, keepdim=True
        )
    return numerator * scale / denom
def apply_beamforming_vector(
    beamform_vector: Union[torch.Tensor, ComplexTensor],
    mix: Union[torch.Tensor, ComplexTensor],
) -> Union[torch.Tensor, ComplexTensor]:
    """Apply a beamforming filter to the multi-channel spectrum.

    Args:
        beamform_vector: filter coefficients (..., C)
        mix: multi-channel observation (..., C, T)
    Returns:
        enhanced single-channel spectrum (..., T)
    """
    # Conjugate-weighted sum over channels: (..., C) x (..., C, T) -> (..., T)
    return einsum("...c,...ct->...t", beamform_vector.conj(), mix)
def get_mwf_vector(
    psd_s,
    psd_n,
    reference_vector: Union[torch.Tensor, int],
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Return the MWF (Minimum Multi-channel Wiener Filter) vector:

        h = (Npsd^-1 @ Spsd) @ u

    Args:
        psd_s (torch.complex64/ComplexTensor):
            speech covariance matrix (..., F, C, C)
        psd_n (torch.complex64/ComplexTensor):
            power-normalized observation covariance matrix (..., F, C, C)
        reference_vector (torch.Tensor or int): (..., C) or scalar
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64/ComplexTensor): (..., F, C)
    """  # noqa: D400
    if diagonal_loading:
        psd_n = tik_reg(psd_n, reg=diag_eps, eps=eps)
    # ws = psd_n^-1 @ psd_s: (..., F, C, C)
    ws = solve(psd_s, psd_n)
    # Select the reference channel / project onto the reference vector:
    # (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
    if isinstance(reference_vector, int):
        return ws[..., reference_vector]
    return einsum("...fec,...c->...fe", ws, reference_vector)
def get_sdw_mwf_vector(
    psd_speech,
    psd_noise,
    reference_vector: Union[torch.Tensor, int],
    denoising_weight: float = 1.0,
    approx_low_rank_psd_speech: bool = False,
    iterations: int = 3,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Return the SDW-MWF (Speech Distortion Weighted Multi-channel Wiener Filter) vector

        h = (Spsd + mu * Npsd)^-1 @ Spsd @ u

    Reference:
        [1] Spatially pre-processed speech distortion weighted multi-channel Wiener
        filtering for noise reduction; A. Spriet et al, 2004
        https://dl.acm.org/doi/abs/10.1016/j.sigpro.2004.07.028
        [2] Rank-1 constrained multichannel Wiener filter for speech recognition in
        noisy environments; Z. Wang et al, 2018
        https://hal.inria.fr/hal-01634449/document
        [3] Low-rank approximation based multichannel Wiener filter algorithms for
        noise reduction with application in cochlear implants; R. Serizel, 2014
        https://ieeexplore.ieee.org/document/6730918

    Args:
        psd_speech (torch.complex64/ComplexTensor):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64/ComplexTensor):
            noise covariance matrix (..., F, C, C)
        reference_vector (torch.Tensor or int): (..., C) or scalar
        denoising_weight (float): a trade-off parameter between noise reduction and
            speech distortion.
            A larger value leads to more noise reduction at the expense of more speech
            distortion.
            The plain MWF is obtained with `denoising_weight = 1` (by default).
        approx_low_rank_psd_speech (bool): whether to replace original input psd_speech
            with its low-rank approximation as in [2]
        iterations (int): number of iterations in power method, only used when
            `approx_low_rank_psd_speech = True`
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64/ComplexTensor): (..., F, C)
    """  # noqa: H405, D205, D400, E501
    if approx_low_rank_psd_speech:
        if diagonal_loading:
            psd_noise = tik_reg(psd_noise, reg=diag_eps, eps=eps)
        # Rank-1 reconstruction vector via the power method: (B, F, C, 1)
        recon_vec = get_rtf(
            psd_speech,
            psd_noise,
            mode="power",
            iterations=iterations,
            reference_vector=reference_vector,
        )
        # Eq. (25) in Ref[2]: outer product, rescaled to preserve the trace
        rank1 = matmul(recon_vec, recon_vec.conj().transpose(-1, -2))
        scaling = FC.trace(psd_speech) / (FC.trace(rank1) + eps)
        # c.f. Eq. (62) in Ref[3]
        psd_speech = rank1 * scaling[..., None, None]

    # (Spsd + mu * Npsd)
    psd_n = psd_speech + denoising_weight * psd_noise
    if diagonal_loading:
        psd_n = tik_reg(psd_n, reg=diag_eps, eps=eps)
    ws = solve(psd_speech, psd_n)
    if isinstance(reference_vector, int):
        return ws[..., reference_vector]
    return einsum("...fec,...c->...fe", ws, reference_vector)
def get_rank1_mwf_vector(
    psd_speech,
    psd_noise,
    reference_vector: Union[torch.Tensor, int],
    denoising_weight: float = 1.0,
    approx_low_rank_psd_speech: bool = False,
    iterations: int = 3,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Return the R1-MWF (Rank-1 Multi-channel Wiener Filter) vector

        h = (Npsd^-1 @ Spsd) / (mu + Tr(Npsd^-1 @ Spsd)) @ u

    Reference:
        [1] Rank-1 constrained multichannel Wiener filter for speech recognition in
        noisy environments; Z. Wang et al, 2018
        https://hal.inria.fr/hal-01634449/document
        [2] Low-rank approximation based multichannel Wiener filter algorithms for
        noise reduction with application in cochlear implants; R. Serizel, 2014
        https://ieeexplore.ieee.org/document/6730918

    Args:
        psd_speech (torch.complex64/ComplexTensor):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64/ComplexTensor):
            noise covariance matrix (..., F, C, C)
        reference_vector (torch.Tensor or int): (..., C) or scalar
        denoising_weight (float): a trade-off parameter between noise reduction and
            speech distortion.
            A larger value leads to more noise reduction at the expense of more speech
            distortion.
            When `denoising_weight = 0`, it corresponds to MVDR beamformer.
        approx_low_rank_psd_speech (bool): whether to replace original input psd_speech
            with its low-rank approximation as in [1]
        iterations (int): number of iterations in power method, only used when
            `approx_low_rank_psd_speech = True`
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64/ComplexTensor): (..., F, C)
    """  # noqa: H405, D205, D400
    if approx_low_rank_psd_speech:
        if diagonal_loading:
            psd_noise = tik_reg(psd_noise, reg=diag_eps, eps=eps)
        # Rank-1 reconstruction vector via the power method: (B, F, C, 1)
        recon_vec = get_rtf(
            psd_speech,
            psd_noise,
            mode="power",
            iterations=iterations,
            reference_vector=reference_vector,
        )
        # Eq. (25) in Ref[1]: outer product, rescaled to preserve the trace
        rank1 = matmul(recon_vec, recon_vec.conj().transpose(-1, -2))
        scaling = FC.trace(psd_speech) / (FC.trace(rank1) + eps)
        # c.f. Eq. (62) in Ref[2]
        psd_speech = rank1 * scaling[..., None, None]
    elif diagonal_loading:
        psd_noise = tik_reg(psd_noise, reg=diag_eps, eps=eps)

    # num = psd_noise^-1 @ psd_speech: (..., C, C)
    num = solve(psd_speech, psd_noise)
    # Normalize by (mu + trace); FC.trace supports batched matrices,
    # unlike torch.trace before PyTorch 1.9.0.
    ws = num / (denoising_weight + FC.trace(num)[..., None, None] + eps)
    # (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
    if isinstance(reference_vector, int):
        return ws[..., reference_vector]
    return einsum("...fec,...c->...fe", ws, reference_vector)
def get_rtf_matrix(
    psd_speeches,
    psd_noises,
    diagonal_loading: bool = True,
    ref_channel: int = 0,
    rtf_iterations: int = 3,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
):
    """Calculate the RTF matrix with each column the relative transfer function
    of the corresponding source.
    """  # noqa: H405
    assert isinstance(psd_speeches, list) and isinstance(psd_noises, list)
    rtfs = []
    for spk, psd_n in enumerate(psd_noises):
        # Optionally regularize the per-speaker noise covariance.
        psd_n_reg = (
            tik_reg(psd_n, reg=diag_eps, eps=eps) if diagonal_loading else psd_n
        )
        rtfs.append(
            get_rtf(
                psd_speeches[spk],
                psd_n_reg,
                mode="power",
                reference_vector=ref_channel,
                iterations=rtf_iterations,
            )
        )
    # Stack per-speaker RTFs into columns: (..., F, C, num_spk)
    rtf_mat = cat(rtfs, dim=-1)
    # normalize at the reference channel
    return rtf_mat / rtf_mat[..., ref_channel, None, :]
def get_lcmv_vector_with_rtf(
    psd_n: Union[torch.Tensor, ComplexTensor],
    rtf_mat: Union[torch.Tensor, ComplexTensor],
    reference_vector: Union[int, torch.Tensor, None] = None,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> Union[torch.Tensor, ComplexTensor]:
    """Return the LCMV (Linearly Constrained Minimum Variance) vector
    calculated with RTF:

        h = (Npsd^-1 @ rtf_mat) @ (rtf_mat^H @ Npsd^-1 @ rtf_mat)^-1 @ p

    Reference:
        H. L. Van Trees, “Optimum array processing: Part IV of detection, estimation,
        and modulation theory,” John Wiley & Sons, 2004. (Chapter 6.7)

    Args:
        psd_n (torch.complex64/ComplexTensor):
            observation/noise covariance matrix (..., F, C, C)
        rtf_mat (torch.complex64/ComplexTensor):
            RTF matrix (..., F, C, num_spk)
        reference_vector (torch.Tensor or int): (..., num_spk) or scalar
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64/ComplexTensor): (..., F, C)
    """  # noqa: H405, D205, D400
    if diagonal_loading:
        psd_n = tik_reg(psd_n, reg=diag_eps, eps=eps)
    # phi = psd_n^-1 @ rtf_mat: (..., C, num_spk)
    phi = solve(rtf_mat, psd_n)
    # gram = rtf_mat^H @ psd_n^-1 @ rtf_mat: (..., num_spk, num_spk)
    gram = matmul(rtf_mat.conj().transpose(-1, -2), phi)
    # Weights selecting the target source among the constraints.
    if isinstance(reference_vector, int):
        weights = inverse(gram)[..., reference_vector, None]
    else:
        weights = solve(reference_vector, gram)
    return matmul(phi, weights).squeeze(-1)
def generalized_eigenvalue_decomposition(a: torch.Tensor, b: torch.Tensor, eps=1e-6):
    """Solve the generalized eigenvalue problem via Cholesky decomposition.

    ported from https://github.com/asteroid-team/asteroid/blob/master/asteroid/dsp/beamforming.py#L464

        a @ e_vec = e_val * b @ e_vec
        |
        | Cholesky decomposition on `b`:
        |     b = L @ L^H, where `L` is a lower triangular matrix
        |
        | Let C = L^-1 @ a @ L^-H, which is Hermitian.
        |
        => C @ y = lambda * y
        => e_vec = L^-H @ y

    Reference: https://www.netlib.org/lapack/lug/node54.html

    Args:
        a: A complex Hermitian or real symmetric matrix whose eigenvalues and
            eigenvectors will be computed. (..., C, C)
        b: A complex Hermitian or real symmetric definite positive matrix. (..., C, C)
    Returns:
        e_val: generalized eigenvalues (ascending order)
        e_vec: generalized eigenvectors
    """  # noqa: H405, E501
    try:
        chol = torch.linalg.cholesky(b)
    except RuntimeError:
        # `b` is not positive definite; regularize its diagonal and retry.
        chol = torch.linalg.cholesky(tik_reg(b, reg=eps, eps=eps))
    chol_inv = chol.inverse()
    chol_inv_h = chol_inv.conj().transpose(-1, -2)
    # Hermitian matrix C = L^-1 @ a @ L^-H shares eigenvalues with the GEVD.
    cmat = chol_inv @ a @ chol_inv_h
    e_val, y = torch.linalg.eigh(cmat)
    # Map the eigenvectors of C back: e_vec = L^-H @ y
    e_vec = torch.matmul(chol_inv_h, y)
    return e_val, e_vec
def gev_phase_correction(vector):
    """Phase correction to reduce distortions due to phase inconsistencies.

    ported from https://github.com/fgnt/nn-gev/blob/master/fgnt/beamforming.py#L169

    Each frequency bin is rotated by exp(-1j * angle) so that its phase is
    aligned with the neighboring bin (for f == 0, Python's negative indexing
    compares against the last bin).

    Args:
        vector: Beamforming vector with shape (B, F, C)
    Returns:
        w: Phase corrected beamforming vectors
    """
    B, F, C = vector.shape
    correction = torch.empty_like(vector.real)
    for f in range(F):
        # Inter-bin phase difference, aggregated over channels.
        # FIX: the previous code stored torch.exp(angle) here, so the final
        # rotation was exp(-1j * exp(angle)) instead of exp(-1j * angle) as
        # in the reference nn-gev implementation.
        correction[:, f, :] = (
            (vector[:, f, :] * vector[:, f - 1, :].conj())
            .sum(dim=-1, keepdim=True)
            .angle()
        )
    if isinstance(vector, ComplexTensor):
        # cos(angle) - 1j*sin(angle) == exp(-1j * angle)
        correction = ComplexTensor(torch.cos(correction), -torch.sin(correction))
    else:
        correction = torch.exp(-1j * correction)
    return vector * correction
def blind_analytic_normalization(ws, psd_noise, eps=1e-8):
    """Blind analytic normalization (BAN) for post-filtering

    Args:
        ws (torch.complex64/ComplexTensor): beamformer vector (..., F, C)
        psd_noise (torch.complex64/ComplexTensor): noise PSD matrix (..., F, C, C)
        eps (float): tiny constant to avoid division by zero
    Returns:
        ws_ban (torch.complex64/ComplexTensor): normalized beamformer vector (..., F)
    """
    num_channels_sq = psd_noise.size(-1) ** 2
    # w^H @ Phi @ Phi @ w: (..., F)
    nominator = einsum(
        "...c,...ce,...eo,...o->...", ws.conj(), psd_noise, psd_noise, ws
    )
    # w^H @ Phi @ w: (..., F)
    denom = einsum("...c,...ce,...e->...", ws.conj(), psd_noise, ws)
    return (nominator + eps).sqrt() / (denom * num_channels_sq + eps)
def get_gev_vector(
    psd_noise: Union[torch.Tensor, ComplexTensor],
    psd_speech: Union[torch.Tensor, ComplexTensor],
    mode="power",
    reference_vector: Union[int, torch.Tensor] = 0,
    iterations: int = 3,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> Union[torch.Tensor, ComplexTensor]:
    """Return the generalized eigenvalue (GEV) beamformer vector:

        psd_speech @ h = lambda * psd_noise @ h

    Reference:
        Blind acoustic beamforming based on generalized eigenvalue decomposition;
        E. Warsitz and R. Haeb-Umbach, 2007.

    Args:
        psd_noise (torch.complex64/ComplexTensor):
            noise covariance matrix (..., F, C, C)
        psd_speech (torch.complex64/ComplexTensor):
            speech covariance matrix (..., F, C, C)
        mode (str): one of ("power", "evd")
            "power": power method
            "evd": eigenvalue decomposition (only for torch builtin complex tensors)
        reference_vector (torch.Tensor or int): (..., C) or scalar
        iterations (int): number of iterations in power method
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of psd_n
        diag_eps (float):
        eps (float):
    Returns:
        beamform_vector (torch.complex64/ComplexTensor): (..., F, C)
    """  # noqa: H405, D205, D400
    if diagonal_loading:
        psd_noise = tik_reg(psd_noise, reg=diag_eps, eps=eps)
    if mode == "power":
        # Power iteration on phi = psd_noise^-1 @ psd_speech; its dominant
        # eigenvector is the GEV beamformer (up to scale).
        phi = solve(psd_speech, psd_noise)
        e_vec = (
            phi[..., reference_vector, None]
            if isinstance(reference_vector, int)
            else matmul(phi, reference_vector[..., None, :, None])
        )
        for _ in range(iterations - 1):
            e_vec = matmul(phi, e_vec)
            # e_vec = e_vec / complex_norm(e_vec, dim=-1, keepdim=True)
        e_vec = e_vec.squeeze(-1)
    elif mode == "evd":
        # Exact generalized EVD; only valid for native complex tensors
        # on PyTorch >= 1.9.
        assert (
            is_torch_1_9_plus
            and is_torch_complex_tensor(psd_speech)
            and is_torch_complex_tensor(psd_noise)
        )
        # e_vec = generalized_eigenvalue_decomposition(psd_speech, psd_noise)[1][...,-1]
        e_vec = psd_noise.new_zeros(psd_noise.shape[:-1])
        # Solve one frequency bin at a time so that a linear-algebra failure
        # in a single bin does not abort the whole decomposition.
        for f in range(psd_noise.shape[-3]):
            try:
                # Take the eigenvector of the largest eigenvalue (eigh
                # returns eigenvalues in ascending order, hence index -1).
                e_vec[..., f, :] = generalized_eigenvalue_decomposition(
                    psd_speech[..., f, :, :], psd_noise[..., f, :, :]
                )[1][..., -1]
            except RuntimeError:
                # port from github.com/fgnt/nn-gev/blob/master/fgnt/beamforming.py#L106
                print(
                    "GEV beamformer: LinAlg error for frequency {}".format(f),
                    flush=True,
                )
                C = psd_noise.size(-1)
                # Fallback: a uniform vector scaled by C over the trace of
                # the noise PSD for this bin.
                e_vec[..., f, :] = (
                    psd_noise.new_ones(e_vec[..., f, :].shape)
                    / FC.trace(psd_noise[..., f, :, :])
                    * C
                )
    else:
        raise ValueError("Unknown mode: %s" % mode)
    # Unit-normalize and apply inter-frequency phase correction to reduce
    # distortions (the GEV solution is only defined up to a per-bin phase).
    beamforming_vector = e_vec / complex_norm(e_vec, dim=-1, keepdim=True)
    beamforming_vector = gev_phase_correction(beamforming_vector)
    return beamforming_vector
def signal_framing(
    signal: Union[torch.Tensor, ComplexTensor],
    frame_length: int,
    frame_step: int,
    bdelay: int,
    do_padding: bool = False,
    pad_value: int = 0,
    indices: List = None,
) -> Union[torch.Tensor, ComplexTensor]:
    """Expand `signal` into several frames, with each frame of length `frame_length`.

    Args:
        signal : (..., T)
        frame_length: length of each segment
        frame_step: step for selecting frames
        bdelay: delay for WPD
        do_padding: whether or not to pad the input signal at the beginning
          of the time dimension
        pad_value: value to fill in the padding
        indices: pre-computed gather indices; passed on the internal recursive
            calls so that real and imaginary parts share the same framing
    Returns:
        torch.Tensor:
            if do_padding: (..., T, frame_length)
            else: (..., T - bdelay - frame_length + 2, frame_length)
    """
    if isinstance(signal, ComplexTensor):
        complex_wrapper = ComplexTensor
        pad_func = FC.pad
    elif is_torch_complex_tensor(signal):
        complex_wrapper = torch.complex
        pad_func = torch.nn.functional.pad
    else:
        # Real tensor: reached on the recursive calls below; the result is
        # returned directly, so no complex_wrapper is needed on this path.
        pad_func = torch.nn.functional.pad

    frame_length2 = frame_length - 1
    # pad at the beginning of the time dimension (last axis) of `signal`
    if do_padding:
        # (..., T) --> (..., T + bdelay + frame_length - 2)
        signal = pad_func(
            signal, (bdelay + frame_length2 - 1, 0), "constant", pad_value
        )
        do_padding = False

    if indices is None:
        # Each row holds `frame_length2` consecutive samples plus one sample
        # delayed by `bdelay`:
        # [[ 0, 1, ..., frame_length2 - 1, frame_length2 - 1 + bdelay ],
        #  [ 1, 2, ..., frame_length2, frame_length2 + bdelay ],
        #  [ 2, 3, ..., frame_length2 + 1, frame_length2 + 1 + bdelay ],
        #  ...
        #  [ T-bdelay-frame_length2, ..., T-1-bdelay, T-1 ]]
        indices = [
            [*range(i, i + frame_length2), i + frame_length2 + bdelay - 1]
            for i in range(0, signal.shape[-1] - frame_length2 - bdelay + 1, frame_step)
        ]

    if is_complex(signal):
        # Frame real and imaginary parts separately with the SHARED indices,
        # then re-wrap into the original complex type.
        real = signal_framing(
            signal.real,
            frame_length,
            frame_step,
            bdelay,
            do_padding,
            pad_value,
            indices,
        )
        imag = signal_framing(
            signal.imag,
            frame_length,
            frame_step,
            bdelay,
            do_padding,
            pad_value,
            indices,
        )
        return complex_wrapper(real, imag)
    else:
        # Advanced indexing gathers all frames at once:
        # (..., T - bdelay - frame_length + 2, frame_length)
        signal = signal[..., indices]
        return signal
def get_covariances(
    Y: Union[torch.Tensor, ComplexTensor],
    inverse_power: torch.Tensor,
    bdelay: int,
    btaps: int,
    get_vector: bool = False,
) -> Union[torch.Tensor, ComplexTensor]:
    """Calculates the power normalized spatio-temporal covariance
    matrix of the framed signal.

    Args:
        Y : Complex STFT signal with shape (B, F, C, T)
        inverse_power : Weighting factor with shape (B, F, T)
        bdelay : prediction delay
        btaps : number of filter taps
        get_vector : whether to also return the correlation vector
    Returns:
        Correlation matrix: (B, F, (btaps+1) * C, (btaps+1) * C)
        Correlation vector: (B, F, btaps + 1, C, C)
    """  # noqa: H405, D205, D400, D401
    assert inverse_power.dim() == 3, inverse_power.dim()
    assert inverse_power.size(0) == Y.size(0), (inverse_power.size(0), Y.size(0))

    Bs, Fdim, C, T = Y.shape
    # number of valid time frames: T' = T - bdelay - btaps + 1
    num_frames = T - bdelay - btaps + 1
    # Framed signal: (B, F, C, T', btaps + 1)
    Psi = signal_framing(Y, btaps + 1, 1, bdelay, do_padding=False)[
        ..., :num_frames, :
    ]
    # Reverse the taps axis so each frame is ordered as
    # [tau, tau-bdelay, tau-bdelay-1, ..., tau-bdelay-frame_length+1]
    Psi = reverse(Psi, dim=-1)
    # Weight each valid frame by the corresponding inverse power.
    Psi_norm = Psi * inverse_power[..., None, bdelay + btaps - 1 :, None]

    # (B, F, C, T', btaps+1) x (B, F, C, T', btaps+1)
    # -> (B, F, btaps+1, C, btaps+1, C)
    covariance_matrix = einsum("bfdtk,bfetl->bfkdle", Psi, Psi_norm.conj())
    # Flatten the (tap, channel) pairs:
    # -> (B, F, (btaps+1) * C, (btaps+1) * C)
    covariance_matrix = covariance_matrix.view(
        Bs, Fdim, (btaps + 1) * C, (btaps + 1) * C
    )

    if not get_vector:
        return covariance_matrix
    # (B, F, C, T', btaps+1) x (B, F, C, T') --> (B, F, btaps+1, C, C)
    covariance_vector = einsum(
        "bfdtk,bfet->bfked", Psi_norm, Y[..., bdelay + btaps - 1 :].conj()
    )
    return covariance_matrix, covariance_vector
def get_WPD_filter(
    Phi: Union[torch.Tensor, ComplexTensor],
    Rf: Union[torch.Tensor, ComplexTensor],
    reference_vector: torch.Tensor,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> Union[torch.Tensor, ComplexTensor]:
    """Return the WPD vector.

    WPD is the Weighted Power minimization Distortionless response
    convolutional beamformer. As follows:

        h = (Rf^-1 @ Phi_{xx}) / tr[(Rf^-1) @ Phi_{xx}] @ u

    Reference:
        T. Nakatani and K. Kinoshita, "A Unified Convolutional Beamformer
        for Simultaneous Denoising and Dereverberation," in IEEE Signal
        Processing Letters, vol. 26, no. 6, pp. 903-907, June 2019, doi:
        10.1109/LSP.2019.2911179.
        https://ieeexplore.ieee.org/document/8691481

    Args:
        Phi (torch.complex64/ComplexTensor): (B, F, (btaps+1) * C, (btaps+1) * C)
            is the PSD of zero-padded speech [x^T(t,f) 0 ... 0]^T.
        Rf (torch.complex64/ComplexTensor): (B, F, (btaps+1) * C, (btaps+1) * C)
            is the power normalized spatio-temporal covariance matrix.
        reference_vector (torch.Tensor): (B, (btaps+1) * C)
            is the reference_vector.
        diagonal_loading (bool): Whether to add a tiny term to the diagonal of Rf
        diag_eps (float):
        eps (float):
    Returns:
        filter_matrix (torch.complex64/ComplexTensor): (B, F, (btaps + 1) * C)
    """
    if diagonal_loading:
        Rf = tik_reg(Rf, reg=diag_eps, eps=eps)
    # num = Rf^-1 @ Phi: (..., C_1, C_3)
    num = solve(Phi, Rf)
    # Trace-normalize; FC.trace supports batched matrices, unlike
    # torch.trace before PyTorch 1.9.0.
    ws = num / (FC.trace(num)[..., None, None] + eps)
    # (..., F, C_1, C_2) x (..., C_2) -> (B, F, (btaps + 1) * C)
    return einsum("...fec,...c->...fe", ws, reference_vector)
def get_WPD_filter_v2(
    Phi: Union[torch.Tensor, ComplexTensor],
    Rf: Union[torch.Tensor, ComplexTensor],
    reference_vector: torch.Tensor,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-8,
) -> Union[torch.Tensor, ComplexTensor]:
    """Return the WPD vector (v2).

    A more efficient variant of `get_WPD_filter`: it avoids multiplying
    by the zero blocks of the zero-padded speech PSD.

    Args:
        Phi (torch.complex64/ComplexTensor): (B, F, C, C)
            is speech PSD.
        Rf (torch.complex64/ComplexTensor): (B, F, (btaps+1) * C, (btaps+1) * C)
            is the power normalized spatio-temporal covariance matrix.
        reference_vector (torch.Tensor): (B, C)
            is the reference_vector.
        diagonal_loading (bool):
            Whether to add a tiny term to the diagonal of Rf
        diag_eps (float):
        eps (float):
    Returns:
        filter_matrix (torch.complex64/ComplexTensor): (B, F, (btaps+1) * C)
    """
    C = reference_vector.shape[-1]
    if diagonal_loading:
        Rf = tik_reg(Rf, reg=diag_eps, eps=eps)
    # Keep only the first C columns of Rf^-1: (B, F, (btaps+1) * C, C)
    inv_Rf_pruned = inverse(Rf)[..., :C]
    # (..., (btaps+1) * C, C) x (..., C, C) -> (..., (btaps+1) * C, C)
    num = matmul(inv_Rf_pruned, Phi)
    # Normalize by the trace of the top C x C block; FC.trace supports
    # batched matrices, unlike torch.trace before PyTorch 1.9.0.
    ws = num / (FC.trace(num[..., :C, :])[..., None, None] + eps)
    # (..., F, C_1, C_2) x (..., C_2) -> (B, F, (btaps+1) * C)
    return einsum("...fec,...c->...fe", ws, reference_vector)
def get_WPD_filter_with_rtf(
    psd_observed_bar: Union[torch.Tensor, ComplexTensor],
    psd_speech: Union[torch.Tensor, ComplexTensor],
    psd_noise: Union[torch.Tensor, ComplexTensor],
    iterations: int = 3,
    reference_vector: Union[int, torch.Tensor, None] = None,
    diagonal_loading: bool = True,
    diag_eps: float = 1e-7,
    eps: float = 1e-15,
) -> Union[torch.Tensor, ComplexTensor]:
    """Return the WPD vector calculated with RTF.

    WPD is the Weighted Power minimization Distortionless response
    convolutional beamformer. As follows:

        h = (Rf^-1 @ vbar) / (vbar^H @ Rf^-1 @ vbar)

    Reference:
        T. Nakatani and K. Kinoshita, "A Unified Convolutional Beamformer
        for Simultaneous Denoising and Dereverberation," in IEEE Signal
        Processing Letters, vol. 26, no. 6, pp. 903-907, June 2019, doi:
        10.1109/LSP.2019.2911179.
        https://ieeexplore.ieee.org/document/8691481

    Args:
        psd_observed_bar (torch.complex64/ComplexTensor):
            stacked observation covariance matrix
        psd_speech (torch.complex64/ComplexTensor):
            speech covariance matrix (..., F, C, C)
        psd_noise (torch.complex64/ComplexTensor):
            noise covariance matrix (..., F, C, C)
        iterations (int): number of iterations in power method
        reference_vector (torch.Tensor or int): (..., C) or scalar
        diagonal_loading (bool):
            Whether to add a tiny term to the diagonal of psd_noise
        diag_eps (float): relative strength of the diagonal loading
        eps (float): tiny constant guarding the division
    Returns:
        beamform_vector (torch.complex64/ComplexTensor): (..., F, C)
    """
    # Select the padding routine that matches the complex representation.
    if isinstance(psd_speech, ComplexTensor):
        pad_func = FC.pad
    elif is_torch_complex_tensor(psd_speech):
        pad_func = torch.nn.functional.pad
    else:
        raise ValueError(
            "Please update your PyTorch version to 1.9+ for complex support."
        )

    C = psd_noise.size(-1)
    if diagonal_loading:
        psd_noise = tik_reg(psd_noise, reg=diag_eps, eps=eps)

    # Relative transfer function estimated via the power method: (B, F, C, 1)
    rtf = get_rtf(
        psd_speech,
        psd_noise,
        mode="power",
        reference_vector=reference_vector,
        iterations=iterations,
    )

    # Zero-pad the RTF to the stacked spatio-temporal size: (B, F, (K+1)*C, 1)
    rtf = pad_func(rtf, (0, 0, 0, psd_observed_bar.shape[-1] - C), "constant", 0)
    # numerator = Rf^-1 @ vbar: (..., C_1, C_2) x (..., C_2, 1) -> (..., C_1)
    numerator = solve(rtf, psd_observed_bar).squeeze(-1)
    # denominator = vbar^H @ Rf^-1 @ vbar (one scalar per batch/frequency)
    denominator = einsum("...d,...d->...", rtf.squeeze(-1).conj(), numerator)
    if reference_vector is not None:
        # Rescale so the filter is distortionless w.r.t. the reference channel.
        if isinstance(reference_vector, int):
            scale = rtf.squeeze(-1)[..., reference_vector, None].conj()
        else:
            scale = (
                rtf.squeeze(-1)[:, :, :C].conj() * reference_vector[..., None, :]
            ).sum(dim=-1, keepdim=True)
        beamforming_vector = numerator * scale / (denominator.real.unsqueeze(-1) + eps)
    else:
        beamforming_vector = numerator / (denominator.real.unsqueeze(-1) + eps)
    return beamforming_vector
def perform_WPD_filtering(
    filter_matrix: Union[torch.Tensor, ComplexTensor],
    Y: Union[torch.Tensor, ComplexTensor],
    bdelay: int,
    btaps: int,
) -> Union[torch.Tensor, ComplexTensor]:
    """Perform WPD filtering.

    Args:
        filter_matrix: Filter matrix (B, F, (btaps + 1) * C)
        Y : Complex STFT signal with shape (B, F, C, T)
        bdelay: prediction delay of the convolutional beamformer
        btaps: number of filter taps
    Returns:
        enhanced (torch.complex64/ComplexTensor): (B, F, T)
    """
    batch, freq, _, frames = Y.shape
    # Collect btaps + 1 delayed copies of every frame, then flip so that the
    # most recent frame comes first: (B, F, C, T) -> (B, F, C, T, btaps + 1)
    framed = signal_framing(Y, btaps + 1, 1, bdelay, do_padding=True, pad_value=0)
    framed = reverse(framed, dim=-1)
    # (B, F, T, btaps + 1, C) -> (B, F, T, (btaps + 1) * C)
    framed = framed.permute(0, 1, 3, 4, 2).contiguous().view(batch, freq, frames, -1)
    # Inner product with the conjugated filter along the stacked dimension.
    return einsum("...tc,...c->...t", framed, filter_matrix.conj())
def tik_reg(mat, reg: float = 1e-8, eps: float = 1e-8):
    """Perform Tikhonov regularization (only modifying real part).

    Args:
        mat (torch.complex64/ComplexTensor): input matrix (..., C, C)
        reg (float): regularization factor relative to the trace
        eps (float): constant added so all-zero matrices also get regularized
    Returns:
        ret (torch.complex64/ComplexTensor): regularized matrix (..., C, C)
    """
    n = mat.size(-1)
    # Identity matrix broadcast/tiled to the batch shape of `mat`.
    batch_shape = [1] * (mat.dim() - 2) + [n, n]
    identity = (
        torch.eye(n, dtype=mat.dtype, device=mat.device)
        .view(*batch_shape)
        .repeat(*mat.shape[:-2], 1, 1)
    )
    # Tie the loading strength to the (real) trace of the matrix; detach it
    # from the autograd graph so it acts as a constant scale.
    with torch.no_grad():
        scale = FC.trace(mat).real[..., None, None] * reg + eps
    return mat + scale * identity
| 42,554 | 35.590714 | 102 | py |
espnet | espnet-master/espnet2/enh/layers/dprnn.py | # The implementation of DPRNN in
# Luo. et al. "Dual-path rnn: efficient long sequence modeling
# for time-domain single-channel speech separation."
#
# The code is based on:
# https://github.com/yluo42/TAC/blob/master/utility/models.py
# Licensed under CC BY-NC-SA 3.0 US.
#
import torch
import torch.nn as nn
from torch.autograd import Variable
EPS = torch.finfo(torch.get_default_dtype()).eps
class SingleRNN(nn.Module):
    """One recurrent layer followed by a linear projection back to input size.

    args:
        rnn_type: string, select from 'RNN', 'LSTM' and 'GRU'.
        input_size: int, dimension of the input feature. The input should have shape
            (batch, seq_len, input_size).
        hidden_size: int, dimension of the hidden state.
        dropout: float, dropout ratio. Default is 0.
        bidirectional: bool, whether the RNN layers are bidirectional. Default is False.
    """

    def __init__(
        self, rnn_type, input_size, hidden_size, dropout=0, bidirectional=False
    ):
        super().__init__()
        rnn_type = rnn_type.upper()
        assert rnn_type in [
            "RNN",
            "LSTM",
            "GRU",
        ], f"Only support 'RNN', 'LSTM' and 'GRU', current type: {rnn_type}"
        self.rnn_type = rnn_type
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_direction = 2 if bidirectional else 1

        rnn_cls = getattr(nn, rnn_type)
        self.rnn = rnn_cls(
            input_size,
            hidden_size,
            1,
            batch_first=True,
            bidirectional=bidirectional,
        )
        self.dropout = nn.Dropout(p=dropout)
        # project the (possibly bidirectional) hidden states back to input_size
        self.proj = nn.Linear(hidden_size * self.num_direction, input_size)

    def forward(self, input, state=None):
        """Run the RNN; input and output have shape (batch, seq, input_size)."""
        rnn_output, state = self.rnn(input, state)
        rnn_output = self.dropout(rnn_output)
        flattened = rnn_output.contiguous().view(-1, rnn_output.shape[2])
        projected = self.proj(flattened).view(input.shape)
        return projected, state
# dual-path RNN
class DPRNN(nn.Module):
    """Deep dual-path RNN.

    Alternates an intra-chunk (along dim1) and an inter-chunk (along dim2)
    RNN with residual connections, then maps to `output_size` channels.

    args:
        rnn_type: string, select from 'RNN', 'LSTM' and 'GRU'.
        input_size: int, dimension of the input feature. The input should have shape
            (batch, seq_len, input_size).
        hidden_size: int, dimension of the hidden state.
        output_size: int, dimension of the output size.
        dropout: float, dropout ratio. Default is 0.
        num_layers: int, number of stacked RNN layers. Default is 1.
        bidirectional: bool, whether the RNN layers are bidirectional. Default is True.
    """

    def __init__(
        self,
        rnn_type,
        input_size,
        hidden_size,
        output_size,
        dropout=0,
        num_layers=1,
        bidirectional=True,
    ):
        super().__init__()

        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size

        # dual-path RNN: one intra-chunk (row) and one inter-chunk (col)
        # RNN + normalization per stacked layer
        self.row_rnn = nn.ModuleList([])
        self.col_rnn = nn.ModuleList([])
        self.row_norm = nn.ModuleList([])
        self.col_norm = nn.ModuleList([])
        for i in range(num_layers):
            self.row_rnn.append(
                SingleRNN(
                    rnn_type, input_size, hidden_size, dropout, bidirectional=True
                )
            )  # intra-segment RNN is always noncausal
            self.col_rnn.append(
                SingleRNN(
                    rnn_type,
                    input_size,
                    hidden_size,
                    dropout,
                    bidirectional=bidirectional,
                )
            )
            self.row_norm.append(nn.GroupNorm(1, input_size, eps=1e-8))
            # default is to use noncausal LayerNorm for inter-chunk RNN.
            # For causal setting change it to causal normalization accordingly.
            self.col_norm.append(nn.GroupNorm(1, input_size, eps=1e-8))

        # output layer
        self.output = nn.Sequential(nn.PReLU(), nn.Conv2d(input_size, output_size, 1))

    def forward(self, input):
        # input shape: batch, N, dim1, dim2
        # apply RNN on dim1 first and then dim2
        # output shape: B, output_size, dim1, dim2
        batch_size, _, dim1, dim2 = input.shape
        output = input
        for i in range(len(self.row_rnn)):
            # intra-chunk processing: fold dim2 into the batch and run the
            # (always bidirectional) RNN along dim1
            row_input = (
                output.permute(0, 3, 2, 1)
                .contiguous()
                .view(batch_size * dim2, dim1, -1)
            )  # B*dim2, dim1, N
            row_output, _ = self.row_rnn[i](row_input)  # B*dim2, dim1, H
            row_output = (
                row_output.view(batch_size, dim2, dim1, -1)
                .permute(0, 3, 2, 1)
                .contiguous()
            )  # B, N, dim1, dim2
            row_output = self.row_norm[i](row_output)
            # residual connection
            output = output + row_output

            # inter-chunk processing: fold dim1 into the batch and run the
            # RNN along dim2
            col_input = (
                output.permute(0, 2, 3, 1)
                .contiguous()
                .view(batch_size * dim1, dim2, -1)
            )  # B*dim1, dim2, N
            col_output, _ = self.col_rnn[i](col_input)  # B*dim1, dim2, H
            col_output = (
                col_output.view(batch_size, dim1, dim2, -1)
                .permute(0, 3, 1, 2)
                .contiguous()
            )  # B, N, dim1, dim2
            col_output = self.col_norm[i](col_output)
            output = output + col_output

        output = self.output(output)  # B, output_size, dim1, dim2

        return output
# dual-path RNN with transform-average-concatenate (TAC)
class DPRNN_TAC(nn.Module):
    """Deep dual-path RNN with TAC applied to each layer/block.

    TAC (transform-average-concatenate) exchanges information across the
    microphone-channel dimension after every dual-path block, so the model
    can handle arrays with a variable number of microphones.

    args:
        rnn_type: string, select from 'RNN', 'LSTM' and 'GRU'.
        input_size: int, dimension of the input feature. The input should
            have shape (batch, seq_len, input_size).
        hidden_size: int, dimension of the hidden state.
        output_size: int, dimension of the output size.
        dropout: float, dropout ratio. Default is 0.
        num_layers: int, number of stacked RNN layers. Default is 1.
        bidirectional: bool, whether the RNN layers are bidirectional.
            Default is False.
    """

    def __init__(
        self,
        rnn_type,
        input_size,
        hidden_size,
        output_size,
        dropout=0,
        num_layers=1,
        bidirectional=True,
    ):
        super(DPRNN_TAC, self).__init__()

        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size

        # DPRNN + TAC for 3D input (ch, N, T)
        self.row_rnn = nn.ModuleList([])
        self.col_rnn = nn.ModuleList([])
        # TAC sub-modules: per-channel transform, cross-channel average,
        # and concatenation back down to the input dimension
        self.ch_transform = nn.ModuleList([])
        self.ch_average = nn.ModuleList([])
        self.ch_concat = nn.ModuleList([])

        self.row_norm = nn.ModuleList([])
        self.col_norm = nn.ModuleList([])
        self.ch_norm = nn.ModuleList([])

        for i in range(num_layers):
            self.row_rnn.append(
                SingleRNN(
                    rnn_type, input_size, hidden_size, dropout, bidirectional=True
                )
            )  # intra-segment RNN is always noncausal
            self.col_rnn.append(
                SingleRNN(
                    rnn_type,
                    input_size,
                    hidden_size,
                    dropout,
                    bidirectional=bidirectional,
                )
            )
            self.ch_transform.append(
                nn.Sequential(nn.Linear(input_size, hidden_size * 3), nn.PReLU())
            )
            self.ch_average.append(
                nn.Sequential(nn.Linear(hidden_size * 3, hidden_size * 3), nn.PReLU())
            )
            self.ch_concat.append(
                nn.Sequential(nn.Linear(hidden_size * 6, input_size), nn.PReLU())
            )

            self.row_norm.append(nn.GroupNorm(1, input_size, eps=1e-8))
            # default is to use noncausal LayerNorm for
            # inter-chunk RNN and TAC modules.
            # For causal setting change them to causal normalization
            # techniques accordingly.
            self.col_norm.append(nn.GroupNorm(1, input_size, eps=1e-8))
            self.ch_norm.append(nn.GroupNorm(1, input_size, eps=1e-8))

        # output layer
        self.output = nn.Sequential(nn.PReLU(), nn.Conv2d(input_size, output_size, 1))

    def forward(self, input, num_mic):
        # input shape: batch, ch, N, dim1, dim2
        # num_mic shape: batch,  -- number of valid channels per example;
        # num_mic.max() == 0 signals a fixed-geometry array (use all channels)
        # apply RNN on dim1 first, then dim2, then ch

        batch_size, ch, N, dim1, dim2 = input.shape
        output = input
        for i in range(len(self.row_rnn)):
            # intra-segment RNN
            output = output.view(batch_size * ch, N, dim1, dim2)
            row_input = (
                output.permute(0, 3, 2, 1)
                .contiguous()
                .view(batch_size * ch * dim2, dim1, -1)
            )  # B*ch*dim2, dim1, N
            row_output, _ = self.row_rnn[i](row_input)  # B*ch*dim2, dim1, N
            row_output = (
                row_output.view(batch_size * ch, dim2, dim1, -1)
                .permute(0, 3, 2, 1)
                .contiguous()
            )  # B*ch, N, dim1, dim2
            row_output = self.row_norm[i](row_output)
            # residual connection
            output = output + row_output  # B*ch, N, dim1, dim2

            # inter-segment RNN
            col_input = (
                output.permute(0, 2, 3, 1)
                .contiguous()
                .view(batch_size * ch * dim1, dim2, -1)
            )  # B*ch*dim1, dim2, N
            col_output, _ = self.col_rnn[i](col_input)  # B*dim1, dim2, N
            col_output = (
                col_output.view(batch_size * ch, dim1, dim2, -1)
                .permute(0, 3, 1, 2)
                .contiguous()
            )  # B*ch, N, dim1, dim2
            col_output = self.col_norm[i](col_output)
            output = output + col_output  # B*ch, N, dim1, dim2

            # TAC for cross-channel communication
            ch_input = output.view(input.shape)  # B, ch, N, dim1, dim2
            ch_input = (
                ch_input.permute(0, 3, 4, 1, 2).contiguous().view(-1, N)
            )  # B*dim1*dim2*ch, N
            # transform: project each channel independently
            ch_output = self.ch_transform[i](ch_input).view(
                batch_size, dim1 * dim2, ch, -1
            )  # B, dim1*dim2, ch, H
            # average: mean pooling across channels
            if num_mic.max() == 0:
                # fixed geometry array
                ch_mean = ch_output.mean(2).view(
                    batch_size * dim1 * dim2, -1
                )  # B*dim1*dim2, H
            else:
                # only consider valid channels
                ch_mean = [
                    ch_output[b, :, : num_mic[b]].mean(1).unsqueeze(0)
                    for b in range(batch_size)
                ]  # 1, dim1*dim2, H
                ch_mean = torch.cat(ch_mean, 0).view(
                    batch_size * dim1 * dim2, -1
                )  # B*dim1*dim2, H
            ch_output = ch_output.view(
                batch_size * dim1 * dim2, ch, -1
            )  # B*dim1*dim2, ch, H
            ch_mean = (
                self.ch_average[i](ch_mean)
                .unsqueeze(1)
                .expand_as(ch_output)
                .contiguous()
            )  # B*dim1*dim2, ch, H
            # concatenate: per-channel features joined with the channel average
            ch_output = torch.cat([ch_output, ch_mean], 2)  # B*dim1*dim2, ch, 2H
            ch_output = self.ch_concat[i](
                ch_output.view(-1, ch_output.shape[-1])
            )  # B*dim1*dim2*ch, N
            ch_output = (
                ch_output.view(batch_size, dim1, dim2, ch, -1)
                .permute(0, 3, 4, 1, 2)
                .contiguous()
            )  # B, ch, N, dim1, dim2
            ch_output = self.ch_norm[i](
                ch_output.view(batch_size * ch, N, dim1, dim2)
            )  # B*ch, N, dim1, dim2
            output = output + ch_output

        output = self.output(output)  # B*ch, N, dim1, dim2

        return output
def _pad_segment(input, segment_size):
# input is the features: (B, N, T)
batch_size, dim, seq_len = input.shape
segment_stride = segment_size // 2
rest = segment_size - (segment_stride + seq_len % segment_size) % segment_size
if rest > 0:
pad = Variable(torch.zeros(batch_size, dim, rest)).type(input.type())
input = torch.cat([input, pad], 2)
pad_aux = Variable(torch.zeros(batch_size, dim, segment_stride)).type(input.type())
input = torch.cat([pad_aux, input, pad_aux], 2)
return input, rest
def split_feature(input, segment_size):
    """Split (B, N, T) features into half-overlapping segments.

    Returns:
        (segments, rest): segments of shape (B, N, segment_size, n_segments)
        and the number of padding frames appended by `_pad_segment`.
    """
    padded, rest = _pad_segment(input, segment_size)
    batch_size, dim, _ = padded.shape
    hop = segment_size // 2

    # Even-indexed segments start at offset 0, odd-indexed ones at `hop`.
    seg_even = padded[:, :, :-hop].contiguous().view(batch_size, dim, -1, segment_size)
    seg_odd = padded[:, :, hop:].contiguous().view(batch_size, dim, -1, segment_size)

    # Interleave the two segment streams and move the segment-length axis
    # in front of the segment-index axis.
    stacked = (
        torch.cat([seg_even, seg_odd], 3)
        .view(batch_size, dim, -1, segment_size)
        .transpose(2, 3)
    )

    return stacked.contiguous(), rest
def merge_feature(input, rest):
    """Overlap-add split segments back into a full utterance.

    Args:
        input: segments of shape (B, N, segment_size, n_segments)
        rest: number of trailing padding frames added when splitting
    Returns:
        merged features of shape (B, N, T)
    """
    batch_size, dim, segment_size, _ = input.shape
    hop = segment_size // 2

    # (B, N, L, K) -> (B, N, K, L) -> (B, N, K // 2, 2L): pair up the two
    # interleaved segment streams produced by `split_feature`.
    paired = (
        input.transpose(2, 3).contiguous().view(batch_size, dim, -1, segment_size * 2)
    )

    # First halves, shifted left by `hop` ...
    part_a = (
        paired[:, :, :, :segment_size]
        .contiguous()
        .view(batch_size, dim, -1)[:, :, hop:]
    )
    # ... overlap-added with the second halves, trimmed on the right.
    part_b = (
        paired[:, :, :, segment_size:]
        .contiguous()
        .view(batch_size, dim, -1)[:, :, :-hop]
    )

    merged = part_a + part_b
    if rest > 0:
        # Drop the zero padding introduced by `_pad_segment`.
        merged = merged[:, :, :-rest]

    return merged.contiguous()  # B, N, T
| 14,234 | 33.635036 | 88 | py |
espnet | espnet-master/espnet2/enh/layers/mask_estimator.py | from typing import Tuple, Union
import numpy as np
import torch
from packaging.version import parse as V
from torch.nn import functional as F
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.rnn.encoders import RNN, RNNP
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class MaskEstimator(torch.nn.Module):
    """(B)RNN-based time-frequency mask estimator.

    Args:
        type: RNN type string passed on to `RNN`/`RNNP`; a trailing "p"
            selects the projected variant `RNNP`.
        idim: input feature dimension (number of frequency bins).
        layers: number of RNN layers.
        units: number of RNN units per layer.
        projs: projection size (output dimension of the RNN).
        dropout: dropout rate inside the RNN.
        nmask: number of masks to estimate.
        nonlinear: output nonlinearity, one of "sigmoid", "relu", "tanh",
            "crelu" (clamp to [0, 1]).
    """

    def __init__(
        self, type, idim, layers, units, projs, dropout, nmask=1, nonlinear="sigmoid"
    ):
        super().__init__()
        subsample = np.ones(layers + 1, dtype=np.int64)

        typ = type.lstrip("vgg").rstrip("p")
        if type[-1] == "p":
            self.brnn = RNNP(idim, layers, units, projs, subsample, dropout, typ=typ)
        else:
            self.brnn = RNN(idim, layers, units, projs, dropout, typ=typ)

        self.type = type
        self.nmask = nmask
        # One output head per estimated mask.
        self.linears = torch.nn.ModuleList(
            [torch.nn.Linear(projs, idim) for _ in range(nmask)]
        )

        if nonlinear not in ("sigmoid", "relu", "tanh", "crelu"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))
        self.nonlinear = nonlinear

    def forward(
        self, xs: Union[torch.Tensor, ComplexTensor], ilens: torch.LongTensor
    ) -> Tuple[Tuple[torch.Tensor, ...], torch.LongTensor]:
        """Mask estimator forward function.

        Args:
            xs: (B, F, C, T)
            ilens: (B,)
        Returns:
            masks: A tuple of `nmask` masks, each of shape (B, F, C, T)
            ilens: (B,)
        """
        assert xs.size(0) == ilens.size(0), (xs.size(0), ilens.size(0))
        _, _, C, input_length = xs.size()
        # (B, F, C, T) -> (B, C, T, F)
        xs = xs.permute(0, 2, 3, 1)

        # Calculate amplitude: (B, C, T, F) -> (B, C, T, F)
        if is_complex(xs):
            xs = (xs.real**2 + xs.imag**2) ** 0.5
        # xs: (B, C, T, F) -> xs: (B * C, T, F)
        xs = xs.contiguous().view(-1, xs.size(-2), xs.size(-1))
        # ilens: (B,) -> ilens_: (B * C)
        ilens_ = ilens[:, None].expand(-1, C).contiguous().view(-1)

        # xs: (B * C, T, F) -> xs: (B * C, T, D)
        xs, _, _ = self.brnn(xs, ilens_)
        # xs: (B * C, T, D) -> xs: (B, C, T, D)
        xs = xs.view(-1, C, xs.size(-2), xs.size(-1))

        masks = []
        for linear in self.linears:
            # xs: (B, C, T, D) -> mask:(B, C, T, F)
            mask = linear(xs)

            if self.nonlinear == "sigmoid":
                mask = torch.sigmoid(mask)
            elif self.nonlinear == "relu":
                mask = torch.relu(mask)
            elif self.nonlinear == "tanh":
                mask = torch.tanh(mask)
            elif self.nonlinear == "crelu":
                mask = torch.clamp(mask, min=0, max=1)
            # Zero out the padded frames.
            # FIX: `masked_fill` is out-of-place and its return value was
            # previously discarded, so padding was never actually zeroed;
            # keep the result.
            mask = mask.masked_fill(make_pad_mask(ilens, mask, length_dim=2), 0)

            # (B, C, T, F) -> (B, F, C, T)
            mask = mask.permute(0, 3, 1, 2)

            # Take cares of multi gpu cases: If input_length > max(ilens)
            if mask.size(-1) < input_length:
                mask = F.pad(mask, [0, input_length - mask.size(-1)], value=0)
            masks.append(mask)

        return tuple(masks), ilens
| 3,390 | 34.322917 | 85 | py |
espnet | espnet-master/espnet2/enh/layers/dptnet.py | # The implementation of DPTNet proposed in
# J. Chen, Q. Mao, and D. Liu, “Dual-path transformer network:
# Direct context-aware modeling for end-to-end monaural speech
# separation,” in Proc. ISCA Interspeech, 2020, pp. 2642–2646.
#
# Ported from https://github.com/ujscjj/DPTNet
import torch.nn as nn
from espnet2.enh.layers.tcn import choose_norm
from espnet.nets.pytorch_backend.nets_utils import get_activation
class ImprovedTransformerLayer(nn.Module):
    """Container module of the (improved) Transformer proposed in [1].

    A multi-head self-attention sub-layer followed by an RNN-based
    feed-forward sub-layer, each with residual connection + normalization.

    Reference:
        Dual-path transformer network: Direct context-aware modeling for end-to-end
        monaural speech separation; Chen et al, Interspeech 2020.

    Args:
        rnn_type (str): select from 'RNN', 'LSTM' and 'GRU'.
        input_size (int): Dimension of the input feature.
        att_heads (int): Number of attention heads.
        hidden_size (int): Dimension of the hidden state.
        dropout (float): Dropout ratio. Default is 0.
        activation (str): activation function applied at the output of RNN.
        bidirectional (bool, optional): True for bidirectional Inter-Chunk RNN
            (Intra-Chunk is always bidirectional).
        norm (str, optional): Type of normalization to use.
    """

    def __init__(
        self,
        rnn_type,
        input_size,
        att_heads,
        hidden_size,
        dropout=0.0,
        activation="relu",
        bidirectional=True,
        norm="gLN",
    ):
        super().__init__()

        rnn_type = rnn_type.upper()
        assert rnn_type in [
            "RNN",
            "LSTM",
            "GRU",
        ], f"Only support 'RNN', 'LSTM' and 'GRU', current type: {rnn_type}"
        self.rnn_type = rnn_type
        self.att_heads = att_heads
        # Self-attention sub-layer (first half of the block).
        self.self_attn = nn.MultiheadAttention(input_size, att_heads, dropout=dropout)
        self.dropout = nn.Dropout(p=dropout)
        self.norm_attn = choose_norm(norm, input_size)

        # RNN + linear sub-layer (second half); this replaces the
        # position-wise feed-forward network of the vanilla Transformer.
        self.rnn = getattr(nn, rnn_type)(
            input_size,
            hidden_size,
            1,
            batch_first=True,
            bidirectional=bidirectional,
        )
        activation = get_activation(activation)
        hdim = 2 * hidden_size if bidirectional else hidden_size
        self.feed_forward = nn.Sequential(
            activation, nn.Dropout(p=dropout), nn.Linear(hdim, input_size)
        )

        self.norm_ff = choose_norm(norm, input_size)

    def forward(self, x, attn_mask=None):
        # x: (batch, seq, input_size)
        # nn.MultiheadAttention expects the sequence dimension first:
        # (batch, seq, input_size) -> (seq, batch, input_size)
        src = x.permute(1, 0, 2)
        # (seq, batch, input_size) -> (batch, seq, input_size)
        out = self.self_attn(src, src, src, attn_mask=attn_mask)[0].permute(1, 0, 2)
        # residual connection around the attention sub-layer
        out = self.dropout(out) + x
        # the norm layer normalizes over channels: ... -> (batch, input_size, seq) -> ...
        out = self.norm_attn(out.transpose(-1, -2)).transpose(-1, -2)

        # RNN feed-forward sub-layer with its own residual connection
        out2 = self.feed_forward(self.rnn(out)[0])
        out2 = self.dropout(out2) + out
        return self.norm_ff(out2.transpose(-1, -2)).transpose(-1, -2)
class DPTNet(nn.Module):
    """Dual-path transformer network.

    args:
        rnn_type (str): select from 'RNN', 'LSTM' and 'GRU'.
        input_size (int): dimension of the input feature.
            Input size must be a multiple of `att_heads`.
        hidden_size (int): dimension of the hidden state.
        output_size (int): dimension of the output size.
        att_heads (int): number of attention heads.
        dropout (float): dropout ratio. Default is 0.
        activation (str): activation function applied at the output of RNN.
        num_layers (int): number of stacked RNN layers. Default is 1.
        bidirectional (bool): whether the RNN layers are bidirectional. Default is True.
        norm_type (str): type of normalization to use after each inter- or
            intra-chunk Transformer block.
    """

    def __init__(
        self,
        rnn_type,
        input_size,
        hidden_size,
        output_size,
        att_heads=4,
        dropout=0,
        activation="relu",
        num_layers=1,
        bidirectional=True,
        norm_type="gLN",
    ):
        super().__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # dual-path transformer: one intra-chunk (row) and one inter-chunk
        # (col) transformer layer per stacked layer
        self.row_transformer = nn.ModuleList()
        self.col_transformer = nn.ModuleList()
        for i in range(num_layers):
            self.row_transformer.append(
                ImprovedTransformerLayer(
                    rnn_type,
                    input_size,
                    att_heads,
                    hidden_size,
                    dropout=dropout,
                    activation=activation,
                    bidirectional=True,
                    norm=norm_type,
                )
            )  # intra-segment RNN is always noncausal
            self.col_transformer.append(
                ImprovedTransformerLayer(
                    rnn_type,
                    input_size,
                    att_heads,
                    hidden_size,
                    dropout=dropout,
                    activation=activation,
                    bidirectional=bidirectional,
                    norm=norm_type,
                )
            )

        # output layer
        self.output = nn.Sequential(nn.PReLU(), nn.Conv2d(input_size, output_size, 1))

    def forward(self, input):
        # input shape: batch, N, dim1, dim2
        # apply Transformer on dim1 first and then dim2
        # output shape: B, output_size, dim1, dim2
        output = input
        for i in range(len(self.row_transformer)):
            output = self.intra_chunk_process(output, i)
            output = self.inter_chunk_process(output, i)

        output = self.output(output)  # B, output_size, dim1, dim2

        return output

    def intra_chunk_process(self, x, layer_index):
        # run the intra-chunk transformer along dim1 (chunk axis),
        # folding the chunk index (dim2) into the batch
        batch, N, chunk_size, n_chunks = x.size()
        x = x.transpose(1, -1).reshape(batch * n_chunks, chunk_size, N)
        x = self.row_transformer[layer_index](x)
        x = x.reshape(batch, n_chunks, chunk_size, N).permute(0, 3, 2, 1)
        return x

    def inter_chunk_process(self, x, layer_index):
        # run the inter-chunk transformer across chunks (dim2),
        # folding the intra-chunk position (dim1) into the batch
        batch, N, chunk_size, n_chunks = x.size()
        x = x.permute(0, 2, 3, 1).reshape(batch * chunk_size, n_chunks, N)
        x = self.col_transformer[layer_index](x)
        x = x.reshape(batch, chunk_size, n_chunks, N).permute(0, 3, 1, 2)
        return x
| 6,539 | 34.351351 | 88 | py |
espnet | espnet-master/espnet2/enh/layers/complex_utils.py | """Beamformer module."""
from typing import Sequence, Tuple, Union
import torch
from packaging.version import parse as V
from torch_complex import functional as FC
from torch_complex.tensor import ComplexTensor
EPS = torch.finfo(torch.double).eps
is_torch_1_8_plus = V(torch.__version__) >= V("1.8.0")
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
def new_complex_like(
    ref: Union[torch.Tensor, ComplexTensor],
    real_imag: Tuple[torch.Tensor, torch.Tensor],
):
    """Build a complex tensor from (real, imag) in the same flavor as `ref`."""
    if isinstance(ref, ComplexTensor):
        return ComplexTensor(*real_imag)
    if is_torch_complex_tensor(ref):
        return torch.complex(*real_imag)
    raise ValueError(
        "Please update your PyTorch version to 1.9+ for complex support."
    )
def is_torch_complex_tensor(c):
    """Return True iff `c` is a native torch complex tensor (not ComplexTensor)."""
    if isinstance(c, ComplexTensor):
        return False
    return is_torch_1_9_plus and torch.is_complex(c)
def is_complex(c):
    """Return True if `c` is complex-valued in either representation."""
    if isinstance(c, ComplexTensor):
        return True
    return is_torch_complex_tensor(c)
def to_double(c):
    """Cast to double precision (complex128 for native complex tensors)."""
    if is_torch_complex_tensor(c):
        return c.to(dtype=torch.complex128)
    return c.double()
def to_float(c):
    """Cast to single precision (complex64 for native complex tensors)."""
    if is_torch_complex_tensor(c):
        return c.to(dtype=torch.complex64)
    return c.float()
def cat(seq: Sequence[Union[ComplexTensor, torch.Tensor]], *args, **kwargs):
    """Concatenate tensors, dispatching on the complex representation."""
    if not isinstance(seq, (list, tuple)):
        raise TypeError(
            "cat(): argument 'tensors' (position 1) must be tuple of Tensors, "
            "not Tensor"
        )
    # ComplexTensor sequences go through torch_complex; everything else torch.
    backend = FC if isinstance(seq[0], ComplexTensor) else torch
    return backend.cat(seq, *args, **kwargs)
def complex_norm(
    c: Union[torch.Tensor, ComplexTensor], dim=-1, keepdim=False
) -> torch.Tensor:
    """L2 norm of a complex tensor along `dim` (over all elements if dim is None)."""
    if not is_complex(c):
        raise TypeError("Input is not a complex tensor.")
    if is_torch_complex_tensor(c):
        return torch.norm(c, dim=dim, keepdim=keepdim)
    # ComplexTensor path: sqrt(sum(|real|^2 + |imag|^2)), with a tiny epsilon
    # so the sqrt gradient stays finite at zero.
    power = c.real**2 + c.imag**2
    if dim is None:
        return torch.sqrt(power.sum() + EPS)
    return torch.sqrt(power.sum(dim=dim, keepdim=keepdim) + EPS)
def einsum(equation, *operands):
    """Einsum that dispatches between the torch and torch_complex backends.

    Args:
        equation (str): einsum equation.
        *operands: either one (possibly a tuple/list of) tensor(s), two
            tensors (mixed complex/real allowed), or 3+ tensors of the
            same dtype.
    Returns:
        result tensor in the same complex representation as the inputs.
    Raises:
        ValueError: for zero operands, or for 3+ operands of mixed dtypes.
    """
    # NOTE: Do not mix ComplexTensor and torch.complex in the input!
    # NOTE (wangyou): Until PyTorch 1.9.0, torch.einsum does not support
    # mixed input with complex and real tensors.
    if len(operands) == 0:
        # FIX: previously this fell through to `operands[0]` below and
        # raised an IndexError instead of the documented ValueError.
        raise ValueError("0 or More than 2 operands are not supported.")
    if len(operands) == 1:
        if isinstance(operands[0], (tuple, list)):
            operands = operands[0]
        complex_module = FC if isinstance(operands[0], ComplexTensor) else torch
        return complex_module.einsum(equation, *operands)
    elif len(operands) != 2:
        # 3+ operands: only supported when all dtypes agree.
        op0 = operands[0]
        same_type = all(op.dtype == op0.dtype for op in operands[1:])
        if same_type:
            _einsum = FC.einsum if isinstance(op0, ComplexTensor) else torch.einsum
            return _einsum(equation, *operands)
        else:
            raise ValueError("0 or More than 2 operands are not supported.")

    a, b = operands
    if isinstance(a, ComplexTensor) or isinstance(b, ComplexTensor):
        return FC.einsum(equation, a, b)
    elif is_torch_1_9_plus and (torch.is_complex(a) or torch.is_complex(b)):
        # Mixed complex/real: apply the real operand to the real and
        # imaginary components separately.
        if not torch.is_complex(a):
            o_real = torch.einsum(equation, a, b.real)
            o_imag = torch.einsum(equation, a, b.imag)
            return torch.complex(o_real, o_imag)
        elif not torch.is_complex(b):
            o_real = torch.einsum(equation, a.real, b)
            o_imag = torch.einsum(equation, a.imag, b)
            return torch.complex(o_real, o_imag)
        else:
            return torch.einsum(equation, a, b)
    else:
        return torch.einsum(equation, a, b)
def inverse(
    c: Union[torch.Tensor, ComplexTensor]
) -> Union[torch.Tensor, ComplexTensor]:
    """Batched matrix inverse for either complex representation."""
    # ComplexTensor exposes its own inverse2(); native tensors use torch's.
    return c.inverse2() if isinstance(c, ComplexTensor) else c.inverse()
def matmul(
    a: Union[torch.Tensor, ComplexTensor], b: Union[torch.Tensor, ComplexTensor]
) -> Union[torch.Tensor, ComplexTensor]:
    """Matrix product supporting mixed complex/real operands.

    NOTE: Do not mix ComplexTensor and torch.complex in the input!
    (Until PyTorch 1.9.0, torch.matmul does not support multiplication
    between complex and real tensors.)
    """
    if isinstance(a, ComplexTensor) or isinstance(b, ComplexTensor):
        return FC.matmul(a, b)
    if is_torch_1_9_plus and (torch.is_complex(a) or torch.is_complex(b)):
        if not torch.is_complex(a):
            # real @ complex: multiply the real operand into each component
            return torch.complex(torch.matmul(a, b.real), torch.matmul(a, b.imag))
        if not torch.is_complex(b):
            return torch.complex(torch.matmul(a.real, b), torch.matmul(a.imag, b))
        return torch.matmul(a, b)
    return torch.matmul(a, b)
def trace(a: Union[torch.Tensor, ComplexTensor]):
    """Return the trace over the last two dimensions, with batch support."""
    # NOTE (wangyou): until PyTorch 1.9.0, torch.trace does not
    # support batch processing. Use FC.trace() as fallback.
    return FC.trace(a)
def reverse(a: Union[torch.Tensor, ComplexTensor], dim=0):
    """Flip `a` along dimension `dim`."""
    if isinstance(a, ComplexTensor):
        return FC.reverse(a, dim=dim)
    return torch.flip(a, dims=(dim,))
def solve(b: Union[torch.Tensor, ComplexTensor], a: Union[torch.Tensor, ComplexTensor]):
    """Solve the linear equation ax = b."""
    # NOTE: Do not mix ComplexTensor and torch.complex in the input!
    # NOTE (wangyou): Until PyTorch 1.9.0, torch.solve does not support
    # mixed input with complex and real tensors.
    if isinstance(a, ComplexTensor) or isinstance(b, ComplexTensor):
        if isinstance(a, ComplexTensor) and isinstance(b, ComplexTensor):
            return FC.solve(b, a, return_LU=False)
        # Mixed representations: fall back to explicit inversion.
        return matmul(inverse(a), b)
    if is_torch_1_9_plus and (torch.is_complex(a) or torch.is_complex(b)):
        if torch.is_complex(a) and torch.is_complex(b):
            return torch.linalg.solve(a, b)
        # Mixed complex/real: fall back to explicit inversion.
        return matmul(inverse(a), b)
    # Real-valued path: torch.solve was replaced by torch.linalg.solve in 1.8.
    if is_torch_1_8_plus:
        return torch.linalg.solve(a, b)
    return torch.solve(b, a)[0]
def stack(seq: Sequence[Union[ComplexTensor, torch.Tensor]], *args, **kwargs):
    """Stack tensors along a new dimension, dispatching on the representation."""
    if not isinstance(seq, (list, tuple)):
        raise TypeError(
            "stack(): argument 'tensors' (position 1) must be tuple of Tensors, "
            "not Tensor"
        )
    # ComplexTensor sequences go through torch_complex; everything else torch.
    backend = FC if isinstance(seq[0], ComplexTensor) else torch
    return backend.stack(seq, *args, **kwargs)
| 6,751 | 34.166667 | 88 | py |
espnet | espnet-master/espnet2/enh/layers/tcn.py | # Implementation of the TCN proposed in
# Luo. et al. "Conv-tasnet: Surpassing ideal time–frequency
# magnitude masking for speech separation."
#
# The code is based on:
# https://github.com/kaituoxu/Conv-TasNet/blob/master/src/conv_tasnet.py
# Licensed under MIT.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet2.enh.layers.adapt_layers import make_adapt_layer
EPS = torch.finfo(torch.get_default_dtype()).eps
class TemporalConvNet(nn.Module):
    """Temporal Convolutional Network (mask estimation network of Conv-TasNet)."""

    def __init__(
        self,
        N,
        B,
        H,
        P,
        X,
        R,
        C,
        Sc=None,
        out_channel=None,
        norm_type="gLN",
        causal=False,
        pre_mask_nonlinear="linear",
        mask_nonlinear="relu",
    ):
        """Basic Module of tasnet.

        Args:
            N: Number of filters in autoencoder
            B: Number of channels in bottleneck 1 * 1-conv block
            H: Number of channels in convolutional blocks
            P: Kernel size in convolutional blocks
            X: Number of convolutional blocks in each repeat
            R: Number of repeats
            C: Number of speakers
            Sc: Number of channels in skip-connection paths' 1x1-conv blocks
            out_channel: Number of output channels
                if it is None, `N` will be used instead.
            norm_type: BN, gLN, cLN
            causal: causal or non-causal
            pre_mask_nonlinear: the non-linear function before masknet
            mask_nonlinear: use which non-linear function to generate mask
        """
        super().__init__()
        # Hyper-parameter
        self.C = C
        self.mask_nonlinear = mask_nonlinear
        self.skip_connection = Sc is not None
        self.out_channel = N if out_channel is None else out_channel
        if self.skip_connection:
            # skip channels must match the bottleneck width so the summed
            # skip outputs can feed the mask 1x1-conv directly
            assert Sc == B, (Sc, B)
        # Components
        # [M, N, K] -> [M, N, K]
        layer_norm = ChannelwiseLayerNorm(N)
        # [M, N, K] -> [M, B, K]
        bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)
        # [M, B, K] -> [M, B, K]
        repeats = []
        self.receptive_field = 0
        for r in range(R):
            blocks = []
            for x in range(X):
                dilation = 2**x
                if r == 0 and x == 0:
                    self.receptive_field += P
                else:
                    self.receptive_field += (P - 1) * dilation
                padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2
                blocks += [
                    TemporalBlock(
                        B,
                        H,
                        Sc,
                        P,
                        stride=1,
                        padding=padding,
                        dilation=dilation,
                        norm_type=norm_type,
                        causal=causal,
                    )
                ]
            repeats += [nn.Sequential(*blocks)]
        temporal_conv_net = nn.Sequential(*repeats)
        # [M, B, K] -> [M, C*N, K]
        mask_conv1x1 = nn.Conv1d(B, C * self.out_channel, 1, bias=False)
        # Put together (for compatibility with older versions)
        if pre_mask_nonlinear == "linear":
            self.network = nn.Sequential(
                layer_norm, bottleneck_conv1x1, temporal_conv_net, mask_conv1x1
            )
        else:
            activ = {
                "prelu": nn.PReLU(),
                "relu": nn.ReLU(),
                "tanh": nn.Tanh(),
                "sigmoid": nn.Sigmoid(),
            }[pre_mask_nonlinear]
            self.network = nn.Sequential(
                layer_norm, bottleneck_conv1x1, temporal_conv_net, activ, mask_conv1x1
            )

    def forward(self, mixture_w):
        """Keep this API same with TasNet.

        Args:
            mixture_w: [M, N, K], M is batch size

        Returns:
            est_mask: [M, C, N, K]

        Raises:
            ValueError: if `self.mask_nonlinear` is not supported
        """
        M, N, K = mixture_w.size()
        bottleneck = self.network[:2]
        tcns = self.network[2]
        masknet = self.network[3:]
        output = bottleneck(mixture_w)
        skip_conn = 0.0
        # Manually unroll the TCN so per-layer skip outputs can be collected.
        for block in tcns:
            for layer in block:
                tcn_out = layer(output)
                if self.skip_connection:
                    residual, skip = tcn_out
                    skip_conn = skip_conn + skip
                else:
                    residual = tcn_out
                output = output + residual
        # Use residual output when no skip connection
        if self.skip_connection:
            score = masknet(skip_conn)
        else:
            score = masknet(output)
        # [M, C*self.out_channel, K] -> [M, C, self.out_channel, K]
        score = score.view(M, self.C, self.out_channel, K)
        if self.mask_nonlinear == "softmax":
            est_mask = F.softmax(score, dim=1)
        elif self.mask_nonlinear == "relu":
            est_mask = F.relu(score)
        elif self.mask_nonlinear == "sigmoid":
            # FIX: torch.nn.functional.sigmoid/tanh are deprecated aliases;
            # torch.sigmoid/torch.tanh are the equivalent supported calls.
            est_mask = torch.sigmoid(score)
        elif self.mask_nonlinear == "tanh":
            est_mask = torch.tanh(score)
        elif self.mask_nonlinear == "linear":
            est_mask = score
        else:
            raise ValueError("Unsupported mask non-linear function")
        return est_mask
class TemporalConvNetInformed(TemporalConvNet):
    """TasNet TCN mask network with target-speaker adaptation layers.

    Identical to TemporalConvNet except that one TCN layer (selected by
    ``i_adapt_layer``) is followed by an adaptation layer conditioned on a
    speaker enrollment embedding, so a single target mask is produced.
    """

    def __init__(
        self,
        N,
        B,
        H,
        P,
        X,
        R,
        Sc=None,
        out_channel=None,
        norm_type="gLN",
        causal=False,
        pre_mask_nonlinear="prelu",
        mask_nonlinear="relu",
        i_adapt_layer: int = 7,
        adapt_layer_type: str = "mul",
        adapt_enroll_dim: int = 128,
        **adapt_layer_kwargs
    ):
        """Basic Module of TasNet with adaptation layers.

        Args:
            N: Number of filters in autoencoder
            B: Number of channels in bottleneck 1 * 1-conv block
            H: Number of channels in convolutional blocks
            P: Kernel size in convolutional blocks
            X: Number of convolutional blocks in each repeat
            R: Number of repeats
            Sc: Number of channels in skip-connection paths' 1x1-conv blocks
            out_channel: Number of output channels
                if it is None, `N` will be used instead.
            norm_type: BN, gLN, cLN
            causal: causal or non-causal
            pre_mask_nonlinear: the non-linear function before masknet
            mask_nonlinear: use which non-linear function to generate mask
            i_adapt_layer: int, index of the adaptation layer
            adapt_layer_type: str, type of adaptation layer
                see espnet2.enh.layers.adapt_layers for options
            adapt_enroll_dim: int, dimensionality of the speaker embedding
            adapt_layer_kwargs: extra keyword arguments passed through to
                make_adapt_layer
        """
        super().__init__(
            N,
            B,
            H,
            P,
            X,
            R,
            # presumably the parent's number of output masks (C); fixed to 1
            # here since this informed extractor estimates a single target
            # (parent signature not visible in this chunk — TODO confirm)
            1,
            Sc=Sc,
            out_channel=out_channel,
            norm_type=norm_type,
            causal=causal,
            pre_mask_nonlinear=pre_mask_nonlinear,
            mask_nonlinear=mask_nonlinear,
        )
        self.i_adapt_layer = i_adapt_layer
        self.adapt_enroll_dim = adapt_enroll_dim
        self.adapt_layer_type = adapt_layer_type
        # With skip connections, the adaptation layer transforms the
        # (residual, skip) pair, hence ninputs=2 in that case.
        self.adapt_layer = make_adapt_layer(
            adapt_layer_type,
            indim=B,
            enrolldim=adapt_enroll_dim,
            ninputs=2 if self.skip_connection else 1,
            **adapt_layer_kwargs
        )

    def forward(self, mixture_w, enroll_emb):
        """TasNet forward with adaptation layers.

        Args:
            mixture_w: [M, N, K], M is batch size
            enroll_emb: [M, 2*adapt_enroll_dim] if self.skip_connection
                        [M, adapt_enroll_dim] if not self.skip_connection
        Returns:
            est_mask: [M, N, K]
        """
        M, N, K = mixture_w.size()
        bottleneck = self.network[:2]
        tcns = self.network[2]
        masknet = self.network[3:]
        output = bottleneck(mixture_w)
        skip_conn = 0.0
        for i, block in enumerate(tcns):
            for j, layer in enumerate(block):
                # flattened layer index across all repeats, used to locate
                # the single adaptation layer
                idx = i * len(block) + j
                is_adapt_layer = idx == self.i_adapt_layer
                tcn_out = layer(output)
                if self.skip_connection:
                    residual, skip = tcn_out
                    if is_adapt_layer:
                        # split the enrollment embedding in two halves: one
                        # conditions the residual path, one the skip path
                        residual, skip = self.adapt_layer(
                            (residual, skip), torch.chunk(enroll_emb, 2, dim=1)
                        )
                    skip_conn = skip_conn + skip
                else:
                    residual = tcn_out
                    if is_adapt_layer:
                        residual = self.adapt_layer(residual, enroll_emb)
                output = output + residual
        # Use residual output when no skip connection
        if self.skip_connection:
            score = masknet(skip_conn)
        else:
            score = masknet(output)
        # [M, self.out_channel, K]
        if self.mask_nonlinear == "softmax":
            est_mask = F.softmax(score, dim=1)
        elif self.mask_nonlinear == "relu":
            est_mask = F.relu(score)
        elif self.mask_nonlinear == "sigmoid":
            est_mask = F.sigmoid(score)
        elif self.mask_nonlinear == "tanh":
            est_mask = F.tanh(score)
        elif self.mask_nonlinear == "linear":
            est_mask = score
        else:
            raise ValueError("Unsupported mask non-linear function")
        return est_mask
class TemporalBlock(nn.Module):
    """One TCN layer: 1x1 conv -> PReLU -> norm -> depthwise-separable conv.

    Maps [M, B, K] -> [M, B, K]; when ``skip_channels`` is given, the last
    stage additionally emits a skip-connection tensor [M, Sc, K].
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        skip_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        norm_type="gLN",
        causal=False,
    ):
        super().__init__()
        self.skip_connection = skip_channels is not None
        self.net = nn.Sequential(
            # [M, B, K] -> [M, H, K]
            nn.Conv1d(in_channels, out_channels, 1, bias=False),
            nn.PReLU(),
            choose_norm(norm_type, out_channels),
            # [M, H, K] -> [M, B, K] (+ optional [M, Sc, K] skip branch)
            DepthwiseSeparableConv(
                out_channels,
                in_channels,
                skip_channels,
                kernel_size,
                stride,
                padding,
                dilation,
                norm_type,
                causal,
            ),
        )

    def forward(self, x):
        """Forward.

        Args:
            x: [M, B, K]
        Returns:
            [M, B, K] or, with skip connections, ([M, B, K], [M, Sc, K])
        """
        if not self.skip_connection:
            return self.net(x)
        res_out, skip_out = self.net(x)
        return res_out, skip_out
class DepthwiseSeparableConv(nn.Module):
    """Depthwise conv -> (chomp) -> PReLU -> norm -> pointwise 1x1 conv.

    The depthwise stage convolves each channel independently
    (``groups == in_channels``); the pointwise stage then mixes channels.
    When ``skip_channels`` is given, a parallel 1x1 conv produces a
    skip-connection output from the shared pre-pointwise activation.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        skip_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        norm_type="gLN",
        causal=False,
    ):
        super().__init__()
        # `groups=in_channels` makes this Conv1d a depthwise convolution:
        # [M, H, K] -> [M, H, K]
        stages = [
            nn.Conv1d(
                in_channels,
                in_channels,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=in_channels,
                bias=False,
            )
        ]
        if causal:
            # trim the extra right-side frames introduced by causal padding
            stages.append(Chomp1d(padding))
        stages.append(nn.PReLU())
        stages.append(choose_norm(norm_type, in_channels))
        # pointwise mixing: [M, H, K] -> [M, B, K]
        stages.append(nn.Conv1d(in_channels, out_channels, 1, bias=False))
        self.net = nn.Sequential(*stages)
        # optional skip-connection head: [M, H, K] -> [M, Sc, K]
        if skip_channels is None:
            self.skip_conv = None
        else:
            self.skip_conv = nn.Conv1d(in_channels, skip_channels, 1, bias=False)

    def forward(self, x):
        """Forward.

        Args:
            x: [M, H, K]
        Returns:
            res_out: [M, B, K]
            skip_out: [M, Sc, K] (only when skip_channels was given)
        """
        # everything except the final pointwise conv is shared with the
        # skip-connection branch
        shared = self.net[:-1](x)
        res_out = self.net[-1](shared)
        if self.skip_conv is None:
            return res_out
        return res_out, self.skip_conv(shared)
class Chomp1d(nn.Module):
    """To ensure the output length is the same as the input.

    Drops the last ``chomp_size`` frames that a preceding causal
    convolution's extra padding appended.
    """

    def __init__(self, chomp_size):
        super().__init__()
        # number of trailing frames to remove; expected to be >= 0
        self.chomp_size = chomp_size

    def forward(self, x):
        """Forward.

        Args:
            x: [M, H, Kpad]
        Returns:
            [M, H, K]
        """
        if self.chomp_size == 0:
            # Bug fix: `x[:, :, :-0]` would return an EMPTY tensor, so the
            # zero-trim case must be handled explicitly as a no-op.
            return x.contiguous()
        return x[:, :, : -self.chomp_size].contiguous()
def check_nonlinear(nolinear_type):
    """Validate that the requested nonlinearity is supported.

    Raises:
        ValueError: if ``nolinear_type`` is neither "softmax" nor "relu".
    """
    supported = ("softmax", "relu")
    if nolinear_type not in supported:
        raise ValueError("Unsupported nonlinear type")
def choose_norm(norm_type, channel_size, shape="BDT"):
    """The input of normalization will be (M, C, K), where M is batch size.

    C is channel size and K is sequence length.
    """
    if norm_type == "gLN":
        return GlobalLayerNorm(channel_size, shape=shape)
    if norm_type == "cLN":
        return ChannelwiseLayerNorm(channel_size, shape=shape)
    if norm_type == "BN":
        # For an (M, C, K) input, nn.BatchNorm1d(C) accumulates statistics
        # over both the batch (M) and time (K) axes, which is what we want.
        return nn.BatchNorm1d(channel_size)
    if norm_type == "GN":
        return nn.GroupNorm(1, channel_size, eps=1e-8)
    raise ValueError("Unsupported normalization type")
class ChannelwiseLayerNorm(nn.Module):
    """Channel-wise Layer Normalization (cLN).

    Normalizes each time step over the channel axis only, with learnable
    per-channel scale (gamma) and shift (beta).
    """

    def __init__(self, channel_size, shape="BDT"):
        super().__init__()
        # gamma/beta broadcast over batch and time: [1, N, 1]
        self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1))
        self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1))
        self.reset_parameters()
        assert shape in ["BDT", "BTD"]
        self.shape = shape

    def reset_parameters(self):
        """Initialize to the identity transform (gamma=1, beta=0)."""
        self.gamma.data.fill_(1)
        self.beta.data.zero_()

    def forward(self, y):
        """Forward.

        Args:
            y: [M, N, K], M is batch size, N is channel size, K is length
        Returns:
            cLN_y: [M, N, K]
        """
        assert y.dim() == 3
        transposed = self.shape == "BTD"
        if transposed:
            # move channels to dim 1 so statistics are taken over channels
            y = y.transpose(1, 2).contiguous()
        mu = torch.mean(y, dim=1, keepdim=True)  # [M, 1, K]
        var = torch.var(y, dim=1, keepdim=True, unbiased=False)  # [M, 1, K]
        normed = self.gamma * (y - mu) / torch.pow(var + EPS, 0.5) + self.beta
        if transposed:
            normed = normed.transpose(1, 2).contiguous()
        return normed
class GlobalLayerNorm(nn.Module):
    """Global Layer Normalization (gLN).

    Normalizes jointly over the channel and time axes, with learnable
    per-channel scale (gamma) and shift (beta).
    """

    def __init__(self, channel_size, shape="BDT"):
        super().__init__()
        # gamma/beta broadcast over batch and time: [1, N, 1]
        self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1))
        self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1))
        self.reset_parameters()
        assert shape in ["BDT", "BTD"]
        self.shape = shape

    def reset_parameters(self):
        """Initialize to the identity transform (gamma=1, beta=0)."""
        self.gamma.data.fill_(1)
        self.beta.data.zero_()

    def forward(self, y):
        """Forward.

        Args:
            y: [M, N, K], M is batch size, N is channel size, K is length
        Returns:
            gLN_y: [M, N, K]
        """
        transposed = self.shape == "BTD"
        if transposed:
            # move channels to dim 1 so gamma/beta broadcast correctly
            y = y.transpose(1, 2).contiguous()
        mu = y.mean(dim=(1, 2), keepdim=True)  # [M, 1, 1]
        var = (torch.pow(y - mu, 2)).mean(dim=(1, 2), keepdim=True)
        normed = self.gamma * (y - mu) / torch.pow(var + EPS, 0.5) + self.beta
        if transposed:
            normed = normed.transpose(1, 2).contiguous()
        return normed
| 16,165 | 30.268859 | 88 | py |
espnet | espnet-master/espnet2/enh/layers/dnn_beamformer.py | """DNN beamformer module."""
import logging
from typing import List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from torch.nn import functional as F
from torch_complex.tensor import ComplexTensor
import espnet2.enh.layers.beamformer as bf_v1
import espnet2.enh.layers.beamformer_th as bf_v2
from espnet2.enh.layers.complex_utils import stack, to_double, to_float
from espnet2.enh.layers.mask_estimator import MaskEstimator
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
is_torch_1_12_1_plus = V(torch.__version__) >= V("1.12.1")
# All beamformer variants understood by DNN_Beamformer.  The RTF-based
# MVDR/MPDR/WPD variants additionally use a distortion covariance to
# estimate the relative transfer function; the "*_souden" counterparts use
# Souden's closed-form solution instead.
BEAMFORMER_TYPES = (
    # Minimum Variance Distortionless Response beamformer
    "mvdr",  # RTF-based formula
    "mvdr_souden",  # Souden's solution
    # Minimum Power Distortionless Response beamformer
    "mpdr",  # RTF-based formula
    "mpdr_souden",  # Souden's solution
    # weighted MPDR beamformer
    "wmpdr",  # RTF-based formula
    "wmpdr_souden",  # Souden's solution
    # Weighted Power minimization Distortionless response beamformer
    "wpd",  # RTF-based formula
    "wpd_souden",  # Souden's solution
    # Multi-channel Wiener Filter (MWF) and weighted MWF
    "mwf",
    "wmwf",
    # Speech Distortion Weighted (SDW) MWF
    "sdw_mwf",
    # Rank-1 MWF
    "r1mwf",
    # Linearly Constrained Minimum Variance beamformer
    "lcmv",
    # Linearly Constrained Minimum Power beamformer
    "lcmp",
    # weighted Linearly Constrained Minimum Power beamformer
    "wlcmp",
    # Generalized Eigenvalue beamformer
    "gev",
    "gev_ban",  # with blind analytic normalization (BAN) post-filtering
    # time-frequency-bin-wise switching (TFS) MVDR beamformer
    "mvdr_tfs",
    "mvdr_tfs_souden",
)
class DNN_Beamformer(torch.nn.Module):
    """DNN mask based Beamformer.

    Citation:
        Multichannel End-to-end Speech Recognition; T. Ochiai et al., 2017;
        http://proceedings.mlr.press/v70/ochiai17a/ochiai17a.pdf

    """

    def __init__(
        self,
        bidim,
        btype: str = "blstmp",
        blayers: int = 3,
        bunits: int = 300,
        bprojs: int = 320,
        num_spk: int = 1,
        use_noise_mask: bool = True,
        nonlinear: str = "sigmoid",
        dropout_rate: float = 0.0,
        badim: int = 320,
        ref_channel: int = -1,
        beamformer_type: str = "mvdr_souden",
        rtf_iterations: int = 2,
        mwf_mu: float = 1.0,
        eps: float = 1e-6,
        diagonal_loading: bool = True,
        diag_eps: float = 1e-7,
        mask_flooring: bool = False,
        flooring_thres: float = 1e-6,
        use_torch_solver: bool = True,
        # False to use old APIs; True to use torchaudio-based new APIs
        use_torchaudio_api: bool = False,
        # only for WPD beamformer
        btaps: int = 5,
        bdelay: int = 3,
    ):
        """Initialize the mask estimator and the beamforming configuration.

        Args:
            bidim: input feature dimension of the mask estimator
            btype, blayers, bunits, bprojs: mask-estimator architecture
                (passed through to MaskEstimator)
            num_spk: number of target speakers (>= 1)
            use_noise_mask: whether the estimator also predicts a noise mask
            nonlinear: output nonlinearity of the mask estimator
            dropout_rate: dropout rate of the mask estimator
            badim: attention dimension of the attention-based reference
            ref_channel: fixed reference microphone index; a negative value
                enables the attention-based reference (AttentionReference)
            beamformer_type: one of BEAMFORMER_TYPES
            rtf_iterations: power-method iterations for RTF estimation
                (only used by non-"_souden", RTF-based beamformers)
            mwf_mu: noise suppression weight for SDW-MWF / rank-1 MWF
            eps: small constant for numerical stability
            diagonal_loading, diag_eps: diagonal loading of covariance
                matrices before inversion
            mask_flooring, flooring_thres: clamp estimated masks at a minimum
                value to increase numerical stability
            use_torch_solver: deprecated; kept for backward compatibility
            use_torchaudio_api: use the torchaudio-based implementation
                (effective only with torch >= 1.12.1)
            btaps, bdelay: number of filter taps / prediction delay for WPD
        """
        super().__init__()
        # one mask per speaker, plus an optional noise mask
        bnmask = num_spk + 1 if use_noise_mask else num_spk
        self.mask = MaskEstimator(
            btype,
            bidim,
            blayers,
            bunits,
            bprojs,
            dropout_rate,
            nmask=bnmask,
            nonlinear=nonlinear,
        )
        # attention-based reference selection is used when no fixed
        # reference channel is given (ref_channel < 0)
        self.ref = (
            AttentionReference(bidim, badim, eps=eps) if ref_channel < 0 else None
        )
        self.ref_channel = ref_channel

        self.use_noise_mask = use_noise_mask
        assert num_spk >= 1, num_spk
        self.num_spk = num_spk
        self.nmask = bnmask

        if beamformer_type not in BEAMFORMER_TYPES:
            raise ValueError("Not supporting beamformer_type=%s" % beamformer_type)
        if (
            beamformer_type == "mvdr_souden" or not beamformer_type.endswith("_souden")
        ) and not use_noise_mask:
            if num_spk == 1:
                logging.warning(
                    "Initializing %s beamformer without noise mask "
                    "estimator (single-speaker case)" % beamformer_type.upper()
                )
                logging.warning(
                    "(1 - speech_mask) will be used for estimating noise "
                    "PSD in %s beamformer!" % beamformer_type.upper()
                )
            else:
                logging.warning(
                    "Initializing %s beamformer without noise mask "
                    "estimator (multi-speaker case)" % beamformer_type.upper()
                )
                logging.warning(
                    "Interference speech masks will be used for estimating "
                    "noise PSD in %s beamformer!" % beamformer_type.upper()
                )

        self.beamformer_type = beamformer_type
        if not beamformer_type.endswith("_souden"):
            assert rtf_iterations >= 2, rtf_iterations
        # number of iterations in power method for estimating the RTF
        self.rtf_iterations = rtf_iterations
        # noise suppression weight in SDW-MWF
        self.mwf_mu = mwf_mu

        assert btaps >= 0 and bdelay >= 0, (btaps, bdelay)
        self.btaps = btaps
        # bdelay is only meaningful when WPD filter taps are used
        self.bdelay = bdelay if self.btaps > 0 else 1
        self.eps = eps
        self.diagonal_loading = diagonal_loading
        self.diag_eps = diag_eps
        self.mask_flooring = mask_flooring
        self.flooring_thres = flooring_thres
        self.use_torch_solver = use_torch_solver
        if not use_torch_solver:
            logging.warning(
                "The `use_torch_solver` argument has been deprecated. "
                "Now it will always be true in DNN_Beamformer"
            )

        if use_torchaudio_api and is_torch_1_12_1_plus:
            self.bf_func = bf_v2
        else:
            self.bf_func = bf_v1

    def forward(
        self,
        data: Union[torch.Tensor, ComplexTensor],
        ilens: torch.LongTensor,
        powers: Optional[List[torch.Tensor]] = None,
        oracle_masks: Optional[List[torch.Tensor]] = None,
    ) -> Tuple[Union[torch.Tensor, ComplexTensor], torch.LongTensor, torch.Tensor]:
        """DNN_Beamformer forward function.

        Notation:
            B: Batch
            C: Channel
            T: Time or Sequence length
            F: Freq

        Args:
            data (torch.complex64/ComplexTensor): (B, T, C, F)
            ilens (torch.Tensor): (B,)
            powers (List[torch.Tensor] or None): used for wMPDR or WPD (B, F, T)
            oracle_masks (List[torch.Tensor] or None): oracle masks (B, F, C, T)
                if not None, oracle_masks will be used instead of self.mask
        Returns:
            enhanced (torch.complex64/ComplexTensor): (B, T, F)
            ilens (torch.Tensor): (B,)
            masks (torch.Tensor): (B, T, C, F)
        """
        # data (B, T, C, F) -> (B, F, C, T)
        data = data.permute(0, 3, 2, 1)
        # statistics are accumulated in double precision for stability
        data_d = to_double(data)

        # mask: [(B, F, C, T)]
        if oracle_masks is not None:
            masks = oracle_masks
        else:
            masks, _ = self.mask(data, ilens)
        assert self.nmask == len(masks), len(masks)
        # floor masks to increase numerical stability
        if self.mask_flooring:
            masks = [torch.clamp(m, min=self.flooring_thres) for m in masks]

        if self.num_spk == 1:  # single-speaker case
            if self.use_noise_mask:
                # (mask_speech, mask_noise)
                mask_speech, mask_noise = masks
            else:
                # (mask_speech,)
                mask_speech = masks[0]
                mask_noise = 1 - mask_speech

            if self.beamformer_type in ("lcmv", "lcmp", "wlcmp"):
                raise NotImplementedError("Single source is not supported yet")

            beamformer_stats = self.bf_func.prepare_beamformer_stats(
                data_d,
                [mask_speech],
                mask_noise,
                powers=powers,
                beamformer_type=self.beamformer_type,
                bdelay=self.bdelay,
                btaps=self.btaps,
                eps=self.eps,
            )

            if self.beamformer_type in ("mvdr", "mpdr", "wmpdr", "wpd"):
                # RTF-based variants need the distortion covariance as well
                enhanced, ws = self.apply_beamforming(
                    data,
                    ilens,
                    beamformer_stats["psd_n"],
                    beamformer_stats["psd_speech"],
                    psd_distortion=beamformer_stats["psd_distortion"],
                )
            elif (
                self.beamformer_type.endswith("_souden")
                or self.beamformer_type == "mwf"
                or self.beamformer_type == "wmwf"
                or self.beamformer_type == "sdw_mwf"
                or self.beamformer_type == "r1mwf"
                or self.beamformer_type.startswith("gev")
            ):
                enhanced, ws = self.apply_beamforming(
                    data,
                    ilens,
                    beamformer_stats["psd_n"],
                    beamformer_stats["psd_speech"],
                )
            else:
                raise ValueError(
                    "Not supporting beamformer_type={}".format(self.beamformer_type)
                )

            # (..., F, T) -> (..., T, F)
            enhanced = enhanced.transpose(-1, -2)
        else:  # multi-speaker case
            if self.use_noise_mask:
                # (mask_speech1, ..., mask_noise)
                mask_speech = list(masks[:-1])
                mask_noise = masks[-1]
            else:
                # (mask_speech1, ..., mask_speechX)
                mask_speech = list(masks)
                mask_noise = None

            beamformer_stats = self.bf_func.prepare_beamformer_stats(
                data_d,
                mask_speech,
                mask_noise,
                powers=powers,
                beamformer_type=self.beamformer_type,
                bdelay=self.bdelay,
                btaps=self.btaps,
                eps=self.eps,
            )
            if self.beamformer_type in ("lcmv", "lcmp", "wlcmp"):
                # RTF matrix of all speakers, shared across speaker loops
                rtf_mat = self.bf_func.get_rtf_matrix(
                    beamformer_stats["psd_speech"],
                    beamformer_stats["psd_distortion"],
                    diagonal_loading=self.diagonal_loading,
                    ref_channel=self.ref_channel,
                    rtf_iterations=self.rtf_iterations,
                    diag_eps=self.diag_eps,
                )

            enhanced, ws = [], []
            for i in range(self.num_spk):
                # treat all other speakers' psd_speech as noises
                if self.beamformer_type in ("mvdr", "mvdr_tfs", "wmpdr", "wpd"):
                    enh, w = self.apply_beamforming(
                        data,
                        ilens,
                        beamformer_stats["psd_n"][i],
                        beamformer_stats["psd_speech"][i],
                        psd_distortion=beamformer_stats["psd_distortion"][i],
                    )
                elif self.beamformer_type in (
                    "mvdr_souden",
                    "mvdr_tfs_souden",
                    "wmpdr_souden",
                    "wpd_souden",
                    "wmwf",
                    "sdw_mwf",
                    "r1mwf",
                    "gev",
                    "gev_ban",
                ):
                    enh, w = self.apply_beamforming(
                        data,
                        ilens,
                        beamformer_stats["psd_n"][i],
                        beamformer_stats["psd_speech"][i],
                    )
                elif self.beamformer_type == "mpdr":
                    # psd_n is the shared observation covariance here
                    enh, w = self.apply_beamforming(
                        data,
                        ilens,
                        beamformer_stats["psd_n"],
                        beamformer_stats["psd_speech"][i],
                        psd_distortion=beamformer_stats["psd_distortion"][i],
                    )
                elif self.beamformer_type in ("mpdr_souden", "mwf"):
                    enh, w = self.apply_beamforming(
                        data,
                        ilens,
                        beamformer_stats["psd_n"],
                        beamformer_stats["psd_speech"][i],
                    )
                elif self.beamformer_type == "lcmp":
                    enh, w = self.apply_beamforming(
                        data,
                        ilens,
                        beamformer_stats["psd_n"],
                        beamformer_stats["psd_speech"][i],
                        rtf_mat=rtf_mat,
                        spk=i,
                    )
                elif self.beamformer_type in ("lcmv", "wlcmp"):
                    enh, w = self.apply_beamforming(
                        data,
                        ilens,
                        beamformer_stats["psd_n"][i],
                        beamformer_stats["psd_speech"][i],
                        rtf_mat=rtf_mat,
                        spk=i,
                    )
                else:
                    raise ValueError(
                        "Not supporting beamformer_type={}".format(self.beamformer_type)
                    )

                # (..., F, T) -> (..., T, F)
                enh = enh.transpose(-1, -2)
                enhanced.append(enh)
                ws.append(w)

        # (..., F, C, T) -> (..., T, C, F)
        masks = [m.transpose(-1, -3) for m in masks]
        return enhanced, ilens, masks

    def apply_beamforming(
        self,
        data,
        ilens,
        psd_n,
        psd_speech,
        psd_distortion=None,
        rtf_mat=None,
        spk=0,
    ):
        """Beamforming with the provided statistics.

        Args:
            data (torch.complex64/ComplexTensor): (B, F, C, T)
            ilens (torch.Tensor): (B,)
            psd_n (torch.complex64/ComplexTensor):
                Noise covariance matrix for MVDR (B, F, C, C)
                Observation covariance matrix for MPDR/wMPDR (B, F, C, C)
                Stacked observation covariance for WPD (B,F,(btaps+1)*C,(btaps+1)*C)
            psd_speech (torch.complex64/ComplexTensor):
                Speech covariance matrix (B, F, C, C)
            psd_distortion (torch.complex64/ComplexTensor):
                Noise covariance matrix (B, F, C, C)
            rtf_mat (torch.complex64/ComplexTensor):
                RTF matrix (B, F, C, num_spk)
            spk (int): speaker index
        Return:
            enhanced (torch.complex64/ComplexTensor): (B, F, T)
            ws (torch.complex64/ComplexTensor): (B, F) or (B, F, (btaps+1)*C)
        """
        # u: (B, C)
        if self.ref_channel < 0:
            # attention-based soft reference vector
            u, _ = self.ref(psd_speech.to(dtype=data.dtype), ilens)
            u = u.double()
        else:
            if self.beamformer_type.endswith("_souden"):
                # (optional) Create onehot vector for fixed reference microphone
                u = torch.zeros(
                    *(data.size()[:-3] + (data.size(-2),)),
                    device=data.device,
                    dtype=torch.double
                )
                u[..., self.ref_channel].fill_(1)
            else:
                # for simplifying computation in RTF-based beamforming
                u = self.ref_channel

        if self.beamformer_type in ("mvdr", "mpdr", "wmpdr"):
            ws = self.bf_func.get_mvdr_vector_with_rtf(
                to_double(psd_n),
                to_double(psd_speech),
                to_double(psd_distortion),
                iterations=self.rtf_iterations,
                reference_vector=u,
                diagonal_loading=self.diagonal_loading,
                diag_eps=self.diag_eps,
            )
            enhanced = self.bf_func.apply_beamforming_vector(ws, to_double(data))
        elif self.beamformer_type == "mvdr_tfs":
            assert isinstance(psd_n, (list, tuple))
            # one filter per candidate noise covariance
            ws = [
                self.bf_func.get_mvdr_vector_with_rtf(
                    to_double(psd_n_i),
                    to_double(psd_speech),
                    to_double(psd_distortion),
                    iterations=self.rtf_iterations,
                    reference_vector=u,
                    diagonal_loading=self.diagonal_loading,
                    diag_eps=self.diag_eps,
                )
                for psd_n_i in psd_n
            ]
            enhanced = stack(
                [self.bf_func.apply_beamforming_vector(w, to_double(data)) for w in ws]
            )
            # per T-F bin, keep the filter output with the smallest magnitude
            with torch.no_grad():
                index = enhanced.abs().argmin(dim=0, keepdims=True)
            enhanced = enhanced.gather(0, index).squeeze(0)
            ws = stack(ws, dim=0)
        elif self.beamformer_type in (
            "mpdr_souden",
            "mvdr_souden",
            "wmpdr_souden",
        ):
            ws = self.bf_func.get_mvdr_vector(
                to_double(psd_speech),
                to_double(psd_n),
                u,
                diagonal_loading=self.diagonal_loading,
                diag_eps=self.diag_eps,
            )
            enhanced = self.bf_func.apply_beamforming_vector(ws, to_double(data))
        elif self.beamformer_type == "mvdr_tfs_souden":
            assert isinstance(psd_n, (list, tuple))
            ws = [
                self.bf_func.get_mvdr_vector(
                    to_double(psd_speech),
                    to_double(psd_n_i),
                    u,
                    diagonal_loading=self.diagonal_loading,
                    diag_eps=self.diag_eps,
                )
                for psd_n_i in psd_n
            ]
            enhanced = stack(
                [self.bf_func.apply_beamforming_vector(w, to_double(data)) for w in ws]
            )
            # per T-F bin, keep the filter output with the smallest magnitude
            with torch.no_grad():
                index = enhanced.abs().argmin(dim=0, keepdims=True)
            enhanced = enhanced.gather(0, index).squeeze(0)
            ws = stack(ws, dim=0)
        elif self.beamformer_type == "wpd":
            ws = self.bf_func.get_WPD_filter_with_rtf(
                to_double(psd_n),
                to_double(psd_speech),
                to_double(psd_distortion),
                iterations=self.rtf_iterations,
                reference_vector=u,
                diagonal_loading=self.diagonal_loading,
                diag_eps=self.diag_eps,
            )
            enhanced = self.bf_func.perform_WPD_filtering(
                ws, to_double(data), self.bdelay, self.btaps
            )
        elif self.beamformer_type == "wpd_souden":
            ws = self.bf_func.get_WPD_filter_v2(
                to_double(psd_speech),
                to_double(psd_n),
                u,
                diagonal_loading=self.diagonal_loading,
                diag_eps=self.diag_eps,
            )
            enhanced = self.bf_func.perform_WPD_filtering(
                ws, to_double(data), self.bdelay, self.btaps
            )
        elif self.beamformer_type in ("mwf", "wmwf"):
            ws = self.bf_func.get_mwf_vector(
                to_double(psd_speech),
                to_double(psd_n),
                u,
                diagonal_loading=self.diagonal_loading,
                diag_eps=self.diag_eps,
            )
            enhanced = self.bf_func.apply_beamforming_vector(ws, to_double(data))
        elif self.beamformer_type == "sdw_mwf":
            ws = self.bf_func.get_sdw_mwf_vector(
                to_double(psd_speech),
                to_double(psd_n),
                u,
                denoising_weight=self.mwf_mu,
                diagonal_loading=self.diagonal_loading,
                diag_eps=self.diag_eps,
            )
            enhanced = self.bf_func.apply_beamforming_vector(ws, to_double(data))
        elif self.beamformer_type == "r1mwf":
            ws = self.bf_func.get_rank1_mwf_vector(
                to_double(psd_speech),
                to_double(psd_n),
                u,
                denoising_weight=self.mwf_mu,
                diagonal_loading=self.diagonal_loading,
                diag_eps=self.diag_eps,
            )
            enhanced = self.bf_func.apply_beamforming_vector(ws, to_double(data))
        elif self.beamformer_type in ("lcmp", "wlcmp", "lcmv"):
            ws = self.bf_func.get_lcmv_vector_with_rtf(
                to_double(psd_n),
                to_double(rtf_mat),
                reference_vector=spk,
                diagonal_loading=self.diagonal_loading,
                diag_eps=self.diag_eps,
            )
            enhanced = self.bf_func.apply_beamforming_vector(ws, to_double(data))
        elif self.beamformer_type.startswith("gev"):
            ws = self.bf_func.get_gev_vector(
                to_double(psd_n),
                to_double(psd_speech),
                mode="power",
                diagonal_loading=self.diagonal_loading,
                diag_eps=self.diag_eps,
            )
            enhanced = self.bf_func.apply_beamforming_vector(ws, to_double(data))
            if self.beamformer_type == "gev_ban":
                # blind analytic normalization post-filter gain
                gain = self.bf_func.blind_analytic_normalization(ws, to_double(psd_n))
                enhanced = enhanced * gain.unsqueeze(-1)
        else:
            raise ValueError(
                "Not supporting beamformer_type={}".format(self.beamformer_type)
            )

        return enhanced.to(dtype=data.dtype), ws.to(dtype=data.dtype)

    def predict_mask(
        self, data: Union[torch.Tensor, ComplexTensor], ilens: torch.LongTensor
    ) -> Tuple[Tuple[torch.Tensor, ...], torch.LongTensor]:
        """Predict masks for beamforming.

        Args:
            data (torch.complex64/ComplexTensor): (B, T, C, F), double precision
            ilens (torch.Tensor): (B,)
        Returns:
            masks (torch.Tensor): (B, T, C, F)
            ilens (torch.Tensor): (B,)
        """
        masks, _ = self.mask(to_float(data.permute(0, 3, 2, 1)), ilens)
        # (B, F, C, T) -> (B, T, C, F)
        masks = [m.transpose(-1, -3) for m in masks]
        return masks, ilens
class AttentionReference(torch.nn.Module):
    """Attention-based estimation of reference-microphone weights.

    Scores each channel from the magnitudes of the (off-diagonal) speech
    PSD entries and softmax-normalizes the scores into a weight vector.
    """

    def __init__(self, bidim, att_dim, eps=1e-6):
        super().__init__()
        self.mlp_psd = torch.nn.Linear(bidim, att_dim)
        self.gvec = torch.nn.Linear(att_dim, 1)
        # numerical floor inside the square root of the magnitude feature
        self.eps = eps

    def forward(
        self,
        psd_in: Union[torch.Tensor, ComplexTensor],
        ilens: torch.LongTensor,
        scaling: float = 2.0,
    ) -> Tuple[torch.Tensor, torch.LongTensor]:
        """Attention-based reference forward function.

        Args:
            psd_in (torch.complex64/ComplexTensor): (B, F, C, C)
            ilens (torch.Tensor): (B,)
            scaling (float): temperature applied to scores before the softmax
        Returns:
            u (torch.Tensor): (B, C)
            ilens (torch.Tensor): (B,)
        """
        B, _, C = psd_in.size()[:3]
        assert psd_in.size(2) == psd_in.size(3), psd_in.size()
        # zero the diagonal, then average each row over its C-1 off-diagonal
        # entries; (B, F, C, C) -> (B, C, F)
        diag_mask = torch.eye(C, dtype=torch.bool, device=psd_in.device)
        psd = psd_in.masked_fill(diag_mask, 0)
        psd = (psd.sum(dim=-1) / (C - 1)).transpose(-1, -2)
        # magnitude features, (B, C, F)
        psd_feat = (psd.real**2 + psd.imag**2 + self.eps) ** 0.5
        # (B, C, F) -> (B, C, F2) -> (B, C, 1) -> (B, C)
        e = self.gvec(torch.tanh(self.mlp_psd(psd_feat))).squeeze(-1)
        u = F.softmax(scaling * e, dim=-1)
        return u, ilens
| 23,362 | 37.174837 | 88 | py |
espnet | espnet-master/espnet2/enh/encoder/abs_encoder.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class AbsEncoder(torch.nn.Module, ABC):
    """Abstract base class for enhancement/separation front-end encoders."""

    @abstractmethod
    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode a batch of waveforms into a feature sequence.

        Args:
            input: mixed speech (Batch, samples)
            ilens: input lengths (Batch,)
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def output_dim(self) -> int:
        """Feature dimension of the encoder output."""
        raise NotImplementedError

    def forward_streaming(self, input: torch.Tensor):
        """Encode a single frame for streaming inference (optional override)."""
        raise NotImplementedError

    def streaming_frame(self, audio: torch.Tensor):
        """streaming_frame. It splits the continuous audio into frame-level
        audio chunks in the streaming *simulation*. It is noted that this
        function takes the entire long audio as input for a streaming simulation.
        You may refer to this function to manage your streaming input
        buffer in a real streaming application.

        Args:
            audio: (B, T)
        Returns:
            chunked: List [(B, frame_size),]
        """
        # Bug fix: the original evaluated `NotImplementedError` without
        # raising it, so calls silently returned None instead of signaling
        # that the subclass must override this method.
        raise NotImplementedError
espnet | espnet-master/espnet2/enh/encoder/null_encoder.py | import torch
from espnet2.enh.encoder.abs_encoder import AbsEncoder
class NullEncoder(AbsEncoder):
    """Pass-through encoder: the "feature" is the raw waveform itself."""

    def __init__(self):
        super().__init__()

    @property
    def output_dim(self) -> int:
        # each time step carries a single raw sample
        return 1

    def forward(self, input: torch.Tensor, ilens: torch.Tensor):
        """Forward.

        Args:
            input (torch.Tensor): mixed speech [Batch, sample]
            ilens (torch.Tensor): input lengths [Batch]
        """
        return input, ilens
| 503 | 20 | 64 | py |
espnet | espnet-master/espnet2/enh/encoder/stft_encoder.py | import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.encoder.abs_encoder import AbsEncoder
from espnet2.layers.stft import Stft
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class STFTEncoder(AbsEncoder):
    """STFT encoder for speech enhancement and separation"""

    def __init__(
        self,
        n_fft: int = 512,
        win_length: int = None,
        hop_length: int = 128,
        window="hann",
        center: bool = True,
        normalized: bool = False,
        onesided: bool = True,
        use_builtin_complex: bool = True,
    ):
        super().__init__()
        self.stft = Stft(
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            window=window,
            center=center,
            normalized=normalized,
            onesided=onesided,
        )
        # a onesided STFT keeps only the non-negative frequency bins
        self._output_dim = n_fft // 2 + 1 if onesided else n_fft
        self.use_builtin_complex = use_builtin_complex
        self.win_length = win_length if win_length else n_fft
        self.hop_length = hop_length
        self.window = window
        self.n_fft = n_fft
        self.center = center

    @property
    def output_dim(self) -> int:
        return self._output_dim

    def forward(self, input: torch.Tensor, ilens: torch.Tensor):
        """Forward.

        Args:
            input (torch.Tensor): mixed speech [Batch, sample]
            ilens (torch.Tensor): input lengths [Batch]
        """
        # for supporting half-precision training: run the STFT in fp32 and
        # cast the spectrum back to the input dtype afterwards
        if input.dtype in (torch.float16, torch.bfloat16):
            spectrum, flens = self.stft(input.float(), ilens)
            spectrum = spectrum.to(dtype=input.dtype)
        else:
            spectrum, flens = self.stft(input, ilens)

        # pack the trailing (real, imag) pair into a complex tensor; fall
        # back to torch_complex's ComplexTensor on older PyTorch versions
        if is_torch_1_9_plus and self.use_builtin_complex:
            spectrum = torch.complex(spectrum[..., 0], spectrum[..., 1])
        else:
            spectrum = ComplexTensor(spectrum[..., 0], spectrum[..., 1])

        return spectrum, flens

    def _apply_window_func(self, input):
        """Window one frame and zero-pad it symmetrically to n_fft samples."""
        B = input.shape[0]

        window_func = getattr(torch, f"{self.window}_window")
        window = window_func(self.win_length, dtype=input.dtype, device=input.device)
        n_pad_left = (self.n_fft - window.shape[0]) // 2
        n_pad_right = self.n_fft - window.shape[0] - n_pad_left

        windowed = input * window

        windowed = torch.cat(
            [torch.zeros(B, n_pad_left), windowed, torch.zeros(B, n_pad_right)], 1
        )
        return windowed

    def forward_streaming(self, input: torch.Tensor):
        """Forward.

        Args:
            input (torch.Tensor): mixed speech [Batch, frame_length]
        Return:
            B, 1, F
        """
        assert (
            input.dim() == 2
        ), "forward_streaming only support for single-channel input currently."

        windowed = self._apply_window_func(input)

        # rfft mirrors the offline onesided=True behavior
        feature = (
            torch.fft.rfft(windowed) if self.stft.onesided else torch.fft.fft(windowed)
        )
        # add a singleton time-frame axis: (B, F) -> (B, 1, F)
        feature = feature.unsqueeze(1)
        if not (is_torch_1_9_plus and self.use_builtin_complex):
            feature = ComplexTensor(feature.real, feature.imag)

        return feature

    def streaming_frame(self, audio):
        """streaming_frame. It splits the continuous audio into frame-level
        audio chunks in the streaming *simulation*. It is noted that this
        function takes the entire long audio as input for a streaming simulation.
        You may refer to this function to manage your streaming input
        buffer in a real streaming application.

        Args:
            audio: (B, T)
        Returns:
            chunked: List [(B, frame_size),]
        """
        if self.center:
            pad_len = int(self.win_length // 2)
            signal_dim = audio.dim()
            extended_shape = [1] * (3 - signal_dim) + list(audio.size())
            # the default STFT pad mode is "reflect",
            # which is not configurable in STFT encoder,
            # so, here we just use "reflect mode"
            audio = torch.nn.functional.pad(
                audio.view(extended_shape), [pad_len, pad_len], "reflect"
            )
            audio = audio.view(audio.shape[-signal_dim:])

        _, audio_len = audio.shape

        n_frames = 1 + (audio_len - self.win_length) // self.hop_length
        strides = list(audio.stride())

        shape = list(audio.shape[:-1]) + [self.win_length, n_frames]
        strides = strides + [self.hop_length]

        # as_strided builds overlapping frame views without copying; unbind
        # yields one (B, win_length) chunk per frame
        return audio.as_strided(shape, strides, storage_offset=0).unbind(dim=-1)
| 4,672 | 32.378571 | 87 | py |
espnet | espnet-master/espnet2/enh/encoder/conv_encoder.py | import math
import torch
from espnet2.enh.encoder.abs_encoder import AbsEncoder
class ConvEncoder(AbsEncoder):
    """Convolutional encoder for speech enhancement and separation"""

    def __init__(
        self,
        channel: int,
        kernel_size: int,
        stride: int,
    ):
        super().__init__()
        # learned analysis filterbank: 1 input channel -> `channel` filters
        self.conv1d = torch.nn.Conv1d(
            1, channel, kernel_size=kernel_size, stride=stride, bias=False
        )
        self.stride = stride
        self.kernel_size = kernel_size

        self._output_dim = channel

    @property
    def output_dim(self) -> int:
        return self._output_dim

    def forward(self, input: torch.Tensor, ilens: torch.Tensor):
        """Forward.

        Args:
            input (torch.Tensor): mixed speech [Batch, sample]
            ilens (torch.Tensor): input lengths [Batch]

        Returns:
            feature (torch.Tensor): mixed feature after encoder [Batch, flens, channel]
        """
        assert input.dim() == 2, "Currently only support single channel input"

        # (B, T) -> (B, 1, T): add the channel axis expected by Conv1d
        feature = torch.nn.functional.relu(self.conv1d(input.unsqueeze(1)))
        # (B, channel, flens) -> (B, flens, channel)
        feature = feature.transpose(1, 2)

        flens = (ilens - self.kernel_size) // self.stride + 1
        return feature, flens

    def forward_streaming(self, input: torch.Tensor):
        feature, _ = self.forward(input, 0)
        return feature

    def streaming_frame(self, audio: torch.Tensor):
        """streaming_frame. It splits the continuous audio into frame-level
        audio chunks in the streaming *simulation*. It is noted that this
        function takes the entire long audio as input for a streaming simulation.
        You may refer to this function to manage your streaming input
        buffer in a real streaming application.

        Args:
            audio: (B, T)
        Returns:
            chunked: List [(B, frame_size),]
        """
        _, audio_len = audio.shape
        hop, frame = self.stride, self.kernel_size
        n_frames = (audio_len - frame) // hop + 1
        return [
            audio[:, start : start + frame]
            for start in range(0, n_frames * hop, hop)
        ]
if __name__ == "__main__":
    # Smoke test: frame-by-frame streaming encoding must reproduce the
    # full-utterance forward pass.
    input_audio = torch.randn((2, 100))
    ilens = torch.LongTensor([100, 98])
    nfft = 32
    win_length = 28
    hop = 10
    encoder = ConvEncoder(kernel_size=nfft, stride=hop, channel=16)
    frames, flens = encoder(input_audio, ilens)
    splited = encoder.streaming_frame(input_audio)
    sframes = [encoder.forward_streaming(s) for s in splited]
    # concatenate the per-frame outputs along the time (frame) axis
    sframes = torch.cat(sframes, dim=1)
    # NOTE(review): torch.testing.assert_allclose is deprecated in recent
    # PyTorch releases; consider torch.testing.assert_close once the minimum
    # supported torch version allows it.
    torch.testing.assert_allclose(sframes, frames)
| 2,731 | 26.877551 | 87 | py |
espnet | espnet-master/espnet2/enh/loss/criterions/time_domain.py | import logging
import math
from abc import ABC
import ci_sdr
import fast_bss_eval
import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss
from espnet2.layers.stft import Stft
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class TimeDomainLoss(AbsEnhLoss, ABC):
    """Base class for all time-domain Enhancement loss modules."""
    @property
    def name(self) -> str:
        # Reporter key for this criterion (possibly suffixed in __init__).
        return self._name
    @property
    def only_for_test(self) -> bool:
        # True if this criterion is evaluated only at validation/inference.
        return self._only_for_test
    @property
    def is_noise_loss(self) -> bool:
        # True if this criterion is computed on the noise estimate.
        return self._is_noise_loss
    @property
    def is_dereverb_loss(self) -> bool:
        # True if this criterion is computed on the dereverberated estimate.
        return self._is_dereverb_loss
    def __init__(
        self,
        name,
        only_for_test=False,
        is_noise_loss=False,
        is_dereverb_loss=False,
    ):
        """Initialize the base time-domain loss.

        Args:
            name (str): reporter key; "_noise"/"_dereverb" is appended when
                the corresponding flag is set and the suffix is absent.
            only_for_test (bool): evaluate this criterion only at validation.
            is_noise_loss (bool): criterion targets the noise signal.
            is_dereverb_loss (bool): criterion targets the dereverbed signal;
                mutually exclusive with ``is_noise_loss``.

        Raises:
            ValueError: if both flags are True.
        """
        super().__init__()
        # only used during validation
        self._only_for_test = only_for_test
        # only used to calculate the noise-related loss
        self._is_noise_loss = is_noise_loss
        # only used to calculate the dereverberation-related loss
        self._is_dereverb_loss = is_dereverb_loss
        if is_noise_loss and is_dereverb_loss:
            raise ValueError(
                "`is_noise_loss` and `is_dereverb_loss` cannot be True at the same time"
            )
        if is_noise_loss and "noise" not in name:
            name = name + "_noise"
        if is_dereverb_loss and "dereverb" not in name:
            name = name + "_dereverb"
        self._name = name
EPS = torch.finfo(torch.get_default_dtype()).eps
class CISDRLoss(TimeDomainLoss):
    """CI-SDR loss
    Reference:
        Convolutive Transfer Function Invariant SDR Training
        Criteria for Multi-Channel Reverberant Speech Separation;
        C. Boeddeker et al., 2021;
        https://arxiv.org/abs/2011.15003
    Args:
        ref: (Batch, samples)
        inf: (Batch, samples)
        filter_length (int): a time-invariant filter that allows
            slight distortion via filtering
    Returns:
        loss: (Batch,)
    """
    def __init__(
        self,
        filter_length=512,
        name=None,
        only_for_test=False,
        is_noise_loss=False,
        is_dereverb_loss=False,
    ):
        _name = "ci_sdr_loss" if name is None else name
        super().__init__(
            _name,
            only_for_test=only_for_test,
            is_noise_loss=is_noise_loss,
            is_dereverb_loss=is_dereverb_loss,
        )
        # Length of the allowed time-invariant distortion filter,
        # forwarded to ci_sdr in forward().
        self.filter_length = filter_length
    def forward(
        self,
        ref: torch.Tensor,
        inf: torch.Tensor,
    ) -> torch.Tensor:
        """Return the CI-SDR loss, shape (Batch,)."""
        assert ref.shape == inf.shape, (ref.shape, inf.shape)
        # compute_permutation=False: sources are assumed already aligned.
        return ci_sdr.pt.ci_sdr_loss(
            inf, ref, compute_permutation=False, filter_length=self.filter_length
        )
class SNRLoss(TimeDomainLoss):
    """Signal-to-Noise Ratio loss (negative SNR in dB)."""

    def __init__(
        self,
        eps=EPS,
        name=None,
        only_for_test=False,
        is_noise_loss=False,
        is_dereverb_loss=False,
    ):
        super().__init__(
            "snr_loss" if name is None else name,
            only_for_test=only_for_test,
            is_noise_loss=is_noise_loss,
            is_dereverb_loss=is_dereverb_loss,
        )
        # Floor applied to both norms to avoid log10(0).
        self.eps = float(eps)

    def forward(self, ref: torch.Tensor, inf: torch.Tensor) -> torch.Tensor:
        """Return the negative SNR per utterance, shape (batch,)."""
        residual = inf - ref
        ref_db = torch.log10(torch.norm(ref, p=2, dim=1).clamp(min=self.eps))
        err_db = torch.log10(torch.norm(residual, p=2, dim=1).clamp(min=self.eps))
        # SNR[dB] = 20 * (log10 ||ref|| - log10 ||inf - ref||)
        return -20 * (ref_db - err_db)
class SDRLoss(TimeDomainLoss):
    """SDR loss.
    filter_length: int
        The length of the distortion filter allowed (default: ``512``)
    use_cg_iter:
        If provided, an iterative method is used to solve for the distortion
        filter coefficients instead of direct Gaussian elimination.
        This can speed up the computation of the metrics in case the filters
        are long. Using a value of 10 here has been shown to provide
        good accuracy in most cases and is sufficient when using this
        loss to train neural separation networks.
    clamp_db: float
        clamp the output value in [-clamp_db, clamp_db]
    zero_mean: bool
        When set to True, the mean of all signals is subtracted prior.
    load_diag:
        If provided, this small value is added to the diagonal coefficients of
        the system metrices when solving for the filter coefficients.
        This can help stabilize the metric in the case where some of the reference
        signals may sometimes be zero
    """
    def __init__(
        self,
        filter_length=512,
        use_cg_iter=None,
        clamp_db=None,
        zero_mean=True,
        load_diag=None,
        name=None,
        only_for_test=False,
        is_noise_loss=False,
        is_dereverb_loss=False,
    ):
        _name = "sdr_loss" if name is None else name
        super().__init__(
            _name,
            only_for_test=only_for_test,
            is_noise_loss=is_noise_loss,
            is_dereverb_loss=is_dereverb_loss,
        )
        # All options are forwarded verbatim to fast_bss_eval.sdr_loss.
        self.filter_length = filter_length
        self.use_cg_iter = use_cg_iter
        self.clamp_db = clamp_db
        self.zero_mean = zero_mean
        self.load_diag = load_diag
    def forward(self, ref: torch.Tensor, est: torch.Tensor) -> torch.Tensor:
        """SDR forward.
        Args:
            ref: Tensor, (..., n_samples)
                reference signal
            est: Tensor (..., n_samples)
                estimated signal
        Returns:
            loss: (...,)
                the SDR loss (negative sdr)
        """
        # pairwise=False: score each (ref, est) pair directly, no matching.
        sdr_loss = fast_bss_eval.sdr_loss(
            est=est,
            ref=ref,
            filter_length=self.filter_length,
            use_cg_iter=self.use_cg_iter,
            zero_mean=self.zero_mean,
            clamp_db=self.clamp_db,
            load_diag=self.load_diag,
            pairwise=False,
        )
        return sdr_loss
class SISNRLoss(TimeDomainLoss):
    """SI-SNR (or named SI-SDR) loss
    A more stable SI-SNR loss with clamp from `fast_bss_eval`.
    Attributes:
        clamp_db: float
            clamp the output value in [-clamp_db, clamp_db]
        zero_mean: bool
            When set to True, the mean of all signals is subtracted prior.
        eps: float
            Deprecated. Kept for compatibility.
    """
    def __init__(
        self,
        clamp_db=None,
        zero_mean=True,
        eps=None,
        name=None,
        only_for_test=False,
        is_noise_loss=False,
        is_dereverb_loss=False,
    ):
        _name = "si_snr_loss" if name is None else name
        super().__init__(
            _name,
            only_for_test=only_for_test,
            is_noise_loss=is_noise_loss,
            is_dereverb_loss=is_dereverb_loss,
        )
        self.clamp_db = clamp_db
        self.zero_mean = zero_mean
        if eps is not None:
            logging.warning("Eps is deprecated in si_snr loss, set clamp_db instead.")
            if self.clamp_db is None:
                # Map the legacy eps to an equivalent dB clamp range.
                self.clamp_db = -math.log10(eps / (1 - eps)) * 10
    def forward(self, ref: torch.Tensor, est: torch.Tensor) -> torch.Tensor:
        """SI-SNR forward.
        Args:
            ref: Tensor, (..., n_samples)
                reference signal
            est: Tensor (..., n_samples)
                estimated signal
        Returns:
            loss: (...,)
                the SI-SDR loss (negative si-sdr)
        """
        assert torch.is_tensor(est) and torch.is_tensor(ref), est
        # pairwise=False: score each (ref, est) pair directly, no matching.
        si_snr = fast_bss_eval.si_sdr_loss(
            est=est,
            ref=ref,
            zero_mean=self.zero_mean,
            clamp_db=self.clamp_db,
            pairwise=False,
        )
        return si_snr
class TimeDomainMSE(TimeDomainLoss):
    """Mean-squared error computed directly on waveforms."""

    def __init__(
        self,
        name=None,
        only_for_test=False,
        is_noise_loss=False,
        is_dereverb_loss=False,
    ):
        super().__init__(
            "TD_MSE_loss" if name is None else name,
            only_for_test=only_for_test,
            is_noise_loss=is_noise_loss,
            is_dereverb_loss=is_dereverb_loss,
        )

    def forward(self, ref, inf) -> torch.Tensor:
        """Time-domain MSE loss forward.

        Args:
            ref: (Batch, T) or (Batch, T, C)
            inf: (Batch, T) or (Batch, T, C)
        Returns:
            loss: (Batch,)
        """
        assert ref.shape == inf.shape, (ref.shape, inf.shape)
        squared_error = (ref - inf).pow(2)
        # Average over all non-batch dimensions.
        if ref.dim() == 2:
            return squared_error.mean(dim=1)
        if ref.dim() == 3:
            return squared_error.mean(dim=[1, 2])
        raise ValueError(
            "Invalid input shape: ref={}, inf={}".format(ref.shape, inf.shape)
        )
class TimeDomainL1(TimeDomainLoss):
    """Mean absolute error computed directly on waveforms."""

    def __init__(
        self,
        name=None,
        only_for_test=False,
        is_noise_loss=False,
        is_dereverb_loss=False,
    ):
        super().__init__(
            "TD_L1_loss" if name is None else name,
            only_for_test=only_for_test,
            is_noise_loss=is_noise_loss,
            is_dereverb_loss=is_dereverb_loss,
        )

    def forward(self, ref, inf) -> torch.Tensor:
        """Time-domain L1 loss forward.

        Args:
            ref: (Batch, T) or (Batch, T, C)
            inf: (Batch, T) or (Batch, T, C)
        Returns:
            loss: (Batch,)
        """
        assert ref.shape == inf.shape, (ref.shape, inf.shape)
        abs_error = (ref - inf).abs()
        # Average over all non-batch dimensions.
        if ref.dim() == 2:
            return abs_error.mean(dim=1)
        if ref.dim() == 3:
            return abs_error.mean(dim=[1, 2])
        raise ValueError(
            "Invalid input shape: ref={}, inf={}".format(ref.shape, inf.shape)
        )
class MultiResL1SpecLoss(TimeDomainLoss):
    """Multi-Resolution L1 time-domain + STFT mag loss
    Reference:
        Lu, Y. J., Cornell, S., Chang, X., Zhang, W., Li, C., Ni, Z., ... & Watanabe, S.
        Towards Low-Distortion Multi-Channel Speech Enhancement:
        The ESPNET-Se Submission to the L3DAS22 Challenge. ICASSP 2022 p. 9201-9205.
    Attributes:
        window_sz: (list)
            list of STFT window sizes.
        hop_sz: (list, optional)
            list of hop_sizes, default is each window_sz // 2.
        eps: (float)
            stability epsilon
        time_domain_weight: (float)
            weight for time domain loss.
    """
    def __init__(
        self,
        window_sz=[512],
        hop_sz=None,
        eps=1e-8,
        time_domain_weight=0.5,
        name=None,
        only_for_test=False,
    ):
        _name = "TD_L1_loss" if name is None else name
        super(MultiResL1SpecLoss, self).__init__(_name, only_for_test=only_for_test)
        # Even window sizes are required by the STFT configuration below.
        assert all([x % 2 == 0 for x in window_sz])
        self.window_sz = window_sz
        if hop_sz is None:
            self.hop_sz = [x // 2 for x in window_sz]
        else:
            self.hop_sz = hop_sz
        self.time_domain_weight = time_domain_weight
        self.eps = eps
        # One STFT front-end per resolution.
        self.stft_encoders = torch.nn.ModuleList([])
        for w, h in zip(self.window_sz, self.hop_sz):
            stft_enc = Stft(
                n_fft=w,
                win_length=w,
                hop_length=h,
                window=None,
                center=True,
                normalized=False,
                onesided=True,
            )
            self.stft_encoders.append(stft_enc)
    @property
    def name(self) -> str:
        # NOTE(review): this returns a fixed string and shadows the ``_name``
        # set in __init__ (including the ``name`` argument) — confirm whether
        # that is intentional.
        return "l1_timedomain+magspec_loss"
    def get_magnitude(self, stft):
        # Input is a real/imag stacked tensor (..., 2); return the magnitude.
        if is_torch_1_9_plus:
            stft = torch.complex(stft[..., 0], stft[..., 1])
        else:
            stft = ComplexTensor(stft[..., 0], stft[..., 1])
        return stft.abs()
    def forward(
        self,
        target: torch.Tensor,
        estimate: torch.Tensor,
    ):
        """forward.
        Args:
            target: (Batch, T)
            estimate: (Batch, T)
        Returns:
            loss: (Batch,)
        """
        assert target.shape == estimate.shape, (target.shape, estimate.shape)
        half_precision = (torch.float16, torch.bfloat16)
        if target.dtype in half_precision or estimate.dtype in half_precision:
            target = target.float()
            estimate = estimate.float()
        # shape bsz, samples
        # Optimal scale so that the scaled estimate best matches the target.
        scaling_factor = torch.sum(estimate * target, -1, keepdim=True) / (
            torch.sum(estimate**2, -1, keepdim=True) + self.eps
        )
        time_domain_loss = torch.sum((estimate * scaling_factor - target).abs(), dim=-1)
        if len(self.stft_encoders) == 0:
            return time_domain_loss
        else:
            # Accumulate an L1 magnitude-spectrum loss at every resolution.
            spectral_loss = torch.zeros_like(time_domain_loss)
            for stft_enc in self.stft_encoders:
                target_mag = self.get_magnitude(stft_enc(target)[0])
                estimate_mag = self.get_magnitude(
                    stft_enc(estimate * scaling_factor)[0]
                )
                c_loss = torch.sum((estimate_mag - target_mag).abs(), dim=(1, 2))
                spectral_loss += c_loss
            return time_domain_loss * self.time_domain_weight + (
                1 - self.time_domain_weight
            ) * spectral_loss / len(self.stft_encoders)
| 13,715 | 28.307692 | 88 | py |
espnet | espnet-master/espnet2/enh/loss/criterions/abs_loss.py | from abc import ABC, abstractmethod
import torch
EPS = torch.finfo(torch.get_default_dtype()).eps
class AbsEnhLoss(torch.nn.Module, ABC):
    """Base class for all Enhancement loss modules."""

    # the name will be the key that appears in the reporter
    @property
    def name(self) -> str:
        # Fix: the original did ``return NotImplementedError``, handing the
        # exception CLASS back to the caller as the "name". Raising surfaces
        # a missing override immediately instead of silently propagating it.
        raise NotImplementedError
    # This property specifies whether the criterion will only
    # be evaluated during the inference stage
    @property
    def only_for_test(self) -> bool:
        return False
    @abstractmethod
    def forward(
        self,
        ref,
        inf,
    ) -> torch.Tensor:
        # the return tensor should be shape of (batch)
        raise NotImplementedError
| 705 | 22.533333 | 61 | py |
espnet | espnet-master/espnet2/enh/loss/criterions/tf_domain.py | import math
from abc import ABC, abstractmethod
from functools import reduce
import torch
import torch.nn.functional as F
from packaging.version import parse as V
from espnet2.enh.layers.complex_utils import complex_norm, is_complex, new_complex_like
from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
EPS = torch.finfo(torch.get_default_dtype()).eps
def _create_mask_label(mix_spec, ref_spec, noise_spec=None, mask_type="IAM"):
"""Create mask label.
Args:
mix_spec: ComplexTensor(B, T, [C,] F)
ref_spec: List[ComplexTensor(B, T, [C,] F), ...]
noise_spec: ComplexTensor(B, T, [C,] F)
only used for IBM and IRM
mask_type: str
Returns:
labels: List[Tensor(B, T, [C,] F), ...] or List[ComplexTensor(B, T, F), ...]
"""
# Must be upper case
mask_type = mask_type.upper()
assert mask_type in [
"IBM",
"IRM",
"IAM",
"PSM",
"NPSM",
"PSM^2",
"CIRM",
], f"mask type {mask_type} not supported"
mask_label = []
if ref_spec[0].ndim < mix_spec.ndim:
# (B, T, F) -> (B, T, 1, F)
ref_spec = [r.unsqueeze(2).expand_as(mix_spec.real) for r in ref_spec]
if noise_spec is not None and noise_spec.ndim < mix_spec.ndim:
# (B, T, F) -> (B, T, 1, F)
noise_spec = noise_spec.unsqueeze(2).expand_as(mix_spec.real)
for idx, r in enumerate(ref_spec):
mask = None
if mask_type == "IBM":
if noise_spec is None:
flags = [abs(r) >= abs(n) for n in ref_spec]
else:
flags = [abs(r) >= abs(n) for n in ref_spec + [noise_spec]]
mask = reduce(lambda x, y: x * y, flags)
mask = mask.int()
elif mask_type == "IRM":
beta = 0.5
res_spec = sum(n for i, n in enumerate(ref_spec) if i != idx)
if noise_spec is not None:
res_spec += noise_spec
mask = (abs(r).pow(2) / (abs(res_spec).pow(2) + EPS)).pow(beta)
elif mask_type == "IAM":
mask = abs(r) / (abs(mix_spec) + EPS)
mask = mask.clamp(min=0, max=1)
elif mask_type == "PSM" or mask_type == "NPSM":
phase_r = r / (abs(r) + EPS)
phase_mix = mix_spec / (abs(mix_spec) + EPS)
# cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)
cos_theta = phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag
mask = (abs(r) / (abs(mix_spec) + EPS)) * cos_theta
mask = (
mask.clamp(min=0, max=1)
if mask_type == "NPSM"
else mask.clamp(min=-1, max=1)
)
elif mask_type == "PSM^2":
# This is for training beamforming masks
phase_r = r / (abs(r) + EPS)
phase_mix = mix_spec / (abs(mix_spec) + EPS)
# cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)
cos_theta = phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag
mask = (abs(r).pow(2) / (abs(mix_spec).pow(2) + EPS)) * cos_theta
mask = mask.clamp(min=-1, max=1)
elif mask_type == "CIRM":
# Ref: Complex Ratio Masking for Monaural Speech Separation
denominator = mix_spec.real.pow(2) + mix_spec.imag.pow(2) + EPS
mask_real = (mix_spec.real * r.real + mix_spec.imag * r.imag) / denominator
mask_imag = (mix_spec.real * r.imag - mix_spec.imag * r.real) / denominator
mask = new_complex_like(mix_spec, [mask_real, mask_imag])
assert mask is not None, f"mask type {mask_type} not supported"
mask_label.append(mask)
return mask_label
class FrequencyDomainLoss(AbsEnhLoss, ABC):
    """Base class for all frequence-domain Enhancement loss modules."""
    # The loss will be computed on mask or on spectrum
    @property
    @abstractmethod
    def compute_on_mask() -> bool:
        pass
    # the mask type
    @property
    @abstractmethod
    def mask_type() -> str:
        pass
    @property
    def name(self) -> str:
        # Reporter key for this criterion.
        return self._name
    @property
    def only_for_test(self) -> bool:
        # True if this criterion is evaluated only at validation/inference.
        return self._only_for_test
    @property
    def is_noise_loss(self) -> bool:
        # True if this criterion is computed on the noise estimate.
        return self._is_noise_loss
    @property
    def is_dereverb_loss(self) -> bool:
        # True if this criterion is computed on the dereverberated estimate.
        return self._is_dereverb_loss
    def __init__(
        self, name, only_for_test=False, is_noise_loss=False, is_dereverb_loss=False
    ):
        """Initialize the base frequency-domain loss.

        Args:
            name (str): reporter key for this criterion.
            only_for_test (bool): evaluate this criterion only at validation.
            is_noise_loss (bool): criterion targets the noise signal.
            is_dereverb_loss (bool): criterion targets the dereverbed signal;
                mutually exclusive with ``is_noise_loss``.
        """
        super().__init__()
        self._name = name
        # only used during validation
        self._only_for_test = only_for_test
        # only used to calculate the noise-related loss
        self._is_noise_loss = is_noise_loss
        # only used to calculate the dereverberation-related loss
        self._is_dereverb_loss = is_dereverb_loss
        if is_noise_loss and is_dereverb_loss:
            raise ValueError(
                "`is_noise_loss` and `is_dereverb_loss` cannot be True at the same time"
            )
    def create_mask_label(self, mix_spec, ref_spec, noise_spec=None):
        # Delegate to the module-level helper using this criterion's mask type.
        return _create_mask_label(
            mix_spec=mix_spec,
            ref_spec=ref_spec,
            noise_spec=noise_spec,
            mask_type=self.mask_type,
        )
class FrequencyDomainMSE(FrequencyDomainLoss):
    """MSE loss computed on spectrograms or on time-frequency masks."""

    def __init__(
        self,
        compute_on_mask=False,
        mask_type="IBM",
        name=None,
        only_for_test=False,
        is_noise_loss=False,
        is_dereverb_loss=False,
    ):
        if name is not None:
            loss_name = name
        elif compute_on_mask:
            loss_name = f"MSE_on_{mask_type}"
        else:
            loss_name = "MSE_on_Spec"
        super().__init__(
            loss_name,
            only_for_test=only_for_test,
            is_noise_loss=is_noise_loss,
            is_dereverb_loss=is_dereverb_loss,
        )
        self._compute_on_mask = compute_on_mask
        self._mask_type = mask_type

    @property
    def compute_on_mask(self) -> bool:
        # Whether the loss is applied to masks rather than spectra.
        return self._compute_on_mask

    @property
    def mask_type(self) -> str:
        # Mask label type used when compute_on_mask is True.
        return self._mask_type

    def forward(self, ref, inf) -> torch.Tensor:
        """time-frequency MSE loss.

        Args:
            ref: (Batch, T, F) or (Batch, T, C, F)
            inf: (Batch, T, F) or (Batch, T, C, F)
        Returns:
            loss: (Batch,)
        """
        assert ref.shape == inf.shape, (ref.shape, inf.shape)
        diff = ref - inf
        # For complex spectra, |a+bi|^2 = a^2 + b^2.
        err = diff.real**2 + diff.imag**2 if is_complex(diff) else diff**2
        if ref.dim() == 3:
            return err.mean(dim=[1, 2])
        if ref.dim() == 4:
            return err.mean(dim=[1, 2, 3])
        raise ValueError(
            "Invalid input shape: ref={}, inf={}".format(ref.shape, inf.shape)
        )
class FrequencyDomainL1(FrequencyDomainLoss):
def __init__(
self,
compute_on_mask=False,
mask_type="IBM",
name=None,
only_for_test=False,
is_noise_loss=False,
is_dereverb_loss=False,
):
if name is not None:
_name = name
elif compute_on_mask:
_name = f"L1_on_{mask_type}"
else:
_name = "L1_on_Spec"
super().__init__(
_name,
only_for_test=only_for_test,
is_noise_loss=is_noise_loss,
is_dereverb_loss=is_dereverb_loss,
)
self._compute_on_mask = compute_on_mask
self._mask_type = mask_type
@property
def compute_on_mask(self) -> bool:
return self._compute_on_mask
@property
def mask_type(self) -> str:
return self._mask_type
def forward(self, ref, inf) -> torch.Tensor:
"""time-frequency L1 loss.
Args:
ref: (Batch, T, F) or (Batch, T, C, F)
inf: (Batch, T, F) or (Batch, T, C, F)
Returns:
loss: (Batch,)
"""
assert ref.shape == inf.shape, (ref.shape, inf.shape)
if is_complex(inf):
l1loss = (
abs(ref.real - inf.real)
+ abs(ref.imag - inf.imag)
+ abs(ref.abs() - inf.abs())
)
else:
l1loss = abs(ref - inf)
if ref.dim() == 3:
l1loss = l1loss.mean(dim=[1, 2])
elif ref.dim() == 4:
l1loss = l1loss.mean(dim=[1, 2, 3])
else:
raise ValueError(
"Invalid input shape: ref={}, inf={}".format(ref.shape, inf.shape)
)
return l1loss
class FrequencyDomainDPCL(FrequencyDomainLoss):
def __init__(
self,
compute_on_mask=False,
mask_type="IBM",
loss_type="dpcl",
name=None,
only_for_test=False,
is_noise_loss=False,
is_dereverb_loss=False,
):
_name = "dpcl" if name is None else name
super().__init__(
_name,
only_for_test=only_for_test,
is_noise_loss=is_noise_loss,
is_dereverb_loss=is_dereverb_loss,
)
self._compute_on_mask = compute_on_mask
self._mask_type = mask_type
self._loss_type = loss_type
@property
def compute_on_mask(self) -> bool:
return self._compute_on_mask
@property
def mask_type(self) -> str:
return self._mask_type
def forward(self, ref, inf) -> torch.Tensor:
"""time-frequency Deep Clustering loss.
References:
[1] Deep clustering: Discriminative embeddings for segmentation and
separation; John R. Hershey. et al., 2016;
https://ieeexplore.ieee.org/document/7471631
[2] Manifold-Aware Deep Clustering: Maximizing Angles Between Embedding
Vectors Based on Regular Simplex; Tanaka, K. et al., 2021;
https://www.isca-speech.org/archive/interspeech_2021/tanaka21_interspeech.html
Args:
ref: List[(Batch, T, F) * spks]
inf: (Batch, T*F, D)
Returns:
loss: (Batch,)
""" # noqa: E501
assert len(ref) > 0
num_spk = len(ref)
# Compute the ref for Deep Clustering[1][2]
abs_ref = [abs(n) for n in ref]
if self._loss_type == "dpcl":
r = torch.zeros_like(abs_ref[0])
B = ref[0].shape[0]
for i in range(num_spk):
flags = [abs_ref[i] >= n for n in abs_ref]
mask = reduce(lambda x, y: x * y, flags)
mask = mask.int() * i
r += mask
r = r.contiguous().flatten().long()
re = F.one_hot(r, num_classes=num_spk)
re = re.contiguous().view(B, -1, num_spk)
elif self._loss_type == "mdc":
B = ref[0].shape[0]
manifold_vector = torch.full(
(num_spk, num_spk),
(-1 / num_spk) * math.sqrt(num_spk / (num_spk - 1)),
dtype=inf.dtype,
device=inf.device,
)
for i in range(num_spk):
manifold_vector[i][i] = ((num_spk - 1) / num_spk) * math.sqrt(
num_spk / (num_spk - 1)
)
re = torch.zeros(
ref[0].shape[0],
ref[0].shape[1],
ref[0].shape[2],
num_spk,
device=inf.device,
)
for i in range(num_spk):
flags = [abs_ref[i] >= n for n in abs_ref]
mask = reduce(lambda x, y: x * y, flags)
mask = mask.int()
re[mask == 1] = manifold_vector[i]
re = re.contiguous().view(B, -1, num_spk)
else:
raise ValueError(
f"Invalid loss type error: {self._loss_type}, "
'the loss type must be "dpcl" or "mdc"'
)
V2 = torch.matmul(torch.transpose(inf, 2, 1), inf).pow(2).sum(dim=(1, 2))
Y2 = (
torch.matmul(torch.transpose(re, 2, 1).float(), re.float())
.pow(2)
.sum(dim=(1, 2))
)
VY = torch.matmul(torch.transpose(inf, 2, 1), re.float()).pow(2).sum(dim=(1, 2))
return V2 + Y2 - 2 * VY
class FrequencyDomainAbsCoherence(FrequencyDomainLoss):
def __init__(
self,
compute_on_mask=False,
mask_type=None,
name=None,
only_for_test=False,
is_noise_loss=False,
is_dereverb_loss=False,
):
_name = "Coherence_on_Spec" if name is None else name
super().__init__(
_name,
only_for_test=only_for_test,
is_noise_loss=is_noise_loss,
is_dereverb_loss=is_dereverb_loss,
)
self._compute_on_mask = False
self._mask_type = None
@property
def compute_on_mask(self) -> bool:
return self._compute_on_mask
@property
def mask_type(self) -> str:
return self._mask_type
def forward(self, ref, inf) -> torch.Tensor:
"""time-frequency absolute coherence loss.
Reference:
Independent Vector Analysis with Deep Neural Network Source Priors;
Li et al 2020; https://arxiv.org/abs/2008.11273
Args:
ref: (Batch, T, F) or (Batch, T, C, F)
inf: (Batch, T, F) or (Batch, T, C, F)
Returns:
loss: (Batch,)
"""
assert ref.shape == inf.shape, (ref.shape, inf.shape)
if is_complex(ref) and is_complex(inf):
# sqrt( E[|inf|^2] * E[|ref|^2] )
denom = (
complex_norm(ref, dim=1) * complex_norm(inf, dim=1) / ref.size(1) + EPS
)
coh = (inf * ref.conj()).mean(dim=1).abs() / denom
if ref.dim() == 3:
coh_loss = 1.0 - coh.mean(dim=1)
elif ref.dim() == 4:
coh_loss = 1.0 - coh.mean(dim=[1, 2])
else:
raise ValueError(
"Invalid input shape: ref={}, inf={}".format(ref.shape, inf.shape)
)
else:
raise ValueError("`ref` and `inf` must be complex tensors.")
return coh_loss
class FrequencyDomainCrossEntropy(FrequencyDomainLoss):
def __init__(
self,
compute_on_mask=False,
mask_type=None,
ignore_id=-100,
name=None,
only_for_test=False,
is_noise_loss=False,
is_dereverb_loss=False,
):
if name is not None:
_name = name
elif compute_on_mask:
_name = f"CE_on_{mask_type}"
else:
_name = "CE_on_Spec"
super().__init__(
_name,
only_for_test=only_for_test,
is_noise_loss=is_noise_loss,
is_dereverb_loss=is_dereverb_loss,
)
self._compute_on_mask = compute_on_mask
self._mask_type = mask_type
self.cross_entropy = torch.nn.CrossEntropyLoss(
ignore_index=ignore_id, reduction="none"
)
self.ignore_id = ignore_id
@property
def compute_on_mask(self) -> bool:
return self._compute_on_mask
@property
def mask_type(self) -> str:
return self._mask_type
def forward(self, ref, inf) -> torch.Tensor:
"""time-frequency cross-entropy loss.
Args:
ref: (Batch, T) or (Batch, T, C)
inf: (Batch, T, nclass) or (Batch, T, C, nclass)
Returns:
loss: (Batch,)
"""
assert ref.shape[0] == inf.shape[0] and ref.shape[1] == inf.shape[1], (
ref.shape,
inf.shape,
)
if ref.dim() == 2:
loss = self.cross_entropy(inf.permute(0, 2, 1), ref).mean(dim=1)
elif ref.dim() == 3:
loss = self.cross_entropy(inf.permute(0, 3, 1, 2), ref).mean(dim=[1, 2])
else:
raise ValueError(
"Invalid input shape: ref={}, inf={}".format(ref.shape, inf.shape)
)
with torch.no_grad():
pred = inf.argmax(-1)
mask = ref != self.ignore_id
numerator = (pred == ref).masked_fill(~mask, 0).float()
if ref.dim() == 2:
acc = numerator.sum(dim=1) / mask.sum(dim=1).float()
elif ref.dim() == 3:
acc = numerator.sum(dim=[1, 2]) / mask.sum(dim=[1, 2]).float()
self.stats = {"acc": acc.cpu() * 100}
return loss
| 16,625 | 31.034682 | 94 | py |
espnet | espnet-master/espnet2/enh/loss/wrappers/mixit_solver.py | import itertools
from typing import Dict, List, Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import einsum as complex_einsum
from espnet2.enh.layers.complex_utils import stack as complex_stack
from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper
class MixITSolver(AbsLossWrapper):
    def __init__(
        self,
        criterion: AbsEnhLoss,
        weight: float = 1.0,
    ):
        """Mixture Invariant Training Solver.

        Args:
            criterion (AbsEnhLoss): an instance of AbsEnhLoss
            weight (float): weight (between 0 and 1) of current loss
                for multi-task learning.
        """
        super().__init__()
        self.criterion = criterion
        self.weight = weight

    @property
    def name(self):
        return "mixit"

    def _complex_einsum(self, equation, *operands):
        # Promote any real tensor to a ComplexTensor (zero imaginary part)
        # before dispatching to the complex einsum implementation.
        # Fix: the original rebound only the loop variable, so the converted
        # operands were discarded and the raw ones passed through.
        operands = [
            op
            if isinstance(op, ComplexTensor)
            else ComplexTensor(op, torch.zeros_like(op))
            for op in operands
        ]
        return complex_einsum(equation, *operands)

    def forward(
        self,
        ref: Union[List[torch.Tensor], List[ComplexTensor]],
        inf: Union[List[torch.Tensor], List[ComplexTensor]],
        others: Dict = {},
    ):
        """MixIT solver.

        Args:
            ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk
            inf (List[torch.Tensor]): [(batch, ...), ...] x n_est
        Returns:
            loss: (torch.Tensor): minimum loss with the best permutation
            stats: dict, for collecting training status
            others: dict, in this PIT solver, permutation order will be returned
        """
        num_inf = len(inf)
        # MixIT pairs two mixtures: half as many references as estimates.
        num_ref = num_inf // 2
        device = ref[0].device
        is_complex = isinstance(ref[0], ComplexTensor)
        assert is_complex == isinstance(inf[0], ComplexTensor)
        if not is_complex:
            ref_tensor = torch.stack(ref[:num_ref], dim=1)  # (batch, num_ref, ...)
            inf_tensor = torch.stack(inf, dim=1)  # (batch, num_inf, ...)
            einsum_fn = torch.einsum
        else:
            ref_tensor = complex_stack(ref[:num_ref], dim=1)  # (batch, num_ref, ...)
            inf_tensor = complex_stack(inf, dim=1)  # (batch, num_inf, ...)
            einsum_fn = self._complex_einsum
        # all permutation assignments:
        # [(0, 0, 0, 0), (0, 0, 0, 1), (0, 0, 1, 0), ..., (1, 1, 1, 1)]
        all_assignments = list(itertools.product(range(num_ref), repeat=num_inf))
        # One binary mixing matrix per assignment: row r selects the
        # estimates summed into reference mixture r.
        all_mixture_matrix = torch.stack(
            [
                torch.nn.functional.one_hot(
                    torch.tensor(asm, dtype=torch.int64, device=device),
                    num_classes=num_ref,
                ).transpose(1, 0)
                for asm in all_assignments
            ],
            dim=0,
        ).to(
            inf_tensor.dtype
        )  # (num_ref ^ num_inf, num_ref, num_inf)
        # (num_ref ^ num_inf, batch, num_ref, seq_len, ...)
        if inf_tensor.dim() == 3:
            est_sum_mixture = einsum_fn("ari,bil->abrl", all_mixture_matrix, inf_tensor)
        elif inf_tensor.dim() > 3:
            est_sum_mixture = einsum_fn(
                "ari,bil...->abrl...", all_mixture_matrix, inf_tensor
            )
        # Average criterion over references for every candidate assignment.
        losses = []
        for i in range(all_mixture_matrix.shape[0]):
            losses.append(
                sum(
                    [
                        self.criterion(ref_tensor[:, s], est_sum_mixture[i, :, s])
                        for s in range(num_ref)
                    ]
                )
                / num_ref
            )
        losses = torch.stack(losses, dim=0)  # (num_ref ^ num_inf, batch)
        # Best assignment per batch element.
        loss, perm = torch.min(losses, dim=0)  # (batch)
        loss = loss.mean()
        perm = torch.index_select(all_mixture_matrix, 0, perm)
        if perm.is_complex():
            perm = perm.real
        stats = dict()
        stats[f"{self.criterion.name}_{self.name}"] = loss.detach()
        return loss.mean(), stats, {"perm": perm}
| 4,145 | 32.983607 | 88 | py |
espnet | espnet-master/espnet2/enh/loss/wrappers/abs_wrapper.py | from abc import ABC, abstractmethod
from typing import Dict, List, Tuple
import torch
class AbsLossWrapper(torch.nn.Module, ABC):
    """Base class for all Enhancement loss wrapper modules."""
    # The weight for the current loss in the multi-task learning.
    # The overall training target will be combined as:
    # loss = weight_1 * loss_1 + ... + weight_N * loss_N
    weight = 1.0
    @abstractmethod
    def forward(
        self,
        ref: List,
        inf: List,
        others: Dict,
    ) -> Tuple[torch.Tensor, Dict, Dict]:
        """Compute the wrapped loss.

        Args:
            ref: list of references (one entry per source in subclasses).
            inf: list of estimates.
            others: auxiliary data, implementation specific.
        Returns:
            Tuple of (loss tensor, stats dict for reporting, others dict).
        """
        raise NotImplementedError
| 580 | 24.26087 | 65 | py |
espnet | espnet-master/espnet2/enh/loss/wrappers/multilayer_pit_solver.py | from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper
from espnet2.enh.loss.wrappers.pit_solver import PITSolver
class MultiLayerPITSolver(AbsLossWrapper):
    def __init__(
        self,
        criterion: AbsEnhLoss,
        weight=1.0,
        independent_perm=True,
        layer_weights=None,
    ):
        """Multi-Layer Permutation Invariant Training Solver.

        Compute the PIT loss given inferences of multiple layers and a single reference.
        It also support single inference and single reference in evaluation stage.

        Args:
            criterion (AbsEnhLoss): an instance of AbsEnhLoss
            weight (float): weight (between 0 and 1) of current loss
                for multi-task learning.
            independent_perm (bool):
                If True, PIT will be performed in forward to find the best permutation;
                If False, the permutation from the last LossWrapper output will be
                inherited.
                Note: You should be careful about the ordering of loss
                wrappers defined in the yaml config, if this argument is False.
            layer_weights (Optional[List[float]]): weights for each layer
                If not None, the loss of each layer will be weighted-summed using the
                specified weights.
        """
        super().__init__()
        self.criterion = criterion
        self.weight = weight
        self.independent_perm = independent_perm
        # Per-layer PIT is delegated to a standard single-layer solver.
        self.solver = PITSolver(criterion, weight, independent_perm)
        self.layer_weights = layer_weights
    def forward(self, ref, infs, others={}):
        """Permutation invariant training solver.

        Args:
            ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk
            infs (Union[List[torch.Tensor], List[List[torch.Tensor]]]):
                [(batch, ...), ...]
        Returns:
            loss: (torch.Tensor): minimum loss with the best permutation
            stats: dict, for collecting training status
            others: dict, in this PIT solver, permutation order will be returned
        """
        losses = 0.0
        # In single-layer case, the model only estimates waveforms in the last layer.
        # The shape of infs is List[torch.Tensor]
        if not isinstance(infs[0], (tuple, list)) and len(infs) == len(ref):
            loss, stats, others = self.solver(ref, infs, others)
            losses = loss
        # In multi-layer case, weighted-sum the PIT loss of each layer
        # The shape of ins is List[List[torch.Tensor]]
        else:
            for idx, inf in enumerate(infs):
                loss, stats, others = self.solver(ref, inf, others)
                if self.layer_weights is not None:
                    losses = losses + loss * self.layer_weights[idx]
                else:
                    # Default: later layers get linearly increasing weight.
                    losses = losses + loss * (idx + 1) * (1.0 / len(infs))
            losses = losses / len(infs)
        return losses, stats, others
| 3,042 | 42.471429 | 88 | py |
espnet | espnet-master/espnet2/enh/loss/wrappers/fixed_order.py | from collections import defaultdict
import torch
from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper
class FixedOrderSolver(AbsLossWrapper):
    def __init__(self, criterion: AbsEnhLoss, weight=1.0):
        """Solver that pairs references and estimates in their given order."""
        super().__init__()
        self.criterion = criterion
        self.weight = weight
    def forward(self, ref, inf, others={}):
        """An naive fixed-order solver

        Args:
            ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk
            inf (List[torch.Tensor]): [(batch, ...), ...]
        Returns:
            loss: (torch.Tensor): minimum loss with the best permutation
            stats: dict, for collecting training status
            others: reserved
        """
        assert len(ref) == len(inf), (len(ref), len(inf))
        num_spk = len(ref)
        loss = 0.0
        stats = defaultdict(list)
        # Pair sources in order (no permutation search); average over speakers.
        for r, i in zip(ref, inf):
            loss += torch.mean(self.criterion(r, i)) / num_spk
            # Collect any per-pair statistics the criterion exposes.
            for k, v in getattr(self.criterion, "stats", {}).items():
                stats[k].append(v)
        for k, v in stats.items():
            stats[k] = torch.stack(v, dim=1).mean()
        stats[self.criterion.name] = loss.detach()
        # The "permutation" is always the identity for every batch element.
        perm = torch.arange(num_spk).unsqueeze(0).repeat(ref[0].size(0), 1)
        return loss.mean(), dict(stats), {"perm": perm}
| 1,393 | 31.418605 | 75 | py |
espnet | espnet-master/espnet2/enh/loss/wrappers/dpcl_solver.py | from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper
class DPCLSolver(AbsLossWrapper):
    """Wrapper that feeds the learned T-F embedding to a DPCL criterion."""

    def __init__(self, criterion: AbsEnhLoss, weight=1.0):
        super().__init__()
        self.criterion = criterion
        self.weight = weight

    def forward(self, ref, inf, others={}):
        """A naive DPCL solver

        Args:
            ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk
            inf (List[torch.Tensor]): [(batch, ...), ...]
            others (List): other data included in this solver
                e.g. "tf_embedding" learned embedding of all T-F bins (B, T * F, D)
        Returns:
            loss: (torch.Tensor): minimum loss with the best permutation
            stats: (dict), for collecting training status
            others: reserved
        """
        assert "tf_embedding" in others
        # The criterion consumes the reference list and the learned embedding.
        loss = self.criterion(ref, others["tf_embedding"]).mean()
        stats = {self.criterion.name: loss.detach()}
        return loss.mean(), stats, {}
| 1,083 | 31.848485 | 83 | py |
espnet | espnet-master/espnet2/enh/loss/wrappers/pit_solver.py | from collections import defaultdict
from itertools import permutations
import torch
from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss
from espnet2.enh.loss.wrappers.abs_wrapper import AbsLossWrapper
class PITSolver(AbsLossWrapper):
    def __init__(
        self,
        criterion: AbsEnhLoss,
        weight=1.0,
        independent_perm=True,
        flexible_numspk=False,
    ):
        """Permutation Invariant Training Solver.

        Args:
            criterion (AbsEnhLoss): an instance of AbsEnhLoss
            weight (float): weight (between 0 and 1) of current loss
                for multi-task learning.
            independent_perm (bool):
                If True, PIT will be performed in forward to find the best permutation;
                If False, the permutation from the last LossWrapper output will be
                inherited.
                NOTE (wangyou): You should be careful about the ordering of loss
                wrappers defined in the yaml config, if this argument is False.
            flexible_numspk (bool):
                If True, num_spk will be taken from inf to handle flexible numbers of
                speakers. This is because ref may include dummy data in this case.
        """
        super().__init__()
        self.criterion = criterion
        self.weight = weight
        self.independent_perm = independent_perm
        self.flexible_numspk = flexible_numspk

    def forward(self, ref, inf, others={}):
        """PITSolver forward.

        Args:
            ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk
            inf (List[torch.Tensor]): [(batch, ...), ...]
        Returns:
            loss: (torch.Tensor): minimum loss with the best permutation
            stats: dict, for collecting training status
            others: dict, in this PIT solver, permutation order will be returned
        """
        # Inherit a fixed permutation from an earlier wrapper, if given.
        perm = others["perm"] if "perm" in others else None

        if not self.flexible_numspk:
            assert len(ref) == len(inf), (len(ref), len(inf))
            num_spk = len(ref)
        else:
            num_spk = len(inf)

        stats = defaultdict(list)

        def pre_hook(func, *args, **kwargs):
            # Call the criterion and collect its per-call stats as a side effect.
            ret = func(*args, **kwargs)
            for k, v in getattr(self.criterion, "stats", {}).items():
                stats[k].append(v)
            return ret

        def pair_loss(permutation):
            # Mean criterion over the (ref, inf) pairs induced by `permutation`.
            return sum(
                [
                    pre_hook(self.criterion, ref[s], inf[t])
                    for s, t in enumerate(permutation)
                ]
            ) / len(permutation)

        if self.independent_perm or perm is None:
            # compute the permutation independently (full PIT search)
            device = ref[0].device
            all_permutations = list(permutations(range(num_spk)))
            # losses: (B, n_permutations), one column per candidate permutation
            losses = torch.stack([pair_loss(p) for p in all_permutations], dim=1)
            loss, perm_ = torch.min(losses, dim=1)
            perm = torch.index_select(
                torch.tensor(all_permutations, device=device, dtype=torch.long),
                0,
                perm_,
            )
            # remove stats from unused permutations
            for k, v in stats.items():
                # (B, num_spk * len(all_permutations), ...)
                new_v = torch.stack(v, dim=1)
                B, L, *rest = new_v.shape
                assert L == num_spk * len(all_permutations), (L, num_spk)
                # Average over speakers, then keep only the stats of the
                # per-sample winning permutation.
                new_v = new_v.view(B, L // num_spk, num_spk, *rest).mean(2)
                if new_v.dim() > 2:
                    shapes = [1 for _ in rest]
                    perm0 = perm_.view(perm_.shape[0], 1, *shapes).expand(-1, -1, *rest)
                else:
                    perm0 = perm_.unsqueeze(1)
                stats[k] = new_v.gather(1, perm0.to(device=new_v.device)).unbind(1)
        else:
            # Reuse the inherited per-sample permutation; evaluate sample by sample.
            loss = torch.tensor(
                [
                    torch.tensor(
                        [
                            pre_hook(
                                self.criterion,
                                ref[s][batch].unsqueeze(0),
                                inf[t][batch].unsqueeze(0),
                            )
                            for s, t in enumerate(p)
                        ]
                    ).mean()
                    for batch, p in enumerate(perm)
                ]
            )

        loss = loss.mean()

        for k, v in stats.items():
            stats[k] = torch.stack(v, dim=1).mean()
        stats[self.criterion.name] = loss.detach()

        return loss.mean(), dict(stats), {"perm": perm}
| 4,627 | 36.322581 | 88 | py |
espnet | espnet-master/espnet2/enh/extractor/td_speakerbeam_extractor.py | from collections import OrderedDict
from typing import List, Tuple, Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet2.enh.extractor.abs_extractor import AbsExtractor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.layers.tcn import TemporalConvNet, TemporalConvNetInformed
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class TDSpeakerBeamExtractor(AbsExtractor):
    def __init__(
        self,
        input_dim: int,
        layer: int = 8,
        stack: int = 3,
        bottleneck_dim: int = 128,
        hidden_dim: int = 512,
        skip_dim: int = 128,
        kernel: int = 3,
        causal: bool = False,
        norm_type: str = "gLN",
        pre_nonlinear: str = "prelu",
        nonlinear: str = "relu",
        # enrollment related arguments
        i_adapt_layer: int = 7,
        adapt_layer_type: str = "mul",
        adapt_enroll_dim: int = 128,
        use_spk_emb: bool = False,
        spk_emb_dim: int = 256,
    ):
        """Time-Domain SpeakerBeam Extractor.

        Args:
            input_dim: input feature dimension
            layer: int, number of layers in each stack
            stack: int, number of stacks
            bottleneck_dim: bottleneck dimension
            hidden_dim: number of convolution channel
            skip_dim: int, number of skip connection channels
            kernel: int, kernel size.
            causal: bool, defalut False.
            norm_type: str, choose from 'BN', 'gLN', 'cLN'
            pre_nonlinear: the nonlinear function right before mask estimation
                select from 'prelu', 'relu', 'tanh', 'sigmoid', 'linear'
            nonlinear: the nonlinear function for mask estimation,
                select from 'relu', 'tanh', 'sigmoid', 'linear'
            i_adapt_layer: int, index of adaptation layer
            adapt_layer_type: str, type of adaptation layer
                see espnet2.enh.layers.adapt_layers for options
            adapt_enroll_dim: int, dimensionality of the speaker embedding
            use_spk_emb: bool, whether to use speaker embeddings as enrollment
            spk_emb_dim: int, dimension of input speaker embeddings
                only used when `use_spk_emb` is True
        """
        super().__init__()

        # Validate the nonlinearity names early so a typo fails at construction.
        if pre_nonlinear not in ("sigmoid", "prelu", "relu", "tanh", "linear"):
            raise ValueError("Not supporting pre_nonlinear={}".format(pre_nonlinear))
        if nonlinear not in ("sigmoid", "relu", "tanh", "linear"):
            raise ValueError("Not supporting nonlinear={}".format(nonlinear))

        # Main mask-estimation network, conditioned on the enrollment embedding
        # at layer `i_adapt_layer`.
        self.tcn = TemporalConvNetInformed(
            N=input_dim,
            B=bottleneck_dim,
            H=hidden_dim,
            P=kernel,
            X=layer,
            R=stack,
            Sc=skip_dim,
            out_channel=None,
            norm_type=norm_type,
            causal=causal,
            pre_mask_nonlinear=pre_nonlinear,
            mask_nonlinear=nonlinear,
            i_adapt_layer=i_adapt_layer,
            adapt_layer_type=adapt_layer_type,
            adapt_enroll_dim=adapt_enroll_dim,
        )

        # Auxiliary network
        # Maps the enrollment input (speaker embedding or enrollment audio)
        # to the conditioning embedding consumed by `self.tcn`.
        self.use_spk_emb = use_spk_emb
        if use_spk_emb:
            self.auxiliary_net = torch.nn.Conv1d(
                spk_emb_dim,
                adapt_enroll_dim if skip_dim is None else adapt_enroll_dim * 2,
                1,
            )
        else:
            self.auxiliary_net = TemporalConvNet(
                N=input_dim,
                B=bottleneck_dim,
                H=hidden_dim,
                P=kernel,
                X=layer,
                R=1,
                C=1,
                Sc=skip_dim,
                out_channel=adapt_enroll_dim
                if skip_dim is None
                else adapt_enroll_dim * 2,
                norm_type=norm_type,
                causal=False,
                pre_mask_nonlinear=pre_nonlinear,
                mask_nonlinear="linear",
            )

    def forward(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: torch.Tensor,
        input_aux: torch.Tensor,
        ilens_aux: torch.Tensor,
        suffix_tag: str = "",
    ) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
        """TD-SpeakerBeam Forward.

        Args:
            input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
            ilens (torch.Tensor): input lengths [Batch]
            input_aux (torch.Tensor or ComplexTensor): Encoded auxiliary feature
                for the target speaker [B, T, N] or [B, N]
            ilens_aux (torch.Tensor): input lengths of auxiliary input for the
                target speaker [Batch]
            suffix_tag (str): suffix to append to the keys in `others`
        Returns:
            masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
            ilens (torch.Tensor): (B,)
            others predicted data, e.g. masks: OrderedDict[
                f'mask{suffix_tag}': torch.Tensor(Batch, Frames, Freq),
                f'enroll_emb{suffix_tag}': torch.Tensor(Batch, adapt_enroll_dim/adapt_enroll_dim*2),
            ]
        """  # noqa: E501
        # if complex spectrum, estimate the mask from the magnitude
        feature = abs(input) if is_complex(input) else input
        aux_feature = abs(input_aux) if is_complex(input_aux) else input_aux

        B, L, N = feature.shape

        feature = feature.transpose(1, 2)  # B, N, L

        # NOTE(wangyou): When `self.use_spk_emb` is True, `aux_feature` is assumed to be
        # a speaker embedding; otherwise, it is assumed to be an enrollment audio.
        if self.use_spk_emb:
            # B, N, L'=1
            if aux_feature.dim() == 2:
                aux_feature = aux_feature.unsqueeze(-1)
            elif aux_feature.size(-2) == 1:
                assert aux_feature.dim() == 3, aux_feature.shape
                aux_feature = aux_feature.transpose(1, 2)
        else:
            aux_feature = aux_feature.transpose(1, 2)  # B, N, L'

        enroll_emb = self.auxiliary_net(aux_feature).squeeze(1)  # B, N', L'
        if not self.use_spk_emb:
            # Zero out padded frames before time-averaging the enrollment.
            enroll_emb.masked_fill_(make_pad_mask(ilens_aux, enroll_emb, -1), 0.0)
            enroll_emb = enroll_emb.mean(dim=-1)  # B, N'

        mask = self.tcn(feature, enroll_emb)  # B, N, L
        mask = mask.transpose(-1, -2)  # B, L, N

        # Apply the estimated mask to the (possibly complex) input feature.
        masked = input * mask

        others = {
            "enroll_emb{}".format(suffix_tag): enroll_emb.detach(),
        }

        return masked, ilens, others
| 6,590 | 37.770588 | 100 | py |
espnet | espnet-master/espnet2/enh/extractor/abs_extractor.py | from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Tuple
import torch
class AbsExtractor(torch.nn.Module, ABC):
    """Abstract interface for target-speaker extraction modules.

    Implementations consume an encoded mixture plus an auxiliary (enrollment)
    input that identifies the target speaker.
    """

    @abstractmethod
    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
        input_aux: torch.Tensor,
        ilens_aux: torch.Tensor,
        suffix_tag: str = "",
    ) -> Tuple[Tuple[torch.Tensor], torch.Tensor, OrderedDict]:
        """Extract the target speaker's signal; must be overridden."""
        raise NotImplementedError
| 458 | 23.157895 | 63 | py |
espnet | espnet-master/espnet2/enh/decoder/conv_decoder.py | import math
import torch
from espnet2.enh.decoder.abs_decoder import AbsDecoder
class ConvDecoder(AbsDecoder):
    """Transposed Convolutional decoder for speech enhancement and separation"""

    def __init__(
        self,
        channel: int,
        kernel_size: int,
        stride: int,
    ):
        """Initialize the decoder.

        Args:
            channel: number of input feature channels
            kernel_size: length (in samples) of each synthesis filter
            stride: hop size (in samples) between consecutive frames
        """
        super().__init__()
        self.convtrans1d = torch.nn.ConvTranspose1d(
            channel, 1, kernel_size, bias=False, stride=stride
        )

        self.kernel_size = kernel_size
        self.stride = stride

    def forward(self, input: torch.Tensor, ilens: torch.Tensor):
        """Forward.

        Args:
            input (torch.Tensor): spectrum [Batch, T, F]
            ilens (torch.Tensor): input lengths [Batch]
        Returns:
            wav (torch.Tensor): reconstructed waveform [Batch, Nsamples]
            ilens (torch.Tensor): input lengths, unchanged [Batch]
        """
        input = input.transpose(1, 2)  # (B, F, T) for ConvTranspose1d
        batch_size = input.shape[0]
        wav = self.convtrans1d(input, output_size=(batch_size, 1, ilens.max()))
        wav = wav.squeeze(1)

        return wav, ilens

    def forward_streaming(self, input_frame: torch.Tensor):
        """Decode a single frame; returns a chunk of self.kernel_size samples."""
        return self.forward(input_frame, ilens=torch.LongTensor([self.kernel_size]))[0]

    def streaming_merge(self, chunks: torch.Tensor, ilens: torch.tensor = None):
        """streaming_merge. It merges the frame-level processed audio chunks
        in the streaming *simulation*. It is noted that, in real applications,
        the processed audio should be sent to the output channel frame by frame.
        You may refer to this function to manage your streaming output buffer.

        Args:
            chunks: List [(B, frame_size),]
            ilens: [B]
        Returns:
            merge_audio: [B, T]
        """
        hop_size = self.stride
        frame_size = self.kernel_size

        num_chunks = len(chunks)
        batch_size = chunks[0].shape[0]
        # BUG FIX: the previous `if not ilens` evaluated tensor truthiness,
        # which raises "Boolean value of Tensor with more than one element is
        # ambiguous" whenever `ilens` holds a batch of lengths. Test for None
        # explicitly instead.
        audio_len = (
            int(hop_size * num_chunks + frame_size - hop_size)
            if ilens is None
            else int(ilens.max())
        )

        output = torch.zeros((batch_size, audio_len), dtype=chunks[0].dtype).to(
            chunks[0].device
        )

        # Overlap-add each chunk at its hop offset.
        for i, chunk in enumerate(chunks):
            output[:, i * hop_size : i * hop_size + frame_size] += chunk

        return output
if __name__ == "__main__":
    # Self-test: the streaming (frame-by-frame) encode/decode path must
    # reproduce the frames and waveform of the offline path.
    from espnet2.enh.encoder.conv_encoder import ConvEncoder

    input_audio = torch.randn((1, 100))
    ilens = torch.LongTensor([100])

    kernel_size = 32
    stride = 16

    encoder = ConvEncoder(kernel_size=kernel_size, stride=stride, channel=16)
    decoder = ConvDecoder(kernel_size=kernel_size, stride=stride, channel=16)
    frames, flens = encoder(input_audio, ilens)
    wav, ilens = decoder(frames, ilens)

    # Streaming simulation: frame, process per frame, then overlap-add merge.
    splited = encoder.streaming_frame(input_audio)
    sframes = [encoder.forward_streaming(s) for s in splited]
    swavs = [decoder.forward_streaming(s) for s in sframes]
    merged = decoder.streaming_merge(swavs, ilens)

    sframes = torch.cat(sframes, dim=1)

    torch.testing.assert_allclose(sframes, frames)
    torch.testing.assert_allclose(wav, merged)
| 3,014 | 29.454545 | 87 | py |
espnet | espnet-master/espnet2/enh/decoder/null_decoder.py | import torch
from espnet2.enh.decoder.abs_decoder import AbsDecoder
class NullDecoder(AbsDecoder):
    """Null decoder, return the same args."""

    def __init__(self):
        super().__init__()

    def forward(self, input: torch.Tensor, ilens: torch.Tensor):
        """Forward. The input should be the waveform already.

        Args:
            input (torch.Tensor): wav [Batch, sample]
            ilens (torch.Tensor): input lengths [Batch]
        """
        # Nothing to decode: the "encoded" representation is already a
        # waveform, so both arguments pass through untouched.
        return input, ilens
| 493 | 23.7 | 64 | py |
espnet | espnet-master/espnet2/enh/decoder/abs_decoder.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class AbsDecoder(torch.nn.Module, ABC):
    """Abstract base class of decoders that map encoded features to waveforms."""

    @abstractmethod
    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Decode a batch of encoded features into waveforms; must be overridden."""
        raise NotImplementedError

    def forward_streaming(self, input_frame: torch.Tensor):
        """Decode a single frame in streaming mode (optional override)."""
        raise NotImplementedError

    def streaming_merge(self, chunks: torch.Tensor, ilens: torch.tensor = None):
        """streaming_merge. It merges the frame-level processed audio chunks
        in the streaming *simulation*. It is noted that, in real applications,
        the processed audio should be sent to the output channel frame by frame.
        You may refer to this function to manage your streaming output buffer.

        Args:
            chunks: List [(B, frame_size),]
            ilens: [B]
        Returns:
            merge_audio: [B, T]
        """
        raise NotImplementedError
| 975 | 28.575758 | 80 | py |
espnet | espnet-master/espnet2/enh/decoder/stft_decoder.py | import math
import torch
import torch_complex
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.decoder.abs_decoder import AbsDecoder
from espnet2.enh.layers.complex_utils import is_torch_complex_tensor
from espnet2.layers.stft import Stft
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class STFTDecoder(AbsDecoder):
    """STFT decoder for speech enhancement and separation"""

    def __init__(
        self,
        n_fft: int = 512,
        win_length: int = None,
        hop_length: int = 128,
        window="hann",
        center: bool = True,
        normalized: bool = False,
        onesided: bool = True,
    ):
        super().__init__()
        self.stft = Stft(
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            window=window,
            center=center,
            normalized=normalized,
            onesided=onesided,
        )

        # Fall back to n_fft when no explicit window length is given.
        self.win_length = win_length if win_length else n_fft
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.window = window
        self.center = center

    def forward(self, input: ComplexTensor, ilens: torch.Tensor):
        """Forward.

        Args:
            input (ComplexTensor): spectrum [Batch, T, (C,) F]
            ilens (torch.Tensor): input lengths [Batch]
        """
        if not isinstance(input, ComplexTensor) and (
            is_torch_1_9_plus and not torch.is_complex(input)
        ):
            raise TypeError("Only support complex tensors for stft decoder")

        bs = input.size(0)
        if input.dim() == 4:
            multi_channel = True
            # input: (Batch, T, C, F) -> (Batch * C, T, F)
            input = input.transpose(1, 2).reshape(-1, input.size(1), input.size(3))
        else:
            multi_channel = False

        # for supporting half-precision training
        if input.dtype in (torch.float16, torch.bfloat16):
            wav, wav_lens = self.stft.inverse(input.float(), ilens)
            wav = wav.to(dtype=input.dtype)
        elif (
            is_torch_complex_tensor(input)
            and hasattr(torch, "complex32")
            and input.dtype == torch.complex32
        ):
            wav, wav_lens = self.stft.inverse(input.cfloat(), ilens)
            wav = wav.to(dtype=input.dtype)
        else:
            wav, wav_lens = self.stft.inverse(input, ilens)

        if multi_channel:
            # wav: (Batch * C, Nsamples) -> (Batch, Nsamples, C)
            wav = wav.reshape(bs, -1, wav.size(1)).transpose(1, 2)

        return wav, wav_lens

    def _get_window_func(self):
        """Build the analysis window tensor (length self.win_length)."""
        window_func = getattr(torch, f"{self.window}_window")
        window = window_func(self.win_length)
        # NOTE(review): n_pad_left / n_pad_right are computed but never used;
        # presumably leftover from a padded-window variant — confirm.
        n_pad_left = (self.n_fft - window.shape[0]) // 2
        n_pad_right = self.n_fft - window.shape[0] - n_pad_left
        return window

    def forward_streaming(self, input_frame: torch.Tensor):
        """Forward.

        Args:
            input (ComplexTensor): spectrum [Batch, 1, F]
            output: wavs [Batch, 1, self.win_length]
        """
        # Normalize to a native complex tensor before the inverse FFT.
        input_frame = input_frame.real + 1j * input_frame.imag
        output_wav = (
            torch.fft.irfft(input_frame)
            if self.stft.onesided
            else torch.fft.ifft(input_frame).real
        )

        output_wav = output_wav.squeeze(1)

        # Crop the FFT-length output back to the (centered) window span.
        n_pad_left = (self.n_fft - self.win_length) // 2
        output_wav = output_wav[..., n_pad_left : n_pad_left + self.win_length]

        return output_wav * self._get_window_func()

    def streaming_merge(self, chunks, ilens=None):
        """streaming_merge. It merges the frame-level processed audio chunks
        in the streaming *simulation*. It is noted that, in real applications,
        the processed audio should be sent to the output channel frame by frame.
        You may refer to this function to manage your streaming output buffer.

        Args:
            chunks: List [(B, frame_size),]
            ilens: [B]
        Returns:
            merge_audio: [B, T]
        """
        frame_size = self.win_length
        hop_size = self.hop_length

        num_chunks = len(chunks)
        batch_size = chunks[0].shape[0]
        audio_len = int(hop_size * num_chunks + frame_size - hop_size)

        output = torch.zeros((batch_size, audio_len), dtype=chunks[0].dtype).to(
            chunks[0].device
        )

        # Overlap-add the windowed chunks.
        for i, chunk in enumerate(chunks):
            output[:, i * hop_size : i * hop_size + frame_size] += chunk

        # Divide by the summed squared window to undo the double windowing
        # (analysis + synthesis), as in a standard iSTFT.
        window_sq = self._get_window_func().pow(2)
        window_envelop = torch.zeros((batch_size, audio_len), dtype=chunks[0].dtype).to(
            chunks[0].device
        )
        for i in range(len(chunks)):
            window_envelop[:, i * hop_size : i * hop_size + frame_size] += window_sq

        output = output / window_envelop

        # We need to trim the front padding away if center.
        start = (frame_size // 2) if self.center else 0
        # NOTE(review): `ilens.max() is None` can never be True (it is a
        # tensor), so the first branch looks dead and `ilens` is effectively
        # required — confirm intended behavior for ilens=None.
        end = -(frame_size // 2) if ilens.max() is None else start + ilens.max()

        return output[..., start:end]
if __name__ == "__main__":
    # Self-test: the streaming frame-by-frame encode/decode path must
    # reproduce the offline STFT/iSTFT round trip.
    from espnet2.enh.encoder.stft_encoder import STFTEncoder

    input_audio = torch.randn((1, 100))
    ilens = torch.LongTensor([100])

    nfft = 32
    win_length = 28
    hop = 10

    encoder = STFTEncoder(
        n_fft=nfft, win_length=win_length, hop_length=hop, onesided=True
    )
    decoder = STFTDecoder(
        n_fft=nfft, win_length=win_length, hop_length=hop, onesided=True
    )
    frames, flens = encoder(input_audio, ilens)
    wav, ilens = decoder(frames, ilens)

    splited = encoder.streaming_frame(input_audio)

    sframes = [encoder.forward_streaming(s) for s in splited]
    swavs = [decoder.forward_streaming(s) for s in sframes]
    merged = decoder.streaming_merge(swavs, ilens)

    # ComplexTensor and native complex need different concatenation helpers.
    if not (is_torch_1_9_plus and encoder.use_builtin_complex):
        sframes = torch_complex.cat(sframes, dim=1)
    else:
        sframes = torch.cat(sframes, dim=1)

    torch.testing.assert_close(sframes.real, frames.real)
    torch.testing.assert_close(sframes.imag, frames.imag)
    torch.testing.assert_close(wav, input_audio)
    torch.testing.assert_close(wav, merged)
| 6,263 | 31.625 | 88 | py |
espnet | espnet-master/espnet2/torch_utils/forward_adaptor.py | import torch
from typeguard import check_argument_types
class ForwardAdaptor(torch.nn.Module):
    """Wrapped module to parallelize specified method

    torch.nn.DataParallel parallelizes only "forward()"
    and, maybe, the method having the other name can't be applied
    except for wrapping the module just like this class.

    Examples:
        >>> class A(torch.nn.Module):
        ...     def foo(self, x):
        ...         ...
        >>> model = A()
        >>> model = ForwardAdaptor(model, "foo")
        >>> model = torch.nn.DataParallel(model, device_ids=[0, 1])
        >>> x = torch.randn(2, 10)
        >>> model(x)
    """

    def __init__(self, module: torch.nn.Module, name: str):
        assert check_argument_types()
        super().__init__()
        self.module = module
        self.name = name
        if not hasattr(module, name):
            raise ValueError(f"{module} doesn't have {name}")

    def forward(self, *args, **kwargs):
        # Resolve the target method lazily so each DataParallel replica
        # dispatches to its own copy of the wrapped module.
        bound_method = getattr(self.module, self.name)
        return bound_method(*args, **kwargs)
| 1,052 | 29.970588 | 67 | py |
espnet | espnet-master/espnet2/torch_utils/initialize.py | #!/usr/bin/env python3
"""Initialize modules for espnet2 neural networks."""
import logging
import math
import torch
from typeguard import check_argument_types
def initialize(model: torch.nn.Module, init: str):
    """Initialize weights of a neural network module.

    Parameters are initialized using the given method or distribution.

    Custom initialization routines can be implemented into submodules
    as function `espnet_initialization_fn` within the custom module.

    Args:
        model: Target.
        init: Method of initialization. One of "chainer", "xavier_uniform",
            "xavier_normal", "kaiming_uniform", "kaiming_normal".
    """
    assert check_argument_types()

    if init == "chainer":
        # 1. lecun_normal_init_parameters
        for name, p in model.named_parameters():
            data = p.data
            if ".bias" in name and data.dim() == 1:
                # bias
                data.zero_()
                logging.info(f"Initialize {name} to zeros")
            elif data.dim() == 1:
                # linear weight
                n = data.size(0)
                stdv = 1.0 / math.sqrt(n)
                data.normal_(0, stdv)
            elif data.dim() == 2:
                # linear weight
                n = data.size(1)
                stdv = 1.0 / math.sqrt(n)
                data.normal_(0, stdv)
            elif data.dim() in (3, 4):
                # conv weight: fan-in = in_channels * prod(kernel dims)
                n = data.size(1)
                for k in data.size()[2:]:
                    n *= k
                stdv = 1.0 / math.sqrt(n)
                data.normal_(0, stdv)
            else:
                raise NotImplementedError

        for mod in model.modules():
            # 2. embed weight ~ Normal(0, 1)
            if isinstance(mod, torch.nn.Embedding):
                mod.weight.data.normal_(0, 1)
            # 3. forget-bias = 1.0 (the [n//4 : n//2] slice is the forget gate)
            elif isinstance(mod, torch.nn.RNNCellBase):
                n = mod.bias_ih.size(0)
                mod.bias_ih.data[n // 4 : n // 2].fill_(1.0)
            elif isinstance(mod, torch.nn.RNNBase):
                for name, param in mod.named_parameters():
                    if "bias" in name:
                        n = param.size(0)
                        param.data[n // 4 : n // 2].fill_(1.0)
            if hasattr(mod, "espnet_initialization_fn"):
                mod.espnet_initialization_fn()
    else:
        # weight init (only tensors with dim > 1, i.e. weight matrices/kernels)
        for p in model.parameters():
            if p.dim() > 1:
                if init == "xavier_uniform":
                    torch.nn.init.xavier_uniform_(p.data)
                elif init == "xavier_normal":
                    torch.nn.init.xavier_normal_(p.data)
                elif init == "kaiming_uniform":
                    torch.nn.init.kaiming_uniform_(p.data, nonlinearity="relu")
                elif init == "kaiming_normal":
                    torch.nn.init.kaiming_normal_(p.data, nonlinearity="relu")
                else:
                    raise ValueError("Unknown initialization: " + init)
        # bias init
        for name, p in model.named_parameters():
            if ".bias" in name and p.dim() == 1:
                p.data.zero_()
                logging.info(f"Initialize {name} to zeros")
        # reset some modules with default init
        for m in model.modules():
            if isinstance(
                m, (torch.nn.Embedding, torch.nn.LayerNorm, torch.nn.GroupNorm)
            ):
                m.reset_parameters()
            if hasattr(m, "espnet_initialization_fn"):
                m.espnet_initialization_fn()

    # TODO(xkc): Hacking s3prl_frontend and wav2vec2encoder initialization
    # Restore pretrained weights for submodules that were clobbered above.
    if getattr(model, "encoder", None) and getattr(
        model.encoder, "reload_pretrained_parameters", None
    ):
        model.encoder.reload_pretrained_parameters()
    if getattr(model, "frontend", None):
        if getattr(model.frontend, "reload_pretrained_parameters", None):
            model.frontend.reload_pretrained_parameters()
        elif isinstance(
            getattr(model.frontend, "frontends", None),
            torch.nn.ModuleList,
        ):
            for i, _ in enumerate(getattr(model.frontend, "frontends")):
                if getattr(
                    model.frontend.frontends[i],
                    "reload_pretrained_parameters",
                    None,
                ):
                    model.frontend.frontends[i].reload_pretrained_parameters()
    if getattr(model, "postencoder", None) and getattr(
        model.postencoder, "reload_pretrained_parameters", None
    ):
        model.postencoder.reload_pretrained_parameters()
    if getattr(model, "decoder", None) and getattr(
        model.decoder, "reload_pretrained_parameters", None
    ):
        model.decoder.reload_pretrained_parameters()
| 4,844 | 37.452381 | 82 | py |
espnet | espnet-master/espnet2/torch_utils/model_summary.py | import humanfriendly
import numpy as np
import torch
def get_human_readable_count(number: int) -> str:
    """Return human_readable_count

    Originated from:
    https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/core/memory.py

    Abbreviates an integer number with K, M, B, T for thousands, millions,
    billions and trillions, respectively.

    Examples (fixed to match the actual ``:.2f`` formatting of the output;
    the old examples showed integer/comma-formatted values this function
    never produces):
        >>> get_human_readable_count(123)
        '123.00  '
        >>> get_human_readable_count(1234)  # (one thousand)
        '1.23 K'
        >>> get_human_readable_count(2e6)  # (two million)
        '2.00 M'
        >>> get_human_readable_count(3e9)  # (three billion)
        '3.00 B'
        >>> get_human_readable_count(4e12)  # (four trillion)
        '4.00 T'
        >>> get_human_readable_count(5e15)  # (more than trillion)
        '5000.00 T'

    Args:
        number: a positive integer number
    Return:
        A string formatted according to the pattern described above.
    """
    assert number >= 0
    labels = [" ", "K", "M", "B", "T"]
    num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)
    num_groups = int(np.ceil(num_digits / 3))
    num_groups = min(num_groups, len(labels))  # don't abbreviate beyond trillions
    shift = -3 * (num_groups - 1)
    number = number * (10**shift)
    index = num_groups - 1
    return f"{number:.2f} {labels[index]}"
def to_bytes(dtype) -> int:
    """Return the number of bytes per element for a torch dtype.

    E.g. torch.float16 -> 2, torch.int8 -> 1.

    BUG FIX: the previous ``int(str(dtype)[-2:]) // 8`` crashed on
    single-digit bit widths (``torch.int8`` -> ``int("t8")``) and on
    width-less dtypes such as ``torch.bool``; parse the trailing digits
    instead.
    """
    import re  # local import so the file-level import block stays unchanged

    match = re.search(r"(\d+)$", str(dtype))
    if match is None:
        # e.g. torch.bool carries no bit width in its name; one byte each.
        return 1
    return int(match.group(1)) // 8
def model_summary(model: torch.nn.Module) -> str:
    """Build a multi-line, human-readable summary of ``model``.

    The summary contains the printed module structure, the total and
    trainable parameter counts, the size of the trainable parameters in
    bytes, and the dtype of the first parameter.
    """
    message = "Model structure:\n"
    message += str(model)
    # Compute raw counts first: the percentage needs them before the counts
    # are converted to human-readable strings.
    all_params = list(model.parameters())
    total_count = sum(p.numel() for p in all_params)
    trainable_count = sum(p.numel() for p in all_params if p.requires_grad)
    percent_trainable = "{:.1f}".format(trainable_count * 100.0 / total_count)
    tot_params = get_human_readable_count(total_count)
    num_params = get_human_readable_count(trainable_count)
    message += "\n\nModel summary:\n"
    message += f" Class Name: {model.__class__.__name__}\n"
    message += f" Total Number of model parameters: {tot_params}\n"
    message += (
        f" Number of trainable parameters: {num_params} ({percent_trainable}%)\n"
    )
    num_bytes = humanfriendly.format_size(
        sum(p.numel() * to_bytes(p.dtype) for p in all_params if p.requires_grad)
    )
    message += f" Size: {num_bytes}\n"
    dtype = next(iter(model.parameters())).dtype
    message += f" Type: {dtype}"
    return message
| 2,498 | 34.197183 | 102 | py |
espnet | espnet-master/espnet2/torch_utils/get_layer_from_string.py | import difflib
import torch
def get_layer(l_name, library=torch.nn):
    """Return layer object handler from library e.g. from torch.nn

    E.g. if l_name=="elu", returns torch.nn.ELU.

    Args:
        l_name (string): Case insensitive name for layer in library (e.g. .'elu').
        library (module): Name of library/module where to search for object handler
            with l_name e.g. "torch.nn".

    Returns:
        layer_handler (object): handler for the requested layer e.g. (torch.nn.ELU)

    Raises:
        NotImplementedError: if zero or more than one case-insensitive match
            exists in ``library``.
    """
    # BUG FIX: the candidate list used to be built from ``dir(torch.nn)``
    # unconditionally, so the ``library`` parameter was ignored during the
    # search (only the final getattr used it).
    all_layers = dir(library)
    match = [x for x in all_layers if l_name.lower() == x.lower()]
    if len(match) == 0:
        close_matches = difflib.get_close_matches(
            l_name, [x.lower() for x in all_layers]
        )
        raise NotImplementedError(
            "Layer with name {} not found in {}.\n Closest matches: {}".format(
                l_name, str(library), close_matches
            )
        )
    elif len(match) > 1:
        close_matches = difflib.get_close_matches(
            l_name, [x.lower() for x in all_layers]
        )
        # Message fixed: it used to read "Multiple matchs ... not found".
        raise NotImplementedError(
            "Multiple matches for layer with name {} found in {}.\n "
            "All matches: {}".format(l_name, str(library), close_matches)
        )
    else:
        # valid: exactly one case-insensitive match
        layer_handler = getattr(library, match[0])
        return layer_handler
| 1,411 | 31.090909 | 83 | py |
espnet | espnet-master/espnet2/torch_utils/load_pretrained_model.py | import logging
from typing import Any, Dict, Union
import torch
import torch.nn
import torch.optim
def filter_state_dict(
    dst_state: Dict[str, Union[float, torch.Tensor]],
    src_state: Dict[str, Union[float, torch.Tensor]],
):
    """Filter name, size mismatch instances between dicts.

    Args:
        dst_state: reference state dict for filtering
        src_state: target state dict for filtering
    Returns:
        dict containing only the src entries whose key exists in dst with
        a matching tensor size; dropped entries are logged as warnings.
    """
    match_state = {}
    for key, value in src_state.items():
        if key not in dst_state:
            logging.warning(
                f"Filter out {key} from pretrained dict"
                + " because of name not found in target dict"
            )
            continue
        if dst_state[key].size() != src_state[key].size():
            logging.warning(
                f"Filter out {key} from pretrained dict"
                + " because of size mismatch"
                + f"({dst_state[key].size()}-{src_state[key].size()})"
            )
            continue
        match_state[key] = value
    return match_state
def load_pretrained_model(
    init_param: str,
    model: torch.nn.Module,
    ignore_init_mismatch: bool,
    map_location: str = "cpu",
):
    """Load a model state and set it to the model.

    Args:
        init_param: <file_path>:<src_key>:<dst_key>:<exclude_Keys>
        model: target module that receives the (sub-)state dict
        ignore_init_mismatch: if True, silently drop keys whose name or
            tensor size does not match the target (via filter_state_dict)
        map_location: forwarded to ``torch.load``

    Examples:
        >>> load_pretrained_model("somewhere/model.pth", model)
        >>> load_pretrained_model("somewhere/model.pth:decoder:decoder", model)
        >>> load_pretrained_model("somewhere/model.pth:decoder:decoder:", model)
        >>> load_pretrained_model(
        ...     "somewhere/model.pth:decoder:decoder:decoder.embed", model
        ... )
        >>> load_pretrained_model("somewhere/decoder.pth::decoder", model)
    """
    # Split the spec into its up-to-4 colon-separated fields.
    sps = init_param.split(":", 4)
    if len(sps) == 4:
        path, src_key, dst_key, excludes = sps
    elif len(sps) == 3:
        path, src_key, dst_key = sps
        excludes = None
    elif len(sps) == 2:
        path, src_key = sps
        dst_key, excludes = None, None
    else:
        (path,) = sps
        src_key, dst_key, excludes = None, None, None
    # Empty fields are treated the same as omitted ones.
    if src_key == "":
        src_key = None
    if dst_key == "":
        dst_key = None

    if dst_key is None:
        obj = model
    else:

        def get_attr(obj: Any, key: str):
            """Get an nested attribute.

            >>> class A(torch.nn.Module):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self.linear = torch.nn.Linear(10, 10)
            >>> a = A()
            >>> assert A.linear.weight is get_attr(A, 'linear.weight')
            """
            if key.strip() == "":
                return obj
            for k in key.split("."):
                obj = getattr(obj, k)
            return obj

        obj = get_attr(model, dst_key)

    src_state = torch.load(path, map_location=map_location)
    # Drop any source keys matching the excluded prefixes.
    if excludes is not None:
        for e in excludes.split(","):
            src_state = {k: v for k, v in src_state.items() if not k.startswith(e)}

    # Keep only keys under `src_key` and strip that prefix (plus the dot).
    if src_key is not None:
        src_state = {
            k[len(src_key) + 1 :]: v
            for k, v in src_state.items()
            if k.startswith(src_key)
        }

    dst_state = obj.state_dict()
    if ignore_init_mismatch:
        src_state = filter_state_dict(dst_state, src_state)
    # Update-then-load so keys absent from src keep their current values.
    dst_state.update(src_state)
    obj.load_state_dict(dst_state)
| 3,500 | 29.181034 | 83 | py |
espnet | espnet-master/espnet2/torch_utils/add_gradient_noise.py | import torch
def add_gradient_noise(
    model: torch.nn.Module,
    iteration: int,
    duration: float = 100,
    eta: float = 1.0,
    scale_factor: float = 0.55,
):
    """Adds noise from a standard normal distribution to the gradients.

    The standard deviation (`sigma`) is controlled
    by the three hyper-parameters below.
    `sigma` goes to zero (no noise) with more iterations.

    Args:
        model: Model.
        iteration: Number of iterations.
        duration: {100, 1000}: Number of durations to control
            the interval of the `sigma` change.
        eta: {0.01, 0.3, 1.0}: The magnitude of `sigma`.
        scale_factor: {0.55}: The scale of `sigma`.
    """
    # sigma decays as training progresses: eta / interval**scale_factor.
    interval = (iteration // duration) + 1
    sigma = eta / interval**scale_factor
    for param in model.parameters():
        if param.grad is None:
            continue
        noise = sigma * torch.randn(param.grad.size()).to(param.device)
        param.grad += noise
| 987 | 29.875 | 71 | py |
espnet | espnet-master/espnet2/torch_utils/device_funcs.py | import dataclasses
import warnings
import numpy as np
import torch
def to_device(data, device=None, dtype=None, non_blocking=False, copy=False):
    """Change the device of object recursively"""

    def _recurse(obj):
        # Recurse with the same conversion options.
        return to_device(obj, device, dtype, non_blocking, copy)

    if isinstance(data, dict):
        return {key: _recurse(value) for key, value in data.items()}
    if dataclasses.is_dataclass(data) and not isinstance(data, type):
        return type(data)(*map(_recurse, dataclasses.astuple(data)))
    # maybe namedtuple. I don't know the correct way to judge namedtuple.
    if isinstance(data, tuple) and type(data) is not tuple:
        return type(data)(*map(_recurse, data))
    if isinstance(data, (list, tuple)):
        return type(data)(_recurse(item) for item in data)
    if isinstance(data, np.ndarray):
        return _recurse(torch.from_numpy(data))
    if isinstance(data, torch.Tensor):
        return data.to(device, dtype, non_blocking, copy)
    # Anything else (str, int, None, ...) passes through unchanged.
    return data
def force_gatherable(data, device):
    """Change object to gatherable in torch.nn.DataParallel recursively

    The difference from to_device() is changing to torch.Tensor if float or int
    value is found.

    The restriction to the returned value in DataParallel:
        The object must be
        - torch.cuda.Tensor
        - 1 or more dimension. 0-dimension-tensor sends warning.
        or a list, tuple, dict.
    """
    if isinstance(data, dict):
        return {key: force_gatherable(value, device) for key, value in data.items()}
    # DataParallel can't handle NamedTuple well
    if isinstance(data, tuple) and type(data) is not tuple:
        return type(data)(*(force_gatherable(item, device) for item in data))
    if isinstance(data, (list, tuple, set)):
        return type(data)(force_gatherable(item, device) for item in data)
    if isinstance(data, np.ndarray):
        return force_gatherable(torch.from_numpy(data), device)
    if isinstance(data, torch.Tensor):
        # Promote 0-dim tensors to 1-dim so DataParallel can gather them.
        return (data[None] if data.dim() == 0 else data).to(device)
    if isinstance(data, float):
        return torch.tensor([data], dtype=torch.float, device=device)
    if isinstance(data, int):
        return torch.tensor([data], dtype=torch.long, device=device)
    if data is None:
        return None
    warnings.warn(f"{type(data)} may not be gatherable by DataParallel")
    return data
| 2,681 | 36.25 | 88 | py |
espnet | espnet-master/espnet2/torch_utils/pytorch_version.py | import torch
def pytorch_cudnn_version() -> str:
    """Return a one-line summary of the PyTorch / CUDA / cuDNN environment."""
    parts = [
        f"pytorch.version={torch.__version__}, ",
        f"cuda.available={torch.cuda.is_available()}, ",
    ]
    # cuDNN details are only meaningful (and queryable) when cuDNN is enabled.
    if torch.backends.cudnn.enabled:
        parts.append(
            f"cudnn.version={torch.backends.cudnn.version()}, "
            f"cudnn.benchmark={torch.backends.cudnn.benchmark}, "
            f"cudnn.deterministic={torch.backends.cudnn.deterministic}"
        )
    return "".join(parts)
| 468 | 26.588235 | 71 | py |
espnet | espnet-master/espnet2/torch_utils/recursive_op.py | """Torch utility module."""
import torch
if torch.distributed.is_available():
from torch.distributed import ReduceOp
def recursive_sum(obj, weight: torch.Tensor, distributed: bool = False):
    """Weighted sum along the batch axis, applied recursively to containers.

    ``weight`` must be 1-D and have the same shape as every leaf tensor.
    When ``distributed`` is True the scalar result is all-reduced over workers.
    """
    assert weight.dim() == 1, weight.size()
    if isinstance(obj, (tuple, list)):
        return type(obj)(recursive_sum(item, weight, distributed) for item in obj)
    if isinstance(obj, dict):
        return {
            key: recursive_sum(item, weight, distributed) for key, item in obj.items()
        }
    if isinstance(obj, torch.Tensor):
        assert obj.size() == weight.size(), (obj.size(), weight.size())
        summed = (obj * weight.type(obj.dtype)).sum()
        if distributed:
            torch.distributed.all_reduce(summed, op=ReduceOp.SUM)
        return summed
    if obj is None:
        return None
    raise ValueError(type(obj))
def recursive_divide(a, b: torch.Tensor):
    """Element-wise division ``a / b``, applied recursively to containers."""
    if isinstance(a, (tuple, list)):
        return type(a)(recursive_divide(item, b) for item in a)
    if isinstance(a, dict):
        return {key: recursive_divide(item, b) for key, item in a.items()}
    if isinstance(a, torch.Tensor):
        assert a.size() == b.size(), (a.size(), b.size())
        # Cast the divisor to a's dtype so the result dtype is unchanged.
        return a / b.type(a.dtype)
    if a is None:
        return None
    raise ValueError(type(a))
def recursive_average(obj, weight: torch.Tensor, distributed: bool = False):
    """Weighted average over the batch axis.

    Returns a tuple of (averaged object, total weight).  With
    ``distributed`` the total weight is also all-reduced across workers.
    """
    summed = recursive_sum(obj, weight, distributed)
    total_weight = weight.sum()
    if distributed:
        torch.distributed.all_reduce(total_weight, op=ReduceOp.SUM)
    # Dividing by the total weight normalizes the weights to sum to one.
    averaged = recursive_divide(summed, total_weight)
    return averaged, total_weight
| 1,615 | 32.666667 | 81 | py |
espnet | espnet-master/espnet2/torch_utils/set_all_random_seed.py | import random
import numpy as np
import torch
def set_all_random_seed(seed: int):
    """Seed Python's, NumPy's and PyTorch's global RNGs with the same value."""
    for seeder in (random.seed, np.random.seed, torch.random.manual_seed):
        seeder(seed)
| 167 | 14.272727 | 35 | py |
espnet | espnet-master/espnet2/main_funcs/collect_stats.py | import logging
from collections import defaultdict
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
import torch
from torch.nn.parallel import data_parallel
from torch.utils.data import DataLoader
from typeguard import check_argument_types
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.fileio.npy_scp import NpyScpWriter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.forward_adaptor import ForwardAdaptor
from espnet2.train.abs_espnet_model import AbsESPnetModel
@torch.no_grad()
def collect_stats(
    model: Union[AbsESPnetModel, None],
    train_iter: DataLoader and Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],
    valid_iter: DataLoader and Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],
    output_dir: Path,
    ngpu: Optional[int],
    log_interval: Optional[int],
    write_collected_feats: bool,
) -> None:
    """Perform on collect_stats mode.

    Running for deriving the shape information from data
    and gathering statistics.
    This method is used before executing train().

    Args:
        model: Model whose ``collect_feats`` is called; when None, only the
            shape files are written (no feature statistics).
        train_iter: Iterable yielding ``(keys, batch)`` pairs for training data.
        valid_iter: Same as ``train_iter`` but for validation data.
        output_dir: Directory under which ``train/`` and ``valid/`` stats are
            written.
        ngpu: Number of GPUs; >1 triggers data_parallel, >0 moves batches to CUDA.
        log_interval: Log every N iterations; derived from the iterator length
            when None (falls back to 100 if the iterator has no len()).
        write_collected_feats: Also dump each extracted feature as an .npy file.
    """
    assert check_argument_types()
    npy_scp_writers = {}
    for itr, mode in zip([train_iter, valid_iter], ["train", "valid"]):
        if log_interval is None:
            try:
                log_interval = max(len(itr) // 20, 10)
            except TypeError:
                # The iterator has no len() (e.g. a plain generator).
                log_interval = 100
        # Accumulators keyed by feature name; 0 works as the additive identity
        # for the numpy arrays that get added in.
        sum_dict = defaultdict(lambda: 0)
        sq_dict = defaultdict(lambda: 0)
        count_dict = defaultdict(lambda: 0)
        with DatadirWriter(output_dir / mode) as datadir_writer:
            for iiter, (keys, batch) in enumerate(itr, 1):
                batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
                # 1. Write shape file
                for name in batch:
                    if name.endswith("_lengths"):
                        continue
                    for i, (key, data) in enumerate(zip(keys, batch[name])):
                        if f"{name}_lengths" in batch:
                            # Drop the zero-padded tail before recording shape.
                            lg = int(batch[f"{name}_lengths"][i])
                            data = data[:lg]
                        datadir_writer[f"{name}_shape"][key] = ",".join(
                            map(str, data.shape)
                        )
                if model is not None:
                    # 2. Extract feats
                    if ngpu <= 1:
                        data = model.collect_feats(**batch)
                    else:
                        # Note that data_parallel can parallelize only "forward()"
                        data = data_parallel(
                            ForwardAdaptor(model, "collect_feats"),
                            (),
                            range(ngpu),
                            module_kwargs=batch,
                        )
                    # 3. Calculate sum and square sum
                    for key, v in data.items():
                        for i, (uttid, seq) in enumerate(zip(keys, v.cpu().numpy())):
                            # Truncate zero-padding region
                            if f"{key}_lengths" in data:
                                length = data[f"{key}_lengths"][i]
                                # seq: (Length, Dim, ...)
                                seq = seq[:length]
                            else:
                                # seq: (Dim, ...) -> (1, Dim, ...)
                                seq = seq[None]
                            # Accumulate value, its square, and count
                            sum_dict[key] += seq.sum(0)
                            sq_dict[key] += (seq**2).sum(0)
                            count_dict[key] += len(seq)
                            # 4. [Option] Write derived features as npy format file.
                            if write_collected_feats:
                                # Instantiate NpyScpWriter for the first iteration
                                if (key, mode) not in npy_scp_writers:
                                    p = output_dir / mode / "collect_feats"
                                    npy_scp_writers[(key, mode)] = NpyScpWriter(
                                        p / f"data_{key}", p / f"{key}.scp"
                                    )
                                # Save array as npy file
                                npy_scp_writers[(key, mode)][uttid] = seq
                if iiter % log_interval == 0:
                    logging.info(f"Niter: {iiter}")
        for key in sum_dict:
            np.savez(
                output_dir / mode / f"{key}_stats.npz",
                count=count_dict[key],
                sum=sum_dict[key],
                sum_square=sq_dict[key],
            )
        # batch_keys and stats_keys are used by aggregate_stats_dirs.py
        # NOTE: `batch` here is the last batch of the loop above; its keys are
        # assumed identical across batches.
        with (output_dir / mode / "batch_keys").open("w", encoding="utf-8") as f:
            f.write(
                "\n".join(filter(lambda x: not x.endswith("_lengths"), batch)) + "\n"
            )
        with (output_dir / mode / "stats_keys").open("w", encoding="utf-8") as f:
            f.write("\n".join(sum_dict) + "\n")
| 5,164 | 40.653226 | 85 | py |
espnet | espnet-master/espnet2/main_funcs/calculate_all_attentions.py | from collections import defaultdict
from typing import Dict, List
import torch
from espnet2.gan_tts.jets.alignments import AlignmentModule
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.pytorch_backend.rnn.attentions import (
AttAdd,
AttCov,
AttCovLoc,
AttDot,
AttForward,
AttForwardTA,
AttLoc,
AttLoc2D,
AttLocRec,
AttMultiHeadAdd,
AttMultiHeadDot,
AttMultiHeadLoc,
AttMultiHeadMultiResLoc,
NoAtt,
)
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
@torch.no_grad()
def calculate_all_attentions(
    model: AbsESPnetModel, batch: Dict[str, torch.Tensor]
) -> Dict[str, List[torch.Tensor]]:
    """Derive the outputs from the all attention layers

    Registers forward hooks on every known attention module, runs the model
    sample by sample, and collects the attention weights each hook captured.

    Args:
        model:
        batch: same as forward
    Returns:
        return_dict: A dict of a list of tensor.
        key_names x batch x (D1, D2, ...)
    """
    bs = len(next(iter(batch.values())))
    assert all(len(v) == bs for v in batch.values()), {
        k: v.shape for k, v in batch.items()
    }
    # 1. Register forward_hook fn to save the output from specific layers
    outputs = {}
    handles = {}
    for name, modu in model.named_modules():
        # `name=name` binds the loop variable at definition time (otherwise all
        # hooks would see the last module name due to late binding).
        def hook(module, input, output, name=name):
            if isinstance(module, MultiHeadedAttention):
                # NOTE(kamo): MultiHeadedAttention doesn't return attention weight
                # attn: (B, Head, Tout, Tin)
                outputs[name] = module.attn.detach().cpu()
            elif isinstance(module, AttLoc2D):
                c, w = output
                # w: previous concate attentions
                # w: (B, nprev, Tin)
                att_w = w[:, -1].detach().cpu()
                outputs.setdefault(name, []).append(att_w)
            elif isinstance(module, (AttCov, AttCovLoc)):
                c, w = output
                assert isinstance(w, list), type(w)
                # w: list of previous attentions
                # w: nprev x (B, Tin)
                att_w = w[-1].detach().cpu()
                outputs.setdefault(name, []).append(att_w)
            elif isinstance(module, AttLocRec):
                # w: (B, Tin)
                c, (w, (att_h, att_c)) = output
                att_w = w.detach().cpu()
                outputs.setdefault(name, []).append(att_w)
            elif isinstance(
                module,
                (
                    AttMultiHeadDot,
                    AttMultiHeadAdd,
                    AttMultiHeadLoc,
                    AttMultiHeadMultiResLoc,
                ),
            ):
                c, w = output
                # w: nhead x (B, Tin)
                assert isinstance(w, list), type(w)
                att_w = [_w.detach().cpu() for _w in w]
                outputs.setdefault(name, []).append(att_w)
            elif isinstance(
                module,
                (
                    AttAdd,
                    AttDot,
                    AttForward,
                    AttForwardTA,
                    AttLoc,
                    NoAtt,
                ),
            ):
                c, w = output
                att_w = w.detach().cpu()
                outputs.setdefault(name, []).append(att_w)
            elif isinstance(module, AlignmentModule):
                # AlignmentModule output is in log domain; exp() recovers probs.
                w = output
                att_w = torch.exp(w).detach().cpu()
                outputs.setdefault(name, []).append(att_w)
        handle = modu.register_forward_hook(hook)
        handles[name] = handle
    # 2. Just forward one by one sample.
    # Batch-mode can't be used to keep requirements small for each models.
    keys = []
    for k in batch:
        if not (k.endswith("_lengths") or k in ["utt_id"]):
            keys.append(k)
    return_dict = defaultdict(list)
    for ibatch in range(bs):
        # *: (B, L, ...) -> (1, L2, ...)
        _sample = {
            k: batch[k][ibatch, None, : batch[k + "_lengths"][ibatch]]
            if k + "_lengths" in batch
            else batch[k][ibatch, None]
            for k in keys
        }
        # *_lengths: (B,) -> (1,)
        _sample.update(
            {
                k + "_lengths": batch[k + "_lengths"][ibatch, None]
                for k in keys
                if k + "_lengths" in batch
            }
        )
        if "utt_id" in batch:
            _sample["utt_id"] = batch["utt_id"]
        model(**_sample)
        # Derive the attention results
        for name, output in outputs.items():
            if isinstance(output, list):
                if isinstance(output[0], list):
                    # output: nhead x (Tout, Tin)
                    output = torch.stack(
                        [
                            # Tout x (1, Tin) -> (Tout, Tin)
                            torch.cat([o[idx] for o in output], dim=0)
                            for idx in range(len(output[0]))
                        ],
                        dim=0,
                    )
                else:
                    # Tout x (1, Tin) -> (Tout, Tin)
                    output = torch.cat(output, dim=0)
            else:
                # output: (1, NHead, Tout, Tin) -> (NHead, Tout, Tin)
                output = output.squeeze(0)
            # output: (Tout, Tin) or (NHead, Tout, Tin)
            return_dict[name].append(output)
        # Reset per-sample captures before processing the next sample.
        outputs.clear()
    # 3. Remove all hooks
    for _, handle in handles.items():
        handle.remove()
    return dict(return_dict)
| 5,510 | 31.609467 | 82 | py |
espnet | espnet-master/espnet2/main_funcs/average_nbest_models.py | import logging
import warnings
from pathlib import Path
from typing import Collection, Optional, Sequence, Union
import torch
from typeguard import check_argument_types
from espnet2.train.reporter import Reporter
@torch.no_grad()
def average_nbest_models(
    output_dir: Path,
    reporter: Reporter,
    best_model_criterion: Sequence[Sequence[str]],
    nbest: Union[Collection[int], int],
    suffix: Optional[str] = None,
) -> None:
    """Generate averaged model from n-best models

    Args:
        output_dir: The directory contains the model file for each epoch
        reporter: Reporter instance
        best_model_criterion: Give criterions to decide the best model.
            e.g. [("valid", "loss", "min"), ("train", "acc", "max")]
        nbest: Number of best model files to be averaged
        suffix: A suffix added to the averaged model file name
    """
    assert check_argument_types()
    if isinstance(nbest, int):
        nbests = [nbest]
    else:
        nbests = list(nbest)
    if len(nbests) == 0:
        warnings.warn("At least 1 nbest values are required")
        nbests = [1]
    if suffix is not None:
        suffix = suffix + "."
    else:
        suffix = ""
    # 1. Get nbests: List[Tuple[str, str, List[Tuple[epoch, value]]]]
    nbest_epochs = [
        (ph, k, reporter.sort_epochs_and_values(ph, k, m)[: max(nbests)])
        for ph, k, m in best_model_criterion
        if reporter.has(ph, k)
    ]
    # Cache of loaded state dicts keyed by epoch, shared across criteria so
    # each checkpoint file is read at most once.
    _loaded = {}
    for ph, cr, epoch_and_values in nbest_epochs:
        _nbests = [i for i in nbests if i <= len(epoch_and_values)]
        if len(_nbests) == 0:
            _nbests = [1]
        for n in _nbests:
            if n == 0:
                continue
            elif n == 1:
                # The averaged model is same as the best model
                e, _ = epoch_and_values[0]
                op = output_dir / f"{e}epoch.pth"
                sym_op = output_dir / f"{ph}.{cr}.ave_1best.{suffix}pth"
                if sym_op.is_symlink() or sym_op.exists():
                    sym_op.unlink()
                sym_op.symlink_to(op.name)
            else:
                op = output_dir / f"{ph}.{cr}.ave_{n}best.{suffix}pth"
                logging.info(
                    f"Averaging {n}best models: " f'criterion="{ph}.{cr}": {op}'
                )
                avg = None
                # 2.a. Averaging model
                for e, _ in epoch_and_values[:n]:
                    if e not in _loaded:
                        _loaded[e] = torch.load(
                            output_dir / f"{e}epoch.pth",
                            map_location="cpu",
                        )
                    states = _loaded[e]
                    if avg is None:
                        # Shallow-copy the cached dict: all following updates
                        # rebind keys out-of-place, so the copy keeps the
                        # cached (raw) state dict intact for reuse by other
                        # criteria / other values of n.  (Previously
                        # `avg = states` aliased the cache and the averaging
                        # below silently corrupted it.)
                        avg = dict(states)
                    else:
                        # Accumulated
                        for k in avg:
                            avg[k] = avg[k] + states[k]
                for k in avg:
                    if str(avg[k].dtype).startswith("torch.int"):
                        # For int type, not averaged, but only accumulated.
                        # e.g. BatchNorm.num_batches_tracked
                        # (If there are any cases that requires averaging
                        #  or the other reducing method, e.g. max/min, for integer type,
                        #  please report.)
                        pass
                    else:
                        avg[k] = avg[k] / n
                # 2.b. Save the ave model and create a symlink
                torch.save(avg, op)
        # 3. *.*.ave.pth is a symlink to the max ave model
        op = output_dir / f"{ph}.{cr}.ave_{max(_nbests)}best.{suffix}pth"
        sym_op = output_dir / f"{ph}.{cr}.ave.{suffix}pth"
        if sym_op.is_symlink() or sym_op.exists():
            sym_op.unlink()
        sym_op.symlink_to(op.name)
| 3,886 | 34.66055 | 88 | py |
espnet | espnet-master/espnet2/main_funcs/pack_funcs.py | import os
import sys
import tarfile
import zipfile
from datetime import datetime
from io import BytesIO, TextIOWrapper
from pathlib import Path
from typing import Dict, Iterable, Optional, Union
import yaml
class Archiver:
    """Uniform read/write interface over tar and zip archives.

    The archive format is inferred from the file extension
    (.tar / .tgz / .tar.gz / .tbz2 / .tar.bz2 / .txz / .tar.xz -> tar,
    .zip -> zip); the tar compression mode is chosen automatically when
    opening for writing.
    """

    def __init__(self, file, mode="r"):
        if Path(file).suffix == ".tar":
            self.type = "tar"
        elif Path(file).suffix == ".tgz" or Path(file).suffixes == [".tar", ".gz"]:
            self.type = "tar"
            if mode == "w":
                mode = "w:gz"
        elif Path(file).suffix == ".tbz2" or Path(file).suffixes == [".tar", ".bz2"]:
            self.type = "tar"
            if mode == "w":
                mode = "w:bz2"
        elif Path(file).suffix == ".txz" or Path(file).suffixes == [".tar", ".xz"]:
            self.type = "tar"
            if mode == "w":
                mode = "w:xz"
        elif Path(file).suffix == ".zip":
            self.type = "zip"
        else:
            raise ValueError(f"Cannot detect archive format: type={file}")
        if self.type == "tar":
            self.fopen = tarfile.open(file, mode=mode)
        elif self.type == "zip":
            self.fopen = zipfile.ZipFile(file, mode=mode)
        else:
            # Unreachable given the branches above; kept as a guard.
            # (Bug fix: previously interpolated the builtin `type` here.)
            raise ValueError(f"Not supported: type={self.type}")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.fopen.close()

    def close(self):
        """Close the underlying archive file."""
        self.fopen.close()

    def __iter__(self):
        """Iterate over member info objects (TarInfo or ZipInfo)."""
        if self.type == "tar":
            return iter(self.fopen)
        elif self.type == "zip":
            return iter(self.fopen.infolist())
        else:
            raise ValueError(f"Not supported: type={self.type}")

    def add(self, filename, arcname=None, recursive: bool = True):
        """Add a file (or, when recursive, a whole directory tree) to the archive."""
        if arcname is not None:
            print(f"adding: {arcname}")
        else:
            print("adding: (unknown)")
        if recursive and Path(filename).is_dir():
            for f in Path(filename).glob("**/*"):
                if f.is_dir():
                    continue
                if arcname is not None:
                    _arcname = Path(arcname) / f
                else:
                    _arcname = None
                self.add(f, _arcname)
            return
        if self.type == "tar":
            return self.fopen.add(filename, arcname)
        elif self.type == "zip":
            return self.fopen.write(filename, arcname)
        else:
            raise ValueError(f"Not supported: type={self.type}")

    def addfile(self, info, fileobj):
        """Add a member from an open file-like object and its info record."""
        print(f"adding: {self.get_name_from_info(info)}")
        if self.type == "tar":
            return self.fopen.addfile(info, fileobj)
        elif self.type == "zip":
            return self.fopen.writestr(info, fileobj.read())
        else:
            raise ValueError(f"Not supported: type={self.type}")

    def generate_info(self, name, size) -> Union[tarfile.TarInfo, zipfile.ZipInfo]:
        """Generate TarInfo using system information"""
        if self.type == "tar":
            tarinfo = tarfile.TarInfo(str(name))
            if os.name == "posix":
                tarinfo.gid = os.getgid()
                tarinfo.uid = os.getuid()
            tarinfo.mtime = datetime.now().timestamp()
            tarinfo.size = size
            # Keep mode as default
            return tarinfo
        elif self.type == "zip":
            zipinfo = zipfile.ZipInfo(str(name), datetime.now().timetuple()[:6])
            zipinfo.file_size = size
            return zipinfo
        else:
            raise ValueError(f"Not supported: type={self.type}")

    def get_name_from_info(self, info):
        """Return the member name from a TarInfo/ZipInfo record."""
        if self.type == "tar":
            assert isinstance(info, tarfile.TarInfo), type(info)
            return info.name
        elif self.type == "zip":
            assert isinstance(info, zipfile.ZipInfo), type(info)
            return info.filename
        else:
            raise ValueError(f"Not supported: type={self.type}")

    def extract(self, info, path=None):
        """Extract one member to *path* (defaults to the current directory)."""
        if self.type == "tar":
            return self.fopen.extract(info, path)
        elif self.type == "zip":
            return self.fopen.extract(info, path)
        else:
            raise ValueError(f"Not supported: type={self.type}")

    def extractfile(self, info, mode="r"):
        """Open a member for reading; mode "r" gives text for tar, "rb" bytes."""
        if self.type == "tar":
            f = self.fopen.extractfile(info)
            if mode == "r":
                return TextIOWrapper(f)
            else:
                return f
        elif self.type == "zip":
            if mode == "rb":
                mode = "r"
            return self.fopen.open(info, mode)
        else:
            raise ValueError(f"Not supported: type={self.type}")
def find_path_and_change_it_recursive(value, src: str, tgt: str):
    """Recursively replace any string equal (as a path) to *src* with *tgt*."""
    if isinstance(value, dict):
        return {
            key: find_path_and_change_it_recursive(item, src, tgt)
            for key, item in value.items()
        }
    if isinstance(value, (list, tuple)):
        # Note: tuples come back as lists.
        return [find_path_and_change_it_recursive(item, src, tgt) for item in value]
    if isinstance(value, str) and Path(value) == Path(src):
        return tgt
    return value
def get_dict_from_cache(meta: Union[Path, str]) -> Optional[Dict[str, str]]:
    """Rebuild the key->path mapping from a previously-unpacked meta.yaml.

    Returns None when the meta file, or any file it references, is missing,
    signalling the caller that the archive must be unpacked again.
    """
    meta = Path(meta)
    if not meta.exists():
        return None
    outpath = meta.parent.parent
    with meta.open("r", encoding="utf-8") as f:
        d = yaml.safe_load(f)
    assert isinstance(d, dict), type(d)
    yaml_files = d["yaml_files"]
    files = d["files"]
    assert isinstance(yaml_files, dict), type(yaml_files)
    assert isinstance(files, dict), type(files)
    resolved = {}
    for key, rel in list(yaml_files.items()) + list(files.items()):
        full = outpath / rel
        if not full.exists():
            return None
        resolved[key] = str(full)
    return resolved
def unpack(
    input_archive: Union[Path, str],
    outpath: Union[Path, str],
    use_cache: bool = True,
) -> Dict[str, str]:
    """Scan all files in the archive file and return as a dict of files.

    The archive must contain a ``meta.yaml`` describing its members
    (``yaml_files`` and ``files`` mappings).  Members listed as yaml files
    are rewritten so that any member path inside them points under
    *outpath*; everything else is extracted verbatim.

    Examples:
        tarfile:
            model.pth
            some1.file
            some2.file
        >>> unpack("tarfile", "out")
        {'asr_model_file': 'out/model.pth'}
    """
    input_archive = Path(input_archive)
    outpath = Path(outpath)
    with Archiver(input_archive) as archive:
        # First pass: locate meta.yaml and read the member manifests.
        for info in archive:
            if Path(archive.get_name_from_info(info)).name == "meta.yaml":
                if (
                    use_cache
                    and (outpath / Path(archive.get_name_from_info(info))).exists()
                ):
                    # A previous unpack may still be valid; reuse it if so.
                    retval = get_dict_from_cache(
                        outpath / Path(archive.get_name_from_info(info))
                    )
                    if retval is not None:
                        return retval
                d = yaml.safe_load(archive.extractfile(info))
                assert isinstance(d, dict), type(d)
                yaml_files = d["yaml_files"]
                files = d["files"]
                assert isinstance(yaml_files, dict), type(yaml_files)
                assert isinstance(files, dict), type(files)
                break
        else:
            raise RuntimeError("Format error: not found meta.yaml")
        # Second pass: extract every member, rewriting yaml configs in-place.
        for info in archive:
            fname = archive.get_name_from_info(info)
            outname = outpath / fname
            outname.parent.mkdir(parents=True, exist_ok=True)
            if fname in set(yaml_files.values()):
                d = yaml.safe_load(archive.extractfile(info))
                # Rewrite yaml
                # NOTE(review): this re-scans the whole archive once per yaml
                # member to collect candidate paths — O(members^2) but archives
                # are small in practice.
                for info2 in archive:
                    name = archive.get_name_from_info(info2)
                    d = find_path_and_change_it_recursive(d, name, str(outpath / name))
                with outname.open("w", encoding="utf-8") as f:
                    yaml.safe_dump(d, f)
            else:
                archive.extract(info, path=outpath)
        retval = {}
        for key, value in list(yaml_files.items()) + list(files.items()):
            retval[key] = str(outpath / value)
        return retval
def _to_relative_or_resolve(f):
# Resolve to avoid symbolic link
p = Path(f).resolve()
try:
# Change to relative if it can
p = p.relative_to(Path(".").resolve())
except ValueError:
pass
return str(p)
def pack(
    files: Dict[str, Union[str, Path]],
    yaml_files: Dict[str, Union[str, Path]],
    outpath: Union[str, Path],
    option: Iterable[Union[str, Path]] = (),
):
    """Bundle the given files plus a generated meta.yaml into an archive.

    Args:
        files: Mapping of logical name -> file path (e.g. model weights).
        yaml_files: Mapping of logical name -> yaml config path; recorded
            separately so unpack() can rewrite the paths inside them.
        outpath: Output archive path; format is inferred from its extension.
        option: Extra files/directories to include verbatim.

    Raises:
        FileNotFoundError: If any listed file does not exist.
    """
    for v in list(files.values()) + list(yaml_files.values()) + list(option):
        if not Path(v).exists():
            raise FileNotFoundError(f"No such file or directory: {v}")
    # Store CWD-relative paths when possible so the archive is relocatable.
    files = {k: _to_relative_or_resolve(v) for k, v in files.items()}
    yaml_files = {k: _to_relative_or_resolve(v) for k, v in yaml_files.items()}
    option = [_to_relative_or_resolve(v) for v in option]
    meta_objs = dict(
        files=files,
        yaml_files=yaml_files,
        timestamp=datetime.now().timestamp(),
        python=sys.version,
    )
    # Record library versions when available (best-effort).
    try:
        import torch
        meta_objs.update(torch=str(torch.__version__))
    except ImportError:
        pass
    try:
        import espnet
        meta_objs.update(espnet=espnet.__version__)
    except ImportError:
        pass
    Path(outpath).parent.mkdir(parents=True, exist_ok=True)
    with Archiver(outpath, mode="w") as archive:
        # Write packed/meta.yaml
        fileobj = BytesIO(yaml.safe_dump(meta_objs).encode())
        info = archive.generate_info("meta.yaml", fileobj.getbuffer().nbytes)
        archive.addfile(info, fileobj=fileobj)
        for f in list(yaml_files.values()) + list(files.values()) + list(option):
            archive.add(f)
    print(f"Generate: {outpath}")
| 9,839 | 32.020134 | 87 | py |
espnet | espnet-master/espnet2/slu/espnet_model.py | from contextlib import contextmanager
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.espnet_model import ESPnetASRModel
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr.transducer.error_calculator import ErrorCalculatorTransducer
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.slu.postdecoder.abs_postdecoder import AbsPostDecoder
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.e2e_asr_common import ErrorCalculator
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
if V(torch.__version__) >= V("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch<1.6.0
@contextmanager
def autocast(enabled=True):
yield
class ESPnetSLUModel(ESPnetASRModel):
    """CTC-attention hybrid Encoder-Decoder model

    Spoken Language Understanding variant of the ASR model: in addition to
    the usual frontend/encoder/decoder stack it can consume an ASR
    transcript through an optional post-decoder (e.g. a BERT-style text
    encoder) whose output is concatenated with the speech encoder output.
    """

    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        preencoder: Optional[AbsPreEncoder],
        encoder: AbsEncoder,
        postencoder: Optional[AbsPostEncoder],
        decoder: AbsDecoder,
        ctc: CTC,
        joint_network: Optional[torch.nn.Module],
        postdecoder: Optional[AbsPostDecoder] = None,
        deliberationencoder: Optional[AbsPostEncoder] = None,
        transcript_token_list: Union[Tuple[str, ...], List[str]] = None,
        ctc_weight: float = 0.5,
        interctc_weight: float = 0.0,
        ignore_id: int = -1,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        report_cer: bool = True,
        report_wer: bool = True,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        extract_feats_in_collect_stats: bool = True,
        two_pass: bool = False,
        pre_postencoder_norm: bool = False,
    ):
        assert check_argument_types()
        assert 0.0 <= ctc_weight <= 1.0, ctc_weight
        assert 0.0 <= interctc_weight < 1.0, interctc_weight
        # Deliberately skip ESPnetASRModel.__init__ and build attributes here.
        AbsESPnetModel.__init__(self)
        # note that eos is the same as sos (equivalent ID)
        self.blank_id = 0
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        self.vocab_size = vocab_size
        self.ignore_id = ignore_id
        self.ctc_weight = ctc_weight
        self.interctc_weight = interctc_weight
        self.token_list = token_list.copy()
        if transcript_token_list is not None:
            self.transcript_token_list = transcript_token_list.copy()
        # NOTE(review): two_pass / pre_postencoder_norm are stored but not
        # read in this file — presumably consumed by subclasses or inference
        # code; confirm before removing.
        self.two_pass = two_pass
        self.pre_postencoder_norm = pre_postencoder_norm
        self.frontend = frontend
        self.specaug = specaug
        self.normalize = normalize
        self.preencoder = preencoder
        self.postencoder = postencoder
        self.postdecoder = postdecoder
        self.encoder = encoder
        if self.postdecoder is not None:
            # Projection so encoder output matches the postdecoder width when
            # they differ (used in encode() before concatenation).
            if self.encoder._output_size != self.postdecoder.output_size_dim:
                self.uniform_linear = torch.nn.Linear(
                    self.encoder._output_size, self.postdecoder.output_size_dim
                )
        self.deliberationencoder = deliberationencoder
        # we set self.decoder = None in the CTC mode since
        # self.decoder parameters were never used and PyTorch complained
        # and threw an Exception in the multi-GPU experiment.
        # thanks Jeff Farris for pointing out the issue.
        if not hasattr(self.encoder, "interctc_use_conditioning"):
            self.encoder.interctc_use_conditioning = False
        if self.encoder.interctc_use_conditioning:
            self.encoder.conditioning_layer = torch.nn.Linear(
                vocab_size, self.encoder.output_size()
            )
        self.use_transducer_decoder = joint_network is not None
        self.error_calculator = None
        if self.use_transducer_decoder:
            from warprnnt_pytorch import RNNTLoss
            self.decoder = decoder
            self.joint_network = joint_network
            self.criterion_transducer = RNNTLoss(
                blank=self.blank_id,
                fastemit_lambda=0.0,
            )
            if report_cer or report_wer:
                self.error_calculator_trans = ErrorCalculatorTransducer(
                    decoder,
                    joint_network,
                    token_list,
                    sym_space,
                    sym_blank,
                    report_cer=report_cer,
                    report_wer=report_wer,
                )
            else:
                self.error_calculator_trans = None
                if self.ctc_weight != 0:
                    self.error_calculator = ErrorCalculator(
                        token_list, sym_space, sym_blank, report_cer, report_wer
                    )
        else:
            # we set self.decoder = None in the CTC mode since
            # self.decoder parameters were never used and PyTorch complained
            # and threw an Exception in the multi-GPU experiment.
            # thanks Jeff Farris for pointing out the issue.
            if ctc_weight == 1.0:
                self.decoder = None
            else:
                self.decoder = decoder
            self.criterion_att = LabelSmoothingLoss(
                size=vocab_size,
                padding_idx=ignore_id,
                smoothing=lsm_weight,
                normalize_length=length_normalized_loss,
            )
            if report_cer or report_wer:
                self.error_calculator = ErrorCalculator(
                    token_list, sym_space, sym_blank, report_cer, report_wer
                )
        if ctc_weight == 0.0:
            self.ctc = None
        else:
            self.ctc = ctc
        self.extract_feats_in_collect_stats = extract_feats_in_collect_stats

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        transcript: torch.Tensor = None,
        transcript_lengths: torch.Tensor = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )
            text: (Batch, Length)
            text_lengths: (Batch,)
            transcript: (Batch, Length) ASR token ids fed to the post-decoder,
                if one is configured.
            transcript_lengths: (Batch,)
            kwargs: "utt_id" is among the input.

        Returns:
            Tuple of (loss, stats dict, batch weight) made gatherable for
            DataParallel.
        """
        assert text_lengths.dim() == 1, text_lengths.shape
        # Check that batch_size is unified
        assert (
            speech.shape[0]
            == speech_lengths.shape[0]
            == text.shape[0]
            == text_lengths.shape[0]
        ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
        batch_size = speech.shape[0]
        # for data-parallel
        text = text[:, : text_lengths.max()]
        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(
            speech, speech_lengths, transcript, transcript_lengths
        )
        intermediate_outs = None
        if isinstance(encoder_out, tuple):
            intermediate_outs = encoder_out[1]
            encoder_out = encoder_out[0]
        loss_att, acc_att, cer_att, wer_att = None, None, None, None
        loss_ctc, cer_ctc = None, None
        loss_transducer, cer_transducer, wer_transducer = None, None, None
        stats = dict()
        # 1. CTC branch
        if self.ctc_weight != 0.0:
            loss_ctc, cer_ctc = self._calc_ctc_loss(
                encoder_out, encoder_out_lens, text, text_lengths
            )
            # Collect CTC branch stats
            stats["loss_ctc"] = loss_ctc.detach() if loss_ctc is not None else None
            stats["cer_ctc"] = cer_ctc
        # Intermediate CTC (optional)
        loss_interctc = 0.0
        if self.interctc_weight != 0.0 and intermediate_outs is not None:
            for layer_idx, intermediate_out in intermediate_outs:
                # we assume intermediate_out has the same length & padding
                # as those of encoder_out
                loss_ic, cer_ic = self._calc_ctc_loss(
                    intermediate_out, encoder_out_lens, text, text_lengths
                )
                loss_interctc = loss_interctc + loss_ic
                # Collect Intermedaite CTC stats
                stats["loss_interctc_layer{}".format(layer_idx)] = (
                    loss_ic.detach() if loss_ic is not None else None
                )
                stats["cer_interctc_layer{}".format(layer_idx)] = cer_ic
            loss_interctc = loss_interctc / len(intermediate_outs)
            # calculate whole encoder loss
            loss_ctc = (
                1 - self.interctc_weight
            ) * loss_ctc + self.interctc_weight * loss_interctc
        if self.use_transducer_decoder:
            # 2a. Transducer decoder branch
            (
                loss_transducer,
                cer_transducer,
                wer_transducer,
            ) = self._calc_transducer_loss(
                encoder_out,
                encoder_out_lens,
                text,
            )
            if loss_ctc is not None:
                loss = loss_transducer + (self.ctc_weight * loss_ctc)
            else:
                loss = loss_transducer
            # Collect Transducer branch stats
            stats["loss_transducer"] = (
                loss_transducer.detach() if loss_transducer is not None else None
            )
            stats["cer_transducer"] = cer_transducer
            stats["wer_transducer"] = wer_transducer
        else:
            # 2b. Attention decoder branch
            if self.ctc_weight != 1.0:
                loss_att, acc_att, cer_att, wer_att = self._calc_att_loss(
                    encoder_out, encoder_out_lens, text, text_lengths
                )
            # 3. CTC-Att loss definition
            if self.ctc_weight == 0.0:
                loss = loss_att
            elif self.ctc_weight == 1.0:
                loss = loss_ctc
            else:
                loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att
            # Collect Attn branch stats
            stats["loss_att"] = loss_att.detach() if loss_att is not None else None
            stats["acc"] = acc_att
            stats["cer"] = cer_att
            stats["wer"] = wer_att
        # Collect total loss stats
        stats["loss"] = loss.detach()
        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight

    def collect_feats(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        transcript: torch.Tensor = None,
        transcript_lengths: torch.Tensor = None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Extract frontend features only (used in collect_stats mode)."""
        feats, feats_lengths = self._extract_feats(speech, speech_lengths)
        return {"feats": feats, "feats_lengths": feats_lengths}

    def encode(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        transcript_pad: torch.Tensor = None,
        transcript_pad_lens: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Frontend + Encoder. Note that this method is used by asr_inference.py

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )
            transcript_pad: (Batch, Length) padded transcript ids (-1 = pad),
                required when a post-decoder is configured.
            transcript_pad_lens: (Batch,) — currently unused in this method.
        """
        with autocast(False):
            # 1. Extract feats
            feats, feats_lengths = self._extract_feats(speech, speech_lengths)
            # 2. Data augmentation
            if self.specaug is not None and self.training:
                feats, feats_lengths = self.specaug(feats, feats_lengths)
            # 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)
        # Pre-encoder, e.g. used for raw input data
        if self.preencoder is not None:
            feats, feats_lengths = self.preencoder(feats, feats_lengths)
        # 4. Forward encoder
        # feats: (Batch, Length, Dim)
        # -> encoder_out: (Batch, Length2, Dim2)
        if self.encoder.interctc_use_conditioning:
            encoder_out, encoder_out_lens, _ = self.encoder(
                feats, feats_lengths, ctc=self.ctc
            )
        else:
            encoder_out, encoder_out_lens, _ = self.encoder(
                feats,
                feats_lengths,
            )
        intermediate_outs = None
        if isinstance(encoder_out, tuple):
            intermediate_outs = encoder_out[1]
            encoder_out = encoder_out[0]
        # Post-encoder, e.g. NLU
        if self.postencoder is not None:
            encoder_out, encoder_out_lens = self.postencoder(
                encoder_out, encoder_out_lens
            )
        if self.postdecoder is not None:
            if self.encoder._output_size != self.postdecoder.output_size_dim:
                encoder_out = self.uniform_linear(encoder_out)
            # Detokenize the padded transcript ids back to strings for the
            # text post-decoder (-1 entries are padding and are skipped).
            transcript_list = [
                " ".join([self.transcript_token_list[int(k)] for k in k1 if k != -1])
                for k1 in transcript_pad
            ]
            (
                transcript_input_id_features,
                transcript_input_mask_features,
                transcript_segment_ids_feature,
                transcript_position_ids_feature,
                input_id_length,
            ) = self.postdecoder.convert_examples_to_features(transcript_list, 128)
            bert_encoder_out = self.postdecoder(
                torch.LongTensor(transcript_input_id_features).to(device=speech.device),
                torch.LongTensor(transcript_input_mask_features).to(
                    device=speech.device
                ),
                torch.LongTensor(transcript_segment_ids_feature).to(
                    device=speech.device
                ),
                torch.LongTensor(transcript_position_ids_feature).to(
                    device=speech.device
                ),
            )
            bert_encoder_lens = torch.LongTensor(input_id_length).to(
                device=speech.device
            )
            bert_encoder_out = bert_encoder_out[:, : torch.max(bert_encoder_lens)]
            # Concatenate speech-encoder and text-encoder outputs per utterance,
            # then re-pad to the new per-batch maximum length.
            final_encoder_out_lens = encoder_out_lens + bert_encoder_lens
            max_lens = torch.max(final_encoder_out_lens)
            encoder_new_out = torch.zeros(
                (encoder_out.shape[0], max_lens, encoder_out.shape[2])
            ).to(device=speech.device)
            for k in range(len(encoder_out)):
                encoder_new_out[k] = torch.cat(
                    (
                        encoder_out[k, : encoder_out_lens[k]],
                        bert_encoder_out[k, : bert_encoder_lens[k]],
                        torch.zeros(
                            (max_lens - final_encoder_out_lens[k], encoder_out.shape[2])
                        ).to(device=speech.device),
                    ),
                    0,
                )
            if self.deliberationencoder is not None:
                encoder_new_out, final_encoder_out_lens = self.deliberationencoder(
                    encoder_new_out, final_encoder_out_lens
                )
            encoder_out = encoder_new_out
            encoder_out_lens = final_encoder_out_lens
        assert encoder_out.size(0) == speech.size(0), (
            encoder_out.size(),
            speech.size(0),
        )
        assert encoder_out.size(1) <= encoder_out_lens.max(), (
            encoder_out.size(),
            encoder_out_lens.max(),
        )
        if intermediate_outs is not None:
            return (encoder_out, intermediate_outs), encoder_out_lens
        return encoder_out, encoder_out_lens
| 16,575 | 37.459397 | 88 | py |
espnet | espnet-master/espnet2/slu/postencoder/conformer_postencoder.py | #!/usr/bin/env python3
# 2021, Carnegie Mellon University; Siddhant Arora
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Conformers PostEncoder."""
import logging
from typing import Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
from espnet.nets.pytorch_backend.conformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.nets_utils import get_activation, make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import ( # noqa: H301
LegacyRelPositionMultiHeadedAttention,
MultiHeadedAttention,
RelPositionMultiHeadedAttention,
)
from espnet.nets.pytorch_backend.transformer.embedding import ( # noqa: H301
LegacyRelPositionalEncoding,
PositionalEncoding,
RelPositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
class ConformerPostEncoder(AbsPostEncoder):
    """Conformer post-encoder module.

    Applies a stack of Conformer encoder blocks on top of an upstream
    encoder's output (used as a post-encoder in SLU pipelines).

    Args:
        input_size (int): Input dimension.
            NOTE(review): currently unused in the "linear" input-layer branch
            (no projection from input_size to output_size is applied) — confirm
            callers always pass input_size == output_size.
        output_size (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of encoder blocks.
        dropout_rate (float): Dropout rate.
        attention_dropout_rate (float): Dropout rate in attention.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            If True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            If False, no additional linear will be applied. i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        rel_pos_type (str): Whether to use the latest relative positional encoding or
            the legacy one. The legacy relative positional encoding will be deprecated
            in the future. More Details can be found in
            https://github.com/espnet/espnet/pull/2816.
        pos_enc_layer_type (str): Encoder positional encoding layer type.
        selfattention_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernel size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "linear",
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 3,
        macaron_style: bool = False,
        rel_pos_type: str = "legacy",
        pos_enc_layer_type: str = "rel_pos",
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = True,
        zero_triu: bool = False,
        cnn_module_kernel: int = 31,
        padding_idx: int = -1,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        # Map the "rel_pos"/"rel_selfattn" aliases onto their legacy
        # counterparts when the legacy relative-positional scheme is requested.
        if rel_pos_type == "legacy":
            if pos_enc_layer_type == "rel_pos":
                pos_enc_layer_type = "legacy_rel_pos"
            if selfattention_layer_type == "rel_selfattn":
                selfattention_layer_type = "legacy_rel_selfattn"
        elif rel_pos_type == "latest":
            assert selfattention_layer_type != "legacy_rel_selfattn"
            assert pos_enc_layer_type != "legacy_rel_pos"
        else:
            raise ValueError("unknown rel_pos_type: " + rel_pos_type)
        activation = get_activation(activation_type)
        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            # Relative positional encoding must be paired with the matching
            # self-attention implementation.
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert selfattention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
            logging.warning(
                "Using legacy_rel_pos and it will be deprecated in the future."
            )
        elif pos_enc_layer_type == "None":
            pos_enc_class = None
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
        if input_layer == "linear":
            # No linear projection here: only positional encoding is applied.
            self.embed = torch.nn.Sequential(
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "None":
            self.embed = None
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
                activation,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        if selfattention_layer_type == "selfattn":
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
        elif selfattention_layer_type == "legacy_rel_selfattn":
            assert pos_enc_layer_type == "legacy_rel_pos"
            encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
            )
            logging.warning(
                "Using legacy_rel_selfattn and it will be deprecated in the future."
            )
        elif selfattention_layer_type == "rel_selfattn":
            assert pos_enc_layer_type == "rel_pos"
            encoder_selfattn_layer = RelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                output_size,
                attention_dropout_rate,
                zero_triu,
            )
        else:
            raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type)
        convolution_layer = ConvolutionModule
        convolution_layer_args = (output_size, cnn_module_kernel, activation)
        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                output_size,
                encoder_selfattn_layer(*encoder_selfattn_layer_args),
                positionwise_layer(*positionwise_layer_args),
                positionwise_layer(*positionwise_layer_args) if macaron_style else None,
                convolution_layer(*convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode padded features.

        Args:
            input: padded input tensor (B, L, D)
            input_lengths: valid lengths per batch element (B,)

        Returns:
            Tuple of the encoded tensor (B, L, D) and output lengths (B,).
        """
        xs_pad = input
        # masks: (B, L), True on valid frames.
        masks = (~make_pad_mask(input_lengths)).to(input[0].device)
        if self.embed is not None:
            xs_pad = self.embed(xs_pad)
        # Encoder layers expect the mask shaped (B, 1, L).
        masks = masks.reshape(masks.shape[0], 1, masks.shape[1])
        xs_pad, masks = self.encoders(xs_pad, masks)
        if isinstance(xs_pad, tuple):
            # Relative-position encoders return (features, pos_emb).
            xs_pad = xs_pad[0]
        if self.normalize_before:
            xs_pad = self.after_norm(xs_pad)
        olens = masks.squeeze(1).sum(1)
        return xs_pad, olens

    def output_size(self) -> int:
        """Get the output size."""
        return self._output_size
| 10,132 | 39.370518 | 88 | py |
espnet | espnet-master/espnet2/slu/postencoder/transformer_postencoder.py | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder definition."""
from typing import Optional, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
class TransformerPostEncoder(AbsPostEncoder):
    """Transformer encoder module.

    Applies a stack of Transformer encoder blocks on top of an upstream
    encoder's output (used as a post-encoder in SLU pipelines).

    Args:
        input_size: input dim
        output_size: dimension of attention
        attention_heads: the number of heads of multi head attention
        linear_units: the number of units of position-wise feed forward
        num_blocks: the number of decoder blocks
        dropout_rate: dropout rate
        attention_dropout_rate: dropout rate in attention
        positional_dropout_rate: dropout rate after adding positional encoding
        input_layer: input layer type
        pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
        normalize_before: whether to use layer_norm before the first block
        concat_after: whether to concat attention layer's input and output
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type: linear of conv1d
        positionwise_conv_kernel_size: kernel size of positionwise conv1d layer
        padding_idx: padding_idx for input_layer=embed
    """

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: Optional[str] = "linear",
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 1,
        padding_idx: int = -1,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "None":
            # NOTE(review): despite the name, this branch still applies a
            # Linear projection (just without norm/dropout/activation) —
            # presumably intentional; confirm against callers.
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        # Select the position-wise feed-forward implementation and its
        # constructor arguments; instantiated once per block below.
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                output_size,
                MultiHeadedAttention(
                    attention_heads, output_size, attention_dropout_rate
                ),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)

    def output_size(self) -> int:
        """Return the output feature dimension."""
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Embed positions in tensor.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.

        Returns:
            position embedded tensor and output lengths (B,)
        """
        # masks: (B, 1, L), True on valid frames.
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        xs_pad = self.embed(xs_pad)
        xs_pad, masks = self.encoders(xs_pad, masks)
        if self.normalize_before:
            xs_pad = self.after_norm(xs_pad)
        olens = masks.squeeze(1).sum(1)
        return xs_pad, olens
| 5,830 | 36.140127 | 82 | py |
espnet | espnet-master/espnet2/slu/postdecoder/abs_postdecoder.py | from abc import ABC, abstractmethod
import torch
class AbsPostDecoder(torch.nn.Module, ABC):
    """Abstract interface for SLU post-decoders.

    A post-decoder consumes tokenized transcript tensors (ids, attention
    mask, token-type ids, position ids) and produces a feature tensor of
    dimension ``output_size()``.
    """

    @abstractmethod
    def output_size(self) -> int:
        """Return the feature dimension of the tensors produced by forward()."""
        raise NotImplementedError

    @abstractmethod
    def forward(
        self,
        transcript_input_ids: torch.LongTensor,
        transcript_attention_mask: torch.LongTensor,
        transcript_token_type_ids: torch.LongTensor,
        transcript_position_ids: torch.LongTensor,
    ) -> torch.Tensor:
        """Encode the tokenized transcript into a feature tensor."""
        raise NotImplementedError

    @abstractmethod
    def convert_examples_to_features(
        self, data: list, max_seq_length: int, output_size: int
    ):
        """Convert raw transcript strings into padded id/mask/segment features.

        NOTE(review): the concrete HuggingFace implementation takes only
        (data, max_seq_length) and omits ``output_size`` — confirm which
        signature is intended.
        """
        raise NotImplementedError
| 662 | 24.5 | 63 | py |
espnet | espnet-master/espnet2/slu/postdecoder/hugging_face_transformers_postdecoder.py | #!/usr/bin/env python3
# 2022, Carnegie Mellon University; Siddhant Arora
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Hugging Face Transformers PostDecoder."""
from espnet2.slu.postdecoder.abs_postdecoder import AbsPostDecoder
try:
from transformers import AutoModel, AutoTokenizer
is_transformers_available = True
except ImportError:
is_transformers_available = False
import logging
import torch
from typeguard import check_argument_types
class HuggingFaceTransformersPostDecoder(AbsPostDecoder):
    """Hugging Face Transformers PostDecoder.

    Encodes ASR transcripts with a pretrained Transformers model (e.g. BERT)
    and projects its hidden states to ``output_size`` dimensions.
    """

    def __init__(
        self,
        model_name_or_path: str,
        output_size=256,
    ):
        """Initialize the module.

        Args:
            model_name_or_path: Hugging Face model identifier or local path.
            output_size: dimension of the linear projection applied on top
                of the pretrained model's hidden states.
        """
        assert check_argument_types()
        super().__init__()
        if not is_transformers_available:
            raise ImportError(
                "`transformers` is not available. Please install it via `pip install"
                " transformers` or `cd /path/to/espnet/tools && . ./activate_python.sh"
                " && ./installers/install_transformers.sh`."
            )
        self.model = AutoModel.from_pretrained(model_name_or_path)
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name_or_path,
            use_fast=True,
        )
        logging.info("Pretrained Transformers model parameters reloaded!")
        self.out_linear = torch.nn.Linear(self.model.config.hidden_size, output_size)
        self.output_size_dim = output_size

    def forward(
        self,
        transcript_input_ids: torch.LongTensor,
        transcript_attention_mask: torch.LongTensor,
        transcript_token_type_ids: torch.LongTensor,
        transcript_position_ids: torch.LongTensor,
    ) -> torch.Tensor:
        """Encode the tokenized transcript and project to output_size."""
        transcript_outputs = self.model(
            input_ids=transcript_input_ids,
            position_ids=transcript_position_ids,
            attention_mask=transcript_attention_mask,
            token_type_ids=transcript_token_type_ids,
        )
        return self.out_linear(transcript_outputs.last_hidden_state)

    def output_size(self) -> int:
        """Get the output size."""
        return self.output_size_dim

    def convert_examples_to_features(self, data, max_seq_length):
        """Tokenize transcripts into fixed-length BERT-style feature lists.

        Args:
            data: list of transcript strings.
            max_seq_length: padded sequence length, including the added
                "[CLS]" and "[SEP]" tokens.

        Returns:
            Tuple of (input_ids, input_mask, segment_ids, position_ids,
            unpadded input lengths), each a list with one entry per
            transcript.
        """
        input_id_features = []
        input_mask_features = []
        segment_ids_feature = []
        position_ids_feature = []
        input_id_length = []
        for transcript in data:
            tokens_a = self.tokenizer.tokenize(transcript)
            # Truncate so that "[CLS]" and "[SEP]" still fit.
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[: (max_seq_length - 2)]
            tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
            segment_ids = [0] * len(tokens)
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
            input_id_length.append(len(input_ids))
            # Zero-pad up to the sequence length.
            padding = [0] * (max_seq_length - len(input_ids))
            input_ids += padding
            input_mask += padding
            segment_ids += padding
            position_ids = list(range(max_seq_length))
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(position_ids) == max_seq_length
            input_id_features.append(input_ids)
            input_mask_features.append(input_mask)
            segment_ids_feature.append(segment_ids)
            position_ids_feature.append(position_ids)
        return (
            input_id_features,
            input_mask_features,
            segment_ids_feature,
            position_ids_feature,
            input_id_length,
        )
| 3,807 | 34.588785 | 87 | py |
espnet | espnet-master/espnet2/bin/enh_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from itertools import chain
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import humanfriendly
import numpy as np
import torch
import yaml
from tqdm import trange
from typeguard import check_argument_types
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainMSE
from espnet2.enh.loss.criterions.time_domain import SISNRLoss
from espnet2.enh.loss.wrappers.pit_solver import PITSolver
from espnet2.fileio.sound_scp import SoundScpWriter
from espnet2.tasks.enh import EnhancementTask
from espnet2.tasks.enh_s2t import EnhS2TTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
EPS = torch.finfo(torch.get_default_dtype()).eps
def get_train_config(train_config, model_file=None):
    """Resolve the training-config path.

    Falls back to ``config.yaml`` next to the model file when no explicit
    training config is given.
    """
    if train_config is not None:
        return Path(train_config)
    assert model_file is not None, (
        "The argument 'model_file' must be provided "
        "if the argument 'train_config' is not specified."
    )
    return Path(model_file).parent / "config.yaml"
def recursive_dict_update(dict_org, dict_patch, verbose=False, log_prefix=""):
    """Update `dict_org` with `dict_patch` in-place recursively.

    Args:
        dict_org (dict): dictionary updated in place
        dict_patch (dict): dictionary providing new / overriding values
        verbose (bool): whether to log each added or overwritten value
        log_prefix (str): dotted key prefix prepended to keys in log messages
    """
    for key, value in dict_patch.items():
        if key not in dict_org:
            if verbose:
                logging.info(
                    "Overwriting config: [{}{}]: None -> {}".format(
                        log_prefix, key, value
                    )
                )
            dict_org[key] = value
        elif isinstance(value, dict):
            # BUGFIX: accumulate the full dotted path; previously the prefix
            # restarted at each nesting level, so logs for deeply nested keys
            # showed only the innermost parent.
            recursive_dict_update(
                dict_org[key],
                value,
                verbose=verbose,
                log_prefix=f"{log_prefix}{key}.",
            )
        else:
            if verbose and dict_org[key] != value:
                logging.info(
                    "Overwriting config: [{}{}]: {} -> {}".format(
                        log_prefix, key, dict_org[key], value
                    )
                )
            dict_org[key] = value
def build_model_from_args_and_file(task, args, model_file, device):
    """Build a task model from parsed args and optionally load its weights."""
    model = task.build_model(args)
    if not isinstance(model, AbsESPnetModel):
        raise RuntimeError(
            f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
        )
    model.to(device)
    if model_file is None:
        return model
    map_location = device
    if device == "cuda":
        # NOTE(kamo): "cuda" for torch.load always indicates cuda:0
        # in PyTorch<=1.4
        map_location = f"cuda:{torch.cuda.current_device()}"
    state_dict = torch.load(model_file, map_location=map_location)
    model.load_state_dict(state_dict)
    return model
class SeparateSpeech:
    """SeparateSpeech class

    Loads a trained speech enhancement/separation model and applies it to
    mixtures, optionally processing long inputs in overlapping segments.

    Examples:
        >>> import soundfile
        >>> separate_speech = SeparateSpeech("enh_config.yml", "enh.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> separate_speech(audio)
        [separated_audio1, separated_audio2, ...]
    """

    def __init__(
        self,
        train_config: Union[Path, str] = None,
        model_file: Union[Path, str] = None,
        inference_config: Union[Path, str] = None,
        segment_size: Optional[float] = None,
        hop_size: Optional[float] = None,
        normalize_segment_scale: bool = False,
        show_progressbar: bool = False,
        ref_channel: Optional[int] = None,
        normalize_output_wav: bool = False,
        device: str = "cpu",
        dtype: str = "float32",
        enh_s2t_task: bool = False,
    ):
        assert check_argument_types()
        task = EnhancementTask if not enh_s2t_task else EnhS2TTask
        # 1. Build Enh model
        if inference_config is None:
            enh_model, enh_train_args = task.build_model_from_file(
                train_config, model_file, device
            )
        else:
            # Overwrite model attributes: merge inference_config into the
            # training config before rebuilding the model.
            train_config = get_train_config(train_config, model_file=model_file)
            with train_config.open("r", encoding="utf-8") as f:
                train_args = yaml.safe_load(f)
            with Path(inference_config).open("r", encoding="utf-8") as f:
                infer_args = yaml.safe_load(f)
            if enh_s2t_task:
                arg_list = ("enh_encoder", "enh_separator", "enh_decoder")
            else:
                arg_list = ("encoder", "separator", "decoder")
            # Only encoder/separator/decoder (and their *_conf) may be
            # overridden at inference time.
            supported_keys = list(chain(*[[k, k + "_conf"] for k in arg_list]))
            for k in infer_args.keys():
                if k not in supported_keys:
                    raise ValueError(
                        "Only the following top-level keys are supported: %s"
                        % ", ".join(supported_keys)
                    )
            recursive_dict_update(train_args, infer_args, verbose=True)
            enh_train_args = argparse.Namespace(**train_args)
            enh_model = build_model_from_args_and_file(
                task, enh_train_args, model_file, device
            )
        if enh_s2t_task:
            enh_model = enh_model.enh_model
        enh_model.to(dtype=getattr(torch, dtype)).eval()
        self.device = device
        self.dtype = dtype
        self.enh_train_args = enh_train_args
        self.enh_model = enh_model
        # only used when processing long speech, i.e.
        # segment_size is not None and hop_size is not None
        self.segment_size = segment_size
        self.hop_size = hop_size
        self.normalize_segment_scale = normalize_segment_scale
        self.normalize_output_wav = normalize_output_wav
        self.show_progressbar = show_progressbar
        self.num_spk = enh_model.num_spk
        # NOTE(review): `task` is reused here as a log-message string,
        # shadowing the Task class bound above.
        task = "enhancement" if self.num_spk == 1 else "separation"
        # reference channel for processing multi-channel speech
        if ref_channel is not None:
            logging.info(
                "Overwrite enh_model.separator.ref_channel with {}".format(ref_channel)
            )
            enh_model.separator.ref_channel = ref_channel
            if hasattr(enh_model.separator, "beamformer"):
                enh_model.separator.beamformer.ref_channel = ref_channel
            self.ref_channel = ref_channel
        else:
            self.ref_channel = enh_model.ref_channel
        self.segmenting = segment_size is not None and hop_size is not None
        if self.segmenting:
            logging.info("Perform segment-wise speech %s" % task)
            logging.info(
                "Segment length = {} sec, hop length = {} sec".format(
                    segment_size, hop_size
                )
            )
        else:
            logging.info("Perform direct speech %s on the input" % task)

    @torch.no_grad()
    def __call__(
        self, speech_mix: Union[torch.Tensor, np.ndarray], fs: int = 8000
    ) -> List[torch.Tensor]:
        """Inference

        Args:
            speech_mix: Input speech data (Batch, Nsamples [, Channels])
            fs: sample rate

        Returns:
            [separated_audio1, separated_audio2, ...]
        """
        assert check_argument_types()
        # Input as audio signal
        if isinstance(speech_mix, np.ndarray):
            speech_mix = torch.as_tensor(speech_mix)
        assert speech_mix.dim() > 1, speech_mix.size()
        batch_size = speech_mix.size(0)
        speech_mix = speech_mix.to(getattr(torch, self.dtype))
        # lengths: (B,)
        lengths = speech_mix.new_full(
            [batch_size], dtype=torch.long, fill_value=speech_mix.size(1)
        )
        # a. To device
        speech_mix = to_device(speech_mix, device=self.device)
        lengths = to_device(lengths, device=self.device)
        if self.segmenting and lengths[0] > self.segment_size * fs:
            # Segment-wise speech enhancement/separation
            # overlap between consecutive segments, in samples
            overlap_length = int(np.round(fs * (self.segment_size - self.hop_size)))
            num_segments = int(
                np.ceil((speech_mix.size(1) - overlap_length) / (self.hop_size * fs))
            )
            # T: full segment length; t: valid length of the last segment
            t = T = int(self.segment_size * fs)
            pad_shape = speech_mix[:, :T].shape
            enh_waves = []
            range_ = trange if self.show_progressbar else range
            for i in range_(num_segments):
                st = int(i * self.hop_size * fs)
                en = st + T
                if en >= lengths[0]:
                    # en - st < T (last segment)
                    en = lengths[0]
                    speech_seg = speech_mix.new_zeros(pad_shape)
                    t = en - st
                    speech_seg[:, :t] = speech_mix[:, st:en]
                else:
                    t = T
                    speech_seg = speech_mix[:, st:en]  # B x T [x C]
                lengths_seg = speech_mix.new_full(
                    [batch_size], dtype=torch.long, fill_value=T
                )
                # b. Enhancement/Separation Forward
                feats, f_lens = self.enh_model.encoder(speech_seg, lengths_seg)
                feats, _, _ = self.enh_model.separator(feats, f_lens)
                processed_wav = [
                    self.enh_model.decoder(f, lengths_seg)[0] for f in feats
                ]
                if speech_seg.dim() > 2:
                    # multi-channel speech
                    speech_seg_ = speech_seg[:, self.ref_channel]
                else:
                    speech_seg_ = speech_seg
                if self.normalize_segment_scale:
                    # normalize the scale to match the input mixture scale
                    mix_energy = torch.sqrt(
                        torch.mean(speech_seg_[:, :t].pow(2), dim=1, keepdim=True)
                    )
                    enh_energy = torch.sqrt(
                        torch.mean(
                            sum(processed_wav)[:, :t].pow(2), dim=1, keepdim=True
                        )
                    )
                    processed_wav = [
                        w * (mix_energy / enh_energy) for w in processed_wav
                    ]
                # List[torch.Tensor(num_spk, B, T)]
                enh_waves.append(torch.stack(processed_wav, dim=0))
            # c. Stitch the enhanced segments together
            waves = enh_waves[0]
            for i in range(1, num_segments):
                # permutation between separated streams in last and current segments
                perm = self.cal_permumation(
                    waves[:, :, -overlap_length:],
                    enh_waves[i][:, :, :overlap_length],
                    criterion="si_snr",
                )
                # repermute separated streams in current segment
                for batch in range(batch_size):
                    enh_waves[i][:, batch] = enh_waves[i][perm[batch], batch]
                if i == num_segments - 1:
                    # zero out the zero-padded tail of the last segment
                    enh_waves[i][:, :, t:] = 0
                    enh_waves_res_i = enh_waves[i][:, :, overlap_length:t]
                else:
                    enh_waves_res_i = enh_waves[i][:, :, overlap_length:]
                # overlap-and-add (average over the overlapped part)
                waves[:, :, -overlap_length:] = (
                    waves[:, :, -overlap_length:] + enh_waves[i][:, :, :overlap_length]
                ) / 2
                # concatenate the residual parts of the later segment
                waves = torch.cat([waves, enh_waves_res_i], dim=2)
            # ensure the stitched length is same as input
            assert waves.size(2) == speech_mix.size(1), (waves.shape, speech_mix.shape)
            waves = torch.unbind(waves, dim=0)
        else:
            # b. Enhancement/Separation Forward
            feats, f_lens = self.enh_model.encoder(speech_mix, lengths)
            feats, _, _ = self.enh_model.separator(feats, f_lens)
            waves = [self.enh_model.decoder(f, lengths)[0] for f in feats]
        # NOTE(review): the assert message below is a boolean expression;
        # (len(waves), self.num_spk) was probably intended.
        assert len(waves) == self.num_spk, len(waves) == self.num_spk
        assert len(waves[0]) == batch_size, (len(waves[0]), batch_size)
        if self.normalize_output_wav:
            waves = [
                (w / abs(w).max(dim=1, keepdim=True)[0] * 0.9).cpu().numpy()
                for w in waves
            ]  # list[(batch, sample)]
        else:
            waves = [w.cpu().numpy() for w in waves]
        return waves

    @torch.no_grad()
    def cal_permumation(self, ref_wavs, enh_wavs, criterion="si_snr"):
        """Calculate the permutation between separated streams in two adjacent segments.

        Args:
            ref_wavs (List[torch.Tensor]): [(Batch, Nsamples)]
            enh_wavs (List[torch.Tensor]): [(Batch, Nsamples)]
            criterion (str): one of ("si_snr", "mse")

        Returns:
            perm (torch.Tensor): permutation for enh_wavs (Batch, num_spk)
        """
        criterion_class = {"si_snr": SISNRLoss, "mse": FrequencyDomainMSE}[criterion]
        pit_solver = PITSolver(criterion=criterion_class())
        _, _, others = pit_solver(ref_wavs, enh_wavs)
        perm = others["perm"]
        return perm

    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build SeparateSpeech instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.

        Returns:
            SeparateSpeech: SeparateSpeech instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))
        return SeparateSpeech(**kwargs)
def humanfriendly_or_none(value: str):
    """Parse a human-friendly size string.

    The literal strings "none"/"None"/"NONE" map to None; anything else is
    parsed with ``humanfriendly.parse_size``.
    """
    if value in {"none", "None", "NONE"}:
        return None
    return humanfriendly.parse_size(value)
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    fs: int,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    train_config: Optional[str],
    model_file: Optional[str],
    model_tag: Optional[str],
    inference_config: Optional[str],
    allow_variable_data_keys: bool,
    segment_size: Optional[float],
    hop_size: Optional[float],
    normalize_segment_scale: bool,
    show_progressbar: bool,
    ref_channel: Optional[int],
    normalize_output_wav: bool,
    enh_s2t_task: bool,
):
    """Run speech enhancement/separation inference over a dataset.

    Builds a SeparateSpeech instance from the given model/config, iterates
    over the data, and writes each separated stream to
    ``{output_dir}/wavs/{n}`` with scp files ``{output_dir}/spk{n}.scp``.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build separate_speech
    separate_speech_kwargs = dict(
        train_config=train_config,
        model_file=model_file,
        inference_config=inference_config,
        segment_size=segment_size,
        hop_size=hop_size,
        normalize_segment_scale=normalize_segment_scale,
        show_progressbar=show_progressbar,
        ref_channel=ref_channel,
        normalize_output_wav=normalize_output_wav,
        device=device,
        dtype=dtype,
        enh_s2t_task=enh_s2t_task,
    )
    separate_speech = SeparateSpeech.from_pretrained(
        model_tag=model_tag,
        **separate_speech_kwargs,
    )
    # 3. Build data-iterator
    loader = EnhancementTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=EnhancementTask.build_preprocess_fn(
            separate_speech.enh_train_args, False
        ),
        collate_fn=EnhancementTask.build_collate_fn(
            separate_speech.enh_train_args, False
        ),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Start for-loop
    # One SoundScpWriter per separated speaker stream.
    output_dir = Path(output_dir).expanduser().resolve()
    writers = []
    for i in range(separate_speech.num_spk):
        writers.append(
            SoundScpWriter(f"{output_dir}/wavs/{i + 1}", f"{output_dir}/spk{i + 1}.scp")
        )
    import tqdm
    for i, (keys, batch) in tqdm.tqdm(enumerate(loader)):
        logging.info(f"[{i}] Enhancing {keys}")
        assert isinstance(batch, dict), type(batch)
        assert all(isinstance(s, str) for s in keys), keys
        _bs = len(next(iter(batch.values())))
        assert len(keys) == _bs, f"{len(keys)} != {_bs}"
        # Drop the *_lengths entries; SeparateSpeech derives lengths itself.
        batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
        waves = separate_speech(**batch, fs=fs)
        for spk, w in enumerate(waves):
            for b in range(batch_size):
                writers[spk][keys[b]] = fs, w[b]
    for writer in writers:
        writer.close()
def get_parser():
    """Build the command-line argument parser for enhancement inference."""
    parser = config_argparse.ArgumentParser(
        description="Frontend inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--fs", type=humanfriendly_or_none, default=8000, help="Sampling rate"
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group = parser.add_argument_group("Output data related")
    group.add_argument(
        "--normalize_output_wav",
        type=str2bool,
        default=False,
        help="Whether to normalize the predicted wav to [-1~1]",
    )
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--train_config",
        type=str,
        help="Training configuration file",
    )
    group.add_argument(
        "--model_file",
        type=str,
        help="Model parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, train_config and "
        "model_file will be overwritten",
    )
    group.add_argument(
        "--inference_config",
        type=str_or_none,
        default=None,
        help="Optional configuration file for overwriting enh model attributes "
        "during inference",
    )
    group.add_argument(
        "--enh_s2t_task",
        type=str2bool,
        default=False,
        help="enhancement and asr joint model",
    )
    group = parser.add_argument_group("Data loading related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group = parser.add_argument_group("SeparateSpeech related")
    group.add_argument(
        "--segment_size",
        type=float,
        default=None,
        help="Segment length in seconds for segment-wise speech enhancement/separation",
    )
    group.add_argument(
        "--hop_size",
        type=float,
        default=None,
        help="Hop length in seconds for segment-wise speech enhancement/separation",
    )
    group.add_argument(
        "--normalize_segment_scale",
        type=str2bool,
        default=True,
        help="Whether to normalize the energy of the separated streams in each segment",
    )
    group.add_argument(
        "--show_progressbar",
        type=str2bool,
        default=False,
        help="Whether to show a progress bar when performing segment-wise speech "
        "enhancement/separation",
    )
    group.add_argument(
        "--ref_channel",
        type=int,
        default=None,
        help="If not None, this will overwrite the ref_channel defined in the "
        "separator module (for multi-channel speech processing)",
    )
    return parser
def main(cmd=None):
    """Command-line entry point: parse arguments and run inference."""
    # Echo the exact command line to stderr for reproducibility.
    print(get_commandline_args(), file=sys.stderr)
    arg_parser = get_parser()
    parsed = arg_parser.parse_args(cmd)
    options = vars(parsed)
    # "config" is consumed by config_argparse itself and is not a
    # parameter of inference(), so drop it before forwarding.
    options.pop("config", None)
    inference(**options)
if __name__ == "__main__":
    main()
| 21,910 | 33.724247 | 88 | py |
espnet | espnet-master/espnet2/bin/slu_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from distutils.version import LooseVersion
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.quantization
from typeguard import check_argument_types, check_return_type
from espnet2.asr.transducer.beam_search_transducer import BeamSearchTransducer
from espnet2.asr.transducer.beam_search_transducer import (
ExtendedHypothesis as ExtTransHypothesis,
)
from espnet2.asr.transducer.beam_search_transducer import Hypothesis as TransHypothesis
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.lm import LMTask
from espnet2.tasks.slu import SLUTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.batch_beam_search_online_sim import BatchBeamSearchOnlineSim
from espnet.nets.beam_search import BeamSearch, Hypothesis
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.cli_utils import get_commandline_args
class Speech2Understand:
    """Speech2Understand class
    Examples:
        >>> import soundfile
        >>> speech2understand = Speech2Understand("slu_config.yml", "slu.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> speech2understand(audio)
        [(text, token, token_int, hypothesis object), ...]
    """
    def __init__(
        self,
        slu_train_config: Union[Path, str] = None,
        slu_model_file: Union[Path, str] = None,
        transducer_conf: dict = None,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        ngram_scorer: str = "full",
        ngram_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        maxlenratio: float = 0.0,
        minlenratio: float = 0.0,
        batch_size: int = 1,
        dtype: str = "float32",
        beam_size: int = 20,
        ctc_weight: float = 0.5,
        lm_weight: float = 1.0,
        ngram_weight: float = 0.9,
        penalty: float = 0.0,
        nbest: int = 1,
        streaming: bool = False,
        quantize_asr_model: bool = False,
        quantize_lm: bool = False,
        quantize_modules: List[str] = ["Linear"],  # NOTE(review): mutable default; only read here (converted to a set below)
        quantize_dtype: str = "qint8",
    ):
        """Load the SLU model (and optional LM / ngram) and build the beam search."""
        assert check_argument_types()
        task = SLUTask
        if quantize_asr_model or quantize_lm:
            # float16 dynamic quantization requires torch >= 1.5.0.
            if quantize_dtype == "float16" and torch.__version__ < LooseVersion(
                "1.5.0"
            ):
                raise ValueError(
                    "float16 dtype for dynamic quantization is not supported with "
                    "torch version < 1.5.0. Switch to qint8 dtype instead."
                )
            # Resolve module names (e.g. "Linear") to torch.nn classes and the
            # dtype name (e.g. "qint8") to the torch dtype object.
            quantize_modules = set([getattr(torch.nn, q) for q in quantize_modules])
            quantize_dtype = getattr(torch, quantize_dtype)
        # 1. Build ASR model
        scorers = {}
        asr_model, asr_train_args = task.build_model_from_file(
            slu_train_config, slu_model_file, device
        )
        asr_model.to(dtype=getattr(torch, dtype)).eval()
        if quantize_asr_model:
            logging.info("Use quantized asr model for decoding.")
            asr_model = torch.quantization.quantize_dynamic(
                asr_model, qconfig_spec=quantize_modules, dtype=quantize_dtype
            )
        decoder = asr_model.decoder
        ctc = CTCPrefixScorer(ctc=asr_model.ctc, eos=asr_model.eos)
        token_list = asr_model.token_list
        scorers.update(
            decoder=decoder,
            ctc=ctc,
            length_bonus=LengthBonus(len(token_list)),
        )
        # 2. Build Language model
        if lm_train_config is not None:
            lm, lm_train_args = LMTask.build_model_from_file(
                lm_train_config, lm_file, device
            )
            if quantize_lm:
                logging.info("Use quantized lm for decoding.")
                lm = torch.quantization.quantize_dynamic(
                    lm, qconfig_spec=quantize_modules, dtype=quantize_dtype
                )
            scorers["lm"] = lm.lm
        # 3. Build ngram model
        if ngram_file is not None:
            if ngram_scorer == "full":
                from espnet.nets.scorers.ngram import NgramFullScorer
                ngram = NgramFullScorer(ngram_file, token_list)
            else:
                from espnet.nets.scorers.ngram import NgramPartScorer
                ngram = NgramPartScorer(ngram_file, token_list)
        else:
            ngram = None
        scorers["ngram"] = ngram
        # 4. Build BeamSearch object
        # Transducer decoders use a dedicated search; otherwise a (possibly
        # batched) attention/CTC beam search is constructed.
        if asr_model.use_transducer_decoder:
            beam_search_transducer = BeamSearchTransducer(
                decoder=asr_model.decoder,
                joint_network=asr_model.joint_network,
                beam_size=beam_size,
                lm=scorers["lm"] if "lm" in scorers else None,
                lm_weight=lm_weight,
                token_list=token_list,
                **transducer_conf,
            )
            beam_search = None
        else:
            beam_search_transducer = None
            weights = dict(
                decoder=1.0 - ctc_weight,
                ctc=ctc_weight,
                lm=lm_weight,
                ngram=ngram_weight,
                length_bonus=penalty,
            )
            beam_search = BeamSearch(
                beam_size=beam_size,
                weights=weights,
                scorers=scorers,
                sos=asr_model.sos,
                eos=asr_model.eos,
                vocab_size=len(token_list),
                token_list=token_list,
                pre_beam_score_key=None if ctc_weight == 1.0 else "full",
            )
            # TODO(karita): make all scorers batchfied
            if batch_size == 1:
                non_batch = [
                    k
                    for k, v in beam_search.full_scorers.items()
                    if not isinstance(v, BatchScorerInterface)
                ]
                # Swap in the batched search class in-place (same object state)
                # only when every full scorer supports batched scoring.
                if len(non_batch) == 0:
                    if streaming:
                        beam_search.__class__ = BatchBeamSearchOnlineSim
                        beam_search.set_streaming_config(slu_train_config)
                        logging.info(
                            "BatchBeamSearchOnlineSim implementation is selected."
                        )
                    else:
                        beam_search.__class__ = BatchBeamSearch
                        logging.info("BatchBeamSearch implementation is selected.")
                else:
                    logging.warning(
                        f"As non-batch scorers {non_batch} are found, "
                        f"fall back to non-batch implementation."
                    )
            beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
            for scorer in scorers.values():
                if isinstance(scorer, torch.nn.Module):
                    scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
            logging.info(f"Beam_search: {beam_search}")
            logging.info(f"Decoding device={device}, dtype={dtype}")
        # 5. [Optional] Build Text converter: e.g. bpe-sym -> Text
        if token_type is None:
            token_type = asr_train_args.token_type
        if bpemodel is None:
            bpemodel = asr_train_args.bpemodel
        if token_type is None:
            tokenizer = None
        elif token_type == "bpe":
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        converter = TokenIDConverter(token_list=token_list)
        logging.info(f"Text tokenizer: {tokenizer}")
        self.asr_model = asr_model
        self.asr_train_args = asr_train_args
        self.converter = converter
        self.tokenizer = tokenizer
        self.beam_search = beam_search
        self.beam_search_transducer = beam_search_transducer
        self.maxlenratio = maxlenratio
        self.minlenratio = minlenratio
        self.device = device
        self.dtype = dtype
        self.nbest = nbest
    @torch.no_grad()
    def __call__(
        self, speech: Union[torch.Tensor, np.ndarray], transcript: torch.Tensor = None
    ) -> List[
        Tuple[
            Optional[str],
            List[str],
            List[int],
            Union[Hypothesis, ExtTransHypothesis, TransHypothesis],
        ]
    ]:
        """Inference
        Args:
            speech: Input speech data (1-D waveform; a batch axis is added here)
            transcript: Optional transcript tensor forwarded to the encoder
        Returns:
            text, token, token_int, hyp
        """
        assert check_argument_types()
        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)
        # data: (Nsamples,) -> (1, Nsamples)
        speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
        # lengths: (1,)
        lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
        if transcript is None:
            batch = {"speech": speech, "speech_lengths": lengths}
            logging.info("speech length: " + str(speech.size(1)))
        else:
            transcript = transcript.unsqueeze(0).to(getattr(torch, self.dtype))
            # lengths: (1,)
            transcript_lengths = transcript.new_full(
                [1], dtype=torch.long, fill_value=transcript.size(1)
            )
            # print(text)
            # print(text_lengths)
            batch = {
                "speech": speech,
                "speech_lengths": lengths,
                "transcript_pad": transcript,
                "transcript_pad_lens": transcript_lengths,
            }
        # a. To device
        batch = to_device(batch, device=self.device)
        # b. Forward Encoder
        enc, _ = self.asr_model.encode(**batch)
        if isinstance(enc, tuple):
            enc = enc[0]
        assert len(enc) == 1, len(enc)
        # c. Passed the encoder result and the beam search
        if self.beam_search_transducer:
            logging.info("encoder output length: " + str(enc[0].shape[0]))
            nbest_hyps = self.beam_search_transducer(enc[0])
            best = nbest_hyps[0]
            logging.info(f"total log probability: {best.score:.2f}")
            logging.info(
                f"normalized log probability: {best.score / len(best.yseq):.2f}"
            )
            logging.info(
                "best hypo: " + "".join(self.converter.ids2tokens(best.yseq[1:])) + "\n"
            )
        else:
            nbest_hyps = self.beam_search(
                x=enc[0], maxlenratio=self.maxlenratio, minlenratio=self.minlenratio
            )
        nbest_hyps = nbest_hyps[: self.nbest]
        results = []
        for hyp in nbest_hyps:
            assert isinstance(hyp, (Hypothesis, TransHypothesis)), type(hyp)
            # remove sos/eos and get results
            # (transducer hypotheses carry no trailing eos, hence last_pos=None)
            last_pos = None if self.asr_model.use_transducer_decoder else -1
            if isinstance(hyp.yseq, list):
                token_int = hyp.yseq[1:last_pos]
            else:
                token_int = hyp.yseq[1:last_pos].tolist()
            # remove blank symbol id, which is assumed to be 0
            token_int = list(filter(lambda x: x != 0, token_int))
            # Change integer-ids to tokens
            token = self.converter.ids2tokens(token_int)
            if self.tokenizer is not None:
                text = self.tokenizer.tokens2text(token)
            else:
                text = None
            results.append((text, token, token_int, hyp))
        assert check_return_type(results)
        return results
    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build Speech2Understand instance from the pretrained model.
        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
        Returns:
            Speech2Understand: Speech2Understand instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            # Downloaded paths overwrite slu_train_config / slu_model_file kwargs.
            kwargs.update(**d.download_and_unpack(model_tag))
        return Speech2Understand(**kwargs)
def inference(
    output_dir: str,
    maxlenratio: float,
    minlenratio: float,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    ctc_weight: float,
    lm_weight: float,
    ngram_weight: float,
    penalty: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    slu_train_config: Optional[str],
    slu_model_file: Optional[str],
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    ngram_file: Optional[str],
    model_tag: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    transducer_conf: Optional[dict],
    streaming: bool,
    quantize_asr_model: bool,
    quantize_lm: bool,
    quantize_modules: List[str],
    quantize_dtype: str,
):
    """Run SLU decoding over a dataset and write n-best results under output_dir.

    Only batch_size == 1 and single-GPU (or CPU) decoding are supported;
    word-level LMs are not implemented. Per-utterance tokens, token ids,
    scores and text are written via DatadirWriter.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if word_lm_train_config is not None:
        raise NotImplementedError("Word LM is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build speech2understand
    speech2understand_kwargs = dict(
        slu_train_config=slu_train_config,
        slu_model_file=slu_model_file,
        transducer_conf=transducer_conf,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        ngram_file=ngram_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        maxlenratio=maxlenratio,
        minlenratio=minlenratio,
        dtype=dtype,
        beam_size=beam_size,
        ctc_weight=ctc_weight,
        lm_weight=lm_weight,
        ngram_weight=ngram_weight,
        penalty=penalty,
        nbest=nbest,
        streaming=streaming,
        quantize_asr_model=quantize_asr_model,
        quantize_lm=quantize_lm,
        quantize_modules=quantize_modules,
        quantize_dtype=quantize_dtype,
    )
    speech2understand = Speech2Understand.from_pretrained(
        model_tag=model_tag,
        **speech2understand_kwargs,
    )
    # 3. Build data-iterator
    loader = SLUTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=SLUTask.build_preprocess_fn(
            speech2understand.asr_train_args, False
        ),
        collate_fn=SLUTask.build_collate_fn(speech2understand.asr_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 7 .Start for-loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # Drop the *_lengths entries and the batch axis (batch_size == 1).
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            # N-best list of (text, token, token_int, hyp_object)
            try:
                results = speech2understand(**batch)
            except TooShortUttError as e:
                # Emit a placeholder hypothesis so downstream files stay aligned.
                logging.warning(f"Utterance {keys} {e}")
                hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
                results = [[" ", ["<space>"], [2], hyp]] * nbest
            # Only supporting batch_size==1
            key = keys[0]
            for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
                # Create a directory: outdir/{n}best_recog
                ibest_writer = writer[f"{n}best_recog"]
                # Write the result to each file
                ibest_writer["token"][key] = " ".join(token)
                ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                ibest_writer["score"][key] = str(hyp.score)
                if text is not None:
                    ibest_writer["text"][key] = text
def get_parser():
    """Build the command-line argument parser for SLU decoding.

    Returns:
        config_argparse.ArgumentParser: parser whose parsed arguments match
        the signature of :func:`inference`.
    """
    parser = config_argparse.ArgumentParser(
        # Fixed: this script performs SLU decoding; the description previously
        # said "ASR Decoding" (copy-paste from asr_inference).
        description="SLU Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--slu_train_config",
        type=str,
        help="SLU training configuration",
    )
    group.add_argument(
        "--slu_model_file",
        type=str,
        help="SLU model parameter file",
    )
    group.add_argument(
        "--lm_train_config",
        type=str,
        help="LM training configuration",
    )
    group.add_argument(
        "--lm_file",
        type=str,
        help="LM parameter file",
    )
    group.add_argument(
        "--word_lm_train_config",
        type=str,
        help="Word LM training configuration",
    )
    group.add_argument(
        "--word_lm_file",
        type=str,
        help="Word LM parameter file",
    )
    group.add_argument(
        "--ngram_file",
        type=str,
        help="N-gram parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )
    group = parser.add_argument_group("Quantization related")
    group.add_argument(
        "--quantize_asr_model",
        type=str2bool,
        default=False,
        help="Apply dynamic quantization to ASR model.",
    )
    group.add_argument(
        "--quantize_lm",
        type=str2bool,
        default=False,
        help="Apply dynamic quantization to LM.",
    )
    group.add_argument(
        "--quantize_modules",
        type=str,
        nargs="*",
        default=["Linear"],
        help="""List of modules to be dynamically quantized.
        E.g.: --quantize_modules=[Linear,LSTM,GRU].
        Each specified module should be an attribute of 'torch.nn', e.g.:
        torch.nn.Linear, torch.nn.LSTM, torch.nn.GRU, ...""",
    )
    group.add_argument(
        "--quantize_dtype",
        type=str,
        default="qint8",
        choices=["float16", "qint8"],
        help="Dtype for dynamic quantization.",
    )
    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    group.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain max output length. "
        "If maxlenratio=0.0 (default), it uses a end-detect "
        "function "
        "to automatically find maximum hypothesis lengths."
        "If maxlenratio<0.0, its absolute value is interpreted"
        "as a constant max output length",
    )
    group.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    group.add_argument(
        "--ctc_weight",
        type=float,
        default=0.5,
        help="CTC weight in joint decoding",
    )
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")
    group.add_argument("--streaming", type=str2bool, default=False)
    group.add_argument(
        "--transducer_conf",
        default=None,
        help="The keyword arguments for transducer beam search.",
    )
    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for SLU model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )
    return parser
def main(cmd=None):
    """Parse command-line arguments and run SLU inference."""
    print(get_commandline_args(), file=sys.stderr)
    parser = get_parser()
    args = parser.parse_args(cmd)
    kwargs = vars(args)
    # "config" is handled by config_argparse and is not an inference() argument.
    kwargs.pop("config", None)
    inference(**kwargs)
if __name__ == "__main__":
    main()
| 23,605 | 32.578947 | 88 | py |
espnet | espnet-master/espnet2/bin/asr_inference_maskctc.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.maskctc_model import MaskCTCInference
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.asr import ASRTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.beam_search import Hypothesis
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.utils.cli_utils import get_commandline_args
class Speech2Text:
    """Speech2Text class
    Examples:
        >>> import soundfile
        >>> speech2text = Speech2Text("asr_config.yml", "asr.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> speech2text(audio)
        [(text, token, token_int, hypothesis object), ...]
    """
    def __init__(
        self,
        asr_train_config: Union[Path, str],
        asr_model_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        batch_size: int = 1,
        dtype: str = "float32",
        maskctc_n_iterations: int = 10,
        maskctc_threshold_probability: float = 0.99,
    ):
        """Load the ASR model and build the Mask-CTC inference module."""
        assert check_argument_types()
        # 1. Build ASR model
        asr_model, asr_train_args = ASRTask.build_model_from_file(
            asr_train_config, asr_model_file, device
        )
        asr_model.to(dtype=getattr(torch, dtype)).eval()
        token_list = asr_model.token_list
        # Non-autoregressive Mask-CTC decoding wrapper around the model.
        s2t = MaskCTCInference(
            asr_model=asr_model,
            n_iterations=maskctc_n_iterations,
            threshold_probability=maskctc_threshold_probability,
        )
        s2t.to(device=device, dtype=getattr(torch, dtype)).eval()
        # 2. [Optional] Build Text converter: e.g. bpe-sym -> Text
        if token_type is None:
            token_type = asr_train_args.token_type
        if bpemodel is None:
            bpemodel = asr_train_args.bpemodel
        if token_type is None:
            tokenizer = None
        elif token_type == "bpe":
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        converter = TokenIDConverter(token_list=token_list)
        logging.info(f"Text tokenizer: {tokenizer}")
        self.asr_model = asr_model
        self.asr_train_args = asr_train_args
        self.s2t = s2t
        self.converter = converter
        self.tokenizer = tokenizer
        self.device = device
        self.dtype = dtype
    @torch.no_grad()
    def __call__(
        self, speech: Union[torch.Tensor, np.ndarray]
    ) -> List[Tuple[Optional[str], List[str], List[int], Hypothesis]]:
        """Inference
        Args:
            speech: Input speech data (1-D waveform; a batch axis is added here)
        Returns:
            text, token, token_int, hyp
        """
        assert check_argument_types()
        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)
        # data: (Nsamples,) -> (1, Nsamples)
        speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
        # lengths: (1,)
        lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
        batch = {"speech": speech, "speech_lengths": lengths}
        # a. To device
        batch = to_device(batch, device=self.device)
        # b. Forward Encoder
        enc, _ = self.asr_model.encode(**batch)
        if isinstance(enc, tuple):
            enc = enc[0]
        assert len(enc) == 1, len(enc)
        # c. Passed the encoder result and the inference algorithm
        hyp = self.s2t(enc[0])
        assert isinstance(hyp, Hypothesis), type(hyp)
        # remove sos/eos and get results
        token_int = hyp.yseq[1:-1].tolist()
        # remove blank symbol id, which is assumed to be 0
        token_int = list(filter(lambda x: x != 0, token_int))
        # Change integer-ids to tokens
        token = self.converter.ids2tokens(token_int)
        if self.tokenizer is not None:
            text = self.tokenizer.tokens2text(token)
        else:
            text = None
        # Mask-CTC produces a single hypothesis (no n-best list).
        results = [(text, token, token_int, hyp)]
        assert check_return_type(results)
        return results
    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build Speech2Text instance from the pretrained model.
        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
        Returns:
            Speech2Text: Speech2Text instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            # Downloaded paths overwrite asr_train_config / asr_model_file kwargs.
            kwargs.update(**d.download_and_unpack(model_tag))
        return Speech2Text(**kwargs)
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    asr_train_config: str,
    asr_model_file: str,
    model_tag: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    maskctc_n_iterations: int,
    maskctc_threshold_probability: float,
):
    """Run Mask-CTC ASR decoding over a dataset and write 1-best results.

    Only batch_size == 1 and single-GPU (or CPU) decoding are supported.
    Per-utterance tokens, token ids, scores and text are written to
    output_dir/1best_recog via DatadirWriter.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build speech2text
    speech2text_kwargs = dict(
        asr_train_config=asr_train_config,
        asr_model_file=asr_model_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        batch_size=batch_size,
        dtype=dtype,
        maskctc_n_iterations=maskctc_n_iterations,
        maskctc_threshold_probability=maskctc_threshold_probability,
    )
    speech2text = Speech2Text.from_pretrained(
        model_tag=model_tag,
        **speech2text_kwargs,
    )
    # 3. Build data-iterator
    loader = ASRTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False),
        collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 7 .Start for-loop
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # Drop the *_lengths entries and the batch axis (batch_size == 1).
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            try:
                results = speech2text(**batch)
            except TooShortUttError as e:
                # Emit a placeholder hypothesis so downstream files stay aligned.
                logging.warning(f"Utterance {keys} {e}")
                hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
                results = [[" ", ["<space>"], [2], hyp]]
            # Only supporting batch_size==1
            key = keys[0]
            (text, token, token_int, hyp) = results[0]
            # Create a directory: outdir/{n}best_recog
            ibest_writer = writer["1best_recog"]
            # Write the result to each file
            ibest_writer["token"][key] = " ".join(token)
            ibest_writer["token_int"][key] = " ".join(map(str, token_int))
            ibest_writer["score"][key] = str(hyp.score)
            if text is not None:
                ibest_writer["text"][key] = text
def get_parser():
    """Build the command-line argument parser for Mask-CTC ASR decoding.

    Returns:
        config_argparse.ArgumentParser: parser whose parsed arguments match
        the signature of :func:`inference`.
    """
    parser = config_argparse.ArgumentParser(
        description="ASR Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group = parser.add_argument_group("The model configuration related")
    group.add_argument("--asr_train_config", type=str, required=True)
    group.add_argument("--asr_model_file", type=str, required=True)
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )
    group = parser.add_argument_group("Decoding related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--maskctc_n_iterations", type=int, default=10)
    group.add_argument("--maskctc_threshold_probability", type=float, default=0.99)
    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for ASR model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )
    return parser
def main(cmd=None):
    """Parse command-line arguments and run Mask-CTC ASR inference."""
    print(get_commandline_args(), file=sys.stderr)
    parser = get_parser()
    args = parser.parse_args(cmd)
    kwargs = vars(args)
    # "config" is handled by config_argparse and is not an inference() argument.
    kwargs.pop("config", None)
    inference(**kwargs)
if __name__ == "__main__":
    main()
| 11,952 | 30.70557 | 85 | py |
espnet | espnet-master/espnet2/bin/asr_inference_k2.py | #!/usr/bin/env python3
import argparse
import datetime
import logging
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import k2
import numpy as np
import torch
import yaml
from typeguard import check_argument_types, check_return_type
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.fst.lm_rescore import nbest_am_lm_scores
from espnet2.tasks.asr import ASRTask
from espnet2.tasks.lm import LMTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
def indices_to_split_size(indices, total_elements: Optional[int] = None):
    """Convert split indices into chunk sizes for ``torch.split``.

    During decoding, the api torch.tensor_split should be used.
    However, torch.tensor_split is only available with pytorch >= 1.8.0.
    So torch.split is used to pass ci with pytorch < 1.8.0.
    This function is used to prepare input for torch.split.

    Args:
        indices: Monotonically increasing split boundaries; a leading 0 is
            prepended if missing.
        total_elements: If given and the computed sizes do not already sum to
            it, a final chunk is appended so that the sizes cover all elements.

    Returns:
        List of chunk sizes usable as ``split_size_or_sections``.
    """
    if indices[0] != 0:
        indices = [0] + indices
    split_size = [indices[i] - indices[i - 1] for i in range(1, len(indices))]
    # Compute the covered total once instead of summing twice.
    covered = sum(split_size)
    if total_elements is not None and covered != total_elements:
        split_size.append(total_elements - covered)
    return split_size
# copied from:
# https://github.com/k2-fsa/snowfall/blob/master/snowfall/training/ctc_graph.py#L13
def build_ctc_topo(tokens: List[int]) -> k2.Fsa:
    """Build CTC topology.

    A token which appears once on the right side (i.e. olabels) may
    appear multiple times on the left side (ilabels), possibly with
    epsilons in between.
    When 0 appears on the left side, it represents the blank symbol;
    when it appears on the right side, it indicates an epsilon. That
    is, 0 has two meanings here.
    Args:
        tokens:
            A list of tokens, e.g., phones, characters, etc.
    Returns:
        Returns an FST that converts repeated tokens to a single token.
    """
    assert 0 in tokens, "We assume 0 is ID of the blank symbol"
    num_states = len(tokens)
    final_state = num_states
    # Accumulate arc lines in a list and join once, instead of repeated
    # string concatenation.
    arc_lines = []
    for src in range(num_states):
        for dst in range(num_states):
            if src == dst:
                # Self-loop emits epsilon: collapses repeated tokens.
                arc_lines.append(f"{src} {src} {tokens[src]} 0 0.0")
            else:
                arc_lines.append(f"{src} {dst} {tokens[dst]} {tokens[dst]} 0.0")
        # Every state can reach the final state on the -1 (end) label.
        arc_lines.append(f"{src} {final_state} -1 -1 0.0")
    arcs = "\n".join(arc_lines) + f"\n{final_state}"
    fsa = k2.arc_sort(k2.Fsa.from_str(arcs, num_aux_labels=1))
    return fsa
# Modified from: https://github.com/k2-fsa/snowfall/blob/master/snowfall/common.py#L309
def get_texts(best_paths: k2.Fsa) -> List[List[int]]:
    """Extract the texts from the best-path FSAs.
    Args:
        best_paths: a k2.Fsa with best_paths.arcs.num_axes() == 3, i.e.
            containing multiple FSAs, which is expected to be the result
            of k2.shortest_path (otherwise the returned values won't
            be meaningful). Must have the 'aux_labels' attribute, as
            a ragged tensor.
    Return:
        Returns a list of lists of int, containing the label sequences we
        decoded.
    """
    # aux_labels may be stored either as a k2.RaggedTensor or as a flat
    # tensor; the two branches normalize both cases to a 2-axis ragged tensor.
    # remove any 0's or -1's (there should be no 0's left but may be -1's.)
    if isinstance(best_paths.aux_labels, k2.RaggedTensor):
        aux_labels = best_paths.aux_labels.remove_values_leq(0)
        aux_shape = best_paths.arcs.shape().compose(aux_labels.shape())
        # remove the states and arcs axes.
        aux_shape = aux_shape.remove_axis(1)
        aux_shape = aux_shape.remove_axis(1)
        aux_labels = k2.RaggedTensor(aux_shape, aux_labels.values())
    else:
        # remove axis corresponding to states.
        aux_shape = best_paths.arcs.shape().remove_axis(1)
        aux_labels = k2.RaggedTensor(aux_shape, best_paths.aux_labels)
        # remove 0's and -1's.
        aux_labels = aux_labels.remove_values_leq(0)
    # After normalization there must be exactly [fsa][label] axes.
    assert aux_labels.num_axes == 2
    return aux_labels.tolist()
class k2Speech2Text:
    """Speech2Text class using k2 FSA-based CTC decoding.

    Examples:
        >>> import soundfile
        >>> speech2text = k2Speech2Text("asr_config.yml", "asr.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> speech = np.expand_dims(audio, 0)  # shape: [batch_size, speech_length]
        >>> speech_lengths = np.array([audio.shape[0]])  # shape: [batch_size]
        >>> batch = {"speech": speech, "speech_lengths": speech_lengths}
        >>> speech2text(batch)
        [(text, token, token_int, score), ...]
    """

    def __init__(
        self,
        asr_train_config: Union[Path, str],
        asr_model_file: Union[Path, str] = None,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        maxlenratio: float = 0.0,
        minlenratio: float = 0.0,
        batch_size: int = 1,
        dtype: str = "float32",
        beam_size: int = 8,
        ctc_weight: float = 0.5,
        lm_weight: float = 1.0,
        penalty: float = 0.0,
        nbest: int = 1,
        streaming: bool = False,
        search_beam_size: int = 20,
        output_beam_size: int = 20,
        min_active_states: int = 30,
        max_active_states: int = 10000,
        blank_bias: float = 0.0,
        lattice_weight: float = 1.0,
        is_ctc_decoding: bool = True,
        lang_dir: Optional[str] = None,
        use_fgram_rescoring: bool = False,
        use_nbest_rescoring: bool = False,
        am_weight: float = 1.0,
        decoder_weight: float = 0.5,
        nnlm_weight: float = 1.0,
        num_paths: int = 1000,
        nbest_batch_size: int = 500,
        nll_batch_size: int = 100,
    ):
        assert check_argument_types()

        # 1. Build ASR model
        asr_model, asr_train_args = ASRTask.build_model_from_file(
            asr_train_config, asr_model_file, device
        )
        asr_model.to(dtype=getattr(torch, dtype)).eval()

        token_list = asr_model.token_list

        # 2. Build Language model
        # NOTE(review): self.lm is only assigned when lm_train_config is
        # given; n-best rescoring (which reads self.lm) presumably requires
        # an LM config — confirm against callers.
        if lm_train_config is not None:
            lm, lm_train_args = LMTask.build_model_from_file(
                lm_train_config, lm_file, device
            )
            self.lm = lm

        self.is_ctc_decoding = is_ctc_decoding
        self.use_fgram_rescoring = use_fgram_rescoring
        self.use_nbest_rescoring = use_nbest_rescoring

        assert self.is_ctc_decoding, "Currently, only ctc_decoding graph is supported."
        if self.is_ctc_decoding:
            # The decoding graph is a CTC topology over the full vocabulary
            # (one state per token; 0 is the blank id, see build_ctc_topo).
            self.decode_graph = k2.arc_sort(
                build_ctc_topo(list(range(len(token_list))))
            )

        self.decode_graph = self.decode_graph.to(device)

        # 3. [Optional] Build text converter: token ids -> text.
        # Fall back to the training-time token_type/bpemodel when not given.
        if token_type is None:
            token_type = asr_train_args.token_type
        if bpemodel is None:
            bpemodel = asr_train_args.bpemodel

        if token_type is None:
            tokenizer = None
        elif token_type == "bpe":
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        converter = TokenIDConverter(token_list=token_list)
        logging.info(f"Text tokenizer: {tokenizer}")
        logging.info(f"Running on : {device}")

        self.asr_model = asr_model
        self.asr_train_args = asr_train_args
        self.converter = converter
        self.tokenizer = tokenizer
        self.device = device
        self.dtype = dtype
        # k2.intersect_dense_pruned search parameters.
        self.search_beam_size = search_beam_size
        self.output_beam_size = output_beam_size
        self.min_active_states = min_active_states
        self.max_active_states = max_active_states
        self.blank_bias = blank_bias
        self.lattice_weight = lattice_weight
        # n-best rescoring interpolation weights and batching parameters.
        self.am_weight = am_weight
        self.decoder_weight = decoder_weight
        self.nnlm_weight = nnlm_weight
        self.num_paths = num_paths
        self.nbest_batch_size = nbest_batch_size
        self.nll_batch_size = nll_batch_size

    @torch.no_grad()
    def __call__(
        self, batch: Dict[str, Union[torch.Tensor, np.ndarray]]
    ) -> List[Tuple[Optional[str], List[str], List[int], float]]:
        """Inference over one batch.

        Args:
            batch: Input speech data ("speech") and corresponding
                lengths ("speech_lengths").

        Returns:
            A list of (text, token, token_int, score) tuples, one per
            utterance in the batch.
        """
        assert check_argument_types()

        if isinstance(batch["speech"], np.ndarray):
            batch["speech"] = torch.tensor(batch["speech"])
        if isinstance(batch["speech_lengths"], np.ndarray):
            batch["speech_lengths"] = torch.tensor(batch["speech_lengths"])

        # a. To device
        batch = to_device(batch, device=self.device)

        # b. Forward Encoder
        # enc: [N, T, C]
        enc, encoder_out_lens = self.asr_model.encode(**batch)

        # logp_encoder_output: [N, T, C]
        logp_encoder_output = torch.nn.functional.log_softmax(
            self.asr_model.ctc.ctc_lo(enc), dim=2
        )

        # It maybe useful to tune blank_bias.
        # The valid range of blank_bias is [-inf, 0]
        logp_encoder_output[:, :, 0] += self.blank_bias

        # Build supervision segments: one row per utterance with
        # (sequence index, start frame, number of frames), all int32.
        batch_size = encoder_out_lens.size(0)
        sequence_idx = torch.arange(0, batch_size).unsqueeze(0).t().to(torch.int32)
        start_frame = torch.zeros([batch_size], dtype=torch.int32).unsqueeze(0).t()
        num_frames = encoder_out_lens.cpu().unsqueeze(0).t().to(torch.int32)
        supervision_segments = torch.cat([sequence_idx, start_frame, num_frames], dim=1)
        supervision_segments = supervision_segments.to(torch.int32)

        # An introduction to DenseFsaVec:
        # https://k2-fsa.github.io/k2/core_concepts/index.html#dense-fsa-vector
        # It could be viewed as a fsa-type lopg_encoder_output,
        # whose weight on the arcs are initialized with logp_encoder_output.
        # The goal of converting tensor-type to fsa-type is using
        # fsa related functions in k2. e.g. k2.intersect_dense_pruned below
        dense_fsa_vec = k2.DenseFsaVec(logp_encoder_output, supervision_segments)

        # The term "intersect" is similar to "compose" in k2.
        # The differences is are:
        # for "compose" functions, the composition involves
        # mathcing output label of a.fsa and input label of b.fsa
        # while for "intersect" functions, the composition involves
        # matching input label of a.fsa and input label of b.fsa
        # Actually, in compose functions, b.fsa is inverted and then
        # a.fsa and inv_b.fsa are intersected together.
        # For difference between compose and interset:
        # https://github.com/k2-fsa/k2/blob/master/k2/python/k2/fsa_algo.py#L308
        # For definition of k2.intersect_dense_pruned:
        # https://github.com/k2-fsa/k2/blob/master/k2/python/k2/autograd.py#L648
        lattices = k2.intersect_dense_pruned(
            self.decode_graph,
            dense_fsa_vec,
            self.search_beam_size,
            self.output_beam_size,
            self.min_active_states,
            self.max_active_states,
        )

        # lattices.scores is the sum of decode_graph.scores(a.k.a. lm weight) and
        # dense_fsa_vec.scores(a.k.a. am weight) on related arcs.
        # For ctc decoding graph, lattices.scores only store am weight
        # since the decoder_graph only define the ctc topology and
        # has no lm weight on its arcs.
        # While for 3-gram decoding, whose graph is converted from language models,
        # lattice.scores contains both am weights and lm weights
        #
        # It maybe useful to tune lattice.scores
        # The valid range of lattice_weight is [0, inf)
        # The lattice_weight will affect the search of k2.random_paths
        lattices.scores *= self.lattice_weight

        results = []
        if self.use_nbest_rescoring:
            # Rescore n-best paths from the lattice with the attention
            # decoder and the neural LM, then pick the best path per
            # sequence by interpolated total score.
            (
                am_scores,
                lm_scores,
                token_ids,
                new2old,
                path_to_seq_map,
                seq_to_path_splits,
            ) = nbest_am_lm_scores(
                lattices, self.num_paths, self.device, self.nbest_batch_size
            )

            # Pad every hypothesis to the max length with ignore_id so
            # they can be stacked into one [batch, max_token_length] tensor.
            ys_pad_lens = torch.tensor([len(hyp) for hyp in token_ids]).to(self.device)
            max_token_length = max(ys_pad_lens)
            ys_pad_list = []
            for hyp in token_ids:
                ys_pad_list.append(
                    torch.cat(
                        [
                            torch.tensor(hyp, dtype=torch.long),
                            torch.tensor(
                                [self.asr_model.ignore_id]
                                * (max_token_length.item() - len(hyp)),
                                dtype=torch.long,
                            ),
                        ]
                    )
                )

            ys_pad = (
                torch.stack(ys_pad_list).to(torch.long).to(self.device)
            )  # [batch, max_token_length]

            # Replicate encoder outputs so each n-best path sees the
            # encoder output of the sequence it came from.
            encoder_out = enc.index_select(0, path_to_seq_map.to(torch.long)).to(
                self.device
            )  # [batch, T, dim]
            encoder_out_lens = encoder_out_lens.index_select(
                0, path_to_seq_map.to(torch.long)
            ).to(
                self.device
            )  # [batch]

            # Negative NLL == log-likelihood score from the decoder.
            decoder_scores = -self.asr_model.batchify_nll(
                encoder_out, encoder_out_lens, ys_pad, ys_pad_lens, self.nll_batch_size
            )

            # padded_value for nnlm is 0
            ys_pad[ys_pad == self.asr_model.ignore_id] = 0
            nnlm_nll, x_lengths = self.lm.batchify_nll(
                ys_pad, ys_pad_lens, self.nll_batch_size
            )
            nnlm_scores = -nnlm_nll.sum(dim=1)

            batch_tot_scores = (
                self.am_weight * am_scores
                + self.decoder_weight * decoder_scores
                + self.nnlm_weight * nnlm_scores
            )
            split_size = indices_to_split_size(
                seq_to_path_splits.tolist(), total_elements=batch_tot_scores.size(0)
            )
            # Regroup per-path scores back into per-sequence chunks.
            batch_tot_scores = torch.split(
                batch_tot_scores,
                split_size,
            )

            hyps = []
            scores = []
            processed_seqs = 0
            for tot_scores in batch_tot_scores:
                if tot_scores.nelement() == 0:
                    # the last element by torch.tensor_split may be empty
                    # e.g.
                    # torch.tensor_split(torch.tensor([1,2,3,4]), torch.tensor([2,4]))
                    # (tensor([1, 2]), tensor([3, 4]), tensor([], dtype=torch.int64))
                    break
                best_seq_idx = processed_seqs + torch.argmax(tot_scores)

                assert best_seq_idx < len(token_ids)
                best_token_seqs = token_ids[best_seq_idx]
                processed_seqs += tot_scores.nelement()
                hyps.append(best_token_seqs)
                scores.append(tot_scores.max().item())

            assert len(hyps) == len(split_size)
        else:
            # Plain 1-best search over the lattice.
            best_paths = k2.shortest_path(lattices, use_double_scores=True)
            scores = best_paths.get_tot_scores(
                use_double_scores=True, log_semiring=False
            ).tolist()
            hyps = get_texts(best_paths)

        assert len(scores) == len(hyps)
        for token_int, score in zip(hyps, scores):
            # For decoding methods nbest_rescoring and ctc_decoding
            # hyps stores token_index, which is lattice.labels.

            # convert token_id to text with self.tokenizer
            token = self.converter.ids2tokens(token_int)

            assert self.tokenizer is not None
            text = self.tokenizer.tokens2text(token)
            results.append((text, token, token_int, score))

        assert check_return_type(results)
        return results

    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build k2Speech2Text instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
                When given, the downloaded config/model paths overwrite
                the corresponding entries in ``kwargs``.

        Returns:
            Speech2Text: Speech2Text instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader

            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))

        return k2Speech2Text(**kwargs)
def inference(
    output_dir: str,
    maxlenratio: float,
    minlenratio: float,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    ctc_weight: float,
    lm_weight: float,
    penalty: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    asr_train_config: Optional[str],
    asr_model_file: Optional[str],
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    model_tag: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    streaming: bool,
    is_ctc_decoding: bool,
    use_nbest_rescoring: bool,
    num_paths: int,
    nbest_batch_size: int,
    nll_batch_size: int,
    k2_config: Optional[str],
):
    """Run k2-based ASR decoding over a dataset and write results.

    Builds a ``k2Speech2Text`` instance from the given configs (merged
    with the YAML file at ``k2_config``), iterates over the data given by
    ``data_path_and_name_and_type``, and writes 1-best token/text/score
    files under ``output_dir``.

    Raises:
        NotImplementedError: if ``ngpu > 1`` (only single-GPU decoding
            is supported).
    """
    assert is_ctc_decoding, "Currently, only ctc_decoding graph is supported."
    assert check_argument_types()
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set random-seed
    set_all_random_seed(seed)

    # Options from the k2 YAML config are merged into (and may extend)
    # the keyword arguments below.
    with open(k2_config) as k2_config_file:
        dict_k2_config = yaml.safe_load(k2_config_file)

    # 2. Build speech2text
    speech2text_kwargs = dict(
        asr_train_config=asr_train_config,
        asr_model_file=asr_model_file,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        maxlenratio=maxlenratio,
        minlenratio=minlenratio,
        dtype=dtype,
        beam_size=beam_size,
        ctc_weight=ctc_weight,
        lm_weight=lm_weight,
        penalty=penalty,
        nbest=nbest,
        streaming=streaming,
        is_ctc_decoding=is_ctc_decoding,
        use_nbest_rescoring=use_nbest_rescoring,
        num_paths=num_paths,
        nbest_batch_size=nbest_batch_size,
        nll_batch_size=nll_batch_size,
    )
    speech2text_kwargs = dict(**speech2text_kwargs, **dict_k2_config)
    speech2text = k2Speech2Text.from_pretrained(
        model_tag=model_tag,
        **speech2text_kwargs,
    )

    # 3. Build data-iterator
    loader = ASRTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False),
        collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )

    with DatadirWriter(output_dir) as writer:
        start_decoding_time = datetime.datetime.now()
        for batch_idx, (keys, batch) in enumerate(loader):
            if batch_idx % 10 == 0:
                logging.info(f"Processing {batch_idx} batch")
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"

            # 1-best list of (text, token, token_int)
            results = speech2text(batch)
            for key_idx, (text, token, token_int, score) in enumerate(results):
                key = keys[key_idx]
                best_writer = writer["1best_recog"]
                # Write the result to each file
                best_writer["token"][key] = " ".join(token)
                best_writer["token_int"][key] = " ".join(map(str, token_int))
                best_writer["score"][key] = str(score)

                if text is not None:
                    best_writer["text"][key] = text
        end_decoding_time = datetime.datetime.now()
        decoding_duration = end_decoding_time - start_decoding_time
        logging.info(f"Decoding duration is {decoding_duration.seconds} seconds")
def get_parser():
    """Build the argument parser for k2-based ASR decoding.

    Returns:
        config_argparse.ArgumentParser: parser whose parsed namespace
        matches the keyword arguments of :func:`inference`.
    """
    parser = config_argparse.ArgumentParser(
        description="ASR Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )

    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)

    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--asr_train_config",
        type=str,
        help="ASR training configuration",
    )
    group.add_argument(
        "--asr_model_file",
        type=str,
        help="ASR model parameter file",
    )
    group.add_argument(
        "--lm_train_config",
        type=str,
        help="LM training configuration",
    )
    group.add_argument(
        "--lm_file",
        type=str,
        help="LM parameter file",
    )
    group.add_argument(
        "--word_lm_train_config",
        type=str,
        help="Word LM training configuration",
    )
    group.add_argument(
        "--word_lm_file",
        type=str,
        help="Word LM parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )

    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    group.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain max output length. "
        "If maxlenratio=0.0 (default), it uses a end-detect "
        "function "
        "to automatically find maximum hypothesis lengths",
    )
    group.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    group.add_argument(
        "--ctc_weight",
        type=float,
        default=0.5,
        help="CTC weight in joint decoding",
    )
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument("--streaming", type=str2bool, default=False)

    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for ASR model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )

    # k2-specific decoding options.
    group.add_argument(
        "--is_ctc_decoding",
        type=str2bool,
        default=True,
        help="Use ctc topology as decoding graph",
    )
    group.add_argument("--use_nbest_rescoring", type=str2bool, default=False)
    group.add_argument(
        "--num_paths",
        type=int,
        default=1000,
        help="The third argument for k2.random_paths",
    )
    group.add_argument(
        "--nbest_batch_size",
        type=int,
        default=500,
        help="batchify nbest list when computing am/lm scores to avoid OOM",
    )
    group.add_argument(
        "--nll_batch_size",
        type=int,
        default=100,
        help="batch_size when computing nll during nbest rescoring",
    )
    group.add_argument("--k2_config", type=str, help="Config file for decoding with k2")
    return parser
def main(cmd=None):
    """Entry point: parse command-line args and run k2 ASR decoding."""
    print(get_commandline_args(), file=sys.stderr)
    cli_args = get_parser().parse_args(cmd)
    options = vars(cli_args)
    # "config" is consumed by config_argparse itself and is not a
    # parameter of inference().
    options.pop("config", None)
    inference(**options)


if __name__ == "__main__":
    main()
| 26,605 | 34.054018 | 88 | py |
espnet | espnet-master/espnet2/bin/mt_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.lm import LMTask
from espnet2.tasks.mt import MTTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.beam_search import BeamSearch, Hypothesis
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.cli_utils import get_commandline_args
class Text2Text:
    """Text2Text class for machine-translation decoding.

    Examples:
        >>> text2text = Text2Text("mt_config.yml", "mt.pth")
        >>> text2text(src_text)
        [(text, token, token_int, hypothesis object), ...]
    """

    def __init__(
        self,
        mt_train_config: Union[Path, str] = None,
        mt_model_file: Union[Path, str] = None,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        ngram_scorer: str = "full",
        ngram_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        maxlenratio: float = 0.0,
        minlenratio: float = 0.0,
        batch_size: int = 1,
        dtype: str = "float32",
        beam_size: int = 20,
        ctc_weight: float = 0.5,
        lm_weight: float = 1.0,
        ngram_weight: float = 0.9,
        penalty: float = 0.0,
        nbest: int = 1,
    ):
        assert check_argument_types()

        # 1. Build MT model
        scorers = {}
        mt_model, mt_train_args = MTTask.build_model_from_file(
            mt_train_config, mt_model_file, device
        )
        mt_model.to(dtype=getattr(torch, dtype)).eval()

        decoder = mt_model.decoder
        # CTC prefix scorer is only added when joint CTC decoding is enabled.
        ctc = (
            CTCPrefixScorer(ctc=mt_model.ctc, eos=mt_model.eos)
            if ctc_weight != 0.0
            else None
        )
        token_list = mt_model.token_list
        scorers.update(
            decoder=decoder,
            ctc=ctc,
            length_bonus=LengthBonus(len(token_list)),
        )

        # 2. Build Language model
        if lm_train_config is not None:
            lm, lm_train_args = LMTask.build_model_from_file(
                lm_train_config, lm_file, device
            )
            scorers["lm"] = lm.lm

        # 3. Build ngram model
        if ngram_file is not None:
            if ngram_scorer == "full":
                from espnet.nets.scorers.ngram import NgramFullScorer

                ngram = NgramFullScorer(ngram_file, token_list)
            else:
                from espnet.nets.scorers.ngram import NgramPartScorer

                ngram = NgramPartScorer(ngram_file, token_list)
        else:
            ngram = None
        scorers["ngram"] = ngram

        # 4. Build BeamSearch object
        weights = dict(
            decoder=1.0 - ctc_weight,
            ctc=ctc_weight,
            lm=lm_weight,
            ngram=ngram_weight,
            length_bonus=penalty,
        )
        beam_search = BeamSearch(
            beam_size=beam_size,
            weights=weights,
            scorers=scorers,
            sos=mt_model.sos,
            eos=mt_model.eos,
            vocab_size=len(token_list),
            token_list=token_list,
            pre_beam_score_key=None if ctc_weight == 1.0 else "full",
        )
        # TODO(karita): make all scorers batchfied
        if batch_size == 1:
            # Switch to the batched implementation only when every full
            # scorer supports the batch interface.
            non_batch = [
                k
                for k, v in beam_search.full_scorers.items()
                if not isinstance(v, BatchScorerInterface)
            ]
            if len(non_batch) == 0:
                beam_search.__class__ = BatchBeamSearch
                logging.info("BatchBeamSearch implementation is selected.")
            else:
                logging.warning(
                    f"As non-batch scorers {non_batch} are found, "
                    f"fall back to non-batch implementation."
                )
        beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
        for scorer in scorers.values():
            if isinstance(scorer, torch.nn.Module):
                scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
        logging.info(f"Beam_search: {beam_search}")
        logging.info(f"Decoding device={device}, dtype={dtype}")

        # 4. [Optional] Build Text converter: e.g. bpe-sym -> Text
        if token_type is None:
            token_type = mt_train_args.token_type
        if bpemodel is None:
            bpemodel = mt_train_args.bpemodel

        if token_type is None:
            tokenizer = None
        elif token_type == "bpe":
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        converter = TokenIDConverter(token_list=token_list)
        logging.info(f"Text tokenizer: {tokenizer}")

        self.mt_model = mt_model
        self.mt_train_args = mt_train_args
        self.converter = converter
        self.tokenizer = tokenizer
        self.beam_search = beam_search
        self.maxlenratio = maxlenratio
        self.minlenratio = minlenratio
        self.device = device
        self.dtype = dtype
        self.nbest = nbest

    @torch.no_grad()
    def __call__(
        self, src_text: Union[torch.Tensor, np.ndarray]
    ) -> List[Tuple[Optional[str], List[str], List[int], Hypothesis]]:
        """Inference for one source-text sequence.

        Args:
            src_text: Input source token-id sequence, shape (Nsamples,).

        Returns:
            A list of up to ``nbest`` (text, token, token_int, hyp) tuples.
        """
        assert check_argument_types()

        # Input as audio signal
        if isinstance(src_text, np.ndarray):
            src_text = torch.tensor(src_text)

        # data: (Nsamples,) -> (1, Nsamples)
        src_text = src_text.unsqueeze(0).to(torch.long)
        # lengths: (1,)
        lengths = src_text.new_full([1], dtype=torch.long, fill_value=src_text.size(1))
        batch = {"src_text": src_text, "src_text_lengths": lengths}

        # a. To device
        batch = to_device(batch, device=self.device)

        # b. Forward Encoder
        enc, _ = self.mt_model.encode(**batch)

        # self-condition case
        if isinstance(enc, tuple):
            enc = enc[0]

        assert len(enc) == 1, len(enc)

        # c. Passed the encoder result and the beam search
        nbest_hyps = self.beam_search(
            x=enc[0], maxlenratio=self.maxlenratio, minlenratio=self.minlenratio
        )
        nbest_hyps = nbest_hyps[: self.nbest]

        results = []
        for hyp in nbest_hyps:
            assert isinstance(hyp, Hypothesis), type(hyp)

            # remove sos/eos and get results
            # token_int = hyp.yseq[1:-1].tolist()
            # TODO(sdalmia): check why the above line doesn't work
            token_int = hyp.yseq.tolist()
            token_int = list(filter(lambda x: x != self.mt_model.sos, token_int))
            token_int = list(filter(lambda x: x != self.mt_model.eos, token_int))

            # remove blank symbol id, which is assumed to be 0
            token_int = list(filter(lambda x: x != 0, token_int))

            # Change integer-ids to tokens
            token = self.converter.ids2tokens(token_int)

            if self.tokenizer is not None:
                text = self.tokenizer.tokens2text(token)
            else:
                text = None
            results.append((text, token, token_int, hyp))

        assert check_return_type(results)
        return results

    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build Text2Text instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
                When given, the downloaded config/model paths overwrite
                the corresponding entries in ``kwargs``.

        Returns:
            Text2Text: Text2Text instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader

            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))

        return Text2Text(**kwargs)
def inference(
    output_dir: str,
    maxlenratio: float,
    minlenratio: float,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    ctc_weight: float,
    lm_weight: float,
    ngram_weight: float,
    penalty: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    mt_train_config: Optional[str],
    mt_model_file: Optional[str],
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    ngram_file: Optional[str],
    model_tag: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
):
    """Run MT decoding over a dataset and write N-best results.

    Builds a ``Text2Text`` instance from the given configs, iterates over
    the data given by ``data_path_and_name_and_type``, and writes N-best
    token/text/score files under ``output_dir``.

    Raises:
        NotImplementedError: for batch decoding (``batch_size > 1``),
            word-LM configs, or multi-GPU decoding (``ngpu > 1``).
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if word_lm_train_config is not None:
        raise NotImplementedError("Word LM is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set random-seed
    set_all_random_seed(seed)

    # 2. Build text2text
    text2text_kwargs = dict(
        mt_train_config=mt_train_config,
        mt_model_file=mt_model_file,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        ngram_file=ngram_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        maxlenratio=maxlenratio,
        minlenratio=minlenratio,
        dtype=dtype,
        beam_size=beam_size,
        ctc_weight=ctc_weight,
        lm_weight=lm_weight,
        ngram_weight=ngram_weight,
        penalty=penalty,
        nbest=nbest,
    )
    text2text = Text2Text.from_pretrained(
        model_tag=model_tag,
        **text2text_kwargs,
    )

    # 3. Build data-iterator
    loader = MTTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=MTTask.build_preprocess_fn(text2text.mt_train_args, False),
        collate_fn=MTTask.build_collate_fn(text2text.mt_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )

    # 7 .Start for-loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # Drop the *_lengths entries and unbatch (batch_size==1).
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}

            # N-best list of (text, token, token_int, hyp_object)
            try:
                results = text2text(**batch)
            except TooShortUttError as e:
                # Emit a placeholder hypothesis so the writer output stays
                # aligned with the input keys.
                logging.warning(f"Utterance {keys} {e}")
                hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
                results = [[" ", ["<space>"], [2], hyp]] * nbest

            # Only supporting batch_size==1
            key = keys[0]
            for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
                # Create a directory: outdir/{n}best_recog
                ibest_writer = writer[f"{n}best_recog"]

                # Write the result to each file
                ibest_writer["token"][key] = " ".join(token)
                ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                ibest_writer["score"][key] = str(hyp.score)

                if text is not None:
                    ibest_writer["text"][key] = text
def get_parser():
    """Build the argument parser for MT decoding.

    Returns:
        config_argparse.ArgumentParser: parser whose parsed namespace
        matches the keyword arguments of :func:`inference`.
    """
    parser = config_argparse.ArgumentParser(
        description="MT Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )

    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)

    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--mt_train_config",
        type=str,
        help="ST training configuration",
    )
    group.add_argument(
        "--mt_model_file",
        type=str,
        help="MT model parameter file",
    )
    group.add_argument(
        "--lm_train_config",
        type=str,
        help="LM training configuration",
    )
    group.add_argument(
        "--lm_file",
        type=str,
        help="LM parameter file",
    )
    group.add_argument(
        "--word_lm_train_config",
        type=str,
        help="Word LM training configuration",
    )
    group.add_argument(
        "--word_lm_file",
        type=str,
        help="Word LM parameter file",
    )
    group.add_argument(
        "--ngram_file",
        type=str,
        help="N-gram parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )

    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    group.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain max output length. "
        "If maxlenratio=0.0 (default), it uses a end-detect "
        "function "
        "to automatically find maximum hypothesis lengths."
        "If maxlenratio<0.0, its absolute value is interpreted"
        "as a constant max output length",
    )
    group.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    group.add_argument(
        "--ctc_weight",
        type=float,
        default=0.0,
        help="CTC weight in joint decoding",
    )
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")

    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for ST model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )
    return parser
def main(cmd=None):
    """Entry point: parse command-line args and run MT decoding."""
    print(get_commandline_args(), file=sys.stderr)
    namespace = get_parser().parse_args(cmd)
    arg_dict = vars(namespace)
    # Drop "config": it belongs to config_argparse, not to inference().
    arg_dict.pop("config", None)
    inference(**arg_dict)


if __name__ == "__main__":
    main()
| 17,802 | 31.369091 | 87 | py |
espnet | espnet-master/espnet2/bin/lm_calc_perplexity.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from pathlib import Path
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import torch
from torch.nn.parallel import data_parallel
from typeguard import check_argument_types
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.lm import LMTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.forward_adaptor import ForwardAdaptor
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import float_or_none, str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
def calc_perplexity(
    output_dir: str,
    batch_size: int,
    dtype: str,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    train_config: Optional[str],
    model_file: Optional[str],
    log_base: Optional[float],
    allow_variable_data_keys: bool,
):
    """Compute per-utterance and corpus-level perplexity of a trained LM.

    Writes under ``output_dir``: "utt2ppl" (per-utterance perplexity),
    "utt2ntokens" (per-utterance token counts), "ppl" (corpus perplexity)
    and "base" (the logarithm base used; Napier's constant when
    ``log_base`` is None).
    """
    assert check_argument_types()
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set random-seed
    set_all_random_seed(seed)

    # 2. Build LM
    model, train_args = LMTask.build_model_from_file(train_config, model_file, device)
    # Wrape model to make model.nll() data-parallel
    wrapped_model = ForwardAdaptor(model, "nll")
    wrapped_model.to(dtype=getattr(torch, dtype)).eval()
    logging.info(f"Model:\n{model}")

    # 3. Build data-iterator
    loader = LMTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=LMTask.build_preprocess_fn(train_args, False),
        collate_fn=LMTask.build_collate_fn(train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )

    # 4. Start for-loop
    with DatadirWriter(output_dir) as writer:
        total_nll = 0.0
        total_ntokens = 0
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"

            with torch.no_grad():
                batch = to_device(batch, device)
                if ngpu <= 1:
                    # NOTE(kamo): data_parallel also should work with ngpu=1,
                    # but for debuggability it's better to keep this block.
                    nll, lengths = wrapped_model(**batch)
                else:
                    nll, lengths = data_parallel(
                        wrapped_model, (), range(ngpu), module_kwargs=batch
                    )

            assert _bs == len(nll) == len(lengths), (_bs, len(nll), len(lengths))
            # nll: (B, L) -> (B,)
            nll = nll.detach().cpu().numpy().sum(1)
            # lengths: (B,)
            lengths = lengths.detach().cpu().numpy()
            total_nll += nll.sum()
            total_ntokens += lengths.sum()

            for key, _nll, ntoken in zip(keys, nll, lengths):
                if log_base is None:
                    utt_ppl = np.exp(_nll / ntoken)
                else:
                    # Change of base: exp(x) == b ** (x / ln(b))
                    utt_ppl = log_base ** (_nll / ntoken / np.log(log_base))

                # Write PPL of each utts for debugging or analysis
                writer["utt2ppl"][key] = str(utt_ppl)
                writer["utt2ntokens"][key] = str(ntoken)

    if log_base is None:
        ppl = np.exp(total_nll / total_ntokens)
    else:
        ppl = log_base ** (total_nll / total_ntokens / np.log(log_base))

    with (Path(output_dir) / "ppl").open("w", encoding="utf-8") as f:
        f.write(f"{ppl}\n")
    with (Path(output_dir) / "base").open("w", encoding="utf-8") as f:
        if log_base is None:
            _log_base = np.e
        else:
            _log_base = log_base
        f.write(f"{_log_base}\n")
    logging.info(f"PPL={ppl}")
def get_parser():
    """Build the argument parser for LM perplexity calculation."""
    parser = config_argparse.ArgumentParser(
        description="Calc perplexity",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )

    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    parser.add_argument(
        "--log_base",
        type=float_or_none,
        default=None,
        help="The base of logarithm for Perplexity. "
        "If None, napier's constant is used.",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)

    group = parser.add_argument_group("The model configuration related")
    group.add_argument("--train_config", type=str)
    group.add_argument("--model_file", type=str)

    return parser
def main(cmd=None):
    """CLI entry point: parse arguments and compute perplexity."""
    print(get_commandline_args(), file=sys.stderr)
    parsed_args = get_parser().parse_args(cmd)
    options = vars(parsed_args)
    # "config" is handled by config_argparse and must not reach calc_perplexity().
    options.pop("config", None)
    calc_perplexity(**options)


if __name__ == "__main__":
    main()
| 6,595 | 31.17561 | 86 | py |
espnet | espnet-master/espnet2/bin/st_inference_streaming.py | #!/usr/bin/env python3
import argparse
import logging
import math
import sys
from pathlib import Path
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.encoder.contextual_block_conformer_encoder import ( # noqa: H301
ContextualBlockConformerEncoder,
)
from espnet2.asr.encoder.contextual_block_transformer_encoder import ( # noqa: H301
ContextualBlockTransformerEncoder,
)
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.lm import LMTask
from espnet2.tasks.st import STTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.batch_beam_search_online import BatchBeamSearchOnline
from espnet.nets.beam_search import Hypothesis
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.cli_utils import get_commandline_args
class Speech2TextStreaming:
    """Speech2TextStreaming class

    Details in "Streaming Transformer ASR with Blockwise Synchronous Beam Search"
    (https://arxiv.org/abs/2006.14941)

    Examples:
        >>> import soundfile
        >>> speech2text = Speech2TextStreaming("asr_config.yml", "asr.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> speech2text(audio)
        [(text, token, token_int, hypothesis object), ...]

    """

    def __init__(
        self,
        st_train_config: Union[Path, str],
        st_model_file: Union[Path, str] = None,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        maxlenratio: float = 0.0,
        minlenratio: float = 0.0,
        batch_size: int = 1,
        dtype: str = "float32",
        beam_size: int = 20,
        lm_weight: float = 1.0,
        penalty: float = 0.0,
        nbest: int = 1,
        disable_repetition_detection=False,
        decoder_text_length_limit=0,
        encoded_feat_length_limit=0,
    ):
        """Build ST model, optional LM, online beam search and text converter."""
        assert check_argument_types()

        # 1. Build ST model
        scorers = {}
        st_model, st_train_args = STTask.build_model_from_file(
            st_train_config, st_model_file, device
        )
        st_model.to(dtype=getattr(torch, dtype)).eval()

        # Streaming decoding requires a contextual-block (blockwise) encoder.
        assert isinstance(
            st_model.encoder, ContextualBlockTransformerEncoder
        ) or isinstance(st_model.encoder, ContextualBlockConformerEncoder)

        decoder = st_model.decoder
        token_list = st_model.token_list
        scorers.update(
            decoder=decoder,
            length_bonus=LengthBonus(len(token_list)),
        )

        # 2. Build Language model
        if lm_train_config is not None:
            lm, lm_train_args = LMTask.build_model_from_file(
                lm_train_config, lm_file, device
            )
            scorers["lm"] = lm.lm

        # 3. Build BeamSearch object
        weights = dict(
            decoder=1.0,
            lm=lm_weight,
            length_bonus=penalty,
        )

        # The training config must carry the blockwise-encoder parameters.
        assert "encoder_conf" in st_train_args
        assert "look_ahead" in st_train_args.encoder_conf
        assert "hop_size" in st_train_args.encoder_conf
        assert "block_size" in st_train_args.encoder_conf
        # look_ahead = st_train_args.encoder_conf['look_ahead']
        # hop_size = st_train_args.encoder_conf['hop_size']
        # block_size = st_train_args.encoder_conf['block_size']

        # Only single-utterance (batch_size == 1) streaming is supported.
        assert batch_size == 1

        beam_search = BatchBeamSearchOnline(
            beam_size=beam_size,
            weights=weights,
            scorers=scorers,
            sos=st_model.sos,
            eos=st_model.eos,
            vocab_size=len(token_list),
            token_list=token_list,
            pre_beam_score_key="full",
            disable_repetition_detection=disable_repetition_detection,
            decoder_text_length_limit=decoder_text_length_limit,
            encoded_feat_length_limit=encoded_feat_length_limit,
        )

        # All scorers must support batched scoring for the online beam search.
        non_batch = [
            k
            for k, v in beam_search.full_scorers.items()
            if not isinstance(v, BatchScorerInterface)
        ]
        assert len(non_batch) == 0
        # TODO(karita): make all scorers batchfied
        logging.info("BatchBeamSearchOnline implementation is selected.")

        beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
        for scorer in scorers.values():
            if isinstance(scorer, torch.nn.Module):
                scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
        logging.info(f"Beam_search: {beam_search}")
        logging.info(f"Decoding device={device}, dtype={dtype}")

        # 4. [Optional] Build Text converter: e.g. bpe-sym -> Text
        if token_type is None:
            token_type = st_train_args.token_type
        if bpemodel is None:
            bpemodel = st_train_args.bpemodel

        if token_type is None:
            tokenizer = None
        elif token_type == "bpe":
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        converter = TokenIDConverter(token_list=token_list)
        logging.info(f"Text tokenizer: {tokenizer}")

        self.st_model = st_model
        self.st_train_args = st_train_args
        self.converter = converter
        self.tokenizer = tokenizer
        self.beam_search = beam_search
        self.maxlenratio = maxlenratio
        self.minlenratio = minlenratio
        self.device = device
        self.dtype = dtype
        self.nbest = nbest

        # STFT parameters from the frontend config (defaults mirror the
        # frontend's own defaults: n_fft=512, hop_length=128, win_length=n_fft).
        if "n_fft" in st_train_args.frontend_conf:
            self.n_fft = st_train_args.frontend_conf["n_fft"]
        else:
            self.n_fft = 512
        if "hop_length" in st_train_args.frontend_conf:
            self.hop_length = st_train_args.frontend_conf["hop_length"]
        else:
            self.hop_length = 128
        if (
            "win_length" in st_train_args.frontend_conf
            and st_train_args.frontend_conf["win_length"] is not None
        ):
            self.win_length = st_train_args.frontend_conf["win_length"]
        else:
            self.win_length = self.n_fft

        self.reset()

    def reset(self):
        """Clear all streaming state (frontend buffer, encoder cache, beam)."""
        self.frontend_states = None
        self.encoder_states = None
        self.beam_search.reset()

    def apply_frontend(
        self, speech: torch.Tensor, prev_states=None, is_final: bool = False
    ):
        """Extract (and normalize) features for one audio chunk.

        Prepends the waveform carried over in ``prev_states`` to ``speech``,
        keeps the residual samples that do not fill a full STFT frame as the
        next buffer, and trims frames at the chunk boundaries that would be
        recomputed by the following chunk.

        Returns:
            feats, feats_lengths, next_states (None when ``is_final``)
        """
        if prev_states is not None:
            buf = prev_states["waveform_buffer"]
            speech = torch.cat([buf, speech], dim=0)

        if is_final:
            speech_to_process = speech
            waveform_buffer = None
        else:
            # Number of complete STFT frames available in the buffered waveform.
            n_frames = (
                speech.size(0) - (self.win_length - self.hop_length)
            ) // self.hop_length
            n_residual = (
                speech.size(0) - (self.win_length - self.hop_length)
            ) % self.hop_length
            speech_to_process = speech.narrow(
                0, 0, (self.win_length - self.hop_length) + n_frames * self.hop_length
            )
            # Keep the window/hop overlap plus residual samples for the next call.
            waveform_buffer = speech.narrow(
                0,
                speech.size(0) - (self.win_length - self.hop_length) - n_residual,
                (self.win_length - self.hop_length) + n_residual,
            ).clone()

        # data: (Nsamples,) -> (1, Nsamples)
        speech_to_process = speech_to_process.unsqueeze(0).to(
            getattr(torch, self.dtype)
        )
        lengths = speech_to_process.new_full(
            [1], dtype=torch.long, fill_value=speech_to_process.size(1)
        )
        batch = {"speech": speech_to_process, "speech_lengths": lengths}

        # lenghts: (1,)
        # a. To device
        batch = to_device(batch, device=self.device)

        feats, feats_lengths = self.st_model._extract_feats(**batch)
        if self.st_model.normalize is not None:
            feats, feats_lengths = self.st_model.normalize(feats, feats_lengths)

        # Trimming
        # Drop half of the window-overlap frames at each chunk boundary so
        # consecutive chunks do not emit duplicated feature frames.
        if is_final:
            if prev_states is None:
                pass
            else:
                feats = feats.narrow(
                    1,
                    math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                    feats.size(1)
                    - math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                )
        else:
            if prev_states is None:
                feats = feats.narrow(
                    1,
                    0,
                    feats.size(1)
                    - math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                )
            else:
                feats = feats.narrow(
                    1,
                    math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                    feats.size(1)
                    - 2 * math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                )

        feats_lengths = feats.new_full([1], dtype=torch.long, fill_value=feats.size(1))

        if is_final:
            next_states = None
        else:
            next_states = {"waveform_buffer": waveform_buffer}
        return feats, feats_lengths, next_states

    @torch.no_grad()
    def __call__(
        self, speech: Union[torch.Tensor, np.ndarray], is_final: bool = True
    ) -> List[Tuple[Optional[str], List[str], List[int], Hypothesis]]:
        """Inference

        Args:
            data: Input speech data

        Returns:
            text, token, token_int, hyp

        """
        assert check_argument_types()

        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)

        feats, feats_lengths, self.frontend_states = self.apply_frontend(
            speech, self.frontend_states, is_final=is_final
        )
        enc, _, self.encoder_states = self.st_model.encoder(
            feats,
            feats_lengths,
            self.encoder_states,
            is_final=is_final,
            infer_mode=True,
        )
        nbest_hyps = self.beam_search(
            x=enc[0],
            maxlenratio=self.maxlenratio,
            minlenratio=self.minlenratio,
            is_final=is_final,
        )

        ret = self.assemble_hyps(nbest_hyps)
        # Final chunk: clear streaming state so the instance can be reused.
        if is_final:
            self.reset()
        return ret

    def assemble_hyps(self, hyps):
        """Convert the n-best hypotheses into (text, token, token_int, hyp) tuples."""
        nbest_hyps = hyps[: self.nbest]
        results = []
        for hyp in nbest_hyps:
            assert isinstance(hyp, Hypothesis), type(hyp)

            # remove sos/eos and get results
            token_int = hyp.yseq[1:-1].tolist()

            # remove blank symbol id, which is assumed to be 0
            token_int = list(filter(lambda x: x != 0, token_int))

            # Change integer-ids to tokens
            token = self.converter.ids2tokens(token_int)

            if self.tokenizer is not None:
                text = self.tokenizer.tokens2text(token)
            else:
                text = None
            results.append((text, token, token_int, hyp))

        assert check_return_type(results)
        return results
def inference(
    output_dir: str,
    maxlenratio: float,
    minlenratio: float,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    lm_weight: float,
    penalty: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    st_train_config: str,
    st_model_file: str,
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    sim_chunk_length: int,
    disable_repetition_detection: bool,
    encoded_feat_length_limit: int,
    decoder_text_length_limit: int,
):
    """Run streaming ST decoding over a dataset and write n-best results.

    When ``sim_chunk_length`` > 0, each utterance is fed to the model in
    fixed-size chunks to simulate streaming input; otherwise the whole
    utterance is decoded in one call.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if word_lm_train_config is not None:
        raise NotImplementedError("Word LM is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set random-seed
    set_all_random_seed(seed)

    # 2. Build speech2text
    speech2text = Speech2TextStreaming(
        st_train_config=st_train_config,
        st_model_file=st_model_file,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        maxlenratio=maxlenratio,
        minlenratio=minlenratio,
        dtype=dtype,
        beam_size=beam_size,
        lm_weight=lm_weight,
        penalty=penalty,
        nbest=nbest,
        disable_repetition_detection=disable_repetition_detection,
        decoder_text_length_limit=decoder_text_length_limit,
        encoded_feat_length_limit=encoded_feat_length_limit,
    )

    # 3. Build data-iterator
    loader = STTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=STTask.build_preprocess_fn(speech2text.st_train_args, False),
        collate_fn=STTask.build_collate_fn(speech2text.st_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )

    # 7 .Start for-loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # Drop length entries; the model is called with raw arrays.
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            assert len(batch.keys()) == 1

            try:
                if sim_chunk_length == 0:
                    # N-best list of (text, token, token_int, hyp_object)
                    results = speech2text(**batch)
                else:
                    # Simulate streaming: feed fixed-size chunks, final call
                    # flushes the remainder and returns the hypotheses.
                    speech = batch["speech"]
                    if (len(speech) // sim_chunk_length) > 1:
                        for i in range(len(speech) // sim_chunk_length):
                            speech2text(
                                speech=speech[
                                    i * sim_chunk_length : (i + 1) * sim_chunk_length
                                ],
                                is_final=False,
                            )
                        results = speech2text(
                            speech[(i + 1) * sim_chunk_length : len(speech)],
                            is_final=True,
                        )
                    else:
                        results = speech2text(**batch)
            except TooShortUttError as e:
                # Emit a dummy hypothesis so downstream scoring does not break.
                logging.warning(f"Utterance {keys} {e}")
                hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
                results = [[" ", ["<space>"], [2], hyp]] * nbest

            # Only supporting batch_size==1
            key = keys[0]
            for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
                # Create a directory: outdir/{n}best_recog
                ibest_writer = writer[f"{n}best_recog"]

                # Write the result to each file
                ibest_writer["token"][key] = " ".join(token)
                ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                ibest_writer["score"][key] = str(hyp.score)

                if text is not None:
                    ibest_writer["text"][key] = text
def get_parser():
    """Build the argument parser for streaming ST decoding."""
    parser = config_argparse.ArgumentParser(
        description="ST Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )

    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group.add_argument(
        "--sim_chunk_length",
        type=int,
        default=0,
        help="The length of one chunk, to which speech will be "
        "divided for evalution of streaming processing.",
    )

    group = parser.add_argument_group("The model configuration related")
    group.add_argument("--st_train_config", type=str, required=True)
    group.add_argument("--st_model_file", type=str, required=True)
    group.add_argument("--lm_train_config", type=str)
    group.add_argument("--lm_file", type=str)
    group.add_argument("--word_lm_train_config", type=str)
    group.add_argument("--word_lm_file", type=str)

    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    group.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain max output length. "
        "If maxlenratio=0.0 (default), it uses a end-detect "
        "function "
        "to automatically find maximum hypothesis lengths",
    )
    group.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument("--disable_repetition_detection", type=str2bool, default=False)
    group.add_argument(
        "--encoded_feat_length_limit",
        type=int,
        default=0,
        help="Limit the lengths of the encoded feature" "to input to the decoder.",
    )
    group.add_argument(
        "--decoder_text_length_limit",
        type=int,
        default=0,
        help="Limit the lengths of the text" "to input to the decoder.",
    )

    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for ST model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )

    return parser
def main(cmd=None):
    """Entry point for the streaming ST decoding CLI."""
    print(get_commandline_args(), file=sys.stderr)
    namespace = get_parser().parse_args(cmd)
    params = vars(namespace)
    # Drop the config_argparse-only "config" key before dispatching.
    params.pop("config", None)
    inference(**params)


if __name__ == "__main__":
    main()
| 20,953 | 33.238562 | 87 | py |
espnet | espnet-master/espnet2/bin/diar_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from itertools import permutations
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import trange
from typeguard import check_argument_types
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainMSE
from espnet2.enh.loss.criterions.time_domain import SISNRLoss
from espnet2.enh.loss.wrappers.pit_solver import PITSolver
from espnet2.fileio.npy_scp import NpyScpWriter
from espnet2.fileio.sound_scp import SoundScpWriter
from espnet2.tasks.diar import DiarizationTask
from espnet2.tasks.enh_s2t import EnhS2TTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import (
humanfriendly_parse_size_or_none,
int_or_none,
str2bool,
str2triple_str,
str_or_none,
)
from espnet.utils.cli_utils import get_commandline_args
class DiarizeSpeech:
"""DiarizeSpeech class
Examples:
>>> import soundfile
>>> diarization = DiarizeSpeech("diar_config.yaml", "diar.pth")
>>> audio, rate = soundfile.read("speech.wav")
>>> diarization(audio)
[(spk_id, start, end), (spk_id2, start2, end2)]
"""
def __init__(
self,
train_config: Union[Path, str] = None,
model_file: Union[Path, str] = None,
segment_size: Optional[float] = None,
hop_size: Optional[float] = None,
normalize_segment_scale: bool = False,
show_progressbar: bool = False,
normalize_output_wav: bool = False,
num_spk: Optional[int] = None,
device: str = "cpu",
dtype: str = "float32",
enh_s2t_task: bool = False,
multiply_diar_result: bool = False,
):
assert check_argument_types()
task = DiarizationTask if not enh_s2t_task else EnhS2TTask
# 1. Build Diar model
diar_model, diar_train_args = task.build_model_from_file(
train_config, model_file, device
)
if enh_s2t_task:
diar_model.inherite_attributes(
inherite_s2t_attrs=[
"decoder",
"attractor",
],
inherite_enh_attrs=[
"mask_module",
],
)
diar_model.to(dtype=getattr(torch, dtype)).eval()
self.device = device
self.dtype = dtype
self.diar_train_args = diar_train_args
self.diar_model = diar_model
# only used when processing long speech, i.e.
# segment_size is not None and hop_size is not None
self.segment_size = segment_size
self.hop_size = hop_size
self.normalize_segment_scale = normalize_segment_scale
self.normalize_output_wav = normalize_output_wav
self.show_progressbar = show_progressbar
# not specifying "num_spk" in inference config file
# will enable speaker number prediction during inference
self.num_spk = num_spk
# multiply_diar_result corresponds to the "Post-processing"
# in https://arxiv.org/pdf/2203.17068.pdf
self.multiply_diar_result = multiply_diar_result
self.enh_s2t_task = enh_s2t_task
self.segmenting_diar = segment_size is not None and not enh_s2t_task
self.segmenting_enh_diar = (
segment_size is not None and hop_size is not None and enh_s2t_task
)
if self.segmenting_diar:
logging.info("Perform segment-wise speaker diarization")
logging.info("Segment length = {} sec".format(segment_size))
elif self.segmenting_enh_diar:
logging.info("Perform segment-wise speech separation and diarization")
logging.info(
"Segment length = {} sec, hop length = {} sec".format(
segment_size, hop_size
)
)
else:
logging.info("Perform direct speaker diarization on the input")
@torch.no_grad()
def __call__(
self, speech: Union[torch.Tensor, np.ndarray], fs: int = 8000
) -> List[torch.Tensor]:
"""Inference
Args:
speech: Input speech data (Batch, Nsamples [, Channels])
fs: sample rate
Returns:
[speaker_info1, speaker_info2, ...]
"""
assert check_argument_types()
# Input as audio signal
if isinstance(speech, np.ndarray):
speech = torch.as_tensor(speech)
assert speech.dim() > 1, speech.size()
batch_size = speech.size(0)
speech = speech.to(getattr(torch, self.dtype))
# lengths: (B,)
lengths = speech.new_full(
[batch_size], dtype=torch.long, fill_value=speech.size(1)
)
# a. To device
speech = to_device(speech, device=self.device)
lengths = to_device(lengths, device=self.device)
if self.segmenting_diar and lengths[0] > self.segment_size * fs:
# Segment-wise speaker diarization
# Note that the segments are processed independently for now
# i.e., no speaker tracing is performed
num_segments = int(np.ceil(speech.size(1) / (self.segment_size * fs)))
t = T = int(self.segment_size * fs)
pad_shape = speech[:, :T].shape
diarized_wavs = []
range_ = trange if self.show_progressbar else range
for i in range_(num_segments):
st = int(i * self.segment_size * fs)
en = st + T
if en >= lengths[0]:
# en - st < T (last segment)
en = lengths[0]
speech_seg = speech.new_zeros(pad_shape)
t = en - st
speech_seg[:, :t] = speech[:, st:en]
else:
t = T
speech_seg = speech[:, st:en] # B x T [x C]
lengths_seg = speech.new_full(
[batch_size], dtype=torch.long, fill_value=T
)
# b. Diarization Forward
encoder_out, encoder_out_lens = self.encode(
speech_seg,
lengths_seg,
)
spk_prediction, _ = self.decode(encoder_out, encoder_out_lens)
# List[torch.Tensor(B, T, num_spks)]
diarized_wavs.append(spk_prediction)
# Determine maximum estimated number of speakers among the segments
max_len = max([x.size(2) for x in diarized_wavs])
# pad tensors in diarized_wavs with "float('-inf')" to have same size
diarized_wavs = [
torch.nn.functional.pad(
x, (0, max_len - x.size(2)), "constant", float("-inf")
)
for x in diarized_wavs
]
spk_prediction = torch.cat(diarized_wavs, dim=1)
waves = None
else:
# b. Diarization Forward
encoder_out, encoder_out_lens = self.encode(speech, lengths)
spk_prediction, num_spk = self.decode(encoder_out, encoder_out_lens)
if self.enh_s2t_task:
# Segment-wise speech separation
# Note that this is done after diarization using the whole sequence
if self.segmenting_enh_diar and lengths[0] > self.segment_size * fs:
overlap_length = int(
np.round(fs * (self.segment_size - self.hop_size))
)
num_segments = int(
np.ceil(
(speech.size(1) - overlap_length) / (self.hop_size * fs)
)
)
t = T = int(self.segment_size * fs)
pad_shape = speech[:, :T].shape
enh_waves = []
range_ = trange if self.show_progressbar else range
for i in range_(num_segments):
st = int(i * self.hop_size * fs)
en = st + T
if en >= lengths[0]:
# en - st < T (last segment)
en = lengths[0]
speech_seg = speech.new_zeros(pad_shape)
t = en - st
speech_seg[:, :t] = speech[:, st:en]
else:
t = T
speech_seg = speech[:, st:en] # B x T [x C]
lengths_seg = speech.new_full(
[batch_size], dtype=torch.long, fill_value=T
)
# Separation Forward
_, _, processed_wav = self.diar_model.encode_diar(
speech_seg, lengths_seg, num_spk
)
if self.normalize_segment_scale:
# normalize the scale to match the input mixture scale
mix_energy = torch.sqrt(
torch.mean(
speech_seg[:, :t].pow(2), dim=1, keepdim=True
)
)
enh_energy = torch.sqrt(
torch.mean(
sum(processed_wav)[:, :t].pow(2),
dim=1,
keepdim=True,
)
)
processed_wav = [
w * (mix_energy / enh_energy) for w in processed_wav
]
# List[torch.Tensor(num_spk, B, T)]
enh_waves.append(torch.stack(processed_wav, dim=0))
# c. Stitch the enhanced segments together
waves = enh_waves[0]
for i in range(1, num_segments):
# permutation between separated streams
# in last and current segments
perm = self.cal_permumation(
waves[:, :, -overlap_length:],
enh_waves[i][:, :, :overlap_length],
criterion="si_snr",
)
# repermute separated streams in current segment
for batch in range(batch_size):
enh_waves[i][:, batch] = enh_waves[i][perm[batch], batch]
if i == num_segments - 1:
enh_waves[i][:, :, t:] = 0
enh_waves_res_i = enh_waves[i][:, :, overlap_length:t]
else:
enh_waves_res_i = enh_waves[i][:, :, overlap_length:]
# overlap-and-add (average over the overlapped part)
waves[:, :, -overlap_length:] = (
waves[:, :, -overlap_length:]
+ enh_waves[i][:, :, :overlap_length]
) / 2
# concatenate the residual parts of the later segment
waves = torch.cat([waves, enh_waves_res_i], dim=2)
# ensure the stitched length is same as input
assert waves.size(2) == speech.size(1), (waves.shape, speech.shape)
waves = torch.unbind(waves, dim=0)
else:
# Separation Forward using the whole signal
_, _, waves = self.diar_model.encode_diar(speech, lengths, num_spk)
# multiply diarization result and separation result
# by calculating the correlation
if self.multiply_diar_result:
spk_prediction, interp_prediction, _ = self.permute_diar(
waves, spk_prediction
)
waves = [
waves[i] * interp_prediction[:, :, i] for i in range(num_spk)
]
if self.normalize_output_wav:
waves = [
(w / abs(w).max(dim=1, keepdim=True)[0] * 0.9).cpu().numpy()
for w in waves
] # list[(batch, sample)]
else:
waves = [w.cpu().numpy() for w in waves]
else:
waves = None
if self.num_spk is not None:
assert spk_prediction.size(2) == self.num_spk, (
spk_prediction.size(2),
self.num_spk,
)
assert spk_prediction.size(0) == batch_size, (
spk_prediction.size(0),
batch_size,
)
spk_prediction = spk_prediction.cpu().numpy()
spk_prediction = 1 / (1 + np.exp(-spk_prediction))
return waves, spk_prediction if self.enh_s2t_task else spk_prediction
@torch.no_grad()
def cal_permumation(self, ref_wavs, enh_wavs, criterion="si_snr"):
"""Calculate the permutation between seaprated streams in two adjacent segments.
Args:
ref_wavs (List[torch.Tensor]): [(Batch, Nsamples)]
enh_wavs (List[torch.Tensor]): [(Batch, Nsamples)]
criterion (str): one of ("si_snr", "mse", "corr)
Returns:
perm (torch.Tensor): permutation for enh_wavs (Batch, num_spk)
"""
criterion_class = {"si_snr": SISNRLoss, "mse": FrequencyDomainMSE}[criterion]
pit_solver = PITSolver(criterion=criterion_class())
_, _, others = pit_solver(ref_wavs, enh_wavs)
perm = others["perm"]
return perm
@staticmethod
def from_pretrained(
model_tag: Optional[str] = None,
**kwargs: Optional[Any],
):
"""Build DiarizeSpeech instance from the pretrained model.
Args:
model_tag (Optional[str]): Model tag of the pretrained models.
Currently, the tags of espnet_model_zoo are supported.
Returns:
DiarizeSpeech: DiarizeSpeech instance.
"""
if model_tag is not None:
try:
from espnet_model_zoo.downloader import ModelDownloader
except ImportError:
logging.error(
"`espnet_model_zoo` is not installed. "
"Please install via `pip install -U espnet_model_zoo`."
)
raise
d = ModelDownloader()
kwargs.update(**d.download_and_unpack(model_tag))
return DiarizeSpeech(**kwargs)
def permute_diar(self, waves, spk_prediction):
# Permute the diarization result using the correlation
# between wav and spk_prediction
# FIXME(YushiUeda): batch_size > 1 is not considered
num_spk = len(waves)
permute_list = [np.array(p) for p in permutations(range(num_spk))]
corr_list = []
interp_prediction = F.interpolate(
torch.sigmoid(spk_prediction).transpose(1, 2),
size=waves[0].size(1),
mode="linear",
).transpose(1, 2)
for p in permute_list:
diar_perm = interp_prediction[:, :, p]
corr_perm = [0]
for q in range(num_spk):
corr_perm += np.corrcoef(
torch.squeeze(abs(waves[q])).cpu().numpy(),
torch.squeeze(diar_perm[:, :, q]).cpu().numpy(),
)[0, 1]
corr_list.append(corr_perm)
max_corr, max_idx = torch.max(torch.from_numpy(np.array(corr_list)), dim=0)
return (
spk_prediction[:, :, permute_list[max_idx]],
interp_prediction[:, :, permute_list[max_idx]],
permute_list[max_idx],
)
def encode(self, speech, lengths):
if self.enh_s2t_task:
encoder_out, encoder_out_lens, _ = self.diar_model.encode_diar(
speech, lengths, self.num_spk
)
else:
bottleneck_feats = bottleneck_feats_lengths = None
encoder_out, encoder_out_lens = self.diar_model.encode(
speech, lengths, bottleneck_feats, bottleneck_feats_lengths
)
return encoder_out, encoder_out_lens
    def decode(self, encoder_out, encoder_out_lens):
        """Decode encoder output into per-speaker activity predictions.

        Args:
            encoder_out: encoder output sequence (Batch, T, D)
            encoder_out_lens: valid lengths of the encoder output

        Returns:
            Tuple of (spk_prediction, num_spk): raw (pre-sigmoid) speaker
            activity predictions and the number of speakers decoded.
        """
        # SA-EEND: a fixed decoder head, so num_spk must be given explicitly.
        if self.diar_model.attractor is None:
            assert self.num_spk is not None, 'Argument "num_spk" must be specified'
            spk_prediction = self.diar_model.decoder(encoder_out, encoder_out_lens)
            num_spk = self.num_spk
        # EEND-EDA: attractors allow a variable number of speakers.
        else:
            # if num_spk is specified, use that number
            if self.num_spk is not None:
                # The attractor consumes num_spk + 1 zero queries; the extra
                # one corresponds to the "no more speakers" slot.
                attractor, att_prob = self.diar_model.attractor(
                    encoder_out,
                    encoder_out_lens,
                    to_device(
                        torch.zeros(
                            encoder_out.size(0),
                            self.num_spk + 1,
                            encoder_out.size(2),
                        ),
                        device=self.device,
                    ),
                )
                spk_prediction = torch.bmm(
                    encoder_out,
                    attractor[:, : self.num_spk, :].permute(0, 2, 1),
                )
                num_spk = self.num_spk
            # else find the first att_prob[i] < 0
            else:
                max_num_spk = 15  # upper bound number for estimation
                attractor, att_prob = self.diar_model.attractor(
                    encoder_out,
                    encoder_out_lens,
                    to_device(
                        torch.zeros(
                            encoder_out.size(0),
                            max_num_spk + 1,
                            encoder_out.size(2),
                        ),
                        device=self.device,
                    ),
                )
                # NOTE(review): squeeze drops the batch dimension, so this
                # branch implicitly assumes batch size 1 — confirm upstream.
                att_prob = torch.squeeze(att_prob)
                # The loop variable leaks on purpose: num_spk ends at the
                # index of the first negative attractor logit, or at
                # max_num_spk if none is negative.
                for num_spk in range(len(att_prob)):
                    if att_prob[num_spk].item() < 0:
                        break
                spk_prediction = torch.bmm(
                    encoder_out, attractor[:, :num_spk, :].permute(0, 2, 1)
                )
        return spk_prediction, num_spk
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    fs: int,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    train_config: Optional[str],
    model_file: Optional[str],
    model_tag: Optional[str],
    allow_variable_data_keys: bool,
    segment_size: Optional[float],
    hop_size: Optional[float],
    normalize_segment_scale: bool,
    show_progressbar: bool,
    num_spk: Optional[int],
    normalize_output_wav: bool,
    multiply_diar_result: bool,
    enh_s2t_task: bool,
):
    """Run speaker diarization inference over a dataset and write results.

    Predictions are written as numpy arrays under ``{output_dir}/predictions``
    (indexed by ``diarize.scp``); when ``enh_s2t_task`` is set, the separated
    waveforms are additionally written under ``{output_dir}/wavs/``.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build separate_speech
    diarize_speech_kwargs = dict(
        train_config=train_config,
        model_file=model_file,
        segment_size=segment_size,
        hop_size=hop_size,
        normalize_segment_scale=normalize_segment_scale,
        show_progressbar=show_progressbar,
        normalize_output_wav=normalize_output_wav,
        num_spk=num_spk,
        device=device,
        dtype=dtype,
        multiply_diar_result=multiply_diar_result,
        enh_s2t_task=enh_s2t_task,
    )
    diarize_speech = DiarizeSpeech.from_pretrained(
        model_tag=model_tag,
        **diarize_speech_kwargs,
    )
    # 3. Build data-iterator
    loader = DiarizationTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=DiarizationTask.build_preprocess_fn(
            diarize_speech.diar_train_args, False
        ),
        collate_fn=DiarizationTask.build_collate_fn(
            diarize_speech.diar_train_args, False
        ),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Start for-loop
    writer = NpyScpWriter(f"{output_dir}/predictions", f"{output_dir}/diarize.scp")
    if enh_s2t_task:
        # One SoundScpWriter per separated stream; the stream count comes
        # either from the user-specified num_spk or from the model's bound.
        wav_writers = []
        if diarize_speech.num_spk is not None:
            for i in range(diarize_speech.num_spk):
                wav_writers.append(
                    SoundScpWriter(
                        f"{output_dir}/wavs/{i + 1}", f"{output_dir}/spk{i + 1}.scp"
                    )
                )
        else:
            for i in range(diarize_speech.diar_model.mask_module.max_num_spk):
                wav_writers.append(
                    SoundScpWriter(
                        f"{output_dir}/wavs/{i + 1}", f"{output_dir}/spk{i + 1}.scp"
                    )
                )
    for keys, batch in loader:
        assert isinstance(batch, dict), type(batch)
        assert all(isinstance(s, str) for s in keys), keys
        _bs = len(next(iter(batch.values())))
        assert len(keys) == _bs, f"{len(keys)} != {_bs}"
        # *_lengths entries are dropped: DiarizeSpeech computes them itself.
        batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
        # NOTE: batch_size is guaranteed to be 1 here (checked above), so the
        # inner range(batch_size) loops run exactly once.
        if enh_s2t_task:
            waves, spk_predictions = diarize_speech(**batch)
            for b in range(batch_size):
                writer[keys[b]] = spk_predictions[b]
                for spk, w in enumerate(waves):
                    wav_writers[spk][keys[b]] = fs, w[b]
        else:
            spk_predictions = diarize_speech(**batch)
            for b in range(batch_size):
                writer[keys[b]] = spk_predictions[b]
    if enh_s2t_task:
        for w in wav_writers:
            w.close()
    writer.close()
def get_parser():
    """Build the argument parser for speaker diarization inference."""
    parser = config_argparse.ArgumentParser(
        description="Speaker Diarization inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--fs",
        type=humanfriendly_parse_size_or_none,
        default=8000,
        help="Sampling rate",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--train_config",
        type=str,
        help="Diarization training configuration",
    )
    group.add_argument(
        "--model_file",
        type=str,
        help="Diarization model parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, train_config and "
        "model_file will be overwritten",
    )
    group = parser.add_argument_group("Data loading related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group = parser.add_argument_group("Diarize speech related")
    group.add_argument(
        "--segment_size",
        type=float,
        default=None,
        help="Segment length in seconds for segment-wise speaker diarization",
    )
    group.add_argument(
        "--hop_size",
        type=float,
        default=None,
        help="Hop length in seconds for segment-wise speech enhancement/separation",
    )
    group.add_argument(
        "--show_progressbar",
        type=str2bool,
        default=False,
        help="Whether to show a progress bar when performing segment-wise speaker "
        "diarization",
    )
    group.add_argument(
        "--num_spk",
        type=int_or_none,
        default=None,
        help="Predetermined number of speakers for inference",
    )
    group = parser.add_argument_group("Enh + Diar related")
    group.add_argument(
        "--enh_s2t_task",
        type=str2bool,
        default=False,
        help="enhancement and diarization joint model",
    )
    group.add_argument(
        "--normalize_segment_scale",
        type=str2bool,
        default=False,
        help="Whether to normalize the energy of the separated streams in each segment",
    )
    group.add_argument(
        "--normalize_output_wav",
        type=str2bool,
        default=False,
        help="Whether to normalize the predicted wav to [-1~1]",
    )
    group.add_argument(
        "--multiply_diar_result",
        type=str2bool,
        default=False,
        help="Whether to multiply diar results to separated waves",
    )
    return parser
def main(cmd=None):
    """Command-line entry point for speaker diarization inference."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # "config" is consumed by config_argparse itself and is not an
    # inference() argument.
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
| 26,815 | 35.53406 | 88 | py |
espnet | espnet-master/espnet2/bin/tts_inference.py | #!/usr/bin/env python3
"""Script to run the inference of text-to-speeech model."""
import argparse
import logging
import shutil
import sys
import time
from pathlib import Path
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import soundfile as sf
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.fileio.npy_scp import NpyScpWriter
from espnet2.gan_tts.vits import VITS
from espnet2.tasks.tts import TTSTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.tts.fastspeech import FastSpeech
from espnet2.tts.fastspeech2 import FastSpeech2
from espnet2.tts.tacotron2 import Tacotron2
from espnet2.tts.transformer import Transformer
from espnet2.tts.utils import DurationCalculator
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
class Text2Speech:
    """Text2Speech class.

    Examples:
        >>> from espnet2.bin.tts_inference import Text2Speech
        >>> # Case 1: Load the local model and use Griffin-Lim vocoder
        >>> text2speech = Text2Speech(
        >>>     train_config="/path/to/config.yml",
        >>>     model_file="/path/to/model.pth",
        >>> )
        >>> # Case 2: Load the local model and the pretrained vocoder
        >>> text2speech = Text2Speech.from_pretrained(
        >>>     train_config="/path/to/config.yml",
        >>>     model_file="/path/to/model.pth",
        >>>     vocoder_tag="kan-bayashi/ljspeech_tacotron2",
        >>> )
        >>> # Case 3: Load the pretrained model and use Griffin-Lim vocoder
        >>> text2speech = Text2Speech.from_pretrained(
        >>>     model_tag="kan-bayashi/ljspeech_tacotron2",
        >>> )
        >>> # Case 4: Load the pretrained model and the pretrained vocoder
        >>> text2speech = Text2Speech.from_pretrained(
        >>>     model_tag="kan-bayashi/ljspeech_tacotron2",
        >>>     vocoder_tag="parallel_wavegan/ljspeech_parallel_wavegan.v1",
        >>> )
        >>> # Run inference and save as wav file
        >>> import soundfile as sf
        >>> wav = text2speech("Hello, World")["wav"]
        >>> sf.write("out.wav", wav.numpy(), text2speech.fs, "PCM_16")
    """

    def __init__(
        self,
        train_config: Optional[Union[Path, str]] = None,
        model_file: Optional[Union[Path, str]] = None,
        threshold: float = 0.5,
        minlenratio: float = 0.0,
        maxlenratio: float = 10.0,
        use_teacher_forcing: bool = False,
        use_att_constraint: bool = False,
        backward_window: int = 1,
        forward_window: int = 3,
        speed_control_alpha: float = 1.0,
        noise_scale: float = 0.667,
        noise_scale_dur: float = 0.8,
        vocoder_config: Optional[Union[Path, str]] = None,
        vocoder_file: Optional[Union[Path, str]] = None,
        dtype: str = "float32",
        device: str = "cpu",
        seed: int = 777,
        always_fix_seed: bool = False,
        prefer_normalized_feats: bool = False,
    ):
        """Initialize Text2Speech module.

        Builds the TTS model from config/checkpoint, optionally builds a
        vocoder, and collects the decoding options that apply to the
        concrete TTS architecture.
        """
        assert check_argument_types()
        # setup model
        model, train_args = TTSTask.build_model_from_file(
            train_config, model_file, device
        )
        model.to(dtype=getattr(torch, dtype)).eval()
        self.device = device
        self.dtype = dtype
        self.train_args = train_args
        self.model = model
        self.tts = model.tts
        self.normalize = model.normalize
        self.feats_extract = model.feats_extract
        self.duration_calculator = DurationCalculator()
        self.preprocess_fn = TTSTask.build_preprocess_fn(train_args, False)
        self.use_teacher_forcing = use_teacher_forcing
        self.seed = seed
        self.always_fix_seed = always_fix_seed
        self.vocoder = None
        self.prefer_normalized_feats = prefer_normalized_feats
        # Build a vocoder only when the TTS module declares it needs one
        # (text2mel models do; end-to-end text2wav models do not).
        if self.tts.require_vocoder:
            vocoder = TTSTask.build_vocoder_from_file(
                vocoder_config, vocoder_file, model, device
            )
            if isinstance(vocoder, torch.nn.Module):
                vocoder.to(dtype=getattr(torch, dtype)).eval()
            self.vocoder = vocoder
        logging.info(f"Extractor:\n{self.feats_extract}")
        logging.info(f"Normalizer:\n{self.normalize}")
        logging.info(f"TTS:\n{self.tts}")
        if self.vocoder is not None:
            logging.info(f"Vocoder:\n{self.vocoder}")
        # setup decoding config: only the options understood by the actual
        # TTS architecture are included.
        decode_conf = {}
        decode_conf.update(use_teacher_forcing=use_teacher_forcing)
        if isinstance(self.tts, (Tacotron2, Transformer)):
            decode_conf.update(
                threshold=threshold,
                maxlenratio=maxlenratio,
                minlenratio=minlenratio,
            )
        if isinstance(self.tts, Tacotron2):
            decode_conf.update(
                use_att_constraint=use_att_constraint,
                forward_window=forward_window,
                backward_window=backward_window,
            )
        if isinstance(self.tts, (FastSpeech, FastSpeech2, VITS)):
            decode_conf.update(alpha=speed_control_alpha)
        if isinstance(self.tts, VITS):
            decode_conf.update(
                noise_scale=noise_scale,
                noise_scale_dur=noise_scale_dur,
            )
        self.decode_conf = decode_conf

    @torch.no_grad()
    def __call__(
        self,
        text: Union[str, torch.Tensor, np.ndarray],
        speech: Optional[Union[torch.Tensor, np.ndarray]] = None,
        durations: Optional[Union[torch.Tensor, np.ndarray]] = None,
        spembs: Optional[Union[torch.Tensor, np.ndarray]] = None,
        sids: Optional[Union[torch.Tensor, np.ndarray]] = None,
        lids: Optional[Union[torch.Tensor, np.ndarray]] = None,
        decode_conf: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, torch.Tensor]:
        """Run text-to-speech.

        Returns the model's inference output dict, augmented with
        duration/focus_rate (when attention weights are available) and a
        "wav" entry (when a vocoder is configured).
        """
        assert check_argument_types()
        # check inputs: reject early when the model requires an optional input
        if self.use_speech and speech is None:
            raise RuntimeError("Missing required argument: 'speech'")
        if self.use_sids and sids is None:
            raise RuntimeError("Missing required argument: 'sids'")
        if self.use_lids and lids is None:
            raise RuntimeError("Missing required argument: 'lids'")
        if self.use_spembs and spembs is None:
            raise RuntimeError("Missing required argument: 'spembs'")
        # prepare batch: raw strings are tokenized via the training-time
        # preprocessing function
        if isinstance(text, str):
            text = self.preprocess_fn("<dummy>", dict(text=text))["text"]
        batch = dict(text=text)
        if speech is not None:
            batch.update(speech=speech)
        if durations is not None:
            batch.update(durations=durations)
        if spembs is not None:
            batch.update(spembs=spembs)
        if sids is not None:
            batch.update(sids=sids)
        if lids is not None:
            batch.update(lids=lids)
        batch = to_device(batch, self.device)
        # overwrite the decode configs if provided (a copy is made so the
        # instance-level defaults stay intact)
        cfg = self.decode_conf
        if decode_conf is not None:
            cfg = self.decode_conf.copy()
            cfg.update(decode_conf)
        # inference
        if self.always_fix_seed:
            set_all_random_seed(self.seed)
        output_dict = self.model.inference(**batch, **cfg)
        # calculate additional metrics
        if output_dict.get("att_w") is not None:
            duration, focus_rate = self.duration_calculator(output_dict["att_w"])
            output_dict.update(duration=duration, focus_rate=focus_rate)
        # apply vocoder (mel-to-wav)
        if self.vocoder is not None:
            if (
                self.prefer_normalized_feats
                or output_dict.get("feat_gen_denorm") is None
            ):
                input_feat = output_dict["feat_gen"]
            else:
                input_feat = output_dict["feat_gen_denorm"]
            wav = self.vocoder(input_feat)
            output_dict.update(wav=wav)
        return output_dict

    @property
    def fs(self) -> Optional[int]:
        """Return sampling rate."""
        # Prefer the vocoder's rate; fall back to the TTS module's, else None.
        if hasattr(self.vocoder, "fs"):
            return self.vocoder.fs
        elif hasattr(self.tts, "fs"):
            return self.tts.fs
        else:
            return None

    @property
    def use_speech(self) -> bool:
        """Return speech is needed or not in the inference."""
        return self.use_teacher_forcing or getattr(self.tts, "use_gst", False)

    @property
    def use_sids(self) -> bool:
        """Return whether a speaker id is needed in the inference."""
        return self.tts.spks is not None

    @property
    def use_lids(self) -> bool:
        """Return whether a language id is needed in the inference."""
        return self.tts.langs is not None

    @property
    def use_spembs(self) -> bool:
        """Return spemb is needed or not in the inference."""
        return self.tts.spk_embed_dim is not None

    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        vocoder_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build Text2Speech instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
            vocoder_tag (Optional[str]): Vocoder tag of the pretrained vocoders.
                Currently, the tags of parallel_wavegan are supported, which should
                start with the prefix "parallel_wavegan/".

        Returns:
            Text2Speech: Text2Speech instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))
        if vocoder_tag is not None:
            if vocoder_tag.startswith("parallel_wavegan/"):
                try:
                    from parallel_wavegan.utils import download_pretrained_model
                except ImportError:
                    logging.error(
                        "`parallel_wavegan` is not installed. "
                        "Please install via `pip install -U parallel_wavegan`."
                    )
                    raise
                from parallel_wavegan import __version__

                # NOTE(kan-bayashi): Filelock download is supported from 0.5.2
                assert V(__version__) > V("0.5.1"), (
                    "Please install the latest parallel_wavegan "
                    "via `pip install -U parallel_wavegan`."
                )
                vocoder_tag = vocoder_tag.replace("parallel_wavegan/", "")
                vocoder_file = download_pretrained_model(vocoder_tag)
                vocoder_config = Path(vocoder_file).parent / "config.yml"
                kwargs.update(vocoder_config=vocoder_config, vocoder_file=vocoder_file)
            else:
                raise ValueError(f"{vocoder_tag} is unsupported format.")
        return Text2Speech(**kwargs)
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    train_config: Optional[str],
    model_file: Optional[str],
    model_tag: Optional[str],
    threshold: float,
    minlenratio: float,
    maxlenratio: float,
    use_teacher_forcing: bool,
    use_att_constraint: bool,
    backward_window: int,
    forward_window: int,
    speed_control_alpha: float,
    noise_scale: float,
    noise_scale_dur: float,
    always_fix_seed: bool,
    allow_variable_data_keys: bool,
    vocoder_config: Optional[str],
    vocoder_file: Optional[str],
    vocoder_tag: Optional[str],
):
    """Run text-to-speech inference.

    For each utterance in the dataset, writes (as applicable): normalized and
    denormalized features, generated wav, attention / stop-probability plots,
    durations, and focus rates under ``output_dir``. Sub-directories that end
    up unused for the given model type are removed at the end.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build model
    text2speech_kwargs = dict(
        train_config=train_config,
        model_file=model_file,
        threshold=threshold,
        maxlenratio=maxlenratio,
        minlenratio=minlenratio,
        use_teacher_forcing=use_teacher_forcing,
        use_att_constraint=use_att_constraint,
        backward_window=backward_window,
        forward_window=forward_window,
        speed_control_alpha=speed_control_alpha,
        noise_scale=noise_scale,
        noise_scale_dur=noise_scale_dur,
        vocoder_config=vocoder_config,
        vocoder_file=vocoder_file,
        dtype=dtype,
        device=device,
        seed=seed,
        always_fix_seed=always_fix_seed,
    )
    text2speech = Text2Speech.from_pretrained(
        model_tag=model_tag,
        vocoder_tag=vocoder_tag,
        **text2speech_kwargs,
    )
    # 3. Build data-iterator
    # Drop the "speech" data stream when the model does not consume it.
    if not text2speech.use_speech:
        data_path_and_name_and_type = list(
            filter(lambda x: x[1] != "speech", data_path_and_name_and_type)
        )
    loader = TTSTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=TTSTask.build_preprocess_fn(text2speech.train_args, False),
        collate_fn=TTSTask.build_collate_fn(text2speech.train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 6. Start for-loop
    output_dir = Path(output_dir)
    (output_dir / "norm").mkdir(parents=True, exist_ok=True)
    (output_dir / "denorm").mkdir(parents=True, exist_ok=True)
    (output_dir / "speech_shape").mkdir(parents=True, exist_ok=True)
    (output_dir / "wav").mkdir(parents=True, exist_ok=True)
    (output_dir / "att_ws").mkdir(parents=True, exist_ok=True)
    (output_dir / "probs").mkdir(parents=True, exist_ok=True)
    (output_dir / "durations").mkdir(parents=True, exist_ok=True)
    (output_dir / "focus_rates").mkdir(parents=True, exist_ok=True)
    # Lazy load to avoid the backend error
    import matplotlib

    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator

    with NpyScpWriter(
        output_dir / "norm",
        output_dir / "norm/feats.scp",
    ) as norm_writer, NpyScpWriter(
        output_dir / "denorm", output_dir / "denorm/feats.scp"
    ) as denorm_writer, open(
        output_dir / "speech_shape/speech_shape", "w"
    ) as shape_writer, open(
        output_dir / "durations/durations", "w"
    ) as duration_writer, open(
        output_dir / "focus_rates/focus_rates", "w"
    ) as focus_rate_writer:
        for idx, (keys, batch) in enumerate(loader, 1):
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert _bs == 1, _bs
            # Change to single sequence and remove *_length
            # because inference() requires 1-seq, not mini-batch.
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            start_time = time.perf_counter()
            output_dict = text2speech(**batch)
            key = keys[0]
            insize = next(iter(batch.values())).size(0) + 1
            if output_dict.get("feat_gen") is not None:
                # standard text2mel model case
                feat_gen = output_dict["feat_gen"]
                logging.info(
                    "inference speed = {:.1f} frames / sec.".format(
                        int(feat_gen.size(0)) / (time.perf_counter() - start_time)
                    )
                )
                logging.info(f"{key} (size:{insize}->{feat_gen.size(0)})")
                if feat_gen.size(0) == insize * maxlenratio:
                    logging.warning(f"output length reaches maximum length ({key}).")
                norm_writer[key] = output_dict["feat_gen"].cpu().numpy()
                shape_writer.write(
                    f"{key} " + ",".join(map(str, output_dict["feat_gen"].shape)) + "\n"
                )
                if output_dict.get("feat_gen_denorm") is not None:
                    denorm_writer[key] = output_dict["feat_gen_denorm"].cpu().numpy()
            else:
                # end-to-end text2wav model case
                wav = output_dict["wav"]
                logging.info(
                    "inference speed = {:.1f} points / sec.".format(
                        int(wav.size(0)) / (time.perf_counter() - start_time)
                    )
                )
                logging.info(f"{key} (size:{insize}->{wav.size(0)})")
            if output_dict.get("duration") is not None:
                # Save duration and focus rates
                duration_writer.write(
                    f"{key} "
                    + " ".join(map(str, output_dict["duration"].long().cpu().numpy()))
                    + "\n"
                )
            if output_dict.get("focus_rate") is not None:
                focus_rate_writer.write(
                    f"{key} {float(output_dict['focus_rate']):.5f}\n"
                )
            if output_dict.get("att_w") is not None:
                # Plot attention weight
                att_w = output_dict["att_w"].cpu().numpy()
                if att_w.ndim == 2:
                    att_w = att_w[None][None]
                elif att_w.ndim != 4:
                    raise RuntimeError(f"Must be 2 or 4 dimension: {att_w.ndim}")
                w, h = plt.figaspect(att_w.shape[0] / att_w.shape[1])
                fig = plt.Figure(
                    figsize=(
                        w * 1.3 * min(att_w.shape[0], 2.5),
                        h * 1.3 * min(att_w.shape[1], 2.5),
                    )
                )
                fig.suptitle(f"{key}")
                axes = fig.subplots(att_w.shape[0], att_w.shape[1])
                if len(att_w) == 1:
                    axes = [[axes]]
                # NOTE: att_w is deliberately shadowed while unpacking the
                # rows/columns of the plot grid.
                for ax, att_w in zip(axes, att_w):
                    for ax_, att_w_ in zip(ax, att_w):
                        ax_.imshow(att_w_.astype(np.float32), aspect="auto")
                        ax_.set_xlabel("Input")
                        ax_.set_ylabel("Output")
                        ax_.xaxis.set_major_locator(MaxNLocator(integer=True))
                        ax_.yaxis.set_major_locator(MaxNLocator(integer=True))
                fig.set_tight_layout({"rect": [0, 0.03, 1, 0.95]})
                fig.savefig(output_dir / f"att_ws/{key}.png")
                fig.clf()
            if output_dict.get("prob") is not None:
                # Plot stop token prediction
                prob = output_dict["prob"].cpu().numpy()
                fig = plt.Figure()
                ax = fig.add_subplot(1, 1, 1)
                ax.plot(prob)
                ax.set_title(f"{key}")
                ax.set_xlabel("Output")
                ax.set_ylabel("Stop probability")
                ax.set_ylim(0, 1)
                ax.grid(which="both")
                fig.set_tight_layout(True)
                fig.savefig(output_dir / f"probs/{key}.png")
                fig.clf()
            if output_dict.get("wav") is not None:
                # TODO(kamo): Write scp
                sf.write(
                    f"{output_dir}/wav/{key}.wav",
                    output_dict["wav"].cpu().numpy(),
                    text2speech.fs,
                    "PCM_16",
                )
    # remove files if those are not included in output dict
    # NOTE(review): this relies on the last loop iteration's output_dict;
    # assumes all utterances use the same model outputs — confirm.
    if output_dict.get("feat_gen") is None:
        shutil.rmtree(output_dir / "norm")
    if output_dict.get("feat_gen_denorm") is None:
        shutil.rmtree(output_dir / "denorm")
    if output_dict.get("att_w") is None:
        shutil.rmtree(output_dir / "att_ws")
    if output_dict.get("duration") is None:
        shutil.rmtree(output_dir / "durations")
    if output_dict.get("focus_rate") is None:
        shutil.rmtree(output_dir / "focus_rates")
    if output_dict.get("prob") is None:
        shutil.rmtree(output_dir / "probs")
    if output_dict.get("wav") is None:
        shutil.rmtree(output_dir / "wav")
def get_parser():
    """Get argument parser for TTS inference."""
    parser = config_argparse.ArgumentParser(
        description="TTS inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use "_" instead of "-" as separator.
    # "-" is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="The path of output directory",
    )
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=0,
        help="Random seed",
    )
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument(
        "--key_file",
        type=str_or_none,
    )
    group.add_argument(
        "--allow_variable_data_keys",
        type=str2bool,
        default=False,
    )
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--train_config",
        type=str,
        help="Training configuration file",
    )
    group.add_argument(
        "--model_file",
        type=str,
        help="Model parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, train_config and "
        "model_file will be overwritten",
    )
    group = parser.add_argument_group("Decoding related")
    group.add_argument(
        "--maxlenratio",
        type=float,
        default=10.0,
        help="Maximum length ratio in decoding",
    )
    group.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Minimum length ratio in decoding",
    )
    group.add_argument(
        "--threshold",
        type=float,
        default=0.5,
        help="Threshold value in decoding",
    )
    group.add_argument(
        "--use_att_constraint",
        type=str2bool,
        default=False,
        help="Whether to use attention constraint",
    )
    group.add_argument(
        "--backward_window",
        type=int,
        default=1,
        help="Backward window value in attention constraint",
    )
    group.add_argument(
        "--forward_window",
        type=int,
        default=3,
        help="Forward window value in attention constraint",
    )
    group.add_argument(
        "--use_teacher_forcing",
        type=str2bool,
        default=False,
        help="Whether to use teacher forcing",
    )
    # NOTE(review): the next three options are registered on the top-level
    # parser rather than the "Decoding related" group — only the help-text
    # grouping differs, parsing behavior is identical.
    parser.add_argument(
        "--speed_control_alpha",
        type=float,
        default=1.0,
        help="Alpha in FastSpeech to change the speed of generated speech",
    )
    parser.add_argument(
        "--noise_scale",
        type=float,
        default=0.667,
        help="Noise scale parameter for the flow in vits",
    )
    parser.add_argument(
        "--noise_scale_dur",
        type=float,
        default=0.8,
        help="Noise scale parameter for the stochastic duration predictor in vits",
    )
    group.add_argument(
        "--always_fix_seed",
        type=str2bool,
        default=False,
        help="Whether to always fix seed",
    )
    group = parser.add_argument_group("Vocoder related")
    group.add_argument(
        "--vocoder_config",
        type=str_or_none,
        help="Vocoder configuration file",
    )
    group.add_argument(
        "--vocoder_file",
        type=str_or_none,
        help="Vocoder parameter file",
    )
    group.add_argument(
        "--vocoder_tag",
        type=str,
        help="Pretrained vocoder tag. If specify this option, vocoder_config and "
        "vocoder_file will be overwritten",
    )
    return parser
def main(cmd=None):
    """Command-line entry point for TTS model inference."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # Drop the "config" entry injected by config_argparse; inference()
    # does not accept it.
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
| 25,855 | 33.428762 | 88 | py |
espnet | espnet-master/espnet2/bin/enh_inference_streaming.py | #!/usr/bin/env python3
import argparse
import logging
import math
import sys
from itertools import chain
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import humanfriendly
import numpy as np
import torch
import torch_complex
import yaml
from typeguard import check_argument_types
from espnet2.bin.enh_inference import (
build_model_from_args_and_file,
get_train_config,
recursive_dict_update,
)
from espnet2.fileio.sound_scp import SoundScpWriter
from espnet2.tasks.enh import EnhancementTask
from espnet2.tasks.enh_s2t import EnhS2TTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
EPS = torch.finfo(torch.get_default_dtype()).eps
class SeparateSpeechStreaming:
    """SeparateSpeechStreaming class. Separate a small audio chunk in streaming.

    Examples:
        >>> import soundfile
        >>> separate_speech = SeparateSpeechStreaming("enh_config.yml", "enh.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> lengths = torch.LongTensor(audio.shape[-1])
        >>> speech_sim_chunks = separate_speech.frame(wav)
        >>> output_chunks = [[] for ii in range(separate_speech.num_spk)]
        >>>
        >>> for chunk in speech_sim_chunks:
        >>>     output = separate_speech(chunk)
        >>>     for spk in range(separate_speech.num_spk):
        >>>         output_chunks[spk].append(output[spk])
        >>>
        >>> separate_speech.reset()
        >>> waves = [
        >>>     separate_speech.merge(chunks, length)
        >>>     for chunks in output_chunks ]
    """

    def __init__(
        self,
        train_config: Optional[Union[Path, str]] = None,
        model_file: Optional[Union[Path, str]] = None,
        inference_config: Optional[Union[Path, str]] = None,
        ref_channel: Optional[int] = None,
        device: str = "cpu",
        dtype: str = "float32",
        enh_s2t_task: bool = False,
    ):
        """Build the enhancement model and initialize the streaming state."""
        assert check_argument_types()
        task = EnhancementTask if not enh_s2t_task else EnhS2TTask
        # 1. Build Enh model
        if inference_config is None:
            enh_model, enh_train_args = task.build_model_from_file(
                train_config, model_file, device
            )
        else:
            # Overwrite model attributes: merge the inference-time config on
            # top of the training config, restricted to encoder/separator/
            # decoder (or their enh_* counterparts for the joint task).
            train_config = get_train_config(train_config, model_file=model_file)
            with train_config.open("r", encoding="utf-8") as f:
                train_args = yaml.safe_load(f)
            with Path(inference_config).open("r", encoding="utf-8") as f:
                infer_args = yaml.safe_load(f)
            if enh_s2t_task:
                arg_list = ("enh_encoder", "enh_separator", "enh_decoder")
            else:
                arg_list = ("encoder", "separator", "decoder")
            supported_keys = list(chain(*[[k, k + "_conf"] for k in arg_list]))
            for k in infer_args.keys():
                if k not in supported_keys:
                    raise ValueError(
                        "Only the following top-level keys are supported: %s"
                        % ", ".join(supported_keys)
                    )
            recursive_dict_update(train_args, infer_args, verbose=True)
            enh_train_args = argparse.Namespace(**train_args)
            enh_model = build_model_from_args_and_file(
                task, enh_train_args, model_file, device
            )
        if enh_s2t_task:
            # Only the enhancement sub-model is needed from the joint model.
            enh_model = enh_model.enh_model
        enh_model.to(dtype=getattr(torch, dtype)).eval()
        self.device = device
        self.dtype = dtype
        self.enh_train_args = enh_train_args
        self.enh_model = enh_model
        self.num_spk = enh_model.num_spk
        # NOTE(review): this rebinds "task" (previously the Task class) and
        # the value is never used afterwards — dead store kept for fidelity.
        task = "enhancement" if self.num_spk == 1 else "separation"
        # reference channel for processing multi-channel speech
        if ref_channel is not None:
            logging.info(
                "Overwrite enh_model.separator.ref_channel with {}".format(ref_channel)
            )
            enh_model.separator.ref_channel = ref_channel
            self.ref_channel = ref_channel
        else:
            self.ref_channel = enh_model.ref_channel
        # Separator state carried across streaming chunks; None means "fresh".
        self.streaming_states = None

    def frame(self, audio):
        """Split a full waveform into streaming-sized chunks."""
        return self.enh_model.encoder.streaming_frame(audio)

    def merge(self, chunks, ilens=None):
        """Merge per-chunk outputs back into a full waveform."""
        return self.enh_model.decoder.streaming_merge(chunks, ilens=ilens)

    def reset(self):
        """Clear the separator's streaming state (call between utterances)."""
        self.streaming_states = None

    @torch.no_grad()
    def __call__(
        self, speech_mix: Union[torch.Tensor, np.ndarray], fs: int = 8000
    ) -> List[torch.Tensor]:
        """Inference

        Args:
            speech_mix: Input speech data (Batch, Nsamples [, Channels])
            fs: sample rate

        Returns:
            [separated_audio1, separated_audio2, ...]
        """
        assert check_argument_types()
        # Input as audio signal
        if isinstance(speech_mix, np.ndarray):
            speech_mix = torch.as_tensor(speech_mix)
        assert speech_mix.dim() > 1, speech_mix.size()
        batch_size = speech_mix.size(0)
        speech_mix = speech_mix.to(getattr(torch, self.dtype))
        # a. To device
        speech_mix = to_device(speech_mix, device=self.device)
        # b. Enhancement/Separation Forward
        # frame_feature: (B, 1, F)
        frame_feature = self.enh_model.encoder.forward_streaming(speech_mix)
        # frame_separated: list of num_spk [(B, 1, F)]
        # The separator state is threaded through consecutive calls.
        (
            frame_separated,
            self.streaming_states,
            _,
        ) = self.enh_model.separator.forward_streaming(
            frame_feature, self.streaming_states
        )
        # frame_separated: list of num_spk [(B, frame_size)]
        waves = [self.enh_model.decoder.forward_streaming(f) for f in frame_separated]
        assert len(waves) == self.num_spk, (len(waves), self.num_spk)
        assert len(waves[0]) == batch_size, (len(waves[0]), batch_size)
        return waves

    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build SeparateSpeech instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.

        Returns:
            SeparateSpeech: SeparateSpeech instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))
        return SeparateSpeechStreaming(**kwargs)
def humanfriendly_or_none(value: str):
    """Parse a human-friendly size string, mapping the literal "none" to None.

    Used as an argparse ``type=`` callable, e.g. for ``--fs``.
    """
    none_aliases = ("none", "None", "NONE")
    if value in none_aliases:
        return None
    return humanfriendly.parse_size(value)
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    fs: int,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    train_config: Optional[str],
    model_file: Optional[str],
    model_tag: Optional[str],
    inference_config: Optional[str],
    allow_variable_data_keys: bool,
    ref_channel: Optional[int],
    enh_s2t_task: bool,
):
    """Run streaming enhancement/separation over a dataset and write wav files.

    Each utterance is split into chunks (via ``SeparateSpeechStreaming.frame``)
    to simulate streaming, processed chunk by chunk, merged back, peak-normalized
    to 0.9, and written to ``{output_dir}/wavs/{spk}`` with scp indexes.
    Only batch_size == 1 and at most one GPU are supported.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build separate_speech
    separate_speech_kwargs = dict(
        train_config=train_config,
        model_file=model_file,
        inference_config=inference_config,
        ref_channel=ref_channel,
        device=device,
        dtype=dtype,
        enh_s2t_task=enh_s2t_task,
    )
    separate_speech = SeparateSpeechStreaming.from_pretrained(
        model_tag=model_tag,
        **separate_speech_kwargs,
    )
    # 3. Build data-iterator
    loader = EnhancementTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=EnhancementTask.build_preprocess_fn(
            separate_speech.enh_train_args, False
        ),
        collate_fn=EnhancementTask.build_collate_fn(
            separate_speech.enh_train_args, False
        ),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Start dataset for-loop
    output_dir = Path(output_dir).expanduser().resolve()
    # One scp writer per output speaker: wavs/{spk}/..., spk{spk}.scp
    writers = []
    for i in range(separate_speech.num_spk):
        writers.append(
            SoundScpWriter(f"{output_dir}/wavs/{i + 1}", f"{output_dir}/spk{i + 1}.scp")
        )
    import tqdm
    for i, (keys, batch) in tqdm.tqdm(enumerate(loader)):
        logging.info(f"[{i}] Enhancing {keys}")
        assert isinstance(batch, dict), type(batch)
        assert all(isinstance(s, str) for s in keys), keys
        _bs = len(next(iter(batch.values())))
        assert len(keys) == _bs, f"{len(keys)} != {_bs}"
        batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
        speech = batch["speech_mix"]
        lengths = speech.new_full(
            [batch_size], dtype=torch.long, fill_value=speech.size(1)
        )
        # split continuous speech into small chunks to simulate streaming
        speech_sim_chunks = separate_speech.frame(speech)
        output_chunks = [[] for ii in range(separate_speech.num_spk)]
        # the main loop for streaming processing
        for chunk in speech_sim_chunks:
            # process a single chunk
            output = separate_speech(chunk, fs=fs)
            for channel in range(separate_speech.num_spk):
                # append processed chunks to output channels
                output_chunks[channel].append(output[channel])
        # reset separator states after processing the whole utterance
        separate_speech.reset()
        # merge chunks back into one waveform per speaker, then peak-normalize
        # each waveform to a maximum amplitude of 0.9 before writing.
        waves = [separate_speech.merge(chunks, lengths) for chunks in output_chunks]
        waves = [
            (w / abs(w).max(dim=1, keepdim=True)[0] * 0.9).cpu().numpy() for w in waves
        ]
        for spk, w in enumerate(waves):
            for b in range(batch_size):
                writers[spk][keys[b]] = fs, w[b]
    for writer in writers:
        writer.close()
def get_parser():
    """Build the CLI argument parser for streaming frontend inference.

    Returns:
        config_argparse.ArgumentParser: parser whose parsed namespace matches
        the keyword arguments of :func:`inference` (plus ``config``, which is
        consumed by ``config_argparse`` itself).
    """
    parser = config_argparse.ArgumentParser(
        description="Frontend inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--fs", type=humanfriendly_or_none, default=8000, help="Sampling rate"
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    # NOTE(review): a dead, immediately-overwritten "Output data related" group
    # was removed here; it held no arguments and never appeared in --help.
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--train_config",
        type=str,
        help="Training configuration file",
    )
    group.add_argument(
        "--model_file",
        type=str,
        help="Model parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, train_config and "
        "model_file will be overwritten",
    )
    group.add_argument(
        "--inference_config",
        type=str_or_none,
        default=None,
        help="Optional configuration file for overwriting enh model attributes "
        "during inference",
    )
    group.add_argument(
        "--enh_s2t_task",
        type=str2bool,
        default=False,
        help="enhancement and asr joint model",
    )
    group = parser.add_argument_group("Data loading related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group = parser.add_argument_group("SeparateSpeech related")
    group.add_argument(
        "--ref_channel",
        type=int,
        default=None,
        help="If not None, this will overwrite the ref_channel defined in the "
        "separator module (for multi-channel speech processing)",
    )
    return parser
def main(cmd=None):
    """CLI entry point: parse arguments and run streaming inference."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # "config" is consumed by config_argparse itself; drop it before dispatch.
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
| 14,477 | 30.680525 | 88 | py |
espnet | espnet-master/espnet2/bin/asr_transducer_inference.py | #!/usr/bin/env python3
""" Inference class definition for Transducer models."""
from __future__ import annotations
import argparse
import logging
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from packaging.version import parse as V
from typeguard import check_argument_types, check_return_type
from espnet2.asr_transducer.beam_search_transducer import (
BeamSearchTransducer,
Hypothesis,
)
from espnet2.asr_transducer.frontend.online_audio_processor import OnlineAudioProcessor
from espnet2.asr_transducer.utils import TooShortUttError
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.asr_transducer import ASRTransducerTask
from espnet2.tasks.lm import LMTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
class Speech2Text:
    """Speech2Text class for Transducer models.
    Args:
        asr_train_config: ASR model training config path.
        asr_model_file: ASR model path.
        beam_search_config: Beam search config path.
        lm_train_config: Language Model training config path.
        lm_file: Language Model config path.
        token_type: Type of token units.
        bpemodel: BPE model path.
        device: Device to use for inference.
        beam_size: Size of beam during search.
        dtype: Data type.
        lm_weight: Language model weight.
        quantize_asr_model: Whether to apply dynamic quantization to ASR model.
        quantize_modules: List of module names to apply dynamic quantization on.
        quantize_dtype: Dynamic quantization data type.
        nbest: Number of final hypothesis.
        streaming: Whether to perform chunk-by-chunk inference.
        decoding_window: Size of the decoding window (in milliseconds).
        left_context: Number of previous frames the attention module can see
            in current chunk (used by Conformer and Branchformer block).
    """
    def __init__(
        self,
        asr_train_config: Union[Path, str] = None,
        asr_model_file: Union[Path, str] = None,
        beam_search_config: Dict[str, Any] = None,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        beam_size: int = 5,
        dtype: str = "float32",
        lm_weight: float = 1.0,
        quantize_asr_model: bool = False,
        quantize_modules: List[str] = None,
        quantize_dtype: str = "qint8",
        nbest: int = 1,
        streaming: bool = False,
        decoding_window: int = 640,
        left_context: int = 32,
    ) -> None:
        """Construct a Speech2Text object."""
        super().__init__()
        assert check_argument_types()
        # 1. Build the ASR Transducer model from config + checkpoint.
        asr_model, asr_train_args = ASRTransducerTask.build_model_from_file(
            asr_train_config, asr_model_file, device
        )
        # 2. [Optional] Post-training dynamic quantization.
        #    Only Linear and LSTM modules are supported; default is Linear only.
        if quantize_asr_model:
            if quantize_modules is not None:
                if not all([q in ["LSTM", "Linear"] for q in quantize_modules]):
                    raise ValueError(
                        "Only 'Linear' and 'LSTM' modules are currently supported"
                        " by PyTorch and in --quantize_modules"
                    )
                q_config = set([getattr(torch.nn, q) for q in quantize_modules])
            else:
                q_config = {torch.nn.Linear}
            if quantize_dtype == "float16" and (V(torch.__version__) < V("1.5.0")):
                raise ValueError(
                    "float16 dtype for dynamic quantization is not supported with torch"
                    " version < 1.5.0. Switching to qint8 dtype instead."
                )
            q_dtype = getattr(torch, quantize_dtype)
            asr_model = torch.quantization.quantize_dynamic(
                asr_model, q_config, dtype=q_dtype
            ).eval()
        else:
            asr_model.to(dtype=getattr(torch, dtype)).eval()
        # For RWKV decoders with rescale_every > 0: divide each block's
        # projection weights in place, once, and flag via `rescaled_layers`.
        # NOTE(review): assumes the checkpoint stores unrescaled weights —
        # confirm against the RWKV decoder training code.
        if hasattr(asr_model.decoder, "rescale_every") and (
            asr_model.decoder.rescale_every > 0
        ):
            rescale_every = asr_model.decoder.rescale_every
            with torch.no_grad():
                for block_id, block in enumerate(asr_model.decoder.rwkv_blocks):
                    block.att.proj_output.weight.div_(
                        2 ** int(block_id // rescale_every)
                    )
                    block.ffn.proj_value.weight.div_(
                        2 ** int(block_id // rescale_every)
                    )
            asr_model.decoder.rescaled_layers = True
        # 3. [Optional] Build the language model used during beam search.
        if lm_train_config is not None:
            lm, lm_train_args = LMTask.build_model_from_file(
                lm_train_config, lm_file, device
            )
            lm_scorer = lm.lm
        else:
            lm_scorer = None
        # 4. Build BeamSearch object
        if beam_search_config is None:
            beam_search_config = {}
        beam_search = BeamSearchTransducer(
            asr_model.decoder,
            asr_model.joint_network,
            beam_size,
            lm=lm_scorer,
            lm_weight=lm_weight,
            nbest=nbest,
            **beam_search_config,
        )
        # 5. Build the token <-> id converters; fall back to training settings.
        token_list = asr_model.token_list
        if token_type is None:
            token_type = asr_train_args.token_type
        if bpemodel is None:
            bpemodel = asr_train_args.bpemodel
        if token_type == "bpe":
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        converter = TokenIDConverter(token_list=token_list)
        self.asr_model = asr_model
        self.asr_train_args = asr_train_args
        self.device = device
        self.dtype = dtype
        self.nbest = nbest
        self.converter = converter
        self.tokenizer = tokenizer
        self.beam_search = beam_search
        # Streaming only takes effect with a non-negative decoding window.
        self.streaming = streaming and decoding_window >= 0
        self.asr_model.encoder.dynamic_chunk_training = False
        self.left_context = max(left_context, 0)
        if streaming:
            # The audio processor buffers raw audio and emits feature chunks
            # sized for one decoding window.
            self.audio_processor = OnlineAudioProcessor(
                asr_model._extract_feats,
                asr_model.normalize,
                decoding_window,
                asr_model.encoder.embed.subsampling_factor,
                asr_train_args.frontend_conf,
                device,
            )
            self.reset_streaming_cache()
    def reset_streaming_cache(self) -> None:
        """Reset Speech2Text parameters.

        Clears encoder, beam-search, and audio-processor caches and resets
        the processed-frame counter; called between streamed utterances.
        """
        self.asr_model.encoder.reset_cache(self.left_context, device=self.device)
        self.beam_search.reset_cache()
        self.audio_processor.reset_cache()
        self.num_processed_frames = torch.tensor([[0]], device=self.device)
    @torch.no_grad()
    def streaming_decode(
        self,
        speech: Union[torch.Tensor, np.ndarray],
        is_final: bool = False,
    ) -> List[Hypothesis]:
        """Speech2Text streaming call.
        Args:
            speech: Chunk of speech data. (S)
            is_final: Whether speech corresponds to the final chunk of data.
        Returns:
            nbest_hypothesis: N-best hypothesis.
        """
        nbest_hyps = []
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)
        speech = speech.to(device=self.device)
        feats, feats_length = self.audio_processor.compute_features(
            speech.to(getattr(torch, self.dtype)), is_final
        )
        # Encode only this chunk, attending to `left_context` cached frames.
        enc_out = self.asr_model.encoder.chunk_forward(
            feats,
            feats_length,
            self.num_processed_frames,
            left_context=self.left_context,
        )
        self.num_processed_frames += enc_out.size(1)
        nbest_hyps = self.beam_search(enc_out[0], is_final=is_final)
        if is_final:
            # End of utterance: clear all streaming caches for the next one.
            self.reset_streaming_cache()
        return nbest_hyps
    @torch.no_grad()
    def __call__(self, speech: Union[torch.Tensor, np.ndarray]) -> List[Hypothesis]:
        """Speech2Text call.
        Args:
            speech: Speech data. (S)
        Returns:
            nbest_hypothesis: N-best hypothesis.
        """
        assert check_argument_types()
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)
        # (S) -> (1, S): add the batch dimension expected by the frontend.
        speech = speech.unsqueeze(0).to(
            dtype=getattr(torch, self.dtype), device=self.device
        )
        lengths = speech.new_full(
            [1], dtype=torch.long, fill_value=speech.size(1), device=self.device
        )
        feats, feats_length = self.asr_model._extract_feats(speech, lengths)
        if self.asr_model.normalize is not None:
            feats, feats_length = self.asr_model.normalize(feats, feats_length)
        enc_out, _ = self.asr_model.encoder(feats, feats_length)
        nbest_hyps = self.beam_search(enc_out[0])
        return nbest_hyps
    def hypotheses_to_results(self, nbest_hyps: List[Hypothesis]) -> List[Any]:
        """Build partial or final results from the hypotheses.
        Args:
            nbest_hyps: N-best hypothesis.
        Returns:
            results: Results containing different representation for the hypothesis.
        """
        results = []
        for hyp in nbest_hyps:
            # Drop blank symbols (id 0) before converting ids to tokens.
            token_int = list(filter(lambda x: x != 0, hyp.yseq))
            token = self.converter.ids2tokens(token_int)
            if self.tokenizer is not None:
                text = self.tokenizer.tokens2text(token)
            else:
                text = None
            results.append((text, token, token_int, hyp))
        assert check_return_type(results)
        return results
    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ) -> Speech2Text:
        """Build Speech2Text instance from the pretrained model.
        Args:
            model_tag: Model tag of the pretrained models.
        Return:
            : Speech2Text instance.
        """
        if model_tag is not None:
            # Resolve the tag to local files via the optional model zoo.
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))
        return Speech2Text(**kwargs)
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    lm_weight: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    asr_train_config: Optional[str],
    asr_model_file: Optional[str],
    beam_search_config: Optional[dict],
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    model_tag: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    key_file: Optional[str],
    allow_variable_data_keys: bool,
    quantize_asr_model: Optional[bool],
    quantize_modules: Optional[List[str]],
    quantize_dtype: Optional[str],
    streaming: bool,
    decoding_window: int,
    left_context: int,
    display_hypotheses: bool,
) -> None:
    """Transducer model inference.
    Args:
        output_dir: Output directory path.
        batch_size: Batch decoding size.
        dtype: Data type.
        beam_size: Beam size.
        ngpu: Number of GPUs.
        seed: Random number generator seed.
        lm_weight: Weight of language model.
        nbest: Number of final hypothesis.
        num_workers: Number of workers.
        log_level: Level of verbose for logs.
        data_path_and_name_and_type:
        asr_train_config: ASR model training config path.
        asr_model_file: ASR model path.
        beam_search_config: Beam search config path.
        lm_train_config: Language Model training config path.
        lm_file: Language Model path.
        model_tag: Model tag.
        token_type: Type of token units.
        bpemodel: BPE model path.
        key_file: File key.
        allow_variable_data_keys: Whether to allow variable data keys.
        quantize_asr_model: Whether to apply dynamic quantization to ASR model.
        quantize_modules: List of module names to apply dynamic quantization on.
        quantize_dtype: Dynamic quantization data type.
        streaming: Whether to perform chunk-by-chunk inference.
        decoding_window: Audio length (in milliseconds) to process during decoding.
        left_context: Number of previous frames the attention module can see
            in current chunk (used by Conformer and Branchformer block).
        display_hypotheses: Whether to display (partial and full) hypotheses.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build speech2text
    speech2text_kwargs = dict(
        asr_train_config=asr_train_config,
        asr_model_file=asr_model_file,
        beam_search_config=beam_search_config,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        dtype=dtype,
        beam_size=beam_size,
        lm_weight=lm_weight,
        nbest=nbest,
        quantize_asr_model=quantize_asr_model,
        quantize_modules=quantize_modules,
        quantize_dtype=quantize_dtype,
        streaming=streaming,
        decoding_window=decoding_window,
        left_context=left_context,
    )
    speech2text = Speech2Text.from_pretrained(
        model_tag=model_tag,
        **speech2text_kwargs,
    )
    if speech2text.streaming:
        # Number of raw-audio samples fed per streaming step.
        decoding_samples = speech2text.audio_processor.decoding_samples
    # 3. Build data-iterator
    loader = ASRTransducerTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=ASRTransducerTask.build_preprocess_fn(
            speech2text.asr_train_args, False
        ),
        collate_fn=ASRTransducerTask.build_collate_fn(
            speech2text.asr_train_args, False
        ),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Start for-loop over the dataset; write n-best outputs per utterance.
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            assert len(batch.keys()) == 1
            try:
                if speech2text.streaming:
                    # Feed fixed-size windows; the last (possibly shorter)
                    # window is flagged `is_final` and yields the final n-best.
                    speech = batch["speech"]
                    decoding_steps = len(speech) // decoding_samples
                    for i in range(0, decoding_steps + 1, 1):
                        _start = i * decoding_samples
                        if i == decoding_steps:
                            final_hyps = speech2text.streaming_decode(
                                speech[i * decoding_samples : len(speech)],
                                is_final=True,
                            )
                        else:
                            part_hyps = speech2text.streaming_decode(
                                speech[
                                    (i * decoding_samples) : _start + decoding_samples
                                ],
                                is_final=False,
                            )
                            if display_hypotheses:
                                _result = speech2text.hypotheses_to_results(part_hyps)
                                _length = (i + 1) * decoding_window
                                logging.info(
                                    f"Current best hypothesis (0-{_length}ms): "
                                    f"{keys}: {_result[0][0]}"
                                )
                else:
                    final_hyps = speech2text(**batch)
                results = speech2text.hypotheses_to_results(final_hyps)
                if display_hypotheses:
                    logging.info(f"Final best hypothesis: {keys}: {results[0][0]}")
            except TooShortUttError as e:
                # Utterance too short for the frontend: emit placeholder output.
                logging.warning(f"Utterance {keys} {e}")
                hyp = Hypothesis(score=0.0, yseq=[], dec_state=None)
                results = [[" ", ["<space>"], [2], hyp]] * nbest
            key = keys[0]
            for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
                ibest_writer = writer[f"{n}best_recog"]
                ibest_writer["token"][key] = " ".join(token)
                ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                ibest_writer["score"][key] = str(hyp.score)
                if text is not None:
                    ibest_writer["text"][key] = text
def get_parser():
    """Get Transducer model inference parser.

    Returns:
        config_argparse.ArgumentParser: parser whose parsed namespace matches
        the keyword arguments of :func:`inference` (plus ``config``).
    """
    parser = config_argparse.ArgumentParser(
        description="ASR Transducer Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--asr_train_config",
        type=str,
        help="ASR training configuration",
    )
    group.add_argument(
        "--asr_model_file",
        type=str,
        help="ASR model parameter file",
    )
    group.add_argument(
        "--lm_train_config",
        type=str,
        help="LM training configuration",
    )
    group.add_argument(
        "--lm_file",
        type=str,
        help="LM parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )
    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=5, help="Beam size")
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument(
        "--beam_search_config",
        default={},
        help="The keyword arguments for transducer beam search.",
    )
    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for ASR model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )
    group = parser.add_argument_group("Dynamic quantization related")
    # NOTE: these boolean flags use str2bool (not `type=bool`): with argparse,
    # `type=bool` turns any non-empty string (including "False") into True.
    # They are also registered on their group so --help sections are populated.
    group.add_argument(
        "--quantize_asr_model",
        type=str2bool,
        default=False,
        help="Apply dynamic quantization to ASR model.",
    )
    group.add_argument(
        "--quantize_modules",
        nargs="*",
        default=None,
        help="""Module names to apply dynamic quantization on.
        The module names are provided as a list, where each name is separated
        by a comma (e.g.: --quantize-config=[Linear,LSTM,GRU]).
        Each specified name should be an attribute of 'torch.nn', e.g.:
        torch.nn.Linear, torch.nn.LSTM, torch.nn.GRU, ...""",
    )
    group.add_argument(
        "--quantize_dtype",
        type=str,
        default="qint8",
        choices=["float16", "qint8"],
        help="Dtype for dynamic quantization.",
    )
    group = parser.add_argument_group("Streaming related")
    group.add_argument(
        "--streaming",
        type=str2bool,
        default=False,
        help="Whether to perform chunk-by-chunk inference.",
    )
    group.add_argument(
        "--decoding_window",
        type=int,
        default=640,
        help="Audio length (in milliseconds) to process during decoding.",
    )
    group.add_argument(
        "--left_context",
        type=int,
        default=32,
        help="""Number of previous frames (AFTER subsampling) the attention module
        can see in current chunk (used by Conformer and Branchformer block).""",
    )
    group.add_argument(
        "--display_hypotheses",
        type=str2bool,
        default=False,
        help="""Whether to display hypotheses during inference. If streaming=True,
        partial hypotheses will also be shown.""",
    )
    return parser
def main(cmd=None):
    """CLI entry point: parse arguments and run Transducer decoding."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # "config" is consumed by config_argparse itself; drop it before dispatch.
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
| 23,356 | 31.804775 | 88 | py |
espnet | espnet-master/espnet2/bin/st_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.enh_s2t import EnhS2TTask
from espnet2.tasks.lm import LMTask
from espnet2.tasks.st import STTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.beam_search import BeamSearch, Hypothesis
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.cli_utils import get_commandline_args
class Speech2Text:
    """Speech2Text class
    Examples:
        >>> import soundfile
        >>> speech2text = Speech2Text("st_config.yml", "st.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> speech2text(audio)
        [(text, token, token_int, hypothesis object), ...]
    """
    def __init__(
        self,
        st_train_config: Union[Path, str] = None,
        st_model_file: Union[Path, str] = None,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        ngram_scorer: str = "full",
        ngram_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        maxlenratio: float = 0.0,
        minlenratio: float = 0.0,
        batch_size: int = 1,
        dtype: str = "float32",
        beam_size: int = 20,
        lm_weight: float = 1.0,
        ngram_weight: float = 0.9,
        penalty: float = 0.0,
        nbest: int = 1,
        enh_s2t_task: bool = False,
    ):
        assert check_argument_types()
        task = STTask if not enh_s2t_task else EnhS2TTask
        # 1. Build ST model
        scorers = {}
        st_model, st_train_args = task.build_model_from_file(
            st_train_config, st_model_file, device
        )
        if enh_s2t_task:
            # Pull the s2t submodel's decoding attributes up onto the joint
            # model so the code below can use them uniformly.
            st_model.inherite_attributes(
                inherite_s2t_attrs=[
                    "ctc",
                    "decoder",
                    "eos",
                    "joint_network",
                    "sos",
                    "token_list",
                    "use_transducer_decoder",
                ]
            )
        st_model.to(dtype=getattr(torch, dtype)).eval()
        decoder = st_model.decoder
        token_list = st_model.token_list
        scorers.update(
            decoder=decoder,
            length_bonus=LengthBonus(len(token_list)),
        )
        # 2. Build Language model
        if lm_train_config is not None:
            lm, lm_train_args = LMTask.build_model_from_file(
                lm_train_config, lm_file, device
            )
            scorers["lm"] = lm.lm
        # 3. Build ngram model
        if ngram_file is not None:
            if ngram_scorer == "full":
                from espnet.nets.scorers.ngram import NgramFullScorer
                ngram = NgramFullScorer(ngram_file, token_list)
            else:
                from espnet.nets.scorers.ngram import NgramPartScorer
                ngram = NgramPartScorer(ngram_file, token_list)
        else:
            ngram = None
        scorers["ngram"] = ngram
        # 4. Build BeamSearch object
        weights = dict(
            decoder=1.0,
            lm=lm_weight,
            ngram=ngram_weight,
            length_bonus=penalty,
        )
        beam_search = BeamSearch(
            beam_size=beam_size,
            weights=weights,
            scorers=scorers,
            sos=st_model.sos,
            eos=st_model.eos,
            vocab_size=len(token_list),
            token_list=token_list,
            pre_beam_score_key="full",
        )
        # TODO(karita): make all scorers batchfied
        # Swap in the faster BatchBeamSearch only when every full scorer
        # supports batched scoring.
        if batch_size == 1:
            non_batch = [
                k
                for k, v in beam_search.full_scorers.items()
                if not isinstance(v, BatchScorerInterface)
            ]
            if len(non_batch) == 0:
                beam_search.__class__ = BatchBeamSearch
                logging.info("BatchBeamSearch implementation is selected.")
            else:
                logging.warning(
                    f"As non-batch scorers {non_batch} are found, "
                    f"fall back to non-batch implementation."
                )
        beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
        for scorer in scorers.values():
            if isinstance(scorer, torch.nn.Module):
                scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
        logging.info(f"Beam_search: {beam_search}")
        logging.info(f"Decoding device={device}, dtype={dtype}")
        # 4. [Optional] Build Text converter: e.g. bpe-sym -> Text
        if token_type is None:
            token_type = st_train_args.token_type
        if bpemodel is None:
            bpemodel = st_train_args.bpemodel
        if token_type is None:
            tokenizer = None
        elif token_type == "bpe":
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        converter = TokenIDConverter(token_list=token_list)
        logging.info(f"Text tokenizer: {tokenizer}")
        self.st_model = st_model
        self.st_train_args = st_train_args
        self.converter = converter
        self.tokenizer = tokenizer
        self.beam_search = beam_search
        self.maxlenratio = maxlenratio
        self.minlenratio = minlenratio
        self.device = device
        self.dtype = dtype
        self.nbest = nbest
    @torch.no_grad()
    def __call__(
        self, speech: Union[torch.Tensor, np.ndarray]
    ) -> List[Tuple[Optional[str], List[str], List[int], Hypothesis]]:
        """Inference
        Args:
            speech: Input speech data
        Returns:
            text, token, token_int, hyp
        """
        assert check_argument_types()
        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)
        # data: (Nsamples,) -> (1, Nsamples)
        speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
        # lengths: (1,)
        lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
        batch = {"speech": speech, "speech_lengths": lengths}
        # a. To device
        batch = to_device(batch, device=self.device)
        # b. Forward Encoder
        enc, _ = self.st_model.encode(**batch)
        assert len(enc) == 1, len(enc)
        # c. Pass the encoder result to the beam search
        nbest_hyps = self.beam_search(
            x=enc[0], maxlenratio=self.maxlenratio, minlenratio=self.minlenratio
        )
        nbest_hyps = nbest_hyps[: self.nbest]
        results = []
        for hyp in nbest_hyps:
            assert isinstance(hyp, Hypothesis), type(hyp)
            # remove sos/eos and get results
            token_int = hyp.yseq[1:-1].tolist()
            # remove blank symbol id, which is assumed to be 0
            token_int = list(filter(lambda x: x != 0, token_int))
            # Change integer-ids to tokens
            token = self.converter.ids2tokens(token_int)
            if self.tokenizer is not None:
                text = self.tokenizer.tokens2text(token)
            else:
                text = None
            results.append((text, token, token_int, hyp))
        assert check_return_type(results)
        return results
    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build Speech2Text instance from the pretrained model.
        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
        Returns:
            Speech2Text: Speech2Text instance.
        """
        if model_tag is not None:
            # Resolve the tag to local files via the optional model zoo.
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))
        return Speech2Text(**kwargs)
def inference(
    output_dir: str,
    maxlenratio: float,
    minlenratio: float,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    lm_weight: float,
    ngram_weight: float,
    penalty: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    st_train_config: Optional[str],
    st_model_file: Optional[str],
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    ngram_file: Optional[str],
    model_tag: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    enh_s2t_task: bool,
):
    """Run ST decoding over a dataset and write N-best results under output_dir.

    Only batch_size == 1 and single-GPU (or CPU) decoding are supported;
    word-level LMs are not implemented.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if word_lm_train_config is not None:
        raise NotImplementedError("Word LM is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build speech2text
    speech2text_kwargs = dict(
        st_train_config=st_train_config,
        st_model_file=st_model_file,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        ngram_file=ngram_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        maxlenratio=maxlenratio,
        minlenratio=minlenratio,
        dtype=dtype,
        beam_size=beam_size,
        lm_weight=lm_weight,
        ngram_weight=ngram_weight,
        penalty=penalty,
        nbest=nbest,
        enh_s2t_task=enh_s2t_task,
    )
    speech2text = Speech2Text.from_pretrained(
        model_tag=model_tag,
        **speech2text_kwargs,
    )
    # 3. Build data-iterator
    loader = STTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=STTask.build_preprocess_fn(speech2text.st_train_args, False),
        collate_fn=STTask.build_collate_fn(speech2text.st_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Start for-loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # Drop *_lengths entries and unbatch (batch_size is always 1 here).
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            # N-best list of (text, token, token_int, hyp_object)
            try:
                results = speech2text(**batch)
            except TooShortUttError as e:
                # Emit a placeholder hypothesis so downstream scoring still works.
                logging.warning(f"Utterance {keys} {e}")
                hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
                results = [[" ", ["<space>"], [2], hyp]] * nbest
            # Only supporting batch_size==1
            key = keys[0]
            for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
                # Create a directory: outdir/{n}best_recog
                ibest_writer = writer[f"{n}best_recog"]
                # Write the result to each file
                ibest_writer["token"][key] = " ".join(token)
                ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                ibest_writer["score"][key] = str(hyp.score)
                if text is not None:
                    ibest_writer["text"][key] = text
def get_parser():
    """Build the command-line argument parser for ST decoding."""
    parser = config_argparse.ArgumentParser(
        description="ST Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--st_train_config",
        type=str,
        help="ST training configuration",
    )
    group.add_argument(
        "--st_model_file",
        type=str,
        help="ST model parameter file",
    )
    group.add_argument(
        "--lm_train_config",
        type=str,
        help="LM training configuration",
    )
    group.add_argument(
        "--lm_file",
        type=str,
        help="LM parameter file",
    )
    group.add_argument(
        "--word_lm_train_config",
        type=str,
        help="Word LM training configuration",
    )
    group.add_argument(
        "--word_lm_file",
        type=str,
        help="Word LM parameter file",
    )
    group.add_argument(
        "--ngram_file",
        type=str,
        help="N-gram parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )
    group.add_argument(
        "--enh_s2t_task",
        type=str2bool,
        default=False,
        help="enhancement and asr joint model",
    )
    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    group.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain max output length. "
        "If maxlenratio=0.0 (default), it uses a end-detect "
        "function "
        "to automatically find maximum hypothesis lengths."
        "If maxlenratio<0.0, its absolute value is interpreted"
        "as a constant max output length",
    )
    group.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")
    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for ST model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )
    return parser
def main(cmd=None):
    """Command-line entry point for ST decoding."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # "config" is consumed by config_argparse and must not reach inference().
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
| 17,745 | 31.206897 | 87 | py |
espnet | espnet-master/espnet2/bin/lm_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.quantization
from typeguard import check_argument_types, check_return_type
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.lm import LMTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.text.whisper_token_id_converter import OpenAIWhisperTokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.beam_search import BeamSearch, Hypothesis
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.cli_utils import get_commandline_args
# Alias for typing: each entry is (text, tokens, token ids, hypothesis object),
# where text is None when no tokenizer is configured.
ListOfHypothesis = List[
    Tuple[
        Optional[str],
        List[str],
        List[int],
        Hypothesis,
    ]
]
class GenerateText:
    """GenerateText class
    Examples:
        >>> generatetext = GenerateText(
                lm_train_config="lm_config.yaml",
                lm_file="lm.pth",
                token_type="bpe",
                bpemodel="bpe.model",
            )
        >>> prompt = "I have travelled to many "
        >>> generatetext(prompt)
        [(text, token, token_int, hypothesis object), ...]
    """
    def __init__(
        self,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        ngram_scorer: str = "full",
        ngram_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        maxlen: int = 100,
        minlen: int = 0,
        batch_size: int = 1,
        dtype: str = "float32",
        beam_size: int = 20,
        ngram_weight: float = 0.0,
        penalty: float = 0.0,
        nbest: int = 1,
        quantize_lm: bool = False,
        # NOTE(review): mutable default list; it is only read (never mutated), so safe.
        quantize_modules: List[str] = ["Linear"],
        quantize_dtype: str = "qint8",
    ):
        """Build the LM, optional ngram scorer, beam search, and tokenizer."""
        assert check_argument_types()
        # 1. Build language model
        lm, lm_train_args = LMTask.build_model_from_file(
            lm_train_config, lm_file, device
        )
        lm.to(dtype=getattr(torch, dtype)).eval()
        if quantize_lm:
            logging.info("Use quantized LM for decoding.")
            lm = torch.quantization.quantize_dynamic(
                lm,
                qconfig_spec=set([getattr(torch.nn, q) for q in quantize_modules]),
                dtype=getattr(torch, quantize_dtype),
            )
        token_list = lm_train_args.token_list
        # 2. Build ngram model
        if ngram_file is not None:
            if ngram_scorer == "full":
                from espnet.nets.scorers.ngram import NgramFullScorer
                ngram = NgramFullScorer(ngram_file, token_list)
            else:
                from espnet.nets.scorers.ngram import NgramPartScorer
                ngram = NgramPartScorer(ngram_file, token_list)
        else:
            ngram = None
        # 3. Build BeamSearch object
        scorers = dict(
            lm=lm.lm,
            ngram=ngram,
            length_bonus=LengthBonus(len(token_list)),
        )
        weights = dict(
            lm=1.0,
            ngram=ngram_weight,
            length_bonus=penalty,
        )
        beam_search = BeamSearch(
            scorers=scorers,
            weights=weights,
            beam_size=beam_size,
            vocab_size=len(token_list),
            sos=lm.sos,
            eos=lm.eos,
            token_list=token_list,
            pre_beam_score_key="full",
        )
        # TODO(karita): make all scorers batchfied
        if batch_size == 1:
            # Switch to the batched implementation only when every full scorer
            # supports batching.
            non_batch = [
                k
                for k, v in beam_search.full_scorers.items()
                if not isinstance(v, BatchScorerInterface)
            ]
            if len(non_batch) == 0:
                beam_search.__class__ = BatchBeamSearch
                logging.info("BatchBeamSearch implementation is selected.")
            else:
                logging.warning(
                    f"As non-batch scorers {non_batch} are found, "
                    f"fall back to non-batch implementation."
                )
        beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
        for scorer in scorers.values():
            if isinstance(scorer, torch.nn.Module):
                scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
        logging.info(f"Beam_search: {beam_search}")
        logging.info(f"Decoding device={device}, dtype={dtype}")
        # 4. [Optional] Build Text converter: e.g. bpe-sym -> Text
        if token_type is None:
            token_type = lm_train_args.token_type
        if bpemodel is None:
            bpemodel = lm_train_args.bpemodel
        if token_type is None:
            tokenizer = None
        elif (
            token_type == "bpe"
            or token_type == "hugging_face"
            or "whisper" in token_type
        ):
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        if bpemodel not in ["whisper_en", "whisper_multilingual"]:
            converter = TokenIDConverter(token_list=token_list)
        else:
            # Whisper models need their own converter and a fixed hyp primer.
            converter = OpenAIWhisperTokenIDConverter(model_type=bpemodel)
            beam_search.set_hyp_primer(
                list(converter.tokenizer.sot_sequence_including_notimestamps)
            )
        logging.info(f"Text tokenizer: {tokenizer}")
        self.lm = lm
        self.lm_train_args = lm_train_args
        self.converter = converter
        self.tokenizer = tokenizer
        self.beam_search = beam_search
        self.maxlen = maxlen
        self.minlen = minlen
        self.device = device
        self.dtype = dtype
        self.nbest = nbest
    @torch.no_grad()
    def __call__(
        self, text: Optional[Union[str, torch.Tensor, np.ndarray]] = None
    ) -> ListOfHypothesis:
        """Inference
        Args:
            text: Input text used as condition for generation
                If text is str, it will be converted to token ids
                and a <sos> token will be added at the beginning.
                If text is Tensor or ndarray, it will be used directly.
        Returns:
            List of (text, token, token_int, hyp)
        """
        assert check_argument_types()
        if isinstance(text, str):
            # NOTE(review): assumes a tokenizer is configured when text is a str;
            # self.tokenizer may be None otherwise — confirm at call sites.
            tokens = self.tokenizer.text2tokens(text)
            token_ids = self.converter.tokens2ids(tokens)
        elif text is None:
            token_ids = []
        else:
            token_ids = text.tolist()
        hyp_primer = [self.lm.sos] + token_ids
        self.beam_search.set_hyp_primer(hyp_primer)
        logging.info(f"hyp primer: {hyp_primer}")
        nbest_hyps = self.beam_search(
            x=torch.zeros(1, 1, device=self.device),  # only used to obtain device info
            maxlenratio=-self.maxlen,  # negative int means a constant max length
            minlenratio=-self.minlen,  # same for min length
        )
        nbest_hyps = nbest_hyps[: self.nbest]
        results = []
        for hyp in nbest_hyps:
            assert isinstance(hyp, Hypothesis), type(hyp)
            # remove sos/eos and convert to list
            token_int = hyp.yseq[1:-1]
            if not isinstance(token_int, list):
                token_int = token_int.tolist()
            # remove blank symbol id, which is assumed to be 0
            token_int = list(filter(lambda x: x != 0, token_int))
            # Change integer-ids to tokens
            token = self.converter.ids2tokens(token_int)
            text = None
            if self.tokenizer is not None:
                text = self.tokenizer.tokens2text(token)
            results.append((text, token, token_int, hyp))
        assert check_return_type(results)
        return results
    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build GenerateText instance from the pretrained model.
        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
        Returns:
            GenerateText: GenerateText instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))
        return GenerateText(**kwargs)
def inference(
    output_dir: str,
    maxlen: int,
    minlen: int,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    ngram_weight: float,
    penalty: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    ngram_file: Optional[str],
    model_tag: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    quantize_lm: bool,
    quantize_modules: List[str],
    quantize_dtype: str,
):
    """Run conditional LM generation over a dataset; write N-best to output_dir.

    Only batch_size == 1 and single-GPU (or CPU) decoding are supported;
    word-level LMs are not implemented.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if word_lm_train_config is not None:
        raise NotImplementedError("Word LM is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random seed
    set_all_random_seed(seed)
    # 2. Build generatetext
    generatetext_kwargs = dict(
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        ngram_file=ngram_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        maxlen=maxlen,
        minlen=minlen,
        dtype=dtype,
        beam_size=beam_size,
        ngram_weight=ngram_weight,
        penalty=penalty,
        nbest=nbest,
        quantize_lm=quantize_lm,
        quantize_modules=quantize_modules,
        quantize_dtype=quantize_dtype,
    )
    generatetext = GenerateText.from_pretrained(
        model_tag=model_tag,
        **generatetext_kwargs,
    )
    # 3. Build data iterator
    loader = LMTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=LMTask.build_preprocess_fn(generatetext.lm_train_args, False),
        collate_fn=LMTask.build_collate_fn(generatetext.lm_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Start for-loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # Drop *_lengths entries and unbatch (batch_size is always 1 here).
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            # N-best list of (text, token, token_int, hyp_object)
            results = generatetext(**batch)
            # Only supporting batch_size==1
            key = keys[0]
            for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
                # Create a directory: outdir/{n}best_recog
                ibest_writer = writer[f"{n}best_recog"]
                # Write the result to each file
                ibest_writer["token"][key] = " ".join(token)
                ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                ibest_writer["score"][key] = str(hyp.score)
                if text is not None:
                    ibest_writer["text"][key] = text
def get_parser():
    """Build the command-line argument parser for LM conditional generation."""
    parser = config_argparse.ArgumentParser(
        description="LM Decoding (conditional generation)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--lm_train_config",
        type=str,
        help="LM training configuration",
    )
    group.add_argument(
        "--lm_file",
        type=str,
        help="LM parameter file",
    )
    group.add_argument(
        "--word_lm_train_config",
        type=str,
        help="Word LM training configuration",
    )
    group.add_argument(
        "--word_lm_file",
        type=str,
        help="Word LM parameter file",
    )
    group.add_argument(
        "--ngram_file",
        type=str,
        help="N-gram parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )
    group = parser.add_argument_group("Quantization related")
    group.add_argument(
        "--quantize_lm",
        type=str2bool,
        default=False,
        help="Apply dynamic quantization to LM.",
    )
    group.add_argument(
        "--quantize_modules",
        type=str,
        nargs="*",
        default=["Linear"],
        help="""List of modules to be dynamically quantized.
        E.g.: --quantize_modules=[Linear,LSTM,GRU].
        Each specified module should be an attribute of 'torch.nn', e.g.:
        torch.nn.Linear, torch.nn.LSTM, torch.nn.GRU, ...""",
    )
    group.add_argument(
        "--quantize_dtype",
        type=str,
        default="qint8",
        choices=["float16", "qint8"],
        help="Dtype for dynamic quantization.",
    )
    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="Batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    group.add_argument(
        "--maxlen",
        type=int,
        default=100,
        help="Maximum output length",
    )
    group.add_argument(
        "--minlen",
        type=int,
        default=1,
        help="Minimum output length",
    )
    group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")
    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "word", "bpe", None],
        help="Token type for LM. If not given, refers from the train args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="Model path for sentencepiece. If not given, refers from the train args",
    )
    return parser
def main(cmd=None):
    """Command-line entry point for LM conditional generation."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # "config" is consumed by config_argparse and must not reach inference().
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
| 17,623 | 30.640934 | 87 | py |
espnet | espnet-master/espnet2/bin/asr_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from distutils.version import LooseVersion
from itertools import groupby
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.quantization
from typeguard import check_argument_types, check_return_type
from espnet2.asr.decoder.s4_decoder import S4Decoder
from espnet2.asr.transducer.beam_search_transducer import BeamSearchTransducer
from espnet2.asr.transducer.beam_search_transducer import (
ExtendedHypothesis as ExtTransHypothesis,
)
from espnet2.asr.transducer.beam_search_transducer import Hypothesis as TransHypothesis
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.asr import ASRTask
from espnet2.tasks.enh_s2t import EnhS2TTask
from espnet2.tasks.lm import LMTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.text.whisper_token_id_converter import OpenAIWhisperTokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.batch_beam_search_online_sim import BatchBeamSearchOnlineSim
from espnet.nets.beam_search import BeamSearch, Hypothesis
from espnet.nets.beam_search_timesync import BeamSearchTimeSync
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.cli_utils import get_commandline_args
# Optional dependency: Hugging Face `transformers` is only needed for the
# HuggingFaceTransformersDecoder path; record its availability at import time.
try:
    from transformers import AutoModelForSeq2SeqLM
    from transformers.file_utils import ModelOutput
    is_transformers_available = True
except ImportError:
    is_transformers_available = False
# Alias for typing: each entry is (text, tokens, token ids, hypothesis object),
# where the hypothesis may come from attention or transducer decoding.
ListOfHypothesis = List[
    Tuple[
        Optional[str],
        List[str],
        List[int],
        Union[Hypothesis, ExtTransHypothesis, TransHypothesis],
    ]
]
class Speech2Text:
"""Speech2Text class
Examples:
>>> import soundfile
>>> speech2text = Speech2Text("asr_config.yml", "asr.pth")
>>> audio, rate = soundfile.read("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
"""
    def __init__(
        self,
        asr_train_config: Union[Path, str] = None,
        asr_model_file: Union[Path, str] = None,
        transducer_conf: dict = None,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        ngram_scorer: str = "full",
        ngram_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        maxlenratio: float = 0.0,
        minlenratio: float = 0.0,
        batch_size: int = 1,
        dtype: str = "float32",
        beam_size: int = 20,
        ctc_weight: float = 0.5,
        lm_weight: float = 1.0,
        ngram_weight: float = 0.9,
        penalty: float = 0.0,
        nbest: int = 1,
        streaming: bool = False,
        enh_s2t_task: bool = False,
        quantize_asr_model: bool = False,
        quantize_lm: bool = False,
        # NOTE(review): mutable default list; it is only read (never mutated), so safe.
        quantize_modules: List[str] = ["Linear"],
        quantize_dtype: str = "qint8",
        hugging_face_decoder: bool = False,
        hugging_face_decoder_max_length: int = 256,
        time_sync: bool = False,
        multi_asr: bool = False,
    ):
        """Build the ASR model, optional LM/ngram scorers, and a decoder.

        Depending on the model, exactly one of the following is configured:
        a transducer beam search, a Hugging Face seq2seq decoder, or an
        attention/CTC BeamSearch (time-sync or label-sync).
        """
        assert check_argument_types()
        task = ASRTask if not enh_s2t_task else EnhS2TTask
        if quantize_asr_model or quantize_lm:
            # float16 dynamic quantization requires torch >= 1.5.0.
            if quantize_dtype == "float16" and torch.__version__ < LooseVersion(
                "1.5.0"
            ):
                raise ValueError(
                    "float16 dtype for dynamic quantization is not supported with "
                    "torch version < 1.5.0. Switch to qint8 dtype instead."
                )
            quantize_modules = set([getattr(torch.nn, q) for q in quantize_modules])
            quantize_dtype = getattr(torch, quantize_dtype)
        # 1. Build ASR model
        scorers = {}
        asr_model, asr_train_args = task.build_model_from_file(
            asr_train_config, asr_model_file, device
        )
        if enh_s2t_task:
            # Expose the inner S2T attributes on the joint Enh+S2T model so the
            # rest of this constructor can treat it like a plain ASR model.
            asr_model.inherite_attributes(
                inherite_s2t_attrs=[
                    "ctc",
                    "decoder",
                    "eos",
                    "joint_network",
                    "sos",
                    "token_list",
                    "use_transducer_decoder",
                ]
            )
        asr_model.to(dtype=getattr(torch, dtype)).eval()
        if quantize_asr_model:
            logging.info("Use quantized asr model for decoding.")
            asr_model = torch.quantization.quantize_dynamic(
                asr_model, qconfig_spec=quantize_modules, dtype=quantize_dtype
            )
        decoder = asr_model.decoder
        ctc = CTCPrefixScorer(ctc=asr_model.ctc, eos=asr_model.eos)
        token_list = asr_model.token_list
        scorers.update(
            decoder=decoder,
            ctc=ctc,
            length_bonus=LengthBonus(len(token_list)),
        )
        # 2. Build Language model
        if lm_train_config is not None:
            lm, lm_train_args = LMTask.build_model_from_file(
                lm_train_config, lm_file, device
            )
            if quantize_lm:
                logging.info("Use quantized lm for decoding.")
                lm = torch.quantization.quantize_dynamic(
                    lm, qconfig_spec=quantize_modules, dtype=quantize_dtype
                )
            scorers["lm"] = lm.lm
        # 3. Build ngram model
        if ngram_file is not None:
            if ngram_scorer == "full":
                from espnet.nets.scorers.ngram import NgramFullScorer
                ngram = NgramFullScorer(ngram_file, token_list)
            else:
                from espnet.nets.scorers.ngram import NgramPartScorer
                ngram = NgramPartScorer(ngram_file, token_list)
        else:
            ngram = None
        scorers["ngram"] = ngram
        # 4. Build BeamSearch object
        if asr_model.use_transducer_decoder:
            # In multi-blank RNNT, we assume all big blanks are
            # just before the standard blank in token_list
            multi_blank_durations = getattr(
                asr_model, "transducer_multi_blank_durations", []
            )[::-1] + [1]
            multi_blank_indices = [
                asr_model.blank_id - i + 1
                for i in range(len(multi_blank_durations), 0, -1)
            ]
            if transducer_conf is None:
                transducer_conf = {}
            beam_search_transducer = BeamSearchTransducer(
                decoder=asr_model.decoder,
                joint_network=asr_model.joint_network,
                beam_size=beam_size,
                lm=scorers["lm"] if "lm" in scorers else None,
                lm_weight=lm_weight,
                multi_blank_durations=multi_blank_durations,
                multi_blank_indices=multi_blank_indices,
                token_list=token_list,
                **transducer_conf,
            )
            beam_search = None
            hugging_face_model = None
            hugging_face_linear_in = None
        elif (
            decoder.__class__.__name__ == "HuggingFaceTransformersDecoder"
            and hugging_face_decoder
        ):
            if not is_transformers_available:
                raise ImportError(
                    "`transformers` is not available."
                    " Please install it via `pip install transformers`"
                    " or `cd /path/to/espnet/tools && . ./activate_python.sh"
                    " && ./installers/install_transformers.sh`."
                )
            # Rebuild the pretrained HF model and load the fine-tuned decoder
            # weights into it; the HF encoder is unused and is deleted.
            hugging_face_model = AutoModelForSeq2SeqLM.from_pretrained(
                decoder.model_name_or_path
            )
            hugging_face_model.lm_head.load_state_dict(decoder.lm_head.state_dict())
            if hasattr(hugging_face_model, "model"):
                hugging_face_model.model.decoder.load_state_dict(
                    decoder.decoder.state_dict()
                )
                del hugging_face_model.model.encoder
            else:
                hugging_face_model.decoder.load_state_dict(decoder.decoder.state_dict())
                del hugging_face_model.encoder
            # Free the duplicated decoder weights held by the ESPnet model.
            del asr_model.decoder.lm_head
            del asr_model.decoder.decoder
            hugging_face_linear_in = decoder.linear_in
            hugging_face_model.to(device=device).eval()
            beam_search = None
            beam_search_transducer = None
        else:
            beam_search_transducer = None
            hugging_face_model = None
            hugging_face_linear_in = None
            weights = dict(
                decoder=1.0 - ctc_weight,
                ctc=ctc_weight,
                lm=lm_weight,
                ngram=ngram_weight,
                length_bonus=penalty,
            )
            if time_sync:
                if not hasattr(asr_model, "ctc"):
                    raise NotImplementedError(
                        "BeamSearchTimeSync without CTC is not supported."
                    )
                if batch_size != 1:
                    raise NotImplementedError(
                        "BeamSearchTimeSync with batching is not yet supported."
                    )
                logging.info("BeamSearchTimeSync implementation is selected.")
                scorers["ctc"] = asr_model.ctc
                beam_search = BeamSearchTimeSync(
                    beam_size=beam_size,
                    weights=weights,
                    scorers=scorers,
                    sos=asr_model.sos,
                    token_list=token_list,
                )
            else:
                beam_search = BeamSearch(
                    beam_size=beam_size,
                    weights=weights,
                    scorers=scorers,
                    sos=asr_model.sos,
                    eos=asr_model.eos,
                    vocab_size=len(token_list),
                    token_list=token_list,
                    pre_beam_score_key=None if ctc_weight == 1.0 else "full",
                )
                # TODO(karita): make all scorers batchfied
                if batch_size == 1:
                    # Switch to a batched implementation only when every full
                    # scorer supports batching.
                    non_batch = [
                        k
                        for k, v in beam_search.full_scorers.items()
                        if not isinstance(v, BatchScorerInterface)
                    ]
                    if len(non_batch) == 0:
                        if streaming:
                            beam_search.__class__ = BatchBeamSearchOnlineSim
                            beam_search.set_streaming_config(asr_train_config)
                            logging.info(
                                "BatchBeamSearchOnlineSim implementation is selected."
                            )
                        else:
                            beam_search.__class__ = BatchBeamSearch
                            logging.info("BatchBeamSearch implementation is selected.")
                    else:
                        logging.warning(
                            f"As non-batch scorers {non_batch} are found, "
                            f"fall back to non-batch implementation."
                        )
            beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
            for scorer in scorers.values():
                if isinstance(scorer, torch.nn.Module):
                    scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
            logging.info(f"Beam_search: {beam_search}")
        logging.info(f"Decoding device={device}, dtype={dtype}")
        # 5. [Optional] Build Text converter: e.g. bpe-sym -> Text
        if token_type is None:
            token_type = asr_train_args.token_type
        if bpemodel is None:
            bpemodel = asr_train_args.bpemodel
        if token_type is None:
            tokenizer = None
        elif (
            token_type == "bpe"
            or token_type == "hugging_face"
            or "whisper" in token_type
        ):
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        if bpemodel not in ["whisper_en", "whisper_multilingual"]:
            converter = TokenIDConverter(token_list=token_list)
        else:
            # Whisper models need their own converter and a fixed hyp primer.
            converter = OpenAIWhisperTokenIDConverter(model_type=bpemodel)
            beam_search.set_hyp_primer(
                list(converter.tokenizer.sot_sequence_including_notimestamps)
            )
        logging.info(f"Text tokenizer: {tokenizer}")
        self.asr_model = asr_model
        self.asr_train_args = asr_train_args
        self.converter = converter
        self.tokenizer = tokenizer
        self.beam_search = beam_search
        self.beam_search_transducer = beam_search_transducer
        self.hugging_face_model = hugging_face_model
        self.hugging_face_linear_in = hugging_face_linear_in
        self.hugging_face_beam_size = beam_size
        self.hugging_face_decoder_max_length = hugging_face_decoder_max_length
        self.maxlenratio = maxlenratio
        self.minlenratio = minlenratio
        self.device = device
        self.dtype = dtype
        self.nbest = nbest
        self.enh_s2t_task = enh_s2t_task
        self.multi_asr = multi_asr
    @torch.no_grad()
    def __call__(
        self, speech: Union[torch.Tensor, np.ndarray]
    ) -> Union[
        ListOfHypothesis,
        Tuple[
            ListOfHypothesis,
            Optional[Dict[int, List[str]]],
        ],
    ]:
        """Run end-to-end ASR inference on a single utterance.

        Args:
            speech: Input speech data, a 1-D array/tensor of samples
                (Nsamples,); the batch axis is added internally.
        Returns:
            N-best list of (text, token, token_int, hyp).
            For Enh+ASR joint or multi-speaker models: a list of such N-best
            lists, one per speaker.
            When the encoder produces intermediate CTC outputs: a tuple of
            the N-best list and a dict mapping layer index to token strings.
        """
        assert check_argument_types()
        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)
        # data: (Nsamples,) -> (1, Nsamples)
        speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
        # lengths: (1,)
        lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
        batch = {"speech": speech, "speech_lengths": lengths}
        logging.info("speech length: " + str(speech.size(1)))
        # a. To device
        batch = to_device(batch, device=self.device)
        # b. Forward Encoder
        enc, enc_olens = self.asr_model.encode(**batch)
        if self.multi_asr:
            enc = enc.unbind(dim=1)  # (batch, num_inf, ...) -> num_inf x [batch, ...]
        if self.enh_s2t_task or self.multi_asr:
            # Enh+ASR joint task or Multispkr ASR task
            # NOTE (Wangyou): the return type in this case is List[default_return_type]
            if self.multi_asr:
                num_spk = getattr(self.asr_model, "num_inf", 1)
            else:
                num_spk = getattr(self.asr_model.enh_model, "num_spk", 1)
            assert len(enc) == num_spk, (len(enc), num_spk)
            results = []
            for spk, enc_spk in enumerate(enc, 1):
                logging.info(f"=== [{str(self.asr_model.__class__)}] Speaker {spk} ===")
                if isinstance(enc_spk, tuple):
                    # the encoder output may be a tuple; use its first element
                    enc_spk = enc_spk[0]
                assert len(enc_spk) == 1, len(enc_spk)
                # c. Passed the encoder result and the beam search
                ret = self._decode_single_sample(enc_spk[0])
                assert check_return_type(ret)
                results.append(ret)
        else:
            # Normal ASR
            intermediate_outs = None
            if isinstance(enc, tuple):
                # (main encoder output, intermediate CTC outputs)
                intermediate_outs = enc[1]
                enc = enc[0]
            assert len(enc) == 1, len(enc)
            # c. Passed the encoder result and the beam search
            results = self._decode_single_sample(enc[0])
            # Encoder intermediate CTC predictions
            if intermediate_outs is not None:
                encoder_interctc_res = self._decode_interctc(intermediate_outs)
                results = (results, encoder_interctc_res)
        assert check_return_type(results)
        return results
def _decode_interctc(
self, intermediate_outs: List[Tuple[int, torch.Tensor]]
) -> Dict[int, List[str]]:
assert check_argument_types()
exclude_ids = [self.asr_model.blank_id, self.asr_model.sos, self.asr_model.eos]
res = {}
token_list = self.beam_search.token_list
for layer_idx, encoder_out in intermediate_outs:
y = self.asr_model.ctc.argmax(encoder_out)[0] # batch_size = 1
y = [x[0] for x in groupby(y) if x[0] not in exclude_ids]
y = [token_list[x] for x in y]
res[layer_idx] = y
return res
    def _decode_single_sample(self, enc: torch.Tensor):
        """Decode one encoder output sequence into an N-best result list.

        Dispatches to the transducer beam search, the Hugging Face decoder,
        or the generic ESPnet beam search depending on configuration, then
        converts each hypothesis to a (text, token, token_int, hyp) tuple.
        """
        if self.beam_search_transducer:
            logging.info("encoder output length: " + str(enc.shape[0]))
            nbest_hyps = self.beam_search_transducer(enc)
            best = nbest_hyps[0]
            logging.info(f"total log probability: {best.score:.2f}")
            logging.info(
                f"normalized log probability: {best.score / len(best.yseq):.2f}"
            )
            logging.info(
                "best hypo: " + "".join(self.converter.ids2tokens(best.yseq[1:])) + "\n"
            )
        elif self.hugging_face_model:
            decoder_start_token_id = (
                self.hugging_face_model.config.decoder_start_token_id
            )
            yseq = self.hugging_face_model.generate(
                encoder_outputs=ModelOutput(
                    last_hidden_state=self.hugging_face_linear_in(enc).unsqueeze(0)
                ),
                use_cache=True,
                decoder_start_token_id=decoder_start_token_id,
                num_beams=self.hugging_face_beam_size,
                max_length=self.hugging_face_decoder_max_length,
            )
            # generate() returns a batch of id sequences; batch_size is 1 here
            nbest_hyps = [Hypothesis(yseq=yseq[0])]
            logging.info(
                "best hypo: "
                + "".join(self.converter.ids2tokens(nbest_hyps[0].yseq[1:]))
                + "\n"
            )
        else:
            if hasattr(self.beam_search.nn_dict, "decoder"):
                if isinstance(self.beam_search.nn_dict.decoder, S4Decoder):
                    # Setup: required for S4 autoregressive generation
                    for module in self.beam_search.nn_dict.decoder.modules():
                        if hasattr(module, "setup_step"):
                            module.setup_step()
            nbest_hyps = self.beam_search(
                x=enc, maxlenratio=self.maxlenratio, minlenratio=self.minlenratio
            )
            nbest_hyps = nbest_hyps[: self.nbest]
        results = []
        for hyp in nbest_hyps:
            assert isinstance(hyp, (Hypothesis, TransHypothesis)), type(hyp)
            # remove sos/eos and get results
            # (for transducer decoders the last position is kept: slice end None)
            last_pos = None if self.asr_model.use_transducer_decoder else -1
            if isinstance(hyp.yseq, list):
                token_int = hyp.yseq[1:last_pos]
            else:
                token_int = hyp.yseq[1:last_pos].tolist()
            # remove blank symbol id, which is assumed to be 0
            token_int = list(filter(lambda x: x != 0, token_int))
            # Change integer-ids to tokens
            token = self.converter.ids2tokens(token_int)
            if self.tokenizer is not None:
                text = self.tokenizer.tokens2text(token)
            else:
                text = None
            results.append((text, token, token_int, hyp))
        return results
@staticmethod
def from_pretrained(
model_tag: Optional[str] = None,
**kwargs: Optional[Any],
):
"""Build Speech2Text instance from the pretrained model.
Args:
model_tag (Optional[str]): Model tag of the pretrained models.
Currently, the tags of espnet_model_zoo are supported.
Returns:
Speech2Text: Speech2Text instance.
"""
if model_tag is not None:
try:
from espnet_model_zoo.downloader import ModelDownloader
except ImportError:
logging.error(
"`espnet_model_zoo` is not installed. "
"Please install via `pip install -U espnet_model_zoo`."
)
raise
d = ModelDownloader()
kwargs.update(**d.download_and_unpack(model_tag))
return Speech2Text(**kwargs)
def inference(
    output_dir: str,
    maxlenratio: float,
    minlenratio: float,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    ctc_weight: float,
    lm_weight: float,
    ngram_weight: float,
    penalty: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    asr_train_config: Optional[str],
    asr_model_file: Optional[str],
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    ngram_file: Optional[str],
    model_tag: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    transducer_conf: Optional[dict],
    streaming: bool,
    enh_s2t_task: bool,
    quantize_asr_model: bool,
    quantize_lm: bool,
    quantize_modules: List[str],
    quantize_dtype: str,
    hugging_face_decoder: bool,
    hugging_face_decoder_max_length: int,
    time_sync: bool,
    multi_asr: bool,
):
    """Run ASR decoding over a dataset and write N-best results.

    Builds a Speech2Text object from the given configs, iterates over the
    data, and writes text/token/token_int/score files under
    ``output_dir/{n}best_recog`` (with ``_spk{k}`` suffixes for Enh+ASR or
    multi-speaker models).

    Raises:
        NotImplementedError: If batch_size > 1, a word LM is given, or
            ngpu > 1 (none of these are supported).
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if word_lm_train_config is not None:
        raise NotImplementedError("Word LM is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build speech2text
    speech2text_kwargs = dict(
        asr_train_config=asr_train_config,
        asr_model_file=asr_model_file,
        transducer_conf=transducer_conf,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        ngram_file=ngram_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        maxlenratio=maxlenratio,
        minlenratio=minlenratio,
        dtype=dtype,
        beam_size=beam_size,
        ctc_weight=ctc_weight,
        lm_weight=lm_weight,
        ngram_weight=ngram_weight,
        penalty=penalty,
        nbest=nbest,
        streaming=streaming,
        enh_s2t_task=enh_s2t_task,
        multi_asr=multi_asr,
        quantize_asr_model=quantize_asr_model,
        quantize_lm=quantize_lm,
        quantize_modules=quantize_modules,
        quantize_dtype=quantize_dtype,
        hugging_face_decoder=hugging_face_decoder,
        hugging_face_decoder_max_length=hugging_face_decoder_max_length,
        time_sync=time_sync,
    )
    speech2text = Speech2Text.from_pretrained(
        model_tag=model_tag,
        **speech2text_kwargs,
    )
    # 3. Build data-iterator
    loader = ASRTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False),
        collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Start the decoding loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # strip the lengths entries and the batch axis (batch_size==1)
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            # N-best list of (text, token, token_int, hyp_object)
            try:
                results = speech2text(**batch)
            except TooShortUttError as e:
                # emit placeholder hypotheses so every key still gets output
                logging.warning(f"Utterance {keys} {e}")
                hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
                results = [[" ", ["<space>"], [2], hyp]] * nbest
                if enh_s2t_task:
                    num_spk = getattr(speech2text.asr_model.enh_model, "num_spk", 1)
                    results = [results for _ in range(num_spk)]
            # Only supporting batch_size==1
            key = keys[0]
            if enh_s2t_task or multi_asr:
                # Enh+ASR joint task
                for spk, ret in enumerate(results, 1):
                    for n, (text, token, token_int, hyp) in zip(
                        range(1, nbest + 1), ret
                    ):
                        # Create a directory: outdir/{n}best_recog_spk?
                        ibest_writer = writer[f"{n}best_recog"]
                        # Write the result to each file
                        ibest_writer[f"token_spk{spk}"][key] = " ".join(token)
                        ibest_writer[f"token_int_spk{spk}"][key] = " ".join(
                            map(str, token_int)
                        )
                        ibest_writer[f"score_spk{spk}"][key] = str(hyp.score)
                        if text is not None:
                            ibest_writer[f"text_spk{spk}"][key] = text
            else:
                # Normal ASR
                encoder_interctc_res = None
                if isinstance(results, tuple):
                    results, encoder_interctc_res = results
                for n, (text, token, token_int, hyp) in zip(
                    range(1, nbest + 1), results
                ):
                    # Create a directory: outdir/{n}best_recog
                    ibest_writer = writer[f"{n}best_recog"]
                    # Write the result to each file
                    ibest_writer["token"][key] = " ".join(token)
                    ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                    ibest_writer["score"][key] = str(hyp.score)
                    if text is not None:
                        ibest_writer["text"][key] = text
                # Write intermediate predictions to
                # encoder_interctc_layer<layer_idx>.txt
                ibest_writer = writer[f"1best_recog"]
                if encoder_interctc_res is not None:
                    for idx, text in encoder_interctc_res.items():
                        ibest_writer[f"encoder_interctc_layer{idx}.txt"][
                            key
                        ] = " ".join(text)
def get_parser():
    """Construct the command-line argument parser for ASR decoding."""
    parser = config_argparse.ArgumentParser(
        description="ASR Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--asr_train_config",
        type=str,
        help="ASR training configuration",
    )
    group.add_argument(
        "--asr_model_file",
        type=str,
        help="ASR model parameter file",
    )
    group.add_argument(
        "--lm_train_config",
        type=str,
        help="LM training configuration",
    )
    group.add_argument(
        "--lm_file",
        type=str,
        help="LM parameter file",
    )
    group.add_argument(
        "--word_lm_train_config",
        type=str,
        help="Word LM training configuration",
    )
    group.add_argument(
        "--word_lm_file",
        type=str,
        help="Word LM parameter file",
    )
    group.add_argument(
        "--ngram_file",
        type=str,
        help="N-gram parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )
    group.add_argument(
        "--enh_s2t_task",
        type=str2bool,
        default=False,
        help="Whether we are using an enhancement and ASR joint model",
    )
    group.add_argument(
        "--multi_asr",
        type=str2bool,
        default=False,
        help="Whether we are using a monolithic multi-speaker ASR model "
        "(This flag should be False if a speech separation model is used before ASR)",
    )
    group = parser.add_argument_group("Quantization related")
    group.add_argument(
        "--quantize_asr_model",
        type=str2bool,
        default=False,
        help="Apply dynamic quantization to ASR model.",
    )
    group.add_argument(
        "--quantize_lm",
        type=str2bool,
        default=False,
        help="Apply dynamic quantization to LM.",
    )
    group.add_argument(
        "--quantize_modules",
        type=str,
        nargs="*",
        default=["Linear"],
        help="""List of modules to be dynamically quantized.
        E.g.: --quantize_modules=[Linear,LSTM,GRU].
        Each specified module should be an attribute of 'torch.nn', e.g.:
        torch.nn.Linear, torch.nn.LSTM, torch.nn.GRU, ...""",
    )
    group.add_argument(
        "--quantize_dtype",
        type=str,
        default="qint8",
        choices=["float16", "qint8"],
        help="Dtype for dynamic quantization.",
    )
    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    group.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain max output length. "
        "If maxlenratio=0.0 (default), it uses a end-detect "
        "function "
        "to automatically find maximum hypothesis lengths."
        "If maxlenratio<0.0, its absolute value is interpreted"
        "as a constant max output length",
    )
    group.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    group.add_argument(
        "--ctc_weight",
        type=float,
        default=0.5,
        help="CTC weight in joint decoding",
    )
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")
    group.add_argument("--streaming", type=str2bool, default=False)
    group.add_argument("--hugging_face_decoder", type=str2bool, default=False)
    group.add_argument("--hugging_face_decoder_max_length", type=int, default=256)
    group.add_argument(
        "--transducer_conf",
        default=None,
        help="The keyword arguments for transducer beam search.",
    )
    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for ASR model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--time_sync",
        type=str2bool,
        default=False,
        help="Time synchronous beam search.",
    )
    return parser
def main(cmd=None):
    """Entry point: parse command-line arguments and run ASR decoding."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    # argparse namespace -> plain kwargs; "config" belongs to
    # config_argparse and must not be passed through to inference().
    kwargs = vars(args)
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
| 34,289 | 34.386997 | 88 | py |
espnet | espnet-master/espnet2/bin/uasr_inference_k2.py | #!/usr/bin/env python3
import argparse
import datetime
import logging
import sys
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import yaml
from typeguard import check_argument_types, check_return_type
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.lm import LMTask
from espnet2.tasks.uasr import UASRTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.utils.cli_utils import get_commandline_args
try:
    import k2  # for CI import
    from icefall.decode import get_lattice, one_best_decoding
    from icefall.utils import get_texts
except ImportError:
    # NOTE: ModuleNotFoundError is a subclass of ImportError, so a single
    # ImportError clause covers both. The previous clause
    # `except ImportError or ModuleNotFoundError:` was misleading: the `or`
    # of two classes evaluates to the first operand, i.e. plain ImportError.
    k2 = None
def indices_to_split_size(indices, total_elements: int = None):
    """Convert a list of boundary indices to split sizes for ``torch.split``.

    During decoding, the api torch.tensor_split should be used.
    However, torch.tensor_split is only available with pytorch >= 1.8.0.
    So torch.split is used to pass ci with pytorch < 1.8.0.
    This function is used to prepare input for torch.split.

    Args:
        indices: Monotonically increasing boundary indices; a leading 0 is
            prepended when missing.
        total_elements: Total number of elements to be split. If given and
            the computed sizes do not already cover it, a final size for the
            remainder is appended.

    Returns:
        List[int]: chunk sizes suitable as ``split_size_or_sections``
        for ``torch.split``.
    """
    if not indices:
        # No boundaries at all: either nothing, or one chunk of everything.
        return [] if total_elements is None else [total_elements]
    if indices[0] != 0:
        indices = [0] + indices
    split_size = [indices[i] - indices[i - 1] for i in range(1, len(indices))]
    covered = sum(split_size)
    if total_elements is not None and covered != total_elements:
        split_size.append(total_elements - covered)
    return split_size
class k2Speech2Text:
    """Speech2Text class using k2 FSA-based decoding for UASR models.

    Examples:
        >>> import soundfile
        >>> speech2text = k2Speech2Text("uasr_config.yml", "uasr.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> speech = np.expand_dims(audio, 0)  # shape: [batch_size, speech_length]
        >>> speech_lengths = np.array([audio.shape[0]])  # shape: [batch_size]
        >>> batch = {"speech": speech, "speech_lengths": speech_lengths}
        >>> speech2text(batch)
        [(text, token, token_int, score), ...]
    """
    def __init__(
        self,
        uasr_train_config: Union[Path, str],
        decoding_graph: str,
        uasr_model_file: Union[Path, str] = None,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        maxlenratio: float = 0.0,
        minlenratio: float = 0.0,
        batch_size: int = 1,
        dtype: str = "float32",
        beam_size: int = 8,
        ctc_weight: float = 0.5,
        lm_weight: float = 1.0,
        penalty: float = 0.0,
        nbest: int = 1,
        streaming: bool = False,
        search_beam_size: int = 20,
        output_beam_size: int = 20,
        min_active_states: int = 14000,
        max_active_states: int = 56000,
        blank_bias: float = 0.0,
        lattice_weight: float = 1.0,
        is_ctc_decoding: bool = True,
        lang_dir: Optional[str] = None,
        token_list_file: Optional[str] = None,
        use_fgram_rescoring: bool = False,
        use_nbest_rescoring: bool = False,
        am_weight: float = 0.5,
        decoder_weight: float = 0.5,
        nnlm_weight: float = 1.0,
        num_paths: int = 1000,
        nbest_batch_size: int = 500,
        nll_batch_size: int = 100,
    ):
        """Build the UASR model, optional LM, and the k2 decoding graph."""
        assert check_argument_types()
        # 1. Build UASR model
        uasr_model, uasr_train_args = UASRTask.build_model_from_file(
            uasr_train_config, uasr_model_file, device
        )
        uasr_model.use_collected_training_feats = True
        uasr_model.to(dtype=getattr(torch, dtype)).eval()
        if token_list_file is not None:
            # Load the output token list from file
            # (assumes one "<token> <something>" pair per line — the second
            # column is discarded; TODO confirm file format with callers)
            token_list = []
            with open(token_list_file, "r") as tlf:
                for line in tlf.readlines():
                    token, _ = line.split()
                    assert token not in token_list
                    token_list.append(token)
        else:
            token_list = uasr_model.token_list
        # 2. Build Language model
        if lm_train_config is not None:
            lm, lm_train_args = LMTask.build_model_from_file(
                lm_train_config, lm_file, device
            )
            self.lm = lm
        self.is_ctc_decoding = is_ctc_decoding
        self.use_fgram_rescoring = use_fgram_rescoring
        self.use_nbest_rescoring = use_nbest_rescoring
        # load decoding graph
        self.decoding_graph = k2.Fsa.from_dict(torch.load(decoding_graph))
        self.decoding_graph = self.decoding_graph.to(device)
        assert token_type is not None
        tokenizer = build_tokenizer(token_type=token_type)
        converter = TokenIDConverter(token_list=token_list)
        logging.info(f"Text tokenizer: {tokenizer}")
        logging.info(f"Running on : {device}")
        self.uasr_model = uasr_model
        self.uasr_train_args = uasr_train_args
        self.converter = converter
        self.tokenizer = tokenizer
        self.device = device
        self.dtype = dtype
        self.search_beam_size = search_beam_size
        self.output_beam_size = output_beam_size
        self.min_active_states = min_active_states
        self.max_active_states = max_active_states
        self.blank_bias = blank_bias
        self.lattice_weight = lattice_weight
        self.am_weight = am_weight
        self.decoder_weight = decoder_weight
        self.nnlm_weight = nnlm_weight
        self.num_paths = num_paths
        self.nbest_batch_size = nbest_batch_size
        self.nll_batch_size = nll_batch_size
        self.uasr_model_ignore_id = 0
    @torch.no_grad()
    def __call__(
        self, speech: Union[torch.Tensor, np.ndarray]
    ) -> List[Tuple[Optional[str], List[str], List[int], float]]:
        """Run k2 lattice decoding on one utterance.

        Args:
            speech: Input speech data as a 1-D array/tensor of samples;
                a batch axis is added internally.
        Returns:
            List of (text, token, token_int, score) tuples.
        """
        assert check_argument_types()
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)
        # data: (Nsamples,) -> (1, Nsamples)
        speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
        # lengths: (1,)
        lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
        batch = {"speech": speech, "speech_lengths": lengths}
        # a. To device
        batch = to_device(batch, device=self.device)
        # b. Forward Encoder
        # enc: [N, T, C]
        generated_sample, _ = self.uasr_model.inference(**batch)
        # nnet_output: [N, T, C]
        logp_encoder_output = torch.nn.functional.log_softmax(generated_sample, dim=-1)
        # It maybe useful to tune blank_bias.
        # The valid range of blank_bias is [-inf, 0]
        # logp_encoder_output[:, :, 4] += 0
        batch_size, time_length, _ = generated_sample.shape
        assert batch_size == 1
        sequence_idx = 0
        start_frame = 0
        num_frames = time_length
        supervision_segments = torch.Tensor([[sequence_idx, start_frame, num_frames]])
        supervision_segments = supervision_segments.to(torch.int32)
        # An introduction to DenseFsaVec:
        # https://k2-fsa.github.io/k2/core_concepts/index.html#dense-fsa-vector
        # It could be viewed as a fsa-type lopg_encoder_output,
        # whose weight on the arcs are initialized with logp_encoder_output.
        # The goal of converting tensor-type to fsa-type is using
        # fsa related functions in k2. e.g. k2.intersect_dense_pruned below
        # The term "intersect" is similar to "compose" in k2.
        # The differences is are:
        # for "compose" functions, the composition involves
        # mathcing output label of a.fsa and input label of b.fsa
        # while for "intersect" functions, the composition involves
        # matching input label of a.fsa and input label of b.fsa
        # Actually, in compose functions, b.fsa is inverted and then
        # a.fsa and inv_b.fsa are intersected together.
        # For difference between compose and interset:
        # https://github.com/k2-fsa/k2/blob/master/k2/python/k2/fsa_algo.py#L308
        # For definition of k2.intersect_dense_pruned:
        # https://github.com/k2-fsa/k2/blob/master/k2/python/k2/autograd.py#L648
        lattices = get_lattice(
            nnet_output=logp_encoder_output,
            decoding_graph=self.decoding_graph,
            supervision_segments=supervision_segments,
            search_beam=self.search_beam_size,
            output_beam=self.output_beam_size,
            min_active_states=self.min_active_states,
            max_active_states=self.max_active_states,
        )
        # lattices.scores is the sum of decode_graph.scores(a.k.a. lm weight) and
        # dense_fsa_vec.scores(a.k.a. am weight) on related arcs.
        # For ctc decoding graph, lattices.scores only store am weight
        # since the decoder_graph only define the ctc topology and
        # has no lm weight on its arcs.
        # While for 3-gram decoding, whose graph is converted from language models,
        # lattice.scores contains both am weights and lm weights
        #
        # It maybe useful to tune lattice.scores
        # The valid range of lattice_weight is [0, inf)
        # The lattice_weight will affect the search of k2.random_paths
        lattices.scores *= self.lattice_weight
        results = []
        # TODO(Dongji): add nbest_rescoring
        if self.use_nbest_rescoring:
            raise ValueError("Currently nbest rescoring is not supported")
        else:
            best_paths = one_best_decoding(lattices, use_double_scores=True)
            scores = best_paths.get_tot_scores(
                use_double_scores=True, log_semiring=False
            ).tolist()
            hyps = get_texts(best_paths)
        assert len(scores) == len(hyps)
        for token_int, score in zip(hyps, scores):
            # For decoding methods nbest_rescoring and ctc_decoding
            # hyps stores token_index, which is lattice.labels.
            # convert token_id to text with self.tokenizer
            token = self.converter.ids2tokens(token_int)
            assert self.tokenizer is not None
            text = self.tokenizer.tokens2text(token)
            results.append((text, token, token_int, score))
        assert check_return_type(results)
        return results
    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build k2Speech2Text instance from the pretrained model.
        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
        Returns:
            Speech2Text: Speech2Text instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))
        return k2Speech2Text(**kwargs)
def inference(
    output_dir: str,
    decoding_graph: str,
    maxlenratio: float,
    minlenratio: float,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    ctc_weight: float,
    lm_weight: float,
    penalty: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    uasr_train_config: Optional[str],
    uasr_model_file: Optional[str],
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    model_tag: Optional[str],
    token_type: Optional[str],
    word_token_list: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    streaming: bool,
    is_ctc_decoding: bool,
    use_nbest_rescoring: bool,
    num_paths: int,
    nbest_batch_size: int,
    nll_batch_size: int,
    k2_config: Optional[str],
):
    """Run k2-based UASR decoding over a dataset and write 1-best results.

    Builds a ``k2Speech2Text`` object from the given configs (extended with
    the options loaded from ``k2_config``), iterates over the data, and
    writes token/token_int/score/text files under ``output_dir/1best_recog``.

    Raises:
        AssertionError: If ``is_ctc_decoding`` is False (only the CTC
            topology decoding graph is supported).
        NotImplementedError: If more than one GPU is requested.
    """
    assert is_ctc_decoding, "Currently, only ctc_decoding graph is supported."
    assert check_argument_types()
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    with open(k2_config) as k2_config_file:
        dict_k2_config = yaml.safe_load(k2_config_file)
    # 2. Build speech2text
    speech2text_kwargs = dict(
        uasr_train_config=uasr_train_config,
        uasr_model_file=uasr_model_file,
        decoding_graph=decoding_graph,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        token_type=token_type,
        token_list_file=word_token_list,
        bpemodel=bpemodel,
        device=device,
        maxlenratio=maxlenratio,
        minlenratio=minlenratio,
        dtype=dtype,
        beam_size=beam_size,
        ctc_weight=ctc_weight,
        lm_weight=lm_weight,
        penalty=penalty,
        nbest=nbest,
        streaming=streaming,
        is_ctc_decoding=is_ctc_decoding,
        use_nbest_rescoring=use_nbest_rescoring,
        num_paths=num_paths,
        nbest_batch_size=nbest_batch_size,
        nll_batch_size=nll_batch_size,
    )
    speech2text_kwargs = dict(**speech2text_kwargs, **dict_k2_config)
    speech2text = k2Speech2Text.from_pretrained(
        model_tag=model_tag,
        **speech2text_kwargs,
    )
    # 3. Build data-iterator
    loader = UASRTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=UASRTask.build_preprocess_fn(speech2text.uasr_train_args, False),
        collate_fn=UASRTask.build_collate_fn(speech2text.uasr_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Decode each batch and write results
    with DatadirWriter(output_dir) as writer:
        start_decoding_time = datetime.datetime.now()
        for batch_idx, (keys, batch) in enumerate(loader):
            if batch_idx % 10 == 0:
                logging.info(f"Processing {batch_idx} batch")
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            # 1-best list of (text, token, token_int)
            try:
                results = speech2text(**batch)
            except TooShortUttError as e:
                logging.warning(f"Utterance {keys} {e}")
                # Bug fix: `results` was previously left unset here, so a
                # too-short utterance raised NameError on the first batch or
                # silently re-wrote the previous batch's results under the
                # new keys on later batches. Emit placeholder hypotheses
                # instead (same convention as asr_inference.py).
                results = [(" ", ["<space>"], [2], 0.0) for _ in keys]
            for key_idx, (text, token, token_int, score) in enumerate(results):
                key = keys[key_idx]
                best_writer = writer["1best_recog"]
                # Write the result to each file
                best_writer["token"][key] = " ".join(token)
                best_writer["token_int"][key] = " ".join(map(str, token_int))
                best_writer["score"][key] = str(score)
                if text is not None:
                    best_writer["text"][key] = text
        end_decoding_time = datetime.datetime.now()
        decoding_duration = end_decoding_time - start_decoding_time
        logging.info(f"Decoding duration is {decoding_duration.seconds} seconds")
def get_parser():
    """Construct the command-line argument parser for k2-based UASR decoding."""
    parser = config_argparse.ArgumentParser(
        description="UASR Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--uasr_train_config",
        type=str,
        help="UASR training configuration",
    )
    group.add_argument(
        "--uasr_model_file",
        type=str,
        help="UASR model parameter file",
    )
    group.add_argument(
        "--lm_train_config",
        type=str,
        help="LM training configuration",
    )
    group.add_argument(
        "--lm_file",
        type=str,
        help="LM parameter file",
    )
    group.add_argument(
        "--word_lm_train_config",
        type=str,
        help="Word LM training configuration",
    )
    group.add_argument(
        "--word_lm_file",
        type=str,
        help="Word LM parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )
    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    group.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain max output length. "
        "If maxlenratio=0.0 (default), it uses a end-detect "
        "function "
        "to automatically find maximum hypothesis lengths",
    )
    group.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    group.add_argument(
        "--ctc_weight",
        type=float,
        default=0.5,
        help="CTC weight in joint decoding",
    )
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument("--streaming", type=str2bool, default=False)
    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["phn", "word"],
        help="The token type for UASR model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--is_ctc_decoding",
        type=str2bool,
        default=True,
        help="Use ctc topology as decoding graph",
    )
    group.add_argument("--use_nbest_rescoring", type=str2bool, default=False)
    group.add_argument(
        "--num_paths",
        type=int,
        default=1000,
        help="The third argument for k2.random_paths",
    )
    group.add_argument(
        "--nbest_batch_size",
        type=int,
        default=500,
        help="batchify nbest list when computing am/lm scores to avoid OOM",
    )
    group.add_argument(
        "--nll_batch_size",
        type=int,
        default=100,
        help="batch_size when computing nll during nbest rescoring",
    )
    group.add_argument("--decoding_graph", type=str, help="decoding graph")
    group.add_argument(
        "--word_token_list", type=str_or_none, default=None, help="output token list"
    )
    group.add_argument("--k2_config", type=str, help="Config file for decoding with k2")
    return parser
def main(cmd=None):
    """Parse command-line arguments and run k2-based UASR inference.

    Args:
        cmd: Optional list of argument strings; defaults to sys.argv.
    """
    # k2/icefall is an optional dependency; fail early with a clear message.
    assert (
        k2 is not None
    ), "k2/icefall is not installed, please follow 'tools/installers' to install"
    # Echo the invoked command line to stderr for reproducibility.
    print(get_commandline_args(), file=sys.stderr)
    parsed = get_parser().parse_args(cmd)
    options = vars(parsed)
    # "config" is consumed by config_argparse itself, not by inference().
    options.pop("config", None)
    inference(**options)
# Script entry point.
if __name__ == "__main__":
    main()
| 21,667 | 33.503185 | 88 | py |
espnet | espnet-master/espnet2/bin/launch.py | #!/usr/bin/env python3
import argparse
import logging
import os
import shlex
import shutil
import subprocess
import sys
import uuid
from pathlib import Path
from espnet2.utils.types import str2bool, str_or_none
from espnet.utils.cli_utils import get_commandline_args
def get_parser():
    """Build the argument parser for the distributed-launch helper.

    Returns:
        argparse.ArgumentParser: Parser covering the scheduler command,
        logging, GPU/node topology, and distributed-initialization options.
    """
    parser = argparse.ArgumentParser(
        description="Launch distributed process with appropriate options. ",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--cmd",
        help="The path of cmd script of Kaldi: run.pl. queue.pl, or slurm.pl",
        default="utils/run.pl",
    )
    parser.add_argument(
        "--log",
        help="The path of log file used by cmd",
        default="run.log",
    )
    parser.add_argument(
        "--max_num_log_files",
        # Bug fix: without type=int a value given on the command line stays a
        # str, and the log-rotation arithmetic in main()
        # (range(args.max_num_log_files - 1, ...)) raises TypeError.
        type=int,
        help="The maximum number of log-files to be kept",
        default=1000,
    )
    parser.add_argument(
        "--ngpu", type=int, default=1, help="The number of GPUs per node"
    )
    # --num_nodes (scheduler-managed) and --host (direct SSH) are exclusive.
    egroup = parser.add_mutually_exclusive_group()
    egroup.add_argument("--num_nodes", type=int, default=1, help="The number of nodes")
    egroup.add_argument(
        "--host",
        type=str,
        default=None,
        help="Directly specify the host names. The job are submitted via SSH. "
        "Multiple host names can be specified by splitting by comma. e.g. host1,host2"
        " You can also the device id after the host name with ':'. e.g. "
        "host1:0:2:3,host2:0:2. If the device ids are specified in this way, "
        "the value of --ngpu is ignored.",
    )
    parser.add_argument(
        "--envfile",
        type=str_or_none,
        default="path.sh",
        help="Source the shell script before executing command. "
        "This option is used when --host is specified.",
    )
    parser.add_argument(
        "--multiprocessing_distributed",
        type=str2bool,
        default=True,
        help="Distributed method is used when single-node mode.",
    )
    parser.add_argument(
        "--master_port",
        type=int,
        default=None,
        help="Specify the port number of master"
        "Master is a host machine has RANK0 process.",
    )
    parser.add_argument(
        "--master_addr",
        type=str,
        default=None,
        help="Specify the address s of master. "
        "Master is a host machine has RANK0 process.",
    )
    parser.add_argument(
        "--init_file_prefix",
        type=str,
        default=".dist_init_",
        help="The file name prefix for init_file, which is used for "
        "'Shared-file system initialization'. "
        "This option is used when --port is not specified",
    )
    # Remaining positional arguments are the training command to launch.
    parser.add_argument("args", type=str, nargs="+")
    return parser
def main(cmd=None):
    """Launch (possibly distributed) training processes and wait for them.

    Depending on the options, jobs are spawned in one of four ways: via SSH
    (--host), as a single local node, through Slurm (slurm.pl), or through
    MPI (any other scheduler script). The function blocks until every
    spawned process exits and raises RuntimeError if any of them failed.

    Args:
        cmd: Optional list of argument strings; defaults to sys.argv.

    Raises:
        RuntimeError: If --cmd is not an executable script path, if an
            unsupported scheduler is combined with multi-node mode, or if
            any launched process exits with a non-zero status.
    """
    logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    logging.basicConfig(level=logging.INFO, format=logfmt)
    logging.info(get_commandline_args())

    parser = get_parser()
    args = parser.parse_args(cmd)
    args.cmd = shlex.split(args.cmd)

    # --cmd must point at an executable scheduler script (e.g. utils/run.pl)
    # unless the jobs are submitted directly via SSH.
    if args.host is None and shutil.which(args.cmd[0]) is None:
        raise RuntimeError(
            f"The first args of --cmd should be a script path. e.g. utils/run.pl: "
            f"{args.cmd[0]}"
        )

    # Specify init_method:
    # See: https://pytorch.org/docs/stable/distributed.html#initialization
    if args.host is None and args.num_nodes <= 1:
        # Automatically set init_method if num_node=1
        init_method = None
    else:
        if args.master_port is None:
            # Try "shared-file system initialization" if master_port is not
            # specified. Give a random name to avoid reusing a previous file.
            init_file = args.init_file_prefix + str(uuid.uuid4())
            init_file = Path(init_file).absolute()
            Path(init_file).parent.mkdir(exist_ok=True, parents=True)
            init_method = ["--dist_init_method", f"file://{init_file}"]
        else:
            init_method = ["--dist_master_port", str(args.master_port)]

        # This can be omitted if slurm mode
        if args.master_addr is not None:
            init_method += ["--dist_master_addr", args.master_addr]
        elif args.host is not None:
            # The first listed host acts as the RANK0 master.
            init_method += [
                "--dist_master_addr",
                args.host.split(",")[0].split(":")[0],
            ]

    # Log-rotation: shift run.log -> run.1.log -> run.2.log -> ... keeping at
    # most max_num_log_files files; the oldest one is deleted.
    for i in range(args.max_num_log_files - 1, -1, -1):
        if i == 0:
            p = Path(args.log)
            pn = p.parent / (p.stem + ".1" + p.suffix)
        else:
            _p = Path(args.log)
            p = _p.parent / (_p.stem + f".{i}" + _p.suffix)
            pn = _p.parent / (_p.stem + f".{i + 1}" + _p.suffix)
        if p.exists():
            if i == args.max_num_log_files - 1:
                p.unlink()
            else:
                shutil.move(p, pn)

    processes = []
    # Submit command via SSH
    if args.host is not None:
        hosts = []
        ids_list = []
        # e.g. args.host = "host1:0:2,host2:0:1"
        for host in args.host.split(","):
            # e.g host = "host1:0:2"
            sps = host.split(":")
            host = sps[0]
            if len(sps) > 1:
                ids = [int(x) for x in sps[1:]]
            else:
                ids = list(range(args.ngpu))
            hosts.append(host)
            ids_list.append(ids)

        # A host with an empty device list still contributes one CPU process.
        world_size = sum(max(len(x), 1) for x in ids_list)
        logging.info(f"{len(hosts)}nodes with world_size={world_size} via SSH")

        if args.envfile is not None:
            env = f"source {args.envfile}"
        else:
            env = ""

        if args.log != "-":
            Path(args.log).parent.mkdir(parents=True, exist_ok=True)
            f = Path(args.log).open("w", encoding="utf-8")
        else:
            # Output to stdout/stderr
            f = None

        rank = 0
        for host, ids in zip(hosts, ids_list):
            ngpu = 1 if len(ids) > 0 else 0
            ids = ids if len(ids) > 0 else ["none"]

            for local_rank in ids:
                cmd = (
                    args.args
                    + [
                        "--ngpu",
                        str(ngpu),
                        "--multiprocessing_distributed",
                        "false",
                        "--local_rank",
                        str(local_rank),
                        "--dist_rank",
                        str(rank),
                        "--dist_world_size",
                        str(world_size),
                    ]
                    + init_method
                )
                if ngpu == 0:
                    # Gloo supports both GPU and CPU mode.
                    # See: https://pytorch.org/docs/stable/distributed.html
                    cmd += ["--dist_backend", "gloo"]

                heredoc = f"""<< EOF
set -euo pipefail
cd {os.getcwd()}
{env}
{" ".join([c if len(c) != 0 else "''" for c in cmd])}
EOF
"""

                # FIXME(kamo): The process will be alive
                # even if this program is stopped because we don't set -t here,
                # i.e. not assigning pty,
                # and the program is not killed when SSH connection is closed.
                process = subprocess.Popen(
                    ["ssh", host, "bash", heredoc],
                    stdout=f,
                    stderr=f,
                )

                processes.append(process)
                rank += 1

    # If Single node
    elif args.num_nodes <= 1:
        if args.ngpu > 1:
            if args.multiprocessing_distributed:
                # NOTE:
                #   If multiprocessing_distributed=true,
                # -> Distributed mode, which is multi-process and Multi-GPUs.
                #    and TCP initializetion is used if single-node case:
                #      e.g. init_method="tcp://localhost:20000"
                logging.info(f"single-node with {args.ngpu}gpu on distributed mode")
            else:
                # NOTE:
                #   If multiprocessing_distributed=false
                # -> "DataParallel" mode, which is single-process
                #    and Multi-GPUs with threading.
                # See:
                # https://discuss.pytorch.org/t/why-torch-nn-parallel-distributeddataparallel-runs-faster-than-torch-nn-dataparallel-on-single-machine-with-multi-gpu/32977/2
                logging.info(f"single-node with {args.ngpu}gpu using DataParallel")

        # Using cmd as it is simply
        cmd = (
            args.cmd
            # arguments for ${cmd}
            + ["--gpu", str(args.ngpu), args.log]
            # arguments for *_train.py
            + args.args
            + [
                "--ngpu",
                str(args.ngpu),
                "--multiprocessing_distributed",
                str(args.multiprocessing_distributed),
            ]
        )
        process = subprocess.Popen(cmd)
        processes.append(process)

    elif Path(args.cmd[0]).name == "run.pl":
        raise RuntimeError("run.pl doesn't support submitting to the other nodes.")

    elif Path(args.cmd[0]).name == "ssh.pl":
        raise RuntimeError("Use --host option instead of ssh.pl")

    # If Slurm
    elif Path(args.cmd[0]).name == "slurm.pl":
        logging.info(f"{args.num_nodes}nodes and {args.ngpu}gpu-per-node using srun")
        cmd = (
            args.cmd
            # arguments for ${cmd}
            + [
                "--gpu",
                str(args.ngpu),
                "--num_threads",
                str(max(args.ngpu, 1)),
                "--num_nodes",
                str(args.num_nodes),
                args.log,
                "srun",
                # Inherit all environment variable from parent process
                "--export=ALL",
            ]
            # arguments for *_train.py
            + args.args
            + [
                "--ngpu",
                str(args.ngpu),
                "--multiprocessing_distributed",
                "true",
                "--dist_launcher",
                "slurm",
            ]
            + init_method
        )
        if args.ngpu == 0:
            # Gloo supports both GPU and CPU mode.
            # See: https://pytorch.org/docs/stable/distributed.html
            cmd += ["--dist_backend", "gloo"]
        process = subprocess.Popen(cmd)
        processes.append(process)

    else:
        # This pattern can also works with Slurm.
        logging.info(f"{args.num_nodes}nodes and {args.ngpu}gpu-per-node using mpirun")
        cmd = (
            args.cmd
            # arguments for ${cmd}
            + [
                "--gpu",
                str(args.ngpu),
                "--num_threads",
                str(max(args.ngpu, 1)),
                # Make sure scheduler setting, i.e. conf/queue.conf
                # so that --num_nodes requires 1process-per-node
                "--num_nodes",
                str(args.num_nodes),
                args.log,
                "mpirun",
                # -np option can be omitted with Torque/PBS
                "-np",
                str(args.num_nodes),
            ]
            # arguments for *_train.py
            + args.args
            + [
                "--ngpu",
                str(args.ngpu),
                "--multiprocessing_distributed",
                "true",
                "--dist_launcher",
                "mpi",
            ]
            + init_method
        )
        if args.ngpu == 0:
            # Gloo supports both GPU and CPU mode.
            # See: https://pytorch.org/docs/stable/distributed.html
            cmd += ["--dist_backend", "gloo"]
        process = subprocess.Popen(cmd)
        processes.append(process)

    logging.info(f"log file: {args.log}")

    failed = False
    while any(p.returncode is None for p in processes):
        for process in processes:
            # If any process has failed, kill the processes that are still
            # running and reap them so the while-condition can become false.
            # (Bug fix: the condition was "process.returncode is not None",
            # which only "killed" processes that had already exited and left
            # the running ones untouched, contradicting the intent above.)
            if failed and process.returncode is None:
                process.kill()
                process.wait()
            else:
                try:
                    process.wait(0.5)
                except subprocess.TimeoutExpired:
                    pass
                if process.returncode is not None and process.returncode != 0:
                    failed = True

    for process in processes:
        if process.returncode != 0:
            print(
                subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd),
                file=sys.stderr,
            )
            p = Path(args.log)
            if p.exists():
                with p.open() as f:
                    lines = list(f)
                raise RuntimeError(
                    f"\n################### The last 1000 lines of {args.log} "
                    f"###################\n" + "".join(lines[-1000:])
                )
            else:
                raise RuntimeError
# Script entry point.
if __name__ == "__main__":
    main()
| 13,042 | 32.877922 | 173 | py |
espnet | espnet-master/espnet2/bin/enh_scoring.py | #!/usr/bin/env python3
import argparse
import logging
import re
import sys
from pathlib import Path
from typing import Dict, List, Union
import numpy as np
import torch
from mir_eval.separation import bss_eval_sources
from pystoi import stoi
from typeguard import check_argument_types
from espnet2.enh.loss.criterions.time_domain import SISNRLoss
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.fileio.sound_scp import SoundScpReader
from espnet2.train.dataset import kaldi_loader
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool
from espnet.utils.cli_utils import get_commandline_args
# Module-level SI-SNR criterion, instantiated once and reused for every
# utterance inside scoring() (the loss is negated there to report a score).
si_snr_loss = SISNRLoss()
def get_readers(scps: List[str], dtype: str):
    """Build scp readers, auto-detecting the audio format from the first entry.

    The first line of the first scp file is inspected: a path ending in
    ``.ark`` (optionally with an ``:offset`` suffix) selects Kaldi-ark
    loading, anything else is treated as a regular sound file.

    Args:
        scps: Paths to scp files (one per speaker/source).
        dtype: Floating-point dtype used when loading audio.

    Returns:
        Tuple of (readers, audio_format) where audio_format is either
        "kaldi_ark" or "sound".
    """
    # Peek at the first scp entry to decide which reader class applies.
    with open(scps[0], "r") as first_scp:
        first_line = first_scp.readline()
        first_name = Path(first_line.strip().split(maxsplit=1)[1]).name
    if re.fullmatch(r".*\.ark(:\d+)?", first_name):
        # e.g. "xxx.ark" or "xxx.ark:123"
        return [kaldi_loader(f, float_dtype=dtype) for f in scps], "kaldi_ark"
    return [SoundScpReader(f, dtype=dtype) for f in scps], "sound"
def read_audio(reader, key, audio_format="sound"):
    """Fetch one utterance from a reader, normalizing across reader types.

    Args:
        reader: A SoundScpReader-like mapping (yields (rate, samples) tuples)
            or a kaldi-ark loader (yields the samples directly).
        key: Utterance id to look up.
        audio_format: Either "sound" or "kaldi_ark".

    Returns:
        The audio samples stored under ``key``.

    Raises:
        ValueError: If ``audio_format`` is not recognized.
    """
    if audio_format == "kaldi_ark":
        return reader[key]
    if audio_format == "sound":
        # SoundScpReader entries are (sampling_rate, samples); drop the rate.
        return reader[key][1]
    raise ValueError(f"Unknown audio format: {audio_format}")
def scoring(
    output_dir: str,
    dtype: str,
    log_level: Union[int, str],
    key_file: str,
    ref_scp: List[str],
    inf_scp: List[str],
    ref_channel: int,
    flexible_numspk: bool,
    use_dnsmos: bool,
    dnsmos_args: Dict,
    use_pesq: bool,
):
    """Score enhanced/separated audio against reference signals.

    For every utterance listed in ``key_file``, computes STOI, ESTOI, SI-SNR
    and BSS-eval metrics (SDR/SIR/SAR) per speaker — plus optional DNSMOS and
    PESQ — and writes one scp-style text file per metric under ``output_dir``.

    Args:
        output_dir: Directory for the per-metric result files.
        dtype: Floating-point dtype used when reading audio.
        log_level: Logging verbosity.
        key_file: scp-style file whose first column lists utterance ids.
        ref_scp: Reference scp files, one per speaker.
        inf_scp: Enhanced/estimated scp files, one per speaker.
        ref_channel: Channel index used when ref and/or inf are multi-channel.
        flexible_numspk: Allow the number of reference and estimated speakers
            to differ; the missing side is padded with a tiny constant.
        use_dnsmos: Whether to compute DNSMOS scores.
        dnsmos_args: Dict with keys "mode", "auth_key", "primary_model" and
            "p808_model" configuring the DNSMOS backend.
        use_pesq: Whether to compute PESQ (reporting PESQ requires a license).
    """
    assert check_argument_types()

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    # Set up the optional DNSMOS backend (local ONNX models or the Web-API).
    if use_dnsmos:
        if dnsmos_args["mode"] == "local":
            from espnet2.enh.layers.dnsmos import DNSMOS_local

            if not Path(dnsmos_args["primary_model"]).exists():
                raise ValueError(
                    f"The primary model '{dnsmos_args['primary_model']}' doesn't exist."
                    " You can download the model from https://github.com/microsoft/"
                    "DNS-Challenge/tree/master/DNSMOS/DNSMOS/sig_bak_ovr.onnx"
                )
            if not Path(dnsmos_args["p808_model"]).exists():
                raise ValueError(
                    f"The P808 model '{dnsmos_args['p808_model']}' doesn't exist."
                    " You can download the model from https://github.com/microsoft/"
                    "DNS-Challenge/tree/master/DNSMOS/DNSMOS/model_v8.onnx"
                )
            dnsmos = DNSMOS_local(
                dnsmos_args["primary_model"], dnsmos_args["p808_model"]
            )
            logging.warning("Using local DNSMOS models for evaluation")
        elif dnsmos_args["mode"] == "web":
            from espnet2.enh.layers.dnsmos import DNSMOS_web

            if not dnsmos_args["auth_key"]:
                raise ValueError(
                    "Please specify the authentication key for access to the Web-API. "
                    "You can apply for the AUTH_KEY at https://github.com/microsoft/"
                    "DNS-Challenge/blob/master/DNSMOS/README.md#to-use-the-web-api"
                )
            dnsmos = DNSMOS_web(dnsmos_args["auth_key"])
            logging.warning("Using the DNSMOS Web-API for evaluation")
    else:
        dnsmos = None

    # PESQ is an optional third-party dependency; import lazily.
    if use_pesq:
        try:
            from pesq import PesqError, pesq

            logging.warning("Using the PESQ package for evaluation")
        except ImportError:
            raise ImportError("Please install pesq and retry: pip install pesq")
    else:
        pesq = None

    if not flexible_numspk:
        assert len(ref_scp) == len(inf_scp), ref_scp
    # May be overwritten per-utterance below when flexible_numspk is True.
    num_spk = len(ref_scp)

    keys = [
        line.rstrip().split(maxsplit=1)[0] for line in open(key_file, encoding="utf-8")
    ]

    ref_readers, ref_audio_format = get_readers(ref_scp, dtype)
    inf_readers, inf_audio_format = get_readers(inf_scp, dtype)

    # get sample rate (from the reader for ark data, from the first entry
    # for sound files)
    retval = ref_readers[0][keys[0]]
    if ref_audio_format == "kaldi_ark":
        sample_rate = ref_readers[0].rate
    elif ref_audio_format == "sound":
        sample_rate = retval[0]
    else:
        raise NotImplementedError(ref_audio_format)
    assert sample_rate is not None, (sample_rate, ref_audio_format)

    # check keys: every reference/estimate pair must cover the same utterances
    if not flexible_numspk:
        for inf_reader, ref_reader in zip(inf_readers, ref_readers):
            assert inf_reader.keys() == ref_reader.keys()

    with DatadirWriter(output_dir) as writer:
        for n, key in enumerate(keys):
            logging.info(f"[{n}] Scoring {key}")
            if not flexible_numspk:
                ref_audios = [
                    read_audio(ref_reader, key, audio_format=ref_audio_format)
                    for ref_reader in ref_readers
                ]
                inf_audios = [
                    read_audio(inf_reader, key, audio_format=inf_audio_format)
                    for inf_reader in inf_readers
                ]
            else:
                # Only readers that actually contain this key contribute.
                ref_audios = [
                    read_audio(ref_reader, key, audio_format=ref_audio_format)
                    for ref_reader in ref_readers
                    if key in ref_reader.keys()
                ]
                inf_audios = [
                    read_audio(inf_reader, key, audio_format=inf_audio_format)
                    for inf_reader in inf_readers
                    if key in inf_reader.keys()
                ]
            ref = np.array(ref_audios)
            inf = np.array(inf_audios)
            # Reduce everything to single-channel via ref_channel.
            if ref.ndim > inf.ndim:
                # multi-channel reference and single-channel output
                ref = ref[..., ref_channel]
            elif ref.ndim < inf.ndim:
                # single-channel reference and multi-channel output
                inf = inf[..., ref_channel]
            elif ref.ndim == inf.ndim == 3:
                # multi-channel reference and output
                ref = ref[..., ref_channel]
                inf = inf[..., ref_channel]
            if not flexible_numspk:
                assert ref.shape == inf.shape, (ref.shape, inf.shape)
            else:
                # epsilon value to avoid divergence
                # caused by zero-value, e.g., log(0)
                eps = 0.000001
                # Pad the smaller side with near-silent signals so both sides
                # have the same number of speakers for permutation matching.
                # if num_spk of ref > num_spk of inf
                if ref.shape[0] > inf.shape[0]:
                    p = np.full((ref.shape[0] - inf.shape[0], inf.shape[1]), eps)
                    inf = np.concatenate([inf, p])
                    num_spk = ref.shape[0]
                # if num_spk of ref < num_spk of inf
                elif ref.shape[0] < inf.shape[0]:
                    p = np.full((inf.shape[0] - ref.shape[0], ref.shape[1]), eps)
                    ref = np.concatenate([ref, p])
                    num_spk = inf.shape[0]
                else:
                    num_spk = ref.shape[0]

            # perm[i] is the estimate index matched to reference i by BSS-eval.
            sdr, sir, sar, perm = bss_eval_sources(ref, inf, compute_permutation=True)

            for i in range(num_spk):
                stoi_score = stoi(ref[i], inf[int(perm[i])], fs_sig=sample_rate)
                estoi_score = stoi(
                    ref[i], inf[int(perm[i])], fs_sig=sample_rate, extended=True
                )
                # SISNRLoss returns a loss (lower is better); negate for a score.
                si_snr_score = -float(
                    si_snr_loss(
                        torch.from_numpy(ref[i][None, ...]),
                        torch.from_numpy(inf[int(perm[i])][None, ...]),
                    )
                )
                if dnsmos:
                    dnsmos_score = dnsmos(inf[int(perm[i])], sample_rate)
                    writer[f"OVRL_spk{i + 1}"][key] = str(dnsmos_score["OVRL"])
                    writer[f"SIG_spk{i + 1}"][key] = str(dnsmos_score["SIG"])
                    writer[f"BAK_spk{i + 1}"][key] = str(dnsmos_score["BAK"])
                    writer[f"P808_MOS_spk{i + 1}"][key] = str(dnsmos_score["P808_MOS"])
                if pesq:
                    # PESQ is only defined for narrowband (8 kHz) and
                    # wideband (16 kHz) signals.
                    if sample_rate == 8000:
                        mode = "nb"
                    elif sample_rate == 16000:
                        mode = "wb"
                    else:
                        raise ValueError(
                            "sample rate must be 8000 or 16000 for PESQ evaluation, "
                            f"but got {sample_rate}"
                        )
                    pesq_score = pesq(
                        sample_rate,
                        ref[i],
                        inf[int(perm[i])],
                        mode=mode,
                        on_error=PesqError.RETURN_VALUES,
                    )
                    if pesq_score == PesqError.NO_UTTERANCES_DETECTED:
                        logging.warning(
                            f"[PESQ] Error: No utterances detected for {key}. "
                            "Skipping this utterance."
                        )
                    else:
                        writer[f"PESQ_{mode.upper()}_spk{i + 1}"][key] = str(pesq_score)

                writer[f"STOI_spk{i + 1}"][key] = str(stoi_score * 100)  # in percentage
                writer[f"ESTOI_spk{i + 1}"][key] = str(estoi_score * 100)
                writer[f"SI_SNR_spk{i + 1}"][key] = str(si_snr_score)
                writer[f"SDR_spk{i + 1}"][key] = str(sdr[i])
                writer[f"SAR_spk{i + 1}"][key] = str(sar[i])
                writer[f"SIR_spk{i + 1}"][key] = str(sir[i])
                # save permutation assigned script file, i.e. the inf scp entry
                # reordered so spk{i+1} matches reference speaker i.
                if i < len(ref_scp):
                    if inf_audio_format == "sound":
                        writer[f"wav_spk{i + 1}"][key] = inf_readers[perm[i]].data[key]
                    elif inf_audio_format == "kaldi_ark":
                        # NOTE: SegmentsExtractor is not supported
                        writer[f"wav_spk{i + 1}"][key] = inf_readers[
                            perm[i]
                        ].loader._dict[key]
                    else:
                        raise ValueError(f"Unknown audio format: {inf_audio_format}")
def get_parser():
    """Build the argument parser for the enhancement scoring script.

    Returns:
        config_argparse.ArgumentParser: Parser covering input scp files,
        DNSMOS and PESQ options, and output configuration.
    """
    parser = config_argparse.ArgumentParser(
        description="Frontend inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )

    parser.add_argument("--output_dir", type=str, required=True)

    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--ref_scp",
        type=str,
        required=True,
        action="append",
    )
    group.add_argument(
        "--inf_scp",
        type=str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str)
    group.add_argument("--ref_channel", type=int, default=0)
    group.add_argument("--flexible_numspk", type=str2bool, default=False)

    group = parser.add_argument_group("DNSMOS related")
    group.add_argument("--use_dnsmos", type=str2bool, default=False)
    group.add_argument(
        "--dnsmos_mode",
        type=str,
        choices=("local", "web"),
        default="local",
        help="Use local DNSMOS model or web API for DNSMOS calculation",
    )
    # Fix: help strings below previously misspelled the option name as
    # "dnsmsos_mode".
    group.add_argument(
        "--dnsmos_auth_key", type=str, default="", help="Required if dnsmos_mode='web'"
    )
    group.add_argument(
        "--dnsmos_primary_model",
        type=str,
        default="./DNSMOS/sig_bak_ovr.onnx",
        help="Path to the primary DNSMOS model. Required if dnsmos_mode='local'",
    )
    group.add_argument(
        "--dnsmos_p808_model",
        type=str,
        default="./DNSMOS/model_v8.onnx",
        help="Path to the p808 model. Required if dnsmos_mode='local'",
    )

    group = parser.add_argument_group("PESQ related")
    group.add_argument(
        "--use_pesq",
        type=str2bool,
        default=False,
        # Fix: typo "Bebore" -> "Before" in the help text.
        help="Before setting this to True, please make sure that you or "
        "your institution have the license "
        "(check https://www.itu.int/rec/T-REC-P.862-200511-I!Amd2/en) to report PESQ",
    )

    return parser
def main(cmd=None):
    """CLI entry point: parse args, regroup DNSMOS options, run scoring().

    Args:
        cmd: Optional list of argument strings; defaults to sys.argv.
    """
    # Echo the invoked command line to stderr for reproducibility.
    print(get_commandline_args(), file=sys.stderr)
    options = vars(get_parser().parse_args(cmd))
    # "config" belongs to config_argparse, not to scoring().
    options.pop("config", None)
    # Collapse the flat --dnsmos_* flags into the single dict that
    # scoring() expects as its "dnsmos_args" parameter.
    options["dnsmos_args"] = {
        name: options.pop(f"dnsmos_{name}")
        for name in ("mode", "auth_key", "primary_model", "p808_model")
    }
    scoring(**options)
# Script entry point.
if __name__ == "__main__":
    main()
| 13,261 | 36.252809 | 88 | py |
espnet | espnet-master/espnet2/bin/asr_inference_streaming.py | #!/usr/bin/env python3
import argparse
import logging
import math
import sys
from pathlib import Path
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from typeguard import check_argument_types, check_return_type
from espnet2.asr.encoder.contextual_block_conformer_encoder import ( # noqa: H301
ContextualBlockConformerEncoder,
)
from espnet2.asr.encoder.contextual_block_transformer_encoder import ( # noqa: H301
ContextualBlockTransformerEncoder,
)
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.asr import ASRTask
from espnet2.tasks.lm import LMTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.batch_beam_search_online import BatchBeamSearchOnline
from espnet.nets.beam_search import Hypothesis
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.cli_utils import get_commandline_args
class Speech2TextStreaming:
    """Speech2TextStreaming class.

    Streaming (blockwise) speech-to-text: audio is fed chunk by chunk via
    ``__call__`` and internal frontend/encoder/beam-search states are carried
    across calls until ``is_final=True``.

    Details in "Streaming Transformer ASR with Blockwise Synchronous Beam Search"
    (https://arxiv.org/abs/2006.14941)

    Examples:
        >>> import soundfile
        >>> speech2text = Speech2TextStreaming("asr_config.yml", "asr.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> speech2text(audio)
        [(text, token, token_int, hypothesis object), ...]
    """

    def __init__(
        self,
        asr_train_config: Union[Path, str],
        asr_model_file: Union[Path, str] = None,
        lm_train_config: Union[Path, str] = None,
        lm_file: Union[Path, str] = None,
        token_type: str = None,
        bpemodel: str = None,
        device: str = "cpu",
        maxlenratio: float = 0.0,
        minlenratio: float = 0.0,
        batch_size: int = 1,
        dtype: str = "float32",
        beam_size: int = 20,
        ctc_weight: float = 0.5,
        lm_weight: float = 1.0,
        penalty: float = 0.0,
        nbest: int = 1,
        disable_repetition_detection=False,
        decoder_text_length_limit=0,
        encoded_feat_length_limit=0,
    ):
        """Load the ASR (and optional LM) models and build the online beam search.

        Only ``batch_size == 1`` is supported, and the ASR model must use a
        contextual-block (blockwise streaming) encoder.
        """
        assert check_argument_types()

        # 1. Build ASR model
        scorers = {}
        asr_model, asr_train_args = ASRTask.build_model_from_file(
            asr_train_config, asr_model_file, device
        )
        asr_model.to(dtype=getattr(torch, dtype)).eval()

        # Streaming decoding requires a contextual-block encoder.
        assert isinstance(
            asr_model.encoder, ContextualBlockTransformerEncoder
        ) or isinstance(asr_model.encoder, ContextualBlockConformerEncoder)

        decoder = asr_model.decoder
        ctc = CTCPrefixScorer(ctc=asr_model.ctc, eos=asr_model.eos)
        token_list = asr_model.token_list
        scorers.update(
            decoder=decoder,
            ctc=ctc,
            length_bonus=LengthBonus(len(token_list)),
        )

        # 2. Build Language model
        if lm_train_config is not None:
            lm, lm_train_args = LMTask.build_model_from_file(
                lm_train_config, lm_file, device
            )
            scorers["lm"] = lm.lm

        # 3. Build BeamSearch object
        weights = dict(
            decoder=1.0 - ctc_weight,
            ctc=ctc_weight,
            lm=lm_weight,
            length_bonus=penalty,
        )

        # The blockwise encoder configuration must be present in the training
        # args; its values are consumed by the encoder itself at run time.
        assert "encoder_conf" in asr_train_args
        assert "look_ahead" in asr_train_args.encoder_conf
        assert "hop_size" in asr_train_args.encoder_conf
        assert "block_size" in asr_train_args.encoder_conf
        # look_ahead = asr_train_args.encoder_conf['look_ahead']
        # hop_size = asr_train_args.encoder_conf['hop_size']
        # block_size = asr_train_args.encoder_conf['block_size']

        assert batch_size == 1

        beam_search = BatchBeamSearchOnline(
            beam_size=beam_size,
            weights=weights,
            scorers=scorers,
            sos=asr_model.sos,
            eos=asr_model.eos,
            vocab_size=len(token_list),
            token_list=token_list,
            # With pure CTC decoding there is no "full" pre-beam scoring.
            pre_beam_score_key=None if ctc_weight == 1.0 else "full",
            disable_repetition_detection=disable_repetition_detection,
            decoder_text_length_limit=decoder_text_length_limit,
            encoded_feat_length_limit=encoded_feat_length_limit,
        )

        # All full scorers must support batched scoring for the online search.
        non_batch = [
            k
            for k, v in beam_search.full_scorers.items()
            if not isinstance(v, BatchScorerInterface)
        ]
        assert len(non_batch) == 0

        # TODO(karita): make all scorers batchfied
        logging.info("BatchBeamSearchOnline implementation is selected.")

        beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
        for scorer in scorers.values():
            if isinstance(scorer, torch.nn.Module):
                scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
        logging.info(f"Beam_search: {beam_search}")
        logging.info(f"Decoding device={device}, dtype={dtype}")

        # 4. [Optional] Build Text converter: e.g. bpe-sym -> Text
        # Fall back to the settings stored with the trained model.
        if token_type is None:
            token_type = asr_train_args.token_type
        if bpemodel is None:
            bpemodel = asr_train_args.bpemodel

        if token_type is None:
            tokenizer = None
        elif token_type == "bpe":
            if bpemodel is not None:
                tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
            else:
                tokenizer = None
        else:
            tokenizer = build_tokenizer(token_type=token_type)
        converter = TokenIDConverter(token_list=token_list)
        logging.info(f"Text tokenizer: {tokenizer}")

        self.asr_model = asr_model
        self.asr_train_args = asr_train_args
        self.converter = converter
        self.tokenizer = tokenizer
        self.beam_search = beam_search
        self.maxlenratio = maxlenratio
        self.minlenratio = minlenratio
        self.device = device
        self.dtype = dtype
        self.nbest = nbest

        # STFT parameters drive the streaming waveform-buffer bookkeeping in
        # apply_frontend(); fall back to the frontend defaults when absent.
        if "n_fft" in asr_train_args.frontend_conf:
            self.n_fft = asr_train_args.frontend_conf["n_fft"]
        else:
            self.n_fft = 512
        if "hop_length" in asr_train_args.frontend_conf:
            self.hop_length = asr_train_args.frontend_conf["hop_length"]
        else:
            self.hop_length = 128
        if (
            "win_length" in asr_train_args.frontend_conf
            and asr_train_args.frontend_conf["win_length"] is not None
        ):
            self.win_length = asr_train_args.frontend_conf["win_length"]
        else:
            self.win_length = self.n_fft

        self.reset()

    def reset(self):
        """Clear all streaming state so a new utterance can be decoded."""
        self.frontend_states = None
        self.encoder_states = None
        self.beam_search.reset()

    def apply_frontend(
        self, speech: torch.Tensor, prev_states=None, is_final: bool = False
    ):
        """Convert a raw waveform chunk into (optionally normalized) features.

        A waveform buffer is carried across calls so that STFT frames at chunk
        boundaries are computed consistently with offline processing, and the
        emitted frames are trimmed so each frame appears exactly once.

        Args:
            speech: 1-D waveform chunk, shape (Nsamples,).
            prev_states: Dict with key "waveform_buffer" returned by the
                previous call, or None for the first chunk.
            is_final: Whether this is the last chunk of the utterance.

        Returns:
            Tuple (feats, feats_lengths, next_states); feats is None when the
            buffered waveform is still too short to produce any frame.
        """
        if prev_states is not None:
            # Prepend the samples carried over from the previous chunk.
            buf = prev_states["waveform_buffer"]
            speech = torch.cat([buf, speech], dim=0)

        # At least one full analysis window is needed to emit a frame.
        has_enough_samples = False if speech.size(0) <= self.win_length else True
        if not has_enough_samples:
            if is_final:
                # Zero-pad the tail so the last partial window is processed.
                pad = torch.zeros(self.win_length - speech.size(0), dtype=speech.dtype)
                speech = torch.cat([speech, pad], dim=0)
            else:
                # Not enough data yet: keep buffering and emit nothing.
                feats = None
                feats_lengths = None
                next_states = {"waveform_buffer": speech.clone()}
                return feats, feats_lengths, next_states

        if is_final:
            speech_to_process = speech
            waveform_buffer = None
        else:
            # Process only whole hops; keep the tail for the next call.
            n_frames = speech.size(0) // self.hop_length
            n_residual = speech.size(0) % self.hop_length
            speech_to_process = speech.narrow(0, 0, n_frames * self.hop_length)
            # NOTE(review): the buffer keeps
            # (2 * ceil(ceil(win_length / hop_length) / 2) - 1) hops plus the
            # residual samples; this mirrors the frame-trimming arithmetic
            # below — confirm against the frontend's STFT centering before
            # changing either side.
            waveform_buffer = speech.narrow(
                0,
                speech.size(0)
                - (math.ceil(math.ceil(self.win_length / self.hop_length) / 2) * 2 - 1)
                * self.hop_length
                - n_residual,
                (math.ceil(math.ceil(self.win_length / self.hop_length) / 2) * 2 - 1)
                * self.hop_length
                + n_residual,
            ).clone()

        # data: (Nsamples,) -> (1, Nsamples)
        speech_to_process = speech_to_process.unsqueeze(0).to(
            getattr(torch, self.dtype)
        )
        lengths = speech_to_process.new_full(
            [1], dtype=torch.long, fill_value=speech_to_process.size(1)
        )
        batch = {"speech": speech_to_process, "speech_lengths": lengths}

        # lengths: (1,)
        # a. To device
        batch = to_device(batch, device=self.device)

        feats, feats_lengths = self.asr_model._extract_feats(**batch)
        if self.asr_model.normalize is not None:
            feats, feats_lengths = self.asr_model.normalize(feats, feats_lengths)

        # Trimming: drop the frames that overlap the buffered region so that
        # each frame is emitted exactly once across successive calls.
        if is_final:
            if prev_states is None:
                # First and only chunk: nothing was emitted before.
                pass
            else:
                # Drop the leading frames already emitted by the previous call.
                feats = feats.narrow(
                    1,
                    math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                    feats.size(1)
                    - math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                )
        else:
            if prev_states is None:
                # First chunk: hold back the trailing frames that depend on
                # future samples.
                feats = feats.narrow(
                    1,
                    0,
                    feats.size(1)
                    - math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                )
            else:
                # Middle chunk: trim both the already-emitted leading frames
                # and the not-yet-final trailing frames.
                feats = feats.narrow(
                    1,
                    math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                    feats.size(1)
                    - 2 * math.ceil(math.ceil(self.win_length / self.hop_length) / 2),
                )
        feats_lengths = feats.new_full([1], dtype=torch.long, fill_value=feats.size(1))

        if is_final:
            next_states = None
        else:
            next_states = {"waveform_buffer": waveform_buffer}
        return feats, feats_lengths, next_states

    @torch.no_grad()
    def __call__(
        self, speech: Union[torch.Tensor, np.ndarray], is_final: bool = True
    ) -> List[Tuple[Optional[str], List[str], List[int], Hypothesis]]:
        """Inference

        Args:
            speech: Input speech chunk (1-D waveform).
            is_final: Whether this chunk ends the utterance; when True the
                internal streaming state is reset after decoding.

        Returns:
            N-best list of (text, token, token_int, hyp); empty when not
            enough audio has been buffered to produce features yet.
        """
        assert check_argument_types()

        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)

        feats, feats_lengths, self.frontend_states = self.apply_frontend(
            speech, self.frontend_states, is_final=is_final
        )
        if feats is not None:
            # The blockwise encoder keeps its own state across calls.
            enc, _, self.encoder_states = self.asr_model.encoder(
                feats,
                feats_lengths,
                self.encoder_states,
                is_final=is_final,
                infer_mode=True,
            )
            nbest_hyps = self.beam_search(
                x=enc[0],
                maxlenratio=self.maxlenratio,
                minlenratio=self.minlenratio,
                is_final=is_final,
            )
            ret = self.assemble_hyps(nbest_hyps)
        else:
            ret = []

        if is_final:
            self.reset()
        return ret

    def assemble_hyps(self, hyps):
        """Convert raw beam-search hypotheses into (text, token, ids, hyp) tuples."""
        nbest_hyps = hyps[: self.nbest]
        results = []
        for hyp in nbest_hyps:
            assert isinstance(hyp, Hypothesis), type(hyp)

            # remove sos/eos and get results
            token_int = hyp.yseq[1:-1].tolist()

            # remove blank symbol id, which is assumed to be 0
            token_int = list(filter(lambda x: x != 0, token_int))

            # Change integer-ids to tokens
            token = self.converter.ids2tokens(token_int)

            if self.tokenizer is not None:
                text = self.tokenizer.tokens2text(token)
            else:
                text = None
            results.append((text, token, token_int, hyp))

        assert check_return_type(results)
        return results
def inference(
    output_dir: str,
    maxlenratio: float,
    minlenratio: float,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    ctc_weight: float,
    lm_weight: float,
    penalty: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    asr_train_config: str,
    asr_model_file: str,
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    sim_chunk_length: int,
    disable_repetition_detection: bool,
    encoded_feat_length_limit: int,
    decoder_text_length_limit: int,
):
    """Run streaming ASR decoding over a dataset and write N-best results.

    For each utterance, either decodes the whole waveform in one call
    (``sim_chunk_length == 0``) or simulates streaming by feeding fixed-size
    chunks, then writes token/token_int/score/text files per N-best rank
    under ``output_dir``.

    Raises:
        NotImplementedError: For batch decoding, word LMs, or multi-GPU
            decoding, none of which are supported.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if word_lm_train_config is not None:
        raise NotImplementedError("Word LM is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set random-seed
    set_all_random_seed(seed)

    # 2. Build speech2text
    speech2text = Speech2TextStreaming(
        asr_train_config=asr_train_config,
        asr_model_file=asr_model_file,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        maxlenratio=maxlenratio,
        minlenratio=minlenratio,
        dtype=dtype,
        beam_size=beam_size,
        ctc_weight=ctc_weight,
        lm_weight=lm_weight,
        penalty=penalty,
        nbest=nbest,
        disable_repetition_detection=disable_repetition_detection,
        decoder_text_length_limit=decoder_text_length_limit,
        encoded_feat_length_limit=encoded_feat_length_limit,
    )

    # 3. Build data-iterator
    loader = ASRTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False),
        collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )

    # 7 .Start for-loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            assert len(batch.keys()) == 1

            try:
                if sim_chunk_length == 0:
                    # N-best list of (text, token, token_int, hyp_object)
                    results = speech2text(**batch)
                else:
                    # Simulate streaming by feeding fixed-size chunks.
                    speech = batch["speech"]
                    n_chunks = len(speech) // sim_chunk_length
                    for i in range(n_chunks):
                        speech2text(
                            speech=speech[
                                i * sim_chunk_length : (i + 1) * sim_chunk_length
                            ],
                            is_final=False,
                        )
                    # Bug fix: when the utterance is shorter than one chunk the
                    # loop above never runs and the loop variable "i" (used
                    # previously as "(i + 1) * sim_chunk_length") was unbound;
                    # index the final residual chunk via n_chunks instead.
                    results = speech2text(
                        speech[n_chunks * sim_chunk_length : len(speech)],
                        is_final=True,
                    )
            except TooShortUttError as e:
                # Emit a dummy hypothesis so downstream files stay aligned.
                logging.warning(f"Utterance {keys} {e}")
                hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
                results = [[" ", ["<space>"], [2], hyp]] * nbest

            # Only supporting batch_size==1
            key = keys[0]
            for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
                # Create a directory: outdir/{n}best_recog
                ibest_writer = writer[f"{n}best_recog"]

                # Write the result to each file
                ibest_writer["token"][key] = " ".join(token)
                ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                ibest_writer["score"][key] = str(hyp.score)

                if text is not None:
                    ibest_writer["text"][key] = text
def get_parser():
    """Build the argument parser for streaming ASR decoding.

    Returns:
        config_argparse.ArgumentParser: Parser that additionally accepts a
        ``--config`` yaml file whose values become argument defaults.
    """
    parser = config_argparse.ArgumentParser(
        description="ASR Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    group.add_argument(
        "--sim_chunk_length",
        type=int,
        default=0,
        # FIX: typo "evalution" -> "evaluation"
        help="The length of one chunk, to which speech will be "
        "divided for evaluation of streaming processing.",
    )
    group = parser.add_argument_group("The model configuration related")
    group.add_argument("--asr_train_config", type=str, required=True)
    group.add_argument("--asr_model_file", type=str, required=True)
    group.add_argument("--lm_train_config", type=str)
    group.add_argument("--lm_file", type=str)
    group.add_argument("--word_lm_train_config", type=str)
    group.add_argument("--word_lm_file", type=str)
    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    group.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        # FIX: grammar "a end-detect" -> "an end-detect"
        help="Input length ratio to obtain max output length. "
        "If maxlenratio=0.0 (default), it uses an end-detect "
        "function to automatically find maximum hypothesis lengths",
    )
    group.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    group.add_argument(
        "--ctc_weight",
        type=float,
        default=0.5,
        help="CTC weight in joint decoding",
    )
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument("--disable_repetition_detection", type=str2bool, default=False)
    group.add_argument(
        "--encoded_feat_length_limit",
        type=int,
        default=0,
        # FIX: implicit string concatenation was missing a space
        # ("...featureto input..."); add the trailing space.
        help="Limit the lengths of the encoded feature " "to input to the decoder.",
    )
    group.add_argument(
        "--decoder_text_length_limit",
        type=int,
        default=0,
        # FIX: same missing-space defect ("...textto input...").
        help="Limit the lengths of the text " "to input to the decoder.",
    )
    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for ASR model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )
    return parser
def main(cmd=None):
    """Command-line entry point: parse arguments and run ``inference``."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # "--config" is consumed by config_argparse itself; inference() has no
    # such parameter, so drop it before splatting.
    kwargs.pop("config", None)
    inference(**kwargs)
# Allow invoking this module directly as a script.
if __name__ == "__main__":
    main()
| 21,829 | 33.432177 | 87 | py |
espnet | espnet-master/espnet2/bin/svs_inference.py | #!/usr/bin/env python3
"""Script to run the inference of singing-voice-synthesis model."""
import argparse
import logging
import shutil
import sys
import time
from pathlib import Path
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import soundfile as sf
import torch
from typeguard import check_argument_types
from espnet2.fileio.npy_scp import NpyScpWriter
from espnet2.gan_svs.vits import VITS
from espnet2.svs.singing_tacotron.singing_tacotron import singing_tacotron
from espnet2.tasks.svs import SVSTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.tts.utils import DurationCalculator
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
class SingingGenerate:
    """Singing-voice-synthesis inference module.

    Wraps a trained SVS model (and, optionally, a vocoder) and exposes a
    callable interface that maps score/label inputs to generated features
    and/or waveforms.

    Examples:
        >>> import soundfile
        >>> svs = SingingGenerate("config.yml", "model.pth")
        >>> wav = svs("Hello World")[0]
        >>> soundfile.write("out.wav", wav.numpy(), svs.fs, "PCM_16")
    """

    def __init__(
        self,
        train_config: Optional[Union[Path, str]],
        model_file: Optional[Union[Path, str]] = None,
        threshold: float = 0.5,
        minlenratio: float = 0.0,
        maxlenratio: float = 10.0,
        use_teacher_forcing: bool = False,
        use_att_constraint: bool = False,
        use_dynamic_filter: bool = False,
        backward_window: int = 2,
        forward_window: int = 4,
        speed_control_alpha: float = 1.0,
        noise_scale: float = 0.667,
        noise_scale_dur: float = 0.8,
        vocoder_config: Union[Path, str] = None,
        vocoder_checkpoint: Union[Path, str] = None,
        dtype: str = "float32",
        device: str = "cpu",
        seed: int = 777,
        always_fix_seed: bool = False,
        prefer_normalized_feats: bool = False,
    ):
        """Initialize SingingGenerate module.

        Args:
            train_config: SVS training config file (yaml).
            model_file: SVS model parameter file (pth).
            threshold: Stop-token threshold (singing_tacotron only).
            minlenratio / maxlenratio: Output length ratios
                (singing_tacotron only).
            use_teacher_forcing: Whether to use teacher forcing at inference.
            use_att_constraint / use_dynamic_filter: Attention options
                (singing_tacotron only).
            backward_window / forward_window: Attention windows
                (singing_tacotron only).
            speed_control_alpha: Kept for API compatibility.
                NOTE(review): currently unused in this class — confirm.
            noise_scale / noise_scale_dur: Flow / duration-predictor noise
                scales (VITS only).
            vocoder_config: Vocoder config file (yaml).
            vocoder_checkpoint: Vocoder checkpoint file.
            dtype: Data type used for inference.
            device: "cpu" or "cuda".
            seed: Random seed.
            always_fix_seed: Whether to re-seed for every call.
            prefer_normalized_feats: Feed normalized (not denormalized)
                features to the vocoder.
        """
        assert check_argument_types()
        # setup model
        model, train_args = SVSTask.build_model_from_file(
            train_config, model_file, device
        )
        model.to(dtype=getattr(torch, dtype)).eval()
        self.device = device
        self.dtype = dtype
        self.train_args = train_args
        self.model = model
        self.svs = model.svs
        self.normalize = model.normalize
        self.feats_extract = model.feats_extract
        self.duration_calculator = DurationCalculator()
        self.preprocess_fn = SVSTask.build_preprocess_fn(train_args, False)
        self.use_teacher_forcing = use_teacher_forcing
        self.seed = seed
        self.always_fix_seed = always_fix_seed
        self.vocoder = None
        self.prefer_normalized_feats = prefer_normalized_feats
        if vocoder_checkpoint is not None:
            vocoder = SVSTask.build_vocoder_from_file(
                vocoder_config, vocoder_checkpoint, model, device
            )
            if isinstance(vocoder, torch.nn.Module):
                vocoder.to(dtype=getattr(torch, dtype)).eval()
            self.vocoder = vocoder
        logging.info(f"Extractor:\n{self.feats_extract}")
        logging.info(f"Normalizer:\n{self.normalize}")
        logging.info(f"SVS:\n{self.svs}")
        if self.vocoder is not None:
            logging.info(f"Vocoder:\n{self.vocoder}")
        # setup decoding config; model-specific options are only added for
        # the matching architecture.
        decode_conf = {}
        decode_conf.update({"use_teacher_forcing": use_teacher_forcing})
        if isinstance(self.svs, VITS):
            decode_conf.update(
                noise_scale=noise_scale,
                noise_scale_dur=noise_scale_dur,
            )
        if isinstance(self.svs, singing_tacotron):
            decode_conf.update(
                threshold=threshold,
                maxlenratio=maxlenratio,
                minlenratio=minlenratio,
                use_att_constraint=use_att_constraint,
                use_dynamic_filter=use_dynamic_filter,
                forward_window=forward_window,
                backward_window=backward_window,
            )
        self.decode_conf = decode_conf

    @torch.no_grad()
    def __call__(
        self,
        text: Union[Dict[str, Tuple], torch.Tensor, np.ndarray],
        singing: Union[torch.Tensor, np.ndarray] = None,
        label: Union[torch.Tensor, np.ndarray] = None,
        midi: Union[torch.Tensor, np.ndarray] = None,
        duration_phn: Union[torch.Tensor, np.ndarray] = None,
        duration_ruled_phn: Union[torch.Tensor, np.ndarray] = None,
        duration_syb: Union[torch.Tensor, np.ndarray] = None,
        phn_cnt: Union[torch.Tensor, np.ndarray] = None,
        slur: Union[torch.Tensor, np.ndarray] = None,
        pitch: Union[torch.Tensor, np.ndarray] = None,
        energy: Union[torch.Tensor, np.ndarray] = None,
        spembs: Union[torch.Tensor, np.ndarray] = None,
        sids: Union[torch.Tensor, np.ndarray] = None,
        lids: Union[torch.Tensor, np.ndarray] = None,
        decode_conf: Optional[Dict[str, Any]] = None,
    ):
        """Run inference and return the model's output dict.

        When ``text`` is a dict, it must contain "label" and "score" entries
        and is tokenized by the training-time preprocess function; otherwise
        it is used directly as the token sequence.
        """
        assert check_argument_types()
        # check inputs: raise early if the model requires speaker/language
        # conditioning that was not provided.
        if self.use_sids and sids is None:
            raise RuntimeError("Missing required argument: 'sids'")
        if self.use_lids and lids is None:
            raise RuntimeError("Missing required argument: 'lids'")
        if self.use_spembs and spembs is None:
            raise RuntimeError("Missing required argument: 'spembs'")
        # prepare batch
        if isinstance(text, Dict):
            data = self.preprocess_fn(
                "<dummy>", dict(label=text["label"], score=text["score"])
            )
            label = data["label"]
            midi = data["midi"]
            duration_phn = data["duration_phn"]
            duration_ruled_phn = data["duration_ruled_phn"]
            duration_syb = data["duration_syb"]
            phn_cnt = data["phn_cnt"]
            slur = data["slur"]
            batch = dict(text=data["label"])
        else:
            batch = dict(text=text)
        # only forward the optional inputs that were actually given
        if singing is not None:
            batch.update(singing=singing)
        if label is not None:
            batch.update(label=label)
        if midi is not None:
            batch.update(midi=midi)
        if duration_phn is not None:
            batch.update(duration_phn=duration_phn)
        if duration_ruled_phn is not None:
            batch.update(duration_ruled_phn=duration_ruled_phn)
        if duration_syb is not None:
            batch.update(duration_syb=duration_syb)
        if pitch is not None:
            batch.update(pitch=pitch)
        if phn_cnt is not None:
            batch.update(phn_cnt=phn_cnt)
        if slur is not None:
            batch.update(slur=slur)
        if energy is not None:
            batch.update(energy=energy)
        if spembs is not None:
            batch.update(spembs=spembs)
        if sids is not None:
            batch.update(sids=sids)
        if lids is not None:
            batch.update(lids=lids)
        batch = to_device(batch, self.device)
        # overwrite the default decoding config with per-call options
        cfg = self.decode_conf
        if decode_conf is not None:
            cfg = self.decode_conf.copy()
            cfg.update(decode_conf)
        output_dict = self.model.inference(**batch, **cfg)
        if output_dict.get("att_w") is not None:
            duration, focus_rate = self.duration_calculator(output_dict["att_w"])
            output_dict.update(duration=duration, focus_rate=focus_rate)
        else:
            output_dict.update(duration=None, focus_rate=None)
        # apply vocoder (mel-to-wav)
        if self.vocoder is not None:
            if (
                self.prefer_normalized_feats
                or output_dict.get("feat_gen_denorm") is None
            ):
                input_feat = output_dict["feat_gen"]
            else:
                input_feat = output_dict["feat_gen_denorm"]
            wav = self.vocoder(input_feat)
            output_dict.update(wav=wav)
        return output_dict

    @property
    def fs(self) -> Optional[int]:
        """Return sampling rate (vocoder's if available, else the SVS's)."""
        if hasattr(self.vocoder, "fs"):
            return self.vocoder.fs
        elif hasattr(self.svs, "fs"):
            return self.svs.fs
        else:
            return None

    @property
    def use_speech(self) -> bool:
        """Return speech is needed or not in the inference."""
        return self.use_teacher_forcing or getattr(self.svs, "use_gst", False)

    @property
    def use_sids(self) -> bool:
        """Return whether a speaker id is needed in the inference."""
        return self.svs.spks is not None

    @property
    def use_lids(self) -> bool:
        """Return whether a language id is needed in the inference."""
        return self.svs.langs is not None

    @property
    def use_spembs(self) -> bool:
        """Return whether a speaker embedding is needed in the inference."""
        return self.svs.spk_embed_dim is not None

    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        vocoder_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build SingingGenerate instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
            vocoder_tag (Optional[str]): Vocoder tag of the pretrained vocoders.
                Currently, the tags of parallel_wavegan are supported, which should
                start with the prefix "parallel_wavegan/".
        Returns:
            SingingGenerate: SingingGenerate instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))
        if vocoder_tag is not None:
            if vocoder_tag.startswith("parallel_wavegan/"):
                try:
                    from parallel_wavegan.utils import download_pretrained_model
                except ImportError:
                    logging.error(
                        "`parallel_wavegan` is not installed. "
                        "Please install via `pip install -U parallel_wavegan`."
                    )
                    raise
                from parallel_wavegan import __version__

                # FIX: `V` was used below without ever being imported in this
                # module (NameError); import it locally next to the other
                # lazily-imported third-party names.
                from packaging.version import parse as V

                # NOTE(kan-bayashi): Filelock download is supported from 0.5.2
                assert V(__version__) > V("0.5.1"), (
                    "Please install the latest parallel_wavegan "
                    "via `pip install -U parallel_wavegan`."
                )
                vocoder_tag = vocoder_tag.replace("parallel_wavegan/", "")
                vocoder_file = download_pretrained_model(vocoder_tag)
                vocoder_config = Path(vocoder_file).parent / "config.yml"
                # FIX: __init__ takes `vocoder_checkpoint`, not `vocoder_file`;
                # the old keyword raised TypeError in SingingGenerate(**kwargs).
                kwargs.update(
                    vocoder_config=vocoder_config, vocoder_checkpoint=vocoder_file
                )
            else:
                raise ValueError(f"{vocoder_tag} is unsupported format.")
        return SingingGenerate(**kwargs)
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    train_config: Optional[str],
    model_file: Optional[str],
    use_teacher_forcing: bool,
    noise_scale: float,
    noise_scale_dur: float,
    allow_variable_data_keys: bool,
    vocoder_config: Optional[str] = None,
    vocoder_checkpoint: Optional[str] = None,
    vocoder_tag: Optional[str] = None,
):
    """Perform SVS model decoding.

    Builds a ``SingingGenerate`` model, iterates over the given dataset,
    and writes generated features, waveforms, attention plots, stop-token
    probability plots, durations and focus rates under ``output_dir``.
    Sub-directories for outputs the model did not produce are removed at
    the end.

    NOTE(review): ``vocoder_tag`` is accepted but never used in this
    function body — confirm whether it should be forwarded.
    """
    assert check_argument_types()
    # Only batch_size == 1 and single-GPU decoding are implemented.
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build model
    singingGenerate = SingingGenerate(
        train_config=train_config,
        model_file=model_file,
        use_teacher_forcing=use_teacher_forcing,
        noise_scale=noise_scale,
        noise_scale_dur=noise_scale_dur,
        vocoder_config=vocoder_config,
        vocoder_checkpoint=vocoder_checkpoint,
        dtype=dtype,
        device=device,
    )
    # 3. Build data-iterator
    loader = SVSTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=SVSTask.build_preprocess_fn(singingGenerate.train_args, False),
        collate_fn=SVSTask.build_collate_fn(singingGenerate.train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Start for-loop
    # Create all output sub-directories up front; unused ones are deleted
    # after the loop once we know which outputs the model produces.
    output_dir = Path(output_dir)
    (output_dir / "norm").mkdir(parents=True, exist_ok=True)
    (output_dir / "denorm").mkdir(parents=True, exist_ok=True)
    (output_dir / "speech_shape").mkdir(parents=True, exist_ok=True)
    (output_dir / "wav").mkdir(parents=True, exist_ok=True)
    (output_dir / "att_ws").mkdir(parents=True, exist_ok=True)
    (output_dir / "probs").mkdir(parents=True, exist_ok=True)
    (output_dir / "durations").mkdir(parents=True, exist_ok=True)
    (output_dir / "focus_rates").mkdir(parents=True, exist_ok=True)
    # Lazy load to avoid the backend error (matplotlib must be configured
    # with the non-interactive Agg backend before pyplot is imported).
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator
    with NpyScpWriter(
        output_dir / "norm",
        output_dir / "norm/feats.scp",
    ) as norm_writer, NpyScpWriter(
        output_dir / "denorm", output_dir / "denorm/feats.scp"
    ) as denorm_writer, open(
        output_dir / "speech_shape/speech_shape", "w"
    ) as shape_writer, open(
        output_dir / "durations/durations", "w"
    ) as duration_writer, open(
        output_dir / "focus_rates/focus_rates", "w"
    ) as focus_rate_writer:
        for idx, (keys, batch) in enumerate(loader, 1):
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert _bs == 1, _bs
            # Change to single sequence and remove *_length
            # because inference() requires 1-seq, not mini-batch.
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            logging.info(f"batch: {batch}")
            logging.info(f"keys: {keys}")
            start_time = time.perf_counter()
            output_dict = singingGenerate(**batch)
            key = keys[0]
            insize = next(iter(batch.values())).size(0) + 1
            if output_dict.get("feat_gen") is not None:
                # standard text2mel model case
                feat_gen = output_dict["feat_gen"]
                logging.info(
                    "inference speed = {:.1f} frames / sec.".format(
                        int(feat_gen.size(0)) / (time.perf_counter() - start_time)
                    )
                )
                logging.info(f"{key} (size:{insize}->{feat_gen.size(0)})")
                norm_writer[key] = output_dict["feat_gen"].cpu().numpy()
                shape_writer.write(
                    f"{key} " + ",".join(map(str, output_dict["feat_gen"].shape)) + "\n"
                )
                if output_dict.get("feat_gen_denorm") is not None:
                    denorm_writer[key] = output_dict["feat_gen_denorm"].cpu().numpy()
            else:
                # end-to-end text2wav model case
                wav = output_dict["wav"]
                logging.info(
                    "inference speed = {:.1f} points / sec.".format(
                        int(wav.size(0)) / (time.perf_counter() - start_time)
                    )
                )
                logging.info(f"{key} (size:{insize}->{wav.size(0)})")
            if output_dict.get("duration") is not None:
                # Save duration and fucus rates
                duration_writer.write(
                    f"{key} "
                    + " ".join(map(str, output_dict["duration"].long().cpu().numpy()))
                    + "\n"
                )
            if output_dict.get("focus_rate") is not None:
                focus_rate_writer.write(
                    f"{key} {float(output_dict['focus_rate']):.5f}\n"
                )
            if output_dict.get("att_w") is not None:
                # Plot attention weight
                att_w = output_dict["att_w"].cpu().numpy()
                # Normalize to a 4-D (layer, head, out, in) array for plotting.
                if att_w.ndim == 2:
                    att_w = att_w[None][None]
                elif att_w.ndim != 4:
                    raise RuntimeError(f"Must be 2 or 4 dimension: {att_w.ndim}")
                w, h = plt.figaspect(att_w.shape[0] / att_w.shape[1])
                fig = plt.Figure(
                    figsize=(
                        w * 1.3 * min(att_w.shape[0], 2.5),
                        h * 1.3 * min(att_w.shape[1], 2.5),
                    )
                )
                fig.suptitle(f"{key}")
                axes = fig.subplots(att_w.shape[0], att_w.shape[1])
                if len(att_w) == 1:
                    axes = [[axes]]
                # NOTE(review): the loop variable deliberately shadows att_w;
                # the outer array is not needed after this point.
                for ax, att_w in zip(axes, att_w):
                    for ax_, att_w_ in zip(ax, att_w):
                        ax_.imshow(att_w_.astype(np.float32), aspect="auto")
                        ax_.set_xlabel("Input")
                        ax_.set_ylabel("Output")
                        ax_.xaxis.set_major_locator(MaxNLocator(integer=True))
                        ax_.yaxis.set_major_locator(MaxNLocator(integer=True))
                fig.set_tight_layout({"rect": [0, 0.03, 1, 0.95]})
                fig.savefig(output_dir / f"att_ws/{key}.png")
                fig.clf()
            if output_dict.get("prob") is not None:
                # Plot stop token prediction
                prob = output_dict["prob"].cpu().numpy()
                fig = plt.Figure()
                ax = fig.add_subplot(1, 1, 1)
                ax.plot(prob)
                ax.set_title(f"{key}")
                ax.set_xlabel("Output")
                ax.set_ylabel("Stop probability")
                ax.set_ylim(0, 1)
                ax.grid(which="both")
                fig.set_tight_layout(True)
                fig.savefig(output_dir / f"probs/{key}.png")
                fig.clf()
            # TODO(kamo): Write scp
            if output_dict.get("wav") is not None:
                sf.write(
                    f"{output_dir}/wav/{key}.wav",
                    output_dict["wav"].cpu().numpy(),
                    singingGenerate.fs,
                    "PCM_16",
                )
    # remove files if those are not included in output dict
    # (output_dict here is the one from the last iteration)
    if output_dict.get("feat_gen") is None:
        shutil.rmtree(output_dir / "norm")
    if output_dict.get("feat_gen_denorm") is None:
        shutil.rmtree(output_dir / "denorm")
    if output_dict.get("att_w") is None:
        shutil.rmtree(output_dir / "att_ws")
    if output_dict.get("duration") is None:
        shutil.rmtree(output_dir / "durations")
    if output_dict.get("focus_rate") is None:
        shutil.rmtree(output_dir / "focus_rates")
    if output_dict.get("prob") is None:
        shutil.rmtree(output_dir / "probs")
    if output_dict.get("wav") is None:
        shutil.rmtree(output_dir / "wav")
def get_parser():
    """Build and return the argument parser for SVS decoding."""
    parser = config_argparse.ArgumentParser(
        description="SVS Decode",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): Use "_" instead of "-" as separator.
    # "-" is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument(
        "--output_dir", type=str, required=True, help="The path of output directory"
    )
    parser.add_argument(
        "--ngpu", type=int, default=0, help="The number of gpus. 0 indicates CPU mode"
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    parser.add_argument(
        "--batch_size", type=int, default=1, help="The batch size for inference"
    )
    # Dataset/key-file options
    data_group = parser.add_argument_group("Input data related")
    data_group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    data_group.add_argument("--key_file", type=str_or_none)
    data_group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    # Trained-model locations
    model_group = parser.add_argument_group("The model configuration related")
    model_group.add_argument(
        "--train_config", type=str, help="Training configuration file."
    )
    model_group.add_argument("--model_file", type=str, help="Model parameter file.")
    # Decoding options
    decode_group = parser.add_argument_group("Decoding related")
    decode_group.add_argument(
        "--use_teacher_forcing",
        type=str2bool,
        default=False,
        help="Whether to use teacher forcing",
    )
    parser.add_argument(
        "--noise_scale",
        type=float,
        default=0.667,
        help="Noise scale parameter for the flow in vits",
    )
    parser.add_argument(
        "--noise_scale_dur",
        type=float,
        default=0.8,
        help="Noise scale parameter for the stochastic duration predictor in vits",
    )
    # Vocoder options
    vocoder_group = parser.add_argument_group("Vocoder related")
    vocoder_group.add_argument(
        "--vocoder_checkpoint",
        default="None",
        type=str_or_none,
        help="checkpoint file to be loaded.",
    )
    vocoder_group.add_argument(
        "--vocoder_config",
        default=None,
        type=str_or_none,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    return parser
def main(cmd=None):
    """Run SVS model decoding from the command line."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # "--config" belongs to config_argparse; inference() has no such argument.
    kwargs.pop("config", None)
    inference(**kwargs)
# Allow invoking this module directly as a script.
if __name__ == "__main__":
    main()
| 23,443 | 33.991045 | 88 | py |
espnet | espnet-master/espnet2/bin/uasr_extract_feature.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from pathlib import Path
from typing import Optional, Sequence, Tuple, Union
from torch.nn.parallel import data_parallel
from typeguard import check_argument_types
from espnet2.fileio.npy_scp import NpyScpWriter
from espnet2.tasks.uasr import UASRTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.forward_adaptor import ForwardAdaptor
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
def get_parser():
    """Build and return the argument parser for UASR feature extraction."""
    parser = argparse.ArgumentParser(
        description="UASR Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Input dataset specification (may be given multiple times)
    parser.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    # Trained-model locations
    parser.add_argument(
        "--uasr_train_config", type=str, help="uasr training configuration"
    )
    parser.add_argument("--uasr_model_file", type=str, help="uasr model parameter file")
    parser.add_argument("--key_file", type=str_or_none, help="key file")
    parser.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    # Runtime options
    parser.add_argument(
        "--ngpu", type=int, default=0, help="The number of gpus. 0 indicates CPU mode"
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for feature extraction",
    )
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    # Output options
    parser.add_argument("--dset", type=str, help="dataset")
    parser.add_argument("--output_dir", type=str, help="Output directory")
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    return parser
def extract_feature(
    uasr_train_config: Optional[str],
    uasr_model_file: Optional[str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    batch_size: int,
    dtype: str,
    num_workers: int,
    allow_variable_data_keys: bool,
    ngpu: int,
    output_dir: str,
    dset: str,
    log_level: Union[int, str],
):
    """Extract features with a trained UASR model and save them as npy/scp.

    Loads the model, streams the given dataset through
    ``uasr_model.collect_feats`` and writes each returned feature tensor to
    ``{output_dir}/{dset}/collect_feats/data_{feat_name}`` with a matching
    ``{feat_name}.scp`` index file, one entry per utterance.
    """
    assert check_argument_types()
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    output_dir_path = Path(output_dir)
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    uasr_model, uasr_train_args = UASRTask.build_model_from_file(
        uasr_train_config, uasr_model_file, device
    )
    test_iter = UASRTask.build_streaming_iterator(
        data_path_and_name_and_type=data_path_and_name_and_type,
        key_file=key_file,
        batch_size=batch_size,
        dtype=dtype,
        num_workers=num_workers,
        preprocess_fn=UASRTask.build_preprocess_fn(uasr_train_args, False),
        collate_fn=UASRTask.build_collate_fn(uasr_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # Writers are created lazily, one per (feature name, dataset) pair.
    # NOTE(review): the NpyScpWriter instances are never explicitly closed
    # here — confirm that relying on interpreter shutdown is intended.
    npy_scp_writers = {}
    for keys, batch in test_iter:
        batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
        if ngpu <= 1:
            data = uasr_model.collect_feats(**batch)
        else:
            # Replicate the model over all GPUs and scatter the batch.
            data = data_parallel(
                ForwardAdaptor(uasr_model, "collect_feats"),
                (),
                range(ngpu),
                module_kwargs=batch,
            )
        for key, v in data.items():
            for i, (uttid, seq) in enumerate(zip(keys, v.cpu().detach().numpy())):
                if f"{key}_lengths" in data:
                    # Strip the padding that collation added to this utterance.
                    length = data[f"{key}_lengths"][i]
                    seq = seq[:length]
                else:
                    # No length info: keep the whole row, with a leading
                    # singleton axis so every entry is at least 2-D.
                    seq = seq[None]
                if (key, dset) not in npy_scp_writers:
                    p = output_dir_path / dset / "collect_feats"
                    npy_scp_writers[(key, dset)] = NpyScpWriter(
                        p / f"data_{key}", p / f"{key}.scp"
                    )
                npy_scp_writers[(key, dset)][uttid] = seq
def main(cmd=None):
    """Command-line entry point for UASR feature extraction."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    extract_feature(**vars(args))
# Allow invoking this module directly as a script.
if __name__ == "__main__":
    main()
| 4,900 | 26.227778 | 82 | py |
espnet | espnet-master/espnet2/bin/asr_align.py | #!/usr/bin/env python3
# Copyright 2021, Ludwig Kürzinger; Kamo Naoyuki
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Perform CTC segmentation to align utterances within audio files."""
import argparse
import logging
import sys
from pathlib import Path
from typing import List, Optional, TextIO, Union
import numpy as np
import soundfile
import torch
# imports for CTC segmentation
from ctc_segmentation import (
CtcSegmentationParameters,
ctc_segmentation,
determine_utterance_segments,
prepare_text,
prepare_token_list,
)
from typeguard import check_argument_types, check_return_type
from espnet2.tasks.asr import ASRTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str_or_none
# imports for inference
from espnet.utils.cli_utils import get_commandline_args
class CTCSegmentationTask:
    """Result container for a CTC segmentation run.

    ``str(·)`` renders the computed segments as a kaldi-style ``segments``
    file; the human-readable output can be tuned via the printing switches.

    Properties:
        text: Utterance texts, separated by line. But without the utterance
            name at the beginning of the line (as in kaldi-style text).
        ground_truth_mat: Ground truth matrix (CTC segmentation).
        utt_begin_indices: Utterance separator for the Ground truth matrix.
        timings: Time marks of the corresponding chars.
        state_list: Estimated alignment of chars/tokens.
        segments: Calculated segments as: (start, end, confidence score).
        config: CTC Segmentation configuration object.
        name: Name of aligned audio file (Optional). If given, name is
            considered when generating the text.
        utt_ids: The list of utterance names (Optional). This list should
            have the same length as the number of utterances.
        lpz: CTC posterior log probabilities (Optional).
    Properties for printing:
        print_confidence_score: Includes the confidence score.
        print_utterance_text: Includes utterance text.
    """

    # Alignment results
    text = None
    ground_truth_mat = None
    utt_begin_indices = None
    timings = None
    char_probs = None
    state_list = None
    segments = None
    config = None
    done = False
    # Optional metadata
    name = "utt"
    utt_ids = None
    lpz = None
    # Printing switches
    print_confidence_score = True
    print_utterance_text = True

    def __init__(self, **kwargs):
        """Create the task object, storing any given properties."""
        self.set(**kwargs)

    def set(self, **kwargs):
        """Overwrite known public properties with non-None values.

        Args:
            **kwargs: property name -> new value. Entries whose name is
                unknown or starts with an underscore, or whose value is
                None, are silently ignored.
        """
        for attr, value in kwargs.items():
            if attr.startswith("_") or value is None:
                continue
            if hasattr(self, attr):
                setattr(self, attr, value)

    def __str__(self):
        """Render all segments as kaldi-style ``segments`` lines."""
        num_utts = len(self.segments)
        if self.utt_ids is None:
            utt_names = [f"{self.name}_{i:04}" for i in range(num_utts)]
        else:
            # ensure correct mapping of segments to utterance ids
            assert num_utts == len(self.utt_ids)
            utt_names = self.utt_ids
        entries = []
        for i, (utt_name, boundary) in enumerate(zip(utt_names, self.segments)):
            # utterance name, file name, then segment start and end
            entry = f"{utt_name} {self.name} {boundary[0]:.2f} {boundary[1]:.2f}"
            if self.print_confidence_score:
                entry += f" {boundary[2]:3.4f}"
            if self.print_utterance_text:
                entry += f" {self.text[i]}"
            entries.append(entry + "\n")
        return "".join(entries)
class CTCSegmentation:
    """Align text to audio using CTC segmentation.
    Usage:
        Initialize with given ASR model and parameters.
        If needed, parameters for CTC segmentation can be set with ``set_config(·)``.
        Then call the instance as function to align text within an audio file.
    Example:
        >>> # example file included in the ESPnet repository
        >>> import soundfile
        >>> speech, fs = soundfile.read("test_utils/ctc_align_test.wav")
        >>> # load an ASR model
        >>> from espnet_model_zoo.downloader import ModelDownloader
        >>> d = ModelDownloader()
        >>> wsjmodel = d.download_and_unpack( "kamo-naoyuki/wsj" )
        >>> # Apply CTC segmentation
        >>> aligner = CTCSegmentation( **wsjmodel )
        >>> text=["utt1 THE SALE OF THE HOTELS", "utt2 ON PROPERTY MANAGEMENT"]
        >>> aligner.set_config( gratis_blank=True )
        >>> segments = aligner( speech, text, fs=fs )
        >>> print( segments )
        utt1 utt 0.27 1.72 -0.1663 THE SALE OF THE HOTELS
        utt2 utt 4.54 6.10 -4.9646 ON PROPERTY MANAGEMENT
    On multiprocessing:
        To parallelize the computation with multiprocessing, these three steps
        can be separated:
        (1) ``get_lpz``: obtain the lpz,
        (2) ``prepare_segmentation_task``: prepare the task, and
        (3) ``get_segments``: perform CTC segmentation.
        Note that the function `get_segments` is a staticmethod and therefore
        independent of an already initialized CTCSegmentation object.
    References:
        CTC-Segmentation of Large Corpora for German End-to-end Speech Recognition
        2020, Kürzinger, Winkelbauer, Li, Watzel, Rigoll
        https://arxiv.org/abs/2007.09127
    More parameters are described in https://github.com/lumaku/ctc-segmentation
    """
    # Default sample rate (Hz) of the audio input; overridable via set_config(fs=...).
    fs = 16000
    # Ratio of audio samples per CTC output frame; estimated lazily in
    # get_timing_config() when ``time_stamps == "fixed"`` and not set explicitly.
    samples_to_frames_ratio = None
    # Method used to derive the CTC index duration ("auto" or "fixed").
    time_stamps = "auto"
    choices_time_stamps = ["auto", "fixed"]
    # Text preprocessing mode: "tokenize" uses ESPnet2 preprocessing,
    # "classic" uses ESPnet1-style partial-token preparation.
    text_converter = "tokenize"
    choices_text_converter = ["tokenize", "classic"]
    # Guards against emitting the gratis_blank/replace_spaces_with_blanks
    # misconfiguration error more than once.
    warned_about_misconfiguration = False
    # NOTE(review): this CtcSegmentationParameters object is a *class-level*
    # attribute that set_config() mutates in place (e.g. self.config.blank = ...),
    # so its state is shared by all CTCSegmentation instances in the same
    # process — confirm this sharing is intended.
    config = CtcSegmentationParameters()
    def __init__(
        self,
        asr_train_config: Union[Path, str],
        asr_model_file: Union[Path, str] = None,
        fs: int = 16000,
        ngpu: int = 0,
        batch_size: int = 1,
        dtype: str = "float32",
        kaldi_style_text: bool = True,
        text_converter: str = "tokenize",
        time_stamps: str = "auto",
        **ctc_segmentation_args,
    ):
        """Initialize the CTCSegmentation module.
        Args:
            asr_train_config: ASR model config file (yaml).
            asr_model_file: ASR model file (pth).
            fs: Sample rate of audio file.
            ngpu: Number of GPUs. Set 0 for processing on CPU, set to 1 for
                processing on GPU. Multi-GPU aligning is currently not
                implemented. Default: 0.
            batch_size: Currently, only batch size == 1 is implemented.
            dtype: Data type used for inference. Set dtype according to
                the ASR model.
            kaldi_style_text: A kaldi-style text file includes the name of the
                utterance at the start of the line. If True, the utterance name
                is expected as first word at each line. If False, utterance
                names are automatically generated. Set this option according to
                your input data. Default: True.
            text_converter: How CTC segmentation handles text.
                "tokenize": Use ESPnet 2 preprocessing to tokenize the text.
                "classic": The text is preprocessed as in ESPnet 1 which takes
                token length into account. If the ASR model has longer tokens,
                this option may yield better results. Default: "tokenize".
            time_stamps: Choose the method how the time stamps are
                calculated. While "fixed" and "auto" use both the sample rate,
                the ratio of samples to one frame is either automatically
                determined for each inference or fixed at a certain ratio that
                is initially determined by the module, but can be changed via
                the parameter ``samples_to_frames_ratio``. Recommended for
                longer audio files: "auto".
            **ctc_segmentation_args: Parameters for CTC segmentation.
        """
        assert check_argument_types()
        # Basic settings
        if batch_size > 1:
            raise NotImplementedError("Batch decoding is not implemented")
        device = "cpu"
        if ngpu == 1:
            device = "cuda"
        elif ngpu > 1:
            logging.error("Multi-GPU not yet implemented.")
            raise NotImplementedError("Only single GPU decoding is supported")
        # Prepare ASR model
        asr_model, asr_train_args = ASRTask.build_model_from_file(
            asr_train_config, asr_model_file, device
        )
        asr_model.to(dtype=getattr(torch, dtype)).eval()
        self.preprocess_fn = ASRTask.build_preprocess_fn(asr_train_args, False)
        # Warn for nets with high memory consumption on long audio files
        if hasattr(asr_model, "encoder"):
            encoder_module = asr_model.encoder.__class__.__module__
        else:
            encoder_module = "Unknown"
        logging.info(f"Encoder module: {encoder_module}")
        logging.info(f"CTC module: {asr_model.ctc.__class__.__module__}")
        if "rnn" not in encoder_module.lower():
            logging.warning("No RNN model detected; memory consumption may be high.")
        self.asr_model = asr_model
        self.asr_train_args = asr_train_args
        self.device = device
        self.dtype = dtype
        self.ctc = asr_model.ctc
        self.kaldi_style_text = kaldi_style_text
        self.token_list = asr_model.token_list
        # Apply configuration
        self.set_config(
            fs=fs,
            time_stamps=time_stamps,
            kaldi_style_text=kaldi_style_text,
            text_converter=text_converter,
            **ctc_segmentation_args,
        )
        # last token "<sos/eos>", not needed
        self.config.char_list = asr_model.token_list[:-1]
    def set_config(self, **kwargs):
        """Set CTC segmentation parameters.
        Parameters for timing:
            time_stamps: Select method how CTC index duration is estimated, and
                thus how the time stamps are calculated.
            fs: Sample rate.
            samples_to_frames_ratio: If you want to directly determine the
                ratio of samples to CTC frames, set this parameter, and
                set ``time_stamps`` to "fixed".
                Note: If you want to calculate the time stamps as in
                ESPnet 1, set this parameter to:
                ``subsampling_factor * frame_duration / 1000``.
        Parameters for text preparation:
            set_blank: Index of blank in token list. Default: 0.
            replace_spaces_with_blanks: Inserts blanks between words, which is
                useful for handling long pauses between words. Only used in
                ``text_converter="classic"`` preprocessing mode. Default: False.
            kaldi_style_text: Determines whether the utterance name is expected
                as fist word of the utterance. Set at module initialization.
            text_converter: How CTC segmentation handles text.
                Set at module initialization.
        Parameters for alignment:
            min_window_size: Minimum number of frames considered for a single
                utterance. The current default value of 8000 corresponds to
                roughly 4 minutes (depending on ASR model) and should be OK in
                most cases. If your utterances are further apart, increase
                this value, or decrease it for smaller audio files.
            max_window_size: Maximum window size. It should not be necessary
                to change this value.
            gratis_blank: If True, the transition cost of blank is set to zero.
                Useful for long preambles or if there are large unrelated segments
                between utterances. Default: False.
        Parameters for calculation of confidence score:
            scoring_length: Block length to calculate confidence score. The
                default value of 30 should be OK in most cases.
        """
        # Parameters for timing
        if "time_stamps" in kwargs:
            if kwargs["time_stamps"] not in self.choices_time_stamps:
                raise NotImplementedError(
                    f"Parameter ´time_stamps´ has to be one of "
                    f"{list(self.choices_time_stamps)}",
                )
            self.time_stamps = kwargs["time_stamps"]
        if "fs" in kwargs:
            self.fs = float(kwargs["fs"])
        if "samples_to_frames_ratio" in kwargs:
            self.samples_to_frames_ratio = float(kwargs["samples_to_frames_ratio"])
        # Parameters for text preparation
        if "set_blank" in kwargs:
            assert isinstance(kwargs["set_blank"], int)
            self.config.blank = kwargs["set_blank"]
        if "replace_spaces_with_blanks" in kwargs:
            self.config.replace_spaces_with_blanks = bool(
                kwargs["replace_spaces_with_blanks"]
            )
        if "kaldi_style_text" in kwargs:
            assert isinstance(kwargs["kaldi_style_text"], bool)
            self.kaldi_style_text = kwargs["kaldi_style_text"]
        if "text_converter" in kwargs:
            if kwargs["text_converter"] not in self.choices_text_converter:
                raise NotImplementedError(
                    f"Parameter ´text_converter´ has to be one of "
                    f"{list(self.choices_text_converter)}",
                )
            self.text_converter = kwargs["text_converter"]
        # Parameters for alignment
        if "min_window_size" in kwargs:
            assert isinstance(kwargs["min_window_size"], int)
            self.config.min_window_size = kwargs["min_window_size"]
        if "max_window_size" in kwargs:
            assert isinstance(kwargs["max_window_size"], int)
            self.config.max_window_size = kwargs["max_window_size"]
        if "gratis_blank" in kwargs:
            self.config.blank_transition_cost_zero = bool(kwargs["gratis_blank"])
        if (
            self.config.blank_transition_cost_zero
            and self.config.replace_spaces_with_blanks
            and not self.warned_about_misconfiguration
        ):
            logging.error(
                "Blanks are inserted between words, and also the transition cost of"
                " blank is zero. This configuration may lead to misalignments!"
            )
            # NOTE(review): this assignment creates an *instance* attribute that
            # shadows the class-level flag, so the warning is per-instance.
            self.warned_about_misconfiguration = True
        # Parameter for calculation of confidence score
        if "scoring_length" in kwargs:
            assert isinstance(kwargs["scoring_length"], int)
            self.config.score_min_mean_over_L = kwargs["scoring_length"]
    def get_timing_config(self, speech_len=None, lpz_len=None):
        """Obtain parameters to determine time stamps."""
        timing_cfg = {
            "index_duration": self.config.index_duration,
        }
        # As the parameter ctc_index_duration vetoes the other
        if self.time_stamps == "fixed":
            # Initialize the value, if not yet available
            if self.samples_to_frames_ratio is None:
                ratio = self.estimate_samples_to_frames_ratio()
                self.samples_to_frames_ratio = ratio
            index_duration = self.samples_to_frames_ratio / self.fs
        else:
            assert self.time_stamps == "auto"
            # "auto": derive the ratio from this utterance's own lengths.
            samples_to_frames_ratio = speech_len / lpz_len
            index_duration = samples_to_frames_ratio / self.fs
        timing_cfg["index_duration"] = index_duration
        return timing_cfg
    def estimate_samples_to_frames_ratio(self, speech_len=215040):
        """Determine the ratio of encoded frames to sample points.
        This method helps to determine the time a single encoded frame occupies.
        As the sample rate already gave the number of samples, only the ratio
        of samples per encoded CTC frame are needed. This function estimates them by
        doing one inference, which is only needed once.
        Args:
            speech_len: Length of randomly generated speech vector for single
                inference. Default: 215040.
        Returns:
            samples_to_frames_ratio: Estimated ratio.
        """
        random_input = torch.rand(speech_len)
        lpz = self.get_lpz(random_input)
        lpz_len = lpz.shape[0]
        # Most frontends (DefaultFrontend, SlidingWindow) discard trailing data
        lpz_len = lpz_len + 1
        samples_to_frames_ratio = speech_len // lpz_len
        return samples_to_frames_ratio
    @torch.no_grad()
    def get_lpz(self, speech: Union[torch.Tensor, np.ndarray]):
        """Obtain CTC posterior log probabilities for given speech data.
        Args:
            speech: Speech audio input.
        Returns:
            lpz: Numpy vector with CTC log posterior probabilities.
        """
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)
        # data: (Nsamples,) -> (1, Nsamples)
        speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
        # lengths: (1,)
        lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
        batch = {"speech": speech, "speech_lengths": lengths}
        batch = to_device(batch, device=self.device)
        # Encode input
        enc, _ = self.asr_model.encode(**batch)
        assert len(enc) == 1, len(enc)
        # Apply ctc layer to obtain log character probabilities
        lpz = self.ctc.log_softmax(enc).detach()
        # Shape should be ( <time steps>, <classes> )
        lpz = lpz.squeeze(0).cpu().numpy()
        return lpz
    def _split_text(self, text):
        """Convert text to list and extract utterance IDs."""
        utt_ids = None
        # Handle multiline strings
        if isinstance(text, str):
            text = text.splitlines()
        # Remove empty lines
        text = list(filter(len, text))
        # Handle kaldi-style text format
        if self.kaldi_style_text:
            utt_ids_and_text = [utt.split(" ", 1) for utt in text]
            # remove utterances with empty text
            utt_ids_and_text = filter(lambda ui: len(ui) == 2, utt_ids_and_text)
            utt_ids_and_text = list(utt_ids_and_text)
            utt_ids = [utt[0] for utt in utt_ids_and_text]
            text = [utt[1] for utt in utt_ids_and_text]
        return utt_ids, text
    def prepare_segmentation_task(self, text, lpz, name=None, speech_len=None):
        """Preprocess text, and gather text and lpz into a task object.
        Text is pre-processed and tokenized depending on configuration.
        If ``speech_len`` is given, the timing configuration is updated.
        Text, lpz, and configuration is collected in a CTCSegmentationTask
        object. The resulting object can be serialized and passed in a
        multiprocessing computation.
        A minimal amount of text processing is done, i.e., splitting the
        utterances in ``text`` into a list and applying ``text_cleaner``.
        It is recommended that you normalize the text beforehand, e.g.,
        change numbers into their spoken equivalent word, remove special
        characters, and convert UTF-8 characters to chars corresponding to
        your ASR model dictionary.
        The text is tokenized based on the ``text_converter`` setting:
        The "tokenize" method is more efficient and the easiest for models
        based on latin or cyrillic script that only contain the main chars,
        ["a", "b", ...] or for Japanese or Chinese ASR models with ~3000
        short Kanji / Hanzi tokens.
        The "classic" method improves the the accuracy of the alignments
        for models that contain longer tokens, but with a greater complexity
        for computation. The function scans for partial tokens which may
        improve time resolution.
        For example, the word "▁really" will be broken down into
        ``['▁', '▁r', '▁re', '▁real', '▁really']``. The alignment will be
        based on the most probable activation sequence given by the network.
        Args:
            text: List or multiline-string with utterance ground truths.
            lpz: Log CTC posterior probabilities obtained from the CTC-network;
                numpy array shaped as ( <time steps>, <classes> ).
            name: Audio file name. Choose a unique name, or the original audio
                file name, to distinguish multiple audio files. Default: None.
            speech_len: Number of sample points. If given, the timing
                configuration is automatically derived from length of fs, length
                of speech and length of lpz. If None is given, make sure the
                timing parameters are correct, see time_stamps for reference!
                Default: None.
        Returns:
            task: CTCSegmentationTask object that can be passed to
                ``get_segments()`` in order to obtain alignments.
        """
        config = self.config
        # Update timing parameters, if needed
        if speech_len is not None:
            lpz_len = lpz.shape[0]
            timing_cfg = self.get_timing_config(speech_len, lpz_len)
            config.set(**timing_cfg)
        # `text` is needed in the form of a list.
        utt_ids, text = self._split_text(text)
        # Obtain utterance & label sequence from text
        if self.text_converter == "tokenize":
            # list of str --tokenize--> list of np.array
            token_list = [
                self.preprocess_fn("<dummy>", {"text": utt})["text"] for utt in text
            ]
            # filter out any instances of the <unk> token
            unk = config.char_list.index("<unk>")
            token_list = [utt[utt != unk] for utt in token_list]
            ground_truth_mat, utt_begin_indices = prepare_token_list(config, token_list)
        else:
            assert self.text_converter == "classic"
            text = [self.preprocess_fn.text_cleaner(utt) for utt in text]
            token_list = [
                "".join(self.preprocess_fn.tokenizer.text2tokens(utt)) for utt in text
            ]
            token_list = [utt.replace("<unk>", "") for utt in token_list]
            ground_truth_mat, utt_begin_indices = prepare_text(config, token_list)
        task = CTCSegmentationTask(
            config=config,
            name=name,
            text=text,
            ground_truth_mat=ground_truth_mat,
            utt_begin_indices=utt_begin_indices,
            utt_ids=utt_ids,
            lpz=lpz,
        )
        return task
    @staticmethod
    def get_segments(task: CTCSegmentationTask):
        """Obtain segments for given utterance texts and CTC log posteriors.
        Args:
            task: CTCSegmentationTask object that contains ground truth and
                CTC posterior probabilities.
        Returns:
            result: Dictionary with alignments. Combine this with the task
                object to obtain a human-readable segments representation.
        """
        assert check_argument_types()
        assert task.config is not None
        config = task.config
        lpz = task.lpz
        ground_truth_mat = task.ground_truth_mat
        utt_begin_indices = task.utt_begin_indices
        text = task.text
        # Align using CTC segmentation
        timings, char_probs, state_list = ctc_segmentation(
            config, lpz, ground_truth_mat
        )
        # Obtain list of utterances with time intervals and confidence score
        segments = determine_utterance_segments(
            config, utt_begin_indices, char_probs, timings, text
        )
        # Store results
        result = {
            "name": task.name,
            "timings": timings,
            "char_probs": char_probs,
            "state_list": state_list,
            "segments": segments,
            "done": True,
        }
        return result
    def __call__(
        self,
        speech: Union[torch.Tensor, np.ndarray],
        text: Union[List[str], str],
        fs: Optional[int] = None,
        name: Optional[str] = None,
    ) -> CTCSegmentationTask:
        """Align utterances.
        Args:
            speech: Audio file.
            text: List or multiline-string with utterance ground truths.
            fs: Sample rate in Hz. Optional, as this can be given when
                the module is initialized.
            name: Name of the file. Utterance names are derived from it.
        Returns:
            CTCSegmentationTask object with segments.
        """
        assert check_argument_types()
        if fs is not None:
            self.set_config(fs=fs)
        # Get log CTC posterior probabilities
        lpz = self.get_lpz(speech)
        # Conflate text & lpz & config as a segmentation task object
        task = self.prepare_segmentation_task(text, lpz, name, speech.shape[0])
        # Apply CTC segmentation
        segments = self.get_segments(task)
        task.set(**segments)
        assert check_return_type(task)
        return task
def ctc_align(
    log_level: Union[int, str],
    asr_train_config: str,
    asr_model_file: str,
    audio: Path,
    text: TextIO,
    output: TextIO,
    print_utt_text: bool = True,
    print_utt_score: bool = True,
    **kwargs,
):
    """Script entry point that aligns utterance text to an audio file.
    Builds a CTCSegmentation aligner from the given ASR model, reads the
    audio and transcript inputs, runs the alignment, and writes a
    "segments"-style listing to ``output``.
    """
    assert check_argument_types()
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    # Drop options the argument parser left unset (None values).
    aligner_options = {key: val for key, val in kwargs.items() if val is not None}
    # Build the CTC segmentation module around the given ASR model.
    aligner = CTCSegmentation(
        asr_train_config=asr_train_config,
        asr_model_file=asr_model_file,
        **aligner_options,
    )
    # Load the audio file; its stem serves as the base for utterance names.
    assert audio.name != ""
    speech, fs = soundfile.read(str(audio))
    # Load the ground-truth transcripts.
    transcripts = text.read()
    # Run inference followed by CTC segmentation.
    task = aligner(speech=speech, text=transcripts, fs=fs, name=audio.stem)
    # Emit the "segments" listing to the output file or stdout.
    task.print_utterance_text = print_utt_text
    task.print_confidence_score = print_utt_score
    output.write(str(task))
def get_parser():
    """Construct the command-line argument parser for the alignment script."""
    root = config_argparse.ArgumentParser(
        description="ASR Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): '_' is used as the word separator instead of '-',
    # because '-' is confusing if written in yaml.
    root.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    root.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    root.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    model_args = root.add_argument_group("Model configuration related")
    model_args.add_argument("--asr_train_config", type=str, required=True)
    model_args.add_argument("--asr_model_file", type=str, required=True)
    text_args = root.add_argument_group("Text converter related")
    text_args.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help=(
            "The token type for ASR model. "
            "If not given, refers from the training args"
        ),
    )
    text_args.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help=(
            "The model path of sentencepiece. "
            "If not given, refers from the training args"
        ),
    )
    ctc_args = root.add_argument_group("CTC segmentation related")
    ctc_args.add_argument(
        "--fs",
        type=int,
        default=16000,
        help=(
            "Sampling Frequency."
            " The sampling frequency (in Hz) is needed to correctly determine the"
            " starting and ending time of aligned segments."
        ),
    )
    ctc_args.add_argument(
        "--min_window_size",
        type=int,
        default=None,
        help="Minimum window size considered for utterance.",
    )
    ctc_args.add_argument(
        "--max_window_size",
        type=int,
        default=None,
        help="Maximum window size considered for utterance.",
    )
    ctc_args.add_argument(
        "--set_blank",
        type=int,
        default=None,
        help="Index of model dictionary for blank token.",
    )
    ctc_args.add_argument(
        "--gratis_blank",
        type=str2bool,
        default=False,
        help=(
            "Set the transition cost of the blank token to zero. Audio sections"
            " labeled with blank tokens can then be skipped without penalty. Useful"
            " if there are unrelated audio segments between utterances."
        ),
    )
    ctc_args.add_argument(
        "--replace_spaces_with_blanks",
        type=str2bool,
        default=False,
        help=(
            "Fill blanks in between words to better model pauses between words."
            " This option is only active for `--text_converter classic`."
            " Segments can be misaligned if this option is combined with"
            " --gratis-blank."
        ),
    )
    ctc_args.add_argument(
        "--scoring_length",
        type=int,
        default=None,
        help="Changes partitioning length L for calculation of the confidence score.",
    )
    ctc_args.add_argument(
        "--time_stamps",
        type=str,
        default=CTCSegmentation.time_stamps,
        choices=CTCSegmentation.choices_time_stamps,
        help=(
            "Select method how CTC index duration is estimated, and"
            " thus how the time stamps are calculated."
        ),
    )
    ctc_args.add_argument(
        "--text_converter",
        type=str,
        default=CTCSegmentation.text_converter,
        choices=CTCSegmentation.choices_text_converter,
        help="How CTC segmentation handles text.",
    )
    io_args = root.add_argument_group("Input/output arguments")
    io_args.add_argument(
        "--kaldi_style_text",
        type=str2bool,
        default=True,
        help=(
            "Assume that the input text file is kaldi-style formatted, i.e., the"
            " utterance name is at the beginning of each line."
        ),
    )
    io_args.add_argument(
        "--print_utt_text",
        type=str2bool,
        default=True,
        help="Include the utterance text in the segments output.",
    )
    io_args.add_argument(
        "--print_utt_score",
        type=str2bool,
        default=True,
        help="Include the confidence score in the segments output.",
    )
    io_args.add_argument(
        "-a",
        "--audio",
        type=Path,
        required=True,
        help="Input audio file.",
    )
    io_args.add_argument(
        "-t",
        "--text",
        type=argparse.FileType("r"),
        required=True,
        help=(
            "Input text file."
            " Each line contains the ground truth of a single utterance."
            " Kaldi-style text files include the name of the utterance as"
            " the first word in the line."
        ),
    )
    io_args.add_argument(
        "-o",
        "--output",
        type=argparse.FileType("w"),
        default="-",
        help=(
            "Output in the form of a `segments` file."
            " If not given, output is written to stdout."
        ),
    )
    return root
def main(cmd=None):
    """Parse command-line arguments and start the alignment in ctc_align(·)."""
    print(get_commandline_args(), file=sys.stderr)
    parsed = get_parser().parse_args(cmd)
    options = vars(parsed)
    # "config" is consumed by config_argparse itself and is not a ctc_align argument.
    options.pop("config", None)
    ctc_align(**options)
# Run the command-line interface when this module is executed as a script.
if __name__ == "__main__":
    main()
| 32,283 | 38.084746 | 88 | py |
espnet | espnet-master/espnet2/bin/asvspoof_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from distutils.version import LooseVersion
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.quantization
from typeguard import check_argument_types, check_return_type
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.asvspoof import ASVSpoofTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.utils.cli_utils import get_commandline_args
class SpeechAntiSpoof:
    """SpeechAntiSpoof class
    Examples:
        >>> import soundfile
        >>> speech_anti_spoof = SpeechAntiSpoof("asvspoof_config.yml", "asvspoof.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> speech_anti_spoof(audio)
        prediction_result (int)
    """
    def __init__(
        self,
        asvspoof_train_config: Union[Path, str] = None,
        asvspoof_model_file: Union[Path, str] = None,
        device: str = "cpu",
        batch_size: int = 1,
        dtype: str = "float32",
    ):
        # Build the ASVSpoof model from its config/checkpoint, cast it to the
        # requested dtype, and switch to eval mode for inference.
        assert check_argument_types()
        asvspoof_model, asvspoof_train_args = ASVSpoofTask.build_model_from_file(
            asvspoof_train_config, asvspoof_model_file, device
        )
        asvspoof_model.to(dtype=getattr(torch, dtype)).eval()
        self.asvspoof_model = asvspoof_model
        self.asvspoof_train_args = asvspoof_train_args
        self.device = device
        self.dtype = dtype
    @torch.no_grad()
    def __call__(self, speech: Union[torch.Tensor, np.ndarray]) -> float:
        """Inference
        Args:
            data: Input speech data
        Returns:
            [prediction, scores]
        """
        # NOTE(review): this is an instructional skeleton — the forward pass is
        # left as TODO exercises and the method currently returns None, not the
        # float promised by the annotation.
        assert check_argument_types()
        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)
        # data: (Nsamples,) -> (1, Nsamples)
        speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
        # lengths: (1,)
        lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
        batch = {"speech": speech, "speech_lengths": lengths}
        logging.info("speech length: " + str(speech.size(1)))
        # To device
        batch = to_device(batch, device=self.device)
        # TODO1 (checkpoint 4): Forward feature extraction and encoder etc.
        if "oc_softmax_loss" in self.asvspoof_model.losses:
            pass  # TODO1 (exercise2): use loss score function to estimate score
        else:
            pass  # TODO2 (checkpoint 4): Pass the encoder result to decoder
        # TODO3 (checkpoint 4): return the prediction score
        return None
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    asvspoof_train_config: Optional[str],
    asvspoof_model_file: Optional[str],
    allow_variable_data_keys: bool,
):
    """Run anti-spoofing inference and write per-utterance scores.
    Args:
        output_dir: Directory where the "prediction/score" result file is written.
        batch_size: Inference batch size; only 1 is supported.
        dtype: Data type used for inference ("float16"/"float32"/"float64").
        ngpu: Number of GPUs; 0 means CPU, >= 1 selects CUDA (single GPU only).
        seed: Random seed applied before decoding.
        num_workers: Number of DataLoader workers.
        log_level: Verbosity level for logging.
        data_path_and_name_and_type: Input data specification triples.
        key_file: Optional key file restricting the utterances to decode.
        asvspoof_train_config: ASVSpoof training configuration (yaml).
        asvspoof_model_file: ASVSpoof model parameter file (pth).
        allow_variable_data_keys: Whether extra data keys are tolerated.
    Raises:
        NotImplementedError: If batch_size > 1 or ngpu > 1 is requested.
    """
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"
    # 1. Set random-seed
    set_all_random_seed(seed)
    # 2. Build speech_anti_spoof
    speech_anti_spoof_kwargs = dict(
        asvspoof_train_config=asvspoof_train_config,
        asvspoof_model_file=asvspoof_model_file,
        device=device,
        dtype=dtype,
    )
    speech_anti_spoof = SpeechAntiSpoof(
        **speech_anti_spoof_kwargs,
    )
    # 3. Build data-iterator
    loader = ASVSpoofTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=ASVSpoofTask.build_preprocess_fn(
            speech_anti_spoof.asvspoof_train_args, False
        ),
        collate_fn=ASVSpoofTask.build_collate_fn(
            speech_anti_spoof.asvspoof_train_args, False
        ),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )
    # 4. Start for-loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # Strip the batch dimension and the *_lengths entries (batch_size == 1).
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            try:
                score = speech_anti_spoof(**batch)
            except TooShortUttError as e:
                logging.warning(f"Utterance {keys} {e}")
                # Fall back to a neutral score for utterances too short to encode.
                score = 0
            # Only supporting batch_size==1
            key = keys[0]
            # Write the result under outdir/prediction
            # (fixed: was writer[f"prediction"], an f-string without placeholders)
            result_writer = writer["prediction"]
            result_writer["score"][key] = str(score)
            # Lazy %-style args avoid formatting when INFO is disabled.
            logging.info("processed %s: score %s", key, score)
def get_parser():
    """Construct the command-line argument parser for ASVSpoof decoding."""
    root = config_argparse.ArgumentParser(
        description="ASVSpoof Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Note(kamo): '_' is used as the word separator instead of '-',
    # because '-' is confusing if written in yaml.
    root.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    root.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    root.add_argument("--output_dir", type=str, required=True)
    root.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    root.add_argument("--seed", type=int, default=0, help="Random seed")
    root.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    root.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )
    data_args = root.add_argument_group("Input data related")
    data_args.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    data_args.add_argument("--key_file", type=str_or_none)
    data_args.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
    model_args = root.add_argument_group("The model configuration related")
    model_args.add_argument(
        "--asvspoof_train_config",
        type=str,
        help="ASVSpoof training configuration",
    )
    model_args.add_argument(
        "--asvspoof_model_file",
        type=str,
        help="ASVSpoof model parameter file",
    )
    return root
def main(cmd=None):
    """Parse command-line arguments and run inference(·)."""
    print(get_commandline_args(), file=sys.stderr)
    parsed = get_parser().parse_args(cmd)
    options = vars(parsed)
    # "config" is consumed by config_argparse itself and is not an inference argument.
    options.pop("config", None)
    inference(**options)
# Run the command-line interface when this module is executed as a script.
if __name__ == "__main__":
    main()
| 7,971 | 29.899225 | 86 | py |
espnet | espnet-master/espnet2/bin/uasr_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from distutils.version import LooseVersion
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.quantization
from typeguard import check_argument_types, check_return_type
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.tasks.lm import LMTask
from espnet2.tasks.uasr import UASRTask
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.beam_search import BeamSearch, Hypothesis
from espnet.nets.pytorch_backend.transformer.subsampling import TooShortUttError
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.uasr import UASRPrefixScorer
# from espnet.nets.scorers.uasr import UASRPrefixScorer
from espnet.utils.cli_utils import get_commandline_args
class Speech2Text:
"""Speech2Text class for unsupervised ASR
Examples:
>>> import soundfile
>>> speech2text = Speech2Text("uasr_config.yml", "uasr.pth")
>>> audio, rate = soundfile.read("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis_object), ...]
"""
def __init__(
self,
uasr_train_config: Union[Path, str] = None,
uasr_model_file: Union[Path, str] = None,
lm_train_config: Union[Path, str] = None,
lm_file: Union[Path, str] = None,
ngram_scorer: str = "full",
ngram_file: Union[Path, str] = None,
token_type: str = None,
bpemodel: str = None,
device: str = "cpu",
batch_size: int = 1,
dtype: str = "float32",
beam_size: int = 20,
lm_weight: float = 1.0,
ngram_weight: float = 0.9,
nbest: int = 1,
quantize_uasr_model: bool = False,
quantize_lm: bool = False,
quantize_modules: List[str] = ["Linear"],
quantize_dtype: str = "qint8",
):
assert check_argument_types()
if quantize_uasr_model or quantize_lm:
if quantize_dtype == "float16" and torch.__version__ < LooseVersion(
"1.5.0"
):
raise ValueError(
"float16 dtype for dynamic quantization is not supported with "
"torch version < 1.5.0. Switch to qint8 dtype instead."
)
quantize_modules = set([getattr(torch.nn, q) for q in quantize_modules])
quantize_dtype = getattr(torch, quantize_dtype)
# 1. Build UASR model
scorers = {}
uasr_model, uasr_train_args = UASRTask.build_model_from_file(
uasr_train_config, uasr_model_file, device
)
# TODO(Jiatong): change to not used pre-extracted features for inference
uasr_model.use_collected_training_feats = True
uasr_model.to(dtype=getattr(torch, dtype)).eval()
if quantize_uasr_model:
logging.info("Use quantized uasr model for decoding.")
uasr_model = torch.quantization.quantize_dynamic(
uasr_model, qconfig_spec=quantize_modules, dtype=quantize_dtype
)
decoder = UASRPrefixScorer(eos=uasr_model.eos)
token_list = uasr_model.token_list
scorers.update(decoder=decoder)
logging.info(f"beam search token list: {token_list}")
# 2. Build Language model
if lm_train_config is not None:
lm, lm_train_args = LMTask.build_model_from_file(
lm_train_config, lm_file, device
)
if quantize_lm:
logging.info("Use quantized lm for decoding.")
lm = torch.quantization.quantize_dynamic(
lm, qconfig_spec=quantize_modules, dtype=quantize_dtype
)
scorers["lm"] = lm.lm
# 3. Build ngram model
if ngram_file is not None:
if ngram_scorer == "full":
from espnet.nets.scorers.ngram import NgramFullScorer
ngram = NgramFullScorer(ngram_file, token_list)
else:
from espnet.nets.scorers.ngram import NgramPartScorer
ngram = NgramPartScorer(ngram_file, token_list)
else:
ngram = None
scorers["ngram"] = ngram
# 4. Build BeamSearch object
weights = dict(
decoder=1.0,
lm=lm_weight,
ngram=ngram_weight,
)
beam_search = BeamSearch(
beam_size=beam_size,
weights=weights,
scorers=scorers,
sos=uasr_model.sos,
eos=uasr_model.eos,
vocab_size=len(token_list),
token_list=token_list,
pre_beam_score_key=None, # NOTE(jiatong): for frame-decoding
)
# TODO(karita): make all scorers batchfied
if batch_size == 1:
non_batch = [
k
for k, v in beam_search.full_scorers.items()
if not isinstance(v, BatchScorerInterface)
]
if len(non_batch) == 0:
beam_search.__class__ = BatchBeamSearch
logging.info("BatchBeamSearch implementation is selected.")
else:
logging.warning(
f"As non-batch scorers {non_batch} are found, "
f"fall back to non-batch implementation."
)
beam_search.to(device=device, dtype=getattr(torch, dtype)).eval()
for scorer in scorers.values():
if isinstance(scorer, torch.nn.Module):
scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
logging.info(f"Beam_search: {beam_search}")
logging.info(f"Decoding device={device}, dtype={dtype}")
# 5. [Optional] Build Text converter: e.g. bpe-sym -> Text
if token_type is None:
token_type = uasr_train_args.token_type
if bpemodel is None:
bpemodel = uasr_train_args.bpemodel
# delete
token_type = "word"
if token_type is None:
tokenizer = None
elif token_type == "bpe":
if bpemodel is not None:
tokenizer = build_tokenizer(token_type=token_type, bpemodel=bpemodel)
else:
tokenizer = None
else:
tokenizer = build_tokenizer(token_type=token_type)
converter = TokenIDConverter(token_list=token_list)
logging.info(f"Text tokenizer: {tokenizer}")
self.uasr_model = uasr_model
self.uasr_train_args = uasr_train_args
self.converter = converter
self.tokenizer = tokenizer
self.beam_search = beam_search
self.device = device
self.dtype = dtype
self.nbest = nbest
@torch.no_grad()
def __call__(
self, speech: Union[torch.Tensor, np.ndarray]
) -> List[Tuple[Optional[str], List[str], List[int], Union[Hypothesis]]]:
"""Inference
Args:
data: Input speech data
Returns:
text, token, token_int, hyp
"""
assert check_argument_types()
# Input as audio signal
if isinstance(speech, np.ndarray):
speech = torch.tensor(speech)
# data: (Nsamples,) -> (1, Nsamples)
speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
# lengths: (1,)
lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
batch = {"speech": speech, "speech_lengths": lengths}
# a. To device
batch = to_device(batch, device=self.device)
# b. Forward encoder
generated_sample, generated_sample_padding_mask = self.uasr_model.inference(
**batch
)
assert len(generated_sample) == 1, len(generated_sample)
# TODO(jiatong): add beamsearch
nbest_hyps = self.beam_search(x=generated_sample[0], maxlenratio=1.0)
nbest_hyps = nbest_hyps[: self.nbest]
results = []
for hyp in nbest_hyps:
assert isinstance(hyp, Hypothesis), type(hyp)
# remove sos/eos and get results
if isinstance(hyp.yseq, list):
token_int = hyp.yseq[1:-1]
else:
token_int = hyp.yseq[1:-1].tolist()
# remove blank symbol id, which is assumed to be 0
token_int = list(filter(lambda x: x >= 4, token_int))
# Change integer-ids to tokens
token = self.converter.ids2tokens(token_int)
if self.tokenizer is not None:
text = self.tokenizer.tokens2text(token)
else:
text = None
results.append((text, token, token_int, hyp))
assert check_return_type(results)
return results
@staticmethod
def from_pretrained(
model_tag: Optional[str] = None,
**kwargs: Optional[Any],
):
"""Build Speech2Text instance from the pretrained model.
Args:
model_tag (Optional[str]): Model tag of the pretrained models.
Currently, the tags of espnet_model_zoo are supported.
Returns:
Speech2Text: Speech2Text instance.
"""
if model_tag is not None:
try:
from espnet_model_zoo.downloader import ModelDownloader
except ImportError:
logging.error(
"`espnet_model_zoo` is not installed. "
"Please install via `pip install -U espnet_model_zoo`."
)
raise
d = ModelDownloader()
kwargs.update(**d.download_and_unpack(model_tag))
return Speech2Text(**kwargs)
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    beam_size: int,
    ngpu: int,
    seed: int,
    lm_weight: float,
    ngram_weight: float,
    nbest: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    uasr_train_config: Optional[str],
    uasr_model_file: Optional[str],
    lm_train_config: Optional[str],
    lm_file: Optional[str],
    word_lm_train_config: Optional[str],
    word_lm_file: Optional[str],
    ngram_file: Optional[str],
    model_tag: Optional[str],
    token_type: Optional[str],
    bpemodel: Optional[str],
    allow_variable_data_keys: bool,
    quantize_uasr_model: bool,
    quantize_lm: bool,
    quantize_modules: List[str],
    quantize_dtype: str,
):
    """Run UASR decoding over a dataset and write N-best results under output_dir.

    Builds a Speech2Text instance from the given configs (or a pretrained
    model tag), iterates the data loader one utterance at a time, and writes
    token/token_int/score/text files per rank under ``{output_dir}/{n}best_recog``.
    """
    assert check_argument_types()
    # Only single-utterance, single-GPU decoding is supported here.
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if word_lm_train_config is not None:
        raise NotImplementedError("Word LM is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set random-seed
    set_all_random_seed(seed)

    # 2. Build speech2text
    speech2text_kwargs = dict(
        uasr_train_config=uasr_train_config,
        uasr_model_file=uasr_model_file,
        lm_train_config=lm_train_config,
        lm_file=lm_file,
        ngram_file=ngram_file,
        token_type=token_type,
        bpemodel=bpemodel,
        device=device,
        dtype=dtype,
        beam_size=beam_size,
        lm_weight=lm_weight,
        ngram_weight=ngram_weight,
        nbest=nbest,
        quantize_uasr_model=quantize_uasr_model,
        quantize_lm=quantize_lm,
        quantize_modules=quantize_modules,
        quantize_dtype=quantize_dtype,
    )
    speech2text = Speech2Text.from_pretrained(
        model_tag=model_tag,
        **speech2text_kwargs,
    )

    # 3. Build data-iterator
    loader = UASRTask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=UASRTask.build_preprocess_fn(speech2text.uasr_train_args, False),
        collate_fn=UASRTask.build_collate_fn(speech2text.uasr_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )

    # 7 .Start for-loop
    # FIXME(kamo): The output format should be discussed about
    with DatadirWriter(output_dir) as writer:
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # batch_size == 1, so keep only the first (and only) sample and
            # drop *_lengths entries (Speech2Text computes lengths itself).
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}

            # N-best list of (text, token, token_int, hyp_object)
            try:
                results = speech2text(**batch)
            except TooShortUttError as e:
                # Emit a placeholder hypothesis instead of failing the run.
                logging.warning(f"Utterance {keys} {e}")
                hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
                results = [[" ", ["<space>"], [2], hyp]] * nbest

            # Only supporting batch_size==1
            key = keys[0]
            for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
                # Create a directory: outdir/{n}best_recog
                ibest_writer = writer[f"{n}best_recog"]

                # Write the result to each file
                ibest_writer["token"][key] = " ".join(token)
                ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                ibest_writer["score"][key] = str(hyp.score)

                if text is not None:
                    ibest_writer["text"][key] = text
                logging.info("key: {} text: {}".format(key, text))
                logging.info("key: {} token_int: {}\n".format(key, token_int))
def get_parser():
    """Build the command-line argument parser for UASR decoding."""
    parser = config_argparse.ArgumentParser(
        description="UASR Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )

    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)

    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--uasr_train_config",
        type=str,
        help="uasr training configuration",
    )
    group.add_argument(
        "--uasr_model_file",
        type=str,
        help="uasr model parameter file",
    )
    group.add_argument(
        "--lm_train_config",
        type=str,
        help="LM training configuration",
    )
    group.add_argument(
        "--lm_file",
        type=str,
        help="LM parameter file",
    )
    group.add_argument(
        "--word_lm_train_config",
        type=str,
        help="Word LM training configuration",
    )
    group.add_argument(
        "--word_lm_file",
        type=str,
        help="Word LM parameter file",
    )
    group.add_argument(
        "--ngram_file",
        type=str,
        help="N-gram parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, *_train_config and "
        "*_file will be overwritten",
    )

    group = parser.add_argument_group("Quantization related")
    group.add_argument(
        "--quantize_uasr_model",
        type=str2bool,
        default=False,
        help="Apply dynamic quantization to uasr model.",
    )
    group.add_argument(
        "--quantize_lm",
        type=str2bool,
        default=False,
        help="Apply dynamic quantization to LM.",
    )
    group.add_argument(
        "--quantize_modules",
        type=str,
        nargs="*",
        default=["Linear"],
        help="""List of modules to be dynamically quantized.
        E.g.: --quantize_modules=[Linear,LSTM,GRU].
        Each specified module should be an attribute of 'torch.nn', e.g.:
        torch.nn.Linear, torch.nn.LSTM, torch.nn.GRU, ...""",
    )
    group.add_argument(
        "--quantize_dtype",
        type=str,
        default="qint8",
        choices=["float16", "qint8"],
        help="Dtype for dynamic quantization.",
    )

    group = parser.add_argument_group("Beam-search related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    group.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    group.add_argument("--beam_size", type=int, default=20, help="Beam size")
    group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
    group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")

    group = parser.add_argument_group("Text converter related")
    group.add_argument(
        "--token_type",
        type=str_or_none,
        default=None,
        choices=["char", "bpe", None],
        help="The token type for uasr model. "
        "If not given, refers from the training args",
    )
    group.add_argument(
        "--bpemodel",
        type=str_or_none,
        default=None,
        help="The model path of sentencepiece. "
        "If not given, refers from the training args",
    )
    return parser
def main(cmd=None):
    """CLI entry point: parse arguments and run UASR decoding."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # "config" is consumed by config_argparse and is not an inference() argument.
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
| 19,155 | 31.80137 | 87 | py |
espnet | espnet-master/espnet2/bin/enh_tse_inference.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from itertools import chain
from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
import humanfriendly
import numpy as np
import torch
import yaml
from tqdm import trange
from typeguard import check_argument_types
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainMSE
from espnet2.enh.loss.criterions.time_domain import SISNRLoss
from espnet2.enh.loss.wrappers.pit_solver import PITSolver
from espnet2.fileio.sound_scp import SoundScpWriter
from espnet2.tasks.enh_tse import TargetSpeakerExtractionTask as TSETask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool, str2triple_str, str_or_none
from espnet.utils.cli_utils import get_commandline_args
EPS = torch.finfo(torch.get_default_dtype()).eps
def get_train_config(train_config, model_file=None):
    """Resolve the training-config path.

    When no explicit config is given, derive it as ``config.yaml`` next to
    the model file.
    """
    if train_config is not None:
        return Path(train_config)
    assert model_file is not None, (
        "The argument 'model_file' must be provided "
        "if the argument 'train_config' is not specified."
    )
    return Path(model_file).parent / "config.yaml"
def recursive_dict_update(dict_org, dict_patch, verbose=False, log_prefix=""):
    """Update `dict_org` with `dict_patch` in-place recursively."""
    for key, patch_value in dict_patch.items():
        is_new_key = key not in dict_org
        if not is_new_key and isinstance(patch_value, dict):
            # Merge nested dicts instead of replacing them wholesale.
            recursive_dict_update(
                dict_org[key], patch_value, verbose=verbose, log_prefix=f"{key}."
            )
            continue
        if verbose and (is_new_key or dict_org[key] != patch_value):
            logging.info(
                "Overwriting config: [{}{}]: {} -> {}".format(
                    log_prefix,
                    key,
                    None if is_new_key else dict_org[key],
                    patch_value,
                )
            )
        dict_org[key] = patch_value
def build_model_from_args_and_file(task, args, model_file, device):
    """Build a task model from `args` and optionally load weights from `model_file`."""
    model = task.build_model(args)
    if not isinstance(model, AbsESPnetModel):
        raise RuntimeError(
            f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
        )
    model.to(device)
    if model_file is None:
        return model
    map_location = device
    if map_location == "cuda":
        # NOTE(kamo): "cuda" for torch.load always indicates cuda:0
        # in PyTorch<=1.4
        map_location = f"cuda:{torch.cuda.current_device()}"
    model.load_state_dict(torch.load(model_file, map_location=map_location))
    return model
class SeparateSpeech:
    """SeparateSpeech class for target-speaker extraction inference.

    Examples:
        >>> import soundfile
        >>> separate_speech = SeparateSpeech("enh_config.yml", "enh.pth")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> separate_speech(audio)
        [separated_audio1, separated_audio2, ...]
    """

    def __init__(
        self,
        train_config: Union[Path, str] = None,
        model_file: Union[Path, str] = None,
        inference_config: Union[Path, str] = None,
        segment_size: Optional[float] = None,
        hop_size: Optional[float] = None,
        normalize_segment_scale: bool = False,
        show_progressbar: bool = False,
        ref_channel: Optional[int] = None,
        normalize_output_wav: bool = False,
        device: str = "cpu",
        dtype: str = "float32",
    ):
        """Build the TSE model and configure optional segment-wise processing.

        Segment-wise processing is enabled only when both `segment_size` and
        `hop_size` (in seconds) are given.
        """
        assert check_argument_types()

        # 1. Build Enh model
        if inference_config is None:
            (
                enh_model,
                enh_train_args,
            ) = TSETask.build_model_from_file(train_config, model_file, device)
        else:
            # Overwrite model attributes with the inference-time config.
            train_config = get_train_config(train_config, model_file=model_file)
            with train_config.open("r", encoding="utf-8") as f:
                train_args = yaml.safe_load(f)
            with Path(inference_config).open("r", encoding="utf-8") as f:
                infer_args = yaml.safe_load(f)
            # Only encoder/extractor/decoder (and their *_conf) may be overwritten.
            supported_keys = list(
                chain(*[[k, k + "_conf"] for k in ("encoder", "extractor", "decoder")])
            )
            for k in infer_args.keys():
                if k not in supported_keys:
                    raise ValueError(
                        "Only the following top-level keys are supported: %s"
                        % ", ".join(supported_keys)
                    )
            recursive_dict_update(train_args, infer_args, verbose=True)
            enh_train_args = argparse.Namespace(**train_args)
            enh_model = build_model_from_args_and_file(
                TSETask, enh_train_args, model_file, device
            )
        enh_model.to(dtype=getattr(torch, dtype)).eval()

        self.device = device
        self.dtype = dtype
        self.enh_train_args = enh_train_args
        self.enh_model = enh_model

        # only used when processing long speech, i.e.
        # segment_size is not None and hop_size is not None
        self.segment_size = segment_size
        self.hop_size = hop_size
        self.normalize_segment_scale = normalize_segment_scale
        self.normalize_output_wav = normalize_output_wav
        self.show_progressbar = show_progressbar

        self.num_spk = enh_model.num_spk
        task = f"{self.num_spk}-speaker extraction"

        # reference channel for processing multi-channel speech
        if ref_channel is not None:
            logging.info(
                "Overwrite enh_model.extractor.ref_channel with {}".format(ref_channel)
            )
            enh_model.extractor.ref_channel = ref_channel
            self.ref_channel = ref_channel
        else:
            self.ref_channel = enh_model.ref_channel

        self.segmenting = segment_size is not None and hop_size is not None
        if self.segmenting:
            logging.info("Perform segment-wise speech %s" % task)
            logging.info(
                "Segment length = {} sec, hop length = {} sec".format(
                    segment_size, hop_size
                )
            )
        else:
            logging.info("Perform direct speech %s on the input" % task)

    @torch.no_grad()
    def __call__(
        self, speech_mix: Union[torch.Tensor, np.ndarray], fs: int = 8000, **kwargs
    ) -> List[torch.Tensor]:
        """Inference

        Args:
            speech_mix: Input speech data (Batch, Nsamples [, Channels])
            fs: sample rate
            enroll_ref1: enrollment for speaker 1
            enroll_ref2: enrollment for speaker 2
            ...

        Returns:
            [separated_audio1, separated_audio2, ...]
        """
        assert check_argument_types()

        # Collect all enrollment signals provided as enroll_refN kwargs.
        enroll_ref = [
            # (Batch, samples_aux)
            torch.as_tensor(kwargs["enroll_ref{}".format(spk + 1)])
            for spk in range(self.num_spk)
            if "enroll_ref{}".format(spk + 1) in kwargs
        ]
        # Input as audio signal
        if isinstance(speech_mix, np.ndarray):
            speech_mix = torch.as_tensor(speech_mix)

        assert speech_mix.dim() > 1, speech_mix.size()
        batch_size = speech_mix.size(0)
        speech_mix = speech_mix.to(getattr(torch, self.dtype))
        # lengths: (B,)
        lengths = speech_mix.new_full(
            [batch_size], dtype=torch.long, fill_value=speech_mix.size(1)
        )
        aux_lengths = [
            aux.new_full([batch_size], dtype=torch.long, fill_value=aux.size(1))
            for aux in enroll_ref
        ]

        # a. To device
        speech_mix = to_device(speech_mix, device=self.device)
        enroll_ref = to_device(enroll_ref, device=self.device)

        # Pre-encode enrollments once when the model shares one encoder,
        # so segment-wise processing does not re-encode them per segment.
        if self.enh_model.share_encoder:
            feats_aux, flens_aux = zip(
                *[
                    self.enh_model.encoder(enroll_ref[spk], aux_lengths[spk])
                    for spk in range(len(enroll_ref))
                ]
            )
        else:
            feats_aux = enroll_ref
            flens_aux = aux_lengths

        if self.segmenting and lengths[0] > self.segment_size * fs:
            # Segment-wise speech enhancement/separation
            overlap_length = int(np.round(fs * (self.segment_size - self.hop_size)))
            num_segments = int(
                np.ceil((speech_mix.size(1) - overlap_length) / (self.hop_size * fs))
            )
            t = T = int(self.segment_size * fs)
            pad_shape = speech_mix[:, :T].shape
            enh_waves = []
            range_ = trange if self.show_progressbar else range
            for i in range_(num_segments):
                st = int(i * self.hop_size * fs)
                en = st + T
                if en >= lengths[0]:
                    # en - st < T (last segment): zero-pad to full segment size
                    en = lengths[0]
                    speech_seg = speech_mix.new_zeros(pad_shape)
                    t = en - st
                    speech_seg[:, :t] = speech_mix[:, st:en]
                else:
                    t = T
                    speech_seg = speech_mix[:, st:en]  # B x T [x C]

                lengths_seg = speech_mix.new_full(
                    [batch_size], dtype=torch.long, fill_value=T
                )
                # b. Enhancement/Separation Forward
                feats, f_lens = self.enh_model.encoder(speech_seg, lengths_seg)
                feature_pre, _, others = zip(
                    *[
                        self.enh_model.extractor(
                            feats,
                            f_lens,
                            feats_aux[spk],
                            flens_aux[spk],
                            suffix_tag=f"_spk{spk + 1}",
                        )
                        for spk in range(len(enroll_ref))
                    ]
                )
                processed_wav = [
                    self.enh_model.decoder(f, lengths_seg)[0] for f in feature_pre
                ]
                if speech_seg.dim() > 2:
                    # multi-channel speech
                    speech_seg_ = speech_seg[:, self.ref_channel]
                else:
                    speech_seg_ = speech_seg

                if self.normalize_segment_scale:
                    # normalize the scale to match the input mixture scale
                    mix_energy = torch.sqrt(
                        torch.mean(speech_seg_[:, :t].pow(2), dim=1, keepdim=True)
                    )
                    enh_energy = torch.sqrt(
                        torch.mean(
                            sum(processed_wav)[:, :t].pow(2), dim=1, keepdim=True
                        )
                    )
                    processed_wav = [
                        w * (mix_energy / enh_energy) for w in processed_wav
                    ]
                # List[torch.Tensor(num_spk, B, T)]
                enh_waves.append(torch.stack(processed_wav, dim=0))

            # c. Stitch the enhanced segments together
            waves = enh_waves[0]
            for i in range(1, num_segments):
                # permutation between separated streams in last and current segments
                perm = self.cal_permumation(
                    waves[:, :, -overlap_length:],
                    enh_waves[i][:, :, :overlap_length],
                    criterion="si_snr",
                )
                # repermute separated streams in current segment
                for batch in range(batch_size):
                    enh_waves[i][:, batch] = enh_waves[i][perm[batch], batch]

                if i == num_segments - 1:
                    # Last segment was zero-padded beyond `t`; drop the padding
                    # before concatenation.
                    enh_waves[i][:, :, t:] = 0
                    enh_waves_res_i = enh_waves[i][:, :, overlap_length:t]
                else:
                    enh_waves_res_i = enh_waves[i][:, :, overlap_length:]

                # overlap-and-add (average over the overlapped part)
                waves[:, :, -overlap_length:] = (
                    waves[:, :, -overlap_length:] + enh_waves[i][:, :, :overlap_length]
                ) / 2
                # concatenate the residual parts of the later segment
                waves = torch.cat([waves, enh_waves_res_i], dim=2)
            # ensure the stitched length is same as input
            assert waves.size(2) == speech_mix.size(1), (waves.shape, speech_mix.shape)
            waves = torch.unbind(waves, dim=0)
        else:
            # b. Enhancement/Separation Forward
            feats, f_lens = self.enh_model.encoder(speech_mix, lengths)
            feature_pre, _, others = zip(
                *[
                    self.enh_model.extractor(
                        feats,
                        f_lens,
                        feats_aux[spk],
                        flens_aux[spk],
                        suffix_tag=f"_spk{spk + 1}",
                    )
                    for spk in range(len(enroll_ref))
                ]
            )
            others = {k: v for dic in others for k, v in dic.items()}
            waves = [self.enh_model.decoder(f, lengths)[0] for f in feature_pre]

        assert len(waves[0]) == batch_size, (len(waves[0]), batch_size)
        if self.normalize_output_wav:
            # Peak-normalize each stream to +-0.9 per sample in the batch.
            waves = [
                (w / abs(w).max(dim=1, keepdim=True)[0] * 0.9).cpu().numpy()
                for w in waves
            ]  # list[(batch, sample)]
        else:
            waves = [w.cpu().numpy() for w in waves]

        return waves

    @torch.no_grad()
    def cal_permumation(self, ref_wavs, enh_wavs, criterion="si_snr"):
        """Calculate the permutation between separated streams in two adjacent segments.

        Args:
            ref_wavs (List[torch.Tensor]): [(Batch, Nsamples)]
            enh_wavs (List[torch.Tensor]): [(Batch, Nsamples)]
            criterion (str): one of ("si_snr", "mse")
        Returns:
            perm (torch.Tensor): permutation for enh_wavs (Batch, num_spk)
        """
        criterion_class = {"si_snr": SISNRLoss, "mse": FrequencyDomainMSE}[criterion]

        pit_solver = PITSolver(criterion=criterion_class())

        _, _, others = pit_solver(ref_wavs, enh_wavs)
        perm = others["perm"]
        return perm

    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build SeparateSpeech instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.

        Returns:
            SeparateSpeech: SeparateSpeech instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))

        return SeparateSpeech(**kwargs)
def humanfriendly_or_none(value: str):
    """Parse a humanfriendly size string; the literal 'none' spellings map to None."""
    if value in {"none", "None", "NONE"}:
        return None
    return humanfriendly.parse_size(value)
def inference(
    output_dir: str,
    batch_size: int,
    dtype: str,
    fs: int,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    train_config: Optional[str],
    model_file: Optional[str],
    model_tag: Optional[str],
    inference_config: Optional[str],
    allow_variable_data_keys: bool,
    segment_size: Optional[float],
    hop_size: Optional[float],
    normalize_segment_scale: bool,
    show_progressbar: bool,
    ref_channel: Optional[int],
    normalize_output_wav: bool,
):
    """Run target-speaker extraction over a dataset and write wavs to output_dir.

    One SoundScpWriter (wav dir + scp file) is created per output speaker.
    """
    assert check_argument_types()
    # Only single-utterance, single-GPU inference is supported here.
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    if ngpu >= 1:
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set random-seed
    set_all_random_seed(seed)

    # 2. Build separate_speech
    separate_speech_kwargs = dict(
        train_config=train_config,
        model_file=model_file,
        inference_config=inference_config,
        segment_size=segment_size,
        hop_size=hop_size,
        normalize_segment_scale=normalize_segment_scale,
        show_progressbar=show_progressbar,
        ref_channel=ref_channel,
        normalize_output_wav=normalize_output_wav,
        device=device,
        dtype=dtype,
    )
    separate_speech = SeparateSpeech.from_pretrained(
        model_tag=model_tag,
        **separate_speech_kwargs,
    )

    # 3. Build data-iterator
    loader = TSETask.build_streaming_iterator(
        data_path_and_name_and_type,
        dtype=dtype,
        batch_size=batch_size,
        key_file=key_file,
        num_workers=num_workers,
        preprocess_fn=TSETask.build_preprocess_fn(
            separate_speech.enh_train_args, False
        ),
        collate_fn=TSETask.build_collate_fn(separate_speech.enh_train_args, False),
        allow_variable_data_keys=allow_variable_data_keys,
        inference=True,
    )

    # 4. Start for-loop
    output_dir = Path(output_dir).expanduser().resolve()
    writers = []
    for i in range(separate_speech.num_spk):
        writers.append(
            SoundScpWriter(f"{output_dir}/wavs/{i + 1}", f"{output_dir}/spk{i + 1}.scp")
        )

    for i, (keys, batch) in enumerate(loader):
        logging.info(f"[{i}] Enhancing {keys}")
        assert isinstance(batch, dict), type(batch)
        assert all(isinstance(s, str) for s in keys), keys
        _bs = len(next(iter(batch.values())))
        assert len(keys) == _bs, f"{len(keys)} != {_bs}"
        # Drop *_lengths entries; SeparateSpeech computes lengths itself.
        batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}

        waves = separate_speech(**batch)
        for spk, w in enumerate(waves):
            for b in range(batch_size):
                writers[spk][keys[b]] = fs, w[b]

    for writer in writers:
        writer.close()
def get_parser():
    """Build the command-line argument parser for TSE (frontend) inference."""
    parser = config_argparse.ArgumentParser(
        description="Frontend inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as extractor.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )

    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--fs", type=humanfriendly_or_none, default=8000, help="Sampling rate"
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=True,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)

    group = parser.add_argument_group("Output data related")
    group.add_argument(
        "--normalize_output_wav",
        type=str2bool,
        default=False,
        help="Whether to normalize the predicted wav to [-1~1]",
    )

    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--train_config",
        type=str,
        help="Training configuration file",
    )
    group.add_argument(
        "--model_file",
        type=str,
        help="Model parameter file",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If specify this option, train_config and "
        "model_file will be overwritten",
    )
    group.add_argument(
        "--inference_config",
        type=str_or_none,
        default=None,
        help="Optional configuration file for overwriting enh model attributes "
        "during inference",
    )

    group = parser.add_argument_group("Data loading related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )

    group = parser.add_argument_group("SeparateSpeech related")
    group.add_argument(
        "--segment_size",
        type=float,
        default=None,
        help="Segment length in seconds for segment-wise speech enhancement/separation",
    )
    group.add_argument(
        "--hop_size",
        type=float,
        default=None,
        help="Hop length in seconds for segment-wise speech enhancement/separation",
    )
    group.add_argument(
        "--normalize_segment_scale",
        type=str2bool,
        default=False,
        help="Whether to normalize the energy of the separated streams in each segment",
    )
    group.add_argument(
        "--show_progressbar",
        type=str2bool,
        default=False,
        help="Whether to show a progress bar when performing segment-wise speech "
        "enhancement/separation",
    )
    group.add_argument(
        "--ref_channel",
        type=int,
        default=None,
        help="If not None, this will overwrite the ref_channel defined in the "
        "extractor module (for multi-channel speech processing)",
    )

    return parser
def main(cmd=None):
    """CLI entry point: parse arguments and launch TSE inference."""
    print(get_commandline_args(), file=sys.stderr)
    args = get_parser().parse_args(cmd)
    kwargs = vars(args)
    # Drop the config-file key injected by config_argparse before dispatch.
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
espnet | espnet-master/espnet2/uasr/espnet_model.py | import argparse
import logging
from contextlib import contextmanager
from typing import Dict, Optional, Tuple
import editdistance
import torch
import torch.nn.functional as F
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.uasr.discriminator.abs_discriminator import AbsDiscriminator
from espnet2.uasr.generator.abs_generator import AbsGenerator
from espnet2.uasr.loss.abs_loss import AbsUASRLoss
from espnet2.uasr.segmenter.abs_segmenter import AbsSegmenter
from espnet2.utils.types import str2bool
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
# AMP autocast is only available in torch>=1.6.0; otherwise provide a no-op
# context manager with the same call signature so call sites stay uniform.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        yield
# kenlm is an optional dependency (only needed for LM perplexity metrics);
# fall back to None when it is not installed.
try:
    import kenlm  # for CI import
except ImportError:
    # NOTE: ModuleNotFoundError is a subclass of ImportError, so a single
    # clause covers both. The previous `except ImportError or
    # ModuleNotFoundError:` evaluated the `or` expression first (always
    # yielding ImportError) and only worked by accident.
    kenlm = None
class ESPnetUASRModel(AbsESPnetModel):
"""Unsupervised ASR model.
The source code is from FAIRSEQ:
https://github.com/facebookresearch/fairseq/tree/main/examples/wav2vec/unsupervised
"""
def __init__(
self,
frontend: Optional[AbsFrontend],
segmenter: Optional[AbsSegmenter],
generator: AbsGenerator,
discriminator: AbsDiscriminator,
losses: Dict[str, AbsUASRLoss],
kenlm_path: Optional[str],
token_list: Optional[list],
max_epoch: Optional[int],
vocab_size: int,
cfg: Optional[Dict] = None,
pad: int = 1,
sil_token: str = "<SIL>",
sos_token: str = "<s>",
eos_token: str = "</s>",
skip_softmax: str2bool = False,
use_gumbel: str2bool = False,
use_hard_gumbel: str2bool = True,
min_temperature: float = 0.1,
max_temperature: float = 2.0,
decay_temperature: float = 0.99995,
use_collected_training_feats: str2bool = False,
):
assert check_argument_types()
super().__init__()
# note that eos is the same as sos (equivalent ID)
self.frontend = frontend
self.segmenter = segmenter
self.use_segmenter = True if segmenter is not None else False
self.generator = generator
self.discriminator = discriminator
self.pad = pad
if cfg is not None:
cfg = argparse.Namespace(**cfg)
self.skip_softmax = cfg.no_softmax
self.use_gumbel = cfg.gumbel
self.use_hard_gumbel = cfg.hard_gumbel
else:
self.skip_softmax = skip_softmax
self.use_gumbel = use_gumbel
self.use_hard_gumbel = use_hard_gumbel
self.use_collected_training_feats = use_collected_training_feats
self.min_temperature = min_temperature
self.max_temperature = max_temperature
self.decay_temperature = decay_temperature
self.current_temperature = max_temperature
self._number_updates = 0
self._number_epochs = 0
self.max_epoch = max_epoch
# for loss registration
self.losses = torch.nn.ModuleDict(losses)
# for validation
self.vocab_size = vocab_size
self.token_list = token_list
self.token_id_converter = TokenIDConverter(token_list=token_list)
self.sil = self.token_id_converter.tokens2ids([sil_token])[0]
self.sos = self.token_id_converter.tokens2ids([sos_token])[0]
self.eos = self.token_id_converter.tokens2ids([eos_token])[0]
self.kenlm = None
assert (
kenlm is not None
), "kenlm is not installed, please install from tools/installers"
if kenlm_path:
self.kenlm = kenlm.Model(kenlm_path)
    @property
    def number_updates(self):
        """Number of optimizer updates performed so far (non-negative int)."""
        return self._number_updates
    @number_updates.setter
    def number_updates(self, iiter: int):
        """Set the update counter (e.g. when resuming training); must be >= 0."""
        assert check_argument_types() and iiter >= 0
        self._number_updates = iiter
def forward(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: Optional[torch.Tensor] = None,
text_lengths: Optional[torch.Tensor] = None,
pseudo_labels: Optional[torch.Tensor] = None,
pseudo_labels_lengths: Optional[torch.Tensor] = None,
do_validation: Optional[str2bool] = False,
print_hyp: Optional[str2bool] = False,
**kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
"""Frontend + Segmenter + Generator + Discriminator + Calc Loss
Args:
"""
stats = {}
assert text_lengths.dim() == 1, text_lengths.shape
# Check that batch_size is unified
assert (
speech.shape[0]
== speech_lengths.shape[0]
== text.shape[0]
== text_lengths.shape[0]
), (
speech.shape,
speech_lengths.shape,
text.shape,
text_lengths.shape,
)
batch_size = speech.shape[0]
# for data-parallel
text = text[:, : text_lengths.max()]
# 1. Feats encode (Extract feats + Apply segmenter)
feats, padding_mask = self.encode(speech, speech_lengths)
# 2. Generate fake samples
(
generated_sample,
real_sample,
x_inter,
generated_sample_padding_mask,
) = self.generator(feats, text, padding_mask)
# 3. Reprocess segments
if self.use_segmenter:
(
generated_sample,
generated_sample_padding_mask,
) = self.segmenter.logit_segment(
generated_sample, generated_sample_padding_mask
)
# for phone_diversity_loss
generated_sample_logits = generated_sample
if not self.skip_softmax:
if self.training and self.use_gumbel:
generated_sample = F.gumbel_softmax(
generated_sample_logits.float(),
tau=self.curr_temp,
hard=self.use_hard_gumbel,
).type_as(generated_sample_logits)
else:
generated_sample = generated_sample_logits.softmax(-1)
# for validation
vocab_seen = None
if do_validation:
batch_num_errors = 0
batched_hyp_ids = generated_sample.argmax(-1)
batched_hyp_ids[generated_sample_padding_mask] = self.pad
# for kenlm ppl metric
batch_lm_log_prob = 0
batch_num_hyp_tokens = 0
vocab_seen = torch.zeros(self.vocab_size - 4, dtype=torch.bool)
for hyp_ids, ref_ids in zip(batched_hyp_ids, text):
# remove <pad> and <unk>
hyp_ids = hyp_ids[hyp_ids >= 4]
# remove duplicate tokens
hyp_ids = hyp_ids.unique_consecutive()
# remove silence
hyp_ids_nosil = hyp_ids[hyp_ids != self.sil]
hyp_ids_nosil_list = hyp_ids_nosil.tolist()
if self.kenlm:
hyp_token_list = self.token_id_converter.ids2tokens(
integers=hyp_ids
)
hyp_tokens = " ".join(hyp_token_list)
lm_log_prob = self.kenlm.score(hyp_tokens)
batch_lm_log_prob += lm_log_prob
batch_num_hyp_tokens += len(hyp_token_list)
hyp_tokens_index = hyp_ids[hyp_ids >= 4]
vocab_seen[hyp_tokens_index - 4] = True
ref_ids = ref_ids[ref_ids != self.pad]
ref_ids_list = ref_ids.tolist()
num_errors = editdistance.eval(hyp_ids_nosil_list, ref_ids_list)
batch_num_errors += num_errors
stats["batch_num_errors"] = batch_num_errors
stats["batch_num_ref_tokens"] = text_lengths.sum().item()
if self.kenlm:
stats["batch_lm_log_prob"] = batch_lm_log_prob
stats["batch_num_hyp_tokens"] = batch_num_hyp_tokens
stats["batch_size"] = batch_size
# print the last sample in the batch
if print_hyp:
hyp_token_list = self.token_id_converter.ids2tokens(
integers=hyp_ids_nosil
)
hyp_tokens = " ".join(hyp_token_list)
ref_token_list = self.token_id_converter.ids2tokens(integers=ref_ids)
ref_tokens = " ".join(ref_token_list)
logging.info(f"[REF]: {ref_tokens}")
logging.info(f"[HYP]: {hyp_tokens}")
real_sample_padding_mask = text == self.pad
# 5. Discriminator condition
generated_sample_prediction = self.discriminator(
generated_sample, generated_sample_padding_mask
)
real_sample_prediction = self.discriminator(
real_sample, real_sample_padding_mask
)
is_discriminative_step = self.is_discriminative_step()
# 5. Calculate losses
loss_info = []
if "discriminator_loss" in self.losses.keys():
(
generated_sample_prediction_loss,
real_sample_prediction_loss,
) = self.losses["discriminator_loss"](
generated_sample_prediction,
real_sample_prediction,
is_discriminative_step,
)
loss_info.append(
generated_sample_prediction_loss
* self.losses["discriminator_loss"].weight
)
if is_discriminative_step:
loss_info.append(
real_sample_prediction_loss
* self.losses["discriminator_loss"].weight
)
else:
generated_sample_prediction_loss, real_sample_prediction_loss = None, None
if "gradient_penalty" in self.losses.keys():
gp = self.losses["gradient_penalty"](
generated_sample,
real_sample,
self.training,
is_discriminative_step,
)
loss_info.append(gp * self.losses["gradient_penalty"].weight)
loss_info.append(gp * self.losses["gradient_penalty"].weight)
else:
gp = None
if "phoneme_diversity_loss" in self.losses.keys():
pdl = self.losses["phoneme_diversity_loss"](
generated_sample_logits, batch_size, is_discriminative_step
)
loss_info.append(pdl * self.losses["phoneme_diversity_loss"].weight)
else:
pdl = None
if "smoothness_penalty" in self.losses.keys():
sp = self.losses["smoothness_penalty"](
generated_sample_logits,
generated_sample_padding_mask,
batch_size,
is_discriminative_step,
)
loss_info.append(sp * self.losses["smoothness_penalty"].weight)
else:
sp = None
if "pseudo_label_loss" in self.losses.keys() and pseudo_labels is not None:
mmi = self.losses["pseudo_label_loss"](
x_inter, pseudo_labels, is_discriminative_step
)
loss_info.append(mmi * self.losses["pseudo_label_loss"].weight)
else:
mmi = None
# Update temperature
self._change_temperature()
self.number_updates += 1
loss = sum(loss_info)
# Collect total loss stats
stats["loss"] = loss.detach()
stats["generated_sample_prediction_loss"] = generated_sample_prediction_loss
stats["real_sample_prediction_loss"] = real_sample_prediction_loss
stats["gp"] = gp
stats["sp"] = sp
stats["pdl"] = pdl
stats["mmi"] = mmi
# force_gatherable: to-device and to-tensor if scalar for DataParallel
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
return loss, stats, weight, vocab_seen
def inference(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
):
# 1. Feats encode (Extract feats + Apply segmenter)
feats, padding_mask = self.encode(speech, speech_lengths)
# 2. Generate fake samples
(
generated_sample,
_,
x_inter,
generated_sample_padding_mask,
) = self.generator(feats, None, padding_mask)
# generated_sample = generated_sample.softmax(-1)
return generated_sample, generated_sample_padding_mask
def collect_feats(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: Optional[torch.Tensor] = None,
text_lengths: Optional[torch.Tensor] = None,
**kwargs,
) -> Dict[str, torch.Tensor]:
if self.frontend is not None:
# Frontend
# e.g. STFT and Feature extract
# data_loader may send time-domain signal in this case
# speech (Batch, NSamples) -> feats: (Batch, NFrames, Dim)
speech = F.layer_norm(speech, speech.shape)
feats, feats_lengths = self.frontend(speech, speech_lengths)
else:
# No frontend and no feature extract
feats, feats_lengths = speech, speech_lengths
return {"feats": feats, "feats_lengths": feats_lengths}
def _extract_feats(
self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
assert speech_lengths.dim() == 1, speech_lengths.shape
# for data-parallel
speech = speech[:, : speech_lengths.max()]
if self.frontend is not None and not self.use_collected_training_feats:
# Frontend
# e.g. STFT and Feature extract
# data_loader may send time-domain signal in this case
# speech (Batch, NSamples) -> feats: (Batch, NFrames, Dim)
speech = F.layer_norm(speech, speech.shape)
feats, feats_lengths = self.frontend(speech, speech_lengths)
else:
# No frontend and no feature extract (usually with pre-extracted feat)
# logging.info("use exisitng features")
feats, feats_lengths = speech, speech_lengths
return feats, feats_lengths
    def encode(
        self, speech: torch.Tensor, speech_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract features and build their padding mask.

        Returns:
            feats: (Batch, NFrames, Dim) features
            padding_mask: (Batch, NFrames) mask, True at padded frames
        """
        # Feature extraction always runs in full precision (autocast off).
        with autocast(False):
            # 1. Extract feats
            feats, feats_lengths = self._extract_feats(speech, speech_lengths)
        padding_mask = make_pad_mask(feats_lengths).to(feats.device)
        # 2. Apply feats
        # Optionally subsample/merge frames before the generator.
        if self.use_segmenter:
            feats, padding_mask = self.segmenter.pre_segment(feats, padding_mask)
        return feats, padding_mask
def is_discriminative_step(self):
return self.number_updates % 2 == 1
def get_optim_index(self):
return self.number_updates % 2
def _change_temperature(self):
self.current_temperature = max(
self.max_temperature * self.decay_temperature**self.number_updates,
self.min_temperature,
)
| 15,490 | 34.941995 | 87 | py |
espnet | espnet-master/espnet2/uasr/discriminator/abs_discriminator.py | from abc import ABC, abstractmethod
import torch
class AbsDiscriminator(torch.nn.Module, ABC):
    """Abstract base class of the UASR discriminator."""
    @abstractmethod
    def forward(
        self,
        xs_pad: torch.Tensor,
        padding_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Score padded input sequences.

        Args:
            xs_pad: (Batch, Time, Channel) padded input.
            padding_mask: mask that is True at padded positions.

        Returns:
            Discriminator scores.
        """
        raise NotImplementedError
| 272 | 18.5 | 45 | py |
espnet | espnet-master/espnet2/uasr/discriminator/conv_discriminator.py | import argparse
from typing import Dict, Optional
import torch
from typeguard import check_argument_types
from espnet2.uasr.discriminator.abs_discriminator import AbsDiscriminator
from espnet2.utils.types import str2bool
class SamePad(torch.nn.Module):
    """Trim trailing frames so a padded Conv1d output matches its input length.

    Causal convolutions pad ``kernel_size - 1`` extra frames; non-causal
    convolutions with an even kernel produce one extra frame.
    """

    def __init__(self, kernel_size, causal=False):
        super().__init__()
        if causal:
            trim = kernel_size - 1
        else:
            trim = 1 if kernel_size % 2 == 0 else 0
        self.remove = trim

    def forward(self, x):
        trim = self.remove
        # Drop the surplus frames from the end of the time axis, if any.
        return x[:, :, :-trim] if trim > 0 else x
class ConvDiscriminator(AbsDiscriminator):
    """Convolutional discriminator for UASR.

    A stack of 1-D convolutions maps a (Batch, Time, Channel) sequence of
    phoneme distributions to per-frame scores, which are pooled (mean or max)
    into a single scalar score per utterance.
    """

    def __init__(
        self,
        input_dim: int,
        cfg: Optional[Dict] = None,
        conv_channels: int = 384,
        conv_kernel: int = 8,
        conv_dilation: int = 1,
        conv_depth: int = 2,
        linear_emb: str2bool = False,
        causal: str2bool = True,
        max_pool: str2bool = False,
        act_after_linear: str2bool = False,
        dropout: float = 0.0,
        spectral_norm: str2bool = False,
        weight_norm: str2bool = False,
    ):
        super().__init__()
        assert check_argument_types()
        # A fairseq-style ``cfg`` namespace overrides every explicit kwarg.
        if cfg is not None:
            cfg = argparse.Namespace(**cfg)
            self.conv_channels = cfg.discriminator_dim
            self.conv_kernel = cfg.discriminator_kernel
            self.conv_dilation = cfg.discriminator_dilation
            self.conv_depth = cfg.discriminator_depth
            self.linear_emb = cfg.discriminator_linear_emb
            self.causal = cfg.discriminator_causal
            self.max_pool = cfg.discriminator_max_pool
            self.act_after_linear = cfg.discriminator_act_after_linear
            self.dropout = cfg.discriminator_dropout
            self.spectral_norm = cfg.discriminator_spectral_norm
            self.weight_norm = cfg.discriminator_weight_norm
        else:
            self.conv_channels = conv_channels
            self.conv_kernel = conv_kernel
            self.conv_dilation = conv_dilation
            self.conv_depth = conv_depth
            self.linear_emb = linear_emb
            self.causal = causal
            self.max_pool = max_pool
            self.act_after_linear = act_after_linear
            self.dropout = dropout
            self.spectral_norm = spectral_norm
            self.weight_norm = weight_norm
        # Causal convs pad only on the left; SamePad later trims the surplus.
        if self.causal:
            self.conv_padding = self.conv_kernel - 1
        else:
            self.conv_padding = self.conv_kernel // 2

        def make_conv(
            in_channel, out_channel, kernal_size, padding_size=0, dilation_value=1
        ):
            # Conv1d with optional spectral- or weight-normalization.
            conv = torch.nn.Conv1d(
                in_channel,
                out_channel,
                kernel_size=kernal_size,
                padding=padding_size,
                dilation=dilation_value,
            )
            if self.spectral_norm:
                conv = torch.nn.utils.spectral_norm(conv)
            elif self.weight_norm:
                conv = torch.nn.utils.weight_norm(conv)
            return conv

        # Embedding layer: a 1x1 ("linear") conv or a full-width conv.
        if self.linear_emb:
            emb_net = [
                make_conv(
                    input_dim, self.conv_channels, 1, dilation_value=self.conv_dilation
                )
            ]
        else:
            emb_net = [
                make_conv(
                    input_dim,
                    self.conv_channels,
                    self.conv_kernel,
                    self.conv_padding,
                    dilation_value=self.conv_dilation,
                ),
                SamePad(kernel_size=self.conv_kernel, causal=self.causal),
            ]
        if self.act_after_linear:
            emb_net.append(torch.nn.GELU())
        # Inner stack: (depth - 1) conv blocks followed by a 1-channel conv
        # that produces one score per frame.
        inner_net = [
            torch.nn.Sequential(
                make_conv(
                    self.conv_channels,
                    self.conv_channels,
                    self.conv_kernel,
                    self.conv_padding,
                    dilation_value=self.conv_dilation,
                ),
                SamePad(kernel_size=self.conv_kernel, causal=self.causal),
                torch.nn.Dropout(self.dropout),
                torch.nn.GELU(),
            )
            for _ in range(self.conv_depth - 1)
        ]
        inner_net += [
            make_conv(
                self.conv_channels,
                1,
                self.conv_kernel,
                self.conv_padding,
                dilation_value=1,
            ),
            SamePad(kernel_size=self.conv_kernel, causal=self.causal),
        ]
        self.net = torch.nn.Sequential(
            *emb_net,
            # NOTE: fixed from ``torch.nn.Dropout(dropout)``: the raw keyword
            # argument ignored any value configured through ``cfg``;
            # ``self.dropout`` is consistent with the inner conv blocks.
            torch.nn.Dropout(self.dropout),
            *inner_net,
        )

    def forward(self, x: torch.Tensor, padding_mask: Optional[torch.Tensor]):
        """Score input sequences.

        Args:
            x: (Batch, Time, Channel) input distributions or logits.
            padding_mask: optional (Batch, Time) mask, True at padded frames.

        Returns:
            (Batch,) utterance-level discriminator scores.
        """
        assert check_argument_types()
        # (Batch, Time, Channel) -> (Batch, Channel, Time)
        x = x.transpose(1, 2)
        x = self.net(x)
        # (Batch, Channel, Time) -> (Batch, Time, Channel)
        x = x.transpose(1, 2)
        x_sz = x.size(1)
        if padding_mask is not None and padding_mask.any() and padding_mask.dim() > 1:
            padding_mask = padding_mask[:, : x.size(1)]
            # NOTE: fixed from ``padding_mask.to(x.device)`` whose return
            # value was discarded (Tensor.to is not in-place); the mask must
            # actually be moved to x's device before it is used to index x.
            padding_mask = padding_mask.to(x.device)
            # Neutralize padded frames: -inf for max-pool, 0 for mean-pool.
            x[padding_mask] = float("-inf") if self.max_pool else 0
            x_sz = x_sz - padding_mask.sum(dim=-1)
        x = x.squeeze(-1)
        if self.max_pool:
            x, _ = x.max(dim=-1)
        else:
            x = x.sum(dim=-1)
        # NOTE(review): this length-normalization also divides the max-pooled
        # score; the fairseq reference applies it only in the mean-pool
        # branch. Kept as-is to preserve behavior -- confirm upstream intent.
        x = x / x_sz
        return x
| 5,634 | 31.385057 | 87 | py |
espnet | espnet-master/espnet2/uasr/loss/gradient_penalty.py | import numpy as np
import torch
from torch import autograd
from typeguard import check_argument_types
from espnet2.uasr.discriminator.abs_discriminator import AbsDiscriminator
from espnet2.uasr.loss.abs_loss import AbsUASRLoss
from espnet2.utils.types import str2bool
class UASRGradientPenalty(AbsUASRLoss):
    """Gradient penalty (WGAN-GP style) for the UASR discriminator."""
    def __init__(
        self,
        discriminator: AbsDiscriminator,
        weight: float = 1.0,
        probabilistic_grad_penalty_slicing: str2bool = False,
        reduction: str = "sum",
    ):
        super().__init__()
        assert check_argument_types()
        # Wrapped in a plain list so the discriminator is NOT registered as a
        # submodule of this loss (its parameters are owned by the model).
        self.discriminator = [discriminator]
        self.weight = weight
        self.probabilistic_grad_penalty_slicing = probabilistic_grad_penalty_slicing
        # NOTE(review): ``reduction`` is stored but the forward below always
        # uses ``.sum()`` -- confirm whether other reductions were intended.
        self.reduction = reduction
    def forward(
        self,
        fake_sample: torch.Tensor,
        real_sample: torch.Tensor,
        is_training: str2bool,
        is_discrimininative_step: str2bool,
    ):
        """Forward.
        Args:
            fake_sample: generated sample from generator
            real_sample: real sample
            is_training: whether is at training step
            is_discriminative_step: whether is training discriminator
        """
        # Only active during training on discriminator steps with weight > 0;
        # otherwise contributes 0 to the total loss.
        if self.weight > 0 and is_discrimininative_step and is_training:
            # Crop both samples to a common (batch, time) shape.
            batch_size = min(fake_sample.size(0), real_sample.size(0))
            time_length = min(fake_sample.size(1), real_sample.size(1))
            if self.probabilistic_grad_penalty_slicing:
                def get_slice(sample, dim, target_size):
                    # Take a random crop of ``target_size`` along ``dim``.
                    size = sample.size(dim)
                    diff = size - target_size
                    if diff <= 0:
                        return sample
                    start = np.random.randint(0, diff + 1)
                    return sample.narrow(dim=dim, start=start, length=target_size)
                fake_sample = get_slice(fake_sample, 0, batch_size)
                fake_sample = get_slice(fake_sample, 1, time_length)
                real_sample = get_slice(real_sample, 0, batch_size)
                real_sample = get_slice(real_sample, 1, time_length)
            else:
                # Deterministic crop from the start.
                fake_sample = fake_sample[:batch_size, :time_length]
                real_sample = real_sample[:batch_size, :time_length]
            # Random convex combination of real and fake samples (one mixing
            # coefficient per utterance, broadcast over time and channels).
            alpha = torch.rand(real_sample.size(0), 1, 1)
            alpha = alpha.expand(real_sample.size())
            alpha = alpha.to(real_sample.device)
            interpolates = alpha * real_sample + ((1 - alpha) * fake_sample)
            disc_interpolates = self.discriminator[0](interpolates, None)
            # Gradient of the discriminator output w.r.t. the interpolates;
            # create_graph=True so the penalty itself is differentiable.
            gradients = autograd.grad(
                outputs=disc_interpolates,
                inputs=interpolates,
                grad_outputs=torch.ones(
                    disc_interpolates.size(), device=real_sample.device
                ),
                create_graph=True,
                retain_graph=True,
                only_inputs=True,
            )[0]
            # Penalize deviation of the gradient norm from 1.
            gradient_penalty = (gradients.norm(2, dim=1) - 1) ** 2
            return gradient_penalty.sum()
        else:
            return 0
| 3,160 | 33.736264 | 84 | py |
espnet | espnet-master/espnet2/uasr/loss/smoothness_penalty.py | import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.uasr.loss.abs_loss import AbsUASRLoss
class UASRSmoothnessPenalty(AbsUASRLoss):
    """Smoothness penalty for UASR.

    Penalizes large frame-to-frame differences in the generator logits so
    consecutive predictions stay smooth.
    """

    def __init__(
        self,
        weight: float = 1.0,
        reduction: str = "none",
    ):
        super().__init__()
        assert check_argument_types()
        self.weight = weight
        self.reduction = reduction

    def forward(
        self,
        dense_logits: torch.Tensor,
        dense_padding_mask: torch.Tensor,
        sample_size: int,
        is_discriminative_step: bool,
    ):
        """Compute the penalty.

        Args:
            dense_logits: (Batch, Time, Channel) generator output logits.
            dense_padding_mask: (Batch, Time) mask, True at padded frames.
            sample_size: batch size used to scale the penalty.
            is_discriminative_step: whether the discriminator is training.

        Returns:
            Scalar penalty tensor, or 0 when inactive.
        """
        # Inactive on discriminator steps or with a non-positive weight.
        if self.weight <= 0 or is_discriminative_step:
            return 0
        # Elementwise squared difference between neighboring frames.
        penalty = F.mse_loss(
            dense_logits[:, :-1], dense_logits[:, 1:], reduction=self.reduction
        )
        # Padded positions must not contribute.
        penalty[dense_padding_mask[:, 1:]] = 0
        return penalty.mean() * sample_size
| 1,308 | 26.851064 | 83 | py |
espnet | espnet-master/espnet2/uasr/loss/phoneme_diversity_loss.py | import torch
from typeguard import check_argument_types
from espnet2.uasr.loss.abs_loss import AbsUASRLoss
from espnet2.utils.types import str2bool
class UASRPhonemeDiversityLoss(AbsUASRLoss):
    """Phoneme diversity loss for UASR.

    Encourages the generator to use the whole phoneme inventory by pushing
    the batch-averaged softmax distribution towards high perplexity.
    """

    def __init__(
        self,
        weight: float = 1.0,
    ):
        super().__init__()
        assert check_argument_types()
        self.weight = weight

    def forward(
        self, dense_x: torch.Tensor, sample_size: int, is_discriminative_step: str2bool
    ):
        """Compute the diversity loss.

        Args:
            dense_x: (Batch, Time, Channel) predicted logits.
            sample_size: batch size used to scale the loss.
            is_discriminative_step: whether the discriminator is training.

        Returns:
            Scalar loss tensor, or 0 when inactive.
        """
        if self.weight <= 0 or is_discriminative_step:
            return 0
        num_channels = dense_x.size(-1)
        # Average predicted distribution over all frames in the batch.
        flat_logits = dense_x.reshape(-1, num_channels).float()
        avg_probs = torch.softmax(flat_logits, dim=-1).mean(dim=0)
        # Perplexity of the averaged distribution (epsilon avoids log(0)).
        entropy = -torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
        phoneme_ppl = torch.exp(entropy)
        # Loss shrinks as the perplexity approaches the inventory size.
        return ((num_channels - phoneme_ppl) / num_channels) * sample_size
| 1,316 | 27.630435 | 87 | py |
espnet | espnet-master/espnet2/uasr/loss/abs_loss.py | from abc import ABC, abstractmethod
import torch
EPS = torch.finfo(torch.get_default_dtype()).eps
class AbsUASRLoss(torch.nn.Module, ABC):
    """Base class for all UASR loss modules."""

    # the name will be the key that appears in the reporter
    @property
    def name(self) -> str:
        """Name of the loss; used as the reporter key.

        NOTE: fixed from ``return NotImplementedError``, which silently
        returned the exception *class* instead of signaling that subclasses
        must override this property.
        """
        raise NotImplementedError

    @abstractmethod
    def forward(
        self,
    ) -> torch.Tensor:
        """Compute the loss.

        Returns:
            torch.Tensor: loss values of shape (batch,).
        """
        # the return tensor should be shape of (batch)
        raise NotImplementedError
| 499 | 21.727273 | 59 | py |
espnet | espnet-master/espnet2/uasr/loss/pseudo_label_loss.py | import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.uasr.loss.abs_loss import AbsUASRLoss
from espnet2.utils.types import str2bool
class UASRPseudoLabelLoss(AbsUASRLoss):
    """Auxiliary pseudo-label (cross-entropy) loss for UASR.

    Projects intermediate generator states to pseudo-label classes and
    applies frame-level cross-entropy against (optionally downsampled)
    pseudo labels.
    """

    def __init__(
        self,
        weight: float = 1.0,
        input_dim: int = 128,
        output_dim: int = 64,
        downsample_rate: int = 2,
        ignore_index: int = -1,
        reduction: str = "none",
    ):
        super().__init__()
        assert check_argument_types()
        self.weight = weight
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.downsample_rate = downsample_rate
        self.ignore_index = ignore_index
        self.reduction = reduction
        # The projection layer is only needed when the loss is active.
        if self.weight > 0:
            self.decoder = torch.nn.Linear(self.input_dim, self.output_dim)

    def forward(
        self,
        inter_x: torch.Tensor,
        pseudo_labels: torch.Tensor,
        is_discriminative_step: str2bool,
    ):
        """Compute the pseudo-label cross-entropy.

        Args:
            inter_x: (Batch, Time, input_dim) intermediate generator states.
            pseudo_labels: (Batch, Time') frame-level pseudo-label ids.
            is_discriminative_step: whether the discriminator is training.

        Returns:
            Scalar loss tensor, or 0 when inactive.
        """
        inactive = (
            self.weight <= 0 or is_discriminative_step or pseudo_labels is None
        )
        if inactive:
            return 0
        logits = self.decoder(inter_x)
        if self.downsample_rate > 1:
            pseudo_labels = pseudo_labels[:, :: self.downsample_rate]
        # Labels and logits may disagree in length; use the common prefix.
        usable = min(pseudo_labels.shape[1], logits.shape[1])
        ce = F.cross_entropy(
            logits[:, :usable].transpose(1, 2),
            pseudo_labels[:, :usable],
            ignore_index=self.ignore_index,
            reduction=self.reduction,
        )
        # Mean over all frames, rescaled by the batch size.
        return ce.mean() * ce.shape[0]
| 1,847 | 29.295082 | 88 | py |
espnet | espnet-master/espnet2/uasr/loss/discriminator_loss.py | import torch
import torch.nn.functional as F
from typeguard import check_argument_types
from espnet2.uasr.loss.abs_loss import AbsUASRLoss
from espnet2.utils.types import str2bool
class UASRDiscriminatorLoss(AbsUASRLoss):
    """GAN discriminator loss for UASR with optional label smoothing.

    Targets are 1 for generated ("fake") samples and 0 for real token
    samples on discriminator steps; on generator steps, generated samples
    are pushed towards the real-sample target instead.
    """

    def __init__(
        self,
        weight: float = 1.0,
        smoothing: float = 0.0,
        smoothing_one_side: str2bool = False,
        reduction: str = "sum",
    ):
        super().__init__()
        assert check_argument_types()
        self.weight = weight
        self.smoothing = smoothing
        self.smoothing_one_sided = smoothing_one_side
        self.reduction = reduction

    def forward(
        self,
        dense_y: torch.Tensor,
        token_y: torch.Tensor,
        is_discriminative_step: str2bool,
    ):
        """Compute BCE-with-logits losses for generated and real samples.

        Args:
            dense_y: predicted logits of generated samples.
            token_y: predicted logits of real samples.
            is_discriminative_step: whether the discriminator is training.

        Returns:
            (loss_dense, loss_token); loss_token is None on generator steps.
        """
        if self.weight <= 0:
            return 0
        # One-sided smoothing keeps the fake targets un-smoothed.
        fake_smooth = 0 if self.smoothing_one_sided else self.smoothing
        real_smooth = self.smoothing
        if is_discriminative_step:
            # Discriminator turn: fake -> 1 (smoothed), real -> 0 (smoothed).
            loss_dense = F.binary_cross_entropy_with_logits(
                dense_y,
                dense_y.new_ones(dense_y.shape) - fake_smooth,
                reduction=self.reduction,
            )
            loss_token = F.binary_cross_entropy_with_logits(
                token_y,
                token_y.new_zeros(token_y.shape) + real_smooth,
                reduction=self.reduction,
            )
        else:
            # Generator turn: push fake samples towards the real target.
            loss_dense = F.binary_cross_entropy_with_logits(
                dense_y,
                dense_y.new_zeros(dense_y.shape) + fake_smooth,
                reduction=self.reduction,
            )
            loss_token = None
        return loss_dense, loss_token
| 1,994 | 29.227273 | 67 | py |
espnet | espnet-master/espnet2/uasr/segmenter/abs_segmenter.py | """
Segmenter definition for UASR task
Practically, the output of the generator (at frame level) may
predict the same phoneme for consecutive frames, which makes
it too easy for the discriminator. So, the segmenter here is
to merge frames with a similar prediction from the generator output.
"""
from abc import ABC, abstractmethod
import torch
class AbsSegmenter(torch.nn.Module, ABC):
    """Abstract interface for UASR segmenters."""
    @abstractmethod
    def pre_segment(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
    ) -> torch.Tensor:
        """Subsample/merge padded frame features before the generator."""
        raise NotImplementedError
    @abstractmethod
    def logit_segment(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
    ) -> torch.Tensor:
        """Merge consecutive generator logits that predict the same unit."""
        raise NotImplementedError
| 736 | 22.774194 | 68 | py |
espnet | espnet-master/espnet2/uasr/segmenter/random_segmenter.py | import math
import torch
from typeguard import check_argument_types
from espnet2.uasr.segmenter.abs_segmenter import AbsSegmenter
from espnet2.utils.types import str2bool
class RandomSegmenter(AbsSegmenter):
    """Segmenter that randomly subsamples frames before the generator."""

    def __init__(
        self,
        subsample_rate: float = 0.25,
        mean_pool: str2bool = True,
        mean_join_pool: str2bool = False,
        remove_zeros: str2bool = False,
    ):
        super().__init__()
        assert check_argument_types()
        # Only the subsampling rate is used by this segmenter.
        self.subsample_rate = subsample_rate

    def pre_segment(
        self,
        xs_pad: torch.Tensor,
        padding_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Randomly keep ``ceil(T * subsample_rate)`` frames per utterance.

        Frames are sampled without replacement and kept in temporal order.
        """
        keep_num = math.ceil(xs_pad.size(1) * self.subsample_rate)
        # Uniform sampling weights over all time steps of every utterance.
        weights = torch.ones(xs_pad.shape[:-1], device=xs_pad.device)
        keep_indices, _ = weights.multinomial(keep_num).sort(dim=-1)
        # Gather the selected frames (and the matching mask entries).
        expanded = keep_indices.unsqueeze(-1).expand(-1, -1, xs_pad.size(-1))
        sampled = xs_pad.gather(1, expanded)
        sampled_mask = padding_mask.gather(1, index=keep_indices)
        return sampled, sampled_mask

    def logit_segment(
        self,
        xs_pad: torch.Tensor,
        padding_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Identity: generator logits are passed through unchanged."""
        return xs_pad, padding_mask
| 1,222 | 28.829268 | 74 | py |
espnet | espnet-master/espnet2/uasr/segmenter/join_segmenter.py | import argparse
from typing import Dict, Optional
import torch
from typeguard import check_argument_types
from espnet2.uasr.segmenter.abs_segmenter import AbsSegmenter
from espnet2.utils.types import str2bool
class JoinSegmenter(AbsSegmenter):
    """Segmenter that merges consecutive frames with identical argmax units."""
    def __init__(
        self,
        cfg: Optional[Dict] = None,
        subsample_rate: float = 0.25,
        mean_pool: str2bool = True,
        mean_join_pool: str2bool = False,
        remove_zeros: str2bool = False,
    ):
        super().__init__()
        assert check_argument_types()
        # A fairseq-style cfg overrides the explicit kwargs.
        if cfg is not None:
            cfg = argparse.Namespace(**cfg["segmentation"])
            assert cfg.type == "JOIN"
            self.subsampling_rate = cfg.subsample_rate
            self.mean_pool = cfg.mean_pool
            self.mean_pool_join = cfg.mean_pool_join
            self.remove_zeros = cfg.remove_zeros
        else:
            # NOTE(review): in this branch ``subsample_rate`` and
            # ``mean_pool`` kwargs are not stored -- confirm they are unused.
            self.mean_pool_join = mean_join_pool
            self.remove_zeros = remove_zeros
    def pre_segment(
        self,
        xs_pad: torch.Tensor,
        padding_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Identity: no pre-segmentation is applied by this segmenter."""
        assert check_argument_types()
        return xs_pad, padding_mask
    def logit_segment(
        self,
        logits: torch.Tensor,
        padding_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Merge runs of frames whose argmax prediction is identical.

        Args:
            logits: (Batch, Time, Channel) generator output logits.
            padding_mask: (Batch, Time) mask, True at padded frames.

        Returns:
            (new_logits, new_padding_mask) with one entry per merged run,
            right-padded to the longest merged sequence in the batch.
        """
        assert check_argument_types()
        preds = logits.argmax(dim=-1)
        if padding_mask.any():
            preds[padding_mask] = -1  # mark pad
        # Per utterance: run values, frame->run index map, and run lengths.
        uniques = []
        batch_size, time_length, channel_size = logits.shape
        for p in preds:
            uniques.append(
                p.cpu().unique_consecutive(return_inverse=True, return_counts=True)
            )
        new_time_length = max(u[0].numel() for u in uniques)
        new_logits = logits.new_zeros(batch_size, new_time_length, channel_size)
        new_pad = padding_mask.new_zeros(batch_size, new_time_length)
        for b in range(batch_size):
            value, index, count = uniques[b]
            # Runs marked -1 are padding; optionally drop unit-0 runs too.
            keep = value != -1
            if self.remove_zeros:
                keep.logical_and_(value != 0)
            if self.training and not self.mean_pool_join:
                # Pick one random frame inside each run (reuses ``value`` as
                # a buffer of per-run start offsets).
                value[0] = 0
                value[1:] = count.cumsum(0)[:-1]
                part = count > 1
                random = torch.rand(part.sum())
                value[part] += (count[part] * random).long()
                new_logits[b, : value.numel()] = logits[b, value]
            else:
                # Mean-pool the logits of each run via scatter-add / divide.
                new_logits[b].index_add_(
                    dim=0, index=index.to(new_logits.device), source=logits[b]
                )
                new_logits[b, : count.numel()] = new_logits[
                    b, : count.numel()
                ] / count.unsqueeze(-1).to(new_logits.device)
            # Left-justify the kept runs and pad the tail.
            new_size = keep.sum()
            if not keep.all():
                kept_logits = new_logits[b, : count.numel()][keep]
                new_logits[b, :new_size] = kept_logits
            if new_size < new_time_length:
                pad = new_time_length - new_size
                new_logits[b, -pad:] = 0
                new_pad[b, -pad:] = True
        return new_logits, new_pad
| 3,165 | 31.639175 | 83 | py |
espnet | espnet-master/espnet2/uasr/generator/abs_generator.py | from abc import ABC, abstractmethod
from typing import Optional, Tuple
import torch
class AbsGenerator(torch.nn.Module, ABC):
    """Abstract base class of the UASR generator."""
    @abstractmethod
    def output_size(self) -> int:
        """Return the dimension of the generator output (unit vocabulary)."""
        raise NotImplementedError
    @abstractmethod
    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Map padded input features to output logits."""
        raise NotImplementedError
| 430 | 21.684211 | 67 | py |
espnet | espnet-master/espnet2/uasr/generator/conv_generator.py | import argparse
import logging
from typing import Dict, Optional
import torch
from typeguard import check_argument_types
from espnet2.uasr.generator.abs_generator import AbsGenerator
from espnet2.utils.types import str2bool
class TransposeLast(torch.nn.Module):
    """Swap the last two dimensions of the input.

    When ``deconstruct_idx`` is given, the input is first indexed with it
    (useful when the upstream module returns a tuple/sequence).
    """

    def __init__(self, deconstruct_idx=None):
        super().__init__()
        self.deconstruct_idx = deconstruct_idx

    def forward(self, x):
        idx = self.deconstruct_idx
        if idx is not None:
            x = x[idx]
        return x.transpose(-2, -1)
class SamePad(torch.nn.Module):
    """Remove the extra trailing frames produced by padded convolutions.

    A causal convolution pads ``kernel_size - 1`` frames; a non-causal one
    with an even kernel yields a single surplus frame.
    """

    def __init__(self, kernel_size, causal=False):
        super().__init__()
        surplus = kernel_size - 1 if causal else int(kernel_size % 2 == 0)
        self.remove = surplus

    def forward(self, x):
        if self.remove <= 0:
            return x
        # Trim the surplus from the end of the time axis.
        return x[:, :, : -self.remove]
class ConvGenerator(AbsGenerator):
    """Convolutional generator for UASR.

    A single strided 1-D convolution maps input features to unit logits,
    with optional batch-norm and a residual projection; also builds one-hot
    "real" samples from target text when it is provided.
    """
    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        cfg: Optional[Dict] = None,
        conv_kernel: int = 3,
        conv_dilation: int = 1,
        conv_stride: int = 9,
        pad: int = -1,
        bias: str2bool = False,
        dropout: float = 0.0,
        batch_norm: str2bool = True,
        batch_norm_weight: float = 30.0,
        residual: str2bool = True,
    ):
        super().__init__()
        assert check_argument_types()
        self.input_dim = input_dim
        self.output_dim = output_dim
        # A fairseq-style cfg overrides the explicit kwargs.
        if cfg is not None:
            cfg = argparse.Namespace(**cfg)
            self.conv_kernel = cfg.generator_kernel
            self.conv_dilation = cfg.generator_dilation
            self.conv_stride = cfg.generator_stride
            self.pad = cfg.generator_pad
            self.bias = cfg.generator_bias
            self.dropout = torch.nn.Dropout(cfg.generator_dropout)
            # TODO(Dongji): batch_norm is not in cfg
            self.batch_norm = False
            self.batch_norm_weight = cfg.generator_batch_norm
            self.residual = cfg.generator_residual
        else:
            self.conv_kernel = conv_kernel
            self.conv_dilation = conv_dilation
            self.conv_stride = conv_stride
            self.output_dim = output_dim
            self.pad = pad
            self.bias = bias
            self.dropout = torch.nn.Dropout(dropout)
            self.batch_norm = batch_norm
            self.batch_norm_weight = batch_norm_weight
            self.residual = residual
        # Negative ``pad`` means "same" padding derived from the kernel size.
        if self.pad < 0:
            self.padding = self.conv_kernel // 2
        else:
            self.padding = self.pad
        # Conv1d operates on (B, C, T); TransposeLast converts to/from
        # the (B, T, C) layout used elsewhere.
        self.proj = torch.nn.Sequential(
            TransposeLast(),
            torch.nn.Conv1d(
                input_dim,
                output_dim,
                kernel_size=self.conv_kernel,
                stride=self.conv_stride,
                dilation=self.conv_dilation,
                padding=self.padding,
                bias=self.bias,
            ),
            TransposeLast(),
        )
        if self.batch_norm:
            self.bn = torch.nn.BatchNorm1d(input_dim)
            self.bn.weight.data.fill_(self.batch_norm_weight)
        if self.residual:
            self.in_proj = torch.nn.Linear(input_dim, input_dim)
    def output_size(self):
        """Return the output (unit vocabulary) dimension."""
        return self.output_dim
    def forward(
        self,
        feats: torch.Tensor,
        text: Optional[torch.Tensor],
        feats_padding_mask: torch.Tensor,
    ):
        """Generate unit logits (and one-hot real samples when text given).

        Returns:
            generated_sample: (B, T', output_dim) logits.
            real_sample: one-hot tensor built from ``text``, or None.
            inter_x: residual projection output (input to the pseudo-label
                loss), or None when no residual branch is used.
            generated_sample_padding_mask: mask matching ``generated_sample``.
        """
        inter_x = None
        if self.batch_norm:
            feats = self.bn_padded_data(feats, feats_padding_mask)
        if self.residual:
            inter_x = self.in_proj(self.dropout(feats))
            feats = feats + inter_x
        feats = self.dropout(feats)
        generated_sample = self.proj(feats)
        # Subsample the padding mask with the conv stride; the conv output
        # length may still differ by a few frames, so re-align below.
        generated_sample_padding_mask = feats_padding_mask[:, :: self.conv_stride]
        if generated_sample_padding_mask.size(1) != generated_sample.size(1):
            new_padding = generated_sample_padding_mask.new_zeros(
                generated_sample.shape[:-1]
            )
            diff = new_padding.size(1) - generated_sample_padding_mask.size(1)
            if diff > 0:
                # Output longer than strided mask: the first ``diff`` entries
                # stay False (treated as valid frames).
                new_padding[:, diff:] = generated_sample_padding_mask
            else:
                logging.info("ATTENTION: make sure that you are using V2 instead of V1")
                assert diff < 0
                # Output shorter: truncate the strided mask.
                new_padding = generated_sample_padding_mask[:, :diff]
            generated_sample_padding_mask = new_padding
        real_sample = None
        if text is not None:
            assert torch.count_nonzero(text) > 0
            # One-hot encode the target token ids as the "real" samples.
            real_sample = generated_sample.new_zeros(text.numel(), self.output_dim)
            real_sample.scatter_(1, text.view(-1, 1).long(), 1)
            real_sample = real_sample.view(text.shape + (self.output_dim,))
        return generated_sample, real_sample, inter_x, generated_sample_padding_mask
    def bn_padded_data(self, feature: torch.Tensor, padding_mask: torch.Tensor):
        """Apply BatchNorm1d to valid (unpadded) positions only."""
        normed_feature = feature.clone()
        normed_feature[~padding_mask] = self.bn(
            feature[~padding_mask].unsqueeze(-1)
        ).squeeze(-1)
        return normed_feature
| 5,254 | 31.84375 | 88 | py |
espnet | espnet-master/espnet2/st/espnet_model.py | import logging
from contextlib import contextmanager
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.e2e_asr_common import ErrorCalculator as ASRErrorCalculator
from espnet.nets.e2e_mt_common import ErrorCalculator as MTErrorCalculator
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
# Use the real AMP autocast on torch>=1.6; otherwise fall back to a no-op
# context manager so ``with autocast(False):`` works on older torch.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        yield
class ESPnetSTModel(AbsESPnetModel):
    """CTC-attention hybrid Encoder-Decoder model

    Speech translation model with optional auxiliary ASR (CTC and/or
    attention decoder) and MT decoder sub-tasks, mixed into the total
    loss via ``asr_weight`` and ``mt_weight``.
    """

    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        preencoder: Optional[AbsPreEncoder],
        encoder: AbsEncoder,
        postencoder: Optional[AbsPostEncoder],
        decoder: AbsDecoder,
        extra_asr_decoder: Optional[AbsDecoder],
        extra_mt_decoder: Optional[AbsDecoder],
        ctc: Optional[CTC],
        src_vocab_size: Optional[int],
        src_token_list: Optional[Union[Tuple[str, ...], List[str]]],
        asr_weight: float = 0.0,
        mt_weight: float = 0.0,
        mtlalpha: float = 0.0,
        ignore_id: int = -1,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        report_cer: bool = True,
        report_wer: bool = True,
        report_bleu: bool = True,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        extract_feats_in_collect_stats: bool = True,
    ):
        assert check_argument_types()
        assert 0.0 <= asr_weight < 1.0, "asr_weight should be [0.0, 1.0)"
        assert 0.0 <= mt_weight < 1.0, "mt_weight should be [0.0, 1.0)"
        assert 0.0 <= mtlalpha <= 1.0, "mtlalpha should be [0.0, 1.0]"

        super().__init__()
        # note that eos is the same as sos (equivalent ID)
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        # Source-language sos/eos only exist when an ASR sub-task is attached.
        self.src_sos = src_vocab_size - 1 if src_vocab_size else None
        self.src_eos = src_vocab_size - 1 if src_vocab_size else None
        self.vocab_size = vocab_size
        self.src_vocab_size = src_vocab_size
        self.ignore_id = ignore_id
        self.asr_weight = asr_weight
        self.mt_weight = mt_weight
        self.mtlalpha = mtlalpha
        self.token_list = token_list.copy()

        self.frontend = frontend
        self.specaug = specaug
        self.normalize = normalize
        self.preencoder = preencoder
        self.postencoder = postencoder
        self.encoder = encoder
        self.decoder = (
            decoder  # TODO(jiatong): directly implement multi-decoder structure at here
        )

        self.criterion_st = LabelSmoothingLoss(
            size=vocab_size,
            padding_idx=ignore_id,
            smoothing=lsm_weight,
            normalize_length=length_normalized_loss,
        )
        self.criterion_asr = LabelSmoothingLoss(
            size=src_vocab_size,
            padding_idx=ignore_id,
            smoothing=lsm_weight,
            normalize_length=length_normalized_loss,
        )

        # submodule for ASR task
        if self.asr_weight > 0:
            assert (
                src_token_list is not None
            ), "Missing src_token_list, cannot add asr module to st model"
            if self.mtlalpha > 0.0:
                self.ctc = ctc
            if self.mtlalpha < 1.0:
                self.extra_asr_decoder = extra_asr_decoder
            elif extra_asr_decoder is not None:
                logging.warning(
                    "Not using extra_asr_decoder because "
                    "mtlalpha is set as {} (== 1.0)".format(mtlalpha),
                )

        # submodule for MT task
        if self.mt_weight > 0:
            self.extra_mt_decoder = extra_mt_decoder
        elif extra_mt_decoder is not None:
            logging.warning(
                "Not using extra_mt_decoder because "
                "mt_weight is set as {} (== 0)".format(mt_weight),
            )

        # MT error calculator
        if report_bleu:
            self.mt_error_calculator = MTErrorCalculator(
                token_list, sym_space, sym_blank, report_bleu
            )
        else:
            self.mt_error_calculator = None

        # ASR error calculator
        if self.asr_weight > 0 and (report_cer or report_wer):
            assert (
                src_token_list is not None
            ), "Missing src_token_list, cannot add asr module to st model"
            self.asr_error_calculator = ASRErrorCalculator(
                src_token_list, sym_space, sym_blank, report_cer, report_wer
            )
        else:
            self.asr_error_calculator = None

        self.extract_feats_in_collect_stats = extract_feats_in_collect_stats

    # TODO(jiatong): add multilingual related functions
def forward(
    self,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    src_text: Optional[torch.Tensor] = None,
    src_text_lengths: Optional[torch.Tensor] = None,
    **kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
    """Frontend + Encoder + Decoder + Calc loss

    Args:
        speech: (Batch, Length, ...)
        speech_lengths: (Batch,)
        text: (Batch, Length)
        text_lengths: (Batch,)
        src_text: (Batch, length)
        src_text_lengths: (Batch,)
        kwargs: "utt_id" is among the input.

    Returns:
        (loss, stats, weight) prepared for DataParallel gathering.
    """
    assert text_lengths.dim() == 1, text_lengths.shape
    # Check that batch_size is unified
    assert (
        speech.shape[0]
        == speech_lengths.shape[0]
        == text.shape[0]
        == text_lengths.shape[0]
    ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
    # additional checks with valid src_text
    if src_text is not None:
        assert src_text_lengths.dim() == 1, src_text_lengths.shape
        assert text.shape[0] == src_text.shape[0] == src_text_lengths.shape[0], (
            text.shape,
            src_text.shape,
            src_text_lengths.shape,
        )
    batch_size = speech.shape[0]

    # for data-parallel: trim padding beyond the longest sequence
    text = text[:, : text_lengths.max()]
    if src_text is not None:
        src_text = src_text[:, : src_text_lengths.max()]

    # 1. Encoder
    encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

    # 2a. Attention-decoder branch (ST)
    loss_st_att, acc_st_att, bleu_st_att = self._calc_mt_att_loss(
        encoder_out, encoder_out_lens, text, text_lengths, st=True
    )

    # 2b. CTC branch (auxiliary ASR)
    if self.asr_weight > 0:
        assert src_text is not None, "missing source text for asr sub-task of ST"
    if self.asr_weight > 0 and self.mtlalpha > 0:
        loss_asr_ctc, cer_asr_ctc = self._calc_ctc_loss(
            encoder_out, encoder_out_lens, src_text, src_text_lengths
        )
    else:
        loss_asr_ctc, cer_asr_ctc = 0, None

    # 2c. Attention-decoder branch (extra ASR)
    if self.asr_weight > 0 and self.mtlalpha < 1.0:
        (
            loss_asr_att,
            acc_asr_att,
            cer_asr_att,
            wer_asr_att,
        ) = self._calc_asr_att_loss(
            encoder_out, encoder_out_lens, src_text, src_text_lengths
        )
    else:
        loss_asr_att, acc_asr_att, cer_asr_att, wer_asr_att = 0, None, None, None

    # 2d. Attention-decoder branch (extra MT)
    if self.mt_weight > 0:
        # Bug fix: _calc_mt_att_loss always returns a 3-tuple
        # (loss, acc, bleu); the original unpacked only two values here,
        # which raised ValueError whenever mt_weight > 0.  The BLEU of the
        # auxiliary MT branch is not reported, so it is discarded.
        loss_mt_att, acc_mt_att, _ = self._calc_mt_att_loss(
            encoder_out, encoder_out_lens, text, text_lengths, st=False
        )
    else:
        loss_mt_att, acc_mt_att = 0, None

    # 3. Loss computation
    # The ASR loss interpolates CTC and attention with mtlalpha.
    asr_ctc_weight = self.mtlalpha
    loss_st = loss_st_att
    if asr_ctc_weight == 1.0:
        loss_asr = loss_asr_ctc
    elif asr_ctc_weight == 0.0:
        loss_asr = loss_asr_att
    else:
        loss_asr = (
            asr_ctc_weight * loss_asr_ctc + (1 - asr_ctc_weight) * loss_asr_att
        )
    # NOTE(review): mt_weight is applied both here and in the total below,
    # i.e. the MT branch is effectively weighted by mt_weight**2 — confirm
    # this double-weighting is intended.
    loss_mt = self.mt_weight * loss_mt_att
    loss = (
        (1 - self.asr_weight - self.mt_weight) * loss_st
        + self.asr_weight * loss_asr
        + self.mt_weight * loss_mt
    )

    # Unused sub-task losses are plain ints (0); only tensors are detached.
    stats = dict(
        loss=loss.detach(),
        loss_asr=loss_asr.detach()
        if type(loss_asr) not in {float, int}
        else loss_asr,
        loss_mt=loss_mt.detach() if type(loss_mt) is not float else loss_mt,
        loss_st=loss_st.detach(),
        acc_asr=acc_asr_att,
        acc_mt=acc_mt_att,
        acc=acc_st_att,
        cer_ctc=cer_asr_ctc,
        cer=cer_asr_att,
        wer=wer_asr_att,
        bleu=bleu_st_att,
    )

    # force_gatherable: to-device and to-tensor if scalar for DataParallel
    loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
    return loss, stats, weight
def collect_feats(
    self,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    src_text: Optional[torch.Tensor] = None,
    src_text_lengths: Optional[torch.Tensor] = None,
    **kwargs,
) -> Dict[str, torch.Tensor]:
    """Extract frontend features for statistics collection.

    Text arguments are accepted for interface compatibility but unused.
    """
    extracted, extracted_lens = self._extract_feats(speech, speech_lengths)
    return dict(feats=extracted, feats_lengths=extracted_lens)
def encode(
    self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Frontend + Encoder. Note that this method is used by st_inference.py

    Args:
        speech: (Batch, Length, ...)
        speech_lengths: (Batch, )

    Returns:
        encoder_out: (Batch, Length2, Dim2)
        encoder_out_lens: (Batch,)
    """
    # Feature extraction/augmentation/normalization run in fp32 even
    # under AMP.
    with autocast(False):
        # 1. Extract feats
        feats, feats_lengths = self._extract_feats(speech, speech_lengths)

        # 2. Data augmentation
        if self.specaug is not None and self.training:
            feats, feats_lengths = self.specaug(feats, feats_lengths)

        # 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
        if self.normalize is not None:
            feats, feats_lengths = self.normalize(feats, feats_lengths)

    # Pre-encoder, e.g. used for raw input data
    if self.preencoder is not None:
        feats, feats_lengths = self.preencoder(feats, feats_lengths)

    # 4. Forward encoder
    # feats: (Batch, Length, Dim)
    # -> encoder_out: (Batch, Length2, Dim2)
    encoder_out, encoder_out_lens, _ = self.encoder(feats, feats_lengths)

    # Post-encoder, e.g. NLU
    if self.postencoder is not None:
        encoder_out, encoder_out_lens = self.postencoder(
            encoder_out, encoder_out_lens
        )

    assert encoder_out.size(0) == speech.size(0), (
        encoder_out.size(),
        speech.size(0),
    )
    assert encoder_out.size(1) <= encoder_out_lens.max(), (
        encoder_out.size(),
        encoder_out_lens.max(),
    )

    return encoder_out, encoder_out_lens
def _extract_feats(
    self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Trim padding and run the frontend (if any) on raw speech."""
    assert speech_lengths.dim() == 1, speech_lengths.shape

    # Strip the padded tail beyond the longest utterance (for data-parallel).
    trimmed = speech[:, : speech_lengths.max()]

    if self.frontend is None:
        # No frontend: the raw waveform is used as the feature as-is.
        return trimmed, speech_lengths
    # e.g. STFT + filterbank: (Batch, NSamples) -> (Batch, NFrames, Dim)
    return self.frontend(trimmed, speech_lengths)
def _calc_mt_att_loss(
    self,
    encoder_out: torch.Tensor,
    encoder_out_lens: torch.Tensor,
    ys_pad: torch.Tensor,
    ys_pad_lens: torch.Tensor,
    st: bool = True,
):
    """Attention loss for the ST decoder (st=True) or extra MT decoder.

    Returns:
        (loss_att, acc_att, bleu_att); BLEU is None while training or
        when no MT error calculator is configured.
    """
    ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
    ys_in_lens = ys_pad_lens + 1

    # 1. Forward decoder (main ST decoder or the auxiliary MT decoder)
    if st:
        decoder_out, _ = self.decoder(
            encoder_out, encoder_out_lens, ys_in_pad, ys_in_lens
        )
    else:
        decoder_out, _ = self.extra_mt_decoder(
            encoder_out, encoder_out_lens, ys_in_pad, ys_in_lens
        )

    # 2. Compute attention loss
    loss_att = self.criterion_st(decoder_out, ys_out_pad)
    acc_att = th_accuracy(
        decoder_out.view(-1, self.vocab_size),
        ys_out_pad,
        ignore_label=self.ignore_id,
    )

    # Compute BLEU using the attention-decoder greedy output (eval only)
    if self.training or self.mt_error_calculator is None:
        bleu_att = None
    else:
        ys_hat = decoder_out.argmax(dim=-1)
        bleu_att = self.mt_error_calculator(ys_hat.cpu(), ys_pad.cpu())

    return loss_att, acc_att, bleu_att
def _calc_asr_att_loss(
    self,
    encoder_out: torch.Tensor,
    encoder_out_lens: torch.Tensor,
    ys_pad: torch.Tensor,
    ys_pad_lens: torch.Tensor,
):
    """Attention loss of the extra ASR decoder, with CER/WER at eval time.

    Returns:
        (loss_att, acc_att, cer_att, wer_att); CER/WER are None while
        training or when no ASR error calculator is configured.
    """
    # Source-language sos/eos are used for the ASR sub-task targets.
    ys_in_pad, ys_out_pad = add_sos_eos(
        ys_pad, self.src_sos, self.src_eos, self.ignore_id
    )
    ys_in_lens = ys_pad_lens + 1

    # 1. Forward decoder
    decoder_out, _ = self.extra_asr_decoder(
        encoder_out, encoder_out_lens, ys_in_pad, ys_in_lens
    )

    # 2. Compute attention loss
    loss_att = self.criterion_asr(decoder_out, ys_out_pad)
    acc_att = th_accuracy(
        decoder_out.view(-1, self.src_vocab_size),
        ys_out_pad,
        ignore_label=self.ignore_id,
    )

    # Compute cer/wer using attention-decoder greedy output (eval only)
    if self.training or self.asr_error_calculator is None:
        cer_att, wer_att = None, None
    else:
        ys_hat = decoder_out.argmax(dim=-1)
        cer_att, wer_att = self.asr_error_calculator(ys_hat.cpu(), ys_pad.cpu())

    return loss_att, acc_att, cer_att, wer_att
def _calc_ctc_loss(
    self,
    encoder_out: torch.Tensor,
    encoder_out_lens: torch.Tensor,
    ys_pad: torch.Tensor,
    ys_pad_lens: torch.Tensor,
):
    """CTC loss on the auxiliary ASR branch, plus CER at eval time."""
    ctc_loss = self.ctc(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)

    # CER from the CTC greedy path is only computed outside training and
    # when an ASR error calculator has been configured.
    if self.training or self.asr_error_calculator is None:
        return ctc_loss, None

    greedy_hyps = self.ctc.argmax(encoder_out).data
    ctc_cer = self.asr_error_calculator(
        greedy_hyps.cpu(), ys_pad.cpu(), is_ctc=True
    )
    return ctc_loss, ctc_cer
| 16,065 | 34.781737 | 88 | py |
espnet | espnet-master/espnet2/hubert/espnet_model.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Thanks to Abdelrahman Mohamed and Wei-Ning Hsu's help in this implementation,
# Their origial Hubert work is in:
# Paper: https://arxiv.org/pdf/2106.07447.pdf
# Code in Fairseq: https://github.com/pytorch/fairseq/tree/master/examples/hubert
import logging
from contextlib import contextmanager
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.hubert.hubert_loss import HubertPretrainLoss
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.e2e_asr_common import ErrorCalculator
# Use native AMP autocast when available (torch>=1.6); otherwise install a
# no-op context manager with the same signature so callers can always write
# `with autocast(False): ...` unconditionally.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        yield
class TorchAudioHubertPretrainModel(AbsESPnetModel):
    """TorchAudio Hubert Pretrain model"""

    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        preencoder: Optional[AbsPreEncoder],
        encoder: AbsEncoder,
        ignore_id: int = -1,
    ):
        assert check_argument_types()
        super().__init__()

        self.vocab_size = vocab_size
        self.ignore_id = ignore_id
        self.token_list = token_list.copy()

        self.frontend = frontend
        self.specaug = specaug
        self.normalize = normalize
        self.preencoder = preencoder
        self.encoder = encoder
        self.error_calculator = None
        # Diagnostic counter of batches whose loss came out NaN/inf.
        self.nan_loss_count = 0.0
def forward(
    self,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    **kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
    """Frontend + Encoder + Calc loss

    Args:
        speech: (Batch, Length, ...)
        speech_lengths: (Batch, )
        text: (Batch, Length)
        text_lengths: (Batch,)
        kwargs: "utt_id" is among the input.
    """
    assert text_lengths.dim() == 1, text_lengths.shape
    # Check that batch_size is unified
    assert (
        speech.shape[0]
        == speech_lengths.shape[0]
        == text.shape[0]
        == text_lengths.shape[0]
    ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
    batch_size = speech.shape[0]

    # for data-parallel
    text = text[:, : text_lengths.max()]

    # 1. Encoder — yields masked/unmasked logits and the feature penalty
    logit_m, logit_u, feature_penalty = self.encode(
        speech, speech_lengths, text, text_lengths
    )

    # 2a. Hubert criterion
    loss = self._calc_hubert_loss(
        logit_m,
        logit_u,
        feature_penalty,
    )

    # Track (but do not skip) batches with a NaN/inf loss.
    if not torch.isinf(loss) and not torch.isnan(loss):
        pass
        # logging.warning(f"loss, {loss.item() / logit_m.size(0)}")
    else:
        self.nan_loss_count += 1
        logging.warning(f"nan_loss_count, {self.nan_loss_count}")

    # log accuracies of masked and unmasked frames
    correct_m, count_m = self._compute_correct(logit_m)
    correct_u, count_u = self._compute_correct(logit_u)

    # NOTE(review): the divisions below raise ZeroDivisionError when
    # count_m or count_u is 0 (empty logits) — confirm the encoder always
    # produces at least one masked and one unmasked frame.
    stats = dict(
        loss=loss.detach(),
        correct_m=correct_m,
        count_m=count_m,
        acc_m=correct_m / count_m,
        correct_u=correct_u,
        count_u=count_u,
        acc_u=correct_u / count_u,
    )

    # force_gatherable: to-device and to-tensor if scalar for DataParallel
    loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
    return loss, stats, weight
def collect_feats(
    self,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    **kwargs,
) -> Dict[str, torch.Tensor]:
    """Extract frontend features for statistics collection.

    Text arguments are accepted for interface compatibility but unused.
    """
    extracted, extracted_lens = self._extract_feats(speech, speech_lengths)
    return dict(feats=extracted, feats_lengths=extracted_lens)
def encode(
    self,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    y_pad: torch.Tensor,
    y_pad_length: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Frontend + Encoder. Note that this method is used by asr_inference.py

    Args:
        speech: (Batch, Length, ...)
        speech_lengths: (Batch, )
        y_pad: (Batch, Length, ...)
        y_pad_length: (Batch, )

    Returns:
        The raw encoder output.  NOTE(review): forward() unpacks this as
        the triple (logit_m, logit_u, feature_penalty), so the annotated
        Tuple[Tensor, Tensor] return type appears inaccurate — confirm.
    """
    # Feature extraction/augmentation/normalization run in fp32 even
    # under AMP.
    with autocast(False):
        # 1. Extract feats
        feats, feats_lengths = self._extract_feats(speech, speech_lengths)

        # 2. Data augmentation
        if self.specaug is not None and self.training:
            feats, feats_lengths = self.specaug(feats, feats_lengths)

        # 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
        if self.normalize is not None:
            feats, feats_lengths = self.normalize(feats, feats_lengths)

    # Pre-encoder, e.g. used for raw input data
    if self.preencoder is not None:
        feats, feats_lengths = self.preencoder(feats, feats_lengths)

    # 4. Forward encoder
    # feats: (Batch, Length, Dim)
    # -> encoder_out: (Batch, Length2, Dim2)
    encoder_out = self.encoder(feats, feats_lengths, y_pad, y_pad_length)

    return encoder_out
def _extract_feats(
    self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Trim padding and run the frontend (if any) on raw speech."""
    assert speech_lengths.dim() == 1, speech_lengths.shape

    # Strip the padded tail beyond the longest utterance (for data-parallel).
    trimmed = speech[:, : speech_lengths.max()]

    if self.frontend is None:
        # No frontend: the raw waveform is used as the feature as-is.
        return trimmed, speech_lengths
    # e.g. STFT + filterbank: (Batch, NSamples) -> (Batch, NFrames, Dim)
    return self.frontend(trimmed, speech_lengths)
def _compute_correct(
    self,
    logits,
):
    """Count frames whose class 0 is the strict argmax of the logits.

    Args:
        logits: Tensor of shape (frames, classes); may be empty.

    Returns:
        (correct, count): frames where class 0 wins, and total frames.
    """
    if logits.numel() == 0:
        return 0, 0
    assert logits.dim() > 1, logits.shape
    # Renamed locals: the original bound them to `max` and `min`,
    # shadowing the Python builtins.
    argmax_hit = logits.argmax(-1) == 0
    argmin_hit = logits.argmin(-1) == 0
    # Frames where class 0 is both argmax and argmin have constant logits;
    # they are excluded from the correct count.
    ambiguous = argmax_hit & argmin_hit
    correct = argmax_hit.long().sum().item() - ambiguous.long().sum().item()
    count = argmax_hit.numel()
    return correct, count
def _calc_hubert_loss(
    self,
    logit_m: Optional[torch.Tensor],
    logit_u: Optional[torch.Tensor],
    feature_penalty: torch.Tensor,
    masked_weight: float = 1.0,
    unmasked_weight: float = 0.0,
    feature_weight: float = 10.0,
    reduction: str = "sum",
) -> torch.Tensor:
    """Compute the cross-entropy loss on HuBERT masked and non-masked logits.

    Args:
        logit_m (Tensor or None): The masked logit Tensor of dimension
            `(masked_frames, final_dim)`.
        logit_u (Tensor or None): The non-masked logit Tensor of dimension
            `(unmasked_frames, final_dim)`.
        feature_penalty (Tensor): The feature mean value for additional penalty
            loss.
        masked_weight (float, optional): The weight for masked cross-entropy loss
            (Default: ``1.0``).
        unmasked_weight (float, optional): The weight for non-masked cross-entropy
            loss (Default: ``0.0``).
        feature_weight (float, optional): The weight for feature penalty loss
            (Default: ``10.0``).
        reduction (str, optional): The reduction method for cross-entropy loss
            (Default: ``"sum"``).
    Ref:
        torchaudio: examples/hubert/loss/hubert_loss.py
    """
    # Bug fix: the original read `logit_m.shape[0]` unconditionally, before
    # the `logit_m is not None` guard below, so it crashed whenever no
    # frames were masked.  Scale the penalty by 0 masked frames instead.
    num_masked = logit_m.shape[0] if logit_m is not None else 0
    loss = feature_penalty * feature_weight * num_masked
    if logit_m is not None:
        # Class 0 is the positive (correct) codeword by construction.
        target_m = torch.zeros(
            logit_m.shape[0], dtype=torch.long, device=logit_m.device
        )
        loss_m = torch.nn.functional.cross_entropy(
            logit_m, target_m, reduction=reduction
        )
        loss += loss_m * masked_weight
    if logit_u is not None:
        # Bug fix: targets now live on `logit_u.device`.  The original used
        # `logit_m.device`, which fails when logit_m is None and could
        # mismatch devices otherwise.
        target_u = torch.zeros(
            logit_u.shape[0], dtype=torch.long, device=logit_u.device
        )
        loss_u = torch.nn.functional.cross_entropy(
            logit_u, target_u, reduction=reduction
        )
        loss += loss_u * unmasked_weight
    return loss
class HubertPretrainModel(AbsESPnetModel):
    """Hubert Pretrain model"""

    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        preencoder: Optional[AbsPreEncoder],
        encoder: AbsEncoder,
        ignore_id: int = -1,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        report_cer: bool = False,
        report_wer: bool = False,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        pred_masked_weight: float = 1.0,
        pred_nomask_weight: float = 0.0,
        loss_weights: float = 0.0,
    ):
        assert check_argument_types()
        super().__init__()
        # note that eos is the same as sos (equivalent ID)
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        self.vocab_size = vocab_size
        self.ignore_id = ignore_id
        self.token_list = token_list.copy()

        self.frontend = frontend
        self.specaug = specaug
        self.normalize = normalize
        self.preencoder = preencoder
        self.encoder = encoder

        # Criterion mirrors the fairseq HubertCriterion weighting scheme.
        self.criterion_hubert = HubertPretrainLoss(
            pred_masked_weight,
            pred_nomask_weight,
            loss_weights,
        )
        self.pred_masked_weight = pred_masked_weight
        self.pred_nomask_weight = pred_nomask_weight
        self.loss_weights = loss_weights

        if report_cer or report_wer:
            self.error_calculator = ErrorCalculator(
                token_list, sym_space, sym_blank, report_cer, report_wer
            )
        else:
            self.error_calculator = None
def forward(
    self,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    **kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
    """Frontend + Encoder + Calc loss

    Args:
        speech: (Batch, Length, ...)
        speech_lengths: (Batch, )
        text: (Batch, Length)
        text_lengths: (Batch,)
        kwargs: "utt_id" is among the input.
    """
    assert text_lengths.dim() == 1, text_lengths.shape
    # Check that batch_size is unified
    assert (
        speech.shape[0]
        == speech_lengths.shape[0]
        == text.shape[0]
        == text_lengths.shape[0]
    ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
    batch_size = speech.shape[0]

    # for data-parallel
    text = text[:, : text_lengths.max()]

    # 1. Encoder
    encoder_out = self.encode(speech, speech_lengths, text, text_lengths)

    # 2a. Hubert criterion (loss plus masked/unmasked accuracies)
    loss, acc_mask, acc_unmask = self._calc_hubert_loss(
        encoder_out,
    )

    stats = dict(
        loss=loss.detach(),
        acc_mask=acc_mask,
        acc_unmask=acc_unmask,
        acc=acc_mask,
    )

    # force_gatherable: to-device and to-tensor if scalar for DataParallel
    loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
    return loss, stats, weight
def collect_feats(
    self,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    **kwargs,
) -> Dict[str, torch.Tensor]:
    """Extract frontend features for statistics collection.

    Text arguments are accepted for interface compatibility but unused.
    """
    extracted, extracted_lens = self._extract_feats(speech, speech_lengths)
    return dict(feats=extracted, feats_lengths=extracted_lens)
def encode(
    self,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    y_pad: torch.Tensor,
    y_pad_length: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Frontend + Encoder. Note that this method is used by asr_inference.py

    Args:
        speech: (Batch, Length, ...)
        speech_lengths: (Batch, )
        y_pad: (Batch, Length, ...)
        y_pad_length: (Batch, )
    """
    # Feature extraction/augmentation/normalization run in fp32 even
    # under AMP.
    with autocast(False):
        # 1. Extract feats
        feats, feats_lengths = self._extract_feats(speech, speech_lengths)

        # 2. Data augmentation
        if self.specaug is not None and self.training:
            feats, feats_lengths = self.specaug(feats, feats_lengths)

        # 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
        if self.normalize is not None:
            feats, feats_lengths = self.normalize(feats, feats_lengths)

    # Pre-encoder, e.g. used for raw input data
    if self.preencoder is not None:
        feats, feats_lengths = self.preencoder(feats, feats_lengths)

    # 4. Forward encoder
    # feats: (Batch, Length, Dim)
    # -> encoder_out: (Batch, Length2, Dim2)
    encoder_out = self.encoder(feats, feats_lengths, y_pad, y_pad_length)

    # Sanity check: logits must exist for any branch with non-zero weight
    # (fairseq-style encoders wrap the real model in `.encoder`).
    if hasattr(self.encoder, "encoder"):
        logp_m_list = self.encoder.encoder.get_logits(encoder_out, True)
        assert self.pred_masked_weight == 0 or len(logp_m_list) > 0
        logp_u_list = self.encoder.encoder.get_logits(encoder_out, False)
        assert self.pred_nomask_weight == 0 or len(logp_u_list) > 0
    return encoder_out
def _extract_feats(
    self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Trim padding and run the frontend (if any) on raw speech."""
    assert speech_lengths.dim() == 1, speech_lengths.shape

    # Strip the padded tail beyond the longest utterance (for data-parallel).
    trimmed = speech[:, : speech_lengths.max()]

    if self.frontend is None:
        # No frontend: the raw waveform is used as the feature as-is.
        return trimmed, speech_lengths
    # e.g. STFT + filterbank: (Batch, NSamples) -> (Batch, NFrames, Dim)
    return self.frontend(trimmed, speech_lengths)
def compute_correct(
    self,
    logits,
):
    """Count frames whose class 0 is the strict argmax of the logits.

    Args:
        logits: Tensor of shape (frames, classes); may be empty.

    Returns:
        (correct, count): frames where class 0 wins, and total frames.
    """
    if logits.numel() == 0:
        return 0, 0
    assert logits.dim() > 1, logits.shape
    # Renamed locals: the original bound them to `max` and `min`,
    # shadowing the Python builtins.
    argmax_hit = logits.argmax(-1) == 0
    argmin_hit = logits.argmin(-1) == 0
    # Frames where class 0 is both argmax and argmin have constant logits;
    # they are excluded from the correct count.
    ambiguous = argmax_hit & argmin_hit
    correct = argmax_hit.long().sum().item() - ambiguous.long().sum().item()
    count = argmax_hit.numel()
    return correct, count
def _calc_hubert_loss(
    self,
    encoder_out: Dict[str, torch.Tensor],
):
    """Apply the HuBERT criterion and compute masked/unmasked accuracies.

    Returns:
        (loss, acc_m, acc_u): total pre-training loss and the prediction
        accuracies over masked and unmasked frames.
    """
    # 1. Compute hubert loss
    loss, logp_m_list, logp_u_list = self.criterion_hubert(
        self.encoder.encoder, encoder_out
    )
    corr_masked, count_masked = 0, 0
    corr_unmask, count_unmask = 0, 0
    # Accuracy bookkeeping only — no gradients needed.
    with torch.no_grad():
        for i, logp_m in enumerate(logp_m_list):
            corr_m, count_m = self.compute_correct(logp_m)
            corr_masked += corr_m
            count_masked += count_m
        for i, logp_u in enumerate(logp_u_list):
            corr_u, count_u = self.compute_correct(logp_u)
            corr_unmask += corr_u
            count_unmask += count_u

    # The epsilon keeps the ratios finite when no frames were counted.
    acc_m = corr_masked / (count_masked + 1e-10)
    acc_u = corr_unmask / (count_unmask + 1e-10)
    return loss, acc_m, acc_u
| 16,726 | 33.559917 | 86 | py |
espnet | espnet-master/espnet2/hubert/hubert_loss.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# The HubertPretrainLoss Module uses code from Fairseq:
# https://github.com/pytorch/fairseq/blob/master/fairseq/criterions/hubert_criterion.py
#
# Thanks to Abdelrahman Mohamed and Wei-Ning Hsu's help in this implementation,
# Their origial Hubert work is in:
# Paper: https://arxiv.org/pdf/2106.07447.pdf
# Code in Fairseq: https://github.com/pytorch/fairseq/tree/master/examples/hubert
"""Hubert Pretrain Loss module."""
import torch.nn.functional as F
from torch import nn
class HubertPretrainLoss(nn.Module):
    """Hubert criterion module.

    Args:
        pred_masked_weight: weight for predictive loss for masked frames
        pred_nomask_weight: weight for predictive loss for unmasked frames
        loss_weights: weights for additional loss terms (not first one)
    """

    def __init__(
        self,
        pred_masked_weight: float = 1.0,
        pred_nomask_weight: float = 0.0,
        loss_weights: float = 10.0,
    ):
        super(HubertPretrainLoss, self).__init__()
        self.pred_masked_weight = pred_masked_weight
        self.pred_nomask_weight = pred_nomask_weight
        self.loss_weights = loss_weights

    def forward(self, model, enc_outputs, reduce=True):
        """Compute the HuBERT pre-training loss.

        Args:
            model: encoder exposing get_logits/get_targets/get_extra_losses.
            enc_outputs: encoder outputs passed through to those accessors.
            reduce: if True, sum the per-frame cross-entropy.

        Returns:
            (total loss, masked logits list, unmasked logits list).
        """
        loss = 0.0
        sample_size = 0
        reduction = "sum" if reduce else "none"

        # Cross-entropy over masked frames.
        loss_m_list = []
        logp_m_list = model.get_logits(enc_outputs, True)
        targ_m_list = model.get_targets(enc_outputs, True)
        for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)):
            loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction)
            loss_m_list.append(loss_m)
        if self.pred_masked_weight > 0:
            loss += self.pred_masked_weight * sum(loss_m_list)
            sample_size += targ_m_list[0].numel()

        # Cross-entropy over unmasked frames.
        loss_u_list = []
        logp_u_list = model.get_logits(enc_outputs, False)
        targ_u_list = model.get_targets(enc_outputs, False)
        for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)):
            loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction)
            loss_u_list.append(loss_u)
        if self.pred_nomask_weight > 0:
            loss += self.pred_nomask_weight * sum(loss_u_list)
            sample_size += targ_u_list[0].numel()

        # Extra loss (e.g. feature penalty), scaled by the sample size.
        if self.loss_weights > 0:
            assert hasattr(model, "get_extra_losses")
            extra_losses, names = model.get_extra_losses(enc_outputs)
            if isinstance(extra_losses, list):
                extra_losses = extra_losses[0]
                names = names[0]
            else:
                raise NotImplementedError("only support one extra loss")
            loss += self.loss_weights * extra_losses.float() * sample_size
        return loss, logp_m_list, logp_u_list
| 2,827 | 36.706667 | 91 | py |
espnet | espnet-master/espnet2/diar/abs_diar.py | from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Tuple
import torch
class AbsDiarization(torch.nn.Module, ABC):
    """Abstract interface for speaker diarization modules."""

    # @abstractmethod
    # def output_size(self) -> int:
    #     raise NotImplementedError

    @abstractmethod
    def forward(
        self,
        input: torch.Tensor,
        ilens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, OrderedDict]:
        """Process feature input; return (output, output lengths, stats)."""
        raise NotImplementedError

    @abstractmethod
    def forward_rawwav(
        self, input: torch.Tensor, ilens: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, OrderedDict]:
        """Process raw-waveform input; return (output, output lengths, stats)."""
        raise NotImplementedError
| 643 | 23.769231 | 56 | py |
espnet | espnet-master/espnet2/diar/espnet_model.py | # Copyright 2021 Jiatong Shi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from contextlib import contextmanager
from itertools import permutations
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from packaging.version import parse as V
from typeguard import check_argument_types
from espnet2.asr.encoder.abs_encoder import AbsEncoder
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.diar.attractor.abs_attractor import AbsAttractor
from espnet2.diar.decoder.abs_decoder import AbsDecoder
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.pytorch_backend.nets_utils import to_device
# Use native AMP autocast when available (torch>=1.6); otherwise install a
# no-op context manager with the same signature so callers can always write
# `with autocast(False): ...` unconditionally.
if V(torch.__version__) >= V("1.6.0"):
    from torch.cuda.amp import autocast
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        yield
class ESPnetDiarizationModel(AbsESPnetModel):
    """Speaker Diarization model

    If "attractor" is "None", SA-EEND will be used.
    Else if "attractor" is not "None", EEND-EDA will be used.
    For the details about SA-EEND and EEND-EDA, refer to the following papers:
    SA-EEND: https://arxiv.org/pdf/1909.06247.pdf
    EEND-EDA: https://arxiv.org/pdf/2005.09921.pdf, https://arxiv.org/pdf/2106.10654.pdf
    """

    def __init__(
        self,
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        label_aggregator: torch.nn.Module,
        encoder: AbsEncoder,
        decoder: AbsDecoder,
        attractor: Optional[AbsAttractor],
        diar_weight: float = 1.0,
        attractor_weight: float = 1.0,
    ):
        assert check_argument_types()

        super().__init__()

        self.encoder = encoder
        self.normalize = normalize
        self.frontend = frontend
        self.specaug = specaug
        self.label_aggregator = label_aggregator
        self.diar_weight = diar_weight
        self.attractor_weight = attractor_weight
        self.attractor = attractor
        self.decoder = decoder
        # EEND-EDA derives speaker activity from attractors, so the fixed
        # decoder is dropped; SA-EEND needs the decoder's fixed num_spk.
        if self.attractor is not None:
            self.decoder = None
        elif self.decoder is not None:
            self.num_spk = decoder.num_spk
        else:
            raise NotImplementedError
def forward(
    self,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor = None,
    spk_labels: torch.Tensor = None,
    spk_labels_lengths: torch.Tensor = None,
    **kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
    """Frontend + Encoder + Decoder + Calc loss

    Args:
        speech: (Batch, samples)
        speech_lengths: (Batch,) default None for chunk iterator,
                        because the chunk-iterator does not
                        have the speech_lengths returned.
                        see in
                        espnet2/iterators/chunk_iter_factory.py
        spk_labels: (Batch, )
        kwargs: "utt_id" is among the input.
    """
    assert speech.shape[0] == spk_labels.shape[0], (speech.shape, spk_labels.shape)
    batch_size = speech.shape[0]

    # 1. Encoder
    # Use bottleneck_feats if exist. Only for "enh + diar" task.
    bottleneck_feats = kwargs.get("bottleneck_feats", None)
    bottleneck_feats_lengths = kwargs.get("bottleneck_feats_lengths", None)
    encoder_out, encoder_out_lens = self.encode(
        speech, speech_lengths, bottleneck_feats, bottleneck_feats_lengths
    )

    if self.attractor is None:
        # 2a. Decoder (basically a prediction layer after encoder_out)
        pred = self.decoder(encoder_out, encoder_out_lens)
    else:
        # 2b. Encoder Decoder Attractors
        # Shuffle the chronological order of encoder_out, then calculate
        # attractors from the shuffled sequence (EEND-EDA).
        encoder_out_shuffled = encoder_out.clone()
        for i in range(len(encoder_out_lens)):
            encoder_out_shuffled[i, : encoder_out_lens[i], :] = encoder_out[
                i, torch.randperm(encoder_out_lens[i]), :
            ]
        attractor, att_prob = self.attractor(
            encoder_out_shuffled,
            encoder_out_lens,
            to_device(
                self,
                torch.zeros(
                    encoder_out.size(0), spk_labels.size(2) + 1, encoder_out.size(2)
                ),
            ),
        )
        # Remove the final attractor which does not correspond to a speaker
        # Then multiply the attractors and encoder_out
        pred = torch.bmm(encoder_out, attractor[:, :-1, :].permute(0, 2, 1))

    # 3. Aggregate time-domain labels to frame-level labels
    spk_labels, spk_labels_lengths = self.label_aggregator(
        spk_labels, spk_labels_lengths
    )

    # If encoder uses conv* as input_layer (i.e., subsampling),
    # the sequence length of 'pred' might be slightly less than the
    # length of 'spk_labels'. Here we force them to be equal.
    length_diff_tolerance = 2
    length_diff = spk_labels.shape[1] - pred.shape[1]
    if length_diff > 0 and length_diff <= length_diff_tolerance:
        spk_labels = spk_labels[:, 0 : pred.shape[1], :]

    # 4. Permutation-invariant training loss (+ attractor loss for EEND-EDA)
    if self.attractor is None:
        loss_pit, loss_att = None, None
        loss, perm_idx, perm_list, label_perm = self.pit_loss(
            pred, spk_labels, encoder_out_lens
        )
    else:
        loss_pit, perm_idx, perm_list, label_perm = self.pit_loss(
            pred, spk_labels, encoder_out_lens
        )
        loss_att = self.attractor_loss(att_prob, spk_labels)
        loss = self.diar_weight * loss_pit + self.attractor_weight * loss_att
    (
        correct,
        num_frames,
        speech_scored,
        speech_miss,
        speech_falarm,
        speaker_scored,
        speaker_miss,
        speaker_falarm,
        speaker_error,
    ) = self.calc_diarization_error(pred, label_perm, encoder_out_lens)

    # Diarization metrics: SAD miss/false-alarm rates, speaker
    # miss/false-alarm/confusion, frame accuracy and DER.
    if speech_scored > 0 and num_frames > 0:
        sad_mr, sad_fr, mi, fa, cf, acc, der = (
            speech_miss / speech_scored,
            speech_falarm / speech_scored,
            speaker_miss / speaker_scored,
            speaker_falarm / speaker_scored,
            speaker_error / speaker_scored,
            correct / num_frames,
            (speaker_miss + speaker_falarm + speaker_error) / speaker_scored,
        )
    else:
        sad_mr, sad_fr, mi, fa, cf, acc, der = 0, 0, 0, 0, 0, 0, 0

    stats = dict(
        loss=loss.detach(),
        loss_att=loss_att.detach() if loss_att is not None else None,
        loss_pit=loss_pit.detach() if loss_pit is not None else None,
        sad_mr=sad_mr,
        sad_fr=sad_fr,
        mi=mi,
        fa=fa,
        cf=cf,
        acc=acc,
        der=der,
    )

    # force_gatherable: to-device and to-tensor if scalar for DataParallel
    loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
    return loss, stats, weight
def collect_feats(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spk_labels: torch.Tensor = None,
spk_labels_lengths: torch.Tensor = None,
**kwargs,
) -> Dict[str, torch.Tensor]:
feats, feats_lengths = self._extract_feats(speech, speech_lengths)
return {"feats": feats, "feats_lengths": feats_lengths}
def encode(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
bottleneck_feats: torch.Tensor,
bottleneck_feats_lengths: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Frontend + Encoder
Args:
speech: (Batch, Length, ...)
speech_lengths: (Batch,)
bottleneck_feats: (Batch, Length, ...): used for enh + diar
"""
with autocast(False):
# 1. Extract feats
feats, feats_lengths = self._extract_feats(speech, speech_lengths)
# 2. Data augmentation
if self.specaug is not None and self.training:
feats, feats_lengths = self.specaug(feats, feats_lengths)
# 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
# 4. Forward encoder
# feats: (Batch, Length, Dim)
# -> encoder_out: (Batch, Length2, Dim)
if bottleneck_feats is None:
encoder_out, encoder_out_lens, _ = self.encoder(feats, feats_lengths)
elif self.frontend is None:
# use only bottleneck feature
encoder_out, encoder_out_lens, _ = self.encoder(
bottleneck_feats, bottleneck_feats_lengths
)
else:
# use both frontend and bottleneck feats
# interpolate (copy) feats frames
# to match the length with bottleneck_feats
feats = F.interpolate(
feats.transpose(1, 2), size=bottleneck_feats.shape[1]
).transpose(1, 2)
# concatenate frontend LMF feature and bottleneck feature
encoder_out, encoder_out_lens, _ = self.encoder(
torch.cat((bottleneck_feats, feats), 2), bottleneck_feats_lengths
)
assert encoder_out.size(0) == speech.size(0), (
encoder_out.size(),
speech.size(0),
)
assert encoder_out.size(1) <= encoder_out_lens.max(), (
encoder_out.size(),
encoder_out_lens.max(),
)
return encoder_out, encoder_out_lens
def _extract_feats(
self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
batch_size = speech.shape[0]
speech_lengths = (
speech_lengths
if speech_lengths is not None
else torch.ones(batch_size).int() * speech.shape[1]
)
assert speech_lengths.dim() == 1, speech_lengths.shape
# for data-parallel
speech = speech[:, : speech_lengths.max()]
if self.frontend is not None:
# Frontend
# e.g. STFT and Feature extract
# data_loader may send time-domain signal in this case
# speech (Batch, NSamples) -> feats: (Batch, NFrames, Dim)
feats, feats_lengths = self.frontend(speech, speech_lengths)
else:
# No frontend and no feature extract
feats, feats_lengths = speech, speech_lengths
return feats, feats_lengths
def pit_loss_single_permute(self, pred, label, length):
bce_loss = torch.nn.BCEWithLogitsLoss(reduction="none")
mask = self.create_length_mask(length, label.size(1), label.size(2))
loss = bce_loss(pred, label)
loss = loss * mask
loss = torch.sum(torch.mean(loss, dim=2), dim=1)
loss = torch.unsqueeze(loss, dim=1)
return loss
def pit_loss(self, pred, label, lengths):
# Note (jiatong): Credit to https://github.com/hitachi-speech/EEND
num_output = label.size(2)
permute_list = [np.array(p) for p in permutations(range(num_output))]
loss_list = []
for p in permute_list:
label_perm = label[:, :, p]
loss_perm = self.pit_loss_single_permute(pred, label_perm, lengths)
loss_list.append(loss_perm)
loss = torch.cat(loss_list, dim=1)
min_loss, min_idx = torch.min(loss, dim=1)
loss = torch.sum(min_loss) / torch.sum(lengths.float())
batch_size = len(min_idx)
label_list = []
for i in range(batch_size):
label_list.append(label[i, :, permute_list[min_idx[i]]].data.cpu().numpy())
label_permute = torch.from_numpy(np.array(label_list)).float()
return loss, min_idx, permute_list, label_permute
def create_length_mask(self, length, max_len, num_output):
batch_size = len(length)
mask = torch.zeros(batch_size, max_len, num_output)
for i in range(batch_size):
mask[i, : length[i], :] = 1
mask = to_device(self, mask)
return mask
def attractor_loss(self, att_prob, label):
batch_size = len(label)
bce_loss = torch.nn.BCEWithLogitsLoss(reduction="none")
# create attractor label [1, 1, ..., 1, 0]
# att_label: (Batch, num_spk + 1, 1)
att_label = to_device(self, torch.zeros(batch_size, label.size(2) + 1, 1))
att_label[:, : label.size(2), :] = 1
loss = bce_loss(att_prob, att_label)
loss = torch.mean(torch.mean(loss, dim=1))
return loss
@staticmethod
def calc_diarization_error(pred, label, length):
# Note (jiatong): Credit to https://github.com/hitachi-speech/EEND
(batch_size, max_len, num_output) = label.size()
# mask the padding part
mask = np.zeros((batch_size, max_len, num_output))
for i in range(batch_size):
mask[i, : length[i], :] = 1
# pred and label have the shape (batch_size, max_len, num_output)
label_np = label.data.cpu().numpy().astype(int)
pred_np = (pred.data.cpu().numpy() > 0).astype(int)
label_np = label_np * mask
pred_np = pred_np * mask
length = length.data.cpu().numpy()
# compute speech activity detection error
n_ref = np.sum(label_np, axis=2)
n_sys = np.sum(pred_np, axis=2)
speech_scored = float(np.sum(n_ref > 0))
speech_miss = float(np.sum(np.logical_and(n_ref > 0, n_sys == 0)))
speech_falarm = float(np.sum(np.logical_and(n_ref == 0, n_sys > 0)))
# compute speaker diarization error
speaker_scored = float(np.sum(n_ref))
speaker_miss = float(np.sum(np.maximum(n_ref - n_sys, 0)))
speaker_falarm = float(np.sum(np.maximum(n_sys - n_ref, 0)))
n_map = np.sum(np.logical_and(label_np == 1, pred_np == 1), axis=2)
speaker_error = float(np.sum(np.minimum(n_ref, n_sys) - n_map))
correct = float(1.0 * np.sum((label_np == pred_np) * mask) / num_output)
num_frames = np.sum(length)
return (
correct,
num_frames,
speech_scored,
speech_miss,
speech_falarm,
speaker_scored,
speaker_miss,
speaker_falarm,
speaker_error,
)
| 14,924 | 38.070681 | 88 | py |
espnet | espnet-master/espnet2/diar/label_processor.py | import torch
from espnet2.layers.label_aggregation import LabelAggregate
class LabelProcessor(torch.nn.Module):
"""Label aggregator for speaker diarization"""
def __init__(
self, win_length: int = 512, hop_length: int = 128, center: bool = True
):
super().__init__()
self.label_aggregator = LabelAggregate(win_length, hop_length, center)
def forward(self, input: torch.Tensor, ilens: torch.Tensor):
"""Forward.
Args:
input: (Batch, Nsamples, Label_dim)
ilens: (Batch)
Returns:
output: (Batch, Frames, Label_dim)
olens: (Batch)
"""
output, olens = self.label_aggregator(input, ilens)
return output, olens
| 749 | 24 | 79 | py |
espnet | espnet-master/espnet2/diar/separator/tcn_separator_nomask.py | from distutils.version import LooseVersion
from typing import Tuple, Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet2.diar.layers.tcn_nomask import TemporalConvNet
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.abs_separator import AbsSeparator
is_torch_1_9_plus = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
class TCNSeparatorNomask(AbsSeparator):
def __init__(
self,
input_dim: int,
layer: int = 8,
stack: int = 3,
bottleneck_dim: int = 128,
hidden_dim: int = 512,
kernel: int = 3,
causal: bool = False,
norm_type: str = "gLN",
):
"""Temporal Convolution Separator
Note that this separator is equivalent to TCNSeparator except
for not having the mask estimation part.
This separator outputs the intermediate bottleneck feats
(which is used as the input to diarization branch in enh_diar task).
This separator is followed by MultiMask module,
which estimates the masks.
Args:
input_dim: input feature dimension
layer: int, number of layers in each stack.
stack: int, number of stacks
bottleneck_dim: bottleneck dimension
hidden_dim: number of convolution channel
kernel: int, kernel size.
causal: bool, defalut False.
norm_type: str, choose from 'BN', 'gLN', 'cLN'
"""
super().__init__()
self.tcn = TemporalConvNet(
N=input_dim,
B=bottleneck_dim,
H=hidden_dim,
P=kernel,
X=layer,
R=stack,
norm_type=norm_type,
causal=causal,
)
self._output_dim = bottleneck_dim
def forward(
self, input: Union[torch.Tensor, ComplexTensor], ilens: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Forward.
Args:
input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
ilens (torch.Tensor): input lengths [Batch]
Returns:
feats (torch.Tensor): [B, T, bottleneck_dim]
ilens (torch.Tensor): (B,)
"""
# if complex spectrum
if is_complex(input):
feature = abs(input)
else:
feature = input
feature = feature.transpose(1, 2) # B, N, L
feats = self.tcn(feature) # [B, bottleneck_dim, L]
feats = feats.transpose(1, 2) # B, L, bottleneck_dim
return feats, ilens
@property
def output_dim(self) -> int:
return self._output_dim
@property
def num_spk(self):
return None
| 2,737 | 28.76087 | 76 | py |
espnet | espnet-master/espnet2/diar/attractor/abs_attractor.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class AbsAttractor(torch.nn.Module, ABC):
@abstractmethod
def forward(
self,
enc_input: torch.Tensor,
ilens: torch.Tensor,
dec_input: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
| 343 | 20.5 | 43 | py |
espnet | espnet-master/espnet2/diar/attractor/rnn_attractor.py | import torch
from espnet2.diar.attractor.abs_attractor import AbsAttractor
class RnnAttractor(AbsAttractor):
"""encoder decoder attractor for speaker diarization"""
def __init__(
self,
encoder_output_size: int,
layer: int = 1,
unit: int = 512,
dropout: float = 0.1,
attractor_grad: bool = True,
):
super().__init__()
self.attractor_encoder = torch.nn.LSTM(
input_size=encoder_output_size,
hidden_size=unit,
num_layers=layer,
dropout=dropout,
batch_first=True,
)
self.attractor_decoder = torch.nn.LSTM(
input_size=encoder_output_size,
hidden_size=unit,
num_layers=layer,
dropout=dropout,
batch_first=True,
)
self.dropout_layer = torch.nn.Dropout(p=dropout)
self.linear_projection = torch.nn.Linear(unit, 1)
self.attractor_grad = attractor_grad
def forward(
self,
enc_input: torch.Tensor,
ilens: torch.Tensor,
dec_input: torch.Tensor,
):
"""Forward.
Args:
enc_input (torch.Tensor): hidden_space [Batch, T, F]
ilens (torch.Tensor): input lengths [Batch]
dec_input (torch.Tensor): decoder input (zeros) [Batch, num_spk + 1, F]
Returns:
attractor: [Batch, num_spk + 1, F]
att_prob: [Batch, num_spk + 1, 1]
"""
pack = torch.nn.utils.rnn.pack_padded_sequence(
enc_input, lengths=ilens.cpu(), batch_first=True, enforce_sorted=False
)
_, hs = self.attractor_encoder(pack)
attractor, _ = self.attractor_decoder(dec_input, hs)
attractor = self.dropout_layer(attractor)
if self.attractor_grad is True:
att_prob = self.linear_projection(attractor)
else:
att_prob = self.linear_projection(attractor.detach())
return attractor, att_prob
| 2,007 | 29.424242 | 83 | py |
espnet | espnet-master/espnet2/diar/layers/tcn_nomask.py | # Implementation of the TCN proposed in
# Luo. et al. "Conv-tasnet: Surpassing ideal time–frequency
# magnitude masking for speech separation."
#
# The code is based on:
# https://github.com/kaituoxu/Conv-TasNet/blob/master/src/conv_tasnet.py
#
import torch
import torch.nn as nn
EPS = torch.finfo(torch.get_default_dtype()).eps
class TemporalConvNet(nn.Module):
def __init__(self, N, B, H, P, X, R, norm_type="gLN", causal=False):
"""Basic Module of tasnet.
Args:
N: Number of filters in autoencoder
B: Number of channels in bottleneck 1 * 1-conv block
H: Number of channels in convolutional blocks
P: Kernel size in convolutional blocks
X: Number of convolutional blocks in each repeat
R: Number of repeats
norm_type: BN, gLN, cLN
causal: causal or non-causal
"""
super().__init__()
# Components
# [M, N, K] -> [M, N, K]
layer_norm = ChannelwiseLayerNorm(N)
# [M, N, K] -> [M, B, K]
bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)
# [M, B, K] -> [M, B, K]
repeats = []
for r in range(R):
blocks = []
for x in range(X):
dilation = 2**x
padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2
blocks += [
TemporalBlock(
B,
H,
P,
stride=1,
padding=padding,
dilation=dilation,
norm_type=norm_type,
causal=causal,
)
]
repeats += [nn.Sequential(*blocks)]
temporal_conv_net = nn.Sequential(*repeats)
# Put together (except mask_conv1x1, modified from the original code)
self.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net)
def forward(self, mixture_w):
"""Keep this API same with TasNet.
Args:
mixture_w: [M, N, K], M is batch size
Returns:
bottleneck_feature: [M, B, K]
"""
return self.network(mixture_w) # [M, N, K] -> [M, B, K]
class TemporalBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
norm_type="gLN",
causal=False,
):
super().__init__()
# [M, B, K] -> [M, H, K]
conv1x1 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
prelu = nn.PReLU()
norm = chose_norm(norm_type, out_channels)
# [M, H, K] -> [M, B, K]
dsconv = DepthwiseSeparableConv(
out_channels,
in_channels,
kernel_size,
stride,
padding,
dilation,
norm_type,
causal,
)
# Put together
self.net = nn.Sequential(conv1x1, prelu, norm, dsconv)
def forward(self, x):
"""Forward.
Args:
x: [M, B, K]
Returns:
[M, B, K]
"""
residual = x
out = self.net(x)
# TODO(Jing): when P = 3 here works fine, but when P = 2 maybe need to pad?
return out + residual # look like w/o F.relu is better than w/ F.relu
# return F.relu(out + residual)
class DepthwiseSeparableConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
norm_type="gLN",
causal=False,
):
super().__init__()
# Use `groups` option to implement depthwise convolution
# [M, H, K] -> [M, H, K]
depthwise_conv = nn.Conv1d(
in_channels,
in_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=False,
)
if causal:
chomp = Chomp1d(padding)
prelu = nn.PReLU()
norm = chose_norm(norm_type, in_channels)
# [M, H, K] -> [M, B, K]
pointwise_conv = nn.Conv1d(in_channels, out_channels, 1, bias=False)
# Put together
if causal:
self.net = nn.Sequential(depthwise_conv, chomp, prelu, norm, pointwise_conv)
else:
self.net = nn.Sequential(depthwise_conv, prelu, norm, pointwise_conv)
def forward(self, x):
"""Forward.
Args:
x: [M, H, K]
Returns:
result: [M, B, K]
"""
return self.net(x)
class Chomp1d(nn.Module):
"""To ensure the output length is the same as the input."""
def __init__(self, chomp_size):
super().__init__()
self.chomp_size = chomp_size
def forward(self, x):
"""Forward.
Args:
x: [M, H, Kpad]
Returns:
[M, H, K]
"""
return x[:, :, : -self.chomp_size].contiguous()
def check_nonlinear(nolinear_type):
if nolinear_type not in ["softmax", "relu"]:
raise ValueError("Unsupported nonlinear type")
def chose_norm(norm_type, channel_size):
"""The input of normalization will be (M, C, K), where M is batch size.
C is channel size and K is sequence length.
"""
if norm_type == "gLN":
return GlobalLayerNorm(channel_size)
elif norm_type == "cLN":
return ChannelwiseLayerNorm(channel_size)
elif norm_type == "BN":
# Given input (M, C, K), nn.BatchNorm1d(C) will accumulate statics
# along M and K, so this BN usage is right.
return nn.BatchNorm1d(channel_size)
else:
raise ValueError("Unsupported normalization type")
class ChannelwiseLayerNorm(nn.Module):
"""Channel-wise Layer Normalization (cLN)."""
def __init__(self, channel_size):
super().__init__()
self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.reset_parameters()
def reset_parameters(self):
self.gamma.data.fill_(1)
self.beta.data.zero_()
def forward(self, y):
"""Forward.
Args:
y: [M, N, K], M is batch size, N is channel size, K is length
Returns:
cLN_y: [M, N, K]
"""
mean = torch.mean(y, dim=1, keepdim=True) # [M, 1, K]
var = torch.var(y, dim=1, keepdim=True, unbiased=False) # [M, 1, K]
cLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta
return cLN_y
class GlobalLayerNorm(nn.Module):
"""Global Layer Normalization (gLN)."""
def __init__(self, channel_size):
super().__init__()
self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.reset_parameters()
def reset_parameters(self):
self.gamma.data.fill_(1)
self.beta.data.zero_()
def forward(self, y):
"""Forward.
Args:
y: [M, N, K], M is batch size, N is channel size, K is length
Returns:
gLN_y: [M, N, K]
"""
mean = y.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) # [M, 1, 1]
var = (
(torch.pow(y - mean, 2)).mean(dim=1, keepdim=True).mean(dim=2, keepdim=True)
)
gLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta
return gLN_y
| 7,701 | 28.064151 | 88 | py |
espnet | espnet-master/espnet2/diar/layers/abs_mask.py | from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Tuple
import torch
class AbsMask(torch.nn.Module, ABC):
@property
@abstractmethod
def max_num_spk(self) -> int:
raise NotImplementedError
@abstractmethod
def forward(
self,
input,
ilens,
bottleneck_feat,
num_spk,
) -> Tuple[Tuple[torch.Tensor], torch.Tensor, OrderedDict]:
raise NotImplementedError
| 474 | 19.652174 | 63 | py |
espnet | espnet-master/espnet2/diar/layers/multi_mask.py | # This is an implementation of the multiple 1x1 convolution layer architecture
# in https://arxiv.org/pdf/2203.17068.pdf
from collections import OrderedDict
from typing import List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_complex.tensor import ComplexTensor
from espnet2.diar.layers.abs_mask import AbsMask
class MultiMask(AbsMask):
def __init__(
self,
input_dim: int,
bottleneck_dim: int = 128,
max_num_spk: int = 3,
mask_nonlinear="relu",
):
"""Multiple 1x1 convolution layer Module.
This module corresponds to the final 1x1 conv block and
non-linear function in TCNSeparator.
This module has multiple 1x1 conv blocks. One of them is selected
according to the given num_spk to handle flexible num_spk.
Args:
input_dim: Number of filters in autoencoder
bottleneck_dim: Number of channels in bottleneck 1 * 1-conv block
max_num_spk: Number of mask_conv1x1 modules
(>= Max number of speakers in the dataset)
mask_nonlinear: use which non-linear function to generate mask
"""
super().__init__()
# Hyper-parameter
self._max_num_spk = max_num_spk
self.mask_nonlinear = mask_nonlinear
# [M, B, K] -> [M, C*N, K]
self.mask_conv1x1 = nn.ModuleList()
for z in range(1, max_num_spk + 1):
self.mask_conv1x1.append(
nn.Conv1d(bottleneck_dim, z * input_dim, 1, bias=False)
)
@property
def max_num_spk(self) -> int:
return self._max_num_spk
def forward(
self,
input: Union[torch.Tensor, ComplexTensor],
ilens: torch.Tensor,
bottleneck_feat: torch.Tensor,
num_spk: int,
) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
"""Keep this API same with TasNet.
Args:
input: [M, K, N], M is batch size
ilens (torch.Tensor): (M,)
bottleneck_feat: [M, K, B]
num_spk: number of speakers
(Training: oracle,
Inference: estimated by other module (e.g, EEND-EDA))
Returns:
masked (List[Union(torch.Tensor, ComplexTensor)]): [(M, K, N), ...]
ilens (torch.Tensor): (M,)
others predicted data, e.g. masks: OrderedDict[
'mask_spk1': torch.Tensor(Batch, Frames, Freq),
'mask_spk2': torch.Tensor(Batch, Frames, Freq),
...
'mask_spkn': torch.Tensor(Batch, Frames, Freq),
]
"""
M, K, N = input.size()
bottleneck_feat = bottleneck_feat.transpose(1, 2) # [M, B, K]
score = self.mask_conv1x1[num_spk - 1](
bottleneck_feat
) # [M, B, K] -> [M, num_spk*N, K]
# add other outputs of the module list with factor 0.0
# to enable distributed training
for z in range(self._max_num_spk):
if z != num_spk - 1:
score += 0.0 * F.interpolate(
self.mask_conv1x1[z](bottleneck_feat).transpose(1, 2),
size=num_spk * N,
).transpose(1, 2)
score = score.view(M, num_spk, N, K) # [M, num_spk*N, K] -> [M, num_spk, N, K]
if self.mask_nonlinear == "softmax":
est_mask = F.softmax(score, dim=1)
elif self.mask_nonlinear == "relu":
est_mask = F.relu(score)
elif self.mask_nonlinear == "sigmoid":
est_mask = torch.sigmoid(score)
elif self.mask_nonlinear == "tanh":
est_mask = torch.tanh(score)
else:
raise ValueError("Unsupported mask non-linear function")
masks = est_mask.transpose(2, 3) # [M, num_spk, K, N]
masks = masks.unbind(dim=1) # List[M, K, N]
masked = [input * m for m in masks]
others = OrderedDict(
zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
)
return masked, ilens, others
| 4,124 | 34.869565 | 87 | py |
espnet | espnet-master/espnet2/diar/decoder/linear_decoder.py | import torch
from espnet2.diar.decoder.abs_decoder import AbsDecoder
class LinearDecoder(AbsDecoder):
"""Linear decoder for speaker diarization"""
def __init__(
self,
encoder_output_size: int,
num_spk: int = 2,
):
super().__init__()
self._num_spk = num_spk
self.linear_decoder = torch.nn.Linear(encoder_output_size, num_spk)
def forward(self, input: torch.Tensor, ilens: torch.Tensor):
"""Forward.
Args:
input (torch.Tensor): hidden_space [Batch, T, F]
ilens (torch.Tensor): input lengths [Batch]
"""
output = self.linear_decoder(input)
return output
@property
def num_spk(self):
return self._num_spk
| 754 | 21.878788 | 75 | py |
espnet | espnet-master/espnet2/diar/decoder/abs_decoder.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class AbsDecoder(torch.nn.Module, ABC):
@abstractmethod
def forward(
self,
input: torch.Tensor,
ilens: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
@property
@abstractmethod
def num_spk(self):
raise NotImplementedError
| 396 | 18.85 | 43 | py |
espnet | espnet-master/espnet2/layers/inversible_interface.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
class InversibleInterface(ABC):
@abstractmethod
def inverse(
self, input: torch.Tensor, input_lengths: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
# return output, output_lengths
raise NotImplementedError
| 334 | 22.928571 | 69 | py |
espnet | espnet-master/espnet2/layers/stft.py | from typing import Optional, Tuple, Union
import librosa
import numpy as np
import torch
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from typeguard import check_argument_types
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.layers.inversible_interface import InversibleInterface
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
is_torch_1_10_plus = V(torch.__version__) >= V("1.10.0")
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
is_torch_1_7_plus = V(torch.__version__) >= V("1.7")
class Stft(torch.nn.Module, InversibleInterface):
def __init__(
self,
n_fft: int = 512,
win_length: int = None,
hop_length: int = 128,
window: Optional[str] = "hann",
center: bool = True,
normalized: bool = False,
onesided: bool = True,
):
assert check_argument_types()
super().__init__()
self.n_fft = n_fft
if win_length is None:
self.win_length = n_fft
else:
self.win_length = win_length
self.hop_length = hop_length
self.center = center
self.normalized = normalized
self.onesided = onesided
if window is not None and not hasattr(torch, f"{window}_window"):
raise ValueError(f"{window} window is not implemented")
self.window = window
def extra_repr(self):
return (
f"n_fft={self.n_fft}, "
f"win_length={self.win_length}, "
f"hop_length={self.hop_length}, "
f"center={self.center}, "
f"normalized={self.normalized}, "
f"onesided={self.onesided}"
)
def forward(
self, input: torch.Tensor, ilens: torch.Tensor = None
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""STFT forward function.
Args:
input: (Batch, Nsamples) or (Batch, Nsample, Channels)
ilens: (Batch)
Returns:
output: (Batch, Frames, Freq, 2) or (Batch, Frames, Channels, Freq, 2)
"""
bs = input.size(0)
if input.dim() == 3:
multi_channel = True
# input: (Batch, Nsample, Channels) -> (Batch * Channels, Nsample)
input = input.transpose(1, 2).reshape(-1, input.size(1))
else:
multi_channel = False
# NOTE(kamo):
# The default behaviour of torch.stft is compatible with librosa.stft
# about padding and scaling.
# Note that it's different from scipy.signal.stft
# output: (Batch, Freq, Frames, 2=real_imag)
# or (Batch, Channel, Freq, Frames, 2=real_imag)
if self.window is not None:
window_func = getattr(torch, f"{self.window}_window")
window = window_func(
self.win_length, dtype=input.dtype, device=input.device
)
else:
window = None
# For the compatibility of ARM devices, which do not support
# torch.stft() due to the lack of MKL (on older pytorch versions),
# there is an alternative replacement implementation with librosa.
# Note: pytorch >= 1.10.0 now has native support for FFT and STFT
# on all cpu targets including ARM.
if is_torch_1_10_plus or input.is_cuda or torch.backends.mkl.is_available():
stft_kwargs = dict(
n_fft=self.n_fft,
win_length=self.win_length,
hop_length=self.hop_length,
center=self.center,
window=window,
normalized=self.normalized,
onesided=self.onesided,
)
if is_torch_1_7_plus:
stft_kwargs["return_complex"] = False
output = torch.stft(input, **stft_kwargs)
else:
if self.training:
raise NotImplementedError(
"stft is implemented with librosa on this device, which does not "
"support the training mode."
)
# use stft_kwargs to flexibly control different PyTorch versions' kwargs
# note: librosa does not support a win_length that is < n_ftt
# but the window can be manually padded (see below).
stft_kwargs = dict(
n_fft=self.n_fft,
win_length=self.n_fft,
hop_length=self.hop_length,
center=self.center,
window=window,
pad_mode="reflect",
)
if window is not None:
# pad the given window to n_fft
n_pad_left = (self.n_fft - window.shape[0]) // 2
n_pad_right = self.n_fft - window.shape[0] - n_pad_left
stft_kwargs["window"] = torch.cat(
[torch.zeros(n_pad_left), window, torch.zeros(n_pad_right)], 0
).numpy()
else:
win_length = (
self.win_length if self.win_length is not None else self.n_fft
)
stft_kwargs["window"] = torch.ones(win_length)
output = []
# iterate over istances in a batch
for i, instance in enumerate(input):
stft = librosa.stft(input[i].numpy(), **stft_kwargs)
output.append(torch.tensor(np.stack([stft.real, stft.imag], -1)))
output = torch.stack(output, 0)
if not self.onesided:
len_conj = self.n_fft - output.shape[1]
conj = output[:, 1 : 1 + len_conj].flip(1)
conj[:, :, :, -1].data *= -1
output = torch.cat([output, conj], 1)
if self.normalized:
output = output * (stft_kwargs["window"].shape[0] ** (-0.5))
# output: (Batch, Freq, Frames, 2=real_imag)
# -> (Batch, Frames, Freq, 2=real_imag)
output = output.transpose(1, 2)
if multi_channel:
# output: (Batch * Channel, Frames, Freq, 2=real_imag)
# -> (Batch, Frame, Channel, Freq, 2=real_imag)
output = output.view(bs, -1, output.size(1), output.size(2), 2).transpose(
1, 2
)
if ilens is not None:
if self.center:
pad = self.n_fft // 2
ilens = ilens + 2 * pad
if is_torch_1_9_plus:
olens = (
torch.div(
ilens - self.n_fft, self.hop_length, rounding_mode="trunc"
)
+ 1
)
else:
olens = (ilens - self.n_fft) // self.hop_length + 1
output.masked_fill_(make_pad_mask(olens, output, 1), 0.0)
else:
olens = None
return output, olens
def inverse(
self, input: Union[torch.Tensor, ComplexTensor], ilens: torch.Tensor = None
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Inverse STFT.
Args:
input: Tensor(batch, T, F, 2) or ComplexTensor(batch, T, F)
ilens: (batch,)
Returns:
wavs: (batch, samples)
ilens: (batch,)
"""
if V(torch.__version__) >= V("1.6.0"):
istft = torch.functional.istft
else:
try:
import torchaudio
except ImportError:
raise ImportError(
"Please install torchaudio>=0.3.0 or use torch>=1.6.0"
)
if not hasattr(torchaudio.functional, "istft"):
raise ImportError(
"Please install torchaudio>=0.3.0 or use torch>=1.6.0"
)
istft = torchaudio.functional.istft
if self.window is not None:
window_func = getattr(torch, f"{self.window}_window")
if is_complex(input):
datatype = input.real.dtype
else:
datatype = input.dtype
window = window_func(self.win_length, dtype=datatype, device=input.device)
else:
window = None
if is_complex(input):
input = torch.stack([input.real, input.imag], dim=-1)
elif input.shape[-1] != 2:
raise TypeError("Invalid input type")
input = input.transpose(1, 2)
wavs = istft(
input,
n_fft=self.n_fft,
hop_length=self.hop_length,
win_length=self.win_length,
window=window,
center=self.center,
normalized=self.normalized,
onesided=self.onesided,
length=ilens.max() if ilens is not None else ilens,
)
return wavs, ilens
| 8,787 | 34.869388 | 86 | py |
espnet | espnet-master/espnet2/layers/global_mvn.py | from pathlib import Path
from typing import Tuple, Union
import numpy as np
import torch
from typeguard import check_argument_types
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class GlobalMVN(AbsNormalize, InversibleInterface):
"""Apply global mean and variance normalization
TODO(kamo): Make this class portable somehow
Args:
stats_file: npy file
norm_means: Apply mean normalization
norm_vars: Apply var normalization
eps:
"""
def __init__(
self,
stats_file: Union[Path, str],
norm_means: bool = True,
norm_vars: bool = True,
eps: float = 1.0e-20,
):
assert check_argument_types()
super().__init__()
self.norm_means = norm_means
self.norm_vars = norm_vars
self.eps = eps
stats_file = Path(stats_file)
self.stats_file = stats_file
stats = np.load(stats_file)
if isinstance(stats, np.ndarray):
# Kaldi like stats
count = stats[0].flatten()[-1]
mean = stats[0, :-1] / count
var = stats[1, :-1] / count - mean * mean
else:
# New style: Npz file
count = stats["count"]
sum_v = stats["sum"]
sum_square_v = stats["sum_square"]
mean = sum_v / count
var = sum_square_v / count - mean * mean
std = np.sqrt(np.maximum(var, eps))
if isinstance(mean, np.ndarray):
mean = torch.from_numpy(mean)
else:
mean = torch.tensor(mean).float()
if isinstance(std, np.ndarray):
std = torch.from_numpy(std)
else:
std = torch.tensor(std).float()
self.register_buffer("mean", mean)
self.register_buffer("std", std)
def extra_repr(self):
return (
f"stats_file={self.stats_file}, "
f"norm_means={self.norm_means}, norm_vars={self.norm_vars}"
)
def forward(
self, x: torch.Tensor, ilens: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Forward function
Args:
x: (B, L, ...)
ilens: (B,)
"""
if ilens is None:
ilens = x.new_full([x.size(0)], x.size(1))
norm_means = self.norm_means
norm_vars = self.norm_vars
self.mean = self.mean.to(x.device, x.dtype)
self.std = self.std.to(x.device, x.dtype)
mask = make_pad_mask(ilens, x, 1)
# feat: (B, T, D)
if norm_means:
if x.requires_grad:
x = x - self.mean
else:
x -= self.mean
if x.requires_grad:
x = x.masked_fill(mask, 0.0)
else:
x.masked_fill_(mask, 0.0)
if norm_vars:
x /= self.std
return x, ilens
def inverse(
self, x: torch.Tensor, ilens: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
if ilens is None:
ilens = x.new_full([x.size(0)], x.size(1))
norm_means = self.norm_means
norm_vars = self.norm_vars
self.mean = self.mean.to(x.device, x.dtype)
self.std = self.std.to(x.device, x.dtype)
mask = make_pad_mask(ilens, x, 1)
if x.requires_grad:
x = x.masked_fill(mask, 0.0)
else:
x.masked_fill_(mask, 0.0)
if norm_vars:
x *= self.std
# feat: (B, T, D)
if norm_means:
x += self.mean
x.masked_fill_(make_pad_mask(ilens, x, 1), 0.0)
return x, ilens
| 3,746 | 27.823077 | 71 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.