| repo (string, 1–99 chars) | file (string, 13–215 chars) | code (string, 12–59.2M chars) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (1 class) |
|---|---|---|---|---|---|---|
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_asr_mix.py | #!/usr/bin/env python3
"""
This script is used to construct End-to-End models of multi-speaker ASR.
Copyright 2017 Johns Hopkins University (Shinji Watanabe)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import argparse
import logging
import math
import os
import sys
from itertools import groupby
import numpy as np
import torch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.e2e_asr_common import get_vgg2l_odim, label_smoothing_dist
from espnet.nets.pytorch_backend.ctc import ctc_for
from espnet.nets.pytorch_backend.e2e_asr import E2E as E2EASR
from espnet.nets.pytorch_backend.e2e_asr import Reporter
from espnet.nets.pytorch_backend.frontends.feature_transform import ( # noqa: H301
feature_transform_for,
)
from espnet.nets.pytorch_backend.frontends.frontend import frontend_for
from espnet.nets.pytorch_backend.initialization import (
lecun_normal_init_parameters,
set_forget_bias_to_one,
)
from espnet.nets.pytorch_backend.nets_utils import (
get_subsample,
make_pad_mask,
pad_list,
to_device,
to_torch_tensor,
)
from espnet.nets.pytorch_backend.rnn.attentions import att_for
from espnet.nets.pytorch_backend.rnn.decoders import decoder_for
from espnet.nets.pytorch_backend.rnn.encoders import RNNP, VGG2L
from espnet.nets.pytorch_backend.rnn.encoders import encoder_for as encoder_for_single
CTC_LOSS_THRESHOLD = 10000
class PIT(object):
    """Permutation Invariant Training (PIT) module.

    Enumerates every speaker permutation once at construction time and, for
    each sample, picks the permutation with the smallest average loss over
    the flattened (hypothesis x reference) loss matrix.

    :parameter int num_spkrs: number of speakers for PIT process (2 or 3)
    """

    def __init__(self, num_spkrs):
        """Initialize PIT module."""
        self.num_spkrs = num_spkrs

        # All speaker orderings, produced by a depth-first swap traversal:
        # [[0, 1], [1, 0]] or
        # [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 1, 0], [2, 0, 1]]
        self.perm_choices = []
        self.permutationDFS(np.arange(num_spkrs, dtype=np.int64), 0)

        # Flat indices into the (num_spkrs x num_spkrs) loss matrix: row h
        # starts at h * num_spkrs, and each permutation selects one column
        # per row, e.g. [[0, 3], [1, 2]] or
        # [[0, 4, 8], [0, 5, 7], [1, 3, 8], [1, 5, 6], [2, 4, 6], [2, 3, 7]]
        row_offsets = np.arange(
            0, num_spkrs * num_spkrs, num_spkrs, dtype=np.int64
        ).reshape(1, num_spkrs)
        self.loss_perm_idx = (row_offsets + np.array(self.perm_choices)).tolist()

    def min_pit_sample(self, loss):
        """Compute the PIT loss for each sample.

        :param 1-D torch.Tensor loss: list of losses for one sample,
            including [h1r1, h1r2, h2r1, h2r2] or
            [h1r1, h1r2, h1r3, h2r1, h2r2, h2r3, h3r1, h3r2, h3r3]
        :return minimum loss of best permutation
        :rtype torch.Tensor (1)
        :return the best permutation
        :rtype List: len=2
        """
        # Average pairing loss for every candidate permutation.
        per_perm = torch.stack(
            [loss[idx].sum() for idx in self.loss_perm_idx]
        ) / self.num_spkrs
        best_loss, best_idx = torch.min(per_perm, 0)
        return best_loss, self.perm_choices[best_idx]

    def pit_process(self, losses):
        """Compute the PIT loss for a batch.

        :param torch.Tensor losses: losses (B, 1|4|9)
        :return minimum losses of a batch with best permutation
        :rtype torch.Tensor (B)
        :return the best permutation
        :rtype torch.LongTensor (B, 1|2|3)
        """
        n_batch = losses.size(0)
        pairs = [self.min_pit_sample(losses[b]) for b in range(n_batch)]
        batch_loss = torch.stack([p[0] for p in pairs], dim=0).to(losses.device)
        best_perms = torch.tensor([p[1] for p in pairs]).long().to(losses.device)
        return torch.mean(batch_loss), best_perms

    def permutationDFS(self, source, start):
        """Get permutations with DFS.

        The final result is all permutations of the 'source' sequence.
        e.g. [[1, 2], [2, 1]] or
        [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]

        :param np.ndarray source: (num_spkrs, 1), e.g. [1, 2, ..., N]
        :param int start: the start point to permute
        """
        n = len(source)
        if start == n - 1:  # a complete ordering has been fixed
            self.perm_choices.append(source.tolist())
        for pos in range(start, n):
            # place element at `pos` into slot `start`, recurse, then undo
            source[start], source[pos] = source[pos], source[start]
            self.permutationDFS(source, start + 1)
            source[start], source[pos] = source[pos], source[start]
class E2E(ASRInterface, torch.nn.Module):
    """E2E module.

    Multi-speaker end-to-end ASR model: mixture speech is encoded (optionally
    after a separating frontend), per-pairing CTC losses are computed for every
    hypothesis/reference combination, and PIT selects the best speaker order
    before the attention decoders are run.

    :param int idim: dimension of inputs
    :param int odim: dimension of outputs
    :param Namespace args: argument Namespace containing options
    """

    @staticmethod
    def add_arguments(parser):
        """Add arguments."""
        E2EASR.encoder_add_arguments(parser)
        E2E.encoder_mix_add_arguments(parser)
        E2EASR.attention_add_arguments(parser)
        E2EASR.decoder_add_arguments(parser)
        return parser

    @staticmethod
    def encoder_mix_add_arguments(parser):
        """Add arguments for multi-speaker encoder."""
        group = parser.add_argument_group("E2E encoder setting for multi-speaker")
        # asr-mix encoder
        group.add_argument(
            "--spa",
            action="store_true",
            help="Enable speaker parallel attention "
            "for multi-speaker speech recognition task.",
        )
        group.add_argument(
            "--elayers-sd",
            default=4,
            type=int,
            help="Number of speaker differentiate encoder layers"
            "for multi-speaker speech recognition task.",
        )
        return parser

    def get_total_subsampling_factor(self):
        """Get total subsampling factor."""
        return self.enc.conv_subsampling_factor * int(np.prod(self.subsample))

    def __init__(self, idim, odim, args):
        """Initialize multi-speaker E2E module."""
        super(E2E, self).__init__()
        torch.nn.Module.__init__(self)
        self.mtlalpha = args.mtlalpha
        assert 0.0 <= self.mtlalpha <= 1.0, "mtlalpha should be [0.0, 1.0]"
        self.etype = args.etype
        self.verbose = args.verbose
        # NOTE: for self.build method
        args.char_list = getattr(args, "char_list", None)
        self.char_list = args.char_list
        self.outdir = args.outdir
        self.space = args.sym_space
        self.blank = args.sym_blank
        self.reporter = Reporter()
        self.num_spkrs = args.num_spkrs
        self.spa = args.spa
        self.pit = PIT(self.num_spkrs)
        # below means the last number becomes eos/sos ID
        # note that sos/eos IDs are identical
        self.sos = odim - 1
        self.eos = odim - 1
        # subsample info
        self.subsample = get_subsample(args, mode="asr", arch="rnn_mix")
        # label smoothing info
        if args.lsm_type and os.path.isfile(args.train_json):
            logging.info("Use label smoothing with " + args.lsm_type)
            labeldist = label_smoothing_dist(
                odim, args.lsm_type, transcript=args.train_json
            )
        else:
            labeldist = None
        if getattr(args, "use_frontend", False):  # use getattr to keep compatibility
            self.frontend = frontend_for(args, idim)
            self.feature_transform = feature_transform_for(args, (idim - 1) * 2)
            idim = args.n_mels
        else:
            self.frontend = None
        # encoder
        self.enc = encoder_for(args, idim, self.subsample)
        # ctc
        # reduce=False keeps per-utterance CTC losses so PIT can permute them
        self.ctc = ctc_for(args, odim, reduce=False)
        # attention
        # with --spa each speaker stream gets its own attention module
        num_att = self.num_spkrs if args.spa else 1
        self.att = att_for(args, num_att)
        # decoder
        self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)
        # weight initialization
        self.init_like_chainer()
        # options for beam search
        if "report_cer" in vars(args) and (args.report_cer or args.report_wer):
            recog_args = {
                "beam_size": args.beam_size,
                "penalty": args.penalty,
                "ctc_weight": args.ctc_weight,
                "maxlenratio": args.maxlenratio,
                "minlenratio": args.minlenratio,
                "lm_weight": args.lm_weight,
                "rnnlm": args.rnnlm,
                "nbest": args.nbest,
                "space": args.sym_space,
                "blank": args.sym_blank,
            }
            self.recog_args = argparse.Namespace(**recog_args)
            self.report_cer = args.report_cer
            self.report_wer = args.report_wer
        else:
            self.report_cer = False
            self.report_wer = False
        self.rnnlm = None
        self.logzero = -10000000000.0
        self.loss = None
        self.acc = None

    def init_like_chainer(self):
        """Initialize weight like chainer.

        chainer basically uses LeCun way: W ~ Normal(0, fan_in ** -0.5), b = 0
        pytorch basically uses W, b ~ Uniform(-fan_in**-0.5, fan_in**-0.5)
        however, there are two exceptions as far as I know.
        - EmbedID.W ~ Normal(0, 1)
        - LSTM.upward.b[forget_gate_range] = 1 (but not used in NStepLSTM)
        """
        lecun_normal_init_parameters(self)
        # exceptions
        # embed weight ~ Normal(0, 1)
        self.dec.embed.weight.data.normal_(0, 1)
        # forget-bias = 1.0
        # https://discuss.pytorch.org/t/set-forget-gate-bias-of-lstm/1745
        for i in range(len(self.dec.decoder)):
            set_forget_bias_to_one(self.dec.decoder[i].bias_ih)

    def forward(self, xs_pad, ilens, ys_pad):
        """E2E forward.

        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad:
            batch of padded character id sequence tensor (B, num_spkrs, Lmax)
        :return: ctc loss value
        :rtype: torch.Tensor
        :return: attention loss value
        :rtype: torch.Tensor
        :return: accuracy in attention decoder
        :rtype: float
        """
        # local import keeps editdistance optional outside training/eval
        import editdistance

        # 0. Frontend
        if self.frontend is not None:
            hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
            if isinstance(hs_pad, list):
                hlens_n = [None] * self.num_spkrs
                for i in range(self.num_spkrs):
                    hs_pad[i], hlens_n[i] = self.feature_transform(hs_pad[i], hlens)
                hlens = hlens_n
            else:
                hs_pad, hlens = self.feature_transform(hs_pad, hlens)
        else:
            hs_pad, hlens = xs_pad, ilens
        # 1. Encoder
        if not isinstance(
            hs_pad, list
        ):  # single-channel input xs_pad (single- or multi-speaker)
            hs_pad, hlens, _ = self.enc(hs_pad, hlens)
        else:  # multi-channel multi-speaker input xs_pad
            for i in range(self.num_spkrs):
                hs_pad[i], hlens[i], _ = self.enc(hs_pad[i], hlens[i])
        # 2. CTC loss
        if self.mtlalpha == 0:
            loss_ctc, min_perm = None, None
        else:
            if not isinstance(hs_pad, list):  # single-speaker input xs_pad
                loss_ctc = torch.mean(self.ctc(hs_pad, hlens, ys_pad))
            else:  # multi-speaker input xs_pad
                ys_pad = ys_pad.transpose(0, 1)  # (num_spkrs, B, Lmax)
                # CTC loss for every (hypothesis, reference) pairing: index i
                # enumerates h(i // N) against r(i % N), giving a (B, N^2)
                # matrix that PIT searches for the cheapest permutation.
                loss_ctc_perm = torch.stack(
                    [
                        self.ctc(
                            hs_pad[i // self.num_spkrs],
                            hlens[i // self.num_spkrs],
                            ys_pad[i % self.num_spkrs],
                        )
                        for i in range(self.num_spkrs**2)
                    ],
                    dim=1,
                )  # (B, num_spkrs^2)
                loss_ctc, min_perm = self.pit.pit_process(loss_ctc_perm)
            logging.info("ctc loss:" + str(float(loss_ctc)))
        # 3. attention loss
        if self.mtlalpha == 1:
            loss_att = None
            acc = None
        else:
            if not isinstance(hs_pad, list):  # single-speaker input xs_pad
                loss_att, acc, _ = self.dec(hs_pad, hlens, ys_pad)
            else:
                # Reorder references per sample with the best CTC permutation
                # so each decoder stream sees its aligned speaker.
                # NOTE(review): min_perm is None when mtlalpha == 0, so this
                # multi-speaker attention path seems to require CTC — confirm.
                for i in range(ys_pad.size(1)):  # B
                    ys_pad[:, i] = ys_pad[min_perm[i], i]
                rslt = [
                    self.dec(hs_pad[i], hlens[i], ys_pad[i], strm_idx=i)
                    for i in range(self.num_spkrs)
                ]
                loss_att = sum([r[0] for r in rslt]) / float(len(rslt))
                acc = sum([r[1] for r in rslt]) / float(len(rslt))
        self.acc = acc
        # 4. compute cer without beam search
        if self.mtlalpha == 0 or self.char_list is None:
            cer_ctc = None
        else:
            # NOTE(review): this branch indexes hs_pad/ys_pad per speaker and
            # looks correct only when hs_pad is a list of per-speaker
            # encodings; the single-speaker tensor path would be indexed along
            # the batch dimension here — confirm.
            cers = []
            for ns in range(self.num_spkrs):
                y_hats = self.ctc.argmax(hs_pad[ns]).data
                for i, y in enumerate(y_hats):
                    # collapse CTC repeats before mapping ids to characters
                    y_hat = [x[0] for x in groupby(y)]
                    y_true = ys_pad[ns][i]
                    seq_hat = [
                        self.char_list[int(idx)] for idx in y_hat if int(idx) != -1
                    ]
                    seq_true = [
                        self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                    ]
                    seq_hat_text = "".join(seq_hat).replace(self.space, " ")
                    seq_hat_text = seq_hat_text.replace(self.blank, "")
                    seq_true_text = "".join(seq_true).replace(self.space, " ")
                    hyp_chars = seq_hat_text.replace(" ", "")
                    ref_chars = seq_true_text.replace(" ", "")
                    if len(ref_chars) > 0:
                        cers.append(
                            editdistance.eval(hyp_chars, ref_chars) / len(ref_chars)
                        )
            cer_ctc = sum(cers) / len(cers) if cers else None
        # 5. compute cer/wer
        if (
            self.training
            or not (self.report_cer or self.report_wer)
            or not isinstance(hs_pad, list)
        ):
            cer, wer = 0.0, 0.0
        else:
            if self.recog_args.ctc_weight > 0.0:
                lpz = [
                    self.ctc.log_softmax(hs_pad[i]).data for i in range(self.num_spkrs)
                ]
            else:
                lpz = None
            word_eds, char_eds, word_ref_lens, char_ref_lens = [], [], [], []
            # NOTE(review): lpz[i] below is indexed even when lpz is None
            # (ctc_weight == 0.0) — confirm this path is never taken then.
            nbest_hyps = [
                self.dec.recognize_beam_batch(
                    hs_pad[i],
                    torch.tensor(hlens[i]),
                    lpz[i],
                    self.recog_args,
                    self.char_list,
                    self.rnnlm,
                    strm_idx=i,
                )
                for i in range(self.num_spkrs)
            ]
            # remove <sos> and <eos>
            y_hats = [
                [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps[i]]
                for i in range(self.num_spkrs)
            ]
            for i in range(len(y_hats[0])):
                hyp_words = []
                hyp_chars = []
                ref_words = []
                ref_chars = []
                for ns in range(self.num_spkrs):
                    y_hat = y_hats[ns][i]
                    y_true = ys_pad[ns][i]
                    seq_hat = [
                        self.char_list[int(idx)] for idx in y_hat if int(idx) != -1
                    ]
                    seq_true = [
                        self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                    ]
                    seq_hat_text = "".join(seq_hat).replace(self.recog_args.space, " ")
                    seq_hat_text = seq_hat_text.replace(self.recog_args.blank, "")
                    seq_true_text = "".join(seq_true).replace(
                        self.recog_args.space, " "
                    )
                    hyp_words.append(seq_hat_text.split())
                    ref_words.append(seq_true_text.split())
                    hyp_chars.append(seq_hat_text.replace(" ", ""))
                    ref_chars.append(seq_true_text.replace(" ", ""))
                # edit distances for all hyp/ref pairings; PIT then picks the
                # permutation with the smallest total distance per sample
                tmp_word_ed = [
                    editdistance.eval(
                        hyp_words[ns // self.num_spkrs], ref_words[ns % self.num_spkrs]
                    )
                    for ns in range(self.num_spkrs**2)
                ]  # h1r1,h1r2,h2r1,h2r2
                tmp_char_ed = [
                    editdistance.eval(
                        hyp_chars[ns // self.num_spkrs], ref_chars[ns % self.num_spkrs]
                    )
                    for ns in range(self.num_spkrs**2)
                ]  # h1r1,h1r2,h2r1,h2r2
                word_eds.append(self.pit.min_pit_sample(torch.tensor(tmp_word_ed))[0])
                word_ref_lens.append(len(sum(ref_words, [])))
                char_eds.append(self.pit.min_pit_sample(torch.tensor(tmp_char_ed))[0])
                char_ref_lens.append(len("".join(ref_chars)))
            wer = (
                0.0
                if not self.report_wer
                else float(sum(word_eds)) / sum(word_ref_lens)
            )
            cer = (
                0.0
                if not self.report_cer
                else float(sum(char_eds)) / sum(char_ref_lens)
            )
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = None
        elif alpha == 1:
            self.loss = loss_ctc
            loss_att_data = None
            loss_ctc_data = float(loss_ctc)
        else:
            self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = float(loss_ctc)
        loss_data = float(self.loss)
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_ctc_data, loss_att_data, self.acc, cer_ctc, cer, wer, loss_data
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss

    def recognize(self, x, recog_args, char_list, rnnlm=None):
        """E2E beam search.

        :param ndarray x: input acoustic feature (T, D)
        :param Namespace recog_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        prev = self.training
        self.eval()
        ilens = [x.shape[0]]
        # subsample frame
        x = x[:: self.subsample[0], :]
        h = to_device(self, to_torch_tensor(x).float())
        # make a utt list (1) to use the same interface for encoder
        hs = h.contiguous().unsqueeze(0)
        # 0. Frontend
        if self.frontend is not None:
            hs, hlens, mask = self.frontend(hs, ilens)
            hlens_n = [None] * self.num_spkrs
            for i in range(self.num_spkrs):
                hs[i], hlens_n[i] = self.feature_transform(hs[i], hlens)
            hlens = hlens_n
        else:
            hs, hlens = hs, ilens
        # 1. Encoder
        if not isinstance(hs, list):  # single-channel multi-speaker input x
            hs, hlens, _ = self.enc(hs, hlens)
        else:  # multi-channel multi-speaker input x
            for i in range(self.num_spkrs):
                hs[i], hlens[i], _ = self.enc(hs[i], hlens[i])
        # calculate log P(z_t|X) for CTC scores
        if recog_args.ctc_weight > 0.0:
            lpz = [self.ctc.log_softmax(i)[0] for i in hs]
        else:
            lpz = None
        # 2. decoder
        # decode the first utterance
        # NOTE(review): lpz[i] is indexed unconditionally below, so this path
        # appears to require recog_args.ctc_weight > 0.0 — confirm.
        y = [
            self.dec.recognize_beam(
                hs[i][0], lpz[i], recog_args, char_list, rnnlm, strm_idx=i
            )
            for i in range(self.num_spkrs)
        ]
        if prev:
            self.train()
        return y

    def recognize_batch(self, xs, recog_args, char_list, rnnlm=None):
        """E2E beam search.

        :param ndarray xs: input acoustic feature (T, D)
        :param Namespace recog_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        prev = self.training
        self.eval()
        ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)
        # subsample frame
        xs = [xx[:: self.subsample[0], :] for xx in xs]
        xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
        xs_pad = pad_list(xs, 0.0)
        # 0. Frontend
        if self.frontend is not None:
            hs_pad, hlens, mask = self.frontend(xs_pad, ilens)
            hlens_n = [None] * self.num_spkrs
            for i in range(self.num_spkrs):
                hs_pad[i], hlens_n[i] = self.feature_transform(hs_pad[i], hlens)
            hlens = hlens_n
        else:
            hs_pad, hlens = xs_pad, ilens
        # 1. Encoder
        if not isinstance(hs_pad, list):  # single-channel multi-speaker input x
            hs_pad, hlens, _ = self.enc(hs_pad, hlens)
        else:  # multi-channel multi-speaker input x
            for i in range(self.num_spkrs):
                hs_pad[i], hlens[i], _ = self.enc(hs_pad[i], hlens[i])
        # calculate log P(z_t|X) for CTC scores
        if recog_args.ctc_weight > 0.0:
            lpz = [self.ctc.log_softmax(hs_pad[i]) for i in range(self.num_spkrs)]
            normalize_score = False
        else:
            lpz = None
            normalize_score = True
        # 2. decoder
        # NOTE(review): as in recognize(), lpz[i] is indexed even when lpz is
        # None (ctc_weight == 0.0) — confirm.
        y = [
            self.dec.recognize_beam_batch(
                hs_pad[i],
                hlens[i],
                lpz[i],
                recog_args,
                char_list,
                rnnlm,
                normalize_score=normalize_score,
                strm_idx=i,
            )
            for i in range(self.num_spkrs)
        ]
        if prev:
            self.train()
        return y

    def enhance(self, xs):
        """Forward only the frontend stage.

        :param ndarray xs: input acoustic feature (T, C, F)
        """
        if self.frontend is None:
            raise RuntimeError("Frontend doesn't exist")
        prev = self.training
        self.eval()
        ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)
        # subsample frame
        xs = [xx[:: self.subsample[0], :] for xx in xs]
        xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
        xs_pad = pad_list(xs, 0.0)
        enhanced, hlensm, mask = self.frontend(xs_pad, ilens)
        if prev:
            self.train()
        # a list/tuple result means one enhanced stream per speaker
        if isinstance(enhanced, (tuple, list)):
            enhanced = list(enhanced)
            mask = list(mask)
            for idx in range(len(enhanced)):  # number of speakers
                enhanced[idx] = enhanced[idx].cpu().numpy()
                mask[idx] = mask[idx].cpu().numpy()
            return enhanced, mask, ilens
        return enhanced.cpu().numpy(), mask.cpu().numpy(), ilens

    def calculate_all_attentions(self, xs_pad, ilens, ys_pad):
        """E2E attention calculation.

        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad:
            batch of padded character id sequence tensor (B, num_spkrs, Lmax)
        :return: attention weights with the following shape,
            1) multi-head case => attention weights (B, H, Lmax, Tmax),
            2) other case => attention weights (B, Lmax, Tmax).
        :rtype: float ndarray
        """
        with torch.no_grad():
            # 0. Frontend
            if self.frontend is not None:
                hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
                hlens_n = [None] * self.num_spkrs
                for i in range(self.num_spkrs):
                    hs_pad[i], hlens_n[i] = self.feature_transform(hs_pad[i], hlens)
                hlens = hlens_n
            else:
                hs_pad, hlens = xs_pad, ilens
            # 1. Encoder
            if not isinstance(hs_pad, list):  # single-channel multi-speaker input x
                hs_pad, hlens, _ = self.enc(hs_pad, hlens)
            else:  # multi-channel multi-speaker input x
                for i in range(self.num_spkrs):
                    hs_pad[i], hlens[i], _ = self.enc(hs_pad[i], hlens[i])
            # Permutation
            ys_pad = ys_pad.transpose(0, 1)  # (num_spkrs, B, Lmax)
            # for small speaker counts, align references via the PIT-best
            # CTC permutation before visualizing the attentions
            if self.num_spkrs <= 3:
                loss_ctc = torch.stack(
                    [
                        self.ctc(
                            hs_pad[i // self.num_spkrs],
                            hlens[i // self.num_spkrs],
                            ys_pad[i % self.num_spkrs],
                        )
                        for i in range(self.num_spkrs**2)
                    ],
                    1,
                )  # (B, num_spkrs^2)
                loss_ctc, min_perm = self.pit.pit_process(loss_ctc)
                for i in range(ys_pad.size(1)):  # B
                    ys_pad[:, i] = ys_pad[min_perm[i], i]
            # 2. Decoder
            att_ws = [
                self.dec.calculate_all_attentions(
                    hs_pad[i], hlens[i], ys_pad[i], strm_idx=i
                )
                for i in range(self.num_spkrs)
            ]
        return att_ws
class EncoderMix(torch.nn.Module):
    """Encoder module for the case of multi-speaker mixture speech.

    The mixture is first processed by a shared mixture encoder, then split
    into one speaker-differentiating (SD) branch per speaker, and finally
    each branch goes through a shared recognition encoder.

    :param str etype: type of encoder network
    :param int idim: number of dimensions of encoder network
    :param int elayers_sd:
        number of layers of speaker differentiate part in encoder network
    :param int elayers_rec:
        number of layers of shared recognition part in encoder network
    :param int eunits: number of lstm units of encoder network
    :param int eprojs: number of projection units of encoder network
    :param np.ndarray subsample: list of subsampling numbers
    :param float dropout: dropout rate
    :param int num_spkrs: number of speakers
    :param int in_channel: number of input channels
    """

    def __init__(
        self,
        etype,
        idim,
        elayers_sd,
        elayers_rec,
        eunits,
        eprojs,
        subsample,
        dropout,
        num_spkrs=2,
        in_channel=1,
    ):
        """Initialize the encoder of single-channel multi-speaker ASR."""
        super(EncoderMix, self).__init__()
        # strip the "vgg" prefix / "p" (projection) suffix to get the RNN type
        rnn_typ = etype.lstrip("vgg").rstrip("p")
        if rnn_typ not in ["lstm", "gru", "blstm", "bgru"]:
            logging.error("Error: need to specify an appropriate encoder architecture")
        # only the VGG + projected-RNN layout ("vgg...p") is supported here
        if etype.startswith("vgg") and etype[-1] == "p":
            self.enc_mix = torch.nn.ModuleList([VGG2L(in_channel)])
            vgg_odim = get_vgg2l_odim(idim, in_channel=in_channel)
            # one SD branch per speaker; subsampling is split between the
            # SD layers and the shared recognition layers
            self.enc_sd = torch.nn.ModuleList(
                [
                    torch.nn.ModuleList(
                        [
                            RNNP(
                                vgg_odim,
                                elayers_sd,
                                eunits,
                                eprojs,
                                subsample[: elayers_sd + 1],
                                dropout,
                                typ=rnn_typ,
                            )
                        ]
                    )
                    for _ in range(num_spkrs)
                ]
            )
            self.enc_rec = torch.nn.ModuleList(
                [
                    RNNP(
                        eprojs,
                        elayers_rec,
                        eunits,
                        eprojs,
                        subsample[elayers_sd:],
                        dropout,
                        typ=rnn_typ,
                    )
                ]
            )
            logging.info(f"Use CNN-VGG + B{rnn_typ.upper()}P for encoder")
        else:
            logging.error(
                f"Error: need to specify an appropriate encoder architecture. "
                f"Illegal name {etype}"
            )
            sys.exit()
        self.num_spkrs = num_spkrs

    def forward(self, xs_pad, ilens):
        """Encodermix forward.

        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :return: list: batch of hidden state sequences [num_spkrs x (B, Tmax, eprojs)]
        :rtype: torch.Tensor
        """
        # shared mixture encoder
        for mix_layer in self.enc_mix:
            xs_pad, ilens, _ = mix_layer(xs_pad, ilens)
        # fan out into one stream per speaker
        hs_sd = [xs_pad for _ in range(self.num_spkrs)]
        lens_sd = [ilens for _ in range(self.num_spkrs)]
        for spkr in range(self.num_spkrs):
            # speaker-differentiate encoder for this stream
            for sd_layer in self.enc_sd[spkr]:
                hs_sd[spkr], lens_sd[spkr], _ = sd_layer(hs_sd[spkr], lens_sd[spkr])
            # shared recognition encoder
            for rec_layer in self.enc_rec:
                hs_sd[spkr], lens_sd[spkr], _ = rec_layer(hs_sd[spkr], lens_sd[spkr])
        # zero out padded frames (all streams share the same lengths)
        pad_mask = to_device(xs_pad, make_pad_mask(lens_sd[0]).unsqueeze(-1))
        return [h.masked_fill(pad_mask, 0.0) for h in hs_sd], lens_sd, None
def encoder_for(args, idim, subsample):
    """Construct the encoder for multi-speaker ASR.

    When a frontend is in use the mixture has already been separated into one
    stream per speaker, so the ordinary single-speaker encoder is reused;
    otherwise a single-channel multi-speaker :class:`EncoderMix` is built.
    """
    # getattr keeps compatibility with configs lacking `use_frontend`
    if getattr(args, "use_frontend", False):
        # with frontend, the mixed speech are separated as streams for each speaker
        return encoder_for_single(args, idim, subsample)
    return EncoderMix(
        args.etype,
        idim,
        args.elayers_sd,
        args.elayers,
        args.eunits,
        args.eprojs,
        subsample,
        args.dropout_rate,
        num_spkrs=args.num_spkrs,
    )
| 30,819 | 36.13253 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_st_transformer.py | # Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer speech recognition model (pytorch)."""
import logging
import math
from argparse import Namespace
import numpy
import torch
from espnet.nets.e2e_asr_common import ErrorCalculator as ASRErrorCalculator
from espnet.nets.e2e_asr_common import end_detect
from espnet.nets.e2e_mt_common import ErrorCalculator as MTErrorCalculator
from espnet.nets.pytorch_backend.ctc import CTC
from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD
from espnet.nets.pytorch_backend.e2e_st import Reporter
from espnet.nets.pytorch_backend.nets_utils import (
get_subsample,
make_non_pad_mask,
pad_list,
th_accuracy,
)
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.argument import ( # noqa: H301
add_arguments_transformer_common,
)
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask, target_mask
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
from espnet.nets.st_interface import STInterface
from espnet.utils.fill_missing_args import fill_missing_args
class E2E(STInterface, torch.nn.Module):
"""E2E module.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
"""Add arguments."""
group = parser.add_argument_group("transformer model setting")
group = add_arguments_transformer_common(group)
return parser
@property
def attention_plot_class(self):
"""Return PlotAttentionReport."""
return PlotAttentionReport
def get_total_subsampling_factor(self):
"""Get total subsampling factor."""
return self.encoder.conv_subsampling_factor * int(numpy.prod(self.subsample))
    def __init__(self, idim, odim, args, ignore_id=-1):
        """Construct an E2E object.

        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        :param int ignore_id: label id ignored by the loss/accuracy (padding)
        """
        torch.nn.Module.__init__(self)
        # fill missing arguments for compatibility
        args = fill_missing_args(args, self.add_arguments)
        if args.transformer_attn_dropout_rate is None:
            args.transformer_attn_dropout_rate = args.dropout_rate
        # main ST encoder over source speech features
        self.encoder = Encoder(
            idim=idim,
            selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            conv_wshare=args.wshare,
            conv_kernel_length=args.ldconv_encoder_kernel_length,
            conv_usebias=args.ldconv_usebias,
            linear_units=args.eunits,
            num_blocks=args.elayers,
            input_layer=args.transformer_input_layer,
            dropout_rate=args.dropout_rate,
            positional_dropout_rate=args.dropout_rate,
            attention_dropout_rate=args.transformer_attn_dropout_rate,
        )
        # main ST decoder producing target-language tokens
        self.decoder = Decoder(
            odim=odim,
            selfattention_layer_type=args.transformer_decoder_selfattn_layer_type,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            conv_wshare=args.wshare,
            conv_kernel_length=args.ldconv_decoder_kernel_length,
            conv_usebias=args.ldconv_usebias,
            linear_units=args.dunits,
            num_blocks=args.dlayers,
            dropout_rate=args.dropout_rate,
            positional_dropout_rate=args.dropout_rate,
            self_attention_dropout_rate=args.transformer_attn_dropout_rate,
            src_attention_dropout_rate=args.transformer_attn_dropout_rate,
        )
        self.pad = 0  # use <blank> for padding
        self.sos = odim - 1
        self.eos = odim - 1
        self.odim = odim
        self.ignore_id = ignore_id
        self.subsample = get_subsample(args, mode="st", arch="transformer")
        self.reporter = Reporter()
        self.criterion = LabelSmoothingLoss(
            self.odim,
            self.ignore_id,
            args.lsm_weight,
            args.transformer_length_normalized_loss,
        )
        # submodule for ASR task
        # only needed when the auxiliary ASR attention loss is active
        self.mtlalpha = args.mtlalpha
        self.asr_weight = args.asr_weight
        if self.asr_weight > 0 and args.mtlalpha < 1:
            self.decoder_asr = Decoder(
                odim=odim,
                attention_dim=args.adim,
                attention_heads=args.aheads,
                linear_units=args.dunits,
                num_blocks=args.dlayers,
                dropout_rate=args.dropout_rate,
                positional_dropout_rate=args.dropout_rate,
                self_attention_dropout_rate=args.transformer_attn_dropout_rate,
                src_attention_dropout_rate=args.transformer_attn_dropout_rate,
            )
        # submodule for MT task
        self.mt_weight = args.mt_weight
        if self.mt_weight > 0:
            self.encoder_mt = Encoder(
                idim=odim,
                attention_dim=args.adim,
                attention_heads=args.aheads,
                linear_units=args.dunits,
                num_blocks=args.dlayers,
                input_layer="embed",
                dropout_rate=args.dropout_rate,
                positional_dropout_rate=args.dropout_rate,
                attention_dropout_rate=args.transformer_attn_dropout_rate,
                padding_idx=0,
            )
        self.reset_parameters(args)  # NOTE: place after the submodule initialization
        self.adim = args.adim  # used for CTC (equal to d_model)
        if self.asr_weight > 0 and args.mtlalpha > 0.0:
            self.ctc = CTC(
                odim, args.adim, args.dropout_rate, ctc_type=args.ctc_type, reduce=True
            )
        else:
            self.ctc = None
        # translation error calculator
        self.error_calculator = MTErrorCalculator(
            args.char_list, args.sym_space, args.sym_blank, args.report_bleu
        )
        # recognition error calculator
        self.error_calculator_asr = ASRErrorCalculator(
            args.char_list,
            args.sym_space,
            args.sym_blank,
            args.report_cer,
            args.report_wer,
        )
        self.rnnlm = None
        # multilingual E2E-ST related
        self.multilingual = getattr(args, "multilingual", False)
        self.replace_sos = getattr(args, "replace_sos", False)
def reset_parameters(self, args):
"""Initialize parameters."""
initialize(self, args.transformer_init)
if self.mt_weight > 0:
torch.nn.init.normal_(
self.encoder_mt.embed[0].weight, mean=0, std=args.adim**-0.5
)
torch.nn.init.constant_(self.encoder_mt.embed[0].weight[self.pad], 0)
    def forward(self, xs_pad, ilens, ys_pad, ys_pad_src):
        """E2E forward.

        :param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of source sequences (B)
        :param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
        :param torch.Tensor ys_pad_src: batch of padded target sequences (B, Lmax)
        :return: ctc loss value
        :rtype: torch.Tensor
        :return: attention loss value
        :rtype: torch.Tensor
        :return: accuracy in attention decoder
        :rtype: float
        """
        # 0. Extract target language ID
        tgt_lang_ids = None
        if self.multilingual:
            tgt_lang_ids = ys_pad[:, 0:1]
            ys_pad = ys_pad[:, 1:]  # remove target language ID in the beginning
        # 1. forward encoder
        xs_pad = xs_pad[:, : max(ilens)]  # for data parallel
        src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2)
        hs_pad, hs_mask = self.encoder(xs_pad, src_mask)
        # 2. forward decoder
        ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
        # replace <sos> with target language ID
        if self.replace_sos:
            ys_in_pad = torch.cat([tgt_lang_ids, ys_in_pad[:, 1:]], dim=1)
        ys_mask = target_mask(ys_in_pad, self.ignore_id)
        pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
        # 3. compute ST loss
        loss_att = self.criterion(pred_pad, ys_out_pad)
        self.acc = th_accuracy(
            pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id
        )
        # 4. compute corpus-level bleu in a mini-batch
        if self.training:
            self.bleu = None
        else:
            ys_hat = pred_pad.argmax(dim=-1)
            self.bleu = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
        # 5. compute auxiliary ASR loss
        loss_asr_att, acc_asr, loss_asr_ctc, cer_ctc, cer, wer = self.forward_asr(
            hs_pad, hs_mask, ys_pad_src
        )
        # 6. compute auxiliary MT loss
        loss_mt, acc_mt = 0.0, None
        if self.mt_weight > 0:
            loss_mt, acc_mt = self.forward_mt(
                ys_pad_src, ys_in_pad, ys_out_pad, ys_mask
            )
        # total loss: ST weighted by the remainder after ASR/MT weights;
        # within ASR, mtlalpha interpolates CTC vs. attention
        asr_ctc_weight = self.mtlalpha
        self.loss = (
            (1 - self.asr_weight - self.mt_weight) * loss_att
            + self.asr_weight
            * (asr_ctc_weight * loss_asr_ctc + (1 - asr_ctc_weight) * loss_asr_att)
            + self.mt_weight * loss_mt
        )
        loss_asr_data = float(
            asr_ctc_weight * loss_asr_ctc + (1 - asr_ctc_weight) * loss_asr_att
        )
        loss_mt_data = None if self.mt_weight == 0 else float(loss_mt)
        loss_st_data = float(loss_att)
        loss_data = float(self.loss)
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_asr_data,
                loss_mt_data,
                loss_st_data,
                acc_asr,
                acc_mt,
                self.acc,
                cer_ctc,
                cer,
                wer,
                self.bleu,
                loss_data,
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
def forward_asr(self, hs_pad, hs_mask, ys_pad):
"""Forward pass in the auxiliary ASR task.
:param torch.Tensor hs_pad: batch of padded source sequences (B, Tmax, idim)
:param torch.Tensor hs_mask: batch of input token mask (B, Lmax)
:param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
:return: ASR attention loss value
:rtype: torch.Tensor
:return: accuracy in ASR attention decoder
:rtype: float
:return: ASR CTC loss value
:rtype: torch.Tensor
:return: character error rate from CTC prediction
:rtype: float
:return: character error rate from attetion decoder prediction
:rtype: float
:return: word error rate from attetion decoder prediction
:rtype: float
"""
loss_att, loss_ctc = 0.0, 0.0
acc = None
cer, wer = None, None
cer_ctc = None
if self.asr_weight == 0:
return loss_att, acc, loss_ctc, cer_ctc, cer, wer
# attention
if self.mtlalpha < 1:
ys_in_pad_asr, ys_out_pad_asr = add_sos_eos(
ys_pad, self.sos, self.eos, self.ignore_id
)
ys_mask_asr = target_mask(ys_in_pad_asr, self.ignore_id)
pred_pad, _ = self.decoder_asr(ys_in_pad_asr, ys_mask_asr, hs_pad, hs_mask)
loss_att = self.criterion(pred_pad, ys_out_pad_asr)
acc = th_accuracy(
pred_pad.view(-1, self.odim),
ys_out_pad_asr,
ignore_label=self.ignore_id,
)
if not self.training:
ys_hat_asr = pred_pad.argmax(dim=-1)
cer, wer = self.error_calculator_asr(ys_hat_asr.cpu(), ys_pad.cpu())
# CTC
if self.mtlalpha > 0:
batch_size = hs_pad.size(0)
hs_len = hs_mask.view(batch_size, -1).sum(1)
loss_ctc = self.ctc(hs_pad.view(batch_size, -1, self.adim), hs_len, ys_pad)
if not self.training:
ys_hat_ctc = self.ctc.argmax(
hs_pad.view(batch_size, -1, self.adim)
).data
cer_ctc = self.error_calculator_asr(
ys_hat_ctc.cpu(), ys_pad.cpu(), is_ctc=True
)
# for visualization
self.ctc.softmax(hs_pad)
return loss_att, acc, loss_ctc, cer_ctc, cer, wer
def forward_mt(self, xs_pad, ys_in_pad, ys_out_pad, ys_mask):
"""Forward pass in the auxiliary MT task.
:param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
:param torch.Tensor ys_in_pad: batch of padded target sequences (B, Lmax)
:param torch.Tensor ys_out_pad: batch of padded target sequences (B, Lmax)
:param torch.Tensor ys_mask: batch of input token mask (B, Lmax)
:return: MT loss value
:rtype: torch.Tensor
:return: accuracy in MT decoder
:rtype: float
"""
loss, acc = 0.0, None
if self.mt_weight == 0:
return loss, acc
ilens = torch.sum(xs_pad != self.ignore_id, dim=1).cpu().numpy()
# NOTE: xs_pad is padded with -1
xs = [x[x != self.ignore_id] for x in xs_pad] # parse padded xs
xs_zero_pad = pad_list(xs, self.pad) # re-pad with zero
xs_zero_pad = xs_zero_pad[:, : max(ilens)] # for data parallel
src_mask = (
make_non_pad_mask(ilens.tolist()).to(xs_zero_pad.device).unsqueeze(-2)
)
hs_pad, hs_mask = self.encoder_mt(xs_zero_pad, src_mask)
pred_pad, _ = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
loss = self.criterion(pred_pad, ys_out_pad)
acc = th_accuracy(
pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id
)
return loss, acc
def scorers(self):
"""Scorers."""
return dict(decoder=self.decoder)
def encode(self, x):
"""Encode source acoustic features.
:param ndarray x: source acoustic feature (T, D)
:return: encoder outputs
:rtype: torch.Tensor
"""
self.eval()
x = torch.as_tensor(x).unsqueeze(0)
enc_output, _ = self.encoder(x, None)
return enc_output.squeeze(0)
    def translate(
        self,
        x,
        trans_args,
        char_list=None,
    ):
        """Translate input speech with beam search.

        Runs the encoder once, then performs a step-synchronous beam search
        over the attention decoder.  If no hypothesis survives, decoding is
        retried recursively with a smaller ``minlenratio``.

        :param ndnarray x: input acoustic feature (B, T, D) or (T, D)
        :param Namespace trans_args: argument Namespace containing options
        :param list char_list: list of characters
        :return: N-best decoding results
        :rtype: list
        """
        # prepare sos (replaced by the target-language ID when replace_sos is set)
        if getattr(trans_args, "tgt_lang", False):
            if self.replace_sos:
                y = char_list.index(trans_args.tgt_lang)
        else:
            y = self.sos
        logging.info("<sos> index: " + str(y))
        logging.info("<sos> mark: " + char_list[y])

        logging.info("input lengths: " + str(x.shape[0]))
        enc_output = self.encode(x).unsqueeze(0)

        h = enc_output

        logging.info("encoder output lengths: " + str(h.size(1)))
        # search params
        beam = trans_args.beam_size
        penalty = trans_args.penalty

        # maxlenratio == 0 means "up to the full encoder length"
        if trans_args.maxlenratio == 0:
            maxlen = h.size(1)
        else:
            # maxlen >= 1
            maxlen = max(1, int(trans_args.maxlenratio * h.size(1)))
        minlen = int(trans_args.minlenratio * h.size(1))
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))

        # initialize hypothesis
        hyp = {"score": 0.0, "yseq": [y]}
        hyps = [hyp]
        ended_hyps = []

        for i in range(maxlen):
            logging.debug("position " + str(i))

            # batchfy: stack all live hypotheses so the decoder runs once per step
            ys = h.new_zeros((len(hyps), i + 1), dtype=torch.int64)
            for j, hyp in enumerate(hyps):
                ys[j, :] = torch.tensor(hyp["yseq"])
            ys_mask = subsequent_mask(i + 1).unsqueeze(0).to(h.device)

            local_scores = self.decoder.forward_one_step(
                ys, ys_mask, h.repeat([len(hyps), 1, 1])
            )[0]

            hyps_best_kept = []
            for j, hyp in enumerate(hyps):
                local_best_scores, local_best_ids = torch.topk(
                    local_scores[j : j + 1], beam, dim=1
                )

                # NOTE: the inner loop deliberately rebinds j; the outer j was
                # already consumed by the topk slice above.
                for j in range(beam):
                    new_hyp = {}
                    new_hyp["score"] = hyp["score"] + float(local_best_scores[0, j])
                    new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"]))
                    new_hyp["yseq"][: len(hyp["yseq"])] = hyp["yseq"]
                    new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[0, j])
                    # will be (2 x beam) hyps at most
                    hyps_best_kept.append(new_hyp)

                hyps_best_kept = sorted(
                    hyps_best_kept, key=lambda x: x["score"], reverse=True
                )[:beam]

            # sort and get nbest
            hyps = hyps_best_kept
            logging.debug("number of pruned hypothes: " + str(len(hyps)))
            if char_list is not None:
                logging.debug(
                    "best hypo: "
                    + "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]])
                )

            # add eos in the final loop to avoid that there are no ended hyps
            if i == maxlen - 1:
                logging.info("adding <eos> in the last position in the loop")
                for hyp in hyps:
                    hyp["yseq"].append(self.eos)

            # add ended hypotheses to a final list, and remove them from current ones
            # (this will be a problem, number of hyps < beam)
            remained_hyps = []
            for hyp in hyps:
                if hyp["yseq"][-1] == self.eos:
                    # only store the sequence that has more than minlen outputs
                    # also add penalty
                    if len(hyp["yseq"]) > minlen:
                        hyp["score"] += (i + 1) * penalty
                        ended_hyps.append(hyp)
                else:
                    remained_hyps.append(hyp)

            # end detection
            if end_detect(ended_hyps, i) and trans_args.maxlenratio == 0.0:
                logging.info("end detected at %d", i)
                break

            hyps = remained_hyps
            if len(hyps) > 0:
                logging.debug("remeined hypothes: " + str(len(hyps)))
            else:
                logging.info("no hypothesis. Finish decoding.")
                break

            if char_list is not None:
                for hyp in hyps:
                    logging.debug(
                        "hypo: " + "".join([char_list[int(x)] for x in hyp["yseq"][1:]])
                    )

            logging.debug("number of ended hypothes: " + str(len(ended_hyps)))

        nbest_hyps = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[
            : min(len(ended_hyps), trans_args.nbest)
        ]

        # check number of hypotheses
        if len(nbest_hyps) == 0:
            logging.warning(
                "there is no N-best results, perform translation "
                "again with smaller minlenratio."
            )
            # should copy because Namespace will be overwritten globally
            trans_args = Namespace(**vars(trans_args))
            trans_args.minlenratio = max(0.0, trans_args.minlenratio - 0.1)
            # retry recursively with a relaxed minimum-length constraint
            return self.translate(x, trans_args, char_list)

        logging.info("total log probability: " + str(nbest_hyps[0]["score"]))
        logging.info(
            "normalized log probability: "
            + str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"]))
        )
        return nbest_hyps
def calculate_all_attentions(self, xs_pad, ilens, ys_pad, ys_pad_src):
"""E2E attention calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:param torch.Tensor ys_pad_src:
batch of padded token id sequence tensor (B, Lmax)
:return: attention weights (B, H, Lmax, Tmax)
:rtype: float ndarray
"""
self.eval()
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad, ys_pad_src)
ret = dict()
for name, m in self.named_modules():
if (
isinstance(m, MultiHeadedAttention) and m.attn is not None
): # skip MHA for submodules
ret[name] = m.attn.cpu().numpy()
self.train()
return ret
def calculate_all_ctc_probs(self, xs_pad, ilens, ys_pad, ys_pad_src):
"""E2E CTC probability calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:param torch.Tensor ys_pad_src:
batch of padded token id sequence tensor (B, Lmax)
:return: CTC probability (B, Tmax, vocab)
:rtype: float ndarray
"""
ret = None
if self.asr_weight == 0 or self.mtlalpha == 0:
return ret
self.eval()
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad, ys_pad_src)
ret = None
for name, m in self.named_modules():
if isinstance(m, CTC) and m.probs is not None:
ret = m.probs.cpu().numpy()
self.train()
return ret
| 22,657 | 37.534014 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/streaming/window.py | import torch
# TODO(pzelasko): Currently allows half-streaming only;
# needs streaming attention decoder implementation
class WindowStreamingE2E(object):
    """Window-based streaming wrapper around an E2E ASR model.

    Runs the encoder and CTC incrementally on windows of input frames;
    the attention decoder is only run offline over the accumulated
    encoder states (half-streaming).

    :param E2E e2e: E2E ASR object
    :param recog_args: arguments for "recognize" method of E2E
    """

    def __init__(self, e2e, recog_args, rnnlm=None):
        self._e2e = e2e
        self._recog_args = recog_args
        self._char_list = e2e.char_list
        self._rnnlm = rnnlm

        self._e2e.eval()  # inference mode only

        # Offset of frames already handed to the decoder.
        self._offset = 0
        # Encoder recurrent state carried across successive windows.
        self._previous_encoder_recurrent_state = None
        self._encoder_states = []
        self._ctc_posteriors = []
        self._last_recognition = None

        assert (
            self._recog_args.ctc_weight > 0.0
        ), "WindowStreamingE2E works only with combined CTC and attention decoders."

    def accept_input(self, x):
        """Call this method each time a new batch of input is available."""
        h, ilen = self._e2e.subsample_frames(x)

        # Streaming encoder: resume from the previous window's recurrent state.
        h, _, self._previous_encoder_recurrent_state = self._e2e.enc(
            h.unsqueeze(0), ilen, self._previous_encoder_recurrent_state
        )
        self._encoder_states.append(h.squeeze(0))

        # CTC posteriors for the incoming audio
        self._ctc_posteriors.append(self._e2e.ctc.log_softmax(h).squeeze(0))

    def _input_window_for_decoder(self, use_all=False):
        # Concatenate accumulated encoder states / CTC posteriors for decoding.
        if use_all:
            return (
                torch.cat(self._encoder_states, dim=0),
                torch.cat(self._ctc_posteriors, dim=0),
            )

        def select_unprocessed_windows(window_tensors):
            last_offset = self._offset
            offset_traversed = 0
            selected_windows = []
            for es in window_tensors:
                if offset_traversed > last_offset:
                    selected_windows.append(es)
                    continue
                # NOTE(review): states are stored as (T, D) after squeeze(0),
                # so es.size(1) is the feature dim here — confirm whether
                # size(0) (time) was intended; also self._offset is never
                # advanced anywhere in this class.
                offset_traversed += es.size(1)
            return torch.cat(selected_windows, dim=0)

        return (
            select_unprocessed_windows(self._encoder_states),
            select_unprocessed_windows(self._ctc_posteriors),
        )

    def decode_with_attention_offline(self):
        """Run the attention decoder offline.

        Works even if the previous layers (encoder and CTC decoder) were
        being run in the online mode.
        This method should be run after all the audio has been consumed.
        This is used mostly to compare the results between offline
        and online implementation of the previous layers.
        """
        h, lpz = self._input_window_for_decoder(use_all=True)

        return self._e2e.dec.recognize_beam(
            h, lpz, self._recog_args, self._char_list, self._rnnlm
        )
| 2,768 | 32.768293 | 84 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/streaming/segment.py | import numpy as np
import torch
class SegmentStreamingE2E(object):
    """Segment-based streaming decoder for an E2E ASR model.

    Uses greedy CTC output to detect speech onsets and blank runs to
    detect segment ends, then decodes each detected segment with the
    attention decoder.

    :param E2E e2e: E2E ASR object
    :param recog_args: arguments for "recognize" method of E2E
    """

    def __init__(self, e2e, recog_args, rnnlm=None):
        self._e2e = e2e
        self._recog_args = recog_args
        self._char_list = e2e.char_list
        self._rnnlm = rnnlm

        self._e2e.eval()  # inference mode only

        # Locate the blank symbol index used for CTC-based silence detection.
        self._blank_idx_in_char_list = -1
        for idx in range(len(self._char_list)):
            if self._char_list[idx] == self._e2e.blank:
                self._blank_idx_in_char_list = idx
                break

        self._subsampling_factor = np.prod(e2e.subsample)
        # _activates: 1 while inside a detected speech segment, else 0.
        self._activates = 0
        # Number of consecutive blank frames observed (segment-end detector).
        self._blank_dur = 0

        self._previous_input = []
        self._previous_encoder_recurrent_state = None
        self._encoder_states = []
        self._ctc_posteriors = []

        assert (
            self._recog_args.batchsize <= 1
        ), "SegmentStreamingE2E works only with batch size <= 1"
        assert (
            "b" not in self._e2e.etype
        ), "SegmentStreamingE2E works only with uni-directional encoders"

    def accept_input(self, x):
        """Call this method each time a new batch of input is available.

        Returns the decoded hypothesis for a finished segment, or None if
        no segment end was detected yet.
        """
        self._previous_input.extend(x)
        h, ilen = self._e2e.subsample_frames(x)

        # Run encoder and apply greedy search on CTC softmax output
        h, _, self._previous_encoder_recurrent_state = self._e2e.enc(
            h.unsqueeze(0), ilen, self._previous_encoder_recurrent_state
        )
        z = self._e2e.ctc.argmax(h).squeeze(0)

        # Onset detection: first non-blank CTC label activates the segment.
        if self._activates == 0 and z[0] != self._blank_idx_in_char_list:
            self._activates = 1

            # Rerun encoder with zero state at onset of detection,
            # including a margin of frames before the onset.
            tail_len = self._subsampling_factor * (
                self._recog_args.streaming_onset_margin + 1
            )
            h, ilen = self._e2e.subsample_frames(
                np.reshape(
                    self._previous_input[-tail_len:], [-1, len(self._previous_input[0])]
                )
            )
            h, _, self._previous_encoder_recurrent_state = self._e2e.enc(
                h.unsqueeze(0), ilen, None
            )

        hyp = None
        if self._activates == 1:
            self._encoder_states.extend(h.squeeze(0))
            self._ctc_posteriors.extend(self._e2e.ctc.log_softmax(h).squeeze(0))

            # Track the length of the current run of blank labels.
            if z[0] == self._blank_idx_in_char_list:
                self._blank_dur += 1
            else:
                self._blank_dur = 0

            # A long enough blank run ends the segment and triggers decoding.
            if self._blank_dur >= self._recog_args.streaming_min_blank_dur:
                seg_len = (
                    len(self._encoder_states)
                    - self._blank_dur
                    + self._recog_args.streaming_offset_margin
                )
                if seg_len > 0:
                    # Run decoder with a detected segment
                    h = torch.cat(self._encoder_states[:seg_len], dim=0).view(
                        -1, self._encoder_states[0].size(0)
                    )
                    if self._recog_args.ctc_weight > 0.0:
                        lpz = torch.cat(self._ctc_posteriors[:seg_len], dim=0).view(
                            -1, self._ctc_posteriors[0].size(0)
                        )
                        if self._recog_args.batchsize > 0:
                            lpz = lpz.unsqueeze(0)
                        normalize_score = False
                    else:
                        lpz = None
                        normalize_score = True

                    # batchsize == 0 selects the non-batched beam search API.
                    if self._recog_args.batchsize == 0:
                        hyp = self._e2e.dec.recognize_beam(
                            h, lpz, self._recog_args, self._char_list, self._rnnlm
                        )
                    else:
                        hlens = torch.tensor([h.shape[0]])
                        hyp = self._e2e.dec.recognize_beam_batch(
                            h.unsqueeze(0),
                            hlens,
                            lpz,
                            self._recog_args,
                            self._char_list,
                            self._rnnlm,
                            normalize_score=normalize_score,
                        )[0]

                    # Reset the segment state, keeping a margin of raw input
                    # frames so the next onset rerun has left context.
                    self._activates = 0
                    self._blank_dur = 0

                    tail_len = (
                        self._subsampling_factor
                        * self._recog_args.streaming_onset_margin
                    )
                    self._previous_input = self._previous_input[-tail_len:]
                    self._encoder_states = []
                    self._ctc_posteriors = []

        return hyp
| 4,774 | 35.730769 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/joint_network.py | """Transducer joint network implementation."""
import torch
from espnet.nets.pytorch_backend.nets_utils import get_activation
class JointNetwork(torch.nn.Module):
    """Transducer joint network module.

    Combines encoder and decoder representations by projecting both into a
    shared joint space, summing them, applying a non-linearity and
    projecting to the output vocabulary.

    Args:
        joint_output_size: Joint network output dimension.
        encoder_output_size: Encoder output dimension.
        decoder_output_size: Decoder output dimension.
        joint_space_size: Dimension of joint space.
        joint_activation_type: Type of activation for joint network.

    """

    def __init__(
        self,
        joint_output_size: int,
        encoder_output_size: int,
        decoder_output_size: int,
        joint_space_size: int,
        # NOTE: was annotated `int`, but this is an activation *name*
        # handed to get_activation (see docstring above).
        joint_activation_type: str,
    ):
        """Joint network initializer."""
        super().__init__()

        self.lin_enc = torch.nn.Linear(encoder_output_size, joint_space_size)
        # The decoder projection omits its bias: both projections are summed
        # before the activation, so lin_enc's bias already covers it.
        self.lin_dec = torch.nn.Linear(
            decoder_output_size, joint_space_size, bias=False
        )

        self.lin_out = torch.nn.Linear(joint_space_size, joint_output_size)

        self.joint_activation = get_activation(joint_activation_type)

    def forward(
        self,
        enc_out: torch.Tensor,
        dec_out: torch.Tensor,
        is_aux: bool = False,
        quantization: bool = False,
    ) -> torch.Tensor:
        """Joint computation of encoder and decoder hidden state sequences.

        Args:
            enc_out: Expanded encoder output state sequences (B, T, 1, D_enc)
            dec_out: Expanded decoder output state sequences (B, 1, U, D_dec)
            is_aux: Whether auxiliary tasks in used.
            quantization: Whether dynamic quantization is used.

        Returns:
            joint_out: Joint output state sequences. (B, T, U, D_out)

        """
        if is_aux:
            # Auxiliary branch: enc_out is already projected to the joint space.
            joint_out = self.joint_activation(enc_out + self.lin_dec(dec_out))
        elif quantization:
            # Dynamically-quantized linear layers expect batched inputs, hence
            # the temporary leading axis added before and stripped after.
            joint_out = self.joint_activation(
                self.lin_enc(enc_out.unsqueeze(0)) + self.lin_dec(dec_out.unsqueeze(0))
            )

            return self.lin_out(joint_out)[0]
        else:
            joint_out = self.joint_activation(
                self.lin_enc(enc_out) + self.lin_dec(dec_out)
            )
        joint_out = self.lin_out(joint_out)

        return joint_out
| 2,306 | 30.175676 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/rnn_encoder.py | """RNN encoder implementation for Transducer model.
These classes are based on the ones in espnet.nets.pytorch_backend.rnn.encoders,
and modified to output intermediate representation based given list of layers as input.
To do so, RNN class rely on a stack of 1-layer LSTM instead of a multi-layer LSTM.
The additional outputs are intended to be used with Transducer auxiliary tasks.
"""
from argparse import Namespace
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from espnet.nets.e2e_asr_common import get_vgg2l_odim
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask, to_device
class RNNP(torch.nn.Module):
    """RNN with projection layer module.

    Args:
        idim: Input dimension.
        rnn_type: RNNP units type.
        elayers: Number of RNNP layers.
        eunits: Number of units ((2 * eunits) if bidirectional).
        eprojs: Number of projection units.
        subsample: Subsampling rate per layer.
        dropout_rate: Dropout rate for RNNP layers.
        aux_output_layers: Layer IDs for auxiliary RNNP output sequences.

    """

    def __init__(
        self,
        idim: int,
        rnn_type: str,
        elayers: int,
        eunits: int,
        eprojs: int,
        subsample: np.ndarray,
        dropout_rate: float,
        aux_output_layers: List = [],
    ):
        """Initialize RNNP module."""
        super().__init__()

        # A leading "b" in rnn_type (e.g. "blstm") selects bidirectional units.
        bidir = rnn_type[0] == "b"

        # Stack of 1-layer RNNs (instead of one multi-layer RNN) so that
        # intermediate layer outputs can be exposed for auxiliary tasks.
        for i in range(elayers):
            if i == 0:
                input_dim = idim
            else:
                input_dim = eprojs

            rnn_layer = torch.nn.LSTM if "lstm" in rnn_type else torch.nn.GRU
            rnn = rnn_layer(
                input_dim, eunits, num_layers=1, bidirectional=bidir, batch_first=True
            )

            # Attribute names ("birnn%d"/"rnn%d", "bt%d") are part of the
            # checkpoint state-dict layout — do not rename.
            setattr(self, "%s%d" % ("birnn" if bidir else "rnn", i), rnn)

            # Per-layer projection back to eprojs dimensions.
            if bidir:
                setattr(self, "bt%d" % i, torch.nn.Linear(2 * eunits, eprojs))
            else:
                setattr(self, "bt%d" % i, torch.nn.Linear(eunits, eprojs))

        self.dropout = torch.nn.Dropout(p=dropout_rate)

        self.elayers = elayers
        self.eunits = eunits
        self.subsample = subsample
        self.rnn_type = rnn_type
        self.bidir = bidir

        self.aux_output_layers = aux_output_layers

    def forward(
        self,
        rnn_input: torch.Tensor,
        rnn_len: torch.Tensor,
        prev_states: Optional[List[torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]:
        """RNNP forward.

        Args:
            rnn_input: RNN input sequences. (B, T, D_in)
            rnn_len: RNN input sequences lengths. (B,)
            prev_states: RNN hidden states. [N x (B, T, D_proj)]

        Returns:
            rnn_output : RNN output sequences. (B, T, D_proj)
                         with or without intermediate RNN output sequences.
                         ((B, T, D_proj), [N x (B, T, D_proj)])
            rnn_len: RNN output sequences lengths. (B,)
            current_states: RNN hidden states. [N x (B, T, D_proj)]

        """
        aux_rnn_outputs = []
        aux_rnn_lens = []
        current_states = []

        for layer in range(self.elayers):
            if not isinstance(rnn_len, torch.Tensor):
                rnn_len = torch.tensor(rnn_len)

            pack_rnn_input = pack_padded_sequence(
                rnn_input, rnn_len.cpu(), batch_first=True
            )

            rnn = getattr(self, ("birnn" if self.bidir else "rnn") + str(layer))

            # Keep RNN weights contiguous in memory (cuDNN requirement after
            # e.g. DataParallel replication).
            if isinstance(rnn, (torch.nn.LSTM, torch.nn.GRU)):
                rnn.flatten_parameters()

            if prev_states is not None and rnn.bidirectional:
                # Backward-direction states must not be carried across chunks.
                prev_states = reset_backward_rnn_state(prev_states)

            pack_rnn_output, states = rnn(
                pack_rnn_input, hx=None if prev_states is None else prev_states[layer]
            )
            current_states.append(states)

            pad_rnn_output, rnn_len = pad_packed_sequence(
                pack_rnn_output, batch_first=True
            )

            # Frame subsampling between layers (subsample[0] is the input rate).
            sub = self.subsample[layer + 1]
            if sub > 1:
                pad_rnn_output = pad_rnn_output[:, ::sub]
                rnn_len = torch.tensor([int(i + 1) // sub for i in rnn_len])

            projection_layer = getattr(self, "bt%d" % layer)
            proj_rnn_output = projection_layer(
                pad_rnn_output.contiguous().view(-1, pad_rnn_output.size(2))
            )
            rnn_output = proj_rnn_output.view(
                pad_rnn_output.size(0), pad_rnn_output.size(1), -1
            )

            if layer in self.aux_output_layers:
                aux_rnn_outputs.append(rnn_output)
                aux_rnn_lens.append(rnn_len)

            if layer < self.elayers - 1:
                rnn_output = torch.tanh(self.dropout(rnn_output))

            rnn_input = rnn_output

        if aux_rnn_outputs:
            return (
                (rnn_output, aux_rnn_outputs),
                (rnn_len, aux_rnn_lens),
                current_states,
            )
        else:
            return rnn_output, rnn_len, current_states
class RNN(torch.nn.Module):
    """RNN module.

    Args:
        idim: Input dimension.
        rnn_type: RNN units type.
        elayers: Number of RNN layers.
        eunits: Number of units ((2 * eunits) if bidirectional)
        eprojs: Number of final projection units.
        dropout_rate: Dropout rate for RNN layers.
        aux_output_layers: List of layer IDs for auxiliary RNN output sequences.

    """

    def __init__(
        self,
        idim: int,
        rnn_type: str,
        elayers: int,
        eunits: int,
        eprojs: int,
        dropout_rate: float,
        aux_output_layers: List = [],
    ):
        """Initialize RNN module."""
        super().__init__()

        # A leading "b" in rnn_type (e.g. "blstm") selects bidirectional units.
        bidir = rnn_type[0] == "b"

        # Stack of 1-layer RNNs (instead of one multi-layer RNN) so that
        # intermediate layer outputs can be exposed for auxiliary tasks.
        for i in range(elayers):
            if i == 0:
                input_dim = idim
            else:
                input_dim = eunits

            rnn_layer = torch.nn.LSTM if "lstm" in rnn_type else torch.nn.GRU
            rnn = rnn_layer(
                input_dim, eunits, num_layers=1, bidirectional=bidir, batch_first=True
            )

            # Attribute names ("birnn%d"/"rnn%d") are part of the checkpoint
            # state-dict layout — do not rename.
            setattr(self, "%s%d" % ("birnn" if bidir else "rnn", i), rnn)

        self.dropout = torch.nn.Dropout(p=dropout_rate)

        self.elayers = elayers
        self.eunits = eunits
        self.eprojs = eprojs
        self.rnn_type = rnn_type
        self.bidir = bidir

        # Single shared projection applied to the final and auxiliary outputs.
        self.l_last = torch.nn.Linear(eunits, eprojs)

        self.aux_output_layers = aux_output_layers

    def forward(
        self,
        rnn_input: torch.Tensor,
        rnn_len: torch.Tensor,
        prev_states: Optional[List[torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]:
        """RNN forward.

        Args:
            rnn_input: RNN input sequences. (B, T, D_in)
            rnn_len: RNN input sequences lengths. (B,)
            prev_states: RNN hidden states. [N x (B, T, D_proj)]

        Returns:
            rnn_output : RNN output sequences. (B, T, D_proj)
                         with or without intermediate RNN output sequences.
                         ((B, T, D_proj), [N x (B, T, D_proj)])
            rnn_len: RNN output sequences lengths. (B,)
            current_states: RNN hidden states. [N x (B, T, D_proj)]

        """
        aux_rnn_outputs = []
        aux_rnn_lens = []
        current_states = []

        for layer in range(self.elayers):
            if not isinstance(rnn_len, torch.Tensor):
                rnn_len = torch.tensor(rnn_len)

            pack_rnn_input = pack_padded_sequence(
                rnn_input, rnn_len.cpu(), batch_first=True
            )

            rnn = getattr(self, ("birnn" if self.bidir else "rnn") + str(layer))

            # Keep RNN weights contiguous in memory (cuDNN requirement after
            # e.g. DataParallel replication).
            if isinstance(rnn, (torch.nn.LSTM, torch.nn.GRU)):
                rnn.flatten_parameters()

            if prev_states is not None and rnn.bidirectional:
                # Backward-direction states must not be carried across chunks.
                prev_states = reset_backward_rnn_state(prev_states)

            pack_rnn_output, states = rnn(
                pack_rnn_input, hx=None if prev_states is None else prev_states[layer]
            )
            current_states.append(states)

            rnn_output, rnn_len = pad_packed_sequence(pack_rnn_output, batch_first=True)

            if self.bidir:
                # Sum (not concatenate) the forward and backward directions.
                rnn_output = (
                    rnn_output[:, :, : self.eunits] + rnn_output[:, :, self.eunits :]
                )

            if layer in self.aux_output_layers:
                aux_proj_rnn_output = torch.tanh(
                    self.l_last(rnn_output.contiguous().view(-1, rnn_output.size(2)))
                )
                aux_rnn_output = aux_proj_rnn_output.view(
                    rnn_output.size(0), rnn_output.size(1), -1
                )

                aux_rnn_outputs.append(aux_rnn_output)
                aux_rnn_lens.append(rnn_len)

            if layer < self.elayers - 1:
                rnn_input = self.dropout(rnn_output)

        proj_rnn_output = torch.tanh(
            self.l_last(rnn_output.contiguous().view(-1, rnn_output.size(2)))
        )
        rnn_output = proj_rnn_output.view(rnn_output.size(0), rnn_output.size(1), -1)

        if aux_rnn_outputs:
            return (
                (rnn_output, aux_rnn_outputs),
                (rnn_len, aux_rnn_lens),
                current_states,
            )
        else:
            return rnn_output, rnn_len, current_states
def reset_backward_rnn_state(
    states: Union[torch.Tensor, List[Optional[torch.Tensor]]]
) -> Union[torch.Tensor, List[Optional[torch.Tensor]]]:
    """Set backward BRNN states to zeroes.

    In bidirectional RNN state tensors the backward-direction states occupy
    the odd layer indices; they are zeroed in place.

    Args:
        states: Encoder hidden states.

    Returns:
        states: Encoder hidden states with backward set to zero.

    """
    if not isinstance(states, list):
        states[1::2] = 0.0
        return states

    for layer_state in states:
        layer_state[1::2] = 0.0

    return states
class VGG2L(torch.nn.Module):
    """VGG-like module.

    Args:
        in_channel: number of input channels

    """

    def __init__(self, in_channel: int = 1):
        """Initialize VGG-like module."""
        super(VGG2L, self).__init__()

        # CNN layer (VGG motivated). Attribute names are part of the
        # checkpoint state-dict layout and are kept unchanged.
        self.conv1_1 = torch.nn.Conv2d(in_channel, 64, 3, stride=1, padding=1)
        self.conv1_2 = torch.nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.conv2_1 = torch.nn.Conv2d(64, 128, 3, stride=1, padding=1)
        self.conv2_2 = torch.nn.Conv2d(128, 128, 3, stride=1, padding=1)

        self.in_channel = in_channel

    def forward(
        self, feats: torch.Tensor, feats_len: torch.Tensor, **kwargs
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """VGG2L forward.

        Args:
            feats: Feature sequences. (B, F, D_feats)
            feats_len: Feature sequences lengths. (B, )

        Returns:
            vgg_out: VGG2L output sequences. (B, F // 4, 128 * D_feats // 4)
            vgg_out_len: VGG2L output sequences lengths. (B,)

        """
        # (B, F, D) -> (B, C_in, F, D // C_in): split features into channels.
        conv_in = feats.view(
            feats.size(0),
            feats.size(1),
            self.in_channel,
            feats.size(2) // self.in_channel,
        ).transpose(1, 2)

        # First VGG stage: two 3x3 convolutions + 2x2 ceil-mode max pooling.
        stage1 = F.max_pool2d(
            F.relu(self.conv1_2(F.relu(self.conv1_1(conv_in)))),
            2,
            stride=2,
            ceil_mode=True,
        )
        # Second VGG stage.
        stage2 = F.max_pool2d(
            F.relu(self.conv2_2(F.relu(self.conv2_1(stage1)))),
            2,
            stride=2,
            ceil_mode=True,
        )

        # (B, C, F', D') -> (B, F', C * D'): fold channels back into features.
        vgg_out = stage2.transpose(1, 2)
        vgg_out = vgg_out.contiguous().view(
            vgg_out.size(0), vgg_out.size(1), vgg_out.size(2) * vgg_out.size(3)
        )

        # Mirror the two ceil-halvings applied by the pooling layers.
        if torch.is_tensor(feats_len):
            feats_len = feats_len.cpu().numpy()
        else:
            feats_len = np.array(feats_len, dtype=np.float32)

        half_len = np.array(np.ceil(feats_len / 2), dtype=np.int64)
        vgg_out_len = np.array(
            np.ceil(np.array(half_len, dtype=np.float32) / 2), dtype=np.int64
        ).tolist()

        return vgg_out, vgg_out_len, None
class Encoder(torch.nn.Module):
    """Encoder module.

    Args:
        idim: Input dimension.
        etype: Encoder units type.
        elayers: Number of encoder layers.
        eunits: Number of encoder units per layer.
        eprojs: Number of projection units per layer.
        subsample: Subsampling rate per layer.
        dropout_rate: Dropout rate for encoder layers.
        aux_enc_output_layers: Layer IDs for auxiliary encoder output sequences.

    """

    def __init__(
        self,
        idim: int,
        etype: str,
        elayers: int,
        eunits: int,
        eprojs: int,
        subsample: np.ndarray,
        dropout_rate: float = 0.0,
        aux_enc_output_layers: List = [],
    ):
        """Initialize Encoder module."""
        super(Encoder, self).__init__()

        # Strip the optional "vgg" prefix and trailing "p" (projection flag)
        # to recover the bare RNN unit type, e.g. "vggblstmp" -> "blstm".
        rnn_type = etype.lstrip("vgg").rstrip("p")

        in_channel = 1
        if etype.startswith("vgg"):
            if etype[-1] == "p":
                self.enc = torch.nn.ModuleList(
                    [
                        VGG2L(in_channel),
                        RNNP(
                            get_vgg2l_odim(idim, in_channel=in_channel),
                            rnn_type,
                            elayers,
                            eunits,
                            eprojs,
                            subsample,
                            dropout_rate=dropout_rate,
                            aux_output_layers=aux_enc_output_layers,
                        ),
                    ]
                )
            else:
                self.enc = torch.nn.ModuleList(
                    [
                        VGG2L(in_channel),
                        RNN(
                            get_vgg2l_odim(idim, in_channel=in_channel),
                            rnn_type,
                            elayers,
                            eunits,
                            eprojs,
                            dropout_rate=dropout_rate,
                            aux_output_layers=aux_enc_output_layers,
                        ),
                    ]
                )

            # The two VGG max-pooling layers subsample time by a factor of 4.
            self.conv_subsampling_factor = 4
        else:
            if etype[-1] == "p":
                self.enc = torch.nn.ModuleList(
                    [
                        RNNP(
                            idim,
                            rnn_type,
                            elayers,
                            eunits,
                            eprojs,
                            subsample,
                            dropout_rate=dropout_rate,
                            aux_output_layers=aux_enc_output_layers,
                        )
                    ]
                )
            else:
                self.enc = torch.nn.ModuleList(
                    [
                        RNN(
                            idim,
                            rnn_type,
                            elayers,
                            eunits,
                            eprojs,
                            dropout_rate=dropout_rate,
                            aux_output_layers=aux_enc_output_layers,
                        )
                    ]
                )

            self.conv_subsampling_factor = 1

    def forward(
        self,
        feats: torch.Tensor,
        feats_len: torch.Tensor,
        prev_states: Optional[List[torch.Tensor]] = None,
    ):
        """Forward encoder.

        Args:
            feats: Feature sequences. (B, F, D_feats)
            feats_len: Feature sequences lengths. (B,)
            prev_states: Previous encoder hidden states. [N x (B, T, D_enc)]

        Returns:
            enc_out: Encoder output sequences. (B, T, D_enc)
                     with or without encoder intermediate output sequences.
                     ((B, T, D_enc), [N x (B, T, D_enc)])
            enc_out_len: Encoder output sequences lengths. (B,)
            current_states: Encoder hidden states. [N x (B, T, D_enc)]

        """
        if prev_states is None:
            prev_states = [None] * len(self.enc)
        assert len(prev_states) == len(self.enc)

        _enc_out = feats
        _enc_out_len = feats_len
        current_states = []
        # Chain the sub-modules (optional VGG front-end, then RNN/RNNP).
        for rnn_module, prev_state in zip(self.enc, prev_states):
            _enc_out, _enc_out_len, states = rnn_module(
                _enc_out,
                _enc_out_len,
                prev_states=prev_state,
            )
            current_states.append(states)

        if isinstance(_enc_out, tuple):
            # Auxiliary outputs requested: mask padded frames in every branch.
            enc_out, aux_enc_out = _enc_out[0], _enc_out[1]
            enc_out_len, aux_enc_out_len = _enc_out_len[0], _enc_out_len[1]

            enc_out_mask = to_device(enc_out, make_pad_mask(enc_out_len).unsqueeze(-1))
            enc_out = enc_out.masked_fill(enc_out_mask, 0.0)

            for i in range(len(aux_enc_out)):
                aux_mask = to_device(
                    aux_enc_out[i], make_pad_mask(aux_enc_out_len[i]).unsqueeze(-1)
                )
                aux_enc_out[i] = aux_enc_out[i].masked_fill(aux_mask, 0.0)

            return (
                (enc_out, aux_enc_out),
                (enc_out_len, aux_enc_out_len),
                current_states,
            )
        else:
            # Zero out padded frames so downstream modules ignore them.
            enc_out_mask = to_device(
                _enc_out, make_pad_mask(_enc_out_len).unsqueeze(-1)
            )

            return _enc_out.masked_fill(enc_out_mask, 0.0), _enc_out_len, current_states
def encoder_for(
    args: Namespace,
    idim: int,
    subsample: np.ndarray,
    aux_enc_output_layers: List = [],
) -> torch.nn.Module:
    """Instantiate a RNN encoder with specified arguments.

    Args:
        args: The model arguments.
        idim: Input dimension.
        subsample: Subsampling rate per layer.
        aux_enc_output_layers: Layer IDs for auxiliary encoder output sequences.

    Returns:
        : Encoder module.

    """
    # NOTE: the mutable default above is kept for interface compatibility;
    # it is only passed through, never mutated.
    enc_config = (idim, args.etype, args.elayers, args.eunits, args.eprojs, subsample)

    return Encoder(
        *enc_config,
        dropout_rate=args.dropout_rate,
        aux_enc_output_layers=aux_enc_output_layers,
    )
| 18,464 | 31.225131 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/rnn_decoder.py | """RNN decoder definition for Transducer model."""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from espnet.nets.transducer_decoder_interface import (
ExtendedHypothesis,
Hypothesis,
TransducerDecoderInterface,
)
class RNNDecoder(TransducerDecoderInterface, torch.nn.Module):
"""RNN decoder module for Transducer model.
Args:
odim: Output dimension.
dtype: Decoder units type.
dlayers: Number of decoder layers.
dunits: Number of decoder units per layer..
embed_dim: Embedding layer dimension.
dropout_rate: Dropout rate for decoder layers.
dropout_rate_embed: Dropout rate for embedding layer.
blank_id: Blank symbol ID.
"""
    def __init__(
        self,
        odim: int,
        dtype: str,
        dlayers: int,
        dunits: int,
        embed_dim: int,
        dropout_rate: float = 0.0,
        dropout_rate_embed: float = 0.0,
        blank_id: int = 0,
    ):
        """Transducer initializer."""
        super().__init__()

        # The blank label doubles as the embedding padding index.
        self.embed = torch.nn.Embedding(odim, embed_dim, padding_idx=blank_id)
        self.dropout_embed = torch.nn.Dropout(p=dropout_rate_embed)

        dec_net = torch.nn.LSTM if dtype == "lstm" else torch.nn.GRU

        # Stack of single-layer RNNs so per-layer hidden states can be sliced
        # and carried individually during decoding (see rnn_forward).
        self.decoder = torch.nn.ModuleList(
            [dec_net(embed_dim, dunits, 1, batch_first=True)]
        )
        self.dropout_dec = torch.nn.Dropout(p=dropout_rate)

        for _ in range(1, dlayers):
            self.decoder += [dec_net(dunits, dunits, 1, batch_first=True)]

        self.dlayers = dlayers
        self.dunits = dunits
        self.dtype = dtype
        self.odim = odim

        # Padding value used in label sequences.
        self.ignore_id = -1
        self.blank_id = blank_id

        self.multi_gpus = torch.cuda.device_count() > 1
def set_device(self, device: torch.device):
"""Set GPU device to use.
Args:
device: Device ID.
"""
self.device = device
def init_state(
self, batch_size: int
) -> Tuple[torch.Tensor, Optional[torch.tensor]]:
"""Initialize decoder states.
Args:
batch_size: Batch size.
Returns:
: Initial decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
"""
h_n = torch.zeros(
self.dlayers,
batch_size,
self.dunits,
device=self.device,
)
if self.dtype == "lstm":
c_n = torch.zeros(
self.dlayers,
batch_size,
self.dunits,
device=self.device,
)
return (h_n, c_n)
return (h_n, None)
def rnn_forward(
self,
sequence: torch.Tensor,
state: Tuple[torch.Tensor, Optional[torch.Tensor]],
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]:
"""Encode source label sequences.
Args:
sequence: RNN input sequences. (B, D_emb)
state: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
Returns:
sequence: RNN output sequences. (B, D_dec)
(h_next, c_next): Decoder hidden states. (N, B, D_dec), (N, B, D_dec))
"""
h_prev, c_prev = state
h_next, c_next = self.init_state(sequence.size(0))
for layer in range(self.dlayers):
if self.dtype == "lstm":
(
sequence,
(
h_next[layer : layer + 1],
c_next[layer : layer + 1],
),
) = self.decoder[layer](
sequence, hx=(h_prev[layer : layer + 1], c_prev[layer : layer + 1])
)
else:
sequence, h_next[layer : layer + 1] = self.decoder[layer](
sequence, hx=h_prev[layer : layer + 1]
)
sequence = self.dropout_dec(sequence)
return sequence, (h_next, c_next)
def forward(self, labels: torch.Tensor) -> torch.Tensor:
"""Encode source label sequences.
Args:
labels: Label ID sequences. (B, L)
Returns:
dec_out: Decoder output sequences. (B, T, U, D_dec)
"""
init_state = self.init_state(labels.size(0))
dec_embed = self.dropout_embed(self.embed(labels))
dec_out, _ = self.rnn_forward(dec_embed, init_state)
return dec_out
def score(
self, hyp: Hypothesis, cache: Dict[str, Any]
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]], torch.Tensor]:
"""One-step forward hypothesis.
Args:
hyp: Hypothesis.
cache: Pairs of (dec_out, state) for each label sequence. (key)
Returns:
dec_out: Decoder output sequence. (1, D_dec)
new_state: Decoder hidden states. ((N, 1, D_dec), (N, 1, D_dec))
label: Label ID for LM. (1,)
"""
label = torch.full((1, 1), hyp.yseq[-1], dtype=torch.long, device=self.device)
str_labels = "_".join(list(map(str, hyp.yseq)))
if str_labels in cache:
dec_out, dec_state = cache[str_labels]
else:
dec_emb = self.embed(label)
dec_out, dec_state = self.rnn_forward(dec_emb, hyp.dec_state)
cache[str_labels] = (dec_out, dec_state)
return dec_out[0][0], dec_state, label[0]
def batch_score(
self,
hyps: Union[List[Hypothesis], List[ExtendedHypothesis]],
dec_states: Tuple[torch.Tensor, Optional[torch.Tensor]],
cache: Dict[str, Any],
use_lm: bool,
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
"""One-step forward hypotheses.
Args:
hyps: Hypotheses.
states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
cache: Pairs of (dec_out, dec_states) for each label sequences. (keys)
use_lm: Whether to compute label ID sequences for LM.
Returns:
dec_out: Decoder output sequences. (B, D_dec)
dec_states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
lm_labels: Label ID sequences for LM. (B,)
"""
final_batch = len(hyps)
process = []
done = [None] * final_batch
for i, hyp in enumerate(hyps):
str_labels = "_".join(list(map(str, hyp.yseq)))
if str_labels in cache:
done[i] = cache[str_labels]
else:
process.append((str_labels, hyp.yseq[-1], hyp.dec_state))
if process:
labels = torch.LongTensor([[p[1]] for p in process], device=self.device)
p_dec_states = self.create_batch_states(
self.init_state(labels.size(0)), [p[2] for p in process]
)
dec_emb = self.embed(labels)
dec_out, new_states = self.rnn_forward(dec_emb, p_dec_states)
j = 0
for i in range(final_batch):
if done[i] is None:
state = self.select_state(new_states, j)
done[i] = (dec_out[j], state)
cache[process[j][0]] = (dec_out[j], state)
j += 1
dec_out = torch.cat([d[0] for d in done], dim=0)
dec_states = self.create_batch_states(dec_states, [d[1] for d in done])
if use_lm:
lm_labels = torch.LongTensor([h.yseq[-1] for h in hyps], device=self.device)
return dec_out, dec_states, lm_labels
return dec_out, dec_states, None
def select_state(
self, states: Tuple[torch.Tensor, Optional[torch.Tensor]], idx: int
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Get specified ID state from decoder hidden states.
Args:
states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
idx: State ID to extract.
Returns:
: Decoder hidden state for given ID.
((N, 1, D_dec), (N, 1, D_dec))
"""
return (
states[0][:, idx : idx + 1, :],
states[1][:, idx : idx + 1, :] if self.dtype == "lstm" else None,
)
def create_batch_states(
self,
states: Tuple[torch.Tensor, Optional[torch.Tensor]],
new_states: List[Tuple[torch.Tensor, Optional[torch.Tensor]]],
check_list: Optional[List] = None,
) -> List[Tuple[torch.Tensor, Optional[torch.Tensor]]]:
"""Create decoder hidden states.
Args:
states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
new_states: Decoder hidden states. [N x ((1, D_dec), (1, D_dec))]
Returns:
states: Decoder hidden states. ((N, B, D_dec), (N, B, D_dec))
"""
return (
torch.cat([s[0] for s in new_states], dim=1),
torch.cat([s[1] for s in new_states], dim=1)
if self.dtype == "lstm"
else None,
)
| 9,028 | 29.503378 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/vgg2l.py | """VGG2L module definition for custom encoder."""
from typing import Tuple, Union
import torch
class VGG2L(torch.nn.Module):
    """VGG2L module for custom encoder.

    Two VGG-style convolution stages (each: two 3x3 convolutions with ReLU,
    then max-pooling), followed by a linear projection to ``odim``.

    Args:
        idim: Input dimension.
        odim: Output dimension.
        pos_enc: Positional encoding class.

    """

    def __init__(self, idim: int, odim: int, pos_enc: torch.nn.Module = None):
        """Construct a VGG2L object."""
        super().__init__()

        def conv_stage(in_ch, out_ch, pool):
            # One VGG stage: conv-relu-conv-relu-pool.
            return [
                torch.nn.Conv2d(in_ch, out_ch, 3, stride=1, padding=1),
                torch.nn.ReLU(),
                torch.nn.Conv2d(out_ch, out_ch, 3, stride=1, padding=1),
                torch.nn.ReLU(),
                torch.nn.MaxPool2d(pool),
            ]

        self.vgg2l = torch.nn.Sequential(
            *conv_stage(1, 64, (3, 2)),
            *conv_stage(64, 128, (2, 2)),
        )

        # The frequency axis is halved twice by the (., 2) poolings.
        proj = torch.nn.Linear(128 * ((idim // 2) // 2), odim)

        if pos_enc is not None:
            self.output = torch.nn.Sequential(proj, pos_enc)
        else:
            self.output = proj

    def forward(
        self, feats: torch.Tensor, feats_mask: torch.Tensor
    ) -> Union[
        Tuple[torch.Tensor, torch.Tensor],
        Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],
    ]:
        """Forward VGG2L bottleneck.

        Args:
            feats: Feature sequences. (B, F, D_feats)
            feats_mask: Mask of feature sequences. (B, 1, F)

        Returns:
            vgg_output: VGG output sequences.
                (B, sub(F), D_out) or ((B, sub(F), D_out), (B, sub(F), D_att))
            vgg_mask: Mask of VGG output sequences. (B, 1, sub(F))

        """
        conv_out = self.vgg2l(feats.unsqueeze(1))

        batch, channels, frames, freq = conv_out.size()

        # Merge the channel and frequency axes before projecting.
        vgg_output = self.output(
            conv_out.transpose(1, 2).contiguous().view(batch, frames, channels * freq)
        )

        vgg_mask = (
            self.create_new_mask(feats_mask) if feats_mask is not None else feats_mask
        )

        return vgg_output, vgg_mask

    def create_new_mask(self, feats_mask: torch.Tensor) -> torch.Tensor:
        """Create a subsampled mask of feature sequences.

        Args:
            feats_mask: Mask of feature sequences. (B, 1, F)

        Returns:
            vgg_mask: Mask of VGG2L output sequences. (B, 1, sub(F))

        """
        vgg_mask = feats_mask

        # Mirror the two max-pooling time subsamplings (rates 3 then 2):
        # trim to a multiple of the rate, then take every rate-th frame.
        for rate in (3, 2):
            keep = vgg_mask.size(2) - (vgg_mask.size(2) % rate)
            vgg_mask = vgg_mask[:, :, :keep][:, :, ::rate]

        return vgg_mask
| 2,782 | 28.924731 | 81 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/utils.py | """Utility functions for Transducer models."""
import os
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.transducer_decoder_interface import ExtendedHypothesis, Hypothesis
def get_decoder_input(
    labels: torch.Tensor, blank_id: int, ignore_id: int
) -> torch.Tensor:
    """Prepare decoder input.

    Strip padding (``ignore_id``) from each label sequence, prepend the blank
    symbol, and re-pad the batch with ``blank_id``.

    Args:
        labels: Label ID sequences. (B, L)
        blank_id: Blank symbol ID.
        ignore_id: Padding symbol ID.

    Returns:
        decoder_input: Label ID sequences with blank prefix. (B, U)

    """
    blank = labels[0].new([blank_id])

    prefixed = [
        torch.cat([blank, seq[seq != ignore_id]], dim=0) for seq in labels
    ]

    return pad_list(prefixed, blank_id).to(labels.device)
def valid_aux_encoder_output_layers(
    aux_layer_id: List[int],
    enc_num_layers: int,
    use_symm_kl_div_loss: bool,
    subsample: List[int],
) -> List[int]:
    """Check whether provided auxiliary encoder layer IDs are valid.

    Return the IDs sorted in ascending order.

    Args:
        aux_layer_id: Auxiliary encoder layer IDs.
        enc_num_layers: Number of encoder layers.
        use_symm_kl_div_loss: Whether symmetric KL divergence loss is used.
        subsample: Subsampling rate per layer.

    Returns:
        valid: Valid list of auxiliary encoder layers.

    Raises:
        ValueError: If the argument is malformed, an ID is out of range, or
            subsampling between selected layers breaks the KL div. loss.

    """
    well_formed = (
        isinstance(aux_layer_id, list)
        and bool(aux_layer_id)
        and all(isinstance(layer, int) for layer in aux_layer_id)
    )

    if not well_formed:
        raise ValueError(
            "aux-transducer-loss-enc-output-layers option takes a list of layer IDs."
            " Correct argument format is: '[0, 1]'"
        )

    ordered = sorted(aux_layer_id, key=int, reverse=False)
    valid = [layer for layer in ordered if 0 <= layer < enc_num_layers]

    if ordered != valid:
        raise ValueError(
            "Provided argument for aux-transducer-loss-enc-output-layers is incorrect."
            " IDs should be between [0, %d]" % enc_num_layers
        )

    if use_symm_kl_div_loss:
        # The KL div. loss requires consecutive selected layers (including
        # the final one) to share the same time resolution.
        ordered += [enc_num_layers]

        for pos in range(1, len(ordered)):
            window = subsample[(ordered[pos - 1] + 1) : ordered[pos] + 1]

            if any(rate > 1 for rate in window):
                raise ValueError(
                    "Encoder layers %d and %d have different shape due to subsampling."
                    " Symmetric KL divergence loss doesn't cover such case for now."
                    % (ordered[pos - 1], ordered[pos])
                )

    return valid
def is_prefix(x: List[int], pref: List[int]) -> bool:
    """Check if pref is a strict prefix of x.

    Args:
        x: Label ID sequence.
        pref: Prefix label ID sequence.

    Returns:
        : True if pref is shorter than x and matches its leading elements.

    """
    return len(pref) < len(x) and all(p == q for p, q in zip(pref, x))
def subtract(
    x: List[ExtendedHypothesis], subset: List[ExtendedHypothesis]
) -> List[ExtendedHypothesis]:
    """Remove elements of subset if corresponding label ID sequence already exist in x.

    Args:
        x: Set of hypotheses.
        subset: Subset of x.

    Returns:
        final: New set of hypotheses.

    """
    subset_seqs = [sub.yseq for sub in subset]

    return [hyp for hyp in x if hyp.yseq not in subset_seqs]
def select_k_expansions(
    hyps: List[ExtendedHypothesis],
    topk_idxs: torch.Tensor,
    topk_logps: torch.Tensor,
    gamma: float,
) -> List[ExtendedHypothesis]:
    """Return K hypotheses candidates for expansion from a list of hypothesis.

    K candidates are selected according to the extended hypotheses probabilities
    and a prune-by-value method. Where K is equal to beam_size + beta.

    Args:
        hyps: Hypotheses.
        topk_idxs: Indices of candidates hypothesis.
        topk_logps: Log-probabilities for hypotheses expansions.
        gamma: Allowed logp difference for prune-by-value method.

    Return:
        k_expansions: Best K expansion hypotheses candidates.

    """
    k_expansions = []

    for i, hyp in enumerate(hyps):
        # Candidate expansions: (label ID, cumulative score).
        candidates = [
            (int(idx), hyp.score + float(logp))
            for idx, logp in zip(topk_idxs[i], topk_logps[i])
        ]
        best_score = max(score for _, score in candidates)

        # Prune-by-value: keep candidates within gamma of the best score,
        # sorted best-first.
        kept = [cand for cand in candidates if cand[1] >= (best_score - gamma)]
        k_expansions.append(sorted(kept, key=lambda cand: cand[1], reverse=True))

    return k_expansions
def select_lm_state(
    lm_states: Union[List[Any], Dict[str, Any]],
    idx: int,
    lm_layers: int,
    is_wordlm: bool,
) -> Union[List[Any], Dict[str, Any]]:
    """Get ID state from LM hidden states.

    Args:
        lm_states: LM hidden states.
        idx: LM state ID to extract.
        lm_layers: Number of LM layers.
        is_wordlm: Whether provided LM is a word-level LM.

    Returns:
        idx_state: LM hidden state for given ID.

    """
    # Word-level LM states are a flat list indexed by hypothesis.
    if is_wordlm:
        return lm_states[idx]

    return {
        "c": [lm_states["c"][layer][idx] for layer in range(lm_layers)],
        "h": [lm_states["h"][layer][idx] for layer in range(lm_layers)],
    }
def create_lm_batch_states(
    lm_states: Union[List[Any], Dict[str, Any]], lm_layers, is_wordlm: bool
) -> Union[List[Any], Dict[str, Any]]:
    """Create LM hidden states.

    Stack the per-hypothesis layer states into per-layer batch tensors.

    Args:
        lm_states: LM hidden states.
        lm_layers: Number of LM layers.
        is_wordlm: Whether provided LM is a word-level LM.

    Returns:
        new_states: LM hidden states.

    """
    # Word-level LM states need no batching.
    if is_wordlm:
        return lm_states

    return {
        key: [
            torch.stack([state[key][layer] for state in lm_states])
            for layer in range(lm_layers)
        ]
        for key in ("c", "h")
    }
def init_lm_state(lm_model: torch.nn.Module):
    """Initialize LM hidden states.

    Args:
        lm_model: LM module.

    Returns:
        lm_state: Initial LM hidden states.

    """
    lm_layers = len(lm_model.rnn)
    lm_units = lm_model.n_units

    # Match the device/dtype of the LM parameters.
    p = next(lm_model.parameters())

    def zero_states():
        return [
            torch.zeros(lm_units).to(device=p.device, dtype=p.dtype)
            for _ in range(lm_layers)
        ]

    lm_state = {"h": zero_states()}

    # LSTMs additionally carry a cell state.
    if lm_model.typ == "lstm":
        lm_state["c"] = zero_states()

    return lm_state
def recombine_hyps(hyps: List[Hypothesis]) -> List[Hypothesis]:
    """Recombine hypotheses with same label ID sequence.

    When two hypotheses share a label sequence, their scores are merged in
    log-space (logaddexp) into the first occurrence.

    Args:
        hyps: Hypotheses.

    Returns:
        final: Recombined hypotheses.

    """
    final = []

    for hyp in hyps:
        # NOTE: empty label sequences are skipped in the lookup, matching
        # the original filtering behavior.
        kept_seqs = [kept.yseq for kept in final if kept.yseq]

        if hyp.yseq in kept_seqs:
            pos = kept_seqs.index(hyp.yseq)
            final[pos].score = np.logaddexp(final[pos].score, hyp.score)
        else:
            final.append(hyp)

    return final
def pad_sequence(labels: List[int], pad_id: int) -> List[int]:
    """Left pad label ID sequences to a common length.

    Args:
        labels: Label ID sequences.
        pad_id: Padding symbol ID.

    Returns:
        final: Padded label ID sequences.

    """
    maxlen = max(len(seq) for seq in labels)

    return [[pad_id] * (maxlen - len(seq)) + seq for seq in labels]
def check_state(
    state: List[Optional[torch.Tensor]], max_len: int, pad_id: int
) -> List[Optional[torch.Tensor]]:
    """Check decoder hidden states and left pad or trim if necessary.

    Args:
        state: Decoder hidden states. [N x (?, D_dec)]
        max_len: maximum sequence length.
        pad_id: Padding symbol ID.

    Returns:
        final: Decoder hidden states. [N x (1, max_len, D_dec)]

    """
    if state is None or max_len < 1 or state[0].size(1) == max_len:
        return state

    curr_len = state[0].size(1)

    if curr_len > max_len:
        # Keep only the last max_len positions of each layer state (in place).
        trim_val = curr_len - max_len

        for i, s in enumerate(state):
            state[i] = s[:, trim_val:, :]

        return state

    # Left-pad each layer state with pad_id up to max_len.
    ddim = state[0].size(2)

    final = [
        state[0].data.new(1, max_len, ddim).fill_(pad_id) for _ in range(len(state))
    ]

    for i, s in enumerate(state):
        final[i][:, max_len - s.size(1) : max_len, :] = s

    return final
def check_batch_states(states, max_len, pad_id):
    """Check decoder hidden states and left pad or trim if necessary.

    Args:
        states: Decoder hidden states. [N x (B, ?, D_dec)]
        max_len: maximum sequence length.
        pad_id: Padding symbol ID.

    Returns:
        final: Decoder hidden states. [N x (B, max_len, dec_dim)]

    """
    final = states[0].data.new(len(states), max_len, states[0].size(1)).fill_(pad_id)

    for i, s in enumerate(states):
        curr_len = s.size(0)

        if curr_len < max_len:
            # Left pad: copy into the trailing positions.
            final[i, max_len - curr_len :, :] = s
        else:
            # Trim: keep the last max_len positions.
            final[i] = s[curr_len - max_len :, :]

    return final
def custom_torch_load(model_path: str, model: torch.nn.Module, training: bool = True):
    """Load Transducer model with training-only modules and parameters removed.

    Args:
        model_path: Model path.
        model: Transducer model.
        training: Whether training-only parameters should be kept.

    """
    loaded = torch.load(model_path, map_location=lambda storage, loc: storage)

    # Trainer snapshots wrap the state dict under a "model" key.
    if "snapshot" in os.path.basename(model_path):
        model_state_dict = loaded["model"]
    else:
        model_state_dict = loaded

    if not training:
        # Drop modules only used by the training-time auxiliary tasks.
        task_keys = ("mlp", "ctc_lin", "kl_div", "lm_lin", "error_calculator")

        model_state_dict = {
            k: v
            for k, v in model_state_dict.items()
            if not any(mod in k for mod in task_keys)
        }

    model.load_state_dict(model_state_dict)

    del model_state_dict
| 10,508 | 24.884236 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/custom_encoder.py | """Cutom encoder definition for transducer models."""
from typing import List, Tuple, Union
import torch
from espnet.nets.pytorch_backend.transducer.blocks import build_blocks
from espnet.nets.pytorch_backend.transducer.vgg2l import VGG2L
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling
class CustomEncoder(torch.nn.Module):
    """Custom encoder module for transducer models.

    The concrete layer stack is assembled by ``build_blocks`` from the
    architecture description in ``enc_arch``.

    Args:
        idim: Input dimension.
        enc_arch: Encoder block architecture (type and parameters).
        input_layer: Input layer type.
        repeat_block: Number of times blocks_arch is repeated.
        self_attn_type: Self-attention type.
        positional_encoding_type: Positional encoding type.
        positionwise_layer_type: Positionwise layer type.
        positionwise_activation_type: Positionwise activation type.
        conv_mod_activation_type: Convolutional module activation type.
        aux_enc_output_layers: Layer IDs for auxiliary encoder output sequences.
        input_layer_dropout_rate: Dropout rate for input layer.
        input_layer_pos_enc_dropout_rate: Dropout rate for input layer pos. enc.
        padding_idx: Padding symbol ID for embedding layer.

    """

    def __init__(
        self,
        idim: int,
        enc_arch: List,
        input_layer: str = "linear",
        repeat_block: int = 1,
        self_attn_type: str = "selfattn",
        positional_encoding_type: str = "abs_pos",
        positionwise_layer_type: str = "linear",
        positionwise_activation_type: str = "relu",
        conv_mod_activation_type: str = "relu",
        # NOTE(review): mutable default argument; safe only if callers never
        # mutate it in place — consider `None` + normalization.
        aux_enc_output_layers: List = [],
        input_layer_dropout_rate: float = 0.0,
        input_layer_pos_enc_dropout_rate: float = 0.0,
        padding_idx: int = -1,
    ):
        """Construct an CustomEncoder object."""
        super().__init__()
        # build_blocks returns: input layer module, encoder block stack,
        # encoder output dimension, and input-layer subsampling factor.
        (
            self.embed,
            self.encoders,
            self.enc_out,
            self.conv_subsampling_factor,
        ) = build_blocks(
            "encoder",
            idim,
            input_layer,
            enc_arch,
            repeat_block=repeat_block,
            self_attn_type=self_attn_type,
            positional_encoding_type=positional_encoding_type,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_activation_type=positionwise_activation_type,
            conv_mod_activation_type=conv_mod_activation_type,
            input_layer_dropout_rate=input_layer_dropout_rate,
            input_layer_pos_enc_dropout_rate=input_layer_pos_enc_dropout_rate,
            padding_idx=padding_idx,
        )
        self.after_norm = LayerNorm(self.enc_out)
        # Total number of encoder blocks after repetition.
        self.n_blocks = len(enc_arch) * repeat_block
        self.aux_enc_output_layers = aux_enc_output_layers

    def forward(
        self,
        feats: torch.Tensor,
        mask: torch.Tensor,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor], torch.Tensor]]:
        """Encode feature sequences.

        Args:
            feats: Feature sequences. (B, F, D_feats)
            mask: Feature mask sequences. (B, 1, F)

        Returns:
            enc_out: Encoder output sequences. (B, T, D_enc) with/without
                Auxiliary encoder output sequences. (B, T, D_enc_aux)
            enc_out_mask: Mask for encoder output sequences. (B, 1, T) with/without
                Mask for auxiliary encoder output sequences. (B, T, D_enc_aux)

        """
        # Convolutional input layers also subsample the mask; other input
        # layers take features only.
        if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):
            enc_out, mask = self.embed(feats, mask)
        else:
            enc_out = self.embed(feats)
        if self.aux_enc_output_layers:
            aux_custom_outputs = []
            aux_custom_lens = []
            # Run blocks one by one so intermediate outputs can be tapped
            # at the requested auxiliary layer IDs.
            for b in range(self.n_blocks):
                enc_out, mask = self.encoders[b](enc_out, mask)
                if b in self.aux_enc_output_layers:
                    # Blocks with relative pos. enc. return (output, pos_emb).
                    if isinstance(enc_out, tuple):
                        aux_custom_output = enc_out[0]
                    else:
                        aux_custom_output = enc_out
                    aux_custom_outputs.append(self.after_norm(aux_custom_output))
                    aux_custom_lens.append(mask)
        else:
            enc_out, mask = self.encoders(enc_out, mask)
        if isinstance(enc_out, tuple):
            enc_out = enc_out[0]
        enc_out = self.after_norm(enc_out)
        if self.aux_enc_output_layers:
            return (enc_out, aux_custom_outputs), (mask, aux_custom_lens)
        return enc_out, mask
| 4,661 | 34.861538 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/conv1d_nets.py | """Convolution networks definition for custom archictecture."""
from typing import Optional, Tuple, Union
import torch
class Conv1d(torch.nn.Module):
    """1D convolution module for custom encoder.

    Args:
        idim: Input dimension.
        odim: Output dimension.
        kernel_size: Size of the convolving kernel.
        stride: Stride of the convolution.
        dilation: Spacing between the kernel points.
        groups: Number of blocked connections from input channels to output channels.
        bias: Whether to add a learnable bias to the output.
        batch_norm: Whether to use batch normalization after convolution.
        relu: Whether to use a ReLU activation after convolution.
        dropout_rate: Dropout rate.

    """

    def __init__(
        self,
        idim: int,
        odim: int,
        kernel_size: Union[int, Tuple],
        stride: Union[int, Tuple] = 1,
        dilation: Union[int, Tuple] = 1,
        groups: Union[int, Tuple] = 1,
        bias: bool = True,
        batch_norm: bool = False,
        relu: bool = True,
        dropout_rate: float = 0.0,
    ):
        """Construct a Conv1d module object."""
        super().__init__()
        self.conv = torch.nn.Conv1d(
            idim,
            odim,
            kernel_size,
            stride=stride,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        if relu:
            self.relu_func = torch.nn.ReLU()
        if batch_norm:
            self.bn = torch.nn.BatchNorm1d(odim)
        self.relu = relu
        self.batch_norm = batch_norm
        # Number of frames removed by the (unpadded) convolution; used to
        # shorten the mask and positional embeddings accordingly.
        self.padding = dilation * (kernel_size - 1)
        self.stride = stride
        # Projection applied to the subsampled positional embedding.
        # NOTE(review): Linear(idim, odim) assumes D_att == idim — confirm.
        self.out_pos = torch.nn.Linear(idim, odim)

    def forward(
        self,
        sequence: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
        mask: torch.Tensor,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]], torch.Tensor]:
        """Forward ConvEncoderLayer module object.

        Args:
            sequence: Input sequences.
                (B, T, D_in)
                or (B, T, D_in), (B, 2 * (T - 1), D_att)
            mask: Mask of input sequences. (B, 1, T)

        Returns:
            sequence: Output sequences.
                (B, sub(T), D_out)
                or (B, sub(T), D_out), (B, 2 * (sub(T) - 1), D_att)
            mask: Mask of output sequences. (B, 1, sub(T))

        """
        # Inputs from relative pos. enc. layers arrive as (seq, pos_embed).
        if isinstance(sequence, tuple):
            sequence, pos_embed = sequence[0], sequence[1]
        else:
            sequence, pos_embed = sequence, None
        # Conv1d expects channel-first (B, D, T).
        sequence = sequence.transpose(1, 2)
        sequence = self.conv(sequence)
        if self.batch_norm:
            sequence = self.bn(sequence)
        sequence = self.dropout(sequence)
        if self.relu:
            sequence = self.relu_func(sequence)
        sequence = sequence.transpose(1, 2)
        mask = self.create_new_mask(mask)
        if pos_embed is not None:
            pos_embed = self.create_new_pos_embed(pos_embed)
            return (sequence, pos_embed), mask
        return sequence, mask

    def create_new_mask(self, mask: torch.Tensor) -> torch.Tensor:
        """Create new mask.

        Args:
            mask: Mask of input sequences. (B, 1, T)

        Returns:
            mask: Mask of output sequences. (B, 1, sub(T))

        """
        if mask is None:
            return mask
        # Drop the frames lost to the convolution, then apply the stride.
        if self.padding != 0:
            mask = mask[:, :, : -self.padding]
        mask = mask[:, :, :: self.stride]
        return mask

    def create_new_pos_embed(self, pos_embed: torch.Tensor) -> torch.Tensor:
        """Create new positional embedding vector.

        Args:
            pos_embed: Input sequences positional embedding.
                (B, 2 * (T - 1), D_att)

        Return:
            pos_embed: Output sequences positional embedding.
                (B, 2 * (sub(T) - 1), D_att)

        """
        # The relative embedding holds positive offsets first, then negative
        # ones; subsample each half like the sequence itself.
        pos_embed_positive = pos_embed[:, : pos_embed.size(1) // 2 + 1, :]
        pos_embed_negative = pos_embed[:, pos_embed.size(1) // 2 :, :]
        if self.padding != 0:
            pos_embed_positive = pos_embed_positive[:, : -self.padding, :]
            pos_embed_negative = pos_embed_negative[:, : -self.padding, :]
        pos_embed_positive = pos_embed_positive[:, :: self.stride, :]
        pos_embed_negative = pos_embed_negative[:, :: self.stride, :]
        # Re-join halves; offset 0 is shared so skip the first negative row.
        pos_embed = torch.cat([pos_embed_positive, pos_embed_negative[:, 1:, :]], dim=1)
        return self.out_pos(pos_embed)
class CausalConv1d(torch.nn.Module):
    """1D causal convolution module for custom decoder.

    The convolution left-pads internally and the forward pass trims the
    right-hand side, so position u never attends to inputs later than u.

    Args:
        idim: Input dimension.
        odim: Output dimension.
        kernel_size: Size of the convolving kernel.
        stride: Stride of the convolution.
        dilation: Spacing between the kernel points.
        groups: Number of blocked connections from input channels to output channels.
        bias: Whether to add a learnable bias to the output.
        batch_norm: Whether to apply batch normalization.
        relu: Whether to pass final output through ReLU activation.
        dropout_rate: Dropout rate.

    """

    def __init__(
        self,
        idim: int,
        odim: int,
        kernel_size: int,
        stride: int = 1,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        batch_norm: bool = False,
        relu: bool = True,
        dropout_rate: float = 0.0,
    ):
        """Construct a CausalConv1d object."""
        super().__init__()

        self.padding = (kernel_size - 1) * dilation

        self.causal_conv1d = torch.nn.Conv1d(
            idim,
            odim,
            kernel_size=kernel_size,
            stride=stride,
            padding=self.padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )

        self.dropout = torch.nn.Dropout(p=dropout_rate)

        self.batch_norm = batch_norm
        self.relu = relu

        if self.batch_norm:
            self.bn = torch.nn.BatchNorm1d(odim)

        if self.relu:
            self.relu_func = torch.nn.ReLU()

    def forward(
        self,
        sequence: torch.Tensor,
        mask: torch.Tensor,
        cache: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward CausalConv1d for custom decoder.

        Args:
            sequence: CausalConv1d input sequences. (B, U, D_in)
            mask: Mask of CausalConv1d input sequences. (B, 1, U)
            cache: Unused; kept for interface compatibility.

        Returns:
            sequence: CausalConv1d output sequences. (B, sub(U), D_out)
            mask: Mask of CausalConv1d output sequences. (B, 1, sub(U))

        """
        # Conv1d expects channel-first (B, D, U).
        out = self.causal_conv1d(sequence.transpose(1, 2))

        if self.padding != 0:
            # Remove the right-side frames produced by symmetric padding,
            # which makes the convolution causal.
            out = out[:, :, : -self.padding]

        if self.batch_norm:
            out = self.bn(out)

        out = self.dropout(out)

        if self.relu:
            out = self.relu_func(out)

        return out.transpose(1, 2), mask
| 7,246 | 27.644269 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/transducer_tasks.py | """Module implementing Transducer main and auxiliary tasks."""
from typing import Any, List, Optional, Tuple
import torch
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.pytorch_backend.transducer.joint_network import JointNetwork
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
class TransducerTasks(torch.nn.Module):
"""Transducer tasks module."""
    def __init__(
        self,
        encoder_dim: int,
        decoder_dim: int,
        joint_dim: int,
        output_dim: int,
        joint_activation_type: str = "tanh",
        transducer_loss_weight: float = 1.0,
        ctc_loss: bool = False,
        ctc_loss_weight: float = 0.5,
        ctc_loss_dropout_rate: float = 0.0,
        lm_loss: bool = False,
        lm_loss_weight: float = 0.5,
        lm_loss_smoothing_rate: float = 0.0,
        aux_transducer_loss: bool = False,
        aux_transducer_loss_weight: float = 0.2,
        aux_transducer_loss_mlp_dim: int = 320,
        aux_trans_loss_mlp_dropout_rate: float = 0.0,
        symm_kl_div_loss: bool = False,
        symm_kl_div_loss_weight: float = 0.2,
        fastemit_lambda: float = 0.0,
        blank_id: int = 0,
        ignore_id: int = -1,
        training: bool = False,
    ):
        """Initialize module for Transducer tasks.

        Args:
            encoder_dim: Encoder outputs dimension.
            decoder_dim: Decoder outputs dimension.
            joint_dim: Joint space dimension.
            output_dim: Output dimension.
            joint_activation_type: Type of activation for joint network.
            transducer_loss_weight: Weight for main transducer loss.
            ctc_loss: Compute CTC loss.
            ctc_loss_weight: Weight of CTC loss.
            ctc_loss_dropout_rate: Dropout rate for CTC loss inputs.
            lm_loss: Compute LM loss.
            lm_loss_weight: Weight of LM loss.
            lm_loss_smoothing_rate: Smoothing rate for LM loss' label smoothing.
            aux_transducer_loss: Compute auxiliary transducer loss.
            aux_transducer_loss_weight: Weight of auxiliary transducer loss.
            aux_transducer_loss_mlp_dim: Hidden dimension for aux. transducer MLP.
            aux_trans_loss_mlp_dropout_rate: Dropout rate for aux. transducer MLP.
            symm_kl_div_loss: Compute KL divergence loss.
            symm_kl_div_loss_weight: Weight of KL divergence loss.
            fastemit_lambda: Regularization parameter for FastEmit.
            blank_id: Blank symbol ID.
            ignore_id: Padding symbol ID.
            training: Whether the model was initializated in training or inference mode.

        """
        super().__init__()
        # Auxiliary tasks only exist during training; disable them all when
        # the model is built for inference.
        if not training:
            ctc_loss, lm_loss, aux_transducer_loss, symm_kl_div_loss = (
                False,
                False,
                False,
                False,
            )
        self.joint_network = JointNetwork(
            output_dim, encoder_dim, decoder_dim, joint_dim, joint_activation_type
        )
        if training:
            # Imported lazily so inference does not require warprnnt_pytorch.
            from warprnnt_pytorch import RNNTLoss

            self.transducer_loss = RNNTLoss(
                blank=blank_id,
                reduction="sum",
                fastemit_lambda=fastemit_lambda,
            )
        if ctc_loss:
            self.ctc_lin = torch.nn.Linear(encoder_dim, output_dim)
            self.ctc_loss = torch.nn.CTCLoss(
                blank=blank_id,
                reduction="none",
                zero_infinity=True,
            )
        if aux_transducer_loss:
            # Maps auxiliary encoder outputs into the joint space.
            self.mlp = torch.nn.Sequential(
                torch.nn.Linear(encoder_dim, aux_transducer_loss_mlp_dim),
                torch.nn.LayerNorm(aux_transducer_loss_mlp_dim),
                torch.nn.Dropout(p=aux_trans_loss_mlp_dropout_rate),
                torch.nn.ReLU(),
                torch.nn.Linear(aux_transducer_loss_mlp_dim, joint_dim),
            )
            if symm_kl_div_loss:
                self.kl_div = torch.nn.KLDivLoss(reduction="sum")
        if lm_loss:
            self.lm_lin = torch.nn.Linear(decoder_dim, output_dim)
            self.label_smoothing_loss = LabelSmoothingLoss(
                output_dim, ignore_id, lm_loss_smoothing_rate, normalize_length=False
            )
        self.output_dim = output_dim
        self.transducer_loss_weight = transducer_loss_weight
        self.use_ctc_loss = ctc_loss
        self.ctc_loss_weight = ctc_loss_weight
        self.ctc_dropout_rate = ctc_loss_dropout_rate
        self.use_lm_loss = lm_loss
        self.lm_loss_weight = lm_loss_weight
        self.use_aux_transducer_loss = aux_transducer_loss
        self.aux_transducer_loss_weight = aux_transducer_loss_weight
        self.use_symm_kl_div_loss = symm_kl_div_loss
        self.symm_kl_div_loss_weight = symm_kl_div_loss_weight
        self.blank_id = blank_id
        self.ignore_id = ignore_id
        # Cached target sequences, set by get_transducer_tasks_io.
        self.target = None
    def compute_transducer_loss(
        self,
        enc_out: torch.Tensor,
        dec_out: torch.Tensor,
        target: torch.Tensor,
        t_len: torch.Tensor,
        u_len: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute Transducer loss.

        Args:
            enc_out: Encoder output sequences. (B, T, D_enc)
            dec_out: Decoder output sequences. (B, U, D_dec)
            target: Target label ID sequences. (B, L)
            t_len: Time lengths. (B,)
            u_len: Label lengths. (B,)

        Returns:
            (joint_out, loss_trans):
                Joint output sequences. (B, T, U, D_joint),
                Transducer loss value.

        """
        # Broadcast encoder outputs over U and decoder outputs over T so the
        # joint network yields one logit vector per (t, u) pair.
        joint_out = self.joint_network(enc_out.unsqueeze(2), dec_out.unsqueeze(1))
        # RNNTLoss is configured with reduction="sum"; normalize by batch size.
        loss_trans = self.transducer_loss(joint_out, target, t_len, u_len)
        loss_trans /= joint_out.size(0)
        return joint_out, loss_trans
    def compute_ctc_loss(
        self,
        enc_out: torch.Tensor,
        target: torch.Tensor,
        t_len: torch.Tensor,
        u_len: torch.Tensor,
    ):
        """Compute CTC loss.

        Args:
            enc_out: Encoder output sequences. (B, T, D_enc)
            target: Target character ID sequences. (B, U)
            t_len: Time lengths. (B,)
            u_len: Label lengths. (B,)

        Returns:
            : CTC loss value.

        """
        # Cast to float32 before projecting so the loss is computed in full
        # precision even under mixed-precision training.
        ctc_lin = self.ctc_lin(
            torch.nn.functional.dropout(
                enc_out.to(dtype=torch.float32), p=self.ctc_dropout_rate
            )
        )
        # torch.nn.CTCLoss expects time-major log-probabilities: (T, B, odim).
        ctc_logp = torch.log_softmax(ctc_lin.transpose(0, 1), dim=-1)
        # Deterministic cuDNN kernels give reproducible CTC gradients.
        with torch.backends.cudnn.flags(deterministic=True):
            loss_ctc = self.ctc_loss(ctc_logp, target, t_len, u_len)
        return loss_ctc.mean()
    def compute_aux_transducer_and_symm_kl_div_losses(
        self,
        aux_enc_out: torch.Tensor,
        dec_out: torch.Tensor,
        joint_out: torch.Tensor,
        target: torch.Tensor,
        aux_t_len: torch.Tensor,
        u_len: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute auxiliary Transducer loss and symmetric KL divergence loss.

        Args:
            aux_enc_out: Encoder auxiliary output sequences. [N x (B, T_aux, D_enc_aux)]
            dec_out: Decoder output sequences. (B, U, D_dec)
            joint_out: Joint output sequences. (B, T, U, D_joint)
            target: Target character ID sequences. (B, L)
            aux_t_len: Auxiliary time lengths. [N x (B,)]
            u_len: True U lengths. (B,)

        Returns:
            : Auxiliary Transducer loss and KL divergence loss values.

        """
        aux_trans_loss = 0
        symm_kl_div_loss = 0
        num_aux_layers = len(aux_enc_out)
        B, T, U, D = joint_out.shape
        # Freeze the joint network so the auxiliary branches do not update it;
        # only the MLP (and upstream encoder layers) receive gradients.
        for p in self.joint_network.parameters():
            p.requires_grad = False
        for i, aux_enc_out_i in enumerate(aux_enc_out):
            aux_mlp = self.mlp(aux_enc_out_i)
            aux_joint_out = self.joint_network(
                aux_mlp.unsqueeze(2),
                dec_out.unsqueeze(1),
                is_aux=True,
            )
            if self.use_aux_transducer_loss:
                # Sum-reduced loss normalized by batch size.
                aux_trans_loss += (
                    self.transducer_loss(
                        aux_joint_out,
                        target,
                        aux_t_len[i],
                        u_len,
                    )
                    / B
                )
            if self.use_symm_kl_div_loss:
                denom = B * T * U
                # Symmetric KL: main||aux plus aux||main, each normalized by
                # the number of (b, t, u) positions.
                kl_main_aux = (
                    self.kl_div(
                        torch.log_softmax(joint_out, dim=-1),
                        torch.softmax(aux_joint_out, dim=-1),
                    )
                    / denom
                )
                kl_aux_main = (
                    self.kl_div(
                        torch.log_softmax(aux_joint_out, dim=-1),
                        torch.softmax(joint_out, dim=-1),
                    )
                    / denom
                )
                symm_kl_div_loss += kl_main_aux + kl_aux_main
        # Re-enable gradients for the main transducer loss.
        for p in self.joint_network.parameters():
            p.requires_grad = True
        # Average over the number of auxiliary layers.
        aux_trans_loss /= num_aux_layers
        if self.use_symm_kl_div_loss:
            symm_kl_div_loss /= num_aux_layers
        return aux_trans_loss, symm_kl_div_loss
def compute_lm_loss(
self,
dec_out: torch.Tensor,
target: torch.Tensor,
) -> torch.Tensor:
"""Forward LM loss.
Args:
dec_out: Decoder output sequences. (B, U, D_dec)
target: Target label ID sequences. (B, U)
Returns:
: LM loss value.
"""
lm_lin = self.lm_lin(dec_out)
lm_loss = self.label_smoothing_loss(lm_lin, target)
return lm_loss
    def set_target(self, target: torch.Tensor):
        """Set target label ID sequences.

        Stores a reference that can be read back later via ``get_target``.

        Args:
            target: Target label ID sequences. (B, L)

        """
        self.target = target
    def get_target(self):
        """Get the stored target label ID sequences.

        Returns:
            target: Target label ID sequences. (B, L)

        """
        return self.target
    def get_transducer_tasks_io(
        self,
        labels: torch.Tensor,
        enc_out_len: torch.Tensor,
        aux_enc_out_len: Optional[List],
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Optional[List], torch.Tensor]:
        """Get Transducer tasks inputs and outputs.

        Args:
            labels: Label ID sequences. (B, U)
            enc_out_len: Time lengths, either as a vector (B,) or a mask (B, T).
            aux_enc_out_len: Auxiliary time lengths. [N X (B,)]

        Returns:
            target: Target label ID sequences. (B, L)
            lm_loss_target: LM loss target label ID sequences. (B, U)
            t_len: Time lengths. (B,)
            aux_t_len: Auxiliary time lengths. [N x (B,)]
            u_len: Label lengths. (B,)

        """
        device = labels.device
        # Strip padding (ignore_id) from each label sequence.
        labels_unpad = [label[label != self.ignore_id] for label in labels]
        blank = labels[0].new([self.blank_id])
        # Transducer targets: blank-padded int32 tensor on the labels' device.
        target = pad_list(labels_unpad, self.blank_id).type(torch.int32).to(device)
        # LM targets: each sequence gets a trailing blank appended.
        lm_loss_target = (
            pad_list(
                [torch.cat([y, blank], dim=0) for y in labels_unpad], self.ignore_id
            )
            .type(torch.int64)
            .to(device)
        )
        self.set_target(target)
        # enc_out_len may be a (B, T) mask or a (B,) vector of lengths.
        if enc_out_len.dim() > 1:
            enc_mask_unpad = [m[m != 0] for m in enc_out_len]
            enc_out_len = list(map(int, [m.size(0) for m in enc_mask_unpad]))
        else:
            enc_out_len = list(map(int, enc_out_len))
        t_len = torch.IntTensor(enc_out_len).to(device)
        u_len = torch.IntTensor([label.size(0) for label in labels_unpad]).to(device)
        if aux_enc_out_len:
            aux_t_len = []
            for i in range(len(aux_enc_out_len)):
                # Same mask-vs-lengths handling as for the main encoder output.
                if aux_enc_out_len[i].dim() > 1:
                    aux_mask_unpad = [aux[aux != 0] for aux in aux_enc_out_len[i]]
                    aux_t_len.append(
                        torch.IntTensor(
                            list(map(int, [aux.size(0) for aux in aux_mask_unpad]))
                        ).to(device)
                    )
                else:
                    aux_t_len.append(
                        torch.IntTensor(list(map(int, aux_enc_out_len[i]))).to(device)
                    )
        else:
            # Passed through unchanged (empty list or None).
            aux_t_len = aux_enc_out_len
        return target, lm_loss_target, t_len, aux_t_len, u_len
    def forward(
        self,
        enc_out: torch.Tensor,
        aux_enc_out: List[torch.Tensor],
        dec_out: torch.Tensor,
        labels: torch.Tensor,
        enc_out_len: torch.Tensor,
        aux_enc_out_len: Optional[List],
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Forward main and auxiliary tasks.

        Args:
            enc_out: Encoder output sequences. (B, T, D_enc)
            aux_enc_out: Encoder intermediate output sequences. [N x (B, T_aux, D_enc_aux)]
            dec_out: Decoder output sequences. (B, U, D_dec)
            labels: Label ID sequences. (B, U)
            enc_out_len: Time lengths. (B,)
            aux_enc_out_len: Auxiliary time lengths. [N x (B,)]

        Returns:
            : Weighted losses.
              (transducer loss, ctc loss, aux Transducer loss, KL div loss, LM loss)
              Losses that are disabled by configuration are returned as 0.0 floats.

        """
        # The symmetric KL divergence is only defined against the auxiliary
        # joint outputs, so it requires the auxiliary transducer task.
        if self.use_symm_kl_div_loss:
            assert self.use_aux_transducer_loss
        (trans_loss, ctc_loss, lm_loss, aux_trans_loss, symm_kl_div_loss) = (
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
        )
        target, lm_loss_target, t_len, aux_t_len, u_len = self.get_transducer_tasks_io(
            labels,
            enc_out_len,
            aux_enc_out_len,
        )
        # Main transducer loss also yields the joint output reused by the
        # auxiliary KL divergence task below.
        joint_out, trans_loss = self.compute_transducer_loss(
            enc_out, dec_out, target, t_len, u_len
        )
        if self.use_ctc_loss:
            ctc_loss = self.compute_ctc_loss(enc_out, target, t_len, u_len)
        if self.use_aux_transducer_loss:
            (
                aux_trans_loss,
                symm_kl_div_loss,
            ) = self.compute_aux_transducer_and_symm_kl_div_losses(
                aux_enc_out,
                dec_out,
                joint_out,
                target,
                aux_t_len,
                u_len,
            )
        if self.use_lm_loss:
            lm_loss = self.compute_lm_loss(dec_out, lm_loss_target)
        return (
            self.transducer_loss_weight * trans_loss,
            self.ctc_loss_weight * ctc_loss,
            self.aux_transducer_loss_weight * aux_trans_loss,
            self.symm_kl_div_loss_weight * symm_kl_div_loss,
            self.lm_loss_weight * lm_loss,
        )
| 15,051 | 31.231263 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/transformer_decoder_layer.py | """Transformer decoder layer definition for custom Transducer model."""
from typing import Optional
import torch
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
class TransformerDecoderLayer(torch.nn.Module):
    """Transformer decoder layer module for custom Transducer model.

    Pre-norm layer: self-attention followed by a feed-forward block, each with
    a residual connection, and an optional cache for incremental decoding.

    Args:
        hdim: Hidden dimension.
        self_attention: Self-attention module.
        feed_forward: Feed forward module.
        dropout_rate: Dropout rate.

    """
    def __init__(
        self,
        hdim: int,
        self_attention: MultiHeadedAttention,
        feed_forward: PositionwiseFeedForward,
        dropout_rate: float,
    ):
        """Construct a TransformerDecoderLayer object."""
        super().__init__()
        self.self_attention = self_attention
        self.feed_forward = feed_forward
        # Pre-norms for the attention and feed-forward sub-blocks.
        self.norm1 = LayerNorm(hdim)
        self.norm2 = LayerNorm(hdim)
        self.dropout = torch.nn.Dropout(dropout_rate)
        # Kept for the cache shape assertion in forward().
        self.hdim = hdim
    def forward(
        self,
        sequence: torch.Tensor,
        mask: torch.Tensor,
        cache: Optional[torch.Tensor] = None,
    ):
        """Compute previous decoder output sequences.

        Args:
            sequence: Transformer input sequences. (B, U, D_dec)
            mask: Transformer input mask sequences. (B, U)
            cache: Cached decoder output sequences. (B, (U - 1), D_dec)

        Returns:
            sequence: Transformer output sequences. (B, U, D_dec)
            mask: Transformer output mask sequences. (B, U)

        """
        residual = sequence
        sequence = self.norm1(sequence)
        if cache is None:
            sequence_q = sequence
        else:
            # Incremental decoding: the cache holds outputs for all previous
            # positions, so only the newest position is used as the query.
            batch = sequence.shape[0]
            prev_len = sequence.shape[1] - 1
            assert cache.shape == (
                batch,
                prev_len,
                self.hdim,
            ), f"{cache.shape} == {(batch, prev_len, self.hdim)}"
            sequence_q = sequence[:, -1:, :]
            residual = residual[:, -1:, :]
            if mask is not None:
                mask = mask[:, -1:, :]
        # Self-attention block with residual connection.
        sequence = residual + self.dropout(
            self.self_attention(sequence_q, sequence, sequence, mask)
        )
        residual = sequence
        sequence = self.norm2(sequence)
        # Feed-forward block with residual connection.
        sequence = residual + self.dropout(self.feed_forward(sequence))
        if cache is not None:
            # Re-attach the cached prefix so callers always get full sequences.
            sequence = torch.cat([cache, sequence], dim=1)
        return sequence, mask
| 2,702 | 26.865979 | 82 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/error_calculator.py | """CER/WER computation for Transducer model."""
from typing import List, Tuple, Union
import torch
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.transducer.custom_decoder import CustomDecoder
from espnet.nets.pytorch_backend.transducer.joint_network import JointNetwork
from espnet.nets.pytorch_backend.transducer.rnn_decoder import RNNDecoder
class ErrorCalculator(object):
    """CER and WER computation for Transducer model.

    Args:
        decoder: Decoder module.
        joint_network: Joint network module.
        token_list: Set of unique labels (token strings indexed by label ID).
        sym_space: Space symbol.
        sym_blank: Blank symbol.
        report_cer: Whether to compute CER.
        report_wer: Whether to compute WER.

    """
    def __init__(
        self,
        decoder: Union[RNNDecoder, CustomDecoder],
        joint_network: JointNetwork,
        token_list: List[str],
        sym_space: str,
        sym_blank: str,
        report_cer: bool = False,
        report_wer: bool = False,
    ):
        """Construct an ErrorCalculator object for Transducer model."""
        super().__init__()
        # Small fixed beam: error reporting only needs a rough hypothesis,
        # so decoding is kept cheap.
        self.beam_search = BeamSearchTransducer(
            decoder=decoder,
            joint_network=joint_network,
            beam_size=2,
            search_type="default",
        )
        self.decoder = decoder
        self.token_list = token_list
        self.space = sym_space
        self.blank = sym_blank
        self.report_cer = report_cer
        self.report_wer = report_wer
    def __call__(
        self, enc_out: torch.Tensor, target: torch.Tensor
    ) -> Tuple[float, float]:
        """Calculate sentence-level CER/WER score for hypotheses sequences.

        Args:
            enc_out: Encoder output sequences. (B, T, D_enc)
            target: Target label ID sequences. (B, L)

        Returns:
            cer: Sentence-level CER score (None if report_cer is False).
            wer: Sentence-level WER score (None if report_wer is False).

        """
        cer, wer = None, None
        batchsize = int(enc_out.size(0))
        batch_nbest = []
        # Make sure encoder outputs live on the same device as the decoder.
        enc_out = enc_out.to(next(self.decoder.parameters()).device)
        for b in range(batchsize):
            nbest_hyps = self.beam_search(enc_out[b])
            # NOTE(review): takes the last hypothesis of the n-best list —
            # confirm against BeamSearchTransducer's output ordering.
            batch_nbest.append(nbest_hyps[-1])
        # Drop the leading blank/SOS label from each hypothesis.
        batch_nbest = [nbest_hyp.yseq[1:] for nbest_hyp in batch_nbest]
        hyps, refs = self.convert_to_char(batch_nbest, target.cpu())
        if self.report_cer:
            cer = self.calculate_cer(hyps, refs)
        if self.report_wer:
            wer = self.calculate_wer(hyps, refs)
        return cer, wer
    def convert_to_char(
        self, hyps: List[List[int]], refs: torch.Tensor
    ) -> Tuple[List, List]:
        """Convert label ID sequences to character.

        Args:
            hyps: Hypotheses sequences. (B, L)
            refs: References sequences. (B, L)

        Returns:
            char_hyps: Character list of hypotheses.
            char_refs: Character list of references.

        """
        char_hyps, char_refs = [], []
        for i, hyp in enumerate(hyps):
            hyp_i = [self.token_list[int(h)] for h in hyp]
            ref_i = [self.token_list[int(r)] for r in refs[i]]
            # Map the space symbol back to " " and remove blank symbols.
            char_hyp = "".join(hyp_i).replace(self.space, " ")
            char_hyp = char_hyp.replace(self.blank, "")
            char_ref = "".join(ref_i).replace(self.space, " ")
            char_hyps.append(char_hyp)
            char_refs.append(char_ref)
        return char_hyps, char_refs
    def calculate_cer(self, hyps: List[str], refs: List[str]) -> float:
        """Calculate sentence-level CER score.

        Args:
            hyps: Hypotheses sequences. (B, L)
            refs: References sequences. (B, L)

        Returns:
            : Average sentence-level CER score.

        """
        # Third-party dependency imported lazily, only when CER is reported.
        import editdistance
        distances, lens = [], []
        for i, hyp in enumerate(hyps):
            # Character error rate ignores spaces.
            char_hyp = hyp.replace(" ", "")
            char_ref = refs[i].replace(" ", "")
            distances.append(editdistance.eval(char_hyp, char_ref))
            lens.append(len(char_ref))
        return float(sum(distances)) / sum(lens)
    def calculate_wer(self, hyps: List[str], refs: List[str]) -> float:
        """Calculate sentence-level WER score.

        Args:
            hyps: Hypotheses sequences. (B, L)
            refs: References sequences. (B, L)

        Returns:
            : Average sentence-level WER score.

        """
        # Third-party dependency imported lazily, only when WER is reported.
        import editdistance
        distances, lens = [], []
        for i, hyp in enumerate(hyps):
            word_hyp = hyp.split()
            word_ref = refs[i].split()
            distances.append(editdistance.eval(word_hyp, word_ref))
            lens.append(len(word_ref))
        return float(sum(distances)) / sum(lens)
| 4,829 | 27.579882 | 79 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/initializer.py | """Parameter initialization for Transducer model."""
import math
from argparse import Namespace
import torch
from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one
def initializer(model: torch.nn.Module, args: Namespace):
    """Initialize Transducer model.

    Biases of encoder/decoder/task parameters are zeroed; linear and conv
    weights get a normal init scaled by the inverse square root of fan-in.
    For non-custom (RNN) decoders, the embedding is re-initialized and the
    LSTM forget-gate biases are set to one.

    Args:
        model: Transducer model.
        args: Namespace containing model options.

    """
    target_prefixes = ("enc.", "dec.", "transducer_tasks.")
    for name, param in model.named_parameters():
        if not any(prefix in name for prefix in target_prefixes):
            continue
        ndim = param.dim()
        if ndim == 1:
            # bias
            param.data.zero_()
        elif ndim == 2:
            # linear weight: std = 1 / sqrt(fan_in)
            scale = 1.0 / math.sqrt(param.size(1))
            param.data.normal_(0, scale)
        elif ndim in (3, 4):
            # conv weight: fan_in = in_channels * prod(kernel dims)
            fan_in = param.size(1)
            for kernel_dim in param.size()[2:]:
                fan_in *= kernel_dim
            scale = 1.0 / math.sqrt(fan_in)
            param.data.normal_(0, scale)
    if args.dtype != "custom":
        model.dec.embed.weight.data.normal_(0, 1)
        for i in range(model.dec.dlayers):
            set_forget_bias_to_one(getattr(model.dec.decoder[i], "bias_ih_l0"))
            set_forget_bias_to_one(getattr(model.dec.decoder[i], "bias_hh_l0"))
| 1,304 | 29.348837 | 79 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/custom_decoder.py | """Custom decoder definition for Transducer model."""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from espnet.nets.pytorch_backend.transducer.blocks import build_blocks
from espnet.nets.pytorch_backend.transducer.utils import (
check_batch_states,
check_state,
pad_sequence,
)
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.transducer_decoder_interface import (
ExtendedHypothesis,
Hypothesis,
TransducerDecoderInterface,
)
class CustomDecoder(TransducerDecoderInterface, torch.nn.Module):
    """Custom decoder module for Transducer model.

    Stateless (transformer-style) decoder: its "hidden states" are cached
    per-layer output sequences rather than RNN states.

    Args:
        odim: Output dimension.
        dec_arch: Decoder block architecture (type and parameters).
        input_layer: Input layer type.
        repeat_block: Number of times dec_arch is repeated.
        joint_activation_type: Type of activation for joint network.
        positional_encoding_type: Positional encoding type.
        positionwise_layer_type: Positionwise layer type.
        positionwise_activation_type: Positionwise activation type.
        input_layer_dropout_rate: Dropout rate for input layer.
        blank_id: Blank symbol ID.

    """
    def __init__(
        self,
        odim: int,
        dec_arch: List,
        input_layer: str = "embed",
        repeat_block: int = 0,
        joint_activation_type: str = "tanh",
        positional_encoding_type: str = "abs_pos",
        positionwise_layer_type: str = "linear",
        positionwise_activation_type: str = "relu",
        input_layer_dropout_rate: float = 0.0,
        blank_id: int = 0,
    ):
        """Construct a CustomDecoder object."""
        torch.nn.Module.__init__(self)
        self.embed, self.decoders, ddim, _ = build_blocks(
            "decoder",
            odim,
            input_layer,
            dec_arch,
            repeat_block=repeat_block,
            positional_encoding_type=positional_encoding_type,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_activation_type=positionwise_activation_type,
            input_layer_dropout_rate=input_layer_dropout_rate,
            padding_idx=blank_id,
        )
        self.after_norm = LayerNorm(ddim)
        # Number of decoder layers and their (output) dimension.
        self.dlayers = len(self.decoders)
        self.dunits = ddim
        self.odim = odim
        self.blank_id = blank_id
    def set_device(self, device: torch.device):
        """Set GPU device to use.

        Args:
            device: Device ID.

        """
        self.device = device
    def init_state(
        self,
        batch_size: Optional[int] = None,
    ) -> List[Optional[torch.Tensor]]:
        """Initialize decoder states.

        ``batch_size`` is unused: the decoder is stateless, so the initial
        state is simply one empty (None) cache slot per layer.

        Args:
            batch_size: Batch size.

        Returns:
            state: Initial decoder hidden states. [N x None]

        """
        state = [None] * self.dlayers
        return state
    def forward(
        self, dec_input: torch.Tensor, dec_mask: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode label ID sequences.

        Args:
            dec_input: Label ID sequences. (B, U)
            dec_mask: Label mask sequences. (B, U)

        Return:
            dec_output: Decoder output sequences. (B, U, D_dec)
            dec_output_mask: Mask of decoder output sequences. (B, U)

        """
        dec_input = self.embed(dec_input)
        dec_output, dec_mask = self.decoders(dec_input, dec_mask)
        dec_output = self.after_norm(dec_output)
        return dec_output, dec_mask
    def score(
        self, hyp: Hypothesis, cache: Dict[str, Any]
    ) -> Tuple[torch.Tensor, List[Optional[torch.Tensor]], torch.Tensor]:
        """One-step forward hypothesis.

        Args:
            hyp: Hypothesis.
            cache: Pairs of (dec_out, dec_state) for each label sequence. (key)

        Returns:
            dec_out: Decoder output sequence. (1, D_dec)
            dec_state: Decoder hidden states. [N x (1, U, D_dec)]
            lm_label: Label ID for LM. (1,)

        """
        labels = torch.tensor([hyp.yseq], device=self.device)
        lm_label = labels[:, -1]
        # Cache key is the full label history, e.g. "0_5_12".
        str_labels = "_".join(list(map(str, hyp.yseq)))
        if str_labels in cache:
            dec_out, dec_state = cache[str_labels]
        else:
            # Causal mask over the current label history.
            dec_out_mask = subsequent_mask(len(hyp.yseq)).unsqueeze_(0)
            # Align the cached per-layer outputs with the current prefix length.
            new_state = check_state(hyp.dec_state, (labels.size(1) - 1), self.blank_id)
            dec_out = self.embed(labels)
            dec_state = []
            for s, decoder in zip(new_state, self.decoders):
                dec_out, dec_out_mask = decoder(dec_out, dec_out_mask, cache=s)
                dec_state.append(dec_out)
            dec_out = self.after_norm(dec_out[:, -1])
            cache[str_labels] = (dec_out, dec_state)
        return dec_out[0], dec_state, lm_label
    def batch_score(
        self,
        hyps: Union[List[Hypothesis], List[ExtendedHypothesis]],
        dec_states: List[Optional[torch.Tensor]],
        cache: Dict[str, Any],
        use_lm: bool,
    ) -> Tuple[torch.Tensor, List[Optional[torch.Tensor]], torch.Tensor]:
        """One-step forward hypotheses.

        Args:
            hyps: Hypotheses.
            dec_states: Decoder hidden states. [N x (B, U, D_dec)]
            cache: Pairs of (h_dec, dec_states) for each label sequences. (keys)
            use_lm: Whether to compute label ID sequences for LM.

        Returns:
            dec_out: Decoder output sequences. (B, D_dec)
            dec_states: Decoder hidden states. [N x (B, U, D_dec)]
            lm_labels: Label ID sequences for LM. (B,)

        """
        final_batch = len(hyps)
        # Split hypotheses into cached results and those still to be computed.
        process = []
        done = [None] * final_batch
        for i, hyp in enumerate(hyps):
            str_labels = "_".join(list(map(str, hyp.yseq)))
            if str_labels in cache:
                done[i] = cache[str_labels]
            else:
                process.append((str_labels, hyp.yseq, hyp.dec_state))
        if process:
            labels = pad_sequence([p[1] for p in process], self.blank_id)
            # NOTE(review): the legacy LongTensor constructor with a `device`
            # keyword may reject non-CPU devices — confirm on GPU; the modern
            # equivalent would be torch.tensor(..., dtype=torch.long, device=...).
            labels = torch.LongTensor(labels, device=self.device)
            p_dec_states = self.create_batch_states(
                self.init_state(),
                [p[2] for p in process],
                labels,
            )
            dec_out = self.embed(labels)
            dec_out_mask = (
                subsequent_mask(labels.size(-1))
                .unsqueeze_(0)
                .expand(len(process), -1, -1)
            )
            new_states = []
            for s, decoder in zip(p_dec_states, self.decoders):
                dec_out, dec_out_mask = decoder(dec_out, dec_out_mask, cache=s)
                new_states.append(dec_out)
            dec_out = self.after_norm(dec_out[:, -1])
        # Merge freshly computed results back into batch order and the cache.
        j = 0
        for i in range(final_batch):
            if done[i] is None:
                state = self.select_state(new_states, j)
                done[i] = (dec_out[j], state)
                cache[process[j][0]] = (dec_out[j], state)
                j += 1
        dec_out = torch.stack([d[0] for d in done])
        dec_states = self.create_batch_states(
            dec_states, [d[1] for d in done], [[0] + h.yseq for h in hyps]
        )
        if use_lm:
            # NOTE(review): same legacy-constructor `device` concern as above.
            lm_labels = torch.LongTensor(
                [hyp.yseq[-1] for hyp in hyps], device=self.device
            )
            return dec_out, dec_states, lm_labels
        return dec_out, dec_states, None
    def select_state(
        self, states: List[Optional[torch.Tensor]], idx: int
    ) -> List[Optional[torch.Tensor]]:
        """Get specified ID state from decoder hidden states.

        Args:
            states: Decoder hidden states. [N x (B, U, D_dec)]
            idx: State ID to extract.

        Returns:
            state_idx: Decoder hidden state for given ID. [N x (1, U, D_dec)]

        """
        if states[0] is None:
            return states
        state_idx = [states[layer][idx] for layer in range(self.dlayers)]
        return state_idx
    def create_batch_states(
        self,
        states: List[Optional[torch.Tensor]],
        new_states: List[Optional[torch.Tensor]],
        check_list: List[List[int]],
    ) -> List[Optional[torch.Tensor]]:
        """Create decoder hidden states sequences.

        Args:
            states: Decoder hidden states. [N x (B, U, D_dec)]
            new_states: Decoder hidden states. [B x [N x (1, U, D_dec)]]
            check_list: Label ID sequences.

        Returns:
            states: New decoder hidden states. [N x (B, U, D_dec)]

        """
        if new_states[0][0] is None:
            return states
        # Pad every per-hypothesis state to the longest label prefix.
        max_len = max(len(elem) for elem in check_list) - 1
        for layer in range(self.dlayers):
            states[layer] = check_batch_states(
                [s[layer] for s in new_states], max_len, self.blank_id
            )
        return states
| 9,049 | 29.782313 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transducer/blocks.py | """Set of methods to create custom architecture."""
from typing import Any, Dict, List, Tuple, Union
import torch
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
from espnet.nets.pytorch_backend.conformer.encoder_layer import (
EncoderLayer as ConformerEncoderLayer,
)
from espnet.nets.pytorch_backend.nets_utils import get_activation
from espnet.nets.pytorch_backend.transducer.conv1d_nets import CausalConv1d, Conv1d
from espnet.nets.pytorch_backend.transducer.transformer_decoder_layer import (
TransformerDecoderLayer,
)
from espnet.nets.pytorch_backend.transducer.vgg2l import VGG2L
from espnet.nets.pytorch_backend.transformer.attention import (
MultiHeadedAttention,
RelPositionMultiHeadedAttention,
)
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
RelPositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import MultiSequential
from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling
def verify_block_arguments(
    net_part: str,
    block: Dict[str, Any],
    num_block: int,
) -> Tuple[int, int]:
    """Verify block arguments are valid.

    Args:
        net_part: Network part, either 'encoder' or 'decoder'.
        block: Block parameters.
        num_block: Block ID (1-indexed, used in error messages).

    Return:
        block_io: Input and output dimension of the block.

    Raises:
        ValueError: If the block has no type, an unsupported net_part/type
            combination, or required arguments are missing.
        NotImplementedError: If the block type is unknown.

    """
    block_type = block.get("type")
    if block_type is None:
        # Fix: the message is now actually %-formatted. The previous code
        # passed the tuple as a second exception argument, so callers saw the
        # raw "%d ... %s" template instead of the block ID and net part.
        raise ValueError(
            "Block %d in %s doesn't have a type assigned." % (num_block, net_part)
        )
    if block_type == "transformer":
        arguments = {"d_hidden", "d_ff", "heads"}
    elif block_type == "conformer":
        arguments = {
            "d_hidden",
            "d_ff",
            "heads",
            "macaron_style",
            "use_conv_mod",
        }
        if net_part == "decoder":
            raise ValueError("Decoder does not support 'conformer'.")
        if block.get("use_conv_mod", None) is True and "conv_mod_kernel" not in block:
            raise ValueError(
                "Block %d: 'use_conv_mod' is True but "
                " 'conv_mod_kernel' is not specified" % num_block
            )
    elif block_type == "causal-conv1d":
        arguments = {"idim", "odim", "kernel_size"}
        if net_part == "encoder":
            raise ValueError("Encoder does not support 'causal-conv1d'.")
    elif block_type == "conv1d":
        arguments = {"idim", "odim", "kernel_size"}
        if net_part == "decoder":
            raise ValueError("Decoder does not support 'conv1d.'")
    else:
        raise NotImplementedError(
            "Wrong type. Currently supported: "
            "causal-conv1d, conformer, conv-nd or transformer."
        )
    if not arguments.issubset(block):
        raise ValueError(
            "%s in %s in position %d: Expected block arguments : %s."
            " See tutorial page for more information."
            % (block_type, net_part, num_block, arguments)
        )
    # Attention-based blocks keep a constant hidden size; conv blocks map
    # idim -> odim.
    if block_type in ("transformer", "conformer"):
        block_io = (block["d_hidden"], block["d_hidden"])
    else:
        block_io = (block["idim"], block["odim"])
    return block_io
def prepare_input_layer(
    input_layer_type: str,
    feats_dim: int,
    blocks: List[Dict[str, Any]],
    dropout_rate: float,
    pos_enc_dropout_rate: float,
) -> Dict[str, Any]:
    """Prepare input layer arguments.

    Args:
        input_layer_type: Input layer type.
        feats_dim: Dimension of input features.
        blocks: Blocks parameters for network part.
        dropout_rate: Dropout rate for input layer.
        pos_enc_dropout_rate: Dropout rate for input layer pos. enc.

    Return:
        input_block: Input block parameters.

    """
    first_type = blocks[0].get("type", None)

    # A causal-conv1d body forces the conv-friendly embedding front-end.
    input_block = {
        "type": "c-embed" if first_type == "causal-conv1d" else input_layer_type,
        "dropout-rate": dropout_rate,
        "pos-dropout-rate": pos_enc_dropout_rate,
        "idim": feats_dim,
    }

    # Attention blocks define their size via d_hidden, conv blocks via idim.
    if first_type in ("transformer", "conformer"):
        input_block["odim"] = blocks[0].get("d_hidden", 0)
    else:
        input_block["odim"] = blocks[0].get("idim", 0)

    return input_block
def prepare_body_model(
    net_part: str,
    blocks: List[Dict[str, Any]],
) -> Tuple[int]:
    """Prepare model body blocks.

    Validates every block and checks that consecutive blocks have matching
    output/input dimensions.

    Args:
        net_part: Network part, either 'encoder' or 'decoder'.
        blocks: Blocks parameters for network part.

    Return:
        : Network output dimension.

    """
    io_dims = [
        verify_block_arguments(net_part, blk, idx + 1)
        for idx, blk in enumerate(blocks)
    ]

    if {"transformer", "conformer"} <= {blk["type"] for blk in blocks}:
        raise NotImplementedError(
            net_part + ": transformer and conformer blocks "
            "can't be used together in the same net part."
        )

    for idx in range(1, len(io_dims)):
        if io_dims[idx - 1][1] != io_dims[idx][0]:
            raise ValueError(
                "Output/Input mismatch between blocks %d and %d in %s"
                % (idx, (idx + 1), net_part)
            )

    return io_dims[-1][1]
def get_pos_enc_and_att_class(
    net_part: str, pos_enc_type: str, self_attn_type: str
) -> Tuple[
    Union[PositionalEncoding, ScaledPositionalEncoding, RelPositionalEncoding],
    Union[MultiHeadedAttention, RelPositionMultiHeadedAttention],
]:
    """Get positional encoding and self attention module class.

    Args:
        net_part: Network part, either 'encoder' or 'decoder'.
        pos_enc_type: Positional encoding type.
        self_attn_type: Self-attention type.

    Return:
        pos_enc_class: Positional encoding class.
        self_attn_class: Self-attention class.

    """
    pos_enc_classes = {
        "abs_pos": PositionalEncoding,
        "scaled_abs_pos": ScaledPositionalEncoding,
        "rel_pos": RelPositionalEncoding,
    }

    if pos_enc_type not in pos_enc_classes:
        raise NotImplementedError(
            "pos_enc_type should be either 'abs_pos', 'scaled_abs_pos' or 'rel_pos'"
        )

    # Relative positional encoding only works with relative self-attention
    # in the encoder.
    if (
        pos_enc_type == "rel_pos"
        and net_part == "encoder"
        and self_attn_type != "rel_self_attn"
    ):
        raise ValueError("'rel_pos' is only compatible with 'rel_self_attn'")

    pos_enc_class = pos_enc_classes[pos_enc_type]
    self_attn_class = (
        RelPositionMultiHeadedAttention
        if self_attn_type == "rel_self_attn"
        else MultiHeadedAttention
    )

    return pos_enc_class, self_attn_class
def build_input_layer(
    block: Dict[str, Any],
    pos_enc_class: torch.nn.Module,
    padding_idx: int,
) -> Tuple[Union[Conv2dSubsampling, VGG2L, torch.nn.Sequential], int]:
    """Build input layer.

    Args:
        block: Architecture definition of input layer.
        pos_enc_class: Positional encoding class.
        padding_idx: Padding symbol ID for embedding layer (if provided).

    Returns:
        : Input layer module.
        subsampling_factor: Subsampling factor.

    """
    input_type = block["type"]
    in_dim, out_dim = block["idim"], block["odim"]
    drop_rate = block["dropout-rate"]
    pos_drop_rate = block["pos-dropout-rate"]

    # Conv2dSubsampling/VGG2L create their own positional encoding unless a
    # relative positional encoding instance is handed to them.
    if pos_enc_class.__name__ == "RelPositionalEncoding":
        subsampling_pos_enc = pos_enc_class(out_dim, pos_drop_rate)
    else:
        subsampling_pos_enc = None

    if input_type == "linear":
        module = torch.nn.Sequential(
            torch.nn.Linear(in_dim, out_dim),
            torch.nn.LayerNorm(out_dim),
            torch.nn.Dropout(drop_rate),
            torch.nn.ReLU(),
            pos_enc_class(out_dim, pos_drop_rate),
        )
        return module, 1

    if input_type == "conv2d":
        return Conv2dSubsampling(in_dim, out_dim, drop_rate, subsampling_pos_enc), 4

    if input_type == "vgg2l":
        return VGG2L(in_dim, out_dim, subsampling_pos_enc), 4

    if input_type == "embed":
        module = torch.nn.Sequential(
            torch.nn.Embedding(in_dim, out_dim, padding_idx=padding_idx),
            pos_enc_class(out_dim, pos_drop_rate),
        )
        return module, 1

    if input_type == "c-embed":
        # Embedding without positional encoding, for conv-based decoders.
        module = torch.nn.Sequential(
            torch.nn.Embedding(in_dim, out_dim, padding_idx=padding_idx),
            torch.nn.Dropout(drop_rate),
        )
        return module, 1

    raise NotImplementedError(
        "Invalid input layer: %s. Supported: linear, conv2d, vgg2l and embed"
        % input_type
    )
def build_transformer_block(
    net_part: str,
    block: Dict[str, Any],
    pw_layer_type: str,
    pw_activation_type: str,
) -> Union[EncoderLayer, TransformerDecoderLayer]:
    """Build function for transformer block.

    Args:
        net_part: Network part, either 'encoder' or 'decoder'.
        block: Transformer block parameters.
        pw_layer_type: Positionwise layer type.
        pw_activation_type: Positionwise activation type.

    Returns:
        : Function to create transformer (encoder or decoder) block.

    """
    d_hidden = block["d_hidden"]

    dropout_rate = block.get("dropout-rate", 0.0)
    pos_dropout_rate = block.get("pos-dropout-rate", 0.0)
    att_dropout_rate = block.get("att-dropout-rate", 0.0)

    if pw_layer_type != "linear":
        raise NotImplementedError(
            "Transformer block only supports linear pointwise layer."
        )

    if net_part == "encoder":
        layer_class = EncoderLayer
    elif net_part == "decoder":
        layer_class = TransformerDecoderLayer

    def _build_layer() -> Union[EncoderLayer, TransformerDecoderLayer]:
        """Instantiate a fresh transformer layer from the captured parameters."""
        return layer_class(
            d_hidden,
            MultiHeadedAttention(block["heads"], d_hidden, att_dropout_rate),
            PositionwiseFeedForward(
                d_hidden,
                block["d_ff"],
                pos_dropout_rate,
                get_activation(pw_activation_type),
            ),
            dropout_rate,
        )

    return _build_layer
def build_conformer_block(
    block: Dict[str, Any],
    self_attn_class: str,
    pw_layer_type: str,
    pw_activation_type: str,
    conv_mod_activation_type: str,
) -> ConformerEncoderLayer:
    """Build function for conformer block.

    Args:
        block: Conformer block parameters.
        self_attn_class: Self-attention module class.
        pw_layer_type: Positionwise layer type.
        pw_activation_type: Positionwise activation type.
        conv_mod_activation_type: Convolutional module activation type.

    Returns:
        : Function to create conformer (encoder) block.

    """
    d_hidden = block["d_hidden"]
    d_ff = block["d_ff"]

    dropout_rate = block.get("dropout-rate", 0.0)
    pos_dropout_rate = block.get("pos-dropout-rate", 0.0)
    att_dropout_rate = block.get("att-dropout-rate", 0.0)

    macaron_style = block["macaron_style"]
    use_conv_mod = block["use_conv_mod"]

    if pw_layer_type != "linear":
        raise NotImplementedError("Conformer block only supports linear yet.")

    # Argument tuples are built once and shared by every layer instance
    # the returned builder creates.
    pw_layer_args = (
        d_hidden,
        d_ff,
        pos_dropout_rate,
        get_activation(pw_activation_type),
    )
    if macaron_style:
        macaron_net_args = (
            d_hidden,
            d_ff,
            pos_dropout_rate,
            get_activation(pw_activation_type),
        )
    if use_conv_mod:
        conv_mod_args = (
            d_hidden,
            block["conv_mod_kernel"],
            get_activation(conv_mod_activation_type),
        )

    def _build_layer() -> ConformerEncoderLayer:
        """Instantiate a fresh conformer encoder layer."""
        return ConformerEncoderLayer(
            d_hidden,
            self_attn_class(block["heads"], d_hidden, att_dropout_rate),
            PositionwiseFeedForward(*pw_layer_args),
            PositionwiseFeedForward(*macaron_net_args) if macaron_style else None,
            ConvolutionModule(*conv_mod_args) if use_conv_mod else None,
            dropout_rate,
        )

    return _build_layer
def build_conv1d_block(block: Dict[str, Any], block_type: str) -> CausalConv1d:
    """Build function for causal conv1d block.

    Args:
        block: CausalConv1d or Conv1D block parameters.
        block_type: Block type, either 'conv1d' or 'causal-conv1d'.

    Returns:
        : Function to create conv1d (encoder) or causal conv1d (decoder) block.

    """
    conv_class = Conv1d if block_type == "conv1d" else CausalConv1d

    # Optional parameters fall back to the conv defaults when unspecified.
    options = {
        "stride": block.get("stride", 1),
        "dilation": block.get("dilation", 1),
        "groups": block.get("groups", 1),
        "bias": block.get("bias", True),
        "relu": block.get("use-relu", False),
        "batch_norm": block.get("use-batch-norm", False),
        "dropout_rate": block.get("dropout-rate", 0.0),
    }

    def _build_layer() -> CausalConv1d:
        """Instantiate a fresh (causal) conv1d block."""
        return conv_class(
            block["idim"],
            block["odim"],
            block["kernel_size"],
            **options,
        )

    return _build_layer
def build_blocks(
    net_part: str,
    idim: int,
    input_layer_type: str,
    blocks: List[Dict[str, Any]],
    repeat_block: int = 0,
    self_attn_type: str = "self_attn",
    positional_encoding_type: str = "abs_pos",
    positionwise_layer_type: str = "linear",
    positionwise_activation_type: str = "relu",
    conv_mod_activation_type: str = "relu",
    input_layer_dropout_rate: float = 0.0,
    input_layer_pos_enc_dropout_rate: float = 0.0,
    padding_idx: int = -1,
) -> Tuple[
    Union[Conv2dSubsampling, VGG2L, torch.nn.Sequential], MultiSequential, int, int
]:
    """Build custom model blocks.

    Args:
        net_part: Network part, either 'encoder' or 'decoder'.
        idim: Input dimension.
        input_layer_type: Input layer type.
        blocks: Blocks parameters for network part.
        repeat_block: Number of times provided blocks are repeated.
        self_attn_type: Self-attention module type.
        positional_encoding_type: Positional encoding layer type.
        positionwise_layer_type: Positionwise layer type.
        positionwise_activation_type: Positionwise activation type.
        conv_mod_activation_type: Convolutional module activation type.
        input_layer_dropout_rate: Dropout rate for input layer.
        input_layer_pos_enc_dropout_rate: Dropout rate for input layer pos. enc.
        padding_idx: Padding symbol ID for embedding layer.

    Returns:
        in_layer: Input layer
        all_blocks: Encoder/Decoder network.
        out_dim: Network output dimension.
        conv_subsampling_factor: Subsampling factor in frontend CNN.

    """
    fn_modules = []
    pos_enc_class, self_attn_class = get_pos_enc_and_att_class(
        net_part, positional_encoding_type, self_attn_type
    )
    input_block = prepare_input_layer(
        input_layer_type,
        idim,
        blocks,
        input_layer_dropout_rate,
        input_layer_pos_enc_dropout_rate,
    )
    # Validates block parameters and inter-block dimensions; raises if invalid,
    # so the loop below only ever sees supported block types.
    out_dim = prepare_body_model(net_part, blocks)
    input_layer, conv_subsampling_factor = build_input_layer(
        input_block,
        pos_enc_class,
        padding_idx,
    )
    # Collect zero-arg builder functions so that repeated blocks are
    # instantiated as distinct modules below.
    for i in range(len(blocks)):
        block_type = blocks[i]["type"]
        if block_type in ("causal-conv1d", "conv1d"):
            module = build_conv1d_block(blocks[i], block_type)
        elif block_type == "conformer":
            module = build_conformer_block(
                blocks[i],
                self_attn_class,
                positionwise_layer_type,
                positionwise_activation_type,
                conv_mod_activation_type,
            )
        elif block_type == "transformer":
            module = build_transformer_block(
                net_part,
                blocks[i],
                positionwise_layer_type,
                positionwise_activation_type,
            )
        fn_modules.append(module)
    if repeat_block > 1:
        fn_modules = fn_modules * repeat_block
    return (
        input_layer,
        MultiSequential(*[fn() for fn in fn_modules]),
        out_dim,
        conv_subsampling_factor,
    )
| 16,356 | 29.459963 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/tacotron2/encoder.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Tacotron2 encoder related modules."""
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
def encoder_init(m):
    """Initialize encoder parameters.

    Applies Xavier (Glorot) uniform initialization, scaled with the ReLU
    gain, to the weight of every ``torch.nn.Conv1d`` submodule.  Intended
    to be used through ``module.apply(encoder_init)``.
    """
    if not isinstance(m, torch.nn.Conv1d):
        return
    gain = torch.nn.init.calculate_gain("relu")
    torch.nn.init.xavier_uniform_(m.weight, gain)
class Encoder(torch.nn.Module):
    """Encoder module of Spectrogram prediction network.

    This is a module of encoder of Spectrogram prediction network in Tacotron2,
    which described in `Natural TTS Synthesis by Conditioning WaveNet on Mel
    Spectrogram Predictions`_. This is the encoder which converts either a sequence
    of characters or acoustic features into the sequence of hidden states.

    .. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
       https://arxiv.org/abs/1712.05884

    """

    def __init__(
        self,
        idim,
        input_layer="embed",
        embed_dim=512,
        elayers=1,
        eunits=512,
        econv_layers=3,
        econv_chans=512,
        econv_filts=5,
        use_batch_norm=True,
        use_residual=False,
        dropout_rate=0.5,
        padding_idx=0,
    ):
        """Initialize Tacotron2 encoder module.

        Args:
            idim (int): Dimension of the inputs.
            input_layer (str): Input layer type ("linear" or "embed").
            embed_dim (int, optional): Dimension of character embedding
                (used only when ``input_layer="embed"``).
            elayers (int, optional): The number of encoder blstm layers.
            eunits (int, optional): The number of encoder blstm units.
            econv_layers (int, optional): The number of encoder conv layers.
            econv_filts (int, optional): The number of encoder conv filter size.
            econv_chans (int, optional): The number of encoder conv filter channels.
            use_batch_norm (bool, optional): Whether to use batch normalization.
            use_residual (bool, optional): Whether to use residual connection.
            dropout_rate (float, optional): Dropout rate.
            padding_idx (int, optional): Padding symbol ID for the embedding layer.

        Raises:
            ValueError: If ``input_layer`` is neither "linear" nor "embed".
        """
        super(Encoder, self).__init__()
        # store the hyperparameters
        self.idim = idim
        self.use_residual = use_residual

        # define network layer modules
        if input_layer == "linear":
            self.embed = torch.nn.Linear(idim, econv_chans)
        elif input_layer == "embed":
            self.embed = torch.nn.Embedding(idim, embed_dim, padding_idx=padding_idx)
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        if econv_layers > 0:
            self.convs = torch.nn.ModuleList()
            for layer in range(econv_layers):
                # first conv consumes the embedding dimension when "embed" is used
                ichans = (
                    embed_dim if layer == 0 and input_layer == "embed" else econv_chans
                )
                if use_batch_norm:
                    self.convs += [
                        torch.nn.Sequential(
                            torch.nn.Conv1d(
                                ichans,
                                econv_chans,
                                econv_filts,
                                stride=1,
                                padding=(econv_filts - 1) // 2,
                                bias=False,
                            ),
                            torch.nn.BatchNorm1d(econv_chans),
                            torch.nn.ReLU(),
                            torch.nn.Dropout(dropout_rate),
                        )
                    ]
                else:
                    self.convs += [
                        torch.nn.Sequential(
                            torch.nn.Conv1d(
                                ichans,
                                econv_chans,
                                econv_filts,
                                stride=1,
                                padding=(econv_filts - 1) // 2,
                                bias=False,
                            ),
                            torch.nn.ReLU(),
                            torch.nn.Dropout(dropout_rate),
                        )
                    ]
        else:
            self.convs = None
        if elayers > 0:
            iunits = econv_chans if econv_layers != 0 else embed_dim
            # eunits is the total over both directions, hence eunits // 2 per direction
            self.blstm = torch.nn.LSTM(
                iunits, eunits // 2, elayers, batch_first=True, bidirectional=True
            )
        else:
            self.blstm = None

        # initialize
        self.apply(encoder_init)

    def forward(self, xs, ilens=None):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of the padded sequence. Either character ids (B, Tmax)
                or acoustic feature (B, Tmax, idim * encoder_reduction_factor). Padded
                value should be 0.
            ilens (LongTensor): Batch of lengths of each input batch (B,).

        Returns:
            Tensor: Batch of the sequences of encoder states(B, Tmax, eunits).
            LongTensor: Batch of lengths of each sequence (B,)

        Note:
            When ``self.blstm`` is None only the conv output tensor is returned
            (no length tensor).
        """
        # (B, Tmax, ...) -> (B, C, Tmax) for the 1d-conv stack
        xs = self.embed(xs).transpose(1, 2)
        if self.convs is not None:
            for i in range(len(self.convs)):
                if self.use_residual:
                    xs = xs + self.convs[i](xs)
                else:
                    xs = self.convs[i](xs)
        if self.blstm is None:
            return xs.transpose(1, 2)
        if not isinstance(ilens, torch.Tensor):
            ilens = torch.tensor(ilens)
        # pack_padded_sequence requires the length tensor on CPU
        xs = pack_padded_sequence(xs.transpose(1, 2), ilens.cpu(), batch_first=True)
        # compact RNN weights into contiguous memory for cuDNN
        self.blstm.flatten_parameters()
        xs, _ = self.blstm(xs)  # (B, Tmax, C)
        xs, hlens = pad_packed_sequence(xs, batch_first=True)
        return xs, hlens

    def inference(self, x):
        """Inference.

        Args:
            x (Tensor): The sequeunce of character ids (T,)
                or acoustic feature (T, idim * encoder_reduction_factor).

        Returns:
            Tensor: The sequences of encoder states(T, eunits).
        """
        # add a batch axis, run forward, then strip the batch axis again
        xs = x.unsqueeze(0)
        ilens = torch.tensor([x.size(0)])
        return self.forward(xs, ilens)[0][0]
| 6,261 | 35.196532 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/tacotron2/decoder.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Tacotron2 decoder related modules."""
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.rnn.attentions import AttForwardTA
def decoder_init(m):
    """Initialize decoder parameters.

    Applies Xavier (Glorot) uniform initialization, scaled with the tanh
    gain, to the weight of every ``torch.nn.Conv1d`` submodule.  Intended
    to be used through ``module.apply(decoder_init)``.
    """
    if not isinstance(m, torch.nn.Conv1d):
        return
    gain = torch.nn.init.calculate_gain("tanh")
    torch.nn.init.xavier_uniform_(m.weight, gain)
class ZoneOutCell(torch.nn.Module):
    """ZoneOut Cell module.

    This is a module of zoneout described in
    `Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`_.
    This code is modified from `eladhoffer/seq2seq.pytorch`_.

    Examples:
        >>> lstm = torch.nn.LSTMCell(16, 32)
        >>> lstm = ZoneOutCell(lstm, 0.5)

    .. _`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`:
        https://arxiv.org/abs/1606.01305

    .. _`eladhoffer/seq2seq.pytorch`:
        https://github.com/eladhoffer/seq2seq.pytorch

    """

    def __init__(self, cell, zoneout_rate=0.1):
        """Initialize zone out cell module.

        Args:
            cell (torch.nn.Module): Pytorch recurrent cell module
                e.g. `torch.nn.Module.LSTMCell`.
            zoneout_rate (float, optional): Probability of zoneout from 0.0 to 1.0.

        Raises:
            ValueError: If ``zoneout_rate`` is outside [0.0, 1.0].
        """
        super(ZoneOutCell, self).__init__()
        if zoneout_rate < 0.0 or zoneout_rate > 1.0:
            raise ValueError(
                "zoneout probability must be in the range from 0.0 to 1.0."
            )
        self.cell = cell
        # mirror the wrapped cell's hidden size so callers can query it
        self.hidden_size = cell.hidden_size
        self.zoneout_rate = zoneout_rate

    def forward(self, inputs, hidden):
        """Calculate forward propagation.

        Args:
            inputs (Tensor): Batch of input tensor (B, input_size).
            hidden (tuple):
                - Tensor: Batch of initial hidden states (B, hidden_size).
                - Tensor: Batch of initial cell states (B, hidden_size).

        Returns:
            tuple:
                - Tensor: Batch of next hidden states (B, hidden_size).
                - Tensor: Batch of next cell states (B, hidden_size).
        """
        candidate = self.cell(inputs, hidden)
        return self._zoneout(hidden, candidate, self.zoneout_rate)

    def _zoneout(self, h, next_h, prob):
        # recurse over state tuples (e.g. LSTM's (hidden, cell) pair)
        if isinstance(h, tuple):
            count = len(h)
            probs = prob if isinstance(prob, tuple) else tuple([prob] * count)
            return tuple(
                [self._zoneout(h[i], next_h[i], probs[i]) for i in range(count)]
            )
        if not self.training:
            # evaluation: deterministic interpolation with the zoneout rate
            return prob * h + (1 - prob) * next_h
        # training: per-element Bernoulli mask keeps old state with prob `prob`
        mask = h.new(*h.size()).bernoulli_(prob)
        return mask * h + (1 - mask) * next_h
class Prenet(torch.nn.Module):
    """Prenet module for decoder of Spectrogram prediction network.

    This is a module of Prenet in the decoder of Spectrogram prediction network,
    which described in `Natural TTS
    Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_.
    The Prenet preforms nonlinear conversion
    of inputs before input to auto-regressive lstm,
    which helps to learn diagonal attentions.

    Note:
        This module alway applies dropout even in evaluation.
        See the detail in `Natural TTS Synthesis by
        Conditioning WaveNet on Mel Spectrogram Predictions`_.

    .. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
        https://arxiv.org/abs/1712.05884

    """

    def __init__(self, idim, n_layers=2, n_units=256, dropout_rate=0.5):
        """Initialize prenet module.

        Args:
            idim (int): Dimension of the inputs.
            n_layers (int, optional): The number of prenet layers.
            n_units (int, optional): The number of prenet units.
            dropout_rate (float, optional): Dropout rate applied after each layer.
        """
        super(Prenet, self).__init__()
        self.dropout_rate = dropout_rate
        layers = []
        for i in range(n_layers):
            in_features = idim if i == 0 else n_units
            layers.append(
                torch.nn.Sequential(
                    torch.nn.Linear(in_features, n_units), torch.nn.ReLU()
                )
            )
        self.prenet = torch.nn.ModuleList(layers)

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Batch of input tensors (B, ..., idim).

        Returns:
            Tensor: Batch of output tensors (B, ..., odim).
        """
        for layer in self.prenet:
            # F.dropout defaults to training=True, so dropout stays active even
            # in eval mode — this is intentional (see the class-level note).
            x = F.dropout(layer(x), self.dropout_rate)
        return x
class Postnet(torch.nn.Module):
    """Postnet module for Spectrogram prediction network.

    This is a module of Postnet in Spectrogram prediction network,
    which described in `Natural TTS Synthesis by
    Conditioning WaveNet on Mel Spectrogram Predictions`_.
    The Postnet predicts refines the predicted
    Mel-filterbank of the decoder,
    which helps to compensate the detail structure of spectrogram.

    .. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
        https://arxiv.org/abs/1712.05884

    """

    def __init__(
        self,
        idim,
        odim,
        n_layers=5,
        n_chans=512,
        n_filts=5,
        dropout_rate=0.5,
        use_batch_norm=True,
    ):
        """Initialize postnet module.

        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            n_layers (int, optional): The number of layers.
            n_filts (int, optional): The number of filter size.
            n_chans (int, optional): The number of filter channels.
            use_batch_norm (bool, optional): Whether to use batch normalization.
            dropout_rate (float, optional): Dropout rate.
        """
        super(Postnet, self).__init__()
        self.postnet = torch.nn.ModuleList()
        # all but the last conv block use a Tanh nonlinearity
        for layer in range(n_layers - 1):
            ichans = odim if layer == 0 else n_chans
            ochans = odim if layer == n_layers - 1 else n_chans
            modules = [
                torch.nn.Conv1d(
                    ichans,
                    ochans,
                    n_filts,
                    stride=1,
                    padding=(n_filts - 1) // 2,
                    bias=False,
                )
            ]
            if use_batch_norm:
                modules.append(torch.nn.BatchNorm1d(ochans))
            modules.append(torch.nn.Tanh())
            modules.append(torch.nn.Dropout(dropout_rate))
            self.postnet += [torch.nn.Sequential(*modules)]
        # final conv block: no nonlinearity, projects back to odim
        ichans = n_chans if n_layers != 1 else odim
        modules = [
            torch.nn.Conv1d(
                ichans,
                odim,
                n_filts,
                stride=1,
                padding=(n_filts - 1) // 2,
                bias=False,
            )
        ]
        if use_batch_norm:
            modules.append(torch.nn.BatchNorm1d(odim))
        modules.append(torch.nn.Dropout(dropout_rate))
        self.postnet += [torch.nn.Sequential(*modules)]

    def forward(self, xs):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of the sequences of padded input tensors (B, idim, Tmax).

        Returns:
            Tensor: Batch of padded output tensor. (B, odim, Tmax).
        """
        for layer in self.postnet:
            xs = layer(xs)
        return xs
class Decoder(torch.nn.Module):
    """Decoder module of Spectrogram prediction network.

    This is a module of decoder of Spectrogram prediction network in Tacotron2,
    which described in `Natural TTS
    Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_.
    The decoder generates the sequence of
    features from the sequence of the hidden states.

    .. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
       https://arxiv.org/abs/1712.05884

    """

    def __init__(
        self,
        idim,
        odim,
        att,
        dlayers=2,
        dunits=1024,
        prenet_layers=2,
        prenet_units=256,
        postnet_layers=5,
        postnet_chans=512,
        postnet_filts=5,
        output_activation_fn=None,
        cumulate_att_w=True,
        use_batch_norm=True,
        use_concate=True,
        dropout_rate=0.5,
        zoneout_rate=0.1,
        reduction_factor=1,
    ):
        """Initialize Tacotron2 decoder module.

        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            att (torch.nn.Module): Instance of attention class.
            dlayers (int, optional): The number of decoder lstm layers.
            dunits (int, optional): The number of decoder lstm units.
            prenet_layers (int, optional): The number of prenet layers.
            prenet_units (int, optional): The number of prenet units.
            postnet_layers (int, optional): The number of postnet layers.
            postnet_filts (int, optional): The number of postnet filter size.
            postnet_chans (int, optional): The number of postnet filter channels.
            output_activation_fn (torch.nn.Module, optional):
                Activation function for outputs.
            cumulate_att_w (bool, optional):
                Whether to cumulate previous attention weight.
            use_batch_norm (bool, optional): Whether to use batch normalization.
            use_concate (bool, optional): Whether to concatenate encoder embedding
                with decoder lstm outputs.
            dropout_rate (float, optional): Dropout rate.
            zoneout_rate (float, optional): Zoneout rate.
            reduction_factor (int, optional): Reduction factor.
        """
        super(Decoder, self).__init__()

        # store the hyperparameters
        self.idim = idim
        self.odim = odim
        self.att = att
        self.output_activation_fn = output_activation_fn
        self.cumulate_att_w = cumulate_att_w
        self.use_concate = use_concate
        self.reduction_factor = reduction_factor

        # check attention type
        # AttForwardTA additionally consumes the previous output frame
        if isinstance(self.att, AttForwardTA):
            self.use_att_extra_inputs = True
        else:
            self.use_att_extra_inputs = False

        # define lstm network
        # when there is no prenet, the raw previous frame (odim) is fed instead
        prenet_units = prenet_units if prenet_layers != 0 else odim
        self.lstm = torch.nn.ModuleList()
        for layer in range(dlayers):
            iunits = idim + prenet_units if layer == 0 else dunits
            lstm = torch.nn.LSTMCell(iunits, dunits)
            if zoneout_rate > 0.0:
                lstm = ZoneOutCell(lstm, zoneout_rate)
            self.lstm += [lstm]

        # define prenet
        if prenet_layers > 0:
            self.prenet = Prenet(
                idim=odim,
                n_layers=prenet_layers,
                n_units=prenet_units,
                dropout_rate=dropout_rate,
            )
        else:
            self.prenet = None

        # define postnet
        if postnet_layers > 0:
            self.postnet = Postnet(
                idim=idim,
                odim=odim,
                n_layers=postnet_layers,
                n_chans=postnet_chans,
                n_filts=postnet_filts,
                use_batch_norm=use_batch_norm,
                dropout_rate=dropout_rate,
            )
        else:
            self.postnet = None

        # define projection layers
        iunits = idim + dunits if use_concate else dunits
        # feat_out emits reduction_factor frames at once; prob_out the stop logits
        self.feat_out = torch.nn.Linear(iunits, odim * reduction_factor, bias=False)
        self.prob_out = torch.nn.Linear(iunits, reduction_factor)

        # initialize
        self.apply(decoder_init)

    def _zero_state(self, hs):
        # one zero state per batch element, sized by the first LSTM cell
        init_hs = hs.new_zeros(hs.size(0), self.lstm[0].hidden_size)
        return init_hs

    def forward(self, hs, hlens, ys):
        """Calculate forward propagation.

        Args:
            hs (Tensor): Batch of the sequences of padded hidden states (B, Tmax, idim).
            hlens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor):
                Batch of the sequences of padded target features (B, Lmax, odim).

        Returns:
            Tensor: Batch of output tensors after postnet (B, Lmax, odim).
            Tensor: Batch of output tensors before postnet (B, Lmax, odim).
            Tensor: Batch of logits of stop prediction (B, Lmax).
            Tensor: Batch of attention weights (B, Lmax, Tmax).

        Note:
            This computation is performed in teacher-forcing manner.
        """
        # thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
        if self.reduction_factor > 1:
            ys = ys[:, self.reduction_factor - 1 :: self.reduction_factor]

        # length list should be list of int
        hlens = list(map(int, hlens))

        # initialize hidden states of decoder
        c_list = [self._zero_state(hs)]
        z_list = [self._zero_state(hs)]
        for _ in range(1, len(self.lstm)):
            c_list += [self._zero_state(hs)]
            z_list += [self._zero_state(hs)]
        # the "go" frame fed at the first decoding step is all zeros
        prev_out = hs.new_zeros(hs.size(0), self.odim)

        # initialize attention
        prev_att_w = None
        self.att.reset()

        # loop for an output sequence
        outs, logits, att_ws = [], [], []
        for y in ys.transpose(0, 1):
            if self.use_att_extra_inputs:
                att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w, prev_out)
            else:
                att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
            prenet_out = self.prenet(prev_out) if self.prenet is not None else prev_out
            xs = torch.cat([att_c, prenet_out], dim=1)
            z_list[0], c_list[0] = self.lstm[0](xs, (z_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                z_list[i], c_list[i] = self.lstm[i](
                    z_list[i - 1], (z_list[i], c_list[i])
                )
            zcs = (
                torch.cat([z_list[-1], att_c], dim=1)
                if self.use_concate
                else z_list[-1]
            )
            outs += [self.feat_out(zcs).view(hs.size(0), self.odim, -1)]
            logits += [self.prob_out(zcs)]
            att_ws += [att_w]
            prev_out = y  # teacher forcing
            if self.cumulate_att_w and prev_att_w is not None:
                prev_att_w = prev_att_w + att_w  # Note: error when use +=
            else:
                prev_att_w = att_w

        logits = torch.cat(logits, dim=1)  # (B, Lmax)
        before_outs = torch.cat(outs, dim=2)  # (B, odim, Lmax)
        att_ws = torch.stack(att_ws, dim=1)  # (B, Lmax, Tmax)

        if self.reduction_factor > 1:
            before_outs = before_outs.view(
                before_outs.size(0), self.odim, -1
            )  # (B, odim, Lmax)

        if self.postnet is not None:
            after_outs = before_outs + self.postnet(before_outs)  # (B, odim, Lmax)
        else:
            after_outs = before_outs
        before_outs = before_outs.transpose(2, 1)  # (B, Lmax, odim)
        after_outs = after_outs.transpose(2, 1)  # (B, Lmax, odim)
        # NOTE(review): self-assignment below is a no-op; kept unchanged here.
        logits = logits

        # apply activation function for scaling
        if self.output_activation_fn is not None:
            before_outs = self.output_activation_fn(before_outs)
            after_outs = self.output_activation_fn(after_outs)

        return after_outs, before_outs, logits, att_ws

    def inference(
        self,
        h,
        threshold=0.5,
        minlenratio=0.0,
        maxlenratio=10.0,
        use_att_constraint=False,
        backward_window=None,
        forward_window=None,
    ):
        """Generate the sequence of features given the sequences of characters.

        Args:
            h (Tensor): Input sequence of encoder hidden states (T, C).
            threshold (float, optional): Threshold to stop generation.
            minlenratio (float, optional): Minimum length ratio.
                If set to 1.0 and the length of input is 10,
                the minimum length of outputs will be 10 * 1 = 10.
            maxlenratio (float, optional): Maximum length ratio.
                If set to 10 and the length of input is 10,
                the maximum length of outputs will be 10 * 10 = 100.
            use_att_constraint (bool):
                Whether to apply attention constraint introduced in `Deep Voice 3`_.
            backward_window (int): Backward window size in attention constraint.
            forward_window (int): Forward window size in attention constraint.

        Returns:
            Tensor: Output sequence of features (L, odim).
            Tensor: Output sequence of stop probabilities (L,).
            Tensor: Attention weights (L, T).

        Note:
            This computation is performed in auto-regressive manner.

        .. _`Deep Voice 3`: https://arxiv.org/abs/1710.07654
        """
        # setup
        assert len(h.size()) == 2
        hs = h.unsqueeze(0)
        ilens = [h.size(0)]
        maxlen = int(h.size(0) * maxlenratio)
        minlen = int(h.size(0) * minlenratio)

        # initialize hidden states of decoder
        c_list = [self._zero_state(hs)]
        z_list = [self._zero_state(hs)]
        for _ in range(1, len(self.lstm)):
            c_list += [self._zero_state(hs)]
            z_list += [self._zero_state(hs)]
        prev_out = hs.new_zeros(1, self.odim)

        # initialize attention
        prev_att_w = None
        self.att.reset()

        # setup for attention constraint
        if use_att_constraint:
            last_attended_idx = 0
        else:
            last_attended_idx = None

        # loop for an output sequence
        idx = 0
        outs, att_ws, probs = [], [], []
        while True:
            # updated index
            idx += self.reduction_factor

            # decoder calculation
            if self.use_att_extra_inputs:
                att_c, att_w = self.att(
                    hs,
                    ilens,
                    z_list[0],
                    prev_att_w,
                    prev_out,
                    last_attended_idx=last_attended_idx,
                    backward_window=backward_window,
                    forward_window=forward_window,
                )
            else:
                att_c, att_w = self.att(
                    hs,
                    ilens,
                    z_list[0],
                    prev_att_w,
                    last_attended_idx=last_attended_idx,
                    backward_window=backward_window,
                    forward_window=forward_window,
                )

            att_ws += [att_w]
            prenet_out = self.prenet(prev_out) if self.prenet is not None else prev_out
            xs = torch.cat([att_c, prenet_out], dim=1)
            z_list[0], c_list[0] = self.lstm[0](xs, (z_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                z_list[i], c_list[i] = self.lstm[i](
                    z_list[i - 1], (z_list[i], c_list[i])
                )
            zcs = (
                torch.cat([z_list[-1], att_c], dim=1)
                if self.use_concate
                else z_list[-1]
            )
            outs += [self.feat_out(zcs).view(1, self.odim, -1)]  # [(1, odim, r), ...]
            probs += [torch.sigmoid(self.prob_out(zcs))[0]]  # [(r), ...]
            if self.output_activation_fn is not None:
                prev_out = self.output_activation_fn(outs[-1][:, :, -1])  # (1, odim)
            else:
                prev_out = outs[-1][:, :, -1]  # (1, odim)
            if self.cumulate_att_w and prev_att_w is not None:
                prev_att_w = prev_att_w + att_w  # Note: error when use +=
            else:
                prev_att_w = att_w
            if use_att_constraint:
                last_attended_idx = int(att_w.argmax())

            # check whether to finish generation
            # stop when any stop-probability exceeds the threshold or maxlen is hit
            if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
                # check mininum length
                if idx < minlen:
                    continue
                outs = torch.cat(outs, dim=2)  # (1, odim, L)
                if self.postnet is not None:
                    outs = outs + self.postnet(outs)  # (1, odim, L)
                outs = outs.transpose(2, 1).squeeze(0)  # (L, odim)
                probs = torch.cat(probs, dim=0)
                att_ws = torch.cat(att_ws, dim=0)
                break

        if self.output_activation_fn is not None:
            outs = self.output_activation_fn(outs)

        return outs, probs, att_ws

    def calculate_all_attentions(self, hs, hlens, ys):
        """Calculate all of the attention weights.

        Args:
            hs (Tensor): Batch of the sequences of padded hidden states (B, Tmax, idim).
            hlens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor):
                Batch of the sequences of padded target features (B, Lmax, odim).

        Returns:
            numpy.ndarray: Batch of attention weights (B, Lmax, Tmax).

        Note:
            This computation is performed in teacher-forcing manner.
        """
        # thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
        if self.reduction_factor > 1:
            ys = ys[:, self.reduction_factor - 1 :: self.reduction_factor]

        # length list should be list of int
        hlens = list(map(int, hlens))

        # initialize hidden states of decoder
        c_list = [self._zero_state(hs)]
        z_list = [self._zero_state(hs)]
        for _ in range(1, len(self.lstm)):
            c_list += [self._zero_state(hs)]
            z_list += [self._zero_state(hs)]
        prev_out = hs.new_zeros(hs.size(0), self.odim)

        # initialize attention
        prev_att_w = None
        self.att.reset()

        # loop for an output sequence
        # mirrors forward() but only collects attention weights
        att_ws = []
        for y in ys.transpose(0, 1):
            if self.use_att_extra_inputs:
                att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w, prev_out)
            else:
                att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
            att_ws += [att_w]
            prenet_out = self.prenet(prev_out) if self.prenet is not None else prev_out
            xs = torch.cat([att_c, prenet_out], dim=1)
            z_list[0], c_list[0] = self.lstm[0](xs, (z_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                z_list[i], c_list[i] = self.lstm[i](
                    z_list[i - 1], (z_list[i], c_list[i])
                )
            prev_out = y  # teacher forcing
            if self.cumulate_att_w and prev_att_w is not None:
                prev_att_w = prev_att_w + att_w  # Note: error when use +=
            else:
                prev_att_w = att_w

        att_ws = torch.stack(att_ws, dim=1)  # (B, Lmax, Tmax)

        return att_ws
| 24,375 | 35.059172 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/tacotron2/cbhg.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""CBHG related modules."""
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class CBHGLoss(torch.nn.Module):
    """Loss function module for CBHG."""

    def __init__(self, use_masking=True):
        """Initialize CBHG loss module.

        Args:
            use_masking (bool): Whether to mask padded part in loss calculation.
        """
        super(CBHGLoss, self).__init__()
        self.use_masking = use_masking

    def forward(self, cbhg_outs, spcs, olens):
        """Calculate forward propagation.

        Args:
            cbhg_outs (Tensor): Batch of CBHG outputs (B, Lmax, spc_dim).
            spcs (Tensor): Batch of groundtruth of spectrogram (B, Lmax, spc_dim).
            olens (LongTensor): Batch of the lengths of each sequence (B,).

        Returns:
            Tensor: L1 loss value
            Tensor: Mean square error loss value.
        """
        if self.use_masking:
            # keep only valid (non-padded) frames in the loss
            mask = make_non_pad_mask(olens).unsqueeze(-1).to(spcs.device)
            cbhg_outs = cbhg_outs.masked_select(mask)
            spcs = spcs.masked_select(mask)

        l1_loss = F.l1_loss(cbhg_outs, spcs)
        mse_loss = F.mse_loss(cbhg_outs, spcs)
        return l1_loss, mse_loss
class CBHG(torch.nn.Module):
    """CBHG module to convert log Mel-filterbanks to linear spectrogram.

    This is a module of CBHG introduced
    in `Tacotron: Towards End-to-End Speech Synthesis`_.
    The CBHG converts the sequence of log Mel-filterbanks into linear spectrogram.

    .. _`Tacotron: Towards End-to-End Speech Synthesis`:
        https://arxiv.org/abs/1703.10135

    """

    def __init__(
        self,
        idim,
        odim,
        conv_bank_layers=8,
        conv_bank_chans=128,
        conv_proj_filts=3,
        conv_proj_chans=256,
        highway_layers=4,
        highway_units=128,
        gru_units=256,
    ):
        """Initialize CBHG module.

        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            conv_bank_layers (int, optional): The number of convolution bank layers.
            conv_bank_chans (int, optional): The number of channels in convolution bank.
            conv_proj_filts (int, optional):
                Kernel size of convolutional projection layer.
            conv_proj_chans (int, optional):
                The number of channels in convolutional projection layer.
            highway_layers (int, optional): The number of highway network layers.
            highway_units (int, optional): The number of highway network units.
            gru_units (int, optional): The number of GRU units (for both directions).
        """
        super(CBHG, self).__init__()
        self.idim = idim
        self.odim = odim
        self.conv_bank_layers = conv_bank_layers
        self.conv_bank_chans = conv_bank_chans
        self.conv_proj_filts = conv_proj_filts
        self.conv_proj_chans = conv_proj_chans
        self.highway_layers = highway_layers
        self.highway_units = highway_units
        self.gru_units = gru_units

        # define 1d convolution bank
        # kernel sizes 1..conv_bank_layers; even kernels need asymmetric padding
        # so every branch preserves the input length
        self.conv_bank = torch.nn.ModuleList()
        for k in range(1, self.conv_bank_layers + 1):
            if k % 2 != 0:
                padding = (k - 1) // 2
            else:
                padding = ((k - 1) // 2, (k - 1) // 2 + 1)
            self.conv_bank += [
                torch.nn.Sequential(
                    torch.nn.ConstantPad1d(padding, 0.0),
                    torch.nn.Conv1d(
                        idim, self.conv_bank_chans, k, stride=1, padding=0, bias=True
                    ),
                    torch.nn.BatchNorm1d(self.conv_bank_chans),
                    torch.nn.ReLU(),
                )
            ]

        # define max pooling (need padding for one-side to keep same length)
        self.max_pool = torch.nn.Sequential(
            torch.nn.ConstantPad1d((0, 1), 0.0), torch.nn.MaxPool1d(2, stride=1)
        )

        # define 1d convolution projection
        self.projections = torch.nn.Sequential(
            torch.nn.Conv1d(
                self.conv_bank_chans * self.conv_bank_layers,
                self.conv_proj_chans,
                self.conv_proj_filts,
                stride=1,
                padding=(self.conv_proj_filts - 1) // 2,
                bias=True,
            ),
            torch.nn.BatchNorm1d(self.conv_proj_chans),
            torch.nn.ReLU(),
            torch.nn.Conv1d(
                self.conv_proj_chans,
                self.idim,
                self.conv_proj_filts,
                stride=1,
                padding=(self.conv_proj_filts - 1) // 2,
                bias=True,
            ),
            torch.nn.BatchNorm1d(self.idim),
        )

        # define highway network
        # first Linear adapts idim -> highway_units before the highway stack
        self.highways = torch.nn.ModuleList()
        self.highways += [torch.nn.Linear(idim, self.highway_units)]
        for _ in range(self.highway_layers):
            self.highways += [HighwayNet(self.highway_units)]

        # define bidirectional GRU
        self.gru = torch.nn.GRU(
            self.highway_units,
            gru_units // 2,
            num_layers=1,
            batch_first=True,
            bidirectional=True,
        )

        # define final projection
        self.output = torch.nn.Linear(gru_units, odim, bias=True)

    def forward(self, xs, ilens):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of the padded sequences of inputs (B, Tmax, idim).
            ilens (LongTensor): Batch of lengths of each input sequence (B,).

        Return:
            Tensor: Batch of the padded sequence of outputs (B, Tmax, odim).
            LongTensor: Batch of lengths of each output sequence (B,).
        """
        xs = xs.transpose(1, 2)  # (B, idim, Tmax)
        convs = []
        for k in range(self.conv_bank_layers):
            convs += [self.conv_bank[k](xs)]
        convs = torch.cat(convs, dim=1)  # (B, #CH * #BANK, Tmax)
        convs = self.max_pool(convs)
        convs = self.projections(convs).transpose(1, 2)  # (B, Tmax, idim)
        # residual connection around the conv bank + projection
        xs = xs.transpose(1, 2) + convs
        # + 1 for dimension adjustment layer
        for i in range(self.highway_layers + 1):
            xs = self.highways[i](xs)

        # sort by length
        # pack_padded_sequence expects sequences in descending length order
        xs, ilens, sort_idx = self._sort_by_length(xs, ilens)

        # total_length needs for DataParallel
        # (see https://github.com/pytorch/pytorch/pull/6327)
        total_length = xs.size(1)
        if not isinstance(ilens, torch.Tensor):
            ilens = torch.tensor(ilens)
        xs = pack_padded_sequence(xs, ilens.cpu(), batch_first=True)
        self.gru.flatten_parameters()
        xs, _ = self.gru(xs)
        xs, ilens = pad_packed_sequence(xs, batch_first=True, total_length=total_length)

        # revert sorting by length
        xs, ilens = self._revert_sort_by_length(xs, ilens, sort_idx)

        xs = self.output(xs)  # (B, Tmax, odim)

        return xs, ilens

    def inference(self, x):
        """Inference.

        Args:
            x (Tensor): The sequences of inputs (T, idim).

        Return:
            Tensor: The sequence of outputs (T, odim).
        """
        assert len(x.size()) == 2
        xs = x.unsqueeze(0)
        ilens = x.new([x.size(0)]).long()

        return self.forward(xs, ilens)[0][0]

    def _sort_by_length(self, xs, ilens):
        # descending length order; sort_idx is kept to undo the permutation
        sort_ilens, sort_idx = ilens.sort(0, descending=True)
        return xs[sort_idx], ilens[sort_idx], sort_idx

    def _revert_sort_by_length(self, xs, ilens, sort_idx):
        # invert the permutation applied by _sort_by_length
        _, revert_idx = sort_idx.sort(0)
        return xs[revert_idx], ilens[revert_idx]
class HighwayNet(torch.nn.Module):
    """Highway Network module.

    This is a module of Highway Network introduced in `Highway Networks`_.

    .. _`Highway Networks`: https://arxiv.org/abs/1505.00387

    """

    def __init__(self, idim):
        """Initialize Highway Network module.

        Args:
            idim (int): Dimension of the inputs.
        """
        super(HighwayNet, self).__init__()
        self.idim = idim
        # transform path: Linear + ReLU
        proj_linear = torch.nn.Linear(idim, idim)
        self.projection = torch.nn.Sequential(proj_linear, torch.nn.ReLU())
        # gate path: Linear + Sigmoid
        gate_linear = torch.nn.Linear(idim, idim)
        self.gate = torch.nn.Sequential(gate_linear, torch.nn.Sigmoid())

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Batch of inputs (B, ..., idim).

        Returns:
            Tensor: Batch of outputs, which are the same shape as inputs (B, ..., idim).
        """
        transformed = self.projection(x)
        carry_gate = self.gate(x)
        # gated mixture of the transformed path and the identity path
        return transformed * carry_gate + x * (1.0 - carry_gate)
| 9,068 | 31.978182 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/fastspeech/duration_calculator.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Duration calculator related modules."""
import torch
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import Tacotron2
from espnet.nets.pytorch_backend.e2e_tts_transformer import Transformer
from espnet.nets.pytorch_backend.nets_utils import pad_list
class DurationCalculator(torch.nn.Module):
    """Duration calculator module for FastSpeech.

    Extracts per-token durations from the attention weights of a pretrained
    teacher model (either a Transformer or a Tacotron2 TTS model).

    Todo:
        * Fix the duplicated calculation of diagonal head decision

    """

    def __init__(self, teacher_model):
        """Initialize duration calculator module.

        Args:
            teacher_model (e2e_tts_transformer.Transformer):
                Pretrained auto-regressive Transformer.

        Raises:
            ValueError: If ``teacher_model`` is neither a Transformer nor a Tacotron2.
        """
        super(DurationCalculator, self).__init__()
        if isinstance(teacher_model, Transformer):
            # -1 marks "diagonal attention head not yet selected"
            self.register_buffer("diag_head_idx", torch.tensor(-1))
        elif isinstance(teacher_model, Tacotron2):
            pass
        else:
            raise ValueError(
                "teacher model should be the instance of "
                "e2e_tts_transformer.Transformer or e2e_tts_tacotron2.Tacotron2."
            )
        self.teacher_model = teacher_model

    def forward(self, xs, ilens, ys, olens, spembs=None):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of the padded sequences of character ids (B, Tmax).
            ilens (Tensor): Batch of lengths of each input sequence (B,).
            ys (Tensor):
                Batch of the padded sequence of target features (B, Lmax, odim).
            olens (Tensor): Batch of lengths of each output sequence (B,).
            spembs (Tensor, optional):
                Batch of speaker embedding vectors (B, spk_embed_dim).

        Returns:
            Tensor: Batch of durations (B, Tmax).
        """
        if isinstance(self.teacher_model, Transformer):
            att_ws = self._calculate_encoder_decoder_attentions(
                xs, ilens, ys, olens, spembs=spembs
            )
            # TODO(kan-bayashi): fix this issue
            # this does not work in multi-gpu case. registered buffer is not saved.
            if int(self.diag_head_idx) == -1:
                # select the most diagonal attention head once, lazily
                self._init_diagonal_head(att_ws)
            att_ws = att_ws[:, self.diag_head_idx]
        else:
            # NOTE(kan-bayashi): Here we assume that the teacher is tacotron 2
            att_ws = self.teacher_model.calculate_all_attentions(
                xs, ilens, ys, spembs=spembs, keep_tensor=True
            )
        durations = [
            self._calculate_duration(att_w, ilen, olen)
            for att_w, ilen, olen in zip(att_ws, ilens, olens)
        ]

        return pad_list(durations, 0)

    @staticmethod
    def _calculate_duration(att_w, ilen, olen):
        # duration of input token i = number of output frames whose argmax
        # attention position is i (within the valid ilen x olen region)
        return torch.stack(
            [att_w[:olen, :ilen].argmax(-1).eq(i).sum() for i in range(ilen)]
        )

    def _init_diagonal_head(self, att_ws):
        # score each head by the mean of its per-frame max attention weight;
        # the highest-scoring head is treated as the most diagonal one
        diagonal_scores = att_ws.max(dim=-1)[0].mean(dim=-1).mean(dim=0)  # (H * L,)
        self.register_buffer("diag_head_idx", diagonal_scores.argmax())

    def _calculate_encoder_decoder_attentions(self, xs, ilens, ys, olens, spembs=None):
        # gather only the encoder-decoder ("src_attn") attention maps,
        # concatenated over layers/heads
        att_dict = self.teacher_model.calculate_all_attentions(
            xs, ilens, ys, olens, spembs=spembs, skip_output=True, keep_tensor=True
        )
        return torch.cat(
            [att_dict[k] for k in att_dict.keys() if "src_attn" in k], dim=1
        )  # (B, H*L, Lmax, Tmax)
| 3,600 | 35.744898 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/fastspeech/duration_predictor.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Duration predictor related modules."""
import torch
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class DurationPredictor(torch.nn.Module):
    """Duration predictor module.

    This is a module of duration predictor described
    in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
    The duration predictor predicts a duration of each frame in log domain
    from the hidden embeddings of encoder.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    Note:
        The calculation domain of outputs is different
        between in `forward` and in `inference`. In `forward`,
        the outputs are calculated in log domain but in `inference`,
        those are calculated in linear domain.

    """

    def __init__(
        self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0
    ):
        """Initilize duration predictor module.

        Args:
            idim (int): Input dimension.
            n_layers (int, optional): Number of convolutional layers.
            n_chans (int, optional): Number of channels of convolutional layers.
            kernel_size (int, optional): Kernel size of convolutional layers.
            dropout_rate (float, optional): Dropout rate.
            offset (float, optional): Offset value to avoid nan in log domain.

        """
        super(DurationPredictor, self).__init__()
        self.offset = offset
        blocks = []
        for layer in range(n_layers):
            input_chans = idim if layer == 0 else n_chans
            blocks.append(
                torch.nn.Sequential(
                    torch.nn.Conv1d(
                        input_chans,
                        n_chans,
                        kernel_size,
                        stride=1,
                        padding=(kernel_size - 1) // 2,
                    ),
                    torch.nn.ReLU(),
                    LayerNorm(n_chans, dim=1),
                    torch.nn.Dropout(dropout_rate),
                )
            )
        self.conv = torch.nn.ModuleList(blocks)
        self.linear = torch.nn.Linear(n_chans, 1)

    def _forward(self, xs, x_masks=None, is_inference=False):
        # convolutions operate on (B, C, T) layout
        hs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for block in self.conv:
            hs = block(hs)  # (B, C, Tmax)

        # NOTE: calculate in log domain
        preds = self.linear(hs.transpose(1, -1)).squeeze(-1)  # (B, Tmax)

        if is_inference:
            # NOTE: calculate in linear domain
            preds = torch.clamp(
                torch.round(preds.exp() - self.offset), min=0
            ).long()  # avoid negative value

        if x_masks is not None:
            preds = preds.masked_fill(x_masks, 0.0)
        return preds

    def forward(self, xs, x_masks=None):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional):
                Batch of masks indicating padded part (B, Tmax).

        Returns:
            Tensor: Batch of predicted durations in log domain (B, Tmax).

        """
        return self._forward(xs, x_masks, False)

    def inference(self, xs, x_masks=None):
        """Inference duration.

        Args:
            xs (Tensor): Batch of input sequences (B, Tmax, idim).
            x_masks (ByteTensor, optional):
                Batch of masks indicating padded part (B, Tmax).

        Returns:
            LongTensor: Batch of predicted durations in linear domain (B, Tmax).

        """
        return self._forward(xs, x_masks, True)
class DurationPredictorLoss(torch.nn.Module):
    """Loss function module for duration predictor.

    The loss value is Calculated in log domain to make it Gaussian.

    """

    def __init__(self, offset=1.0, reduction="mean"):
        """Initilize duration predictor loss module.

        Args:
            offset (float, optional): Offset value to avoid nan in log domain.
            reduction (str): Reduction type in loss calculation.

        """
        super(DurationPredictorLoss, self).__init__()
        self.criterion = torch.nn.MSELoss(reduction=reduction)
        self.offset = offset

    def forward(self, outputs, targets):
        """Calculate forward propagation.

        Args:
            outputs (Tensor): Batch of prediction durations in log domain (B, T)
            targets (LongTensor): Batch of groundtruth durations in linear domain (B, T)

        Returns:
            Tensor: Mean squared error loss value.

        Note:
            `outputs` is in log domain but `targets` is in linear domain.

        """
        # NOTE: outputs is in log domain while targets in linear
        log_targets = torch.log(targets.float() + self.offset)
        return self.criterion(outputs, log_targets)
| 4,992 | 31.422078 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/fastspeech/length_regulator.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Length regulator related modules."""
import logging
import torch
from espnet.nets.pytorch_backend.nets_utils import pad_list
class LengthRegulator(torch.nn.Module):
    """Length regulator module for feed-forward Transformer.

    This is a module of length regulator described in
    `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
    The length regulator expands char or
    phoneme-level embedding features to frame-level by repeating each
    feature based on the corresponding predicted durations.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    """

    def __init__(self, pad_value=0.0):
        """Initilize length regulator module.

        Args:
            pad_value (float, optional): Value used for padding.

        """
        super().__init__()
        self.pad_value = pad_value

    def forward(self, xs, ds, alpha=1.0):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of sequences of char or phoneme embeddings (B, Tmax, D).
            ds (LongTensor): Batch of durations of each frame (B, T).
            alpha (float, optional): Alpha value to control speed of speech.

        Returns:
            Tensor: replicated input tensor based on durations (B, T*, D).

        """
        if alpha != 1.0:
            # rescale durations to speed up / slow down the output
            assert alpha > 0
            ds = torch.round(ds.float() * alpha).long()

        if ds.sum() == 0:
            logging.warning(
                "predicted durations includes all 0 sequences. "
                "fill the first element with 1."
            )
            # NOTE(kan-bayashi): This case must not be happened in teacher forcing.
            #   It will be happened in inference with a bad duration predictor.
            #   So we do not need to care the padded sequence case here.
            ds[ds.sum(dim=1).eq(0)] = 1

        expanded = [torch.repeat_interleave(x, d, dim=0) for x, d in zip(xs, ds)]
        return pad_list(expanded, self.pad_value)
| 2,171 | 30.941176 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/rnn/decoders.py | """RNN decoder module."""
import logging
import math
import random
from argparse import Namespace
import numpy as np
import torch
import torch.nn.functional as F
from espnet.nets.ctc_prefix_score import CTCPrefixScore, CTCPrefixScoreTH
from espnet.nets.e2e_asr_common import end_detect
from espnet.nets.pytorch_backend.nets_utils import (
mask_by_length,
pad_list,
th_accuracy,
to_device,
)
from espnet.nets.pytorch_backend.rnn.attentions import att_to_numpy
from espnet.nets.scorer_interface import ScorerInterface
# max number of decoded sequences dumped to the log for debugging in forward()
MAX_DECODER_OUTPUT = 5
# beam-width multiplier used to pre-prune candidate tokens before CTC rescoring
CTC_SCORING_RATIO = 1.5
class Decoder(torch.nn.Module, ScorerInterface):
"""Decoder module
:param int eprojs: encoder projection units
:param int odim: dimension of outputs
:param str dtype: gru or lstm
:param int dlayers: decoder layers
:param int dunits: decoder units
:param int sos: start of sequence symbol id
:param int eos: end of sequence symbol id
:param torch.nn.Module att: attention module
:param int verbose: verbose level
:param list char_list: list of character strings
:param ndarray labeldist: distribution of label smoothing
:param float lsm_weight: label smoothing weight
:param float sampling_probability: scheduled sampling probability
:param float dropout: dropout rate
:param float context_residual: if True, use context vector for token generation
:param float replace_sos: use for multilingual (speech/text) translation
"""
def __init__(
self,
eprojs,
odim,
dtype,
dlayers,
dunits,
sos,
eos,
att,
verbose=0,
char_list=None,
labeldist=None,
lsm_weight=0.0,
sampling_probability=0.0,
dropout=0.0,
context_residual=False,
replace_sos=False,
num_encs=1,
):
torch.nn.Module.__init__(self)
self.dtype = dtype
self.dunits = dunits
self.dlayers = dlayers
self.context_residual = context_residual
self.embed = torch.nn.Embedding(odim, dunits)
self.dropout_emb = torch.nn.Dropout(p=dropout)
self.decoder = torch.nn.ModuleList()
self.dropout_dec = torch.nn.ModuleList()
self.decoder += [
torch.nn.LSTMCell(dunits + eprojs, dunits)
if self.dtype == "lstm"
else torch.nn.GRUCell(dunits + eprojs, dunits)
]
self.dropout_dec += [torch.nn.Dropout(p=dropout)]
for _ in range(1, self.dlayers):
self.decoder += [
torch.nn.LSTMCell(dunits, dunits)
if self.dtype == "lstm"
else torch.nn.GRUCell(dunits, dunits)
]
self.dropout_dec += [torch.nn.Dropout(p=dropout)]
# NOTE: dropout is applied only for the vertical connections
# see https://arxiv.org/pdf/1409.2329.pdf
self.ignore_id = -1
if context_residual:
self.output = torch.nn.Linear(dunits + eprojs, odim)
else:
self.output = torch.nn.Linear(dunits, odim)
self.loss = None
self.att = att
self.dunits = dunits
self.sos = sos
self.eos = eos
self.odim = odim
self.verbose = verbose
self.char_list = char_list
# for label smoothing
self.labeldist = labeldist
self.vlabeldist = None
self.lsm_weight = lsm_weight
self.sampling_probability = sampling_probability
self.dropout = dropout
self.num_encs = num_encs
# for multilingual E2E-ST
self.replace_sos = replace_sos
self.logzero = -10000000000.0
def zero_state(self, hs_pad):
return hs_pad.new_zeros(hs_pad.size(0), self.dunits)
def rnn_forward(self, ey, z_list, c_list, z_prev, c_prev):
if self.dtype == "lstm":
z_list[0], c_list[0] = self.decoder[0](ey, (z_prev[0], c_prev[0]))
for i in range(1, self.dlayers):
z_list[i], c_list[i] = self.decoder[i](
self.dropout_dec[i - 1](z_list[i - 1]), (z_prev[i], c_prev[i])
)
else:
z_list[0] = self.decoder[0](ey, z_prev[0])
for i in range(1, self.dlayers):
z_list[i] = self.decoder[i](
self.dropout_dec[i - 1](z_list[i - 1]), z_prev[i]
)
return z_list, c_list
    def forward(self, hs_pad, hlens, ys_pad, strm_idx=0, lang_ids=None):
        """Decoder forward

        Computes the attention (cross-entropy) loss, accuracy and perplexity
        for one teacher-forced pass over the batch.

        :param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)
            [in multi-encoder case,
            list of torch.Tensor,
            [(B, Tmax_1, D), (B, Tmax_2, D), ..., ] ]
        :param torch.Tensor hlens: batch of lengths of hidden state sequences (B)
            [in multi-encoder case, list of torch.Tensor,
            [(B), (B), ..., ]
        :param torch.Tensor ys_pad: batch of padded character id sequence tensor
            (B, Lmax)
        :param int strm_idx: stream index indicates the index of decoding stream.
        :param torch.Tensor lang_ids: batch of target language id tensor (B, 1)
        :return: attention loss value
        :rtype: torch.Tensor
        :return: accuracy
        :rtype: float
        """
        # to support mutiple encoder asr mode, in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            hs_pad = [hs_pad]
            hlens = [hlens]
        # TODO(kan-bayashi): need to make more smart way
        ys = [y[y != self.ignore_id] for y in ys_pad]  # parse padded ys
        # attention index for the attention module
        # in SPA (speaker parallel attention),
        # att_idx is used to select attention module. In other cases, it is 0.
        att_idx = min(strm_idx, len(self.att) - 1)
        # hlens should be list of list of integer
        hlens = [list(map(int, hlens[idx])) for idx in range(self.num_encs)]
        self.loss = None
        # prepare input and output word sequences with sos/eos IDs
        eos = ys[0].new([self.eos])
        sos = ys[0].new([self.sos])
        if self.replace_sos:
            # multilingual: the first token is the target-language id, not <sos>
            ys_in = [torch.cat([idx, y], dim=0) for idx, y in zip(lang_ids, ys)]
        else:
            ys_in = [torch.cat([sos, y], dim=0) for y in ys]
        ys_out = [torch.cat([y, eos], dim=0) for y in ys]
        # padding for ys with -1
        # pys: utt x olen
        ys_in_pad = pad_list(ys_in, self.eos)
        ys_out_pad = pad_list(ys_out, self.ignore_id)
        # get dim, length info
        batch = ys_out_pad.size(0)
        olength = ys_out_pad.size(1)
        for idx in range(self.num_encs):
            logging.info(
                self.__class__.__name__
                + "Number of Encoder:{}; enc{}: input lengths: {}.".format(
                    self.num_encs, idx + 1, hlens[idx]
                )
            )
        logging.info(
            self.__class__.__name__
            + " output lengths: "
            + str([y.size(0) for y in ys_out])
        )
        # initialization
        c_list = [self.zero_state(hs_pad[0])]
        z_list = [self.zero_state(hs_pad[0])]
        for _ in range(1, self.dlayers):
            c_list.append(self.zero_state(hs_pad[0]))
            z_list.append(self.zero_state(hs_pad[0]))
        z_all = []
        if self.num_encs == 1:
            att_w = None
            self.att[att_idx].reset()  # reset pre-computation of h
        else:
            att_w_list = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * (self.num_encs)  # atts
            for idx in range(self.num_encs + 1):
                self.att[idx].reset()  # reset pre-computation of h in atts and han
        # pre-computation of embedding
        eys = self.dropout_emb(self.embed(ys_in_pad))  # utt x olen x zdim
        # loop for an output sequence
        for i in range(olength):
            if self.num_encs == 1:
                att_c, att_w = self.att[att_idx](
                    hs_pad[0], hlens[0], self.dropout_dec[0](z_list[0]), att_w
                )
            else:
                # per-encoder attention, then hierarchical attention (HAN)
                # over the stacked per-encoder context vectors
                for idx in range(self.num_encs):
                    att_c_list[idx], att_w_list[idx] = self.att[idx](
                        hs_pad[idx],
                        hlens[idx],
                        self.dropout_dec[0](z_list[0]),
                        att_w_list[idx],
                    )
                hs_pad_han = torch.stack(att_c_list, dim=1)
                hlens_han = [self.num_encs] * len(ys_in)
                att_c, att_w_list[self.num_encs] = self.att[self.num_encs](
                    hs_pad_han,
                    hlens_han,
                    self.dropout_dec[0](z_list[0]),
                    att_w_list[self.num_encs],
                )
            if i > 0 and random.random() < self.sampling_probability:
                # scheduled sampling: feed back the model's own previous
                # prediction instead of the ground-truth token
                logging.info(" scheduled sampling ")
                z_out = self.output(z_all[-1])
                z_out = np.argmax(z_out.detach().cpu(), axis=1)
                z_out = self.dropout_emb(self.embed(to_device(hs_pad[0], z_out)))
                ey = torch.cat((z_out, att_c), dim=1)  # utt x (zdim + hdim)
            else:
                ey = torch.cat((eys[:, i, :], att_c), dim=1)  # utt x (zdim + hdim)
            z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list, c_list)
            if self.context_residual:
                z_all.append(
                    torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
                )  # utt x (zdim + hdim)
            else:
                z_all.append(self.dropout_dec[-1](z_list[-1]))  # utt x (zdim)
        z_all = torch.stack(z_all, dim=1).view(batch * olength, -1)
        # compute loss
        y_all = self.output(z_all)
        self.loss = F.cross_entropy(
            y_all,
            ys_out_pad.view(-1),
            ignore_index=self.ignore_id,
            reduction="mean",
        )
        # compute perplexity
        ppl = math.exp(self.loss.item())
        # -1: eos, which is removed in the loss computation
        self.loss *= np.mean([len(x) for x in ys_in]) - 1
        acc = th_accuracy(y_all, ys_out_pad, ignore_label=self.ignore_id)
        logging.info("att loss:" + "".join(str(self.loss.item()).split("\n")))
        # show predicted character sequence for debug
        if self.verbose > 0 and self.char_list is not None:
            ys_hat = y_all.view(batch, olength, -1)
            ys_true = ys_out_pad
            for (i, y_hat), y_true in zip(
                enumerate(ys_hat.detach().cpu().numpy()), ys_true.detach().cpu().numpy()
            ):
                # only dump the first MAX_DECODER_OUTPUT utterances
                if i == MAX_DECODER_OUTPUT:
                    break
                idx_hat = np.argmax(y_hat[y_true != self.ignore_id], axis=1)
                idx_true = y_true[y_true != self.ignore_id]
                seq_hat = [self.char_list[int(idx)] for idx in idx_hat]
                seq_true = [self.char_list[int(idx)] for idx in idx_true]
                seq_hat = "".join(seq_hat)
                seq_true = "".join(seq_true)
                logging.info("groundtruth[%d]: " % i + seq_true)
                logging.info("prediction [%d]: " % i + seq_hat)
        if self.labeldist is not None:
            if self.vlabeldist is None:
                self.vlabeldist = to_device(hs_pad[0], torch.from_numpy(self.labeldist))
            # label smoothing: interpolate the CE loss with the prior label
            # distribution weighted by lsm_weight
            loss_reg = -torch.sum(
                (F.log_softmax(y_all, dim=1) * self.vlabeldist).view(-1), dim=0
            ) / len(ys_in)
            self.loss = (1.0 - self.lsm_weight) * self.loss + self.lsm_weight * loss_reg
        return self.loss, acc, ppl
    def recognize_beam(self, h, lpz, recog_args, char_list, rnnlm=None, strm_idx=0):
        """beam search implementation

        :param torch.Tensor h: encoder hidden state (T, eprojs)
            [in multi-encoder case, list of torch.Tensor,
            [(T1, eprojs), (T2, eprojs), ...] ]
        :param torch.Tensor lpz: ctc log softmax output (T, odim)
            [in multi-encoder case, list of torch.Tensor,
            [(T1, odim), (T2, odim), ...] ]
        :param Namespace recog_args: argument Namespace containing options
        :param char_list: list of character strings
        :param torch.nn.Module rnnlm: language module
        :param int strm_idx:
            stream index for speaker parallel attention in multi-speaker case
        :return: N-best decoding results
        :rtype: list of dicts
        """
        # to support mutiple encoder asr mode, in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            h = [h]
            lpz = [lpz]
        if self.num_encs > 1 and lpz is None:
            lpz = [lpz] * self.num_encs
        for idx in range(self.num_encs):
            logging.info(
                "Number of Encoder:{}; enc{}: input lengths: {}.".format(
                    self.num_encs, idx + 1, h[0].size(0)
                )
            )
        att_idx = min(strm_idx, len(self.att) - 1)
        # initialization
        c_list = [self.zero_state(h[0].unsqueeze(0))]
        z_list = [self.zero_state(h[0].unsqueeze(0))]
        for _ in range(1, self.dlayers):
            c_list.append(self.zero_state(h[0].unsqueeze(0)))
            z_list.append(self.zero_state(h[0].unsqueeze(0)))
        if self.num_encs == 1:
            a = None
            self.att[att_idx].reset()  # reset pre-computation of h
        else:
            a = [None] * (self.num_encs + 1)  # atts + han
            att_w_list = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * (self.num_encs)  # atts
            for idx in range(self.num_encs + 1):
                self.att[idx].reset()  # reset pre-computation of h in atts and han
        # search parms
        beam = recog_args.beam_size
        penalty = recog_args.penalty
        ctc_weight = getattr(recog_args, "ctc_weight", False)  # for NMT
        if lpz[0] is not None and self.num_encs > 1:
            # weights-ctc,
            # e.g. ctc_loss = w_1*ctc_1_loss + w_2 * ctc_2_loss + w_N * ctc_N_loss
            weights_ctc_dec = recog_args.weights_ctc_dec / np.sum(
                recog_args.weights_ctc_dec
            )  # normalize
            logging.info(
                "ctc weights (decoding): " + " ".join([str(x) for x in weights_ctc_dec])
            )
        else:
            weights_ctc_dec = [1.0]
        # preprate sos
        if self.replace_sos and recog_args.tgt_lang:
            y = char_list.index(recog_args.tgt_lang)
        else:
            y = self.sos
        logging.info("<sos> index: " + str(y))
        logging.info("<sos> mark: " + char_list[y])
        vy = h[0].new_zeros(1).long()
        maxlen = np.amin([h[idx].size(0) for idx in range(self.num_encs)])
        if recog_args.maxlenratio != 0:
            # maxlen >= 1
            maxlen = max(1, int(recog_args.maxlenratio * maxlen))
        minlen = int(recog_args.minlenratio * maxlen)
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))
        # initialize hypothesis
        if rnnlm:
            hyp = {
                "score": 0.0,
                "yseq": [y],
                "c_prev": c_list,
                "z_prev": z_list,
                "a_prev": a,
                "rnnlm_prev": None,
            }
        else:
            hyp = {
                "score": 0.0,
                "yseq": [y],
                "c_prev": c_list,
                "z_prev": z_list,
                "a_prev": a,
            }
        if lpz[0] is not None:
            # joint CTC/attention decoding: keep a CTC prefix scorer per encoder
            ctc_prefix_score = [
                CTCPrefixScore(lpz[idx].detach().numpy(), 0, self.eos, np)
                for idx in range(self.num_encs)
            ]
            hyp["ctc_state_prev"] = [
                ctc_prefix_score[idx].initial_state() for idx in range(self.num_encs)
            ]
            hyp["ctc_score_prev"] = [0.0] * self.num_encs
            if ctc_weight != 1.0:
                # pre-pruning based on attention scores
                ctc_beam = min(lpz[0].shape[-1], int(beam * CTC_SCORING_RATIO))
            else:
                ctc_beam = lpz[0].shape[-1]
        hyps = [hyp]
        ended_hyps = []
        for i in range(maxlen):
            logging.debug("position " + str(i))
            hyps_best_kept = []
            for hyp in hyps:
                vy[0] = hyp["yseq"][i]
                ey = self.dropout_emb(self.embed(vy))  # utt list (1) x zdim
                if self.num_encs == 1:
                    att_c, att_w = self.att[att_idx](
                        h[0].unsqueeze(0),
                        [h[0].size(0)],
                        self.dropout_dec[0](hyp["z_prev"][0]),
                        hyp["a_prev"],
                    )
                else:
                    for idx in range(self.num_encs):
                        att_c_list[idx], att_w_list[idx] = self.att[idx](
                            h[idx].unsqueeze(0),
                            [h[idx].size(0)],
                            self.dropout_dec[0](hyp["z_prev"][0]),
                            hyp["a_prev"][idx],
                        )
                    h_han = torch.stack(att_c_list, dim=1)
                    att_c, att_w_list[self.num_encs] = self.att[self.num_encs](
                        h_han,
                        [self.num_encs],
                        self.dropout_dec[0](hyp["z_prev"][0]),
                        hyp["a_prev"][self.num_encs],
                    )
                ey = torch.cat((ey, att_c), dim=1)  # utt(1) x (zdim + hdim)
                z_list, c_list = self.rnn_forward(
                    ey, z_list, c_list, hyp["z_prev"], hyp["c_prev"]
                )
                # get nbest local scores and their ids
                if self.context_residual:
                    logits = self.output(
                        torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
                    )
                else:
                    logits = self.output(self.dropout_dec[-1](z_list[-1]))
                local_att_scores = F.log_softmax(logits, dim=1)
                if rnnlm:
                    rnnlm_state, local_lm_scores = rnnlm.predict(hyp["rnnlm_prev"], vy)
                    local_scores = (
                        local_att_scores + recog_args.lm_weight * local_lm_scores
                    )
                else:
                    local_scores = local_att_scores
                if lpz[0] is not None:
                    # rescore the attention top-ctc_beam candidates with CTC
                    local_best_scores, local_best_ids = torch.topk(
                        local_att_scores, ctc_beam, dim=1
                    )
                    ctc_scores, ctc_states = (
                        [None] * self.num_encs,
                        [None] * self.num_encs,
                    )
                    for idx in range(self.num_encs):
                        ctc_scores[idx], ctc_states[idx] = ctc_prefix_score[idx](
                            hyp["yseq"], local_best_ids[0], hyp["ctc_state_prev"][idx]
                        )
                    local_scores = (1.0 - ctc_weight) * local_att_scores[
                        :, local_best_ids[0]
                    ]
                    if self.num_encs == 1:
                        local_scores += ctc_weight * torch.from_numpy(
                            ctc_scores[0] - hyp["ctc_score_prev"][0]
                        )
                    else:
                        for idx in range(self.num_encs):
                            local_scores += (
                                ctc_weight
                                * weights_ctc_dec[idx]
                                * torch.from_numpy(
                                    ctc_scores[idx] - hyp["ctc_score_prev"][idx]
                                )
                            )
                    if rnnlm:
                        local_scores += (
                            recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]]
                        )
                    local_best_scores, joint_best_ids = torch.topk(
                        local_scores, beam, dim=1
                    )
                    local_best_ids = local_best_ids[:, joint_best_ids[0]]
                else:
                    local_best_scores, local_best_ids = torch.topk(
                        local_scores, beam, dim=1
                    )
                # expand this hypothesis with its top-beam candidate tokens
                for j in range(beam):
                    new_hyp = {}
                    # [:] is needed!
                    new_hyp["z_prev"] = z_list[:]
                    new_hyp["c_prev"] = c_list[:]
                    if self.num_encs == 1:
                        new_hyp["a_prev"] = att_w[:]
                    else:
                        new_hyp["a_prev"] = [
                            att_w_list[idx][:] for idx in range(self.num_encs + 1)
                        ]
                    new_hyp["score"] = hyp["score"] + local_best_scores[0, j]
                    new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"]))
                    new_hyp["yseq"][: len(hyp["yseq"])] = hyp["yseq"]
                    new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[0, j])
                    if rnnlm:
                        new_hyp["rnnlm_prev"] = rnnlm_state
                    if lpz[0] is not None:
                        new_hyp["ctc_state_prev"] = [
                            ctc_states[idx][joint_best_ids[0, j]]
                            for idx in range(self.num_encs)
                        ]
                        new_hyp["ctc_score_prev"] = [
                            ctc_scores[idx][joint_best_ids[0, j]]
                            for idx in range(self.num_encs)
                        ]
                    # will be (2 x beam) hyps at most
                    hyps_best_kept.append(new_hyp)
                hyps_best_kept = sorted(
                    hyps_best_kept, key=lambda x: x["score"], reverse=True
                )[:beam]
            # sort and get nbest
            hyps = hyps_best_kept
            logging.debug("number of pruned hypotheses: " + str(len(hyps)))
            logging.debug(
                "best hypo: "
                + "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]])
            )
            # add eos in the final loop to avoid that there are no ended hyps
            if i == maxlen - 1:
                logging.info("adding <eos> in the last position in the loop")
                for hyp in hyps:
                    hyp["yseq"].append(self.eos)
            # add ended hypotheses to a final list,
            # and removed them from current hypotheses
            # (this will be a problem, number of hyps < beam)
            remained_hyps = []
            for hyp in hyps:
                if hyp["yseq"][-1] == self.eos:
                    # only store the sequence that has more than minlen outputs
                    # also add penalty
                    if len(hyp["yseq"]) > minlen:
                        hyp["score"] += (i + 1) * penalty
                        if rnnlm:  # Word LM needs to add final <eos> score
                            hyp["score"] += recog_args.lm_weight * rnnlm.final(
                                hyp["rnnlm_prev"]
                            )
                        ended_hyps.append(hyp)
                else:
                    remained_hyps.append(hyp)
            # end detection
            if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
                logging.info("end detected at %d", i)
                break
            hyps = remained_hyps
            if len(hyps) > 0:
                logging.debug("remaining hypotheses: " + str(len(hyps)))
            else:
                logging.info("no hypothesis. Finish decoding.")
                break
            for hyp in hyps:
                logging.debug(
                    "hypo: " + "".join([char_list[int(x)] for x in hyp["yseq"][1:]])
                )
            logging.debug("number of ended hypotheses: " + str(len(ended_hyps)))
        nbest_hyps = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[
            : min(len(ended_hyps), recog_args.nbest)
        ]
        # check number of hypotheses
        if len(nbest_hyps) == 0:
            logging.warning(
                "there is no N-best results, "
                "perform recognition again with smaller minlenratio."
            )
            # should copy because Namespace will be overwritten globally
            recog_args = Namespace(**vars(recog_args))
            recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
            if self.num_encs == 1:
                return self.recognize_beam(h[0], lpz[0], recog_args, char_list, rnnlm)
            else:
                return self.recognize_beam(h, lpz, recog_args, char_list, rnnlm)
        logging.info("total log probability: " + str(nbest_hyps[0]["score"]))
        logging.info(
            "normalized log probability: "
            + str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"]))
        )
        # remove sos
        return nbest_hyps
    def recognize_beam_batch(
        self,
        h,
        hlens,
        lpz,
        recog_args,
        char_list,
        rnnlm=None,
        normalize_score=True,
        strm_idx=0,
        lang_ids=None,
    ):
        """Batch beam search implementation.

        All ``batch * beam`` hypotheses are advanced in lock-step, so each
        decoding step is one batched forward pass.

        :param torch.Tensor h: batch of padded hidden state sequences
            (B, Tmax, eprojs) [in multi-encoder case, list of torch.Tensor]
        :param torch.Tensor hlens: batch of lengths of hidden state sequences (B)
            [in multi-encoder case, list of torch.Tensor]
        :param torch.Tensor lpz: batch of ctc log softmax outputs
            [in multi-encoder case, list of torch.Tensor]
        :param Namespace recog_args: argument Namespace containing options
        :param char_list: list of character strings
        :param torch.nn.Module rnnlm: language module
        :param bool normalize_score:
            whether to divide each final hypothesis score by its length
        :param int strm_idx:
            stream index for speaker parallel attention in multi-speaker case
        :param torch.Tensor lang_ids: batch of target language id tensor (B, 1)
        :return: N-best decoding results (one hypothesis list per utterance)
        :rtype: list of list of dicts
        """
        # to support mutiple encoder asr mode, in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            h = [h]
            hlens = [hlens]
            lpz = [lpz]
        if self.num_encs > 1 and lpz is None:
            lpz = [lpz] * self.num_encs
        att_idx = min(strm_idx, len(self.att) - 1)
        for idx in range(self.num_encs):
            logging.info(
                "Number of Encoder:{}; enc{}: input lengths: {}.".format(
                    self.num_encs, idx + 1, h[idx].size(1)
                )
            )
            h[idx] = mask_by_length(h[idx], hlens[idx], 0.0)
        # search params
        batch = len(hlens[0])
        beam = recog_args.beam_size
        penalty = recog_args.penalty
        ctc_weight = getattr(recog_args, "ctc_weight", 0)  # for NMT
        att_weight = 1.0 - ctc_weight
        ctc_margin = getattr(
            recog_args, "ctc_window_margin", 0
        )  # use getattr to keep compatibility
        # weights-ctc,
        # e.g. ctc_loss = w_1*ctc_1_loss + w_2 * ctc_2_loss + w_N * ctc_N_loss
        if lpz[0] is not None and self.num_encs > 1:
            weights_ctc_dec = recog_args.weights_ctc_dec / np.sum(
                recog_args.weights_ctc_dec
            )  # normalize
            logging.info(
                "ctc weights (decoding): " + " ".join([str(x) for x in weights_ctc_dec])
            )
        else:
            weights_ctc_dec = [1.0]
        # n_bb: flattened batch-of-beams size; pad_b maps a beam index to its
        # offset within the flattened (batch * beam) dimension
        n_bb = batch * beam
        pad_b = to_device(h[0], torch.arange(batch) * beam).view(-1, 1)
        max_hlen = np.amin([max(hlens[idx]) for idx in range(self.num_encs)])
        if recog_args.maxlenratio == 0:
            maxlen = max_hlen
        else:
            maxlen = max(1, int(recog_args.maxlenratio * max_hlen))
        minlen = int(recog_args.minlenratio * max_hlen)
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))
        # initialization
        c_prev = [
            to_device(h[0], torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)
        ]
        z_prev = [
            to_device(h[0], torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)
        ]
        c_list = [
            to_device(h[0], torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)
        ]
        z_list = [
            to_device(h[0], torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)
        ]
        vscores = to_device(h[0], torch.zeros(batch, beam))
        rnnlm_state = None
        if self.num_encs == 1:
            a_prev = [None]
            att_w_list, ctc_scorer, ctc_state = [None], [None], [None]
            self.att[att_idx].reset()  # reset pre-computation of h
        else:
            a_prev = [None] * (self.num_encs + 1)  # atts + han
            att_w_list = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * (self.num_encs)  # atts
            ctc_scorer, ctc_state = [None] * (self.num_encs), [None] * (self.num_encs)
            for idx in range(self.num_encs + 1):
                self.att[idx].reset()  # reset pre-computation of h in atts and han
        if self.replace_sos and recog_args.tgt_lang:
            logging.info("<sos> index: " + str(char_list.index(recog_args.tgt_lang)))
            logging.info("<sos> mark: " + recog_args.tgt_lang)
            yseq = [[char_list.index(recog_args.tgt_lang)] for _ in range(n_bb)]
        elif lang_ids is not None:
            # NOTE: used for evaluation during training
            yseq = [[lang_ids[b // recog_args.beam_size]] for b in range(n_bb)]
        else:
            logging.info("<sos> index: " + str(self.sos))
            logging.info("<sos> mark: " + char_list[self.sos])
            yseq = [[self.sos] for _ in range(n_bb)]
        accum_odim_ids = [self.sos for _ in range(n_bb)]
        stop_search = [False for _ in range(batch)]
        nbest_hyps = [[] for _ in range(batch)]
        ended_hyps = [[] for _ in range(batch)]
        # replicate encoder outputs/lengths beam times: (B, ...) -> (B*beam, ...)
        exp_hlens = [
            hlens[idx].repeat(beam).view(beam, batch).transpose(0, 1).contiguous()
            for idx in range(self.num_encs)
        ]
        exp_hlens = [exp_hlens[idx].view(-1).tolist() for idx in range(self.num_encs)]
        exp_h = [
            h[idx].unsqueeze(1).repeat(1, beam, 1, 1).contiguous()
            for idx in range(self.num_encs)
        ]
        exp_h = [
            exp_h[idx].view(n_bb, h[idx].size()[1], h[idx].size()[2])
            for idx in range(self.num_encs)
        ]
        if lpz[0] is not None:
            scoring_num = min(
                int(beam * CTC_SCORING_RATIO)
                if att_weight > 0.0 and not lpz[0].is_cuda
                else 0,
                lpz[0].size(-1),
            )
            ctc_scorer = [
                CTCPrefixScoreTH(
                    lpz[idx],
                    hlens[idx],
                    0,
                    self.eos,
                    margin=ctc_margin,
                )
                for idx in range(self.num_encs)
            ]
        for i in range(maxlen):
            logging.debug("position " + str(i))
            vy = to_device(h[0], torch.LongTensor(self._get_last_yseq(yseq)))
            ey = self.dropout_emb(self.embed(vy))
            if self.num_encs == 1:
                att_c, att_w = self.att[att_idx](
                    exp_h[0], exp_hlens[0], self.dropout_dec[0](z_prev[0]), a_prev[0]
                )
                att_w_list = [att_w]
            else:
                for idx in range(self.num_encs):
                    att_c_list[idx], att_w_list[idx] = self.att[idx](
                        exp_h[idx],
                        exp_hlens[idx],
                        self.dropout_dec[0](z_prev[0]),
                        a_prev[idx],
                    )
                exp_h_han = torch.stack(att_c_list, dim=1)
                att_c, att_w_list[self.num_encs] = self.att[self.num_encs](
                    exp_h_han,
                    [self.num_encs] * n_bb,
                    self.dropout_dec[0](z_prev[0]),
                    a_prev[self.num_encs],
                )
            ey = torch.cat((ey, att_c), dim=1)
            # attention decoder
            z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_prev, c_prev)
            if self.context_residual:
                logits = self.output(
                    torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
                )
            else:
                logits = self.output(self.dropout_dec[-1](z_list[-1]))
            local_scores = att_weight * F.log_softmax(logits, dim=1)
            # rnnlm
            if rnnlm:
                rnnlm_state, local_lm_scores = rnnlm.buff_predict(rnnlm_state, vy, n_bb)
                local_scores = local_scores + recog_args.lm_weight * local_lm_scores
            # ctc
            if ctc_scorer[0]:
                local_scores[:, 0] = self.logzero  # avoid choosing blank
                part_ids = (
                    torch.topk(local_scores, scoring_num, dim=-1)[1]
                    if scoring_num > 0
                    else None
                )
                for idx in range(self.num_encs):
                    att_w = att_w_list[idx]
                    att_w_ = att_w if isinstance(att_w, torch.Tensor) else att_w[0]
                    local_ctc_scores, ctc_state[idx] = ctc_scorer[idx](
                        yseq, ctc_state[idx], part_ids, att_w_
                    )
                    local_scores = (
                        local_scores
                        + ctc_weight * weights_ctc_dec[idx] * local_ctc_scores
                    )
            local_scores = local_scores.view(batch, beam, self.odim)
            if i == 0:
                # every beam slot starts from the same <sos>; keep only slot 0
                local_scores[:, 1:, :] = self.logzero
            # accumulate scores
            eos_vscores = local_scores[:, :, self.eos] + vscores
            vscores = vscores.view(batch, beam, 1).repeat(1, 1, self.odim)
            vscores[:, :, self.eos] = self.logzero
            vscores = (vscores + local_scores).view(batch, -1)
            # global pruning
            accum_best_scores, accum_best_ids = torch.topk(vscores, beam, 1)
            accum_odim_ids = (
                torch.fmod(accum_best_ids, self.odim).view(-1).data.cpu().tolist()
            )
            accum_padded_beam_ids = (
                (accum_best_ids // self.odim + pad_b).view(-1).data.cpu().tolist()
            )
            y_prev = yseq[:][:]
            yseq = self._index_select_list(yseq, accum_padded_beam_ids)
            yseq = self._append_ids(yseq, accum_odim_ids)
            vscores = accum_best_scores
            vidx = to_device(h[0], torch.LongTensor(accum_padded_beam_ids))
            # reorder the recurrent state according to the surviving beams
            a_prev = []
            num_atts = self.num_encs if self.num_encs == 1 else self.num_encs + 1
            for idx in range(num_atts):
                if isinstance(att_w_list[idx], torch.Tensor):
                    _a_prev = torch.index_select(
                        att_w_list[idx].view(n_bb, *att_w_list[idx].shape[1:]), 0, vidx
                    )
                elif isinstance(att_w_list[idx], list):
                    # handle the case of multi-head attention
                    _a_prev = [
                        torch.index_select(att_w_one.view(n_bb, -1), 0, vidx)
                        for att_w_one in att_w_list[idx]
                    ]
                else:
                    # handle the case of location_recurrent when return is a tuple
                    _a_prev_ = torch.index_select(
                        att_w_list[idx][0].view(n_bb, -1), 0, vidx
                    )
                    _h_prev_ = torch.index_select(
                        att_w_list[idx][1][0].view(n_bb, -1), 0, vidx
                    )
                    _c_prev_ = torch.index_select(
                        att_w_list[idx][1][1].view(n_bb, -1), 0, vidx
                    )
                    _a_prev = (_a_prev_, (_h_prev_, _c_prev_))
                a_prev.append(_a_prev)
            z_prev = [
                torch.index_select(z_list[li].view(n_bb, -1), 0, vidx)
                for li in range(self.dlayers)
            ]
            c_prev = [
                torch.index_select(c_list[li].view(n_bb, -1), 0, vidx)
                for li in range(self.dlayers)
            ]
            # pick ended hyps
            if i >= minlen:
                k = 0
                penalty_i = (i + 1) * penalty
                thr = accum_best_scores[:, -1]
                for samp_i in range(batch):
                    if stop_search[samp_i]:
                        k = k + beam
                        continue
                    for beam_j in range(beam):
                        _vscore = None
                        if eos_vscores[samp_i, beam_j] > thr[samp_i]:
                            yk = y_prev[k][:]
                            if len(yk) <= min(
                                hlens[idx][samp_i] for idx in range(self.num_encs)
                            ):
                                _vscore = eos_vscores[samp_i][beam_j] + penalty_i
                        elif i == maxlen - 1:
                            yk = yseq[k][:]
                            _vscore = vscores[samp_i][beam_j] + penalty_i
                        if _vscore:
                            yk.append(self.eos)
                            if rnnlm:
                                _vscore += recog_args.lm_weight * rnnlm.final(
                                    rnnlm_state, index=k
                                )
                            _score = _vscore.data.cpu().numpy()
                            ended_hyps[samp_i].append(
                                {"yseq": yk, "vscore": _vscore, "score": _score}
                            )
                        k = k + 1
            # end detection
            stop_search = [
                stop_search[samp_i] or end_detect(ended_hyps[samp_i], i)
                for samp_i in range(batch)
            ]
            stop_search_summary = list(set(stop_search))
            if len(stop_search_summary) == 1 and stop_search_summary[0]:
                break
            if rnnlm:
                rnnlm_state = self._index_select_lm_state(rnnlm_state, 0, vidx)
            if ctc_scorer[0]:
                for idx in range(self.num_encs):
                    ctc_state[idx] = ctc_scorer[idx].index_select_state(
                        ctc_state[idx], accum_best_ids
                    )
        torch.cuda.empty_cache()
        # make sure every utterance has at least one (dummy) hypothesis
        dummy_hyps = [
            {"yseq": [self.sos, self.eos], "score": np.array([-float("inf")])}
        ]
        ended_hyps = [
            ended_hyps[samp_i] if len(ended_hyps[samp_i]) != 0 else dummy_hyps
            for samp_i in range(batch)
        ]
        if normalize_score:
            for samp_i in range(batch):
                for x in ended_hyps[samp_i]:
                    x["score"] /= len(x["yseq"])
        nbest_hyps = [
            sorted(ended_hyps[samp_i], key=lambda x: x["score"], reverse=True)[
                : min(len(ended_hyps[samp_i]), recog_args.nbest)
            ]
            for samp_i in range(batch)
        ]
        return nbest_hyps
    def calculate_all_attentions(self, hs_pad, hlen, ys_pad, strm_idx=0, lang_ids=None):
        """Calculate all of the attention weights over a full teacher-forced pass.

        :param torch.Tensor hs_pad: batch of padded hidden state sequences
            (B, Tmax, D)
            in multi-encoder case, list of torch.Tensor,
            [(B, Tmax_1, D), (B, Tmax_2, D), ...]
        :param torch.Tensor hlen: batch of lengths of hidden state sequences (B)
            [in multi-encoder case, list of torch.Tensor,
            [(B), (B), ...]
        :param torch.Tensor ys_pad:
            batch of padded character id sequence tensor (B, Lmax)
        :param int strm_idx:
            stream index for parallel speaker attention in multi-speaker case
        :param torch.Tensor lang_ids: batch of target language id tensor (B, 1)
        :return: attention weights with the following shape,
            1) multi-head case => attention weights (B, H, Lmax, Tmax),
            2) multi-encoder case =>
                [(B, Lmax, Tmax1), (B, Lmax, Tmax2), ..., (B, Lmax, NumEncs)]
            3) other case => attention weights (B, Lmax, Tmax).
        :rtype: float ndarray
        """
        # to support multiple encoder asr mode, in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            hs_pad = [hs_pad]
            hlen = [hlen]
        # TODO(kan-bayashi): need to make more smart way
        ys = [y[y != self.ignore_id] for y in ys_pad]  # parse padded ys
        # clamp to the last attention module when strm_idx exceeds what exists
        att_idx = min(strm_idx, len(self.att) - 1)
        # hlen should be list of list of integer
        hlen = [list(map(int, hlen[idx])) for idx in range(self.num_encs)]
        self.loss = None
        # prepare input and output word sequences with sos/eos IDs
        eos = ys[0].new([self.eos])
        sos = ys[0].new([self.sos])
        if self.replace_sos:
            # multilingual mode: the first input token is the language id
            ys_in = [torch.cat([idx, y], dim=0) for idx, y in zip(lang_ids, ys)]
        else:
            ys_in = [torch.cat([sos, y], dim=0) for y in ys]
        ys_out = [torch.cat([y, eos], dim=0) for y in ys]
        # padding for ys with -1
        # pys: utt x olen
        ys_in_pad = pad_list(ys_in, self.eos)
        ys_out_pad = pad_list(ys_out, self.ignore_id)
        # get length info
        olength = ys_out_pad.size(1)
        # initialization of the decoder LSTM hidden/cell states (per layer)
        c_list = [self.zero_state(hs_pad[0])]
        z_list = [self.zero_state(hs_pad[0])]
        for _ in range(1, self.dlayers):
            c_list.append(self.zero_state(hs_pad[0]))
            z_list.append(self.zero_state(hs_pad[0]))
        att_ws = []
        if self.num_encs == 1:
            att_w = None
            self.att[att_idx].reset()  # reset pre-computation of h
        else:
            att_w_list = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * (self.num_encs)  # atts
            for idx in range(self.num_encs + 1):
                self.att[idx].reset()  # reset pre-computation of h in atts and han
        # pre-computation of embedding
        eys = self.dropout_emb(self.embed(ys_in_pad))  # utt x olen x zdim
        # loop for an output sequence (teacher forcing; only weights are kept)
        for i in range(olength):
            if self.num_encs == 1:
                att_c, att_w = self.att[att_idx](
                    hs_pad[0], hlen[0], self.dropout_dec[0](z_list[0]), att_w
                )
                att_ws.append(att_w)
            else:
                # per-encoder attentions first, then hierarchical attention (HAN)
                # over the stacked per-encoder context vectors
                for idx in range(self.num_encs):
                    att_c_list[idx], att_w_list[idx] = self.att[idx](
                        hs_pad[idx],
                        hlen[idx],
                        self.dropout_dec[0](z_list[0]),
                        att_w_list[idx],
                    )
                hs_pad_han = torch.stack(att_c_list, dim=1)
                hlen_han = [self.num_encs] * len(ys_in)
                att_c, att_w_list[self.num_encs] = self.att[self.num_encs](
                    hs_pad_han,
                    hlen_han,
                    self.dropout_dec[0](z_list[0]),
                    att_w_list[self.num_encs],
                )
                # copy() so the snapshot is not mutated on the next step
                att_ws.append(att_w_list.copy())
            ey = torch.cat((eys[:, i, :], att_c), dim=1)  # utt x (zdim + hdim)
            z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list, c_list)
        if self.num_encs == 1:
            # convert to numpy array with the shape (B, Lmax, Tmax)
            att_ws = att_to_numpy(att_ws, self.att[att_idx])
        else:
            # transpose step-major snapshots into per-attention-module arrays
            _att_ws = []
            for idx, ws in enumerate(zip(*att_ws)):
                ws = att_to_numpy(ws, self.att[idx])
                _att_ws.append(ws)
            att_ws = _att_ws
        return att_ws
@staticmethod
def _get_last_yseq(exp_yseq):
last = []
for y_seq in exp_yseq:
last.append(y_seq[-1])
return last
@staticmethod
def _append_ids(yseq, ids):
if isinstance(ids, list):
for i, j in enumerate(ids):
yseq[i].append(j)
else:
for i in range(len(yseq)):
yseq[i].append(ids)
return yseq
@staticmethod
def _index_select_list(yseq, lst):
new_yseq = []
for i in lst:
new_yseq.append(yseq[i][:])
return new_yseq
@staticmethod
def _index_select_lm_state(rnnlm_state, dim, vidx):
if isinstance(rnnlm_state, dict):
new_state = {}
for k, v in rnnlm_state.items():
new_state[k] = [torch.index_select(vi, dim, vidx) for vi in v]
elif isinstance(rnnlm_state, list):
new_state = []
for i in vidx:
new_state.append(rnnlm_state[int(i)][:])
return new_state
# scorer interface methods
def init_state(self, x):
# to support mutiple encoder asr mode, in single encoder mode,
# convert torch.Tensor to List of torch.Tensor
if self.num_encs == 1:
x = [x]
c_list = [self.zero_state(x[0].unsqueeze(0))]
z_list = [self.zero_state(x[0].unsqueeze(0))]
for _ in range(1, self.dlayers):
c_list.append(self.zero_state(x[0].unsqueeze(0)))
z_list.append(self.zero_state(x[0].unsqueeze(0)))
# TODO(karita): support strm_index for `asr_mix`
strm_index = 0
att_idx = min(strm_index, len(self.att) - 1)
if self.num_encs == 1:
a = None
self.att[att_idx].reset() # reset pre-computation of h
else:
a = [None] * (self.num_encs + 1) # atts + han
for idx in range(self.num_encs + 1):
self.att[idx].reset() # reset pre-computation of h in atts and han
return dict(
c_prev=c_list[:],
z_prev=z_list[:],
a_prev=a,
workspace=(att_idx, z_list, c_list),
)
    def score(self, yseq, state, x):
        """Score the next token distribution for a partial hypothesis.

        ScorerInterface method: performs one decoder step conditioned on the
        last token of ``yseq`` and the stored decoder/attention state.

        :param torch.Tensor yseq: partial hypothesis token ids (at least one)
        :param dict state: state dict produced by :meth:`init_state` or a
            previous :meth:`score` call
        :param x: encoder output (T, D); in multi-encoder mode a list of them
        :return: tuple of log-probabilities over the vocabulary (odim,) and
            the updated state dict
        """
        # to support multiple encoder asr mode, in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            x = [x]
        att_idx, z_list, c_list = state["workspace"]
        vy = yseq[-1].unsqueeze(0)
        ey = self.dropout_emb(self.embed(vy))  # utt list (1) x zdim
        if self.num_encs == 1:
            att_c, att_w = self.att[att_idx](
                x[0].unsqueeze(0),
                [x[0].size(0)],
                self.dropout_dec[0](state["z_prev"][0]),
                state["a_prev"],
            )
        else:
            # per-encoder attentions, then hierarchical attention (HAN)
            # over the stacked per-encoder context vectors
            att_w = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * (self.num_encs)  # atts
            for idx in range(self.num_encs):
                att_c_list[idx], att_w[idx] = self.att[idx](
                    x[idx].unsqueeze(0),
                    [x[idx].size(0)],
                    self.dropout_dec[0](state["z_prev"][0]),
                    state["a_prev"][idx],
                )
            h_han = torch.stack(att_c_list, dim=1)
            att_c, att_w[self.num_encs] = self.att[self.num_encs](
                h_han,
                [self.num_encs],
                self.dropout_dec[0](state["z_prev"][0]),
                state["a_prev"][self.num_encs],
            )
        ey = torch.cat((ey, att_c), dim=1)  # utt(1) x (zdim + hdim)
        z_list, c_list = self.rnn_forward(
            ey, z_list, c_list, state["z_prev"], state["c_prev"]
        )
        if self.context_residual:
            # concatenate the attention context to the decoder output
            logits = self.output(
                torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
            )
        else:
            logits = self.output(self.dropout_dec[-1](z_list[-1]))
        logp = F.log_softmax(logits, dim=1).squeeze(0)
        return (
            logp,
            dict(
                c_prev=c_list[:],
                z_prev=z_list[:],
                a_prev=att_w,
                workspace=(att_idx, z_list, c_list),
            ),
        )
def decoder_for(args, odim, sos, eos, att, labeldist):
    """Return a Decoder built from the training configuration.

    :param Namespace args: training arguments (eprojs, dtype, dlayers, ...)
    :param int odim: output vocabulary size
    :param int sos: start-of-sequence token id
    :param int eos: end-of-sequence token id
    :param att: attention module(s) for the decoder
    :param labeldist: label distribution for label smoothing
    """
    # getattr with a default keeps compatibility with older config objects
    # that predate these attributes
    context_residual = getattr(args, "context_residual", False)
    replace_sos = getattr(args, "replace_sos", False)
    num_encs = getattr(args, "num_encs", 1)
    return Decoder(
        args.eprojs,
        odim,
        args.dtype,
        args.dlayers,
        args.dunits,
        sos,
        eos,
        att,
        args.verbose,
        args.char_list,
        labeldist,
        args.lsm_weight,
        args.sampling_probability,
        args.dropout_rate_decoder,
        context_residual,
        replace_sos,
        num_encs,
    )
| 48,709 | 39.356255 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/rnn/attentions.py | """Attention modules for RNN."""
import math
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask, to_device
def _apply_attention_constraint(
e, last_attended_idx, backward_window=1, forward_window=3
):
"""Apply monotonic attention constraint.
This function apply the monotonic attention constraint
introduced in `Deep Voice 3: Scaling
Text-to-Speech with Convolutional Sequence Learning`_.
Args:
e (Tensor): Attention energy before applying softmax (1, T).
last_attended_idx (int): The index of the inputs of the last attended [0, T].
backward_window (int, optional): Backward window size in attention constraint.
forward_window (int, optional): Forward window size in attetion constraint.
Returns:
Tensor: Monotonic constrained attention energy (1, T).
.. _`Deep Voice 3: Scaling Text-to-Speech with Convolutional Sequence Learning`:
https://arxiv.org/abs/1710.07654
"""
if e.size(0) != 1:
raise NotImplementedError("Batch attention constraining is not yet supported.")
backward_idx = last_attended_idx - backward_window
forward_idx = last_attended_idx + forward_window
if backward_idx > 0:
e[:, :backward_idx] = -float("inf")
if forward_idx < e.size(1):
e[:, forward_idx:] = -float("inf")
return e
class NoAtt(torch.nn.Module):
    """Dummy attention that always returns a uniformly averaged context."""

    def __init__(self):
        super(NoAtt, self).__init__()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.c = None

    def reset(self):
        """Clear all cached encoder state."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.c = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
        """Return the cached uniform context vector and weights.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B, T_max, D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: dummy (does not use)
        :param torch.Tensor att_prev: previous weights; None on the first step
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # keep the encoder output from the first call
        if self.pre_compute_enc_h is None:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
        # first decoding step: build uniform weights over the valid
        # (non-padded) frames and cache the resulting context vector,
        # which every later step then reuses unchanged
        if att_prev is None:
            valid = 1.0 - make_pad_mask(enc_hs_len).float()
            att_prev = valid / valid.new(enc_hs_len).unsqueeze(-1)
            att_prev = att_prev.to(self.enc_h)
            self.c = torch.sum(
                self.enc_h * att_prev.view(batch, self.h_length, 1), dim=1
            )
        return self.c, att_prev
class AttDot(torch.nn.Module):
    """Dot-product attention.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param bool han_mode: flag to switch on hierarchical attention mode
        (never cache the projected encoder states between calls)
    """

    def __init__(self, eprojs, dunits, att_dim, han_mode=False):
        super(AttDot, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """Clear cached encoder projection and padding mask."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
        """Compute the attention context via scaled dot-product scores.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec); zeros if None
        :param torch.Tensor att_prev: dummy (does not use)
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weight (B x T_max)
        :rtype: torch.Tensor
        """
        batch = enc_hs_pad.size(0)
        # project (and tanh) the encoder states once; recompute every call
        # in han_mode
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = torch.tanh(self.mlp_enc(self.enc_h))
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        query = torch.tanh(self.mlp_dec(dec_z)).view(batch, 1, self.att_dim)
        # dot product along att_dim -> utt x frame
        e = torch.sum(self.pre_compute_enc_h * query, dim=2)
        # mask padded frames so softmax gives them zero weight
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)
        # weighted sum over frames -> utt x hdim
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        return c, w
class AttAdd(torch.nn.Module):
    """Additive (Bahdanau-style) attention.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param bool han_mode: flag to switch on hierarchical attention mode
        (never cache the projected encoder states between calls)
    """

    def __init__(self, eprojs, dunits, att_dim, han_mode=False):
        super(AttAdd, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """Clear cached encoder projection and padding mask."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
        """Compute the attention context via additive scores.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec); zeros if None
        :param torch.Tensor att_prev: dummy (does not use)
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights (B x T_max)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # project the encoder states once; recompute every call in han_mode
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        # query broadcast over frames: utt x 1 x att_dim
        query = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
        # score via gvec . tanh(enc + dec): utt x frame
        e = self.gvec(torch.tanh(self.pre_compute_enc_h + query)).squeeze(2)
        # mask padded frames so softmax gives them zero weight
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)
        # weighted sum over frames -> utt x hdim
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        return c, w
class AttLoc(torch.nn.Module):
    """location-aware attention module.
    Reference: Attention-Based Models for Speech Recognition
        (https://arxiv.org/pdf/1506.07503.pdf)
    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_enc_h
    """
    def __init__(
        self, eprojs, dunits, att_dim, aconv_chans, aconv_filts, han_mode=False
    ):
        super(AttLoc, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        # projects the convolved previous attention weights into att_dim
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # 1-D conv over the time axis of the previous attention distribution
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.han_mode = han_mode
    def reset(self):
        """reset states (cached encoder projection and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
    def forward(
        self,
        enc_hs_pad,
        enc_hs_len,
        dec_z,
        att_prev,
        scaling=2.0,
        last_attended_idx=None,
        backward_window=1,
        forward_window=3,
    ):
        """Calculate AttLoc forward propagation.
        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: previous attention weight (B x T_max)
        :param float scaling: scaling parameter before applying softmax
        :param int last_attended_idx: index of the inputs of the last attended
        :param int backward_window: backward window size in attention constraint
        :param int forward_window: forward window size in attention constraint
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights (B x T_max)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        # (recomputed on every call when han_mode is set)
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        # initialize attention weight with uniform dist. over valid frames
        if att_prev is None:
            # if no bias, 0 0-pad goes 0
            att_prev = 1.0 - make_pad_mask(enc_hs_len).to(
                device=dec_z.device, dtype=dec_z.dtype
            )
            att_prev = att_prev / att_prev.new(enc_hs_len).unsqueeze(-1)
        # att_prev: utt x frame -> utt x 1 x 1 x frame
        # -> utt x att_conv_chans x 1 x frame
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)
        # NOTE: consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        # apply monotonic attention constraint (mainly for TTS)
        if last_attended_idx is not None:
            e = _apply_attention_constraint(
                e, last_attended_idx, backward_window, forward_window
            )
        w = F.softmax(scaling * e, dim=1)
        # weighted sum over frames
        # utt x hdim
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        return c, w
class AttCov(torch.nn.Module):
    """Coverage mechanism attention
    Reference: Get To The Point: Summarization with Pointer-Generator Network
       (https://arxiv.org/abs/1704.04368)
    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_enc_h
    """
    def __init__(self, eprojs, dunits, att_dim, han_mode=False):
        super(AttCov, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        # projects the scalar per-frame coverage value into att_dim
        self.wvec = torch.nn.Linear(1, att_dim)
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.han_mode = han_mode
    def reset(self):
        """reset states (cached encoder projection and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev_list, scaling=2.0):
        """AttCov forward
        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param list att_prev_list: list of previous attention weight
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weights (current weight appended)
        :rtype: list
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        # initialize attention weight with uniform dist. over valid frames
        if att_prev_list is None:
            # if no bias, 0 0-pad goes 0
            att_prev_list = to_device(
                enc_hs_pad, (1.0 - make_pad_mask(enc_hs_len).float())
            )
            att_prev_list = [
                att_prev_list / att_prev_list.new(enc_hs_len).unsqueeze(-1)
            ]
        # coverage vector = running sum of all past attention distributions
        # att_prev_list: L' * [B x T] => cov_vec B x T
        cov_vec = sum(att_prev_list)
        # cov_vec: B x T => B x T x 1 => B x T x att_dim
        cov_vec = self.wvec(cov_vec.unsqueeze(-1))
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(cov_vec + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)
        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)
        # record the new weights so the next step's coverage includes them
        att_prev_list += [w]
        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        return c, att_prev_list
class AttLoc2D(torch.nn.Module):
    """2D location-aware attention
    This attention is an extended version of location aware attention.
    It take not only one frame before attention weights,
    but also earlier frames into account.
    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param int att_win: attention window size (default=5)
    :param bool han_mode:
        flag to switch on mode of hierarchical attention and not store pre_compute_enc_h
    """
    def __init__(
        self, eprojs, dunits, att_dim, att_win, aconv_chans, aconv_filts, han_mode=False
    ):
        super(AttLoc2D, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # 2-D conv over the (att_win, time) plane of the stacked past weights
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (att_win, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.aconv_chans = aconv_chans
        self.att_win = att_win
        self.mask = None
        self.han_mode = han_mode
    def reset(self):
        """reset states (cached encoder projection and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
        """AttLoc2D forward
        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: previous attention weight (B x att_win x T_max)
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights (B x att_win x T_max)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        # initialize attention weight with uniform dist.,
        # replicated att_win times to fill the history window
        if att_prev is None:
            # B * [Li x att_win]
            # if no bias, 0 0-pad goes 0
            att_prev = to_device(enc_hs_pad, (1.0 - make_pad_mask(enc_hs_len).float()))
            att_prev = att_prev / att_prev.new(enc_hs_len).unsqueeze(-1)
            att_prev = att_prev.unsqueeze(1).expand(-1, self.att_win, -1)
        # att_prev: B x att_win x Tmax -> B x 1 x att_win x Tmax -> B x C x 1 x Tmax
        att_conv = self.loc_conv(att_prev.unsqueeze(1))
        # att_conv: B x C x 1 x Tmax -> B x Tmax x C
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)
        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)
        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        # slide the history window: append the new weights, drop the oldest
        # update att_prev: B x att_win x Tmax -> B x att_win+1 x Tmax
        # -> B x att_win x Tmax
        att_prev = torch.cat([att_prev, w.unsqueeze(1)], dim=1)
        att_prev = att_prev[:, 1:]
        return c, att_prev
class AttLocRec(torch.nn.Module):
    """location-aware recurrent attention
    This attention is an extended version of location aware attention.
    With the use of RNN,
    it take the effect of the history of attention weights into account.
    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode:
        flag to switch on mode of hierarchical attention and not store pre_compute_enc_h
    """
    def __init__(
        self, eprojs, dunits, att_dim, aconv_chans, aconv_filts, han_mode=False
    ):
        super(AttLocRec, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        # LSTM cell that accumulates the attention-weight history
        self.att_lstm = torch.nn.LSTMCell(aconv_chans, att_dim, bias=False)
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.han_mode = han_mode
    def reset(self):
        """reset states (cached encoder projection and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev_states, scaling=2.0):
        """AttLocRec forward
        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param tuple att_prev_states: previous attention weight and lstm states
            ((B, T_max), ((B, att_dim), (B, att_dim)))
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights and lstm states (w, (hx, cx))
            ((B, T_max), ((B, att_dim), (B, att_dim)))
        :rtype: tuple
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        if att_prev_states is None:
            # initialize attention weight with uniform dist.
            # if no bias, 0 0-pad goes 0
            att_prev = to_device(enc_hs_pad, (1.0 - make_pad_mask(enc_hs_len).float()))
            att_prev = att_prev / att_prev.new(enc_hs_len).unsqueeze(-1)
            # initialize lstm states
            att_h = enc_hs_pad.new_zeros(batch, self.att_dim)
            att_c = enc_hs_pad.new_zeros(batch, self.att_dim)
            att_states = (att_h, att_c)
        else:
            att_prev = att_prev_states[0]
            att_states = att_prev_states[1]
        # B x 1 x 1 x T -> B x C x 1 x T
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # apply non-linear
        att_conv = F.relu(att_conv)
        # pool the conv features over time into one vector per utterance
        # B x C x 1 x T -> B x C x 1 x 1 -> B x C
        att_conv = F.max_pool2d(att_conv, (1, att_conv.size(3))).view(batch, -1)
        # advance the attention LSTM with the pooled features
        att_h, att_c = self.att_lstm(att_conv, att_states)
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_h.unsqueeze(1) + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)
        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)
        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        return c, (w, (att_h, att_c))
class AttCovLoc(torch.nn.Module):
    """Coverage mechanism location aware attention
    This attention is a combination of coverage and location-aware attentions.
    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode:
        flag to switch on mode of hierarchical attention and not store pre_compute_enc_h
    """
    def __init__(
        self, eprojs, dunits, att_dim, aconv_chans, aconv_filts, han_mode=False
    ):
        super(AttCovLoc, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # 1-D conv over the time axis of the coverage vector
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.aconv_chans = aconv_chans
        self.mask = None
        self.han_mode = han_mode
    def reset(self):
        """reset states (cached encoder projection and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev_list, scaling=2.0):
        """AttCovLoc forward
        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param list att_prev_list: list of previous attention weight
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weights (current weight appended)
        :rtype: list
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        # initialize attention weight with uniform dist. over valid frames
        if att_prev_list is None:
            # if no bias, 0 0-pad goes 0
            mask = 1.0 - make_pad_mask(enc_hs_len).float()
            att_prev_list = [
                to_device(enc_hs_pad, mask / mask.new(enc_hs_len).unsqueeze(-1))
            ]
        # coverage vector = running sum of all past attention distributions
        # att_prev_list: L' * [B x T] => cov_vec B x T
        cov_vec = sum(att_prev_list)
        # cov_vec: B x T -> B x 1 x 1 x T -> B x C x 1 x T
        att_conv = self.loc_conv(cov_vec.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)
        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)
        # record the new weights so the next step's coverage includes them
        att_prev_list += [w]
        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        return c, att_prev_list
class AttMultiHeadDot(torch.nn.Module):
    """Multi head dot product attention.

    Reference: Attention is all you need
        (https://arxiv.org/abs/1706.03762)

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
    :param int att_dim_k: dimension k in multi head attention
    :param int att_dim_v: dimension v in multi head attention
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_k and pre_compute_v
    """

    def __init__(self, eprojs, dunits, aheads, att_dim_k, att_dim_v, han_mode=False):
        """Initialize per-head query/key/value projections and the output projection."""
        super(AttMultiHeadDot, self).__init__()
        # one query/key/value projection per attention head
        self.mlp_q = torch.nn.ModuleList()
        self.mlp_k = torch.nn.ModuleList()
        self.mlp_v = torch.nn.ModuleList()
        for _ in range(aheads):
            self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
            self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
            self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
        # output projection merging the concatenated per-head context vectors
        self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
        self.dunits = dunits
        self.eprojs = eprojs
        self.aheads = aheads
        self.att_dim_k = att_dim_k
        self.att_dim_v = att_dim_v
        # scaled dot-product factor 1 / sqrt(d_k)
        self.scaling = 1.0 / math.sqrt(att_dim_k)
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None  # cached key projections, one tensor per head
        self.pre_compute_v = None  # cached value projections, one tensor per head
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """Reset cached encoder projections and padding mask (call per utterance)."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
        """AttMultiHeadDot forward.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: dummy (does not use)
        :return: attention weighted encoder state (B x D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weight (B x T_max) * aheads
        :rtype: list
        """
        batch = enc_hs_pad.size(0)
        # pre-compute all k and v outside the decoder loop
        # (in han_mode the encoder states change every call, so recompute each time)
        if self.pre_compute_k is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_k = [
                torch.tanh(self.mlp_k[h](self.enc_h)) for h in range(self.aheads)
            ]
        if self.pre_compute_v is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_v = [self.mlp_v[h](self.enc_h) for h in range(self.aheads)]
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        c = []
        w = []
        for h in range(self.aheads):
            # dot product of cached keys with the tanh-squashed query: utt x frame
            e = torch.sum(
                self.pre_compute_k[h]
                * torch.tanh(self.mlp_q[h](dec_z)).view(batch, 1, self.att_dim_k),
                dim=2,
            )  # utt x frame
            # NOTE consider zero padding when compute w.
            if self.mask is None:
                self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
            e.masked_fill_(self.mask, -float("inf"))
            w += [F.softmax(self.scaling * e, dim=1)]
            # weighted sum over frames
            # utt x hdim
            # NOTE use bmm instead of sum(*)
            c += [
                torch.sum(
                    self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1
                )
            ]
        # concat all of c
        c = self.mlp_o(torch.cat(c, dim=1))
        return c, w
class AttMultiHeadAdd(torch.nn.Module):
    """Multi head additive attention.

    Reference: Attention is all you need
        (https://arxiv.org/abs/1706.03762)

    This attention is multi head attention using additive attention for each head.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
    :param int att_dim_k: dimension k in multi head attention
    :param int att_dim_v: dimension v in multi head attention
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_k and pre_compute_v
    """

    def __init__(self, eprojs, dunits, aheads, att_dim_k, att_dim_v, han_mode=False):
        """Initialize per-head projections, per-head score vectors, output projection."""
        super(AttMultiHeadAdd, self).__init__()
        # one query/key/value projection and one score vector (gvec) per head
        self.mlp_q = torch.nn.ModuleList()
        self.mlp_k = torch.nn.ModuleList()
        self.mlp_v = torch.nn.ModuleList()
        self.gvec = torch.nn.ModuleList()
        for _ in range(aheads):
            self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
            self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
            self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
            self.gvec += [torch.nn.Linear(att_dim_k, 1)]
        # output projection merging the concatenated per-head context vectors
        self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
        self.dunits = dunits
        self.eprojs = eprojs
        self.aheads = aheads
        self.att_dim_k = att_dim_k
        self.att_dim_v = att_dim_v
        # scaling applied to the additive scores before softmax
        self.scaling = 1.0 / math.sqrt(att_dim_k)
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None  # cached key projections, one tensor per head
        self.pre_compute_v = None  # cached value projections, one tensor per head
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """Reset cached encoder projections and padding mask (call per utterance)."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
        """AttMultiHeadAdd forward.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: dummy (does not use)
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weight (B x T_max) * aheads
        :rtype: list
        """
        batch = enc_hs_pad.size(0)
        # pre-compute all k and v outside the decoder loop
        # (in han_mode the encoder states change every call, so recompute each time)
        if self.pre_compute_k is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_k = [self.mlp_k[h](self.enc_h) for h in range(self.aheads)]
        if self.pre_compute_v is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_v = [self.mlp_v[h](self.enc_h) for h in range(self.aheads)]
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        c = []
        w = []
        for h in range(self.aheads):
            # additive score: v^T tanh(Wk * h + Wq * s), per frame
            e = self.gvec[h](
                torch.tanh(
                    self.pre_compute_k[h]
                    + self.mlp_q[h](dec_z).view(batch, 1, self.att_dim_k)
                )
            ).squeeze(2)
            # NOTE consider zero padding when compute w.
            if self.mask is None:
                self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
            e.masked_fill_(self.mask, -float("inf"))
            w += [F.softmax(self.scaling * e, dim=1)]
            # weighted sum over frames
            # utt x hdim
            # NOTE use bmm instead of sum(*)
            c += [
                torch.sum(
                    self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1
                )
            ]
        # concat all of c
        c = self.mlp_o(torch.cat(c, dim=1))
        return c, w
class AttMultiHeadLoc(torch.nn.Module):
    """Multi head location based attention.

    Reference: Attention is all you need
        (https://arxiv.org/abs/1706.03762)

    This attention is multi head attention using location-aware attention for each head.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
    :param int att_dim_k: dimension k in multi head attention
    :param int att_dim_v: dimension v in multi head attention
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_k and pre_compute_v
    """

    def __init__(
        self,
        eprojs,
        dunits,
        aheads,
        att_dim_k,
        att_dim_v,
        aconv_chans,
        aconv_filts,
        han_mode=False,
    ):
        """Initialize per-head projections plus per-head location convolutions."""
        super(AttMultiHeadLoc, self).__init__()
        self.mlp_q = torch.nn.ModuleList()
        self.mlp_k = torch.nn.ModuleList()
        self.mlp_v = torch.nn.ModuleList()
        self.gvec = torch.nn.ModuleList()
        # per-head convolution over the previous attention weights (location term)
        self.loc_conv = torch.nn.ModuleList()
        self.mlp_att = torch.nn.ModuleList()
        for _ in range(aheads):
            self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
            self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
            self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
            self.gvec += [torch.nn.Linear(att_dim_k, 1)]
            self.loc_conv += [
                torch.nn.Conv2d(
                    1,
                    aconv_chans,
                    (1, 2 * aconv_filts + 1),
                    padding=(0, aconv_filts),
                    bias=False,
                )
            ]
            self.mlp_att += [torch.nn.Linear(aconv_chans, att_dim_k, bias=False)]
        # output projection merging the concatenated per-head context vectors
        self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
        self.dunits = dunits
        self.eprojs = eprojs
        self.aheads = aheads
        self.att_dim_k = att_dim_k
        self.att_dim_v = att_dim_v
        self.scaling = 1.0 / math.sqrt(att_dim_k)
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None  # cached key projections, one tensor per head
        self.pre_compute_v = None  # cached value projections, one tensor per head
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """Reset cached encoder projections and padding mask (call per utterance)."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
        """AttMultiHeadLoc forward.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev:
            list of previous attention weight (B x T_max) * aheads
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B x D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weight (B x T_max) * aheads
        :rtype: list
        """
        batch = enc_hs_pad.size(0)
        # pre-compute all k and v outside the decoder loop
        # (in han_mode the encoder states change every call, so recompute each time)
        if self.pre_compute_k is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_k = [self.mlp_k[h](self.enc_h) for h in range(self.aheads)]
        if self.pre_compute_v is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_v = [self.mlp_v[h](self.enc_h) for h in range(self.aheads)]
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        if att_prev is None:
            # initialize each head with a uniform distribution over unpadded frames
            att_prev = []
            for _ in range(self.aheads):
                # if no bias, 0 0-pad goes 0
                mask = 1.0 - make_pad_mask(enc_hs_len).float()
                att_prev += [
                    to_device(enc_hs_pad, mask / mask.new(enc_hs_len).unsqueeze(-1))
                ]
        c = []
        w = []
        for h in range(self.aheads):
            # location term: convolve previous weights, project to attention space
            att_conv = self.loc_conv[h](att_prev[h].view(batch, 1, 1, self.h_length))
            att_conv = att_conv.squeeze(2).transpose(1, 2)
            att_conv = self.mlp_att[h](att_conv)
            # additive score with location term: v^T tanh(k + loc + q)
            e = self.gvec[h](
                torch.tanh(
                    self.pre_compute_k[h]
                    + att_conv
                    + self.mlp_q[h](dec_z).view(batch, 1, self.att_dim_k)
                )
            ).squeeze(2)
            # NOTE consider zero padding when compute w.
            if self.mask is None:
                self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
            e.masked_fill_(self.mask, -float("inf"))
            w += [F.softmax(scaling * e, dim=1)]
            # weighted sum over frames
            # utt x hdim
            # NOTE use bmm instead of sum(*)
            c += [
                torch.sum(
                    self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1
                )
            ]
        # concat all of c
        c = self.mlp_o(torch.cat(c, dim=1))
        return c, w
class AttMultiHeadMultiResLoc(torch.nn.Module):
    """Multi head multi resolution location based attention.

    Reference: Attention is all you need
        (https://arxiv.org/abs/1706.03762)

    This attention is multi head attention using location-aware attention for each head.
    Furthermore, it uses different filter size for each head.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
    :param int att_dim_k: dimension k in multi head attention
    :param int att_dim_v: dimension v in multi head attention
    :param int aconv_chans: maximum # channels of attention convolution
        each head use #ch = aconv_chans * (head + 1) / aheads
        e.g. aheads=4, aconv_chans=100 => filter size = 25, 50, 75, 100
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_k and pre_compute_v
    """

    def __init__(
        self,
        eprojs,
        dunits,
        aheads,
        att_dim_k,
        att_dim_v,
        aconv_chans,
        aconv_filts,
        han_mode=False,
    ):
        """Initialize per-head projections with head-dependent location filter sizes."""
        super(AttMultiHeadMultiResLoc, self).__init__()
        self.mlp_q = torch.nn.ModuleList()
        self.mlp_k = torch.nn.ModuleList()
        self.mlp_v = torch.nn.ModuleList()
        self.gvec = torch.nn.ModuleList()
        self.loc_conv = torch.nn.ModuleList()
        self.mlp_att = torch.nn.ModuleList()
        for h in range(aheads):
            self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
            self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
            self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
            self.gvec += [torch.nn.Linear(att_dim_k, 1)]
            # later heads get wider location filters (multi-resolution)
            afilts = aconv_filts * (h + 1) // aheads
            self.loc_conv += [
                torch.nn.Conv2d(
                    1, aconv_chans, (1, 2 * afilts + 1), padding=(0, afilts), bias=False
                )
            ]
            self.mlp_att += [torch.nn.Linear(aconv_chans, att_dim_k, bias=False)]
        # output projection merging the concatenated per-head context vectors
        self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
        self.dunits = dunits
        self.eprojs = eprojs
        self.aheads = aheads
        self.att_dim_k = att_dim_k
        self.att_dim_v = att_dim_v
        self.scaling = 1.0 / math.sqrt(att_dim_k)
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None  # cached key projections, one tensor per head
        self.pre_compute_v = None  # cached value projections, one tensor per head
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """Reset cached encoder projections and padding mask (call per utterance)."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
        """AttMultiHeadMultiResLoc forward.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: list of previous attention weight
            (B x T_max) * aheads
        :return: attention weighted encoder state (B x D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weight (B x T_max) * aheads
        :rtype: list
        """
        batch = enc_hs_pad.size(0)
        # pre-compute all k and v outside the decoder loop
        # (in han_mode the encoder states change every call, so recompute each time)
        if self.pre_compute_k is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_k = [self.mlp_k[h](self.enc_h) for h in range(self.aheads)]
        if self.pre_compute_v is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_v = [self.mlp_v[h](self.enc_h) for h in range(self.aheads)]
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        if att_prev is None:
            # initialize each head with a uniform distribution over unpadded frames
            att_prev = []
            for _ in range(self.aheads):
                # if no bias, 0 0-pad goes 0
                mask = 1.0 - make_pad_mask(enc_hs_len).float()
                att_prev += [
                    to_device(enc_hs_pad, mask / mask.new(enc_hs_len).unsqueeze(-1))
                ]
        c = []
        w = []
        for h in range(self.aheads):
            # location term: convolve previous weights, project to attention space
            att_conv = self.loc_conv[h](att_prev[h].view(batch, 1, 1, self.h_length))
            att_conv = att_conv.squeeze(2).transpose(1, 2)
            att_conv = self.mlp_att[h](att_conv)
            # additive score with location term: v^T tanh(k + loc + q)
            e = self.gvec[h](
                torch.tanh(
                    self.pre_compute_k[h]
                    + att_conv
                    + self.mlp_q[h](dec_z).view(batch, 1, self.att_dim_k)
                )
            ).squeeze(2)
            # NOTE consider zero padding when compute w.
            if self.mask is None:
                self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
            e.masked_fill_(self.mask, -float("inf"))
            w += [F.softmax(self.scaling * e, dim=1)]
            # weighted sum over frames
            # utt x hdim
            # NOTE use bmm instead of sum(*)
            c += [
                torch.sum(
                    self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1
                )
            ]
        # concat all of c
        c = self.mlp_o(torch.cat(c, dim=1))
        return c, w
class AttForward(torch.nn.Module):
    """Forward attention module.

    Reference:
    Forward attention in sequence-to-sequence acoustic modeling for speech synthesis
        (https://arxiv.org/pdf/1807.06736.pdf)

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    """

    def __init__(self, eprojs, dunits, att_dim, aconv_chans, aconv_filts):
        """Initialize encoder/decoder projections and the location convolution."""
        super(AttForward, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # convolution over the previous attention weights (location term)
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None  # cached encoder projection
        self.mask = None

    def reset(self):
        """Reset cached encoder projection and padding mask (call per utterance)."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(
        self,
        enc_hs_pad,
        enc_hs_len,
        dec_z,
        att_prev,
        scaling=1.0,
        last_attended_idx=None,
        backward_window=1,
        forward_window=3,
    ):
        """Calculate AttForward forward propagation.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: attention weights of previous step
        :param float scaling: scaling parameter before applying softmax
        :param int last_attended_idx: index of the inputs of the last attended
        :param int backward_window: backward window size in attention constraint
        :param int forward_window: forward window size in attention constraint
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights (B x T_max)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        if att_prev is None:
            # initial attention will be [1, 0, 0, ...]
            att_prev = enc_hs_pad.new_zeros(*enc_hs_pad.size()[:2])
            att_prev[:, 0] = 1.0
        # att_prev: utt x frame -> utt x 1 x 1 x frame
        # -> utt x att_conv_chans x 1 x frame
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).unsqueeze(1)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(self.pre_compute_enc_h + dec_z_tiled + att_conv)
        ).squeeze(2)
        # NOTE: consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        # apply monotonic attention constraint (mainly for TTS)
        if last_attended_idx is not None:
            e = _apply_attention_constraint(
                e, last_attended_idx, backward_window, forward_window
            )
        w = F.softmax(scaling * e, dim=1)
        # forward attention: mix current weights with (stay + move-one) of previous
        att_prev_shift = F.pad(att_prev, (1, 0))[:, :-1]
        w = (att_prev + att_prev_shift) * w
        # NOTE: clamp is needed to avoid nan gradient
        w = F.normalize(torch.clamp(w, 1e-6), p=1, dim=1)
        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.unsqueeze(-1), dim=1)
        return c, w
class AttForwardTA(torch.nn.Module):
    """Forward attention with transition agent module.

    Reference:
    Forward attention in sequence-to-sequence acoustic modeling for speech synthesis
        (https://arxiv.org/pdf/1807.06736.pdf)

    :param int eunits: # units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param int odim: output dimension
    """

    def __init__(self, eunits, dunits, att_dim, aconv_chans, aconv_filts, odim):
        """Initialize projections, location convolution, and transition agent MLP."""
        super(AttForwardTA, self).__init__()
        self.mlp_enc = torch.nn.Linear(eunits, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        # transition agent: predicts stay/move probability from [c, out_prev, dec_z]
        self.mlp_ta = torch.nn.Linear(eunits + dunits + odim, 1)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eunits = eunits
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None  # cached encoder projection
        self.mask = None
        # transition agent probability, updated each decoding step in forward()
        self.trans_agent_prob = 0.5

    def reset(self):
        """Reset cached state and the transition agent probability."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.trans_agent_prob = 0.5

    def forward(
        self,
        enc_hs_pad,
        enc_hs_len,
        dec_z,
        att_prev,
        out_prev,
        scaling=1.0,
        last_attended_idx=None,
        backward_window=1,
        forward_window=3,
    ):
        """Calculate AttForwardTA forward propagation.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B, Tmax, eunits)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B, dunits)
        :param torch.Tensor att_prev: attention weights of previous step
        :param torch.Tensor out_prev: decoder outputs of previous step (B, odim)
        :param float scaling: scaling parameter before applying softmax
        :param int last_attended_idx: index of the inputs of the last attended
        :param int backward_window: backward window size in attention constraint
        :param int forward_window: forward window size in attention constraint
        :return: attention weighted encoder state (B, dunits)
        :rtype: torch.Tensor
        :return: previous attention weights (B, Tmax)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        if att_prev is None:
            # initial attention will be [1, 0, 0, ...]
            att_prev = enc_hs_pad.new_zeros(*enc_hs_pad.size()[:2])
            att_prev[:, 0] = 1.0
        # att_prev: utt x frame -> utt x 1 x 1 x frame
        # -> utt x att_conv_chans x 1 x frame
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)
        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        # apply monotonic attention constraint (mainly for TTS)
        if last_attended_idx is not None:
            e = _apply_attention_constraint(
                e, last_attended_idx, backward_window, forward_window
            )
        w = F.softmax(scaling * e, dim=1)
        # forward attention with transition agent:
        # blend "stay" (att_prev) and "move one step" (att_prev_shift)
        att_prev_shift = F.pad(att_prev, (1, 0))[:, :-1]
        w = (
            self.trans_agent_prob * att_prev
            + (1 - self.trans_agent_prob) * att_prev_shift
        ) * w
        # NOTE: clamp is needed to avoid nan gradient
        w = F.normalize(torch.clamp(w, 1e-6), p=1, dim=1)
        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        # update transition agent prob for the next decoding step
        self.trans_agent_prob = torch.sigmoid(
            self.mlp_ta(torch.cat([c, out_prev, dec_z], dim=1))
        )
        return c, w
def att_for(args, num_att=1, han_mode=False):
    """Instantiate an attention module given the program arguments.

    :param Namespace args: The arguments
    :param int num_att: number of attention modules
        (in multi-speaker case, it can be 2 or more)
    :param bool han_mode: switch on/off mode of hierarchical attention network (HAN)
    :rtype torch.nn.Module
    :return: The attention module (a single module in HAN mode, otherwise a
        ModuleList of attention modules)
    :raises ValueError: if ``args.num_encs`` is smaller than one
    """
    att_list = torch.nn.ModuleList()
    num_encs = getattr(args, "num_encs", 1)  # use getattr to keep compatibility
    aheads = getattr(args, "aheads", None)
    awin = getattr(args, "awin", None)
    aconv_chans = getattr(args, "aconv_chans", None)
    aconv_filts = getattr(args, "aconv_filts", None)
    if num_encs == 1:
        # single encoder: one attention module per requested attention
        for i in range(num_att):
            att = initial_att(
                args.atype,
                args.eprojs,
                args.dunits,
                aheads,
                args.adim,
                awin,
                aconv_chans,
                aconv_filts,
            )
            att_list.append(att)
    elif num_encs > 1:  # multiple encoders (not the multi-speaker mode)
        if han_mode:
            # hierarchical attention network: a single attention over
            # the encoder-level context vectors
            att = initial_att(
                args.han_type,
                args.eprojs,
                args.dunits,
                args.han_heads,
                args.han_dim,
                args.han_win,
                args.han_conv_chans,
                args.han_conv_filts,
                han_mode=True,
            )
            return att
        else:
            # one attention per encoder; per-encoder options are given as lists
            for idx in range(num_encs):
                att = initial_att(
                    args.atype[idx],
                    args.eprojs,
                    args.dunits,
                    aheads[idx],
                    args.adim[idx],
                    awin[idx],
                    aconv_chans[idx],
                    aconv_filts[idx],
                )
                att_list.append(att)
    else:
        # num_encs < 1 is an invalid configuration (1 is valid)
        raise ValueError(
            "Number of encoders needs to be one or more. {}".format(num_encs)
        )
    return att_list
def initial_att(
    atype, eprojs, dunits, aheads, adim, awin, aconv_chans, aconv_filts, han_mode=False
):
    """Instantiate a single attention module.

    :param str atype: attention type
    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
    :param int adim: attention dimension
    :param int awin: attention window size
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode: flag to switch on mode of hierarchical attention
    :return: The attention module
    :raises ValueError: if ``atype`` is not a known attention type
    """
    if atype == "noatt":
        att = NoAtt()
    elif atype == "dot":
        att = AttDot(eprojs, dunits, adim, han_mode)
    elif atype == "add":
        att = AttAdd(eprojs, dunits, adim, han_mode)
    elif atype == "location":
        att = AttLoc(eprojs, dunits, adim, aconv_chans, aconv_filts, han_mode)
    elif atype == "location2d":
        att = AttLoc2D(eprojs, dunits, adim, awin, aconv_chans, aconv_filts, han_mode)
    elif atype == "location_recurrent":
        att = AttLocRec(eprojs, dunits, adim, aconv_chans, aconv_filts, han_mode)
    elif atype == "coverage":
        att = AttCov(eprojs, dunits, adim, han_mode)
    elif atype == "coverage_location":
        att = AttCovLoc(eprojs, dunits, adim, aconv_chans, aconv_filts, han_mode)
    elif atype == "multi_head_dot":
        att = AttMultiHeadDot(eprojs, dunits, aheads, adim, adim, han_mode)
    elif atype == "multi_head_add":
        att = AttMultiHeadAdd(eprojs, dunits, aheads, adim, adim, han_mode)
    elif atype == "multi_head_loc":
        att = AttMultiHeadLoc(
            eprojs, dunits, aheads, adim, adim, aconv_chans, aconv_filts, han_mode
        )
    elif atype == "multi_head_multi_res_loc":
        att = AttMultiHeadMultiResLoc(
            eprojs, dunits, aheads, adim, adim, aconv_chans, aconv_filts, han_mode
        )
    else:
        # fail fast with a clear message instead of the UnboundLocalError the
        # previous fall-through produced on an unknown type
        raise ValueError("Unknown attention type: {}".format(atype))
    return att
def att_to_numpy(att_ws, att):
    """Convert collected attention weights to a numpy array for the given attention.

    :param list att_ws: The attention weights collected over decoding steps
    :param torch.nn.Module att: The attention module that produced them
    :rtype: np.ndarray
    :return: The numpy array of the attention weights, shaped (B, Lmax, Tmax)
        (with an extra head axis for multi-head attentions)
    """
    if isinstance(att, AttLoc2D):
        # each step keeps a window of past attentions; take only the newest row
        stacked = torch.stack([aw[:, -1] for aw in att_ws], dim=1)
    elif isinstance(att, (AttCov, AttCovLoc)):
        # each step stores the growing list of attentions; pick the current one
        stacked = torch.stack(
            [aw[idx] for idx, aw in enumerate(att_ws)], dim=1
        )
    elif isinstance(att, AttLocRec):
        # each step is a (attention, hidden state) tuple; keep the attention part
        stacked = torch.stack([aw[0] for aw in att_ws], dim=1)
    elif isinstance(
        att,
        (AttMultiHeadDot, AttMultiHeadAdd, AttMultiHeadLoc, AttMultiHeadMultiResLoc),
    ):
        # each step is a per-head list; regroup so heads come before steps
        n_heads = len(att_ws[0])
        per_head = [
            torch.stack([aw[h] for aw in att_ws], dim=1) for h in range(n_heads)
        ]
        stacked = torch.stack(per_head, dim=1)
    else:
        # plain case: each step is a (B, Tmax) attention tensor
        stacked = torch.stack(att_ws, dim=1)
    return stacked.cpu().numpy()
def _apply_dynamic_filter(p, last_attended_idx, backward_window=1, forward_window=3):
"""Apply dynamic filter.
This function apply the dynamic filter
introduced in `Singing-Tacotron: Global Duration Control Attention and Dynamic
Filter for End-to-end Singing Voice Synthesis`_.
Args:
p (Tensor): probability before applying softmax (1, T).
last_attended_idx (int): The index of the inputs of the last attended [0, T].
backward_window (int, optional): Backward window size in dynamic filter.
forward_window (int, optional): Forward window size in dynamic filter.
Returns:
Tensor: Dynamic filtered probability (1, T).
.. _`Singing-Tacotron: Global Duration Control Attention and Dynamic
Filter for End-to-end Singing Voice Synthesis`:
https://arxiv.org/pdf/2202.07907v1.pdf
"""
if p.size(0) != 1:
raise NotImplementedError("Batch dynamic filter is not yet supported.")
backward_idx = last_attended_idx - backward_window
forward_idx = last_attended_idx + forward_window
if backward_idx > 0:
p[:, :backward_idx] = 0
if forward_idx < p.size(1):
p[:, forward_idx:] = 0
return p
class GDCAttLoc(torch.nn.Module):
    """Global duration control attention module.

    Reference: Singing-Tacotron: Global Duration Control Attention and Dynamic
        Filter for End-to-end Singing Voice Synthesis
        (https://arxiv.org/abs/2202.07907)

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_enc_h
    """

    def __init__(
        self, eprojs, dunits, att_dim, aconv_chans, aconv_filts, han_mode=False
    ):
        """Initialize projections and the location convolution."""
        super(GDCAttLoc, self).__init__()
        # NOTE(review): pt_zero_linear is not used in forward() below —
        # presumably kept for checkpoint compatibility; confirm before removing.
        self.pt_zero_linear = torch.nn.Linear(att_dim, 1)
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # convolution over the previous attention weights (location term)
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None  # cached encoder projection
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """Reset cached encoder projection and padding mask (call per utterance)."""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(
        self,
        enc_hs_pad,
        enc_hs_len,
        trans_token,
        dec_z,
        att_prev,
        scaling=1.0,
        last_attended_idx=None,
        backward_window=1,
        forward_window=3,
    ):
        """Calculate GDCAttLoc forward propagation.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor trans_token: Global transition token
            for duration (B x T_max x 1)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: previous attention weight (B x T_max)
        :param float scaling: scaling parameter before applying softmax
        :param int last_attended_idx: index of the inputs of the last attended
        :param int backward_window: backward window size in dynamic filter
        :param int forward_window: forward window size in dynamic filter
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights (B x T_max)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        # initialize attention weight with [1, 0, 0, ...]
        if att_prev is None:
            att_prev = enc_hs_pad.new_zeros(*enc_hs_pad.size()[:2])
            att_prev[:, 0] = 1.0
        # att_prev: utt x frame -> utt x 1 x 1 x frame
        # -> utt x att_conv_chans x 1 x frame
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)
        # NOTE: consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)
        # dynamic filter: restrict previous weights to a window around the
        # last attended index
        if last_attended_idx is not None:
            att_prev = _apply_dynamic_filter(
                att_prev, last_attended_idx, backward_window, forward_window
            )
        # GDCA attention: transition token gates stay vs. move-one-step
        att_prev_shift = F.pad(att_prev, (1, 0))[:, :-1]
        trans_token = trans_token.squeeze(-1)
        trans_token_shift = F.pad(trans_token, (1, 0))[:, :-1]
        w = ((1 - trans_token_shift) * att_prev_shift + trans_token * att_prev) * w
        # NOTE: clamp is needed to avoid nan gradient
        w = F.normalize(torch.clamp(w, 1e-6), p=1, dim=1)
        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        return c, w
| 73,205 | 36.217082 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/rnn/encoders.py | import logging
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from espnet.nets.e2e_asr_common import get_vgg2l_odim
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask, to_device
class RNNP(torch.nn.Module):
    """RNN with projection layer module
    :param int idim: dimension of inputs
    :param int elayers: number of encoder layers
    :param int cdim: number of rnn units (resulted in cdim * 2 if bidirectional)
    :param int hdim: number of projection units
    :param np.ndarray subsample: list of subsampling numbers
    :param float dropout: dropout rate
    :param str typ: The RNN type
    """
    def __init__(self, idim, elayers, cdim, hdim, subsample, dropout, typ="blstm"):
        super(RNNP, self).__init__()
        # A leading "b" in the type string requests a bidirectional RNN
        # (e.g. "blstm", "bgru").
        bidir = typ[0] == "b"
        for i in range(elayers):
            if i == 0:
                inputdim = idim
            else:
                # layers after the first consume the projected output (hdim)
                inputdim = hdim
            RNN = torch.nn.LSTM if "lstm" in typ else torch.nn.GRU
            rnn = RNN(
                inputdim, cdim, num_layers=1, bidirectional=bidir, batch_first=True
            )
            # register each single-layer RNN under a per-layer attribute name
            # so forward() can look it up with getattr
            setattr(self, "%s%d" % ("birnn" if bidir else "rnn", i), rnn)
            # bottleneck layer to merge
            if bidir:
                setattr(self, "bt%d" % i, torch.nn.Linear(2 * cdim, hdim))
            else:
                setattr(self, "bt%d" % i, torch.nn.Linear(cdim, hdim))
        self.elayers = elayers
        self.cdim = cdim
        self.subsample = subsample
        self.typ = typ
        self.bidir = bidir
        self.dropout = dropout
    def forward(self, xs_pad, ilens, prev_state=None):
        """RNNP forward
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor prev_state: batch of previous RNN states
        :return: batch of hidden state sequences (B, Tmax, hdim)
        :rtype: torch.Tensor
        """
        logging.debug(self.__class__.__name__ + " input lengths: " + str(ilens))
        elayer_states = []
        for layer in range(self.elayers):
            if not isinstance(ilens, torch.Tensor):
                ilens = torch.tensor(ilens)
            xs_pack = pack_padded_sequence(xs_pad, ilens.cpu(), batch_first=True)
            rnn = getattr(self, ("birnn" if self.bidir else "rnn") + str(layer))
            if self.training:
                rnn.flatten_parameters()
            if prev_state is not None and rnn.bidirectional:
                # streaming input: the backward-direction state from the
                # previous window would point the wrong way, so zero it out
                prev_state = reset_backward_rnn_state(prev_state)
            ys, states = rnn(
                xs_pack, hx=None if prev_state is None else prev_state[layer]
            )
            elayer_states.append(states)
            # ys: utt list of frame x cdim x 2 (2: means bidirectional)
            ys_pad, ilens = pad_packed_sequence(ys, batch_first=True)
            sub = self.subsample[layer + 1]
            if sub > 1:
                # drop frames to subsample in time and shrink lengths to match
                ys_pad = ys_pad[:, ::sub]
                ilens = torch.tensor([int(i + 1) // sub for i in ilens])
            # (sum _utt frame_utt) x dim
            projection_layer = getattr(self, "bt%d" % layer)
            projected = projection_layer(ys_pad.contiguous().view(-1, ys_pad.size(2)))
            xs_pad = projected.view(ys_pad.size(0), ys_pad.size(1), -1)
            if layer < self.elayers - 1:
                # tanh + dropout between layers (not after the final layer)
                xs_pad = torch.tanh(F.dropout(xs_pad, p=self.dropout))
        return xs_pad, ilens, elayer_states  # x: utt list of frame x dim
class RNN(torch.nn.Module):
    """RNN module
    :param int idim: dimension of inputs
    :param int elayers: number of encoder layers
    :param int cdim: number of rnn units (resulted in cdim * 2 if bidirectional)
    :param int hdim: number of final projection units
    :param float dropout: dropout rate
    :param str typ: The RNN type
    """
    def __init__(self, idim, elayers, cdim, hdim, dropout, typ="blstm"):
        super(RNN, self).__init__()
        # A leading "b" in the type string requests a bidirectional RNN.
        bidir = typ[0] == "b"
        self.nbrnn = (
            torch.nn.LSTM(
                idim,
                cdim,
                elayers,
                batch_first=True,
                dropout=dropout,
                bidirectional=bidir,
            )
            if "lstm" in typ
            else torch.nn.GRU(
                idim,
                cdim,
                elayers,
                batch_first=True,
                dropout=dropout,
                bidirectional=bidir,
            )
        )
        # single final projection to hdim (bidirectional doubles the RNN output)
        if bidir:
            self.l_last = torch.nn.Linear(cdim * 2, hdim)
        else:
            self.l_last = torch.nn.Linear(cdim, hdim)
        self.typ = typ
    def forward(self, xs_pad, ilens, prev_state=None):
        """RNN forward
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor prev_state: batch of previous RNN states
        :return: batch of hidden state sequences (B, Tmax, eprojs)
        :rtype: torch.Tensor
        """
        logging.debug(self.__class__.__name__ + " input lengths: " + str(ilens))
        if not isinstance(ilens, torch.Tensor):
            ilens = torch.tensor(ilens)
        xs_pack = pack_padded_sequence(xs_pad, ilens.cpu(), batch_first=True)
        if self.training:
            self.nbrnn.flatten_parameters()
        if prev_state is not None and self.nbrnn.bidirectional:
            # We assume that when previous state is passed,
            # it means that we're streaming the input
            # and therefore cannot propagate backward BRNN state
            # (otherwise it goes in the wrong direction)
            prev_state = reset_backward_rnn_state(prev_state)
        ys, states = self.nbrnn(xs_pack, hx=prev_state)
        # ys: utt list of frame x cdim x 2 (2: means bidirectional)
        ys_pad, ilens = pad_packed_sequence(ys, batch_first=True)
        # (sum _utt frame_utt) x dim
        projected = torch.tanh(
            self.l_last(ys_pad.contiguous().view(-1, ys_pad.size(2)))
        )
        xs_pad = projected.view(ys_pad.size(0), ys_pad.size(1), -1)
        return xs_pad, ilens, states  # x: utt list of frame x dim
def reset_backward_rnn_state(states):
    """Zero out the backward-direction states of a bidirectional RNN.

    Useful in processing of sliding windows over the inputs, where the
    backward pass of the next window must not start from stale state.

    :param states: a state tensor, or a list/tuple of state tensors
    :return: the same object(s), mutated in place
    """
    # Bidirectional state tensors interleave directions along dim 0
    # (fwd0, bwd0, fwd1, bwd1, ...), so odd indices are the backward states.
    targets = states if isinstance(states, (list, tuple)) else (states,)
    for state in targets:
        state[1::2] = 0.0
    return states
class VGG2L(torch.nn.Module):
    """VGG-like module
    :param int in_channel: number of input channels
    """
    def __init__(self, in_channel=1):
        super(VGG2L, self).__init__()
        # CNN layer (VGG motivated): two conv blocks, each followed by a 2x2
        # max pooling, so time and feature axes are both reduced by 4 overall
        self.conv1_1 = torch.nn.Conv2d(in_channel, 64, 3, stride=1, padding=1)
        self.conv1_2 = torch.nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.conv2_1 = torch.nn.Conv2d(64, 128, 3, stride=1, padding=1)
        self.conv2_2 = torch.nn.Conv2d(128, 128, 3, stride=1, padding=1)
        self.in_channel = in_channel
    def forward(self, xs_pad, ilens, **kwargs):
        """VGG2L forward
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :return: batch of padded hidden state sequences (B, Tmax // 4, 128 * D // 4)
        :rtype: torch.Tensor
        """
        logging.debug(self.__class__.__name__ + " input lengths: " + str(ilens))
        # x: utt x frame x dim
        # xs_pad = F.pad_sequence(xs_pad)
        # x: utt x 1 (input channel num) x frame x dim
        xs_pad = xs_pad.view(
            xs_pad.size(0),
            xs_pad.size(1),
            self.in_channel,
            xs_pad.size(2) // self.in_channel,
        ).transpose(1, 2)
        # NOTE: max_pool1d ?
        xs_pad = F.relu(self.conv1_1(xs_pad))
        xs_pad = F.relu(self.conv1_2(xs_pad))
        xs_pad = F.max_pool2d(xs_pad, 2, stride=2, ceil_mode=True)
        xs_pad = F.relu(self.conv2_1(xs_pad))
        xs_pad = F.relu(self.conv2_2(xs_pad))
        xs_pad = F.max_pool2d(xs_pad, 2, stride=2, ceil_mode=True)
        if torch.is_tensor(ilens):
            ilens = ilens.cpu().numpy()
        else:
            ilens = np.array(ilens, dtype=np.float32)
        # lengths shrink to ceil(len / 2) at each of the two pooling stages,
        # matching ceil_mode=True above
        ilens = np.array(np.ceil(ilens / 2), dtype=np.int64)
        ilens = np.array(
            np.ceil(np.array(ilens, dtype=np.float32) / 2), dtype=np.int64
        ).tolist()
        # x: utt_list of frame (remove zeropaded frames) x (input channel num x dim)
        xs_pad = xs_pad.transpose(1, 2)
        # collapse channel and feature axes into a single feature dimension
        xs_pad = xs_pad.contiguous().view(
            xs_pad.size(0), xs_pad.size(1), xs_pad.size(2) * xs_pad.size(3)
        )
        return xs_pad, ilens, None  # no state in this layer
class Encoder(torch.nn.Module):
    """Encoder module
    :param str etype: type of encoder network
    :param int idim: number of dimensions of encoder network
    :param int elayers: number of layers of encoder network
    :param int eunits: number of lstm units of encoder network
    :param int eprojs: number of projection units of encoder network
    :param np.ndarray subsample: list of subsampling numbers
    :param float dropout: dropout rate
    :param int in_channel: number of input channels
    """
    def __init__(
        self, etype, idim, elayers, eunits, eprojs, subsample, dropout, in_channel=1
    ):
        super(Encoder, self).__init__()
        # strip the optional "vgg" prefix characters and trailing "p"
        # (projection) marker to get the core RNN type, e.g. "vggblstmp" -> "blstm"
        typ = etype.lstrip("vgg").rstrip("p")
        if typ not in ["lstm", "gru", "blstm", "bgru"]:
            logging.error("Error: need to specify an appropriate encoder architecture")
        if etype.startswith("vgg"):
            if etype[-1] == "p":
                # VGG front-end followed by projection RNN
                self.enc = torch.nn.ModuleList(
                    [
                        VGG2L(in_channel),
                        RNNP(
                            get_vgg2l_odim(idim, in_channel=in_channel),
                            elayers,
                            eunits,
                            eprojs,
                            subsample,
                            dropout,
                            typ=typ,
                        ),
                    ]
                )
                logging.info("Use CNN-VGG + " + typ.upper() + "P for encoder")
            else:
                self.enc = torch.nn.ModuleList(
                    [
                        VGG2L(in_channel),
                        RNN(
                            get_vgg2l_odim(idim, in_channel=in_channel),
                            elayers,
                            eunits,
                            eprojs,
                            dropout,
                            typ=typ,
                        ),
                    ]
                )
                logging.info("Use CNN-VGG + " + typ.upper() + " for encoder")
            # the VGG front-end subsamples time by 4 (two 2x2 poolings)
            self.conv_subsampling_factor = 4
        else:
            if etype[-1] == "p":
                self.enc = torch.nn.ModuleList(
                    [RNNP(idim, elayers, eunits, eprojs, subsample, dropout, typ=typ)]
                )
                logging.info(typ.upper() + " with every-layer projection for encoder")
            else:
                self.enc = torch.nn.ModuleList(
                    [RNN(idim, elayers, eunits, eprojs, dropout, typ=typ)]
                )
                logging.info(typ.upper() + " without projection for encoder")
            self.conv_subsampling_factor = 1
    def forward(self, xs_pad, ilens, prev_states=None):
        """Encoder forward
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor prev_state: batch of previous encoder hidden states (?, ...)
        :return: batch of hidden state sequences (B, Tmax, eprojs)
        :rtype: torch.Tensor
        """
        if prev_states is None:
            prev_states = [None] * len(self.enc)
        assert len(prev_states) == len(self.enc)
        current_states = []
        # run the (optional VGG, RNN) sub-modules in sequence, threading
        # lengths and per-module state through each stage
        for module, prev_state in zip(self.enc, prev_states):
            xs_pad, ilens, states = module(xs_pad, ilens, prev_state=prev_state)
            current_states.append(states)
        # make mask to remove bias value in padded part
        mask = to_device(xs_pad, make_pad_mask(ilens).unsqueeze(-1))
        return xs_pad.masked_fill(mask, 0.0), ilens, current_states
def encoder_for(args, idim, subsample):
    """Instantiate an encoder module given the program arguments

    :param Namespace args: The arguments
    :param int or List of integer idim: dimension of input, e.g. 83, or
                                        List of dimensions of inputs, e.g. [83,83]
    :param List or List of List subsample: subsample factors, e.g. [1,2,2,1,1], or
                                           List of subsample factors of each encoder.
                                           e.g. [[1,2,2,1,1], [1,2,2,1,1]]
    :rtype torch.nn.Module
    :return: The encoder module
    :raises ValueError: if ``args.num_encs`` is less than one
    """
    num_encs = getattr(args, "num_encs", 1)  # use getattr to keep compatibility
    # Guard clause: the old control flow used "elif num_encs >= 1" for the
    # multi-encoder branch (only reachable for > 1) and raised an error
    # claiming "more than one" was required, although one encoder is valid.
    if num_encs < 1:
        raise ValueError(
            "Number of encoders needs to be at least one. {}".format(num_encs)
        )
    if num_encs == 1:
        # compatible with single encoder asr mode
        return Encoder(
            args.etype,
            idim,
            args.elayers,
            args.eunits,
            args.eprojs,
            subsample,
            args.dropout_rate,
        )
    # multi-encoder mode: per-encoder hyperparameters are lists; eprojs is shared
    enc_list = torch.nn.ModuleList()
    for idx in range(num_encs):
        enc = Encoder(
            args.etype[idx],
            idim[idx],
            args.elayers[idx],
            args.eunits[idx],
            args.eprojs,
            subsample[idx],
            args.dropout_rate[idx],
        )
        enc_list.append(enc)
    return enc_list
| 14,152 | 37.045699 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/lm/transformer.py | """Transformer language model."""
import logging
from typing import Any, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet.nets.lm_interface import LMInterface
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.utils.cli_utils import strtobool
class TransformerLM(nn.Module, LMInterface, BatchScorerInterface):
    """Transformer language model."""
    @staticmethod
    def add_arguments(parser):
        """Add arguments to command line argument parser."""
        parser.add_argument(
            "--layer", type=int, default=4, help="Number of hidden layers"
        )
        parser.add_argument(
            "--unit",
            type=int,
            default=1024,
            help="Number of hidden units in feedforward layer",
        )
        parser.add_argument(
            "--att-unit",
            type=int,
            default=256,
            help="Number of hidden units in attention layer",
        )
        parser.add_argument(
            "--embed-unit",
            type=int,
            default=128,
            help="Number of hidden units in embedding layer",
        )
        parser.add_argument(
            "--head", type=int, default=2, help="Number of multi head attention"
        )
        parser.add_argument(
            "--dropout-rate", type=float, default=0.5, help="dropout probability"
        )
        parser.add_argument(
            "--att-dropout-rate",
            type=float,
            default=0.0,
            help="att dropout probability",
        )
        parser.add_argument(
            "--emb-dropout-rate",
            type=float,
            default=0.0,
            help="emb dropout probability",
        )
        parser.add_argument(
            "--tie-weights",
            type=strtobool,
            default=False,
            help="Tie input and output embeddings",
        )
        parser.add_argument(
            "--pos-enc",
            default="sinusoidal",
            choices=["sinusoidal", "none"],
            help="positional encoding",
        )
        return parser
    def __init__(self, n_vocab, args):
        """Initialize class.
        Args:
            n_vocab (int): The size of the vocabulary
            args (argparse.Namespace): configurations. see py:method:`add_arguments`
        """
        nn.Module.__init__(self)
        # NOTE: for a compatibility with less than 0.9.7 version models
        emb_dropout_rate = getattr(args, "emb_dropout_rate", 0.0)
        # NOTE: for a compatibility with less than 0.9.7 version models
        tie_weights = getattr(args, "tie_weights", False)
        # NOTE: for a compatibility with less than 0.9.7 version models
        att_dropout_rate = getattr(args, "att_dropout_rate", 0.0)
        if args.pos_enc == "sinusoidal":
            pos_enc_class = PositionalEncoding
        elif args.pos_enc == "none":
            def pos_enc_class(*args, **kwargs):
                return nn.Sequential()  # identity
        else:
            raise ValueError(f"unknown pos-enc option: {args.pos_enc}")
        self.embed = nn.Embedding(n_vocab, args.embed_unit)
        if emb_dropout_rate == 0.0:
            self.embed_drop = None
        else:
            self.embed_drop = nn.Dropout(emb_dropout_rate)
        self.encoder = Encoder(
            idim=args.embed_unit,
            attention_dim=args.att_unit,
            attention_heads=args.head,
            linear_units=args.unit,
            num_blocks=args.layer,
            dropout_rate=args.dropout_rate,
            attention_dropout_rate=att_dropout_rate,
            input_layer="linear",
            pos_enc_class=pos_enc_class,
        )
        self.decoder = nn.Linear(args.att_unit, n_vocab)
        logging.info("Tie weights set to {}".format(tie_weights))
        logging.info("Dropout set to {}".format(args.dropout_rate))
        logging.info("Emb Dropout set to {}".format(emb_dropout_rate))
        logging.info("Att Dropout set to {}".format(att_dropout_rate))
        if tie_weights:
            # weight tying requires the output projection to have the same
            # shape as the input embedding table
            assert (
                args.att_unit == args.embed_unit
            ), "Tie Weights: True need embedding and final dimensions to match"
            self.decoder.weight = self.embed.weight
    def _target_mask(self, ys_in_pad):
        # combine the padding mask (token id 0 is padding) with a causal
        # subsequent mask so positions cannot attend to the future
        ys_mask = ys_in_pad != 0
        m = subsequent_mask(ys_mask.size(-1), device=ys_mask.device).unsqueeze(0)
        return ys_mask.unsqueeze(-2) & m
    def forward(
        self, x: torch.Tensor, t: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute LM loss value from buffer sequences.
        Args:
            x (torch.Tensor): Input ids. (batch, len)
            t (torch.Tensor): Target ids. (batch, len)
        Returns:
            tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Tuple of
                loss to backward (scalar),
                negative log-likelihood of t: -log p(t) (scalar) and
                the number of elements in x (scalar)
        Notes:
            The last two return values are used
            in perplexity: p(t)^{-n} = exp(-log p(t) / n)
        """
        xm = x != 0
        if self.embed_drop is not None:
            emb = self.embed_drop(self.embed(x))
        else:
            emb = self.embed(x)
        h, _ = self.encoder(emb, self._target_mask(x))
        y = self.decoder(h)
        loss = F.cross_entropy(y.view(-1, y.shape[-1]), t.view(-1), reduction="none")
        # zero out losses at padded positions before averaging
        mask = xm.to(dtype=loss.dtype)
        logp = loss * mask.view(-1)
        logp = logp.sum()
        count = mask.sum()
        return logp / count, logp, count
    def score(
        self, y: torch.Tensor, state: Any, x: torch.Tensor
    ) -> Tuple[torch.Tensor, Any]:
        """Score new token.
        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens
            x (torch.Tensor): encoder feature that generates ys.
        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                torch.float32 scores for next token (n_vocab)
                and next state for ys
        """
        y = y.unsqueeze(0)
        if self.embed_drop is not None:
            emb = self.embed_drop(self.embed(y))
        else:
            emb = self.embed(y)
        # incremental forward: reuse the per-layer cache carried in `state`
        h, _, cache = self.encoder.forward_one_step(
            emb, self._target_mask(y), cache=state
        )
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1).squeeze(0)
        return logp, cache
    # batch beam search API (see BatchScorerInterface)
    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch (required).
        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).
        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.
        """
        # merge states
        n_batch = len(ys)
        n_layers = len(self.encoder.encoders)
        if states[0] is None:
            batch_state = None
        else:
            # transpose state of [batch, layer] into [layer, batch]
            batch_state = [
                torch.stack([states[b][i] for b in range(n_batch)])
                for i in range(n_layers)
            ]
        if self.embed_drop is not None:
            emb = self.embed_drop(self.embed(ys))
        else:
            emb = self.embed(ys)
        # batch decoding
        h, _, states = self.encoder.forward_one_step(
            emb, self._target_mask(ys), cache=batch_state
        )
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1)
        # transpose state of [layer, batch] into [batch, layer]
        state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
        return logp, state_list
| 8,410 | 32.50996 | 86 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/lm/default.py | """Default Recurrent Neural Network Languge Model in `lm_train.py`."""
import logging
from typing import Any, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet.nets.lm_interface import LMInterface
from espnet.nets.pytorch_backend.e2e_asr import to_device
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.utils.cli_utils import strtobool
class DefaultRNNLM(BatchScorerInterface, LMInterface, nn.Module):
    """Default RNNLM for `LMInterface` Implementation.
    Note:
        PyTorch seems to have memory leak when one GPU compute this after data parallel.
        If parallel GPUs compute this, it seems to be fine.
        See also https://github.com/espnet/espnet/issues/1075
    """
    @staticmethod
    def add_arguments(parser):
        """Add arguments to command line argument parser."""
        parser.add_argument(
            "--type",
            type=str,
            default="lstm",
            nargs="?",
            choices=["lstm", "gru"],
            help="Which type of RNN to use",
        )
        parser.add_argument(
            "--layer", "-l", type=int, default=2, help="Number of hidden layers"
        )
        parser.add_argument(
            "--unit", "-u", type=int, default=650, help="Number of hidden units"
        )
        parser.add_argument(
            "--embed-unit",
            default=None,
            type=int,
            help="Number of hidden units in embedding layer, "
            "if it is not specified, it keeps the same number with hidden units.",
        )
        parser.add_argument(
            "--dropout-rate", type=float, default=0.5, help="dropout probability"
        )
        parser.add_argument(
            "--emb-dropout-rate",
            type=float,
            default=0.0,
            help="emb dropout probability",
        )
        parser.add_argument(
            "--tie-weights",
            type=strtobool,
            default=False,
            help="Tie input and output embeddings",
        )
        return parser
    def __init__(self, n_vocab, args):
        """Initialize class.
        Args:
            n_vocab (int): The size of the vocabulary
            args (argparse.Namespace): configurations. see py:method:`add_arguments`
        """
        nn.Module.__init__(self)
        # NOTE: for a compatibility with less than 0.5.0 version models
        dropout_rate = getattr(args, "dropout_rate", 0.0)
        # NOTE: for a compatibility with less than 0.6.1 version models
        embed_unit = getattr(args, "embed_unit", None)
        # NOTE: for a compatibility with less than 0.9.7 version models
        emb_dropout_rate = getattr(args, "emb_dropout_rate", 0.0)
        # NOTE: for a compatibility with less than 0.9.7 version models
        tie_weights = getattr(args, "tie_weights", False)
        # wrap the bare RNNLM with the classifier that computes the loss
        self.model = ClassifierWithState(
            RNNLM(
                n_vocab,
                args.layer,
                args.unit,
                embed_unit,
                args.type,
                dropout_rate,
                emb_dropout_rate,
                tie_weights,
            )
        )
    def state_dict(self):
        """Dump state dict."""
        return self.model.state_dict()
    def load_state_dict(self, d):
        """Load state dict."""
        self.model.load_state_dict(d)
    def forward(self, x, t):
        """Compute LM loss value from buffer sequences.
        Args:
            x (torch.Tensor): Input ids. (batch, len)
            t (torch.Tensor): Target ids. (batch, len)
        Returns:
            tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Tuple of
                loss to backward (scalar),
                negative log-likelihood of t: -log p(t) (scalar) and
                the number of elements in x (scalar)
        Notes:
            The last two return values are used
            in perplexity: p(t)^{-n} = exp(-log p(t) / n)
        """
        loss = 0
        logp = 0
        count = torch.tensor(0).long()
        state = None
        batch_size, sequence_length = x.shape
        for i in range(sequence_length):
            # Compute the loss at this time step and accumulate it
            state, loss_batch = self.model(state, x[:, i], t[:, i])
            non_zeros = torch.sum(x[:, i] != 0, dtype=loss_batch.dtype)
            loss += loss_batch.mean() * non_zeros
            # NOTE(review): non_zeros is a scalar count, so every element of
            # loss_batch (including padded positions) is scaled by it here;
            # a per-element pad mask may have been intended — confirm upstream.
            logp += torch.sum(loss_batch * non_zeros)
            count += int(non_zeros)
        return loss / batch_size, loss, count.to(loss.device)
    def score(self, y, state, x):
        """Score new token.
        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens
            x (torch.Tensor): 2D encoder feature that generates ys.
        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                torch.float32 scores for next token (n_vocab)
                and next state for ys
        """
        new_state, scores = self.model.predict(state, y[-1].unsqueeze(0))
        return scores.squeeze(0), new_state
    def final_score(self, state):
        """Score eos.
        Args:
            state: Scorer state for prefix tokens
        Returns:
            float: final score
        """
        return self.model.final(state)
    # batch beam search API (see BatchScorerInterface)
    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.
        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).
        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.
        """
        # merge states
        n_batch = len(ys)
        n_layers = self.model.predictor.n_layers
        # LSTM state has both cell and hidden parts; GRU only hidden
        if self.model.predictor.typ == "lstm":
            keys = ("c", "h")
        else:
            keys = ("h",)
        if states[0] is None:
            states = None
        else:
            # transpose state of [batch, key, layer] into [key, layer, batch]
            states = {
                k: [
                    torch.stack([states[b][k][i] for b in range(n_batch)])
                    for i in range(n_layers)
                ]
                for k in keys
            }
        states, logp = self.model.predict(states, ys[:, -1])
        # transpose state of [key, layer, batch] into [batch, key, layer]
        return (
            logp,
            [
                {k: [states[k][i][b] for i in range(n_layers)] for k in keys}
                for b in range(n_batch)
            ],
        )
class ClassifierWithState(nn.Module):
    """A wrapper for pytorch RNNLM."""
    def __init__(
        self, predictor, lossfun=nn.CrossEntropyLoss(reduction="none"), label_key=-1
    ):
        """Initialize class.
        :param torch.nn.Module predictor : The RNNLM
        :param function lossfun : The loss function to use
        :param int/str label_key : position (int) or name (str) of the
            ground-truth labels among forward()'s arguments
        """
        if not (isinstance(label_key, (int, str))):
            raise TypeError("label_key must be int or str, but is %s" % type(label_key))
        super(ClassifierWithState, self).__init__()
        self.lossfun = lossfun
        self.y = None
        self.loss = None
        self.label_key = label_key
        self.predictor = predictor
    def forward(self, state, *args, **kwargs):
        """Compute the loss value for an input and label pair.
        Notes:
            It also computes accuracy and stores it to the attribute.
            When ``label_key`` is ``int``, the corresponding element in ``args``
            is treated as ground truth labels. And when it is ``str``, the
            element in ``kwargs`` is used.
            The all elements of ``args`` and ``kwargs`` except the groundtruth
            labels are features.
            It feeds features to the predictor and compare the result
            with ground truth labels.
        :param torch.Tensor state : the LM state
        :param list[torch.Tensor] args : Input minibatch
        :param dict[torch.Tensor] kwargs : Input minibatch
        :return loss value
        :rtype torch.Tensor
        """
        if isinstance(self.label_key, int):
            if not (-len(args) <= self.label_key < len(args)):
                msg = "Label key %d is out of bounds" % self.label_key
                raise ValueError(msg)
            t = args[self.label_key]
            # remove the label from the feature arguments passed to the predictor
            if self.label_key == -1:
                args = args[:-1]
            else:
                args = args[: self.label_key] + args[self.label_key + 1 :]
        elif isinstance(self.label_key, str):
            if self.label_key not in kwargs:
                msg = 'Label key "%s" is not found' % self.label_key
                raise ValueError(msg)
            t = kwargs[self.label_key]
            del kwargs[self.label_key]
        self.y = None
        self.loss = None
        state, self.y = self.predictor(state, *args, **kwargs)
        self.loss = self.lossfun(self.y, t)
        return state, self.loss
    def predict(self, state, x):
        """Predict log probabilities for given state and input x using the predictor.
        :param torch.Tensor state : The current state
        :param torch.Tensor x : The input
        :return a tuple (new state, log prob vector)
        :rtype (torch.Tensor, torch.Tensor)
        """
        # a predictor that already outputs normalized log-probs is used as-is
        if hasattr(self.predictor, "normalized") and self.predictor.normalized:
            return self.predictor(state, x)
        else:
            state, z = self.predictor(state, x)
            return state, F.log_softmax(z, dim=1)
    def buff_predict(self, state, x, n):
        """Predict new tokens from buffered inputs."""
        if self.predictor.__class__.__name__ == "RNNLM":
            return self.predict(state, x)
        # non-RNNLM predictors are driven one sample at a time
        new_state = []
        new_log_y = []
        for i in range(n):
            state_i = None if state is None else state[i]
            state_i, log_y = self.predict(state_i, x[i].unsqueeze(0))
            new_state.append(state_i)
            new_log_y.append(log_y)
        return new_state, torch.cat(new_log_y)
    def final(self, state, index=None):
        """Predict final log probabilities for given state using the predictor.
        :param state: The state
        :return The final log probabilities
        :rtype torch.Tensor
        """
        if hasattr(self.predictor, "final"):
            if index is not None:
                return self.predictor.final(state[index])
            else:
                return self.predictor.final(state)
        else:
            # predictors without a final() contribute nothing at EOS
            return 0.0
# Definition of a recurrent net for language modeling
class RNNLM(nn.Module):
    """A pytorch RNNLM."""
    def __init__(
        self,
        n_vocab,
        n_layers,
        n_units,
        n_embed=None,
        typ="lstm",
        dropout_rate=0.5,
        emb_dropout_rate=0.0,
        tie_weights=False,
    ):
        """Initialize class.
        :param int n_vocab: The size of the vocabulary
        :param int n_layers: The number of layers to create
        :param int n_units: The number of units per layer
        :param str typ: The RNN type
        """
        super(RNNLM, self).__init__()
        if n_embed is None:
            n_embed = n_units
        self.embed = nn.Embedding(n_vocab, n_embed)
        if emb_dropout_rate == 0.0:
            self.embed_drop = None
        else:
            self.embed_drop = nn.Dropout(emb_dropout_rate)
        # stack of per-layer RNN cells; forward() steps them one token at a time
        if typ == "lstm":
            self.rnn = nn.ModuleList(
                [nn.LSTMCell(n_embed, n_units)]
                + [nn.LSTMCell(n_units, n_units) for _ in range(n_layers - 1)]
            )
        else:
            self.rnn = nn.ModuleList(
                [nn.GRUCell(n_embed, n_units)]
                + [nn.GRUCell(n_units, n_units) for _ in range(n_layers - 1)]
            )
        # one dropout before each cell, plus one before the output projection
        self.dropout = nn.ModuleList(
            [nn.Dropout(dropout_rate) for _ in range(n_layers + 1)]
        )
        self.lo = nn.Linear(n_units, n_vocab)
        self.n_layers = n_layers
        self.n_units = n_units
        self.typ = typ
        logging.info("Tie weights set to {}".format(tie_weights))
        logging.info("Dropout set to {}".format(dropout_rate))
        logging.info("Emb Dropout set to {}".format(emb_dropout_rate))
        if tie_weights:
            assert (
                n_embed == n_units
            ), "Tie Weights: True need embedding and final dimensions to match"
            self.lo.weight = self.embed.weight
        # initialize parameters from uniform distribution
        for param in self.parameters():
            param.data.uniform_(-0.1, 0.1)
    def zero_state(self, batchsize):
        """Initialize state."""
        p = next(self.parameters())
        return torch.zeros(batchsize, self.n_units).to(device=p.device, dtype=p.dtype)
    def forward(self, state, x):
        """Forward neural networks."""
        if state is None:
            # first step: start from all-zero hidden (and cell) states
            h = [to_device(x, self.zero_state(x.size(0))) for n in range(self.n_layers)]
            state = {"h": h}
            if self.typ == "lstm":
                c = [
                    to_device(x, self.zero_state(x.size(0)))
                    for n in range(self.n_layers)
                ]
                state = {"c": c, "h": h}
        h = [None] * self.n_layers
        if self.embed_drop is not None:
            emb = self.embed_drop(self.embed(x))
        else:
            emb = self.embed(x)
        if self.typ == "lstm":
            c = [None] * self.n_layers
            h[0], c[0] = self.rnn[0](
                self.dropout[0](emb), (state["h"][0], state["c"][0])
            )
            for n in range(1, self.n_layers):
                h[n], c[n] = self.rnn[n](
                    self.dropout[n](h[n - 1]), (state["h"][n], state["c"][n])
                )
            state = {"c": c, "h": h}
        else:
            h[0] = self.rnn[0](self.dropout[0](emb), state["h"][0])
            for n in range(1, self.n_layers):
                h[n] = self.rnn[n](self.dropout[n](h[n - 1]), state["h"][n])
            state = {"h": h}
        y = self.lo(self.dropout[-1](h[-1]))
        return state, y
| 14,561 | 32.865116 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/lm/seq_rnn.py | """Sequential implementation of Recurrent Neural Network Language Model."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet.nets.lm_interface import LMInterface
class SequentialRNNLM(LMInterface, torch.nn.Module):
    """Sequential RNNLM.
    See also:
        https://github.com/pytorch/examples/blob/4581968193699de14b56527296262dd76ab43557/word_language_model/model.py
    """
    @staticmethod
    def add_arguments(parser):
        """Add arguments to command line argument parser."""
        parser.add_argument(
            "--type",
            type=str,
            default="lstm",
            nargs="?",
            choices=["lstm", "gru"],
            help="Which type of RNN to use",
        )
        parser.add_argument(
            "--layer", "-l", type=int, default=2, help="Number of hidden layers"
        )
        parser.add_argument(
            "--unit", "-u", type=int, default=650, help="Number of hidden units"
        )
        parser.add_argument(
            "--dropout-rate", type=float, default=0.5, help="dropout probability"
        )
        return parser
    def __init__(self, n_vocab, args):
        """Initialize class.
        Args:
            n_vocab (int): The size of the vocabulary
            args (argparse.Namespace): configurations. see py:method:`add_arguments`
        """
        torch.nn.Module.__init__(self)
        self._setup(
            rnn_type=args.type.upper(),
            ntoken=n_vocab,
            ninp=args.unit,
            nhid=args.unit,
            nlayers=args.layer,
            dropout=args.dropout_rate,
        )
    def _setup(
        self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False
    ):
        # NOTE(review): the RNN is built without batch_first=True while the
        # forward() docstring documents (batch, len) inputs, so dim 0 is
        # treated as time by nn.RNN/LSTM/GRU — confirm against callers.
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        if rnn_type in ["LSTM", "GRU"]:
            self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
        else:
            try:
                nonlinearity = {"RNN_TANH": "tanh", "RNN_RELU": "relu"}[rnn_type]
            except KeyError:
                raise ValueError(
                    "An invalid option for `--model` was supplied, "
                    "options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']"
                )
            self.rnn = nn.RNN(
                ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout
            )
        self.decoder = nn.Linear(nhid, ntoken)
        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers:
        # A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            if nhid != ninp:
                raise ValueError(
                    "When using the tied flag, nhid must be equal to emsize"
                )
            self.decoder.weight = self.encoder.weight
        self._init_weights()
        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers
    def _init_weights(self):
        # NOTE: original init in pytorch/examples
        # initrange = 0.1
        # self.encoder.weight.data.uniform_(-initrange, initrange)
        # self.decoder.bias.data.zero_()
        # self.decoder.weight.data.uniform_(-initrange, initrange)
        # NOTE: our default.py:RNNLM init
        for param in self.parameters():
            param.data.uniform_(-0.1, 0.1)
    def forward(self, x, t):
        """Compute LM loss value from buffer sequences.
        Args:
            x (torch.Tensor): Input ids. (batch, len)
            t (torch.Tensor): Target ids. (batch, len)
        Returns:
            tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Tuple of
                loss to backward (scalar),
                negative log-likelihood of t: -log p(t) (scalar) and
                the number of elements in x (scalar)
        Notes:
            The last two return values are used
            in perplexity: p(t)^{-n} = exp(-log p(t) / n)
        """
        y = self._before_loss(x, None)[0]
        # token id 0 is padding: exclude padded positions from the loss
        mask = (x != 0).to(y.dtype)
        loss = F.cross_entropy(y.view(-1, y.shape[-1]), t.view(-1), reduction="none")
        logp = loss * mask.view(-1)
        logp = logp.sum()
        count = mask.sum()
        return logp / count, logp, count
    def _before_loss(self, input, hidden):
        # embed -> RNN -> dropout -> per-position vocabulary logits
        emb = self.drop(self.encoder(input))
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output)
        decoded = self.decoder(
            output.view(output.size(0) * output.size(1), output.size(2))
        )
        return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
    def init_state(self, x):
        """Get an initial state for decoding.
        Args:
            x (torch.Tensor): The encoded feature tensor
        Returns: initial state
        """
        bsz = 1
        weight = next(self.parameters())
        # LSTM carries (hidden, cell); GRU/RNN only hidden
        if self.rnn_type == "LSTM":
            return (
                weight.new_zeros(self.nlayers, bsz, self.nhid),
                weight.new_zeros(self.nlayers, bsz, self.nhid),
            )
        else:
            return weight.new_zeros(self.nlayers, bsz, self.nhid)
    def score(self, y, state, x):
        """Score new token.
        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens
            x (torch.Tensor): 2D encoder feature that generates ys.
        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                torch.float32 scores for next token (n_vocab)
                and next state for ys
        """
        # feed only the last prefix token; recurrence is carried via `state`
        y, new_state = self._before_loss(y[-1].view(1, 1), state)
        logp = y.log_softmax(dim=-1).view(-1)
        return logp, new_state
| 5,940 | 32.189944 | 118 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/conformer/encoder_layer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder self-attention layer definition."""
import torch
from torch import nn
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class EncoderLayer(nn.Module):
    """Encoder layer module.
    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance
            can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        conv_module (torch.nn.Module): Convolution module instance.
            `ConvlutionModule` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied. i.e. x -> x + att(x)
        stochastic_depth_rate (float): Proability to skip this layer.
            During training, the layer may skip residual computation and return input
            as-is with given probability.
    """
    def __init__(
        self,
        size,
        self_attn,
        feed_forward,
        feed_forward_macaron,
        conv_module,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
        stochastic_depth_rate=0.0,
    ):
        """Construct an EncoderLayer object."""
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        self.conv_module = conv_module
        self.norm_ff = LayerNorm(size) # for the FNN module
        self.norm_mha = LayerNorm(size) # for the MHA module
        if feed_forward_macaron is not None:
            self.norm_ff_macaron = LayerNorm(size)
            # Macaron style: each of the two half-step FFNs contributes 0.5.
            self.ff_scale = 0.5
        else:
            self.ff_scale = 1.0
        if self.conv_module is not None:
            self.norm_conv = LayerNorm(size) # for the CNN module
            self.norm_final = LayerNorm(size) # for the final output of the block
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        if self.concat_after:
            self.concat_linear = nn.Linear(size + size, size)
        self.stochastic_depth_rate = stochastic_depth_rate
    def forward(self, x_input, mask, cache=None):
        """Compute encoded features.
        Args:
            x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.
                - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].
                - w/o pos emb: Tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).
        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, 1, time).
        """
        if isinstance(x_input, tuple):
            x, pos_emb = x_input[0], x_input[1]
        else:
            x, pos_emb = x_input, None
        skip_layer = False
        # with stochastic depth, residual connection `x + f(x)` becomes
        # `x <- x + 1 / (1 - p) * f(x)` at training time.
        stoch_layer_coeff = 1.0
        if self.training and self.stochastic_depth_rate > 0:
            skip_layer = torch.rand(1).item() < self.stochastic_depth_rate
            stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)
        if skip_layer:
            # Layer skipped: pass the (cache-extended) input through untouched.
            if cache is not None:
                x = torch.cat([cache, x], dim=1)
            if pos_emb is not None:
                return (x, pos_emb), mask
            return x, mask
        # whether to use macaron style
        if self.feed_forward_macaron is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_ff_macaron(x)
            x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(
                self.feed_forward_macaron(x)
            )
            if not self.normalize_before:
                x = self.norm_ff_macaron(x)
        # multi-headed self-attention module
        residual = x
        if self.normalize_before:
            x = self.norm_mha(x)
        if cache is None:
            x_q = x
        else:
            # Incremental decoding: only the newest frame is used as the
            # query, while keys/values span the whole (cached) sequence.
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]
        if pos_emb is not None:
            x_att = self.self_attn(x_q, x, x, pos_emb, mask)
        else:
            x_att = self.self_attn(x_q, x, x, mask)
        if self.concat_after:
            x_concat = torch.cat((x, x_att), dim=-1)
            x = residual + stoch_layer_coeff * self.concat_linear(x_concat)
        else:
            x = residual + stoch_layer_coeff * self.dropout(x_att)
        if not self.normalize_before:
            x = self.norm_mha(x)
        # convolution module
        if self.conv_module is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_conv(x)
            x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))
            if not self.normalize_before:
                x = self.norm_conv(x)
        # feed forward module
        residual = x
        if self.normalize_before:
            x = self.norm_ff(x)
        x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(
            self.feed_forward(x)
        )
        if not self.normalize_before:
            x = self.norm_ff(x)
        # An extra final LayerNorm is applied only when the conv module exists.
        if self.conv_module is not None:
            x = self.norm_final(x)
        if cache is not None:
            x = torch.cat([cache, x], dim=1)
        if pos_emb is not None:
            return (x, pos_emb), mask
        return x, mask
| 6,761 | 36.566667 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/conformer/encoder.py | # Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder definition."""
import logging
import torch
from espnet.nets.pytorch_backend.conformer.convolution import ConvolutionModule
from espnet.nets.pytorch_backend.conformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.nets_utils import get_activation
from espnet.nets.pytorch_backend.transducer.vgg2l import VGG2L
from espnet.nets.pytorch_backend.transformer.attention import (
LegacyRelPositionMultiHeadedAttention,
MultiHeadedAttention,
RelPositionMultiHeadedAttention,
)
from espnet.nets.pytorch_backend.transformer.embedding import (
LegacyRelPositionalEncoding,
PositionalEncoding,
RelPositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling
class Encoder(torch.nn.Module):
    """Conformer encoder module.
    Args:
        idim (int): Input dimension.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied. i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        pos_enc_layer_type (str): Encoder positional encoding layer type.
        selfattention_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernerl size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer.
            indices start from 1.
            if not None, intermediate outputs are returned (which changes return type
            signature.)
    """
    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        normalize_before=True,
        concat_after=False,
        positionwise_layer_type="linear",
        positionwise_conv_kernel_size=1,
        macaron_style=False,
        pos_enc_layer_type="abs_pos",
        selfattention_layer_type="selfattn",
        activation_type="swish",
        use_cnn_module=False,
        zero_triu=False,
        cnn_module_kernel=31,
        padding_idx=-1,
        stochastic_depth_rate=0.0,
        intermediate_layers=None,
        ctc_softmax=None,
        conditioning_layer_dim=None,
    ):
        """Construct an Encoder object."""
        super(Encoder, self).__init__()
        activation = get_activation(activation_type)
        # Positional-encoding selection; relative variants must be paired
        # with the matching self-attention type (asserted here and below).
        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            pos_enc_class = LegacyRelPositionalEncoding
            assert selfattention_layer_type == "legacy_rel_selfattn"
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
        # Input layer maps features to attention_dim; conv front-ends also
        # subsample time by 4 (tracked in conv_subsampling_factor).
        self.conv_subsampling_factor = 1
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(idim, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                idim,
                attention_dim,
                dropout_rate,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
            self.conv_subsampling_factor = 4
        elif input_layer == "vgg2l":
            self.embed = VGG2L(idim, attention_dim)
            self.conv_subsampling_factor = 4
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer is None:
            self.embed = torch.nn.Sequential(
                pos_enc_class(attention_dim, positional_dropout_rate)
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        # self-attention module definition
        if selfattention_layer_type == "selfattn":
            logging.info("encoder self-attention layer type = self-attention")
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                attention_dim,
                attention_dropout_rate,
            )
        elif selfattention_layer_type == "legacy_rel_selfattn":
            assert pos_enc_layer_type == "legacy_rel_pos"
            encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                attention_dim,
                attention_dropout_rate,
            )
        elif selfattention_layer_type == "rel_selfattn":
            logging.info("encoder self-attention layer type = relative self-attention")
            assert pos_enc_layer_type == "rel_pos"
            encoder_selfattn_layer = RelPositionMultiHeadedAttention
            encoder_selfattn_layer_args = (
                attention_heads,
                attention_dim,
                attention_dropout_rate,
                zero_triu,
            )
        else:
            raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type)
        # feed-forward module definition
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                attention_dim,
                linear_units,
                dropout_rate,
                activation,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                attention_dim,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                attention_dim,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        # convolution module definition
        convolution_layer = ConvolutionModule
        convolution_layer_args = (attention_dim, cnn_module_kernel, activation)
        # Stochastic-depth rate grows linearly with depth: layer i gets
        # stochastic_depth_rate * (i + 1) / num_blocks.
        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                attention_dim,
                encoder_selfattn_layer(*encoder_selfattn_layer_args),
                positionwise_layer(*positionwise_layer_args),
                positionwise_layer(*positionwise_layer_args) if macaron_style else None,
                convolution_layer(*convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
                concat_after,
                stochastic_depth_rate * float(1 + lnum) / num_blocks,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(attention_dim)
        self.intermediate_layers = intermediate_layers
        # Self-conditioned CTC: when a ctc_softmax callable is given, the CTC
        # posteriors at intermediate layers are projected back and added to
        # the encoder stream.
        self.use_conditioning = True if ctc_softmax is not None else False
        if self.use_conditioning:
            self.ctc_softmax = ctc_softmax
            self.conditioning_layer = torch.nn.Linear(
                conditioning_layer_dim, attention_dim
            )
    def forward(self, xs, masks):
        """Encode input sequence.
        Args:
            xs (torch.Tensor): Input tensor (#batch, time, idim).
            masks (torch.Tensor): Mask tensor (#batch, 1, time).
        Returns:
            torch.Tensor: Output tensor (#batch, time, attention_dim).
            torch.Tensor: Mask tensor (#batch, 1, time).
        """
        # Conv front-ends subsample, so they must also shorten the mask.
        if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):
            xs, masks = self.embed(xs, masks)
        else:
            xs = self.embed(xs)
        if self.intermediate_layers is None:
            xs, masks = self.encoders(xs, masks)
        else:
            # Run layer by layer so intermediate outputs can be tapped.
            intermediate_outputs = []
            for layer_idx, encoder_layer in enumerate(self.encoders):
                xs, masks = encoder_layer(xs, masks)
                if (
                    self.intermediate_layers is not None
                    and layer_idx + 1 in self.intermediate_layers
                ):
                    # intermediate branches also require normalization.
                    encoder_output = xs
                    if isinstance(encoder_output, tuple):
                        encoder_output = encoder_output[0]
                    if self.normalize_before:
                        encoder_output = self.after_norm(encoder_output)
                    intermediate_outputs.append(encoder_output)
                    if self.use_conditioning:
                        intermediate_result = self.ctc_softmax(encoder_output)
                        # xs may carry a positional-embedding tuple; condition
                        # only the feature part.
                        if isinstance(xs, tuple):
                            x, pos_emb = xs[0], xs[1]
                            x = x + self.conditioning_layer(intermediate_result)
                            xs = (x, pos_emb)
                        else:
                            xs = xs + self.conditioning_layer(intermediate_result)
        if isinstance(xs, tuple):
            xs = xs[0]
        if self.normalize_before:
            xs = self.after_norm(xs)
        if self.intermediate_layers is not None:
            return xs, masks, intermediate_outputs
        return xs, masks
| 12,154 | 39.516667 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/conformer/convolution.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""ConvolutionModule definition."""
from torch import nn
class ConvolutionModule(nn.Module):
    """ConvolutionModule in Conformer model.

    Pipeline: pointwise expansion + GLU gate -> depthwise conv ('SAME'
    padding) -> batch norm + activation -> pointwise projection.

    Args:
        channels (int): The number of channels of conv layers.
        kernel_size (int): Kernel size of the depthwise conv (must be odd).
        activation (torch.nn.Module): Activation applied after batch norm.
        bias (bool): Whether conv layers carry bias terms.
    """

    def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
        """Construct an ConvolutionModule object."""
        super(ConvolutionModule, self).__init__()
        # An odd kernel is required so 'SAME' padding preserves the length.
        assert (kernel_size - 1) % 2 == 0
        same_padding = (kernel_size - 1) // 2
        self.pointwise_conv1 = nn.Conv1d(
            channels,
            2 * channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=bias,
        )
        self.depthwise_conv = nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=same_padding,
            groups=channels,
            bias=bias,
        )
        self.norm = nn.BatchNorm1d(channels)
        self.pointwise_conv2 = nn.Conv1d(
            channels,
            channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=bias,
        )
        self.activation = activation

    def forward(self, x):
        """Compute convolution module.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, channels).

        Returns:
            torch.Tensor: Output tensor (#batch, time, channels).
        """
        # Conv1d expects (batch, channels, time).
        y = x.transpose(1, 2)
        # Pointwise expansion to 2*channels, gated by GLU over channels.
        y = nn.functional.glu(self.pointwise_conv1(y), dim=1)
        # Depthwise convolution, then batch norm and activation.
        y = self.activation(self.norm(self.depthwise_conv(y)))
        # Final pointwise projection, back to (batch, time, channels).
        return self.pointwise_conv2(y).transpose(1, 2)
| 2,193 | 26.425 | 79 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/conformer/contextual_block_encoder_layer.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 21 16:57:31 2021.
@author: Keqi Deng (UCAS)
"""
import torch
from torch import nn
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class ContextualBlockEncoderLayer(nn.Module):
    """Contexutal Block Encoder layer module.
    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance
            can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        conv_module (torch.nn.Module): Convolution module instance.
            `ConvlutionModule` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        total_layer_num (int): Total number of layers
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied. i.e. x -> x + att(x)
    """
    def __init__(
        self,
        size,
        self_attn,
        feed_forward,
        feed_forward_macaron,
        conv_module,
        dropout_rate,
        total_layer_num,
        normalize_before=True,
        concat_after=False,
    ):
        """Construct an EncoderLayer object."""
        super(ContextualBlockEncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        self.conv_module = conv_module
        self.norm1 = LayerNorm(size)
        self.norm2 = LayerNorm(size)
        if feed_forward_macaron is not None:
            self.norm_ff_macaron = LayerNorm(size)
            # Macaron style: each of the two half-step FFNs contributes 0.5.
            self.ff_scale = 0.5
        else:
            self.ff_scale = 1.0
        if self.conv_module is not None:
            self.norm_conv = LayerNorm(size) # for the CNN module
            self.norm_final = LayerNorm(size) # for the final output of the block
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        self.total_layer_num = total_layer_num
        if self.concat_after:
            self.concat_linear = nn.Linear(size + size, size)
    def forward(
        self,
        x,
        mask,
        infer_mode=False,
        past_ctx=None,
        next_ctx=None,
        is_short_segment=False,
        layer_idx=0,
        cache=None,
    ):
        """Calculate forward propagation.

        Dispatches to the training-time path unless we are in pure
        inference mode (streaming decoding).
        """
        if self.training or not infer_mode:
            return self.forward_train(x, mask, past_ctx, next_ctx, layer_idx, cache)
        else:
            return self.forward_infer(
                x, mask, past_ctx, next_ctx, is_short_segment, layer_idx, cache
            )
    def forward_train(
        self, x, mask, past_ctx=None, next_ctx=None, layer_idx=0, cache=None
    ):
        """Compute encoded features.
        Args:
            x_input (torch.Tensor): Input tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, time).
            past_ctx (torch.Tensor): Previous contexutal vector
            next_ctx (torch.Tensor): Next contexutal vector
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).
        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time).
            cur_ctx (torch.Tensor): Current contexutal vector
            next_ctx (torch.Tensor): Next contexutal vector
            layer_idx (int): layer index number
        """
        nbatch = x.size(0)
        nblock = x.size(1)
        if past_ctx is not None:
            if next_ctx is None:
                # store all context vectors in one tensor
                next_ctx = past_ctx.new_zeros(
                    nbatch, nblock, self.total_layer_num, x.size(-1)
                )
            else:
                # Overwrite each block's first frame with this layer's
                # context vector from the previous pass.
                x[:, :, 0] = past_ctx[:, :, layer_idx]
        # reshape ( nbatch, nblock, block_size + 2, dim )
        #      -> ( nbatch * nblock, block_size + 2, dim )
        x = x.view(-1, x.size(-2), x.size(-1))
        if mask is not None:
            mask = mask.view(-1, mask.size(-2), mask.size(-1))
        # whether to use macaron style
        if self.feed_forward_macaron is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_ff_macaron(x)
            x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x))
            if not self.normalize_before:
                x = self.norm_ff_macaron(x)
        residual = x
        if self.normalize_before:
            x = self.norm1(x)
        if cache is None:
            x_q = x
        else:
            # Incremental path: only the newest frame acts as the query.
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]
        if self.concat_after:
            x_concat = torch.cat((x, self.self_attn(x_q, x, x, mask)), dim=-1)
            x = residual + self.concat_linear(x_concat)
        else:
            x = residual + self.dropout(self.self_attn(x_q, x, x, mask))
        if not self.normalize_before:
            x = self.norm1(x)
        # convolution module
        if self.conv_module is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_conv(x)
            x = residual + self.dropout(self.conv_module(x))
            if not self.normalize_before:
                x = self.norm_conv(x)
        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)
        if self.conv_module is not None:
            x = self.norm_final(x)
        if cache is not None:
            x = torch.cat([cache, x], dim=1)
        layer_idx += 1
        # reshape ( nbatch * nblock, block_size + 2, dim )
        #      -> ( nbatch, nblock, block_size + 2, dim )
        x = x.view(nbatch, -1, x.size(-2), x.size(-1)).squeeze(1)
        if mask is not None:
            mask = mask.view(nbatch, -1, mask.size(-2), mask.size(-1)).squeeze(1)
        if next_ctx is not None and layer_idx < self.total_layer_num:
            # Save each block's last frame as the context vector consumed by
            # the *next* block at this layer index.
            next_ctx[:, 0, layer_idx, :] = x[:, 0, -1, :]
            next_ctx[:, 1:, layer_idx, :] = x[:, 0:-1, -1, :]
        return x, mask, False, next_ctx, next_ctx, False, layer_idx
    def forward_infer(
        self,
        x,
        mask,
        past_ctx=None,
        next_ctx=None,
        is_short_segment=False,
        layer_idx=0,
        cache=None,
    ):
        """Compute encoded features.
        Args:
            x_input (torch.Tensor): Input tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).
            past_ctx (torch.Tensor): Previous contexutal vector
            next_ctx (torch.Tensor): Next contexutal vector
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).
        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, 1, time).
            cur_ctx (torch.Tensor): Current contexutal vector
            next_ctx (torch.Tensor): Next contexutal vector
            layer_idx (int): layer index number
        """
        nbatch = x.size(0)
        nblock = x.size(1)
        # if layer_idx == 0, next_ctx has to be None
        if layer_idx == 0:
            assert next_ctx is None
            # One context vector per layer for the last block of this segment.
            next_ctx = x.new_zeros(nbatch, self.total_layer_num, x.size(-1))
        # reshape ( nbatch, nblock, block_size + 2, dim )
        #      -> ( nbatch * nblock, block_size + 2, dim )
        x = x.view(-1, x.size(-2), x.size(-1))
        if mask is not None:
            mask = mask.view(-1, mask.size(-2), mask.size(-1))
        # whether to use macaron style
        if self.feed_forward_macaron is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_ff_macaron(x)
            x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x))
            if not self.normalize_before:
                x = self.norm_ff_macaron(x)
        residual = x
        if self.normalize_before:
            x = self.norm1(x)
        if cache is None:
            x_q = x
        else:
            # Incremental path: only the newest frame acts as the query.
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]
        if self.concat_after:
            x_concat = torch.cat((x, self.self_attn(x_q, x, x, mask)), dim=-1)
            x = residual + self.concat_linear(x_concat)
        else:
            x = residual + self.dropout(self.self_attn(x_q, x, x, mask))
        if not self.normalize_before:
            x = self.norm1(x)
        # convolution module
        if self.conv_module is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_conv(x)
            x = residual + self.dropout(self.conv_module(x))
            if not self.normalize_before:
                x = self.norm_conv(x)
        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)
        if self.conv_module is not None:
            x = self.norm_final(x)
        if cache is not None:
            x = torch.cat([cache, x], dim=1)
        # reshape ( nbatch * nblock, block_size + 2, dim )
        #      -> ( nbatch, nblock, block_size + 2, dim )
        x = x.view(nbatch, nblock, x.size(-2), x.size(-1))
        if mask is not None:
            mask = mask.view(nbatch, nblock, mask.size(-2), mask.size(-1))
        # Propagete context information (the last frame of each block)
        # to the first frame
        # of the next block
        if not is_short_segment:
            if past_ctx is None:
                # First block of an utterance
                x[:, 0, 0, :] = x[:, 0, -1, :]
            else:
                # Seed the first block with the previous segment's context
                # for this layer.
                x[:, 0, 0, :] = past_ctx[:, layer_idx, :]
            if nblock > 1:
                x[:, 1:, 0, :] = x[:, 0:-1, -1, :]
            next_ctx[:, layer_idx, :] = x[:, -1, -1, :]
        else:
            # Segment too short to carry context across segments.
            next_ctx = None
        return x, mask, True, past_ctx, next_ctx, is_short_segment, layer_idx + 1
| 11,220 | 35.080386 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/conformer/swish.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Swish() activation function for Conformer."""
import torch
class Swish(torch.nn.Module):
    """Swish activation module: f(x) = x * sigmoid(x)."""

    def forward(self, x):
        """Apply the Swish activation element-wise."""
        gate = torch.sigmoid(x)
        return x * gate
| 483 | 24.473684 | 70 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/maskctc/add_mask_token.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Waseda University (Yosuke Higuchi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Token masking module for Masked LM."""
import numpy
def mask_uniform(ys_pad, mask_token, eos, ignore_id):
    """Replace random tokens with <mask> label and add <eos> label.

    The number of <mask> is chosen from a uniform distribution
    between one and the target sequence's length.

    :param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
    :param int mask_token: index of <mask>
    :param int eos: index of <eos>
    :param int ignore_id: index of padding
    :return: padded tensor of masked input sequences (B, Lmax)
    :rtype: torch.Tensor
    :return: padded tensor of prediction targets (B, Lmax)
    :rtype: torch.Tensor
    """
    from espnet.nets.pytorch_backend.nets_utils import pad_list

    ys = [y[y != ignore_id] for y in ys_pad]  # parse padded ys
    # Targets default to ignore_id; only the masked positions are revealed.
    ys_out = [y.new(y.size()).fill_(ignore_id) for y in ys]
    ys_in = [y.clone() for y in ys]
    for i in range(len(ys)):
        num_samples = numpy.random.randint(1, len(ys[i]) + 1)
        # Sample *distinct* positions: with the numpy default (replace=True)
        # the same index can be drawn twice, so fewer than num_samples tokens
        # would actually be masked, contradicting the documented behavior.
        idx = numpy.random.choice(len(ys[i]), num_samples, replace=False)
        ys_in[i][idx] = mask_token
        ys_out[i][idx] = ys[i][idx]
    return pad_list(ys_in, eos), pad_list(ys_out, ignore_id)
| 1,352 | 32.825 | 74 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/maskctc/mask.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Waseda University (Yosuke Higuchi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Attention masking module for Masked LM."""
def square_mask(ys_in_pad, ignore_id):
    """Create attention mask to avoid attending on padding tokens.

    :param torch.Tensor ys_in_pad: batch of padded target sequences (B, Lmax)
    :param int ignore_id: index of padding
    :rtype: torch.Tensor (B, Lmax, Lmax)
    """
    # valid[b, l] is True for real tokens, False for padding.
    valid = ys_in_pad != ignore_id
    # Broadcasted outer product: position (i, j) is attendable only when
    # both token i and token j are non-padding.
    return valid.unsqueeze(-1) & valid.unsqueeze(-2)
| 803 | 31.16 | 74 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/embedding.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Positional Encoding Module."""
import math
import torch
def _pre_hook(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
"""Perform pre-hook in load_state_dict for backward compatibility.
Note:
We saved self.pe until v.0.5.2 but we have omitted it later.
Therefore, we remove the item "pe" from `state_dict` for backward compatibility.
"""
k = prefix + "pe"
if k in state_dict:
state_dict.pop(k)
class PositionalEncoding(torch.nn.Module):
    """Positional encoding.
    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
        reverse (bool): Whether to reverse the input position. Only for
        the class LegacyRelPositionalEncoding. We remove it in the current
        class RelPositionalEncoding.
    """
    def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
        """Construct an PositionalEncoding object."""
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        self.reverse = reverse
        # Inputs are scaled by sqrt(d_model) before the encoding is added.
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        # Pre-compute the table for max_len positions; extended lazily later.
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
        # Drop the legacy "pe" buffer from pre-v0.5.2 checkpoints on load.
        self._register_load_state_dict_pre_hook(_pre_hook)
    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            # Reuse the cached table when it is long enough; only align
            # dtype/device with the incoming tensor.
            if self.pe.size(1) >= x.size(1):
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        pe = torch.zeros(x.size(1), self.d_model)
        if self.reverse:
            position = torch.arange(
                x.size(1) - 1, -1, -1.0, dtype=torch.float32
            ).unsqueeze(1)
        else:
            position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        # Interleave sin/cos over the feature dimension (Vaswani et al., 2017).
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.pe = pe.to(device=x.device, dtype=x.dtype)
    def forward(self, x: torch.Tensor):
        """Add positional encoding.
        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).
        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
        """
        self.extend_pe(x)
        x = x * self.xscale + self.pe[:, : x.size(1)]
        return self.dropout(x)
class ScaledPositionalEncoding(PositionalEncoding):
    """Scaled positional encoding module.
    See Sec. 3.2 https://arxiv.org/abs/1809.08895
    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """
    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Initialize class."""
        super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)
        # Learnable scale (alpha in the paper) applied to the encoding.
        self.alpha = torch.nn.Parameter(torch.tensor(1.0))
    def reset_parameters(self):
        """Reset parameters."""
        self.alpha.data = torch.tensor(1.0)
    def forward(self, x):
        """Add positional encoding.
        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).
        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
        """
        self.extend_pe(x)
        # Unlike the base class, the input is NOT rescaled by sqrt(d_model);
        # instead the encoding is scaled by the learned alpha.
        x = x + self.alpha * self.pe[:, : x.size(1)]
        return self.dropout(x)
class LearnableFourierPosEnc(torch.nn.Module):
    """Learnable Fourier-feature positional encoding.

    See https://arxiv.org/pdf/2106.02795.pdf

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
        gamma (float): init parameter for the positional kernel variance
            see https://arxiv.org/pdf/2106.02795.pdf.
        apply_scaling (bool): Whether to scale the input before adding the pos encoding.
        hidden_dim (int): if not None, we modulate the pos encodings with
            an MLP whose hidden layer has hidden_dim neurons.
    """

    def __init__(
        self,
        d_model,
        dropout_rate=0.0,
        max_len=5000,
        gamma=1.0,
        apply_scaling=False,
        hidden_dim=None,
    ):
        """Initialize class."""
        super().__init__()
        self.d_model = d_model
        # Optional sqrt(d_model) input scaling, as in vanilla Transformer.
        self.xscale = math.sqrt(d_model) if apply_scaling else 1.0
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.max_len = max_len
        # gamma controls the variance of the random Fourier kernel.
        self.gamma = gamma if gamma is not None else d_model // 2
        assert (
            d_model % 2 == 0
        ), "d_model should be divisible by two in order to use this layer."
        # Learnable projection of scalar positions onto d_model/2 frequencies.
        self.w_r = torch.nn.Parameter(torch.empty(1, d_model // 2))
        self._reset()  # init the weights
        self.hidden_dim = hidden_dim
        if hidden_dim is not None:
            # Optional MLP modulation of the Fourier features.
            self.mlp = torch.nn.Sequential(
                torch.nn.Linear(d_model, hidden_dim),
                torch.nn.GELU(),
                torch.nn.Linear(hidden_dim, d_model),
            )

    def _reset(self):
        # Gaussian init with std 1/sqrt(gamma), per the paper.
        self.w_r.data = torch.normal(
            0, (1 / math.sqrt(self.gamma)), (1, self.d_model // 2)
        )

    def extend_pe(self, x):
        """Compute the Fourier-feature table for the current input length."""
        positions = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1).to(x)
        projected = torch.matmul(positions, self.w_r)
        features = torch.cat((torch.cos(projected), torch.sin(projected)), -1)
        features /= math.sqrt(self.d_model)
        if self.hidden_dim is None:
            return features.unsqueeze(0)
        return self.mlp(features.unsqueeze(0))

    def forward(self, x: torch.Tensor):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
        """
        pos_enc = self.extend_pe(x)
        encoded = x * self.xscale + pos_enc
        return self.dropout(encoded)
class LegacyRelPositionalEncoding(PositionalEncoding):
    """Relative positional encoding module (old version).

    Details can be found in https://github.com/espnet/espnet/pull/2816.
    See : Appendix B in https://arxiv.org/abs/1901.02860

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Initialize class."""
        # reverse=True makes the base class lay out positions from
        # max_len-1 down to 0, which is what the legacy rel-pos attention expects.
        super().__init__(
            d_model=d_model,
            dropout_rate=dropout_rate,
            max_len=max_len,
            reverse=True,
        )

    def forward(self, x):
        """Compute positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
            torch.Tensor: Positional embedding tensor (1, time, `*`).
        """
        self.extend_pe(x)
        scaled = x * self.xscale
        pos_emb = self.pe[:, : x.size(1)]
        return self.dropout(scaled), self.dropout(pos_emb)
class RelPositionalEncoding(torch.nn.Module):
    """Relative positional encoding module (new implementation).

    Details can be found in https://github.com/espnet/espnet/pull/2816.
    See : Appendix B in https://arxiv.org/abs/1901.02860

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Construct a RelPositionalEncoding object."""
        super().__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """Reset the positional encodings.

        The cached table holds both positive and negative relative offsets,
        so it must be at least ``2 * x.size(1) - 1`` long to be reusable.
        """
        length = x.size(1)
        if self.pe is not None and self.pe.size(1) >= 2 * length - 1:
            if self.pe.dtype != x.dtype or self.pe.device != x.device:
                self.pe = self.pe.to(dtype=x.dtype, device=x.device)
            return
        # Positive relative positions (key to the left of the query, i > j)
        # and negative ones (i < j) are encoded separately.
        position = torch.arange(0, length, dtype=torch.float32).unsqueeze(1)
        inv_freq = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        pe_positive = torch.zeros(length, self.d_model)
        pe_positive[:, 0::2] = torch.sin(position * inv_freq)
        pe_positive[:, 1::2] = torch.cos(position * inv_freq)
        pe_negative = torch.zeros(length, self.d_model)
        pe_negative[:, 0::2] = torch.sin(-1 * position * inv_freq)
        pe_negative[:, 1::2] = torch.cos(-1 * position * inv_freq)
        # Reverse the positive half and drop the duplicated zero offset from
        # the negative half, then concatenate. This layout supports the
        # shifting trick from https://arxiv.org/abs/1901.02860.
        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
        pe_negative = pe_negative[1:].unsqueeze(0)
        pe = torch.cat([pe_positive, pe_negative], dim=1)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
        """
        self.extend_pe(x)
        scaled = x * self.xscale
        # Slice a window of 2*time-1 offsets centered on relative position 0.
        center = self.pe.size(1) // 2
        pos_emb = self.pe[:, center - x.size(1) + 1 : center + x.size(1)]
        return self.dropout(scaled), self.dropout(pos_emb)
class StreamPositionalEncoding(torch.nn.Module):
    """Streaming Positional encoding.

    Same sinusoidal table as the absolute positional encoding, but
    ``forward`` accepts a ``start_idx`` so the encoding can be applied
    chunk by chunk in streaming inference.

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Construct a StreamPositionalEncoding object."""
        super().__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        self.tmp = torch.tensor(0.0).expand(1, max_len)
        self.extend_pe(self.tmp.size(1), self.tmp.device, self.tmp.dtype)
        self._register_load_state_dict_pre_hook(_pre_hook)

    def extend_pe(self, length, device, dtype):
        """Ensure the cached table covers ``length`` steps on device/dtype."""
        if self.pe is not None and self.pe.size(1) >= length:
            if self.pe.dtype != dtype or self.pe.device != device:
                self.pe = self.pe.to(dtype=dtype, device=device)
            return
        position = torch.arange(0, length, dtype=torch.float32).unsqueeze(1)
        inv_freq = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        table = torch.zeros(length, self.d_model)
        table[:, 0::2] = torch.sin(position * inv_freq)
        table[:, 1::2] = torch.cos(position * inv_freq)
        self.pe = table.unsqueeze(0).to(device=device, dtype=dtype)

    def forward(self, x: torch.Tensor, start_idx: int = 0):
        """Add positional encoding starting at offset ``start_idx``.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
        """
        self.extend_pe(x.size(1) + start_idx, x.device, x.dtype)
        encoded = x * self.xscale + self.pe[:, start_idx : start_idx + x.size(1)]
        return self.dropout(encoded)
| 12,758 | 32.054404 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/lightconv2d.py | """Lightweight 2-Dimensional Convolution module."""
import numpy
import torch
import torch.nn.functional as F
from torch import nn
MIN_VALUE = float(numpy.finfo(numpy.float32).min)
class LightweightConvolution2D(nn.Module):
    """Lightweight 2-Dimensional Convolution layer.

    This implementation is based on
    https://github.com/pytorch/fairseq/tree/master/fairseq

    Applies a weight-shared (lightweight) convolution along the time axis and,
    in parallel, a single shared kernel along the feature (frequency) axis;
    both outputs are concatenated and projected back to ``n_feat``.

    Args:
        wshare (int): the number of kernel of convolution
        n_feat (int): the number of features
        dropout_rate (float): dropout_rate
        kernel_size (int): kernel size (length)
        use_kernel_mask (bool): Use causal mask or not for convolution kernel
        use_bias (bool): Use bias term or not.
    """
    def __init__(
        self,
        wshare,
        n_feat,
        dropout_rate,
        kernel_size,
        use_kernel_mask=False,
        use_bias=False,
    ):
        """Construct Lightweight 2-Dimensional Convolution layer."""
        super(LightweightConvolution2D, self).__init__()
        # Channels must divide evenly into the weight-sharing groups.
        assert n_feat % wshare == 0
        self.wshare = wshare
        self.use_kernel_mask = use_kernel_mask
        self.dropout_rate = dropout_rate
        self.kernel_size = kernel_size
        self.padding_size = int(kernel_size / 2)
        # linear -> GLU -> lightconv -> linear
        self.linear1 = nn.Linear(n_feat, n_feat * 2)
        self.linear2 = nn.Linear(n_feat * 2, n_feat)
        self.act = nn.GLU()
        # lightconv related
        # One kernel per sharing group for the time-axis convolution.
        self.weight = nn.Parameter(
            torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)
        )
        # Single kernel shared across the whole frequency-axis convolution.
        self.weight_f = nn.Parameter(torch.Tensor(1, 1, kernel_size).uniform_(0, 1))
        self.use_bias = use_bias
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_feat))
        # mask of kernel
        # Causal mask: ones on the left half (past + current), zeros on the
        # right half (future) of the kernel taps.
        kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))
        kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))
        self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)
    def forward(self, query, key, value, mask):
        """Forward of 'Lightweight 2-Dimensional Convolution'.

        This function takes query, key and value but uses only query.
        This is just for compatibility with self-attention layer (attention.py)

        Args:
            query (torch.Tensor): (batch, time1, d_model) input tensor
            key (torch.Tensor): (batch, time2, d_model) NOT USED
            value (torch.Tensor): (batch, time2, d_model) NOT USED
            mask (torch.Tensor): (batch, time1, time2) mask

        Return:
            x (torch.Tensor): (batch, time1, d_model) output
        """
        # linear -> GLU -> lightconv -> linear
        x = query
        B, T, C = x.size()
        H = self.wshare
        # first linear layer: doubles channels so GLU can halve them back to C
        x = self.linear1(x)
        # GLU activation
        x = self.act(x)
        # convolution along frequency axis
        # Softmax-normalized kernel, then dropout on the kernel weights
        # ("DropConnect"-style regularization as in fairseq).
        weight_f = F.softmax(self.weight_f, dim=-1)
        weight_f = F.dropout(weight_f, self.dropout_rate, training=self.training)
        # Broadcast the single kernel to one group per (batch, time) position
        # so a grouped conv1d applies it independently to every frame.
        weight_new = torch.zeros(
            B * T, 1, self.kernel_size, device=x.device, dtype=x.dtype
        ).copy_(weight_f)
        xf = F.conv1d(
            x.view(1, B * T, C), weight_new, padding=self.padding_size, groups=B * T
        ).view(B, T, C)
        # lightconv
        x = x.transpose(1, 2).contiguous().view(-1, H, T)  # B x C x T
        weight = F.dropout(self.weight, self.dropout_rate, training=self.training)
        if self.use_kernel_mask:
            self.kernel_mask = self.kernel_mask.to(x.device)
            # -inf before softmax zeroes out future taps (causal convolution).
            weight = weight.masked_fill(self.kernel_mask == 0.0, float("-inf"))
        weight = F.softmax(weight, dim=-1)
        x = F.conv1d(x, weight, padding=self.padding_size, groups=self.wshare).view(
            B, C, T
        )
        if self.use_bias:
            x = x + self.bias.view(1, -1, 1)
        x = x.transpose(1, 2)  # B x T x C
        # Concatenate time-conv and frequency-conv branches.
        x = torch.cat((x, xf), -1)  # B x T x Cx2
        if mask is not None and not self.use_kernel_mask:
            mask = mask.transpose(-1, -2)
            x = x.masked_fill(mask == 0, 0.0)
        # second linear layer: projects 2C back down to C
        x = self.linear2(x)
        return x
| 4,229 | 33.112903 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/encoder_layer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder self-attention layer definition."""
import torch
from torch import nn
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class EncoderLayer(nn.Module):
    """Encoder layer module.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance
            can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied. i.e. x -> x + att(x)
        stochastic_depth_rate (float): Probability to skip this layer.
            During training, the layer may skip residual computation and return input
            as-is with given probability.
    """
    def __init__(
        self,
        size,
        self_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
        stochastic_depth_rate=0.0,
    ):
        """Construct an EncoderLayer object."""
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.norm1 = LayerNorm(size)  # applied around self-attention
        self.norm2 = LayerNorm(size)  # applied around feed-forward
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        if self.concat_after:
            self.concat_linear = nn.Linear(size + size, size)
        self.stochastic_depth_rate = stochastic_depth_rate
    def forward(self, x, mask, cache=None):
        """Compute encoded features.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, 1, time).
        """
        skip_layer = False
        # with stochastic depth, residual connection `x + f(x)` becomes
        # `x <- x + 1 / (1 - p) * f(x)` at training time.
        stoch_layer_coeff = 1.0
        if self.training and self.stochastic_depth_rate > 0:
            skip_layer = torch.rand(1).item() < self.stochastic_depth_rate
            stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)
        if skip_layer:
            # Layer is skipped this step: pass the input through unchanged
            # (still appending to the cache so downstream shapes stay valid).
            if cache is not None:
                x = torch.cat([cache, x], dim=1)
            return x, mask
        residual = x
        if self.normalize_before:
            x = self.norm1(x)
        if cache is None:
            x_q = x
        else:
            # Incremental decoding: only the newest frame serves as query;
            # the cache holds the already-encoded prefix of length time - 1.
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]
        if self.concat_after:
            # x -> x + linear(concat(x, att(x)))
            x_concat = torch.cat((x, self.self_attn(x_q, x, x, mask)), dim=-1)
            x = residual + stoch_layer_coeff * self.concat_linear(x_concat)
        else:
            x = residual + stoch_layer_coeff * self.dropout(
                self.self_attn(x_q, x, x, mask)
            )
        if not self.normalize_before:
            x = self.norm1(x)
        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)
        if cache is not None:
            # Re-attach the cached prefix so the output covers the full time axis.
            x = torch.cat([cache, x], dim=1)
        return x, mask
| 4,261 | 34.516667 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/label_smoothing_loss.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Label smoothing module."""
import torch
from torch import nn
class LabelSmoothingLoss(nn.Module):
    """Label-smoothing loss.

    :param int size: the number of class
    :param int padding_idx: ignored class id
    :param float smoothing: smoothing rate (0.0 means the conventional CE)
    :param bool normalize_length: normalize loss by sequence length if True
    :param torch.nn.Module criterion: loss function to be smoothed
    """

    def __init__(
        self,
        size,
        padding_idx,
        smoothing,
        normalize_length=False,
        criterion=nn.KLDivLoss(reduction="none"),
    ):
        """Construct an LabelSmoothingLoss object."""
        super().__init__()
        self.criterion = criterion
        self.padding_idx = padding_idx
        # Probability mass kept on the true class; the rest is spread uniformly.
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.size = size
        self.true_dist = None
        self.normalize_length = normalize_length

    def forward(self, x, target):
        """Compute loss between x and target.

        :param torch.Tensor x: prediction (batch, seqlen, class)
        :param torch.Tensor target:
            target signal masked with self.padding_id (batch, seqlen)
        :return: scalar float value
        :rtype torch.Tensor
        """
        assert x.size(2) == self.size
        batch_size = x.size(0)
        logits = x.view(-1, self.size)
        flat_target = target.view(-1)
        with torch.no_grad():
            # Smoothed target distribution: smoothing/(size-1) everywhere,
            # confidence on the reference label.
            smooth_dist = logits.clone()
            smooth_dist.fill_(self.smoothing / (self.size - 1))
            pad_mask = flat_target == self.padding_idx  # (B,)
            n_valid = len(flat_target) - pad_mask.sum().item()
            safe_target = flat_target.masked_fill(pad_mask, 0)  # avoid -1 index
            smooth_dist.scatter_(1, safe_target.unsqueeze(1), self.confidence)
        kl = self.criterion(torch.log_softmax(logits, dim=1), smooth_dist)
        denom = n_valid if self.normalize_length else batch_size
        # Padded positions contribute zero to the loss.
        return kl.masked_fill(pad_mask.unsqueeze(1), 0).sum() / denom
| 2,164 | 32.828125 | 75 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/positionwise_feed_forward.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Positionwise feed forward layer definition."""
import torch
class PositionwiseFeedForward(torch.nn.Module):
    """Positionwise feed forward layer.

    Projects each position independently to ``hidden_units``, applies the
    activation and dropout, then projects back to ``idim``.

    Args:
        idim (int): Input dimension.
        hidden_units (int): The number of hidden units.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):
        """Construct an PositionwiseFeedForward object."""
        super().__init__()
        self.w_1 = torch.nn.Linear(idim, hidden_units)
        self.w_2 = torch.nn.Linear(hidden_units, idim)
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.activation = activation

    def forward(self, x):
        """Forward function."""
        hidden = self.activation(self.w_1(x))
        return self.w_2(self.dropout(hidden))
| 983 | 28.818182 | 85 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/encoder_mix.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder Mix definition."""
import torch
from espnet.nets.pytorch_backend.transducer.vgg2l import VGG2L
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import Conv2dSubsampling
class EncoderMix(Encoder, torch.nn.Module):
    """Transformer encoder module for multi-speaker (mixed) speech.

    The embedded mixture is processed by ``num_spkrs`` speaker-differentiating
    (SD) encoder stacks; each SD output then goes through the shared
    recognition encoder ``self.encoders`` inherited from :class:`Encoder`.

    :param int idim: input dim
    :param int attention_dim: dimension of attention
    :param int attention_heads: the number of heads of multi head attention
    :param int linear_units: the number of units of position-wise feed forward
    :param int num_blocks_sd: the number of speaker-differentiating encoder blocks
    :param int num_blocks_rec: the number of shared recognition encoder blocks
    :param float dropout_rate: dropout rate
    :param float attention_dropout_rate: dropout rate in attention
    :param float positional_dropout_rate: dropout rate after adding positional encoding
    :param str or torch.nn.Module input_layer: input layer type
    :param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
    :param bool normalize_before: whether to use layer_norm before the first block
    :param bool concat_after: whether to concat attention layer's input and output
        if True, additional linear will be applied.
        i.e. x -> x + linear(concat(x, att(x)))
        if False, no additional linear will be applied. i.e. x -> x + att(x)
    :param str positionwise_layer_type: linear of conv1d
    :param int positionwise_conv_kernel_size: kernel size of positionwise conv1d layer
    :param int padding_idx: padding_idx for input_layer=embed
    :param int num_spkrs: number of speakers in the input mixture
    """
    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        linear_units=2048,
        num_blocks_sd=4,
        num_blocks_rec=8,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        pos_enc_class=PositionalEncoding,
        normalize_before=True,
        concat_after=False,
        positionwise_layer_type="linear",
        positionwise_conv_kernel_size=1,
        padding_idx=-1,
        num_spkrs=2,
    ):
        """Construct an Encoder object."""
        # The parent builds the shared recognition stack (self.encoders)
        # with num_blocks_rec layers, plus self.embed / self.after_norm.
        super(EncoderMix, self).__init__(
            idim=idim,
            selfattention_layer_type="selfattn",
            attention_dim=attention_dim,
            attention_heads=attention_heads,
            linear_units=linear_units,
            num_blocks=num_blocks_rec,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            attention_dropout_rate=attention_dropout_rate,
            input_layer=input_layer,
            pos_enc_class=pos_enc_class,
            normalize_before=normalize_before,
            concat_after=concat_after,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_conv_kernel_size=positionwise_conv_kernel_size,
            padding_idx=padding_idx,
        )
        positionwise_layer, positionwise_layer_args = self.get_positionwise_layer(
            positionwise_layer_type,
            attention_dim,
            linear_units,
            dropout_rate,
            positionwise_conv_kernel_size,
        )
        self.num_spkrs = num_spkrs
        # One independent speaker-differentiating stack per speaker.
        self.encoders_sd = torch.nn.ModuleList(
            [
                repeat(
                    num_blocks_sd,
                    lambda lnum: EncoderLayer(
                        attention_dim,
                        MultiHeadedAttention(
                            attention_heads, attention_dim, attention_dropout_rate
                        ),
                        positionwise_layer(*positionwise_layer_args),
                        dropout_rate,
                        normalize_before,
                        concat_after,
                    ),
                )
                for i in range(num_spkrs)
            ]
        )
    def forward(self, xs, masks):
        """Encode input sequence.

        :param torch.Tensor xs: input tensor
        :param torch.Tensor masks: input mask
        :return: list (one entry per speaker) of position embedded tensors
            and masks
        :rtype Tuple[List[torch.Tensor], List[torch.Tensor]]:
        """
        if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):
            xs, masks = self.embed(xs, masks)
        else:
            xs = self.embed(xs)
        xs_sd, masks_sd = [None] * self.num_spkrs, [None] * self.num_spkrs
        for ns in range(self.num_spkrs):
            # Each speaker branch starts from the same embedded mixture.
            xs_sd[ns], masks_sd[ns] = self.encoders_sd[ns](xs, masks)
            xs_sd[ns], masks_sd[ns] = self.encoders(xs_sd[ns], masks_sd[ns])  # Enc_rec
            if self.normalize_before:
                xs_sd[ns] = self.after_norm(xs_sd[ns])
        return xs_sd, masks_sd
    def forward_one_step(self, xs, masks, cache=None):
        """Encode input frame.

        Bug fixes vs. the previous version:
        - ``self.encoders_rec`` does not exist; the shared stack is
          ``self.encoders`` (AttributeError before).
        - the recognition layers consumed the SD cache slice
          (``cache[: len(self.encoders_sd) :]``) instead of the tail, and the
          SD slice length counted speakers rather than layers.
        - ``xs`` was overwritten across speakers, so speaker ``n`` consumed
          speaker ``n - 1``'s output, and only the last speaker's output was
          returned; per-speaker lists are now returned, matching ``forward``.

        :param torch.Tensor xs: input tensor
        :param torch.Tensor masks: input mask
        :param List[List[torch.Tensor]] cache: per-speaker cache tensors
        :return: per-speaker position embedded tensors, masks, and new caches
        :rtype Tuple[List[torch.Tensor], List[torch.Tensor],
            List[List[torch.Tensor]]]:
        """
        if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):
            xs, masks = self.embed(xs, masks)
        else:
            xs = self.embed(xs)
        if cache is None:
            # One cache entry per layer (SD layers + shared layers), per speaker.
            cache = [
                [None] * (len(self.encoders_sd[ns]) + len(self.encoders))
                for ns in range(self.num_spkrs)
            ]
        xs_sd, masks_sd, new_cache_sd = [], [], []
        for ns in range(self.num_spkrs):
            # Each speaker branch starts from the same embedded mixture.
            xs_ns, masks_ns = xs, masks
            new_cache = []
            n_sd = len(self.encoders_sd[ns])
            for c, e in zip(cache[ns][:n_sd], self.encoders_sd[ns]):
                xs_ns, masks_ns = e(xs_ns, masks_ns, cache=c)
                new_cache.append(xs_ns)
            for c, e in zip(cache[ns][n_sd:], self.encoders):
                xs_ns, masks_ns = e(xs_ns, masks_ns, cache=c)
                new_cache.append(xs_ns)
            if self.normalize_before:
                xs_ns = self.after_norm(xs_ns)
            xs_sd.append(xs_ns)
            masks_sd.append(masks_ns)
            new_cache_sd.append(new_cache)
        return xs_sd, masks_sd, new_cache_sd
| 6,407 | 38.801242 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/argument.py | # Copyright 2020 Hirofumi Inaguma
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer common arguments."""
from distutils.util import strtobool
def add_arguments_transformer_common(group):
    """Add Transformer common arguments.

    Registers the command-line options shared by Transformer-based models
    (initialization, input layer, optimizer schedule, attention layer types,
    lightweight/dynamic convolution settings, intermediate CTC, and
    encoder/decoder sizes) on the given argparse group and returns it.
    """
    # --- general Transformer settings ---
    group.add_argument(
        "--transformer-init",
        type=str,
        default="pytorch",
        choices=[
            "pytorch",
            "xavier_uniform",
            "xavier_normal",
            "kaiming_uniform",
            "kaiming_normal",
        ],
        help="how to initialize transformer parameters",
    )
    group.add_argument(
        "--transformer-input-layer",
        type=str,
        default="conv2d",
        choices=["conv2d", "linear", "embed"],
        help="transformer input layer type",
    )
    group.add_argument(
        "--transformer-attn-dropout-rate",
        default=None,
        type=float,
        help="dropout in transformer attention. use --dropout-rate if None is set",
    )
    # --- Noam optimizer schedule ---
    group.add_argument(
        "--transformer-lr",
        default=10.0,
        type=float,
        help="Initial value of learning rate",
    )
    group.add_argument(
        "--transformer-warmup-steps",
        default=25000,
        type=int,
        help="optimizer warmup steps",
    )
    group.add_argument(
        "--transformer-length-normalized-loss",
        default=True,
        type=strtobool,
        help="normalize loss by length",
    )
    # --- self-attention layer variants ---
    group.add_argument(
        "--transformer-encoder-selfattn-layer-type",
        type=str,
        default="selfattn",
        choices=[
            "selfattn",
            "rel_selfattn",
            "lightconv",
            "lightconv2d",
            "dynamicconv",
            "dynamicconv2d",
            "light-dynamicconv2d",
        ],
        help="transformer encoder self-attention layer type",
    )
    group.add_argument(
        "--transformer-decoder-selfattn-layer-type",
        type=str,
        default="selfattn",
        choices=[
            "selfattn",
            "lightconv",
            "lightconv2d",
            "dynamicconv",
            "dynamicconv2d",
            "light-dynamicconv2d",
        ],
        help="transformer decoder self-attention layer type",
    )
    # Lightweight/Dynamic convolution related parameters.
    # See https://arxiv.org/abs/1912.11793v2
    # and https://arxiv.org/abs/1901.10430 for detail of the method.
    # Configurations used in the first paper are in
    # egs/{csj, librispeech}/asr1/conf/tuning/ld_conv/
    group.add_argument(
        "--wshare",
        default=4,
        type=int,
        help="Number of parameter shargin for lightweight convolution",
    )
    group.add_argument(
        "--ldconv-encoder-kernel-length",
        default="21_23_25_27_29_31_33_35_37_39_41_43",
        type=str,
        help="kernel size for lightweight/dynamic convolution: "
        'Encoder side. For example, "21_23_25" means kernel length 21 for '
        "First layer, 23 for Second layer and so on.",
    )
    group.add_argument(
        "--ldconv-decoder-kernel-length",
        default="11_13_15_17_19_21",
        type=str,
        help="kernel size for lightweight/dynamic convolution: "
        'Decoder side. For example, "21_23_25" means kernel length 21 for '
        "First layer, 23 for Second layer and so on.",
    )
    group.add_argument(
        "--ldconv-usebias",
        type=strtobool,
        default=False,
        help="use bias term in lightweight/dynamic convolution",
    )
    group.add_argument(
        "--dropout-rate",
        default=0.0,
        type=float,
        help="Dropout rate for the encoder",
    )
    # --- intermediate CTC (see https://arxiv.org/abs/2102.03216) ---
    group.add_argument(
        "--intermediate-ctc-weight",
        default=0.0,
        type=float,
        help="Weight of intermediate CTC weight",
    )
    group.add_argument(
        "--intermediate-ctc-layer",
        default="",
        type=str,
        help="Position of intermediate CTC layer. {int} or {int},{int},...,{int}",
    )
    group.add_argument(
        "--self-conditioning",
        default=False,
        type=strtobool,
        help="use self-conditioning at intermediate CTC layers",
    )
    # Encoder
    group.add_argument(
        "--elayers",
        default=4,
        type=int,
        help="Number of encoder layers (for shared recognition part "
        "in multi-speaker asr mode)",
    )
    group.add_argument(
        "--eunits",
        "-u",
        default=300,
        type=int,
        help="Number of encoder hidden units",
    )
    # Attention
    group.add_argument(
        "--adim",
        default=320,
        type=int,
        help="Number of attention transformation dimensions",
    )
    group.add_argument(
        "--aheads",
        default=4,
        type=int,
        help="Number of heads for multi head attention",
    )
    group.add_argument(
        "--stochastic-depth-rate",
        default=0.0,
        type=float,
        help="Skip probability of stochastic layer regularization",
    )
    # Decoder
    group.add_argument(
        "--dlayers", default=1, type=int, help="Number of decoder layers"
    )
    group.add_argument(
        "--dunits", default=320, type=int, help="Number of decoder hidden units"
    )
    return group
| 5,280 | 27.701087 | 83 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/subsampling.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Subsampling layer definition."""
import torch
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
class TooShortUttError(Exception):
    """Raised when the utt is too short for subsampling.

    Args:
        message (str): Message for error catch
        actual_size (int): the short size that cannot pass the subsampling
        limit (int): the limit size for subsampling
    """

    def __init__(self, message, actual_size, limit):
        """Construct a TooShortUttError for error handler."""
        super().__init__(message)
        # Keep the offending length and the minimum requirement so callers
        # can report the problem or pad the input accordingly.
        self.actual_size = actual_size
        self.limit = limit
def check_short_utt(ins, size):
    """Check if the utterance is too short for subsampling.

    Returns ``(True, limit)`` when the input length ``size`` is below the
    minimum number of frames the given subsampling front-end requires,
    otherwise ``(False, -1)``.
    """
    # Minimum frame counts, checked in the same order as before.
    limits = (
        (Conv1dSubsampling2, 5),
        (Conv1dSubsampling3, 7),
        (Conv2dSubsampling1, 5),
        (Conv2dSubsampling2, 7),
        (Conv2dSubsampling, 7),
        (Conv2dSubsampling6, 11),
        (Conv2dSubsampling8, 15),
    )
    for subsampling_cls, limit in limits:
        if isinstance(ins, subsampling_cls) and size < limit:
            return True, limit
    return False, -1
class Conv1dSubsampling2(torch.nn.Module):
    """Convolutional 1D subsampling (to 1/2 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.
    """

    def __init__(self, idim, odim, dropout_rate, pos_enc=None):
        """Construct an Conv1dSubsampling2 object."""
        super().__init__()
        # Stride-1 then stride-2 conv along time: halves the sequence length.
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(idim, odim, 3, 1),
            torch.nn.ReLU(),
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim, odim),
            PositionalEncoding(odim, dropout_rate) if pos_enc is None else pos_enc,
        )

    def forward(self, x, x_mask):
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 2.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 2.
        """
        feats = self.conv(x.transpose(2, 1))  # (#batch, odim, time')
        subsampled = self.out(feats.transpose(1, 2).contiguous())
        if x_mask is None:
            return subsampled, None
        # Mirror the conv receptive fields on the mask: trim 2 (k=3, s=1),
        # then trim 2 and stride by 2 (k=3, s=2).
        return subsampled, x_mask[:, :, :-2:1][:, :, :-2:2]

    def __getitem__(self, key):
        """Get item.

        When reset_parameters() is called, if use_scaled_pos_enc is used,
        return the positioning encoding.
        """
        if key == -1:
            return self.out[key]
        raise NotImplementedError("Support only `-1` (for `reset_parameters`).")
class Conv1dSubsampling3(torch.nn.Module):
    """Convolutional 1D subsampling (to 1/3 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.
    """

    def __init__(self, idim, odim, dropout_rate, pos_enc=None):
        """Construct an Conv1dSubsampling3 object."""
        super().__init__()
        # Stride-1 (k=3) then stride-3 (k=5) conv: reduces time roughly 3x.
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(idim, odim, 3, 1),
            torch.nn.ReLU(),
            torch.nn.Conv1d(odim, odim, 5, 3),
            torch.nn.ReLU(),
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim, odim),
            PositionalEncoding(odim, dropout_rate) if pos_enc is None else pos_enc,
        )

    def forward(self, x, x_mask):
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 3.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 3.
        """
        feats = self.conv(x.transpose(2, 1))  # (#batch, odim, time')
        subsampled = self.out(feats.transpose(1, 2).contiguous())
        if x_mask is None:
            return subsampled, None
        # Mirror the conv receptive fields on the mask: trim 2 (k=3, s=1),
        # then trim 4 and stride by 3 (k=5, s=3).
        return subsampled, x_mask[:, :, :-2:1][:, :, :-4:3]

    def __getitem__(self, key):
        """Get item.

        When reset_parameters() is called, if use_scaled_pos_enc is used,
        return the positioning encoding.
        """
        if key == -1:
            return self.out[key]
        raise NotImplementedError("Support only `-1` (for `reset_parameters`).")
class Conv2dSubsampling(torch.nn.Module):
    """Convolutional 2D subsampling (to 1/4 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.
    """

    def __init__(self, idim, odim, dropout_rate, pos_enc=None):
        """Construct an Conv2dSubsampling object."""
        super().__init__()
        # Two stride-2 3x3 convs: both time and frequency are reduced ~4x.
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        # Frequency bins remaining after the two stride-2 convolutions.
        freq_out = ((idim - 1) // 2 - 1) // 2
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * freq_out, odim),
            PositionalEncoding(odim, dropout_rate) if pos_enc is None else pos_enc,
        )

    def forward(self, x, x_mask):
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 4.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 4.
        """
        feats = self.conv(x.unsqueeze(1))  # (b, c, t', f')
        b, c, t, f = feats.size()
        # Merge channel and frequency axes before the output projection.
        merged = feats.transpose(1, 2).contiguous().view(b, t, c * f)
        subsampled = self.out(merged)
        if x_mask is None:
            return subsampled, None
        return subsampled, x_mask[:, :, :-2:2][:, :, :-2:2]

    def __getitem__(self, key):
        """Get item.

        When reset_parameters() is called, if use_scaled_pos_enc is used,
        return the positioning encoding.
        """
        if key == -1:
            return self.out[key]
        raise NotImplementedError("Support only `-1` (for `reset_parameters`).")
class Conv2dSubsampling1(torch.nn.Module):
    """Similar to Conv2dSubsampling module, but without any subsampling performed.

    Two stride-1 "valid" 3x3 convolutions: the time axis only shrinks by 4
    frames in total, no rate reduction happens.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.

    """

    def __init__(self, idim, odim, dropout_rate, pos_enc=None):
        """Construct an Conv2dSubsampling1 object."""
        super(Conv2dSubsampling1, self).__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 1),
            torch.nn.ReLU(),
        )
        # Each "valid" 3x3 conv trims 2 frequency bins, hence idim - 4.
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * (idim - 4), odim),
            pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),
        )

    def forward(self, x, x_mask):
        """Pass x through 2 Conv2d layers without subsampling.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim).
                where time' = time - 4.
            torch.Tensor: Subsampled mask (#batch, 1, time').
                where time' = time - 4.

        """
        feats = self.conv(x.unsqueeze(1))  # (#batch, odim, time - 4, idim - 4)
        nbatch, nchan, ntime, nfreq = feats.size()
        flat = feats.transpose(1, 2).contiguous().view(nbatch, ntime, nchan * nfreq)
        out = self.out(flat)
        if x_mask is None:
            return out, None
        # Drop the last 4 mask positions to match the trimmed time axis.
        return out, x_mask[:, :, :-4]

    def __getitem__(self, key):
        """Get item.

        When reset_parameters() is called, if use_scaled_pos_enc is used,
        return the positioning encoding.

        """
        if key == -1:
            return self.out[key]
        raise NotImplementedError("Support only `-1` (for `reset_parameters`).")
class Conv2dSubsampling2(torch.nn.Module):
    """Convolutional 2D subsampling (to 1/2 length).

    A stride-2 conv followed by a stride-1 conv: the time axis is halved once.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.

    """

    def __init__(self, idim, odim, dropout_rate, pos_enc=None):
        """Construct an Conv2dSubsampling2 object."""
        super(Conv2dSubsampling2, self).__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 1),
            torch.nn.ReLU(),
        )
        # Frequency axis: halved by the first conv, then trimmed by 2.
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * (((idim - 1) // 2 - 2)), odim),
            pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),
        )

    def forward(self, x, x_mask):
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 2.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 2.

        """
        feats = self.conv(x.unsqueeze(1))  # (#batch, odim, time', freq')
        nbatch, nchan, ntime, nfreq = feats.size()
        flat = feats.transpose(1, 2).contiguous().view(nbatch, ntime, nchan * nfreq)
        out = self.out(flat)
        if x_mask is None:
            return out, None
        # First slice mirrors the stride-2 conv, second the stride-1 conv.
        return out, x_mask[:, :, :-2:2][:, :, :-2:1]

    def __getitem__(self, key):
        """Get item.

        When reset_parameters() is called, if use_scaled_pos_enc is used,
        return the positioning encoding.

        """
        if key == -1:
            return self.out[key]
        raise NotImplementedError("Support only `-1` (for `reset_parameters`).")
class Conv2dSubsampling6(torch.nn.Module):
    """Convolutional 2D subsampling (to 1/6 length).

    A stride-2 3x3 conv followed by a stride-3 5x5 conv: overall 1/6 rate.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.

    """

    def __init__(self, idim, odim, dropout_rate, pos_enc=None):
        """Construct an Conv2dSubsampling6 object."""
        super(Conv2dSubsampling6, self).__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 5, 3),
            torch.nn.ReLU(),
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), odim),
            pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),
        )

    def forward(self, x, x_mask):
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 6.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 6.

        """
        feats = self.conv(x.unsqueeze(1))  # (#batch, odim, time', freq')
        nbatch, nchan, ntime, nfreq = feats.size()
        flat = feats.transpose(1, 2).contiguous().view(nbatch, ntime, nchan * nfreq)
        out = self.out(flat)
        if x_mask is None:
            return out, None
        # Slices mirror the (k=3, s=2) then (k=5, s=3) length reductions.
        return out, x_mask[:, :, :-2:2][:, :, :-4:3]
class Conv2dSubsampling8(torch.nn.Module):
    """Convolutional 2D subsampling (to 1/8 length).

    Three stride-2 "valid" 3x3 convolutions: overall 1/8 rate.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.

    """

    def __init__(self, idim, odim, dropout_rate, pos_enc=None):
        """Construct an Conv2dSubsampling8 object."""
        super(Conv2dSubsampling8, self).__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim),
            pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),
        )

    def forward(self, x, x_mask):
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 8.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 8.

        """
        feats = self.conv(x.unsqueeze(1))  # (#batch, odim, time', freq')
        nbatch, nchan, ntime, nfreq = feats.size()
        flat = feats.transpose(1, 2).contiguous().view(nbatch, ntime, nchan * nfreq)
        out = self.out(flat)
        if x_mask is None:
            return out, None
        # One slice per stride-2 conv stage.
        return out, x_mask[:, :, :-2:2][:, :, :-2:2][:, :, :-2:2]
| 14,351 | 31.544218 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/encoder.py | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder definition."""
import logging
import torch
from espnet.nets.pytorch_backend.nets_utils import rename_state_dict
from espnet.nets.pytorch_backend.transducer.vgg2l import VGG2L
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.dynamic_conv import DynamicConvolution
from espnet.nets.pytorch_backend.transformer.dynamic_conv2d import DynamicConvolution2D
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.lightconv import LightweightConvolution
from espnet.nets.pytorch_backend.transformer.lightconv2d import LightweightConvolution2D
from espnet.nets.pytorch_backend.transformer.multi_layer_conv import (
Conv1dLinear,
MultiLayeredConv1d,
)
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.pytorch_backend.transformer.subsampling import (
Conv2dSubsampling,
Conv2dSubsampling6,
Conv2dSubsampling8,
)
def _pre_hook(
    state_dict,
    prefix,
    local_metadata,
    strict,
    missing_keys,
    unexpected_keys,
    error_msgs,
):
    """Rename legacy parameter keys so old checkpoints keep loading.

    Key renames introduced by:
    https://github.com/espnet/espnet/commit/21d70286c354c66c0350e65dc098d2ee236faccc#diff-bffb1396f038b317b2b64dd96e6d3563
    https://github.com/espnet/espnet/commit/3d422f6de8d4f03673b89e1caef698745ec749ea#diff-bffb1396f038b317b2b64dd96e6d3563
    """
    for old, new in (("input_layer.", "embed."), ("norm.", "after_norm.")):
        rename_state_dict(prefix + old, prefix + new, state_dict)
class Encoder(torch.nn.Module):
    """Transformer encoder module.

    Args:
        idim (int): Input dimension.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        conv_wshare (int): The number of kernel of convolution. Only used in
            selfattention_layer_type == "lightconv*" or "dynamiconv*".
        conv_kernel_length (Union[int, str]): Kernel size str of convolution
            (e.g. 71_71_71_71_71_71). Only used in selfattention_layer_type
            == "lightconv*" or "dynamiconv*".
        conv_usebias (bool): Whether to use bias in convolution. Only used in
            selfattention_layer_type == "lightconv*" or "dynamiconv*".
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of encoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        pos_enc_class (torch.nn.Module): Positional encoding module class.
            `PositionalEncoding `or `ScaledPositionalEncoding`
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied. i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        selfattention_layer_type (str): Encoder attention layer type.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer.
            indices start from 1.
            if not None, intermediate outputs are returned (which changes return type
            signature.)
        ctc_softmax (Union[Callable, None]): Softmax-ed CTC output function used
            to condition intermediate encoder layers (self-conditioned CTC).
        conditioning_layer_dim (Union[int, None]): Input dimension of the
            conditioning projection; required when ctc_softmax is given.

    """

    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        conv_wshare=4,
        conv_kernel_length="11",
        conv_usebias=False,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        pos_enc_class=PositionalEncoding,
        normalize_before=True,
        concat_after=False,
        positionwise_layer_type="linear",
        positionwise_conv_kernel_size=1,
        selfattention_layer_type="selfattn",
        padding_idx=-1,
        stochastic_depth_rate=0.0,
        intermediate_layers=None,
        ctc_softmax=None,
        conditioning_layer_dim=None,
    ):
        """Construct an Encoder object."""
        super(Encoder, self).__init__()
        # Keep old checkpoints loadable (input_layer./norm. key renames).
        self._register_load_state_dict_pre_hook(_pre_hook)

        self.conv_subsampling_factor = 1
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(idim, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(idim, attention_dim, dropout_rate)
            self.conv_subsampling_factor = 4
        elif input_layer == "conv2d-scaled-pos-enc":
            self.embed = Conv2dSubsampling(
                idim,
                attention_dim,
                dropout_rate,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
            self.conv_subsampling_factor = 4
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(idim, attention_dim, dropout_rate)
            self.conv_subsampling_factor = 6
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(idim, attention_dim, dropout_rate)
            self.conv_subsampling_factor = 8
        elif input_layer == "vgg2l":
            self.embed = VGG2L(idim, attention_dim)
            self.conv_subsampling_factor = 4
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer is None:
            self.embed = torch.nn.Sequential(
                pos_enc_class(attention_dim, positional_dropout_rate)
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before
        positionwise_layer, positionwise_layer_args = self.get_positionwise_layer(
            positionwise_layer_type,
            attention_dim,
            linear_units,
            dropout_rate,
            positionwise_conv_kernel_size,
        )
        if selfattention_layer_type in [
            "selfattn",
            "rel_selfattn",
            "legacy_rel_selfattn",
        ]:
            logging.info("encoder self-attention layer type = self-attention")
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = [
                (
                    attention_heads,
                    attention_dim,
                    attention_dropout_rate,
                )
            ] * num_blocks
        elif selfattention_layer_type == "lightconv":
            logging.info("encoder self-attention layer type = lightweight convolution")
            encoder_selfattn_layer = LightweightConvolution
            encoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    False,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        elif selfattention_layer_type == "lightconv2d":
            logging.info(
                "encoder self-attention layer "
                "type = lightweight convolution 2-dimensional"
            )
            encoder_selfattn_layer = LightweightConvolution2D
            encoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    False,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        elif selfattention_layer_type == "dynamicconv":
            logging.info("encoder self-attention layer type = dynamic convolution")
            encoder_selfattn_layer = DynamicConvolution
            encoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    False,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        elif selfattention_layer_type == "dynamicconv2d":
            logging.info(
                "encoder self-attention layer type = dynamic convolution 2-dimensional"
            )
            encoder_selfattn_layer = DynamicConvolution2D
            encoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    False,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        else:
            raise NotImplementedError(selfattention_layer_type)

        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                attention_dim,
                encoder_selfattn_layer(*encoder_selfattn_layer_args[lnum]),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
                # Skip probability grows linearly with depth.
                stochastic_depth_rate * float(1 + lnum) / num_blocks,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(attention_dim)

        self.intermediate_layers = intermediate_layers
        self.use_conditioning = True if ctc_softmax is not None else False
        if self.use_conditioning:
            self.ctc_softmax = ctc_softmax
            self.conditioning_layer = torch.nn.Linear(
                conditioning_layer_dim, attention_dim
            )

    def get_positionwise_layer(
        self,
        positionwise_layer_type="linear",
        attention_dim=256,
        linear_units=2048,
        dropout_rate=0.1,
        positionwise_conv_kernel_size=1,
    ):
        """Define positionwise layer."""
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (attention_dim, linear_units, dropout_rate)
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                attention_dim,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                attention_dim,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            # Fixed: previous message omitted the supported "conv1d-linear".
            raise NotImplementedError("Support only linear, conv1d, or conv1d-linear.")
        return positionwise_layer, positionwise_layer_args

    def forward(self, xs, masks):
        """Encode input sequence.

        Args:
            xs (torch.Tensor): Input tensor (#batch, time, idim).
            masks (torch.Tensor): Mask tensor (#batch, 1, time).

        Returns:
            torch.Tensor: Output tensor (#batch, time, attention_dim).
            torch.Tensor: Mask tensor (#batch, 1, time).

        """
        # Conv-based embeds subsample the mask too; others only map features.
        if isinstance(
            self.embed,
            (Conv2dSubsampling, Conv2dSubsampling6, Conv2dSubsampling8, VGG2L),
        ):
            xs, masks = self.embed(xs, masks)
        else:
            xs = self.embed(xs)

        if self.intermediate_layers is None:
            xs, masks = self.encoders(xs, masks)
        else:
            intermediate_outputs = []
            for layer_idx, encoder_layer in enumerate(self.encoders):
                xs, masks = encoder_layer(xs, masks)

                if (
                    self.intermediate_layers is not None
                    and layer_idx + 1 in self.intermediate_layers
                ):
                    encoder_output = xs
                    # intermediate branches also require normalization.
                    if self.normalize_before:
                        encoder_output = self.after_norm(encoder_output)
                    intermediate_outputs.append(encoder_output)

                    if self.use_conditioning:
                        # Self-conditioned CTC: feed intermediate CTC
                        # posteriors back into the encoder stream.
                        intermediate_result = self.ctc_softmax(encoder_output)
                        xs = xs + self.conditioning_layer(intermediate_result)

        if self.normalize_before:
            xs = self.after_norm(xs)

        if self.intermediate_layers is not None:
            return xs, masks, intermediate_outputs
        return xs, masks

    def forward_one_step(self, xs, masks, cache=None):
        """Encode input frame.

        Args:
            xs (torch.Tensor): Input tensor.
            masks (torch.Tensor): Mask tensor.
            cache (List[torch.Tensor]): List of cache tensors.

        Returns:
            torch.Tensor: Output tensor.
            torch.Tensor: Mask tensor.
            List[torch.Tensor]: List of new cache tensors.

        """
        # Fixed: use the same mask-aware embed check as forward(); previously
        # only Conv2dSubsampling was handled, so conv2d6/conv2d8/vgg2l input
        # layers crashed here (embed called without masks).
        if isinstance(
            self.embed,
            (Conv2dSubsampling, Conv2dSubsampling6, Conv2dSubsampling8, VGG2L),
        ):
            xs, masks = self.embed(xs, masks)
        else:
            xs = self.embed(xs)
        if cache is None:
            cache = [None for _ in range(len(self.encoders))]
        new_cache = []
        for c, e in zip(cache, self.encoders):
            xs, masks = e(xs, masks, cache=c)
            new_cache.append(xs)
        if self.normalize_before:
            xs = self.after_norm(xs)
        return xs, masks, new_cache
| 15,107 | 38.757895 | 124 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/longformer_attention.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Roshan Sharma (Carnegie Mellon University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Longformer based Local Attention Definition."""
from longformer.longformer import LongformerConfig, LongformerSelfAttention
from torch import nn
class LongformerAttention(nn.Module):
    """Longformer based Local Attention Definition."""

    def __init__(self, config: LongformerConfig, layer_id: int):
        """Compute Longformer based Self-Attention.

        Args:
            config : Longformer attention configuration
            layer_id: Integer representing the layer index
        """
        super().__init__()
        self.attention_layer = LongformerSelfAttention(config, layer_id=layer_id)
        self.attention_window = config.attention_window[layer_id]
        # Populated with the attention weights on every forward call.
        self.attention = None

    def forward(self, query, key, value, mask):
        """Compute Longformer Self-Attention with masking.

        Expects `len(hidden_states)` to be multiple of `attention_window`.
        Padding to `attention_window` happens in :meth:`encoder.forward`
        to avoid redoing the padding on each layer.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size). NOT USED.
            value (torch.Tensor): Value tensor (#batch, time2, size). NOT USED.
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
        """
        # Longformer mask convention: -1 = padding (no attention), 0 = local.
        padded = mask == 0
        local = mask == 1
        attention_mask = mask.int()
        attention_mask[padded] = -1
        attention_mask[local] = 0
        output, self.attention = self.attention_layer(
            hidden_states=query,
            attention_mask=attention_mask.unsqueeze(1),
            head_mask=None,
            output_attentions=True,
        )
        return output
| 2,070 | 35.982143 | 81 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/contextual_block_encoder_layer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Emiru Tsunoo
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder self-attention layer definition."""
import torch
from torch import nn
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class ContextualBlockEncoderLayer(nn.Module):
    """Contextual Block Encoder layer module.

    Processes the input as (#batch, #block, block_size + 2, size): each block
    carries an extra leading "context" frame and the last frame of each block
    is handed to the next block as its context.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance
            can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        dropout_rate (float): Dropout rate.
        total_layer_num (int): Total number of layers
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied. i.e. x -> x + att(x)

    """

    def __init__(
        self,
        size,
        self_attn,
        feed_forward,
        dropout_rate,
        total_layer_num,
        normalize_before=True,
        concat_after=False,
    ):
        """Construct an EncoderLayer object."""
        super(ContextualBlockEncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # norm1 wraps the self-attention sublayer, norm2 the feed-forward one.
        self.norm1 = LayerNorm(size)
        self.norm2 = LayerNorm(size)
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        self.total_layer_num = total_layer_num
        if self.concat_after:
            self.concat_linear = nn.Linear(size + size, size)

    def forward(
        self,
        x,
        mask,
        infer_mode=False,
        past_ctx=None,
        next_ctx=None,
        is_short_segment=False,
        layer_idx=0,
        cache=None,
    ):
        """Calculate forward propagation.

        Dispatches to the training-time (whole utterance) or streaming
        inference implementation.
        """
        if self.training or not infer_mode:
            return self.forward_train(x, mask, past_ctx, next_ctx, layer_idx, cache)
        else:
            return self.forward_infer(
                x, mask, past_ctx, next_ctx, is_short_segment, layer_idx, cache
            )

    def forward_train(
        self, x, mask, past_ctx=None, next_ctx=None, layer_idx=0, cache=None
    ):
        """Compute encoded features (training / full-utterance mode).

        Args:
            x (torch.Tensor): Input tensor (#batch, nblock, block_size + 2, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).
            past_ctx (torch.Tensor): Previous contextual vector
            next_ctx (torch.Tensor): Next contextual vector
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, 1, time).
            cur_ctx (torch.Tensor): Current contextual vector
            next_ctx (torch.Tensor): Next contextual vector
            layer_idx (int): layer index number

        """
        nbatch = x.size(0)
        nblock = x.size(1)

        if past_ctx is not None:
            if next_ctx is None:
                # store all context vectors in one tensor
                next_ctx = past_ctx.new_zeros(
                    nbatch, nblock, self.total_layer_num, x.size(-1)
                )
            else:
                # Overwrite each block's leading frame with this layer's
                # context vector from the previous layer pass.
                x[:, :, 0] = past_ctx[:, :, layer_idx]

        # reshape ( nbatch, nblock, block_size + 2, dim )
        #      -> ( nbatch * nblock, block_size + 2, dim )
        x = x.view(-1, x.size(-2), x.size(-1))
        if mask is not None:
            mask = mask.view(-1, mask.size(-2), mask.size(-1))

        residual = x
        if self.normalize_before:
            x = self.norm1(x)

        if cache is None:
            x_q = x
        else:
            # Incremental decoding: only attend from the newest frame.
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]

        if self.concat_after:
            x_concat = torch.cat((x, self.self_attn(x_q, x, x, mask)), dim=-1)
            x = residual + self.concat_linear(x_concat)
        else:
            x = residual + self.dropout(self.self_attn(x_q, x, x, mask))
        if not self.normalize_before:
            x = self.norm1(x)

        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        x = residual + self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)

        if cache is not None:
            x = torch.cat([cache, x], dim=1)

        layer_idx += 1
        # reshape ( nbatch * nblock, block_size + 2, dim )
        #      -> ( nbatch, nblock, block_size + 2, dim )
        x = x.view(nbatch, -1, x.size(-2), x.size(-1)).squeeze(1)
        if mask is not None:
            mask = mask.view(nbatch, -1, mask.size(-2), mask.size(-1)).squeeze(1)

        if next_ctx is not None and layer_idx < self.total_layer_num:
            # Hand each block's last frame to the following block as context
            # for the next layer (block 0 reuses its own last frame).
            next_ctx[:, 0, layer_idx, :] = x[:, 0, -1, :]
            next_ctx[:, 1:, layer_idx, :] = x[:, 0:-1, -1, :]

        return x, mask, False, next_ctx, next_ctx, False, layer_idx

    def forward_infer(
        self,
        x,
        mask,
        past_ctx=None,
        next_ctx=None,
        is_short_segment=False,
        layer_idx=0,
        cache=None,
    ):
        """Compute encoded features (streaming inference mode).

        Args:
            x (torch.Tensor): Input tensor (#batch, nblock, block_size + 2, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).
            past_ctx (torch.Tensor): Previous contextual vector
            next_ctx (torch.Tensor): Next contextual vector
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, 1, time).
            cur_ctx (torch.Tensor): Current contextual vector
            next_ctx (torch.Tensor): Next contextual vector
            layer_idx (int): layer index number

        """
        nbatch = x.size(0)
        nblock = x.size(1)

        # if layer_idx == 0, next_ctx has to be None
        if layer_idx == 0:
            assert next_ctx is None
            # One context slot per layer, carried across segments.
            next_ctx = x.new_zeros(nbatch, self.total_layer_num, x.size(-1))

        # reshape ( nbatch, nblock, block_size + 2, dim )
        #      -> ( nbatch * nblock, block_size + 2, dim )
        x = x.view(-1, x.size(-2), x.size(-1))
        if mask is not None:
            mask = mask.view(-1, mask.size(-2), mask.size(-1))

        residual = x
        if self.normalize_before:
            x = self.norm1(x)

        if cache is None:
            x_q = x
        else:
            # Incremental decoding: only attend from the newest frame.
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]

        if self.concat_after:
            x_concat = torch.cat((x, self.self_attn(x_q, x, x, mask)), dim=-1)
            x = residual + self.concat_linear(x_concat)
        else:
            x = residual + self.dropout(self.self_attn(x_q, x, x, mask))
        if not self.normalize_before:
            x = self.norm1(x)

        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        x = residual + self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)

        if cache is not None:
            x = torch.cat([cache, x], dim=1)

        # reshape ( nbatch * nblock, block_size + 2, dim )
        #      -> ( nbatch, nblock, block_size + 2, dim )
        x = x.view(nbatch, nblock, x.size(-2), x.size(-1))
        if mask is not None:
            mask = mask.view(nbatch, nblock, mask.size(-2), mask.size(-1))

        # Propagate context information (the last frame of each block)
        # to the first frame
        # of the next block
        if not is_short_segment:
            if past_ctx is None:
                # First block of an utterance
                x[:, 0, 0, :] = x[:, 0, -1, :]
            else:
                # Seed the first block with the context carried over from
                # the previous segment for this layer.
                x[:, 0, 0, :] = past_ctx[:, layer_idx, :]
            if nblock > 1:
                x[:, 1:, 0, :] = x[:, 0:-1, -1, :]
            next_ctx[:, layer_idx, :] = x[:, -1, -1, :]
        else:
            # Segment too short to produce a context vector.
            next_ctx = None

        return x, mask, True, past_ctx, next_ctx, is_short_segment, layer_idx + 1
| 8,899 | 34.177866 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/lightconv.py | """Lightweight Convolution Module."""
import numpy
import torch
import torch.nn.functional as F
from torch import nn
MIN_VALUE = float(numpy.finfo(numpy.float32).min)
class LightweightConvolution(nn.Module):
    """Lightweight Convolution layer.

    This implementation is based on
    https://github.com/pytorch/fairseq/tree/master/fairseq

    Args:
        wshare (int): the number of kernel of convolution
        n_feat (int): the number of features
        dropout_rate (float): dropout_rate
        kernel_size (int): kernel size (length)
        use_kernel_mask (bool): Use causal mask or not for convolution kernel
        use_bias (bool): Use bias term or not.

    """

    def __init__(
        self,
        wshare,
        n_feat,
        dropout_rate,
        kernel_size,
        use_kernel_mask=False,
        use_bias=False,
    ):
        """Construct Lightweight Convolution layer."""
        super(LightweightConvolution, self).__init__()

        assert n_feat % wshare == 0
        self.wshare = wshare
        self.use_kernel_mask = use_kernel_mask
        self.dropout_rate = dropout_rate
        self.kernel_size = kernel_size
        self.padding_size = kernel_size // 2

        # linear -> GLU -> lightconv -> linear
        # linear1 doubles the width so GLU can gate half against the other.
        self.linear1 = nn.Linear(n_feat, n_feat * 2)
        self.linear2 = nn.Linear(n_feat, n_feat)
        self.act = nn.GLU()

        # One shared depthwise kernel per weight-sharing group.
        self.weight = nn.Parameter(
            torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)
        )
        self.use_bias = use_bias
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_feat))

        # Causal kernel mask: keep the first half+1 taps, zero the future taps.
        half = kernel_size // 2
        keep = torch.ones(self.wshare, half + 1)
        drop = torch.zeros(self.wshare, half)
        self.kernel_mask = torch.cat((keep, drop), dim=-1).unsqueeze(1)

    def forward(self, query, key, value, mask):
        """Forward of 'Lightweight Convolution'.

        This function takes query, key and value but uses only query.
        This is just for compatibility with self-attention layer (attention.py)

        Args:
            query (torch.Tensor): (batch, time1, d_model) input tensor
            key (torch.Tensor): (batch, time2, d_model) NOT USED
            value (torch.Tensor): (batch, time2, d_model) NOT USED
            mask (torch.Tensor): (batch, time1, time2) mask

        Return:
            x (torch.Tensor): (batch, time1, d_model) output

        """
        # linear -> GLU -> lightconv -> linear
        h = query
        B, T, C = h.size()
        H = self.wshare

        # input projection followed by the GLU gate
        h = self.act(self.linear1(h))

        # depthwise convolution with softmax-normalized shared kernels
        h = h.transpose(1, 2).contiguous().view(-1, H, T)  # B x C x T
        weight = F.dropout(self.weight, self.dropout_rate, training=self.training)
        if self.use_kernel_mask:
            self.kernel_mask = self.kernel_mask.to(h.device)
            weight = weight.masked_fill(self.kernel_mask == 0.0, float("-inf"))
        weight = F.softmax(weight, dim=-1)
        h = F.conv1d(h, weight, padding=self.padding_size, groups=self.wshare).view(
            B, C, T
        )
        if self.use_bias:
            h = h + self.bias.view(1, -1, 1)
        h = h.transpose(1, 2)  # B x T x C

        if mask is not None and not self.use_kernel_mask:
            mask = mask.transpose(-1, -2)
            h = h.masked_fill(mask == 0, 0.0)

        # output projection
        return self.linear2(h)
| 3,589 | 31.053571 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/multi_layer_conv.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Layer modules for FFT block in FastSpeech (Feed-forward Transformer)."""
import torch
class MultiLayeredConv1d(torch.nn.Module):
    """Multi-layered conv1d for Transformer block.

    This is a module of multi-leyered conv1d designed
    to replace positionwise feed-forward network
    in Transforner block, which is introduced in
    `FastSpeech: Fast, Robust and Controllable Text to Speech`_.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    """

    def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
        """Initialize MultiLayeredConv1d module.

        Args:
            in_chans (int): Number of input channels.
            hidden_chans (int): Number of hidden channels.
            kernel_size (int): Kernel size of conv1d.
            dropout_rate (float): Dropout rate.

        """
        super(MultiLayeredConv1d, self).__init__()
        # "Same" padding keeps the sequence length unchanged.
        pad = (kernel_size - 1) // 2
        self.w_1 = torch.nn.Conv1d(
            in_chans,
            hidden_chans,
            kernel_size,
            stride=1,
            padding=pad,
        )
        self.w_2 = torch.nn.Conv1d(
            hidden_chans,
            in_chans,
            kernel_size,
            stride=1,
            padding=pad,
        )
        self.dropout = torch.nn.Dropout(dropout_rate)

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (torch.Tensor): Batch of input tensors (B, T, in_chans).

        Returns:
            torch.Tensor: Batch of output tensors (B, T, hidden_chans).

        """
        # Conv1d works channel-first, so transpose around each conv.
        hidden = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
        hidden = self.dropout(hidden)
        return self.w_2(hidden.transpose(-1, 1)).transpose(-1, 1)
class Conv1dLinear(torch.nn.Module):
    """Conv1D + Linear for Transformer block.

    A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.

    """

    def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
        """Initialize Conv1dLinear module.

        Args:
            in_chans (int): Number of input channels.
            hidden_chans (int): Number of hidden channels.
            kernel_size (int): Kernel size of conv1d.
            dropout_rate (float): Dropout rate.

        """
        super(Conv1dLinear, self).__init__()
        # "Same" padding keeps the sequence length unchanged.
        self.w_1 = torch.nn.Conv1d(
            in_chans,
            hidden_chans,
            kernel_size,
            stride=1,
            padding=(kernel_size - 1) // 2,
        )
        self.w_2 = torch.nn.Linear(hidden_chans, in_chans)
        self.dropout = torch.nn.Dropout(dropout_rate)

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (torch.Tensor): Batch of input tensors (B, T, in_chans).

        Returns:
            torch.Tensor: Batch of output tensors (B, T, hidden_chans).

        """
        # Conv1d works channel-first; Linear consumes channel-last directly.
        hidden = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
        return self.w_2(self.dropout(hidden))
| 3,164 | 28.858491 | 80 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/subsampling_without_posenc.py | # Copyright 2020 Emiru Tsunoo
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Subsampling layer definition."""
import math
import torch
class Conv2dSubsamplingWOPosEnc(torch.nn.Module):
    """Convolutional 2D subsampling without positional encoding.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        kernels (list): kernel sizes
        strides (list): stride sizes
    """

    def __init__(self, idim, odim, dropout_rate, kernels, strides):
        """Construct an Conv2dSubsamplingWOPosEnc object."""
        assert len(kernels) == len(strides)
        super().__init__()
        layers = []
        freq = idim
        for i, (kernel, stride) in enumerate(zip(kernels, strides)):
            # First conv takes the single-channel spectrogram; later
            # convs operate on odim channels.
            in_ch = 1 if i == 0 else odim
            layers.append(torch.nn.Conv2d(in_ch, odim, kernel, stride))
            layers.append(torch.nn.ReLU())
            # Track the frequency-axis size after this conv layer so the
            # output projection can flatten channels x frequency.
            freq = math.floor((freq - kernel) / stride + 1)
        self.conv = torch.nn.Sequential(*layers)
        self.out = torch.nn.Linear(odim * freq, odim)
        self.strides = strides
        self.kernels = kernels

    def forward(self, x, x_mask):
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim).
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                or None if ``x_mask`` is None.
        """
        hidden = self.conv(x.unsqueeze(1))  # (b, c, t', f')
        b, c, t, f = hidden.size()
        hidden = hidden.transpose(1, 2).contiguous().view(b, t, c * f)
        hidden = self.out(hidden)
        if x_mask is None:
            return hidden, None
        # Subsample the time mask with the same kernel/stride schedule.
        for kernel, stride in zip(self.kernels, self.strides):
            x_mask = x_mask[:, :, : -kernel + 1 : stride]
        return hidden, x_mask
| 1,899 | 29.15873 | 70 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/repeat.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Repeat the same layer definition."""
import torch
class MultiSequential(torch.nn.Sequential):
    """Multi-input multi-output torch.nn.Sequential."""

    def __init__(self, *args, layer_drop_rate=0.0):
        """Initialize MultiSequential with layer_drop.

        Args:
            layer_drop_rate (float): Probability of dropping out each fn (layer).
        """
        super().__init__(*args)
        self.layer_drop_rate = layer_drop_rate

    def forward(self, *args):
        """Pass the inputs through every layer, dropping layers stochastically."""
        # One uniform draw per layer; during training a layer is skipped
        # when its draw falls below the drop rate. At eval time all
        # layers always run (the draws are still consumed).
        draws = torch.empty(len(self)).uniform_()
        for idx, module in enumerate(self):
            keep = (not self.training) or (draws[idx] >= self.layer_drop_rate)
            if keep:
                args = module(*args)
        return args
def repeat(N, fn, layer_drop_rate=0.0):
    """Repeat module N times.

    Args:
        N (int): Number of repeat time.
        fn (Callable): Function to generate module; called with the layer index.
        layer_drop_rate (float): Probability of dropping out each fn (layer).

    Returns:
        MultiSequential: Repeated model instance.
    """
    modules = [fn(idx) for idx in range(N)]
    return MultiSequential(*modules, layer_drop_rate=layer_drop_rate)
| 1,299 | 26.659574 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/decoder.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Decoder definition."""
import logging
from typing import Any, List, Tuple
import torch
from espnet.nets.pytorch_backend.nets_utils import rename_state_dict
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder_layer import DecoderLayer
from espnet.nets.pytorch_backend.transformer.dynamic_conv import DynamicConvolution
from espnet.nets.pytorch_backend.transformer.dynamic_conv2d import DynamicConvolution2D
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.lightconv import LightweightConvolution
from espnet.nets.pytorch_backend.transformer.lightconv2d import LightweightConvolution2D
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (
PositionwiseFeedForward,
)
from espnet.nets.pytorch_backend.transformer.repeat import repeat
from espnet.nets.scorer_interface import BatchScorerInterface
def _pre_hook(
    state_dict,
    prefix,
    local_metadata,
    strict,
    missing_keys,
    unexpected_keys,
    error_msgs,
):
    """Rename legacy parameter keys when loading an old checkpoint.

    Old checkpoints store the final normalization under ``output_norm``;
    current code calls it ``after_norm``.
    """
    # https://github.com/espnet/espnet/commit/3d422f6de8d4f03673b89e1caef698745ec749ea#diff-bffb1396f038b317b2b64dd96e6d3563
    old_key = prefix + "output_norm."
    new_key = prefix + "after_norm."
    rename_state_dict(old_key, new_key, state_dict)
class Decoder(BatchScorerInterface, torch.nn.Module):
    """Transfomer decoder module.

    Args:
        odim (int): Output diminsion.
        selfattention_layer_type (str): Self-attention layer type
            ("selfattn", "lightconv", "lightconv2d", "dynamicconv",
            or "dynamicconv2d").
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        conv_wshare (int): The number of kernel of convolution. Only used in
            selfattention_layer_type == "lightconv*" or "dynamiconv*".
        conv_kernel_length (Union[int, str]): Kernel size str of convolution
            (e.g. 71_71_71_71_71_71). Only used in selfattention_layer_type
            == "lightconv*" or "dynamiconv*".
        conv_usebias (bool): Whether to use bias in convolution. Only used in
            selfattention_layer_type == "lightconv*" or "dynamiconv*".
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        self_attention_dropout_rate (float): Dropout rate in self-attention.
        src_attention_dropout_rate (float): Dropout rate in source-attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        use_output_layer (bool): Whether to use output layer.
        pos_enc_class (torch.nn.Module): Positional encoding module class.
            `PositionalEncoding `or `ScaledPositionalEncoding`
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied. i.e. x -> x + att(x)
    """

    def __init__(
        self,
        odim,
        selfattention_layer_type="selfattn",
        attention_dim=256,
        attention_heads=4,
        conv_wshare=4,
        conv_kernel_length=11,
        conv_usebias=False,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        self_attention_dropout_rate=0.0,
        src_attention_dropout_rate=0.0,
        input_layer="embed",
        use_output_layer=True,
        pos_enc_class=PositionalEncoding,
        normalize_before=True,
        concat_after=False,
    ):
        """Construct an Decoder object."""
        torch.nn.Module.__init__(self)
        self._register_load_state_dict_pre_hook(_pre_hook)
        if input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(odim, attention_dim),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(odim, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer, pos_enc_class(attention_dim, positional_dropout_rate)
            )
        else:
            raise NotImplementedError("only `embed` or torch.nn.Module is supported.")
        self.normalize_before = normalize_before

        # self-attention module definition.
        # Each branch selects the layer class and builds one argument
        # tuple per decoder block.
        if selfattention_layer_type == "selfattn":
            logging.info("decoder self-attention layer type = self-attention")
            decoder_selfattn_layer = MultiHeadedAttention
            decoder_selfattn_layer_args = [
                (
                    attention_heads,
                    attention_dim,
                    self_attention_dropout_rate,
                )
            ] * num_blocks
        elif selfattention_layer_type == "lightconv":
            logging.info("decoder self-attention layer type = lightweight convolution")
            decoder_selfattn_layer = LightweightConvolution
            decoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    self_attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    True,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        elif selfattention_layer_type == "lightconv2d":
            logging.info(
                "decoder self-attention layer "
                "type = lightweight convolution 2-dimensional"
            )
            decoder_selfattn_layer = LightweightConvolution2D
            decoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    self_attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    True,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        elif selfattention_layer_type == "dynamicconv":
            logging.info("decoder self-attention layer type = dynamic convolution")
            decoder_selfattn_layer = DynamicConvolution
            decoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    self_attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    True,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        elif selfattention_layer_type == "dynamicconv2d":
            logging.info(
                "decoder self-attention layer type = dynamic convolution 2-dimensional"
            )
            decoder_selfattn_layer = DynamicConvolution2D
            decoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    self_attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    True,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        else:
            # Previously an unknown type fell through and crashed later
            # with an opaque NameError on ``decoder_selfattn_layer``.
            raise ValueError(
                "unknown decoder self-attention layer type: "
                + str(selfattention_layer_type)
            )
        self.decoders = repeat(
            num_blocks,
            lambda lnum: DecoderLayer(
                attention_dim,
                decoder_selfattn_layer(*decoder_selfattn_layer_args[lnum]),
                MultiHeadedAttention(
                    attention_heads, attention_dim, src_attention_dropout_rate
                ),
                PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        self.selfattention_layer_type = selfattention_layer_type
        if self.normalize_before:
            self.after_norm = LayerNorm(attention_dim)
        if use_output_layer:
            self.output_layer = torch.nn.Linear(attention_dim, odim)
        else:
            self.output_layer = None

    def forward(self, tgt, tgt_mask, memory, memory_mask):
        """Forward decoder.

        Args:
            tgt (torch.Tensor): Input token ids, int64 (#batch, maxlen_out) if
                input_layer == "embed". In the other case, input tensor
                (#batch, maxlen_out, odim).
            tgt_mask (torch.Tensor): Input token mask (#batch, maxlen_out).
                dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+
                (include 1.2).
            memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, feat).
            memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).
                dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+
                (include 1.2).

        Returns:
            torch.Tensor: Decoded token score before softmax (#batch, maxlen_out, odim)
                if use_output_layer is True. In the other case, final block outputs
                (#batch, maxlen_out, attention_dim).
            torch.Tensor: Score mask before softmax (#batch, maxlen_out).
        """
        x = self.embed(tgt)
        x, tgt_mask, memory, memory_mask = self.decoders(
            x, tgt_mask, memory, memory_mask
        )
        if self.normalize_before:
            x = self.after_norm(x)
        if self.output_layer is not None:
            x = self.output_layer(x)
        return x, tgt_mask

    def forward_one_step(self, tgt, tgt_mask, memory, cache=None):
        """Forward one step.

        Args:
            tgt (torch.Tensor): Input token ids, int64 (#batch, maxlen_out).
            tgt_mask (torch.Tensor): Input token mask (#batch, maxlen_out).
                dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+
                (include 1.2).
            memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, feat).
            cache (List[torch.Tensor]): List of cached tensors.
                Each tensor shape should be (#batch, maxlen_out - 1, size).

        Returns:
            torch.Tensor: Output tensor (batch, maxlen_out, odim).
            List[torch.Tensor]: List of cache tensors of each decoder layer.
        """
        x = self.embed(tgt)
        if cache is None:
            cache = [None] * len(self.decoders)
        new_cache = []
        for c, decoder in zip(cache, self.decoders):
            x, tgt_mask, memory, _ = decoder(x, tgt_mask, memory, None, cache=c)
            new_cache.append(x)

        if self.normalize_before:
            y = self.after_norm(x[:, -1])
        else:
            y = x[:, -1]
        if self.output_layer is not None:
            y = torch.log_softmax(self.output_layer(y), dim=-1)

        return y, new_cache

    # beam search API (see ScorerInterface)
    def score(self, ys, state, x):
        """Score."""
        ys_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)
        if self.selfattention_layer_type != "selfattn":
            # TODO(karita): implement cache
            logging.warning(
                f"{self.selfattention_layer_type} does not support cached decoding."
            )
            state = None
        logp, state = self.forward_one_step(
            ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state
        )
        return logp.squeeze(0), state

    # batch beam search API (see BatchScorerInterface)
    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch (required).

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.
        """
        # merge states
        n_batch = len(ys)
        n_layers = len(self.decoders)
        if states[0] is None:
            batch_state = None
        else:
            # transpose state of [batch, layer] into [layer, batch]
            batch_state = [
                torch.stack([states[b][i] for b in range(n_batch)])
                for i in range(n_layers)
            ]

        # batch decoding
        ys_mask = subsequent_mask(ys.size(-1), device=xs.device).unsqueeze(0)
        logp, states = self.forward_one_step(ys, ys_mask, xs, cache=batch_state)

        # transpose state of [layer, batch] into [batch, layer]
        state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
        return logp, state_list
| 13,734 | 39.756677 | 124 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/plot.py | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import logging
import os
import numpy
from espnet.asr import asr_utils
def _plot_and_save_attention(att_w, filename, xtokens=None, ytokens=None):
    """Build a matplotlib figure visualizing one utterance's attention heads.

    :param numpy.ndarray att_w: attention weights (head, output_length, input_length)
    :param str filename: output path; its directory is created if missing
    :param list xtokens: optional token labels for the x axis
    :param list ytokens: optional token labels for the y axis
    :return: matplotlib Figure with one subplot per head (not yet saved)
    """
    import matplotlib

    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator

    d = os.path.dirname(filename)
    if not os.path.exists(d):
        os.makedirs(d)
    # One subplot per attention head, laid out horizontally.
    w, h = plt.figaspect(1.0 / len(att_w))
    fig = plt.Figure(figsize=(w * 2, h * 2))
    axes = fig.subplots(1, len(att_w))
    if len(att_w) == 1:
        # subplots() returns a bare Axes (not a list) for a single head.
        axes = [axes]
    for ax, aw in zip(axes, att_w):
        # plt.subplot(1, len(att_w), h)
        ax.imshow(aw.astype(numpy.float32), aspect="auto")
        ax.set_xlabel("Input")
        ax.set_ylabel("Output")
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        # Labels for major ticks
        if xtokens is not None:
            ax.set_xticks(numpy.linspace(0, len(xtokens), len(xtokens) + 1))
            ax.set_xticks(numpy.linspace(0, len(xtokens), 1), minor=True)
            ax.set_xticklabels(xtokens + [""], rotation=40)
        if ytokens is not None:
            ax.set_yticks(numpy.linspace(0, len(ytokens), len(ytokens) + 1))
            ax.set_yticks(numpy.linspace(0, len(ytokens), 1), minor=True)
            ax.set_yticklabels(ytokens + [""])
    fig.tight_layout()
    return fig
def savefig(plot, filename):
    """Save a matplotlib figure to ``filename`` and clear the pyplot state."""
    import matplotlib

    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    plot.savefig(filename)
    # Clear pyplot's current figure so repeated calls do not accumulate.
    plt.clf()
def plot_multi_head_attention(
    data,
    uttid_list,
    attn_dict,
    outdir,
    suffix="png",
    savefn=savefig,
    ikey="input",
    iaxis=0,
    okey="output",
    oaxis=0,
    subsampling_factor=4,
):
    """Plot multi head attentions.

    :param dict data: utts info from json file
    :param List uttid_list: utterance IDs
    :param dict[str, torch.Tensor] attn_dict: multi head attention dict.
        values should be torch.Tensor (head, input_length, output_length)
    :param str outdir: dir to save fig
    :param str suffix: filename suffix including image type (e.g., png)
    :param savefn: function to save
    :param str ikey: key to access input
    :param int iaxis: dimension to access input
    :param str okey: key to access output
    :param int oaxis: dimension to access output
    :param subsampling_factor: subsampling factor in encoder
    """
    for name, att_ws in attn_dict.items():
        for idx, att_w in enumerate(att_ws):
            data_i = data[uttid_list[idx]]
            filename = "%s/%s.%s.%s" % (outdir, uttid_list[idx], name, suffix)
            dec_len = int(data_i[okey][oaxis]["shape"][0]) + 1  # +1 for <eos>
            enc_len = int(data_i[ikey][iaxis]["shape"][0])
            # MT inputs carry a "token" entry; speech inputs do not.
            is_mt = "token" in data_i[ikey][iaxis].keys()
            # for ASR/ST
            if not is_mt:
                enc_len //= subsampling_factor
            xtokens, ytokens = None, None
            # Crop the attention map to the true (unpadded) lengths; the
            # crop pattern depends on which attention this is.
            if "encoder" in name:
                att_w = att_w[:, :enc_len, :enc_len]
                # for MT
                if is_mt:
                    xtokens = data_i[ikey][iaxis]["token"].split()
                    ytokens = xtokens[:]
            elif "decoder" in name:
                if "self" in name:
                    # self-attention
                    att_w = att_w[:, :dec_len, :dec_len]
                    if "token" in data_i[okey][oaxis].keys():
                        ytokens = data_i[okey][oaxis]["token"].split() + ["<eos>"]
                        xtokens = ["<sos>"] + data_i[okey][oaxis]["token"].split()
                else:
                    # cross-attention
                    att_w = att_w[:, :dec_len, :enc_len]
                    if "token" in data_i[okey][oaxis].keys():
                        ytokens = data_i[okey][oaxis]["token"].split() + ["<eos>"]
                        # for MT
                        if is_mt:
                            xtokens = data_i[ikey][iaxis]["token"].split()
            else:
                logging.warning("unknown name for shaping attention")
            fig = _plot_and_save_attention(att_w, filename, xtokens, ytokens)
            savefn(fig, filename)
class PlotAttentionReport(asr_utils.PlotAttentionReport):
    """Transformer variant of the attention plot reporter.

    Relies on attributes set by the parent ``asr_utils.PlotAttentionReport``
    (``ikey``, ``iaxis``, ``okey``, ``oaxis``, ``factor``, ``data``,
    ``data_dict``, ``transform``, ``converter``, ``device``, ``att_vis_fn``,
    ``outdir``).
    """

    def plotfn(self, *args, **kwargs):
        """Plot multi-head attentions using this reporter's key/axis settings."""
        kwargs["ikey"] = self.ikey
        kwargs["iaxis"] = self.iaxis
        kwargs["okey"] = self.okey
        kwargs["oaxis"] = self.oaxis
        kwargs["subsampling_factor"] = self.factor
        plot_multi_head_attention(*args, **kwargs)

    def __call__(self, trainer):
        """Plot and save attention figures for the current epoch (chainer extension API)."""
        attn_dict, uttid_list = self.get_attention_weights()
        suffix = "ep.{.updater.epoch}.png".format(trainer)
        self.plotfn(self.data_dict, uttid_list, attn_dict, self.outdir, suffix, savefig)

    def get_attention_weights(self):
        """Run the attention-visualization function on the held-out batch.

        :return: attention weights dict and the corresponding utterance IDs
        """
        return_batch, uttid_list = self.transform(self.data, return_uttid=True)
        batch = self.converter([return_batch], self.device)
        # att_vis_fn may take positional or keyword batch arguments
        # depending on the model's converter.
        if isinstance(batch, tuple):
            att_ws = self.att_vis_fn(*batch)
        elif isinstance(batch, dict):
            att_ws = self.att_vis_fn(**batch)
        return att_ws, uttid_list

    def log_attentions(self, logger, step):
        """Add attention figures to a tensorboard-style logger instead of files."""

        def log_fig(plot, filename):
            # Saves into the logger rather than to disk; the filename is
            # only used as the figure tag.
            import matplotlib

            matplotlib.use("Agg")
            import matplotlib.pyplot as plt

            logger.add_figure(os.path.basename(filename), plot, step)
            plt.clf()

        attn_dict, uttid_list = self.get_attention_weights()
        self.plotfn(self.data_dict, uttid_list, attn_dict, self.outdir, "", log_fig)
| 5,718 | 34.968553 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/layer_norm.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Layer normalization module."""
import torch
class LayerNorm(torch.nn.LayerNorm):
    """Layer normalization module.

    Args:
        nout (int): Output dim size.
        dim (int): Dimension to be normalized.
    """

    def __init__(self, nout, dim=-1):
        """Construct an LayerNorm object."""
        super().__init__(nout, eps=1e-12)
        self.dim = dim

    def forward(self, x):
        """Apply layer normalization along ``self.dim``.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Normalized tensor.
        """
        if self.dim == -1:
            return super().forward(x)
        # torch.nn.LayerNorm normalizes the trailing dimension, so move
        # the target axis there, normalize, and move it back.
        moved = x.transpose(self.dim, -1)
        normalized = super().forward(moved)
        return normalized.transpose(self.dim, -1)
| 958 | 21.302326 | 59 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/add_sos_eos.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Unility functions for Transformer."""
import torch
def add_sos_eos(ys_pad, sos, eos, ignore_id):
    """Add <sos> and <eos> labels.

    :param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
    :param int sos: index of <sos>
    :param int eos: index of <eos>
    :param int ignore_id: index of padding
    :return: padded tensor (B, Lmax + 1) with <sos> prepended, padded with eos
    :rtype: torch.Tensor
    :return: padded tensor (B, Lmax + 1) with <eos> appended, padded with ignore_id
    :rtype: torch.Tensor
    """
    # pad_sequence is torch's standard equivalent of espnet's pad_list,
    # which keeps this function free of project-internal dependencies.
    from torch.nn.utils.rnn import pad_sequence

    _sos = ys_pad.new([sos])
    _eos = ys_pad.new([eos])
    # Strip the ignore_id padding from each sequence before re-padding.
    ys = [y[y != ignore_id] for y in ys_pad]  # parse padded ys
    ys_in = [torch.cat([_sos, y], dim=0) for y in ys]
    ys_out = [torch.cat([y, _eos], dim=0) for y in ys]
    return (
        pad_sequence(ys_in, batch_first=True, padding_value=eos),
        pad_sequence(ys_out, batch_first=True, padding_value=ignore_id),
    )
| 957 | 28.9375 | 74 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/attention.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Multi-Head Attention layer definition."""
import math
import torch
from torch import nn
class MultiHeadedAttention(nn.Module):
    """Multi-Head Attention layer.

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, n_head, n_feat, dropout_rate):
        """Construct an MultiHeadedAttention object."""
        super().__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k.
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        # Most recent attention weights, kept for visualization.
        self.attn = None
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward_qkv(self, query, key, value):
        """Project query, key and value and split them into heads.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).

        Returns:
            torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
            torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
        """
        n_batch = query.size(0)

        def split_heads(linear, x):
            # (batch, time, n_feat) -> (batch, head, time, d_k)
            return linear(x).view(n_batch, -1, self.h, self.d_k).transpose(1, 2)

        q = split_heads(self.linear_q, query)
        k = split_heads(self.linear_k, key)
        v = split_heads(self.linear_v, value)
        return q, k, v

    def forward_attention(self, value, scores, mask):
        """Compute attention context vector.

        Args:
            value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).

        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).
        """
        n_batch = value.size(0)
        if mask is None:
            self.attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
        else:
            inverted = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            # Push masked positions to -inf before softmax, then zero the
            # resulting weights so fully-masked rows contribute nothing.
            scores = scores.masked_fill(inverted, torch.finfo(scores.dtype).min)
            self.attn = torch.softmax(scores, dim=-1).masked_fill(inverted, 0.0)
        context = torch.matmul(self.dropout(self.attn), value)  # (batch, head, time1, d_k)
        context = (
            context.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
        )  # (batch, time1, d_model)
        return self.linear_out(context)

    def forward(self, query, key, value, mask):
        """Compute scaled dot product attention.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
        """
        q, k, v = self.forward_qkv(query, key, value)
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        return self.forward_attention(v, scores, mask)
class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):
    """Multi-Head Attention layer with relative position encoding (old version).

    Details can be found in https://github.com/espnet/espnet/pull/2816.

    Paper: https://arxiv.org/abs/1901.02860

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
    """

    def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):
        """Construct an RelPositionMultiHeadedAttention object."""
        super().__init__(n_head, n_feat, dropout_rate)
        self.zero_triu = zero_triu
        # linear transformation for positional encoding
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # these two learnable bias are used in matrix c and matrix d
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
        self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
        torch.nn.init.xavier_uniform_(self.pos_bias_u)
        torch.nn.init.xavier_uniform_(self.pos_bias_v)

    def rel_shift(self, x):
        """Compute relative positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, head, time1, time2).

        Returns:
            torch.Tensor: Output tensor.
        """
        # Pad one column of zeros, then reinterpret the layout so each row
        # is shifted by its index (the standard Transformer-XL trick).
        zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=-1)

        x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
        x = x_padded[:, :, 1:].view_as(x)

        if self.zero_triu:
            # Fix: allocate the mask on the same device as ``x``.  The
            # original created it on CPU, which raises a device-mismatch
            # error for CUDA inputs; the non-legacy
            # RelPositionMultiHeadedAttention already does this.
            ones = torch.ones((x.size(2), x.size(3)), device=x.device)
            x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]

        return x

    def forward(self, query, key, value, pos_emb, mask):
        """Compute 'Scaled Dot Product Attention' with rel. positional encoding.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
        """
        q, k, v = self.forward_qkv(query, key, value)
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)

        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, time1, d_k)

        # (batch, head, time1, d_k)
        q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
        # (batch, head, time1, d_k)
        q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)

        # compute attention score
        # first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # (batch, head, time1, time2)
        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))

        # compute matrix b and matrix d
        # (batch, head, time1, time1)
        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
        matrix_bd = self.rel_shift(matrix_bd)

        scores = (matrix_ac + matrix_bd) / math.sqrt(
            self.d_k
        )  # (batch, head, time1, time2)

        return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
    """Multi-Head Attention layer with relative position encoding (new implementation).

    Details can be found in https://github.com/espnet/espnet/pull/2816.

    Paper: https://arxiv.org/abs/1901.02860

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
    """

    def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):
        """Construct an RelPositionMultiHeadedAttention object."""
        super().__init__(n_head, n_feat, dropout_rate)
        self.zero_triu = zero_triu
        # linear transformation for positional encoding
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # these two learnable bias are used in matrix c and matrix d
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
        self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
        torch.nn.init.xavier_uniform_(self.pos_bias_u)
        torch.nn.init.xavier_uniform_(self.pos_bias_v)

    def rel_shift(self, x):
        """Compute relative positional encoding.

        Shifts each row of ``x`` by its index (Transformer-XL trick) by
        padding a zero column and reinterpreting the memory layout, then
        truncates to the valid relative positions.

        Args:
            x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
                time1 means the length of query vector.

        Returns:
            torch.Tensor: Output tensor.
        """
        zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=-1)
        x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
        x = x_padded[:, :, 1:].view_as(x)[
            :, :, :, : x.size(-1) // 2 + 1
        ]  # only keep the positions from 0 to time2
        if self.zero_triu:
            ones = torch.ones((x.size(2), x.size(3)), device=x.device)
            x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
        return x

    def forward(self, query, key, value, pos_emb, mask):
        """Compute 'Scaled Dot Product Attention' with rel. positional encoding.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            pos_emb (torch.Tensor): Positional embedding tensor
                (#batch, 2*time1-1, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
        """
        q, k, v = self.forward_qkv(query, key, value)
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)
        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, 2*time1-1, d_k)
        # (batch, head, time1, d_k)
        q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
        # (batch, head, time1, d_k)
        q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
        # compute attention score
        # first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # (batch, head, time1, time2)
        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
        # compute matrix b and matrix d
        # (batch, head, time1, 2*time1-1)
        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
        matrix_bd = self.rel_shift(matrix_bd)
        scores = (matrix_ac + matrix_bd) / math.sqrt(
            self.d_k
        )  # (batch, head, time1, time2)
        return self.forward_attention(v, scores, mask)
| 11,646 | 37.062092 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/decoder_layer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Decoder self-attention layer definition."""
import torch
from torch import nn
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
class DecoderLayer(nn.Module):
"""Single decoder layer module.
Args:
size (int): Input dimension.
self_attn (torch.nn.Module): Self-attention module instance.
`MultiHeadedAttention` instance can be used as the argument.
src_attn (torch.nn.Module): Self-attention module instance.
`MultiHeadedAttention` instance can be used as the argument.
feed_forward (torch.nn.Module): Feed-forward module instance.
`PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
can be used as the argument.
dropout_rate (float): Dropout rate.
normalize_before (bool): Whether to use layer_norm before the first block.
concat_after (bool): Whether to concat attention layer's input and output.
if True, additional linear will be applied.
i.e. x -> x + linear(concat(x, att(x)))
if False, no additional linear will be applied. i.e. x -> x + att(x)
"""
    def __init__(
        self,
        size,
        self_attn,
        src_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
    ):
        """Construct an DecoderLayer object.

        Args:
            size (int): Input dimension.
            self_attn (torch.nn.Module): Self-attention module instance.
            src_attn (torch.nn.Module): Source-attention module instance.
            feed_forward (torch.nn.Module): Feed-forward module instance.
            dropout_rate (float): Dropout rate.
            normalize_before (bool): Whether to apply layer_norm before each sub-block.
            concat_after (bool): Whether to concat attention layer's input and output
                and project them back with a linear layer.
        """
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        # One LayerNorm per sub-block: self-attn, src-attn, feed-forward.
        self.norm1 = LayerNorm(size)
        self.norm2 = LayerNorm(size)
        self.norm3 = LayerNorm(size)
        self.dropout = nn.Dropout(dropout_rate)
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        if self.concat_after:
            # Projections used when concatenating attention input/output.
            self.concat_linear1 = nn.Linear(size + size, size)
            self.concat_linear2 = nn.Linear(size + size, size)
def forward(self, tgt, tgt_mask, memory, memory_mask, cache=None):
"""Compute decoded features.
Args:
tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
tgt_mask (torch.Tensor): Mask for input tensor (#batch, maxlen_out).
memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, size).
memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).
cache (List[torch.Tensor]): List of cached tensors.
Each tensor shape should be (#batch, maxlen_out - 1, size).
Returns:
torch.Tensor: Output tensor(#batch, maxlen_out, size).
torch.Tensor: Mask for output tensor (#batch, maxlen_out).
torch.Tensor: Encoded memory (#batch, maxlen_in, size).
torch.Tensor: Encoded memory mask (#batch, maxlen_in).
"""
residual = tgt
if self.normalize_before:
tgt = self.norm1(tgt)
if cache is None:
tgt_q = tgt
tgt_q_mask = tgt_mask
else:
# compute only the last frame query keeping dim: max_time_out -> 1
assert cache.shape == (
tgt.shape[0],
tgt.shape[1] - 1,
self.size,
), f"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
tgt_q = tgt[:, -1:, :]
residual = residual[:, -1:, :]
tgt_q_mask = None
if tgt_mask is not None:
tgt_q_mask = tgt_mask[:, -1:, :]
if self.concat_after:
tgt_concat = torch.cat(
(tgt_q, self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)), dim=-1
)
x = residual + self.concat_linear1(tgt_concat)
else:
x = residual + self.dropout(self.self_attn(tgt_q, tgt, tgt, tgt_q_mask))
if not self.normalize_before:
x = self.norm1(x)
residual = x
if self.normalize_before:
x = self.norm2(x)
if self.concat_after:
x_concat = torch.cat(
(x, self.src_attn(x, memory, memory, memory_mask)), dim=-1
)
x = residual + self.concat_linear2(x_concat)
else:
x = residual + self.dropout(self.src_attn(x, memory, memory, memory_mask))
if not self.normalize_before:
x = self.norm2(x)
residual = x
if self.normalize_before:
x = self.norm3(x)
x = residual + self.dropout(self.feed_forward(x))
if not self.normalize_before:
x = self.norm3(x)
if cache is not None:
x = torch.cat([cache, x], dim=1)
return x, tgt_mask, memory, memory_mask
| 4,877 | 35.133333 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/dynamic_conv2d.py | """Dynamic 2-Dimensional Convolution module."""
import numpy
import torch
import torch.nn.functional as F
from torch import nn
MIN_VALUE = float(numpy.finfo(numpy.float32).min)
class DynamicConvolution2D(nn.Module):
    """Dynamic 2-Dimensional Convolution layer.

    This implementation is based on
    https://github.com/pytorch/fairseq/tree/master/fairseq

    Args:
        wshare (int): the number of kernel of convolution
        n_feat (int): the number of features
        dropout_rate (float): dropout_rate
        kernel_size (int): kernel size (length)
        use_kernel_mask (bool): Use causal mask or not for convolution kernel
        use_bias (bool): Use bias term or not.

    """

    def __init__(
        self,
        wshare,
        n_feat,
        dropout_rate,
        kernel_size,
        use_kernel_mask=False,
        use_bias=False,
    ):
        """Construct Dynamic 2-Dimensional Convolution layer."""
        super(DynamicConvolution2D, self).__init__()

        assert n_feat % wshare == 0
        self.wshare = wshare
        self.use_kernel_mask = use_kernel_mask
        self.dropout_rate = dropout_rate
        self.kernel_size = kernel_size
        self.padding_size = int(kernel_size / 2)
        self.attn_t = None  # time-axis kernel weights of last forward (for plots)
        self.attn_f = None  # freq-axis kernel weights of last forward (for plots)

        # linear -> GLU -- -> lightconv -> linear
        #        \        /
        #         Linear
        self.linear1 = nn.Linear(n_feat, n_feat * 2)
        self.linear2 = nn.Linear(n_feat * 2, n_feat)
        self.linear_weight = nn.Linear(n_feat, self.wshare * 1 * kernel_size)
        # FIX: use the in-place initializer; ``nn.init.xavier_uniform`` is a
        # deprecated alias that emits a warning on recent PyTorch versions.
        nn.init.xavier_uniform_(self.linear_weight.weight)
        self.linear_weight_f = nn.Linear(n_feat, kernel_size)
        nn.init.xavier_uniform_(self.linear_weight_f.weight)
        self.act = nn.GLU()

        # dynamic conv related
        self.use_bias = use_bias
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_feat))

    def forward(self, query, key, value, mask):
        """Forward of 'Dynamic 2-Dimensional Convolution'.

        This function takes query, key and value but uses only query.
        This is just for compatibility with self-attention layer (attention.py)

        Args:
            query (torch.Tensor): (batch, time1, d_model) input tensor
            key (torch.Tensor): (batch, time2, d_model) NOT USED
            value (torch.Tensor): (batch, time2, d_model) NOT USED
            mask (torch.Tensor): (batch, time1, time2) mask

        Return:
            x (torch.Tensor): (batch, time1, d_model) output

        """
        # linear -> GLU -- -> lightconv -> linear
        #        \        /
        #         Linear
        x = query
        B, T, C = x.size()
        H = self.wshare
        k = self.kernel_size

        # first linear layer
        x = self.linear1(x)

        # GLU activation
        x = self.act(x)

        # convolution along the frequency axis (one depthwise kernel per frame)
        weight_f = self.linear_weight_f(x).view(B * T, 1, k)  # B x T x k
        self.attn_f = weight_f.view(B, T, k).unsqueeze(1)
        xf = F.conv1d(
            x.view(1, B * T, C), weight_f, padding=self.padding_size, groups=B * T
        )
        xf = xf.view(B, T, C)

        # get kernel of convolution
        weight = self.linear_weight(x)  # B x T x kH
        weight = F.dropout(weight, self.dropout_rate, training=self.training)
        weight = weight.view(B, T, H, k).transpose(1, 2).contiguous()  # B x H x T x k
        # Build a (T, T+k-1) band matrix per head so the per-step kernels can
        # be applied with a single bmm; unused entries stay -inf and become 0
        # after the softmax.  (Allocated directly on the input device instead
        # of CPU-then-.to() as before.)
        weight_new = torch.zeros(
            B * H * T * (T + k - 1), dtype=weight.dtype, device=x.device
        )
        weight_new = weight_new.view(B, H, T, T + k - 1).fill_(float("-inf"))
        weight_new.as_strided(
            (B, H, T, k), ((T + k - 1) * T * H, (T + k - 1) * T, T + k, 1)
        ).copy_(weight)
        weight_new = weight_new.narrow(-1, int((k - 1) / 2), T)  # B x H x T x T(k)
        if self.use_kernel_mask:
            kernel_mask = torch.tril(torch.ones(T, T, device=x.device)).unsqueeze(0)
            weight_new = weight_new.masked_fill(kernel_mask == 0.0, float("-inf"))
        weight_new = F.softmax(weight_new, dim=-1)
        self.attn_t = weight_new
        weight_new = weight_new.view(B * H, T, T)

        # convolution along the time axis
        x = x.transpose(1, 2).contiguous()  # B x C x T
        x = x.view(B * H, int(C / H), T).transpose(1, 2)
        x = torch.bmm(weight_new, x)
        x = x.transpose(1, 2).contiguous().view(B, C, T)
        if self.use_bias:
            x = x + self.bias.view(1, -1, 1)
        x = x.transpose(1, 2)  # B x T x C
        x = torch.cat((x, xf), -1)  # B x T x Cx2

        if mask is not None and not self.use_kernel_mask:
            mask = mask.transpose(-1, -2)
            x = x.masked_fill(mask == 0, 0.0)

        # second linear layer
        x = self.linear2(x)
        return x
| 4,862 | 34.23913 | 86 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/mask.py | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Mask module."""
import torch
def subsequent_mask(size, device="cpu", dtype=torch.bool):
    """Create mask for subsequent steps (size, size).

    Position (i, j) is allowed (truthy) iff j <= i, i.e. each step may
    attend to itself and to earlier steps only.

    :param int size: size of mask
    :param str device: "cpu" or "cuda" or torch.Tensor.device
    :param torch.dtype dtype: result dtype
    :rtype: torch.Tensor
    >>> subsequent_mask(3)
    [[1, 0, 0],
     [1, 1, 0],
     [1, 1, 1]]
    """
    steps = torch.arange(size, device=device)
    # lower-triangular comparison, then cast to the requested dtype
    return (steps.unsqueeze(0) <= steps.unsqueeze(1)).to(dtype)
def target_mask(ys_in_pad, ignore_id):
    """Create mask for decoder self-attention.

    Combines the padding mask (real token vs. ``ignore_id``) with the
    causal ``subsequent_mask`` so attention sees neither padding nor
    future positions.

    :param torch.Tensor ys_in_pad: batch of padded target sequences (B, Lmax)
    :param int ignore_id: index of padding
    :rtype: torch.Tensor (B, Lmax, Lmax)
    """
    # True where the token is real, False where it is padding: (B, Lmax)
    nonpad = ys_in_pad != ignore_id
    # causal mask broadcast over the batch: (1, Lmax, Lmax)
    causal = subsequent_mask(nonpad.size(-1), device=nonpad.device).unsqueeze(0)
    return nonpad.unsqueeze(-2) & causal
| 1,037 | 27.833333 | 77 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/dynamic_conv.py | """Dynamic Convolution module."""
import numpy
import torch
import torch.nn.functional as F
from torch import nn
MIN_VALUE = float(numpy.finfo(numpy.float32).min)
class DynamicConvolution(nn.Module):
    """Dynamic Convolution layer.

    This implementation is based on
    https://github.com/pytorch/fairseq/tree/master/fairseq

    Args:
        wshare (int): the number of kernel of convolution
        n_feat (int): the number of features
        dropout_rate (float): dropout_rate
        kernel_size (int): kernel size (length)
        use_kernel_mask (bool): Use causal mask or not for convolution kernel
        use_bias (bool): Use bias term or not.

    """

    def __init__(
        self,
        wshare,
        n_feat,
        dropout_rate,
        kernel_size,
        use_kernel_mask=False,
        use_bias=False,
    ):
        """Construct Dynamic Convolution layer."""
        super(DynamicConvolution, self).__init__()

        assert n_feat % wshare == 0
        self.wshare = wshare
        self.use_kernel_mask = use_kernel_mask
        self.dropout_rate = dropout_rate
        self.kernel_size = kernel_size
        self.attn = None  # kernel weights of the last forward (for visualization)

        # linear -> GLU -- -> lightconv -> linear
        #        \        /
        #         Linear
        self.linear1 = nn.Linear(n_feat, n_feat * 2)
        self.linear2 = nn.Linear(n_feat, n_feat)
        self.linear_weight = nn.Linear(n_feat, self.wshare * 1 * kernel_size)
        # FIX: use the in-place initializer; ``nn.init.xavier_uniform`` is a
        # deprecated alias that emits a warning on recent PyTorch versions.
        nn.init.xavier_uniform_(self.linear_weight.weight)
        self.act = nn.GLU()

        # dynamic conv related
        self.use_bias = use_bias
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_feat))

    def forward(self, query, key, value, mask):
        """Forward of 'Dynamic Convolution'.

        This function takes query, key and value but uses only query.
        This is just for compatibility with self-attention layer (attention.py)

        Args:
            query (torch.Tensor): (batch, time1, d_model) input tensor
            key (torch.Tensor): (batch, time2, d_model) NOT USED
            value (torch.Tensor): (batch, time2, d_model) NOT USED
            mask (torch.Tensor): (batch, time1, time2) mask

        Return:
            x (torch.Tensor): (batch, time1, d_model) output

        """
        # linear -> GLU -- -> lightconv -> linear
        #        \        /
        #         Linear
        x = query
        B, T, C = x.size()
        H = self.wshare
        k = self.kernel_size

        # first linear layer
        x = self.linear1(x)

        # GLU activation
        x = self.act(x)

        # get kernel of convolution
        weight = self.linear_weight(x)  # B x T x kH
        weight = F.dropout(weight, self.dropout_rate, training=self.training)
        weight = weight.view(B, T, H, k).transpose(1, 2).contiguous()  # B x H x T x k
        # Build a (T, T+k-1) band matrix per head so the per-step kernels can
        # be applied with a single bmm; unused entries stay -inf and become 0
        # after the softmax.  (Allocated directly on the input device instead
        # of CPU-then-.to() as before.)
        weight_new = torch.zeros(
            B * H * T * (T + k - 1), dtype=weight.dtype, device=x.device
        )
        weight_new = weight_new.view(B, H, T, T + k - 1).fill_(float("-inf"))
        weight_new.as_strided(
            (B, H, T, k), ((T + k - 1) * T * H, (T + k - 1) * T, T + k, 1)
        ).copy_(weight)
        weight_new = weight_new.narrow(-1, int((k - 1) / 2), T)  # B x H x T x T(k)
        if self.use_kernel_mask:
            kernel_mask = torch.tril(torch.ones(T, T, device=x.device)).unsqueeze(0)
            weight_new = weight_new.masked_fill(kernel_mask == 0.0, float("-inf"))
        weight_new = F.softmax(weight_new, dim=-1)
        self.attn = weight_new
        weight_new = weight_new.view(B * H, T, T)

        # convolution
        x = x.transpose(1, 2).contiguous()  # B x C x T
        x = x.view(B * H, int(C / H), T).transpose(1, 2)
        x = torch.bmm(weight_new, x)  # BH x T x C/H
        x = x.transpose(1, 2).contiguous().view(B, C, T)
        if self.use_bias:
            x = x + self.bias.view(1, -1, 1)
        x = x.transpose(1, 2)  # B x T x C

        if mask is not None and not self.use_kernel_mask:
            mask = mask.transpose(-1, -2)
            x = x.masked_fill(mask == 0, 0.0)

        # second linear layer
        x = self.linear2(x)
        return x
| 4,243 | 32.952 | 86 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/initializer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Parameter initialization."""
import torch
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
def initialize(model, init_type="pytorch"):
    """Initialize Transformer module.

    :param torch.nn.Module model: transformer instance
    :param str init_type: initialization type ("pytorch" keeps the defaults)
    """
    if init_type == "pytorch":
        return

    # mapping from init_type to the in-place weight initializer
    weight_init_fns = {
        "xavier_uniform": torch.nn.init.xavier_uniform_,
        "xavier_normal": torch.nn.init.xavier_normal_,
        "kaiming_uniform": lambda w: torch.nn.init.kaiming_uniform_(
            w, nonlinearity="relu"
        ),
        "kaiming_normal": lambda w: torch.nn.init.kaiming_normal_(
            w, nonlinearity="relu"
        ),
    }

    for param in model.parameters():
        if param.dim() > 1:
            # weight matrices
            if init_type not in weight_init_fns:
                raise ValueError("Unknown initialization: " + init_type)
            weight_init_fns[init_type](param.data)
        elif param.dim() == 1:
            # bias vectors
            param.data.zero_()

    # reset some modules with default init
    for m in model.modules():
        if isinstance(m, (torch.nn.Embedding, LayerNorm)):
            m.reset_parameters()
| 1,383 | 29.755556 | 75 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/transformer/optimizer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Optimizer module."""
import torch
class NoamOpt(object):
    """Optim wrapper that implements rate.

    Wraps an optimizer and sets its learning rate on every ``step()``
    according to the Noam schedule: linear warmup followed by inverse
    square-root decay, scaled by ``factor * model_size ** -0.5``.
    """

    def __init__(self, model_size, factor, warmup, optimizer):
        """Construct an NoamOpt object."""
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0

    @property
    def param_groups(self):
        """Expose the wrapped optimizer's param_groups."""
        return self.optimizer.param_groups

    def step(self):
        """Advance one step: update lr in every group, then step the optimizer."""
        self._step += 1
        new_rate = self.rate()
        for group in self.optimizer.param_groups:
            group["lr"] = new_rate
        self._rate = new_rate
        self.optimizer.step()

    def rate(self, step=None):
        """Return the learning rate for ``step`` (defaults to the current step)."""
        current = self._step if step is None else step
        warm = min(current ** (-0.5), current * self.warmup ** (-1.5))
        return self.factor * self.model_size ** (-0.5) * warm

    def zero_grad(self):
        """Clear gradients of the wrapped optimizer."""
        self.optimizer.zero_grad()

    def state_dict(self):
        """Return a state dict covering both the schedule and the optimizer."""
        state = {
            key: getattr(self, key)
            for key in ("_step", "warmup", "factor", "model_size", "_rate")
        }
        state["optimizer"] = self.optimizer.state_dict()
        return state

    def load_state_dict(self, state_dict):
        """Restore schedule attributes and the wrapped optimizer's state."""
        for key, value in state_dict.items():
            if key == "optimizer":
                self.optimizer.load_state_dict(value)
            else:
                setattr(self, key, value)
def get_std_opt(model_params, d_model, warmup, factor):
    """Get standard NoamOpt.

    Builds an Adam optimizer with the betas/eps used in the original
    Transformer recipe (lr is fully controlled by the NoamOpt schedule).
    """
    inner = torch.optim.Adam(model_params, lr=0, betas=(0.9, 0.98), eps=1e-9)
    return NoamOpt(d_model, factor, warmup, inner)
| 2,094 | 26.565789 | 76 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/frontends/dnn_wpe.py | from typing import Tuple
import torch
from pytorch_wpe import wpe_one_iteration
from torch_complex.tensor import ComplexTensor
from espnet.nets.pytorch_backend.frontends.mask_estimator import MaskEstimator
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class DNN_WPE(torch.nn.Module):
    """WPE dereverberation, optionally driven by a DNN power-mask estimator.

    When ``use_dnn_mask`` is True a ``MaskEstimator`` predicts a mask that
    weights the power estimate used by WPE; otherwise the power estimate is
    refined over ``iterations`` plain WPE rounds.
    """

    def __init__(
        self,
        wtype: str = "blstmp",
        widim: int = 257,
        wlayers: int = 3,
        wunits: int = 300,
        wprojs: int = 320,
        dropout_rate: float = 0.0,
        taps: int = 5,
        delay: int = 3,
        use_dnn_mask: bool = True,
        iterations: int = 1,
        normalization: bool = False,
    ):
        super().__init__()
        self.iterations = iterations
        self.taps = taps
        self.delay = delay

        self.normalization = normalization
        self.use_dnn_mask = use_dnn_mask

        self.inverse_power = True

        if self.use_dnn_mask:
            # single-mask estimator for weighting the power spectrum
            self.mask_est = MaskEstimator(
                wtype, widim, wlayers, wunits, wprojs, dropout_rate, nmask=1
            )

    def forward(
        self, data: ComplexTensor, ilens: torch.LongTensor
    ) -> Tuple[ComplexTensor, torch.LongTensor, ComplexTensor]:
        """The forward function

        Notation:
            B: Batch
            C: Channel
            T: Time or Sequence length
            F: Freq or Some dimension of the feature vector

        Args:
            data: (B, T, C, F)
            ilens: (B,)
        Returns:
            enhanced: (B, T, C, F)
            ilens: (B,)
            mask: (B, T, C, F) when a DNN mask was estimated, else None
        """
        # (B, T, C, F) -> (B, F, C, T)
        enhanced = data = data.permute(0, 3, 2, 1)
        mask = None

        for i in range(self.iterations):
            # Calculate power: (..., C, T)
            power = enhanced.real**2 + enhanced.imag**2
            if i == 0 and self.use_dnn_mask:
                # mask: (B, F, C, T); estimated once, on the first iteration only
                (mask,), _ = self.mask_est(enhanced, ilens)
                if self.normalization:
                    # Normalize along T
                    mask = mask / mask.sum(dim=-1)[..., None]
                # (..., C, T) * (..., C, T) -> (..., C, T)
                power = power * mask

            # Averaging along the channel axis: (..., C, T) -> (..., T)
            power = power.mean(dim=-2)

            # enhanced: (..., C, T) -> (..., C, T)
            # NOTE: dereverberation always restarts from the raw ``data``;
            # only the power estimate is refined across iterations.
            enhanced = wpe_one_iteration(
                data.contiguous(),
                power,
                taps=self.taps,
                delay=self.delay,
                inverse_power=self.inverse_power,
            )

        # zero padded frames in place
        enhanced.masked_fill_(make_pad_mask(ilens, enhanced.real), 0)

        # (B, F, C, T) -> (B, T, C, F)
        enhanced = enhanced.permute(0, 3, 2, 1)
        if mask is not None:
            # mask back to (B, T, C, F) as well
            mask = mask.transpose(-1, -3)
        return enhanced, ilens, mask
| 2,851 | 29.340426 | 78 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/frontends/feature_transform.py | from typing import List, Tuple, Union
import librosa
import numpy as np
import torch
from torch_complex.tensor import ComplexTensor
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class FeatureTransform(torch.nn.Module):
    """Convert complex STFT features into normalized log-Mel features.

    Pipeline: |STFT|^2 -> log-Mel -> (optional) global MVN -> (optional)
    utterance MVN.  For multi-channel input one channel is selected
    (random during training, channel 0 otherwise).
    """

    def __init__(
        self,
        # Mel options,
        fs: int = 16000,
        n_fft: int = 512,
        n_mels: int = 80,
        fmin: float = 0.0,
        fmax: float = None,
        # Normalization
        stats_file: str = None,
        apply_uttmvn: bool = True,
        uttmvn_norm_means: bool = True,
        uttmvn_norm_vars: bool = False,
    ):
        super().__init__()
        self.apply_uttmvn = apply_uttmvn

        self.logmel = LogMel(fs=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
        self.stats_file = stats_file
        if stats_file is not None:
            self.global_mvn = GlobalMVN(stats_file)
        else:
            self.global_mvn = None

        # FIX: ``apply_uttmvn`` is a bool, so the original test
        # ``if self.apply_uttmvn is not None`` was always true and the
        # UtteranceMVN module was built even when disabled.
        if self.apply_uttmvn:
            self.uttmvn = UtteranceMVN(
                norm_means=uttmvn_norm_means, norm_vars=uttmvn_norm_vars
            )
        else:
            self.uttmvn = None

    def forward(
        self, x: ComplexTensor, ilens: Union[torch.LongTensor, np.ndarray, List[int]]
    ) -> Tuple[torch.Tensor, torch.LongTensor]:
        """Transform STFT features to normalized log-Mel features.

        Args:
            x: ComplexTensor, (B, T, F) or (B, T, C, F)
            ilens: (B,)
        Returns:
            h: (B, T, n_mels)
            ilens: (B,)
        """
        # (B, T, F) or (B, T, C, F)
        if x.dim() not in (3, 4):
            raise ValueError(f"Input dim must be 3 or 4: {x.dim()}")
        if not torch.is_tensor(ilens):
            ilens = torch.from_numpy(np.asarray(ilens)).to(x.device)

        if x.dim() == 4:
            # h: (B, T, C, F) -> h: (B, T, F)
            if self.training:
                # Select 1ch randomly (acts as channel-level data augmentation)
                ch = np.random.randint(x.size(2))
                h = x[:, :, ch, :]
            else:
                # Use the first channel
                h = x[:, :, 0, :]
        else:
            h = x

        # h: ComplexTensor(B, T, F) -> torch.Tensor(B, T, F), power spectrum
        h = h.real**2 + h.imag**2

        h, _ = self.logmel(h, ilens)
        if self.stats_file is not None:
            h, _ = self.global_mvn(h, ilens)
        if self.apply_uttmvn:
            h, _ = self.uttmvn(h, ilens)

        return h, ilens
class LogMel(torch.nn.Module):
    """Convert STFT to fbank feats

    The arguments is same as librosa.filters.mel

    Args:
        fs: number > 0 [scalar] sampling rate of the incoming signal
        n_fft: int > 0 [scalar] number of FFT components
        n_mels: int > 0 [scalar] number of Mel bands to generate
        fmin: float >= 0 [scalar] lowest frequency (in Hz)
        fmax: float >= 0 [scalar] highest frequency (in Hz).
            If `None`, use `fmax = fs / 2.0`
        htk: use HTK formula instead of Slaney
        norm: {None, 1, np.inf} [scalar]
            if 1, divide the triangular mel weights by the width of the mel band
            (area normalization). Otherwise, leave all the triangles aiming for
            a peak value of 1.0
    """

    def __init__(
        self,
        fs: int = 16000,
        n_fft: int = 512,
        n_mels: int = 80,
        fmin: float = 0.0,
        fmax: float = None,
        htk: bool = False,
        norm=1,
    ):
        super().__init__()
        mel_options = dict(
            sr=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax, htk=htk, norm=norm
        )
        self.mel_options = mel_options
        # Note(kamo): The mel matrix of librosa is different from kaldi.
        melmat = librosa.filters.mel(**mel_options)
        # register the transposed (D1, D2) matrix as a buffer so it follows
        # .to()/.cuda() together with the module
        self.register_buffer("melmat", torch.from_numpy(melmat.T).float())

    def extra_repr(self):
        return ", ".join(f"{key}={value}" for key, value in self.mel_options.items())

    def forward(
        self, feat: torch.Tensor, ilens: torch.LongTensor
    ) -> Tuple[torch.Tensor, torch.LongTensor]:
        # feat: (B, T, D1) @ melmat: (D1, D2) -> (B, T, D2)
        mel = torch.matmul(feat, self.melmat)
        logmel = (mel + 1e-20).log()

        # zero out padded frames
        logmel = logmel.masked_fill(make_pad_mask(ilens, logmel, 1), 0.0)
        return logmel, ilens
class GlobalMVN(torch.nn.Module):
    """Apply global mean and variance normalization

    Args:
        stats_file(str): npy file of a 1-dim array.
            From the first element to the {(len(array) - 1) / 2}th element
            are treated as the sum of features, the rest excluding the last
            element are treated as the sum of the squared value of features,
            and the last element equals the number of samples.
        norm_means (bool): subtract the global mean
        norm_vars (bool): divide by the global standard deviation
        eps (float): floor value for the standard deviation
    """

    def __init__(
        self,
        stats_file: str,
        norm_means: bool = True,
        norm_vars: bool = True,
        eps: float = 1.0e-20,
    ):
        super().__init__()
        self.norm_means = norm_means
        self.norm_vars = norm_vars
        self.stats_file = stats_file

        stats = np.load(stats_file)
        stats = stats.astype(float)

        assert (len(stats) - 1) % 2 == 0, stats.shape
        count = stats.flatten()[-1]
        # first half: sum of features; second half: sum of squared features
        mean = stats[: (len(stats) - 1) // 2] / count
        var = stats[(len(stats) - 1) // 2 : -1] / count - mean * mean
        std = np.maximum(np.sqrt(var), eps)

        # stored as buffers so they move with .to()/.cuda()
        self.register_buffer("bias", torch.from_numpy(-mean.astype(np.float32)))
        self.register_buffer("scale", torch.from_numpy(1 / std.astype(np.float32)))

    def extra_repr(self):
        return (
            f"stats_file={self.stats_file}, "
            f"norm_means={self.norm_means}, norm_vars={self.norm_vars}"
        )

    def forward(
        self, x: torch.Tensor, ilens: torch.LongTensor
    ) -> Tuple[torch.Tensor, torch.LongTensor]:
        """Normalize ``x`` (modified in place) and return it with ``ilens``.

        Args:
            x: (B, T, D)
            ilens: (B,)
        """
        if self.norm_means:
            x += self.bias.type_as(x)
            # FIX: use the in-place ``masked_fill_``; the original called the
            # out-of-place ``masked_fill`` and discarded the result, so padded
            # frames silently kept the (nonzero) bias value.
            x.masked_fill_(make_pad_mask(ilens, x, 1), 0.0)

        if self.norm_vars:
            x *= self.scale.type_as(x)
        return x, ilens
class UtteranceMVN(torch.nn.Module):
    """Per-utterance mean/variance normalization (module wrapper).

    Thin ``nn.Module`` wrapper around :func:`utterance_mvn` so the options
    can be stored once and reused.
    """

    def __init__(
        self, norm_means: bool = True, norm_vars: bool = False, eps: float = 1.0e-20
    ):
        super().__init__()
        # options forwarded verbatim to utterance_mvn() on every call
        self.norm_means = norm_means
        self.norm_vars = norm_vars
        self.eps = eps

    def extra_repr(self):
        return f"norm_means={self.norm_means}, norm_vars={self.norm_vars}"

    def forward(
        self, x: torch.Tensor, ilens: torch.LongTensor
    ) -> Tuple[torch.Tensor, torch.LongTensor]:
        return utterance_mvn(
            x,
            ilens,
            norm_means=self.norm_means,
            norm_vars=self.norm_vars,
            eps=self.eps,
        )
def utterance_mvn(
    x: torch.Tensor,
    ilens: torch.LongTensor,
    norm_means: bool = True,
    norm_vars: bool = False,
    eps: float = 1.0e-20,
) -> Tuple[torch.Tensor, torch.LongTensor]:
    """Apply utterance mean and variance normalization

    Args:
        x: (B, T, D), assumed zero padded; modified in place when
            ``norm_means`` or ``norm_vars`` is True
        ilens: (B,)
        norm_means: subtract the per-utterance mean
        norm_vars: divide by the per-utterance standard deviation
        eps: variance floor
    """
    ilens_ = ilens.type_as(x)
    # mean over valid frames only (padding assumed zero): (B, D)
    mean = x.sum(dim=1) / ilens_[:, None]

    if norm_means:
        x -= mean[:, None, :]
        x_ = x
    else:
        # keep x untouched; the centered copy is only used for the variance
        x_ = x - mean[:, None, :]

    # Zero padding
    # FIX: use the in-place ``masked_fill_``; the original discarded the
    # result of the out-of-place ``masked_fill``, so padded frames kept the
    # value ``-mean`` and corrupted the variance computed below.
    x_.masked_fill_(make_pad_mask(ilens, x_, 1), 0.0)
    if norm_vars:
        var = x_.pow(2).sum(dim=1) / ilens_[:, None]
        var = torch.clamp(var, min=eps)
        x /= var.sqrt()[:, None, :]
        x_ = x
    return x_, ilens
def feature_transform_for(args, n_fft):
    """Build a FeatureTransform from parsed command-line arguments.

    :param Namespace args: arguments holding the fbank and MVN options
    :param int n_fft: number of FFT points of the preceding STFT
    :rtype: FeatureTransform
    """
    mel_opts = dict(
        fs=args.fbank_fs,
        n_fft=n_fft,
        n_mels=args.n_mels,
        fmin=args.fbank_fmin,
        fmax=args.fbank_fmax,
    )
    norm_opts = dict(
        stats_file=args.stats_file,
        apply_uttmvn=args.apply_uttmvn,
        uttmvn_norm_means=args.uttmvn_norm_means,
        uttmvn_norm_vars=args.uttmvn_norm_vars,
    )
    return FeatureTransform(**mel_opts, **norm_opts)
| 7,935 | 29.290076 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/frontends/beamformer.py | import torch
from torch_complex import functional as FC
from torch_complex.tensor import ComplexTensor
def get_power_spectral_density_matrix(
    xs: ComplexTensor, mask: torch.Tensor, normalization=True, eps: float = 1e-15
) -> ComplexTensor:
    """Return cross-channel power spectral density (PSD) matrix

    The PSD is the mask-weighted sum over time of the outer product of the
    multi-channel observation with its conjugate.

    Args:
        xs (ComplexTensor): (..., F, C, T)
        mask (torch.Tensor): (..., F, C, T)
        normalization (bool): normalize the mask to sum to one over time
        eps (float): guard against division by zero in the normalization

    Returns
        psd (ComplexTensor): (..., F, C, C)

    """
    # outer product: (..., C_1, T) x (..., C_2, T) -> (..., T, C_1, C_2)
    psd_Y = FC.einsum("...ct,...et->...tce", [xs, xs.conj()])

    # Averaging mask along C: (..., C, T) -> (..., T)
    mask = mask.mean(dim=-2)

    # Normalized mask along T: (..., T)
    if normalization:
        # If assuming the tensor is padded with zero, the summation along
        # the time axis is same regardless of the padding length.
        mask = mask / (mask.sum(dim=-1, keepdim=True) + eps)

    # psd: (..., T, C, C) — each time frame weighted by the (scalar) mask
    psd = psd_Y * mask[..., None, None]
    # weighted sum over time: (..., T, C, C) -> (..., C, C)
    psd = psd.sum(dim=-3)

    return psd
def get_mvdr_vector(
    psd_s: ComplexTensor,
    psd_n: ComplexTensor,
    reference_vector: torch.Tensor,
    eps: float = 1e-15,
) -> ComplexTensor:
    """Return the MVDR(Minimum Variance Distortionless Response) vector:

        h = (Npsd^-1 @ Spsd) / (Tr(Npsd^-1 @ Spsd)) @ u

    Reference:
        On optimal frequency-domain multichannel linear filtering
        for noise reduction; M. Souden et al., 2010;
        https://ieeexplore.ieee.org/document/5089420

    Args:
        psd_s (ComplexTensor): (..., F, C, C) speech PSD matrix
        psd_n (ComplexTensor): (..., F, C, C) noise PSD matrix
        reference_vector (torch.Tensor): (..., C) reference-mic selector
        eps (float): diagonal loading factor for the matrix inverse

    Returns:
        beamform_vector (ComplexTensor)r: (..., F, C)
    """
    # Add eps to the diagonal to stabilize the inverse (diagonal loading).
    # NOTE(review): ``psd_n += ...`` mutates the caller's tensor in place —
    # confirm callers do not reuse ``psd_n`` afterwards.
    C = psd_n.size(-1)
    eye = torch.eye(C, dtype=psd_n.dtype, device=psd_n.device)
    shape = [1 for _ in range(psd_n.dim() - 2)] + [C, C]
    eye = eye.view(*shape)
    psd_n += eps * eye

    # numerator = Npsd^-1 @ Spsd: (..., C_1, C_2) x (..., C_2, C_3) -> (..., C_1, C_3)
    numerator = FC.einsum("...ec,...cd->...ed", [psd_n.inverse(), psd_s])
    # ws: (..., C, C) / (...,) -> (..., C, C); trace-normalized
    ws = numerator / (FC.trace(numerator)[..., None, None] + eps)
    # h: (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
    beamform_vector = FC.einsum("...fec,...c->...fe", [ws, reference_vector])
    return beamform_vector
def apply_beamforming_vector(
    beamform_vector: ComplexTensor, mix: ComplexTensor
) -> ComplexTensor:
    """Apply a beamforming vector to the multi-channel mixture.

    Contracts the channel axis: (..., C) x (..., C, T) -> (..., T)
    """
    return FC.einsum("...c,...ct->...t", [beamform_vector.conj(), mix])
| 2,731 | 31.141176 | 81 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/frontends/mask_estimator.py | from typing import Tuple
import numpy as np
import torch
from torch.nn import functional as F
from torch_complex.tensor import ComplexTensor
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.rnn.encoders import RNN, RNNP
class MaskEstimator(torch.nn.Module):
    """Time-frequency mask estimator based on a (projected) RNN.

    Args:
        type (str): RNN type string such as "blstmp"; a trailing "p" selects
            the projected RNN variant and a leading "vgg" prefix is stripped.
            (NOTE: shadows the ``type`` builtin — kept for API compatibility.)
        idim (int): input feature dimension (F)
        layers (int): number of RNN layers
        units (int): number of RNN units
        projs (int): number of projection units
        dropout (float): dropout rate
        nmask (int): number of masks to estimate
    """

    def __init__(self, type, idim, layers, units, projs, dropout, nmask=1):
        super().__init__()
        subsample = np.ones(layers + 1, dtype=np.int64)

        typ = type.lstrip("vgg").rstrip("p")
        if type[-1] == "p":
            self.brnn = RNNP(idim, layers, units, projs, subsample, dropout, typ=typ)
        else:
            self.brnn = RNN(idim, layers, units, projs, dropout, typ=typ)

        self.type = type
        self.nmask = nmask
        # one output head per mask, each mapping RNN projections back to F bins
        self.linears = torch.nn.ModuleList(
            [torch.nn.Linear(projs, idim) for _ in range(nmask)]
        )

    def forward(
        self, xs: ComplexTensor, ilens: torch.LongTensor
    ) -> Tuple[Tuple[torch.Tensor, ...], torch.LongTensor]:
        """The forward function

        Args:
            xs: (B, F, C, T)
            ilens: (B,)
        Returns:
            masks: A tuple of ``nmask`` masks, each (B, F, C, T)
            ilens: (B,)
        """
        assert xs.size(0) == ilens.size(0), (xs.size(0), ilens.size(0))
        _, _, C, input_length = xs.size()
        # (B, F, C, T) -> (B, C, T, F)
        xs = xs.permute(0, 2, 3, 1)

        # Calculate amplitude: (B, C, T, F) -> (B, C, T, F)
        xs = (xs.real**2 + xs.imag**2) ** 0.5
        # xs: (B, C, T, F) -> xs: (B * C, T, F); channels processed independently
        xs = xs.contiguous().view(-1, xs.size(-2), xs.size(-1))
        # ilens: (B,) -> ilens_: (B * C)
        ilens_ = ilens[:, None].expand(-1, C).contiguous().view(-1)

        # xs: (B * C, T, F) -> xs: (B * C, T, D)
        xs, _, _ = self.brnn(xs, ilens_)
        # xs: (B * C, T, D) -> xs: (B, C, T, D)
        xs = xs.view(-1, C, xs.size(-2), xs.size(-1))

        masks = []
        for linear in self.linears:
            # xs: (B, C, T, D) -> mask:(B, C, T, F)
            mask = linear(xs)
            mask = torch.sigmoid(mask)
            # Zero padding
            # FIX: ``masked_fill`` is out-of-place; the original discarded its
            # return value, so padded frames were never actually zeroed.
            # (Reassignment, not ``masked_fill_``, to keep sigmoid's backward
            # graph intact.)
            mask = mask.masked_fill(make_pad_mask(ilens, mask, length_dim=2), 0)
            # (B, C, T, F) -> (B, F, C, T)
            mask = mask.permute(0, 3, 1, 2)

            # Take cares of multi gpu cases: If input_length > max(ilens)
            if mask.size(-1) < input_length:
                mask = F.pad(mask, [0, input_length - mask.size(-1)], value=0)
            masks.append(mask)

        return tuple(masks), ilens
| 2,637 | 33.25974 | 85 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/frontends/frontend.py | from typing import List, Optional, Tuple, Union
import numpy
import torch
import torch.nn as nn
from torch_complex.tensor import ComplexTensor
from espnet.nets.pytorch_backend.frontends.dnn_beamformer import DNN_Beamformer
from espnet.nets.pytorch_backend.frontends.dnn_wpe import DNN_WPE
class Frontend(nn.Module):
    """Speech-enhancement frontend: optional WPE dereverberation + beamformer.

    Operates on complex STFT features.  Single-channel input (B, T, F) is
    passed through untouched; multi-channel input (B, T, C, F) may be
    dereverberated (WPE) and/or beamformed.
    """

    def __init__(
        self,
        idim: int,
        # WPE options
        use_wpe: bool = False,
        wtype: str = "blstmp",
        wlayers: int = 3,
        wunits: int = 300,
        wprojs: int = 320,
        wdropout_rate: float = 0.0,
        taps: int = 5,
        delay: int = 3,
        use_dnn_mask_for_wpe: bool = True,
        # Beamformer options
        use_beamformer: bool = False,
        btype: str = "blstmp",
        blayers: int = 3,
        bunits: int = 300,
        bprojs: int = 320,
        bnmask: int = 2,
        badim: int = 320,
        ref_channel: int = -1,
        bdropout_rate=0.0,
    ):
        super().__init__()

        self.use_beamformer = use_beamformer
        self.use_wpe = use_wpe
        self.use_dnn_mask_for_wpe = use_dnn_mask_for_wpe
        # use frontend for all the data,
        # e.g. in the case of multi-speaker speech separation
        self.use_frontend_for_all = bnmask > 2

        if self.use_wpe:
            if self.use_dnn_mask_for_wpe:
                # Use DNN for power estimation
                # (Not observed significant gains)
                iterations = 1
            else:
                # Performing as conventional WPE, without DNN Estimator
                iterations = 2

            self.wpe = DNN_WPE(
                wtype=wtype,
                widim=idim,
                wunits=wunits,
                wprojs=wprojs,
                wlayers=wlayers,
                taps=taps,
                delay=delay,
                dropout_rate=wdropout_rate,
                iterations=iterations,
                use_dnn_mask=use_dnn_mask_for_wpe,
            )
        else:
            self.wpe = None

        if self.use_beamformer:
            self.beamformer = DNN_Beamformer(
                btype=btype,
                bidim=idim,
                bunits=bunits,
                bprojs=bprojs,
                blayers=blayers,
                bnmask=bnmask,
                dropout_rate=bdropout_rate,
                badim=badim,
                ref_channel=ref_channel,
            )
        else:
            self.beamformer = None

    def forward(
        self, x: ComplexTensor, ilens: Union[torch.LongTensor, numpy.ndarray, List[int]]
    ) -> Tuple[ComplexTensor, torch.LongTensor, Optional[ComplexTensor]]:
        """Enhance ``x``; returns (enhanced, ilens, mask-or-None)."""
        assert len(x) == len(ilens), (len(x), len(ilens))
        # (B, T, F) or (B, T, C, F)
        if x.dim() not in (3, 4):
            raise ValueError(f"Input dim must be 3 or 4: {x.dim()}")
        if not torch.is_tensor(ilens):
            ilens = torch.from_numpy(numpy.asarray(ilens)).to(x.device)

        mask = None
        h = x
        if h.dim() == 4:
            if self.training:
                # During training, pick one enhancement at random per batch so
                # each enabled sub-network receives updates; "no enhancement"
                # (False, False) is also a choice unless the frontend must be
                # applied to everything (multi-speaker separation).
                choices = [(False, False)] if not self.use_frontend_for_all else []
                if self.use_wpe:
                    choices.append((True, False))
                if self.use_beamformer:
                    choices.append((False, True))

                # NOTE(review): if use_frontend_for_all is True but neither WPE
                # nor beamformer is enabled, ``choices`` is empty and randint(0)
                # raises — presumably ruled out by configuration; confirm.
                use_wpe, use_beamformer = choices[numpy.random.randint(len(choices))]

            else:
                use_wpe = self.use_wpe
                use_beamformer = self.use_beamformer

            # 1. WPE
            if use_wpe:
                # h: (B, T, C, F) -> h: (B, T, C, F)
                h, ilens, mask = self.wpe(h, ilens)

            # 2. Beamformer
            if use_beamformer:
                # h: (B, T, C, F) -> h: (B, T, F)
                h, ilens, mask = self.beamformer(h, ilens)

        return h, ilens, mask
def frontend_for(args, idim):
    """Instantiate a Frontend from parsed command-line arguments.

    :param Namespace args: arguments holding the WPE and beamformer options
    :param int idim: input feature dimension (number of frequency bins)
    :rtype: Frontend
    """
    wpe_opts = dict(
        use_wpe=args.use_wpe,
        wtype=args.wtype,
        wlayers=args.wlayers,
        wunits=args.wunits,
        wprojs=args.wprojs,
        wdropout_rate=args.wdropout_rate,
        taps=args.wpe_taps,
        delay=args.wpe_delay,
        use_dnn_mask_for_wpe=args.use_dnn_mask_for_wpe,
    )
    beamformer_opts = dict(
        use_beamformer=args.use_beamformer,
        btype=args.btype,
        blayers=args.blayers,
        bunits=args.bunits,
        bprojs=args.bprojs,
        bnmask=args.bnmask,
        badim=args.badim,
        ref_channel=args.ref_channel,
        bdropout_rate=args.bdropout_rate,
    )
    return Frontend(idim=idim, **wpe_opts, **beamformer_opts)
| 4,557 | 29.590604 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/frontends/dnn_beamformer.py | """DNN beamformer module."""
from typing import Tuple
import torch
from torch.nn import functional as F
from torch_complex.tensor import ComplexTensor
from espnet.nets.pytorch_backend.frontends.beamformer import ( # noqa: H301
apply_beamforming_vector,
get_mvdr_vector,
get_power_spectral_density_matrix,
)
from espnet.nets.pytorch_backend.frontends.mask_estimator import MaskEstimator
class DNN_Beamformer(torch.nn.Module):
    """DNN mask based Beamformer

    Estimates speech/noise masks with a DNN, builds PSD matrices from them,
    and applies an MVDR beamformer per speaker.

    Citation:
        Multichannel End-to-end Speech Recognition; T. Ochiai et al., 2017;
        https://arxiv.org/abs/1703.04783

    """

    def __init__(
        self,
        bidim,
        btype="blstmp",
        blayers=3,
        bunits=300,
        bprojs=320,
        bnmask=2,
        dropout_rate=0.0,
        badim=320,
        ref_channel: int = -1,
        beamformer_type="mvdr",
    ):
        super().__init__()
        self.mask = MaskEstimator(
            btype, bidim, blayers, bunits, bprojs, dropout_rate, nmask=bnmask
        )
        # attention-based reference-mic selection (used when ref_channel < 0)
        self.ref = AttentionReference(bidim, badim)
        self.ref_channel = ref_channel

        self.nmask = bnmask

        if beamformer_type != "mvdr":
            raise ValueError(
                "Not supporting beamformer_type={}".format(beamformer_type)
            )
        self.beamformer_type = beamformer_type

    def forward(
        self, data: ComplexTensor, ilens: torch.LongTensor
    ) -> Tuple[ComplexTensor, torch.LongTensor, ComplexTensor]:
        """The forward function

        Notation:
            B: Batch
            C: Channel
            T: Time or Sequence length
            F: Freq

        Args:
            data (ComplexTensor): (B, T, C, F)
            ilens (torch.Tensor): (B,)
        Returns:
            enhanced (ComplexTensor): (B, T, F)
            ilens (torch.Tensor): (B,)
        """

        def apply_beamforming(data, ilens, psd_speech, psd_noise):
            # u: (B, C) — soft (attention) or hard (one-hot) mic reference
            if self.ref_channel < 0:
                u, _ = self.ref(psd_speech, ilens)
            else:
                # (optional) Create onehot vector for fixed reference microphone
                u = torch.zeros(
                    *(data.size()[:-3] + (data.size(-2),)), device=data.device
                )
                u[..., self.ref_channel].fill_(1)

            ws = get_mvdr_vector(psd_speech, psd_noise, u)
            enhanced = apply_beamforming_vector(ws, data)

            return enhanced, ws

        # data (B, T, C, F) -> (B, F, C, T)
        data = data.permute(0, 3, 2, 1)

        # mask: (B, F, C, T)
        masks, _ = self.mask(data, ilens)
        assert self.nmask == len(masks)

        if self.nmask == 2:  # (mask_speech, mask_noise)
            mask_speech, mask_noise = masks

            psd_speech = get_power_spectral_density_matrix(data, mask_speech)
            psd_noise = get_power_spectral_density_matrix(data, mask_noise)

            enhanced, ws = apply_beamforming(data, ilens, psd_speech, psd_noise)

            # (..., F, T) -> (..., T, F)
            enhanced = enhanced.transpose(-1, -2)
            mask_speech = mask_speech.transpose(-1, -3)
        else:  # multi-speaker case: (mask_speech1, ..., mask_noise)
            mask_speech = list(masks[:-1])
            mask_noise = masks[-1]

            psd_speeches = [
                get_power_spectral_density_matrix(data, mask) for mask in mask_speech
            ]
            psd_noise = get_power_spectral_density_matrix(data, mask_noise)

            enhanced = []
            ws = []
            for i in range(self.nmask - 1):
                # pop speaker i's PSD so the remaining list is "everyone else"
                psd_speech = psd_speeches.pop(i)
                # treat all other speakers' psd_speech as noises
                enh, w = apply_beamforming(
                    data, ilens, psd_speech, sum(psd_speeches) + psd_noise
                )
                # restore the list for the next iteration
                psd_speeches.insert(i, psd_speech)

                # (..., F, T) -> (..., T, F)
                enh = enh.transpose(-1, -2)
                mask_speech[i] = mask_speech[i].transpose(-1, -3)

                enhanced.append(enh)
                ws.append(w)

        return enhanced, ilens, mask_speech
class AttentionReference(torch.nn.Module):
    """Attention-based reference microphone estimator.

    Produces a soft reference-channel weight vector from a speech PSD
    matrix; used by the MVDR beamformer when no fixed reference channel
    is configured.
    """
    def __init__(self, bidim, att_dim):
        """Initialize the module.

        Args:
            bidim: number of frequency bins of the PSD input.
            att_dim: hidden dimension of the attention MLP.
        """
        super().__init__()
        self.mlp_psd = torch.nn.Linear(bidim, att_dim)
        self.gvec = torch.nn.Linear(att_dim, 1)
    def forward(
        self, psd_in: ComplexTensor, ilens: torch.LongTensor, scaling: float = 2.0
    ) -> Tuple[torch.Tensor, torch.LongTensor]:
        """Compute the reference-channel weight vector.

        Args:
            psd_in (ComplexTensor): (B, F, C, C)
            ilens (torch.Tensor): (B,)
            scaling (float): temperature applied before the softmax
        Returns:
            u (torch.Tensor): (B, C) softmax weights over channels
            ilens (torch.Tensor): (B,)
        """
        n_channels = psd_in.size(2)
        assert psd_in.size(2) == psd_in.size(3), psd_in.size()
        # Zero the diagonal, then average each row over the remaining
        # off-diagonal entries: (B, F, C, C) -> (B, C, F)
        diag = torch.eye(n_channels, dtype=torch.bool, device=psd_in.device)
        off_diag = psd_in.masked_fill(diag, 0)
        mean_psd = (off_diag.sum(dim=-1) / (n_channels - 1)).transpose(-1, -2)
        # Magnitude spectrum serves as the attention feature: (B, C, F)
        feat = (mean_psd.real**2 + mean_psd.imag**2) ** 0.5
        # (B, C, F) -> (B, C, att_dim) -> (B, C, 1) -> (B, C)
        score = self.gvec(torch.tanh(self.mlp_psd(feat))).squeeze(-1)
        return F.softmax(scaling * score, dim=-1), ilens
| 5,480 | 30.682081 | 85 | py |
espnet | espnet-master/espnet/mt/pytorch_backend/mt.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the text translation task."""
import itertools
import json
import logging
import os
import numpy as np
import torch
from chainer import training
from chainer.training import extensions
from espnet.asr.asr_utils import (
CompareValueTrigger,
adadelta_eps_decay,
adam_lr_decay,
add_results_to_json,
restore_snapshot,
snapshot_object,
torch_load,
torch_resume,
torch_snapshot,
)
from espnet.asr.pytorch_backend.asr import (
CustomEvaluator,
CustomUpdater,
load_trained_model,
)
from espnet.nets.mt_interface import MTInterface
from espnet.nets.pytorch_backend.e2e_asr import pad_list
from espnet.utils.dataset import ChainerDataLoader, TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop, set_early_stop
class CustomConverter(object):
    """Custom batch converter for Pytorch.

    Pads source/target token-id sequences and moves them to a device.
    Index 0 is reserved for <pad>: blank labels are not used in NMT, so
    padding reuses index 0 instead of enlarging the vocabulary (in ASR
    index 0 would be the blank class).
    """
    def __init__(self):
        """Construct a CustomConverter object."""
        # label value ignored by the loss at padded target positions
        self.ignore_id = -1
        # padding value for source sequences (shares index 0 with <pad>)
        self.pad = 0
    def __call__(self, batch, device=torch.device("cpu")):
        """Transform a batch and send it to a device.

        Args:
            batch (list): Single-element list wrapping (sources, targets).
            device (torch.device): The device to send to.
        Returns:
            tuple(torch.Tensor, torch.Tensor, torch.Tensor):
                padded sources, source lengths, padded targets.
        """
        # batch should be located in a single-element list
        assert len(batch) == 1
        srcs, tgts = batch[0]
        # lengths of the (unpadded) source sequences
        src_lens = torch.from_numpy(np.array([s.shape[0] for s in srcs])).to(device)
        # pad and convert to long tensors
        xs_pad = pad_list([torch.from_numpy(s).long() for s in srcs], self.pad).to(
            device
        )
        ys_pad = pad_list([torch.from_numpy(t).long() for t in tgts], self.ignore_id).to(
            device
        )
        return xs_pad, src_lens, ys_pad
def train(args):
    """Train an NMT model with the given args.

    Builds the model/optimizer, wraps the data in chainer-style iterators,
    assembles a ``chainer.training.Trainer`` with evaluation, plotting,
    snapshotting and LR/eps-decay extensions, and runs the training loop.

    Args:
        args (namespace): The program arguments.
    """
    set_deterministic_pytorch(args)
    # check cuda availability
    if not torch.cuda.is_available():
        logging.warning("cuda is not available")
    # get input and output dimension info from the validation json
    # (output[1] is the source text, output[0] the target -- see the
    # iaxis=1/oaxis=0 arguments passed to make_batchset below)
    with open(args.valid_json, "rb") as f:
        valid_json = json.load(f)["utts"]
    utts = list(valid_json.keys())
    idim = int(valid_json[utts[0]]["output"][1]["shape"][1])
    odim = int(valid_json[utts[0]]["output"][0]["shape"][1])
    logging.info("#input dims : " + str(idim))
    logging.info("#output dims: " + str(odim))
    # specify model architecture
    model_class = dynamic_import(args.model_module)
    model = model_class(idim, odim, args)
    assert isinstance(model, MTInterface)
    # write model config next to the training outputs
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    model_conf = args.outdir + "/model.json"
    with open(model_conf, "wb") as f:
        logging.info("writing a model config file to " + model_conf)
        f.write(
            json.dumps(
                (idim, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
    for key in sorted(vars(args).keys()):
        logging.info("ARGS: " + key + ": " + str(vars(args)[key]))
    reporter = model.reporter
    # check the use of multi-gpu: scale the batch size by the GPU count
    if args.ngpu > 1:
        if args.batch_size != 0:
            logging.warning(
                "batch size is automatically increased (%d -> %d)"
                % (args.batch_size, args.batch_size * args.ngpu)
            )
            args.batch_size *= args.ngpu
    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    if args.train_dtype in ("float16", "float32", "float64"):
        dtype = getattr(torch, args.train_dtype)
    else:
        # apex opt-levels ("O0".."O3") are handled below; train in fp32 here
        dtype = torch.float32
    model = model.to(device=device, dtype=dtype)
    logging.warning(
        "num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format(
            sum(p.numel() for p in model.parameters()),
            sum(p.numel() for p in model.parameters() if p.requires_grad),
            sum(p.numel() for p in model.parameters() if p.requires_grad)
            * 100.0
            / sum(p.numel() for p in model.parameters()),
        )
    )
    # Setup an optimizer
    if args.opt == "adadelta":
        optimizer = torch.optim.Adadelta(
            model.parameters(), rho=0.95, eps=args.eps, weight_decay=args.weight_decay
        )
    elif args.opt == "adam":
        optimizer = torch.optim.Adam(
            model.parameters(), lr=args.lr, weight_decay=args.weight_decay
        )
    elif args.opt == "noam":
        from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
        optimizer = get_std_opt(
            model.parameters(),
            args.adim,
            args.transformer_warmup_steps,
            args.transformer_lr,
        )
    else:
        raise NotImplementedError("unknown optimizer: " + args.opt)
    # setup apex.amp (mixed precision) when an opt-level was requested
    if args.train_dtype in ("O0", "O1", "O2", "O3"):
        try:
            from apex import amp
        except ImportError as e:
            logging.error(
                f"You need to install apex for --train-dtype {args.train_dtype}. "
                "See https://github.com/NVIDIA/apex#linux"
            )
            raise e
        if args.opt == "noam":
            # noam wraps the real optimizer; amp must see the inner one
            model, optimizer.optimizer = amp.initialize(
                model, optimizer.optimizer, opt_level=args.train_dtype
            )
        else:
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=args.train_dtype
            )
        use_apex = True
    else:
        use_apex = False
    # FIXME: TOO DIRTY HACK -- chainer's trainer expects these attributes
    setattr(optimizer, "target", reporter)
    setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
    # Setup a converter
    converter = CustomConverter()
    # read json data
    with open(args.train_json, "rb") as f:
        train_json = json.load(f)["utts"]
    with open(args.valid_json, "rb") as f:
        valid_json = json.load(f)["utts"]
    use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
    # make minibatch list (variable length)
    train = make_batchset(
        train_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=args.ngpu if args.ngpu > 1 else 1,
        shortest_first=use_sortagrad,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        mt=True,
        iaxis=1,
        oaxis=0,
    )
    valid = make_batchset(
        valid_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=args.ngpu if args.ngpu > 1 else 1,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        mt=True,
        iaxis=1,
        oaxis=0,
    )
    load_tr = LoadInputsAndTargets(mode="mt", load_output=True)
    load_cv = LoadInputsAndTargets(mode="mt", load_output=True)
    # hack to make batchsize argument as 1
    # actual batch size is included in a list
    # default collate function converts numpy array to pytorch tensor
    # we used an empty collate function instead which returns list
    train_iter = ChainerDataLoader(
        dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
        batch_size=1,
        num_workers=args.n_iter_processes,
        shuffle=not use_sortagrad,
        collate_fn=lambda x: x[0],
    )
    valid_iter = ChainerDataLoader(
        dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
        batch_size=1,
        shuffle=False,
        collate_fn=lambda x: x[0],
        num_workers=args.n_iter_processes,
    )
    # Set up a trainer
    updater = CustomUpdater(
        model,
        args.grad_clip,
        {"main": train_iter},
        optimizer,
        device,
        args.ngpu,
        False,
        args.accum_grad,
        use_apex=use_apex,
    )
    trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)
    if use_sortagrad:
        # switch from shortest-first ordering to shuffling after N epochs
        trainer.extend(
            ShufflingEnabler([train_iter]),
            trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
        )
    # Resume from a snapshot
    if args.resume:
        logging.info("resumed from %s" % args.resume)
        torch_resume(args.resume, trainer)
    # Evaluate the model with the test dataset for each epoch
    if args.save_interval_iters > 0:
        trainer.extend(
            CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu),
            trigger=(args.save_interval_iters, "iteration"),
        )
    else:
        trainer.extend(
            CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu)
        )
    # Save attention weight each epoch
    if args.num_save_attention > 0:
        # NOTE: sort it by output lengths
        data = sorted(
            list(valid_json.items())[: args.num_save_attention],
            key=lambda x: int(x[1]["output"][0]["shape"][0]),
            reverse=True,
        )
        if hasattr(model, "module"):
            # DataParallel wraps the real model in .module
            att_vis_fn = model.module.calculate_all_attentions
            plot_class = model.module.attention_plot_class
        else:
            att_vis_fn = model.calculate_all_attentions
            plot_class = model.attention_plot_class
        att_reporter = plot_class(
            att_vis_fn,
            data,
            args.outdir + "/att_ws",
            converter=converter,
            transform=load_cv,
            device=device,
            ikey="output",
            iaxis=1,
        )
        trainer.extend(att_reporter, trigger=(1, "epoch"))
    else:
        att_reporter = None
    # Make a plot for training and validation values
    trainer.extend(
        extensions.PlotReport(
            ["main/loss", "validation/main/loss"], "epoch", file_name="loss.png"
        )
    )
    trainer.extend(
        extensions.PlotReport(
            ["main/acc", "validation/main/acc"], "epoch", file_name="acc.png"
        )
    )
    trainer.extend(
        extensions.PlotReport(
            ["main/ppl", "validation/main/ppl"], "epoch", file_name="ppl.png"
        )
    )
    trainer.extend(
        extensions.PlotReport(
            ["main/bleu", "validation/main/bleu"], "epoch", file_name="bleu.png"
        )
    )
    # Save best models
    trainer.extend(
        snapshot_object(model, "model.loss.best"),
        trigger=training.triggers.MinValueTrigger("validation/main/loss"),
    )
    trainer.extend(
        snapshot_object(model, "model.acc.best"),
        trigger=training.triggers.MaxValueTrigger("validation/main/acc"),
    )
    # save snapshot which contains model and optimizer states
    if args.save_interval_iters > 0:
        trainer.extend(
            torch_snapshot(filename="snapshot.iter.{.updater.iteration}"),
            trigger=(args.save_interval_iters, "iteration"),
        )
    else:
        trainer.extend(torch_snapshot(), trigger=(1, "epoch"))
    # epsilon decay in the optimizer: when the validation criterion stops
    # improving, restore the best snapshot and decay eps (adadelta) / lr (adam)
    if args.opt == "adadelta":
        if args.criterion == "acc":
            trainer.extend(
                restore_snapshot(
                    model, args.outdir + "/model.acc.best", load_fn=torch_load
                ),
                trigger=CompareValueTrigger(
                    "validation/main/acc",
                    lambda best_value, current_value: best_value > current_value,
                ),
            )
            trainer.extend(
                adadelta_eps_decay(args.eps_decay),
                trigger=CompareValueTrigger(
                    "validation/main/acc",
                    lambda best_value, current_value: best_value > current_value,
                ),
            )
        elif args.criterion == "loss":
            trainer.extend(
                restore_snapshot(
                    model, args.outdir + "/model.loss.best", load_fn=torch_load
                ),
                trigger=CompareValueTrigger(
                    "validation/main/loss",
                    lambda best_value, current_value: best_value < current_value,
                ),
            )
            trainer.extend(
                adadelta_eps_decay(args.eps_decay),
                trigger=CompareValueTrigger(
                    "validation/main/loss",
                    lambda best_value, current_value: best_value < current_value,
                ),
            )
    elif args.opt == "adam":
        if args.criterion == "acc":
            trainer.extend(
                restore_snapshot(
                    model, args.outdir + "/model.acc.best", load_fn=torch_load
                ),
                trigger=CompareValueTrigger(
                    "validation/main/acc",
                    lambda best_value, current_value: best_value > current_value,
                ),
            )
            trainer.extend(
                adam_lr_decay(args.lr_decay),
                trigger=CompareValueTrigger(
                    "validation/main/acc",
                    lambda best_value, current_value: best_value > current_value,
                ),
            )
        elif args.criterion == "loss":
            trainer.extend(
                restore_snapshot(
                    model, args.outdir + "/model.loss.best", load_fn=torch_load
                ),
                trigger=CompareValueTrigger(
                    "validation/main/loss",
                    lambda best_value, current_value: best_value < current_value,
                ),
            )
            trainer.extend(
                adam_lr_decay(args.lr_decay),
                trigger=CompareValueTrigger(
                    "validation/main/loss",
                    lambda best_value, current_value: best_value < current_value,
                ),
            )
    # Write a log of evaluation statistics for each epoch
    trainer.extend(
        extensions.LogReport(trigger=(args.report_interval_iters, "iteration"))
    )
    report_keys = [
        "epoch",
        "iteration",
        "main/loss",
        "validation/main/loss",
        "main/acc",
        "validation/main/acc",
        "main/ppl",
        "validation/main/ppl",
        "elapsed_time",
    ]
    if args.opt == "adadelta":
        trainer.extend(
            extensions.observe_value(
                "eps",
                lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
                    "eps"
                ],
            ),
            trigger=(args.report_interval_iters, "iteration"),
        )
        report_keys.append("eps")
    elif args.opt in ["adam", "noam"]:
        trainer.extend(
            extensions.observe_value(
                "lr",
                lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
                    "lr"
                ],
            ),
            trigger=(args.report_interval_iters, "iteration"),
        )
        report_keys.append("lr")
    if args.report_bleu:
        report_keys.append("main/bleu")
        report_keys.append("validation/main/bleu")
    trainer.extend(
        extensions.PrintReport(report_keys),
        trigger=(args.report_interval_iters, "iteration"),
    )
    trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
    set_early_stop(trainer, args)
    if args.tensorboard_dir is not None and args.tensorboard_dir != "":
        from torch.utils.tensorboard import SummaryWriter
        trainer.extend(
            TensorboardLogger(SummaryWriter(args.tensorboard_dir), att_reporter),
            trigger=(args.report_interval_iters, "iteration"),
        )
    # Run the training
    trainer.run()
    check_early_stop(trainer, args.epochs)
def trans(args):
    """Decode (translate) with the given args.

    Loads a trained model, filters out empty utterances from the input
    json, decodes either utterance-by-utterance or in batches, and writes
    the n-best hypotheses to ``args.result_label``.

    Args:
        args (namespace): The program arguments.
    """
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, MTInterface)
    model.trans_args = args
    # gpu
    if args.ngpu == 1:
        gpu_id = list(range(args.ngpu))
        logging.info("gpu id: " + str(gpu_id))
        model.cuda()
    # read json data
    with open(args.trans_json, "rb") as f:
        js = json.load(f)["utts"]
    new_js = {}
    # remove empty utterances
    if train_args.multilingual:
        # multilingual inputs carry a language-ID token, so an utterance
        # needs more than one token to contain actual content
        js = {
            k: v
            for k, v in js.items()
            if v["output"][0]["shape"][0] > 1 and v["output"][1]["shape"][0] > 1
        }
    else:
        js = {
            k: v
            for k, v in js.items()
            if v["output"][0]["shape"][0] > 0 and v["output"][1]["shape"][0] > 0
        }
    if args.batchsize == 0:
        # utterance-by-utterance decoding
        with torch.no_grad():
            for idx, name in enumerate(js.keys(), 1):
                # BUGFIX: pass the utterance name as a lazy logging argument.
                # The old code concatenated it into the %-format string,
                # which crashed on names containing a literal "%".
                logging.info("(%d/%d) decoding %s", idx, len(js), name)
                feat = [js[name]["output"][1]["tokenid"].split()]
                nbest_hyps = model.translate(feat, args, train_args.char_list)
                new_js[name] = add_results_to_json(
                    js[name], nbest_hyps, train_args.char_list
                )
    else:

        def grouper(n, iterable, fillvalue=None):
            """Collect the iterable into fixed-length chunks (last one padded)."""
            kargs = [iter(iterable)] * n
            return itertools.zip_longest(*kargs, fillvalue=fillvalue)

        # sort utterances by source length, longest first, so batches
        # contain similarly sized sequences
        keys = list(js.keys())
        feat_lens = [js[key]["output"][1]["shape"][0] for key in keys]
        sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
        keys = [keys[i] for i in sorted_index]
        with torch.no_grad():
            for names in grouper(args.batchsize, keys, None):
                # drop the fill values added by grouper for the last batch
                names = [name for name in names if name]
                feats = [
                    np.fromiter(
                        map(int, js[name]["output"][1]["tokenid"].split()),
                        dtype=np.int64,
                    )
                    for name in names
                ]
                nbest_hyps = model.translate_batch(
                    feats,
                    args,
                    train_args.char_list,
                )
                for i, nbest_hyp in enumerate(nbest_hyps):
                    name = names[i]
                    new_js[name] = add_results_to_json(
                        js[name], nbest_hyp, train_args.char_list
                    )
    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
| 19,744 | 32.240741 | 88 | py |
espnet | espnet-master/espnet/bin/asr_recog.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""End-to-end speech recognition model decoding script."""
import logging
import os
import random
import sys
import configargparse
import numpy as np
from espnet.utils.cli_utils import strtobool
# NOTE: you need this func to generate our sphinx doc
def get_parser():
    """Build and return the argument parser for decoding.

    Returns:
        configargparse.ArgumentParser: parser covering general, task,
        model, search, transducer, LM, ngram, streaming, non-autoregressive
        and quantization options.
    """
    parser = configargparse.ArgumentParser(
        description="Transcribe text from speech using "
        "a speech recognition model on one CPU or GPU",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    # general configuration
    parser.add("--config", is_config_file=True, help="Config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="Second config file path that overwrites the settings in `--config`",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="Third config file path that overwrites the settings "
        "in `--config` and `--config2`",
    )
    parser.add_argument("--ngpu", type=int, default=0, help="Number of GPUs")
    parser.add_argument(
        "--dtype",
        choices=("float16", "float32", "float64"),
        default="float32",
        help="Float precision (only available in --api v2)",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="chainer",
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument("--debugmode", type=int, default=1, help="Debugmode")
    parser.add_argument("--seed", type=int, default=1, help="Random seed")
    parser.add_argument("--verbose", "-V", type=int, default=1, help="Verbose option")
    parser.add_argument(
        "--batchsize",
        type=int,
        default=1,
        help="Batch size for beam search (0: means no batch processing)",
    )
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        help="The configuration file for the pre-processing",
    )
    parser.add_argument(
        "--api",
        default="v1",
        choices=["v1", "v2"],
        help="Beam search APIs "
        "v1: Default API. It only supports the ASRInterface.recognize method "
        "and DefaultRNNLM. "
        "v2: Experimental API. It supports any models that implements ScorerInterface.",
    )
    # task related
    parser.add_argument(
        "--recog-json", type=str, help="Filename of recognition data (json)"
    )
    parser.add_argument(
        "--result-label",
        type=str,
        required=True,
        help="Filename of result label data (json)",
    )
    # model (parameter) related
    parser.add_argument(
        "--model", type=str, required=True, help="Model file parameters to read"
    )
    parser.add_argument(
        "--model-conf", type=str, default=None, help="Model config file"
    )
    parser.add_argument(
        "--num-spkrs",
        type=int,
        default=1,
        choices=[1, 2],
        help="Number of speakers in the speech",
    )
    parser.add_argument(
        "--num-encs", default=1, type=int, help="Number of encoders in the model."
    )
    # search related
    parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    parser.add_argument("--beam-size", type=int, default=1, help="Beam size")
    parser.add_argument("--penalty", type=float, default=0.0, help="Insertion penalty")
    parser.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0 (default), it uses a end-detect function
        to automatically find maximum hypothesis lengths.
        If maxlenratio<0.0, its absolute value is interpreted
        as a constant max output length""",
    )
    parser.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    parser.add_argument(
        "--ctc-weight", type=float, default=0.0, help="CTC weight in joint decoding"
    )
    parser.add_argument(
        "--weights-ctc-dec",
        type=float,
        action="append",
        help="ctc weight assigned to each encoder during decoding."
        "[in multi-encoder mode only]",
    )
    parser.add_argument(
        "--ctc-window-margin",
        type=int,
        default=0,
        help="""Use CTC window with margin parameter to accelerate
        CTC/attention decoding especially on GPU. Smaller margin
        makes decoding faster, but may increase search errors.
        If margin=0 (default), this function is disabled""",
    )
    # transducer related
    parser.add_argument(
        "--search-type",
        type=str,
        default="default",
        choices=["default", "nsc", "tsd", "alsd", "maes"],
        help="""Type of beam search implementation to use during inference.
        Can be either: default beam search ("default"),
        N-Step Constrained beam search ("nsc"), Time-Synchronous Decoding ("tsd"),
        Alignment-Length Synchronous Decoding ("alsd") or
        modified Adaptive Expansion Search ("maes").""",
    )
    parser.add_argument(
        "--nstep",
        type=int,
        default=1,
        help="""Number of expansion steps allowed in NSC beam search or mAES
        (nstep > 0 for NSC and nstep > 1 for mAES).""",
    )
    parser.add_argument(
        "--prefix-alpha",
        type=int,
        default=2,
        help="Length prefix difference allowed in NSC beam search or mAES.",
    )
    parser.add_argument(
        "--max-sym-exp",
        type=int,
        default=2,
        help="Number of symbol expansions allowed in TSD.",
    )
    parser.add_argument(
        "--u-max",
        type=int,
        default=400,
        help="Length prefix difference allowed in ALSD.",
    )
    parser.add_argument(
        "--expansion-gamma",
        type=float,
        default=2.3,
        help="Allowed logp difference for prune-by-value method in mAES.",
    )
    parser.add_argument(
        "--expansion-beta",
        type=int,
        default=2,
        help="""Number of additional candidates for expanded hypotheses
        selection in mAES.""",
    )
    parser.add_argument(
        "--score-norm",
        type=strtobool,
        nargs="?",
        default=True,
        help="Normalize final hypotheses' score by length",
    )
    parser.add_argument(
        "--softmax-temperature",
        type=float,
        default=1.0,
        help="Penalization term for softmax function.",
    )
    # rnnlm related
    parser.add_argument(
        "--rnnlm", type=str, default=None, help="RNNLM model file to read"
    )
    parser.add_argument(
        "--rnnlm-conf", type=str, default=None, help="RNNLM model config file to read"
    )
    parser.add_argument(
        "--word-rnnlm", type=str, default=None, help="Word RNNLM model file to read"
    )
    parser.add_argument(
        "--word-rnnlm-conf",
        type=str,
        default=None,
        help="Word RNNLM model config file to read",
    )
    parser.add_argument("--word-dict", type=str, default=None, help="Word list to read")
    parser.add_argument("--lm-weight", type=float, default=0.1, help="RNNLM weight")
    # ngram related
    parser.add_argument(
        "--ngram-model", type=str, default=None, help="ngram model file to read"
    )
    parser.add_argument("--ngram-weight", type=float, default=0.1, help="ngram weight")
    parser.add_argument(
        "--ngram-scorer",
        type=str,
        default="part",
        choices=("full", "part"),
        help="""if the ngram is set as a part scorer, similar with CTC scorer,
        ngram scorer only scores topK hypotheses.
        if the ngram is set as full scorer, ngram scorer scores all hypotheses
        the decoding speed of part scorer is much faster than full one""",
    )
    # streaming related
    parser.add_argument(
        "--streaming-mode",
        type=str,
        default=None,
        choices=["window", "segment"],
        help="""Use streaming recognizer for inference.
        `--batchsize` must be set to 0 to enable this mode""",
    )
    parser.add_argument("--streaming-window", type=int, default=10, help="Window size")
    parser.add_argument(
        "--streaming-min-blank-dur",
        type=int,
        default=10,
        help="Minimum blank duration threshold",
    )
    parser.add_argument(
        "--streaming-onset-margin", type=int, default=1, help="Onset margin"
    )
    parser.add_argument(
        "--streaming-offset-margin", type=int, default=1, help="Offset margin"
    )
    # non-autoregressive related
    # Mask CTC related. See https://arxiv.org/abs/2005.08700 for the detail.
    parser.add_argument(
        "--maskctc-n-iterations",
        type=int,
        default=10,
        help="Number of decoding iterations."
        "For Mask CTC, set 0 to predict 1 mask/iter.",
    )
    parser.add_argument(
        "--maskctc-probability-threshold",
        type=float,
        default=0.999,
        help="Threshold probability for CTC output",
    )
    # quantize model related
    parser.add_argument(
        "--quantize-config",
        nargs="*",
        help="""Config for dynamic quantization provided as a list of modules,
        separated by a comma. E.g.: --quantize-config=[Linear,LSTM,GRU].
        Each specified module should be an attribute of 'torch.nn', e.g.:
        torch.nn.Linear, torch.nn.LSTM, torch.nn.GRU, ...""",
    )
    parser.add_argument(
        "--quantize-dtype",
        type=str,
        default="qint8",
        choices=["float16", "qint8"],
        help="Dtype for dynamic quantization.",
    )
    # BUGFIX: these two flags previously used type=bool, for which argparse
    # converts ANY non-empty string (including "False") to True.  strtobool
    # parses "true"/"false"/"1"/"0" correctly, matching --score-norm above.
    parser.add_argument(
        "--quantize-asr-model",
        type=strtobool,
        default=False,
        help="Apply dynamic quantization to ASR model.",
    )
    parser.add_argument(
        "--quantize-lm-model",
        type=strtobool,
        default=False,
        help="Apply dynamic quantization to LM.",
    )
    return parser
def main(args):
    """Run the main decoding function.

    Parses the CLI arguments, configures logging and seeds, validates the
    GPU/LM option combinations, and dispatches to the backend-specific
    ``recog`` implementation.

    Args:
        args (list): Command-line arguments (excluding the program name).
    """
    parser = get_parser()
    args = parser.parse_args(args)
    # float16 inference is GPU-only
    if args.ngpu == 0 and args.dtype == "float16":
        raise ValueError(f"--dtype {args.dtype} does not support the CPU backend.")
    # logging info
    if args.verbose == 1:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose == 2:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check CUDA_VISIBLE_DEVICES matches the requested GPU count
    if args.ngpu > 0:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is None:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
        elif args.ngpu != len(cvd.split(",")):
            logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
            sys.exit(1)
        # TODO(mn5k): support of multiple GPUs
        if args.ngpu > 1:
            logging.error("The program only supports ngpu=1.")
            sys.exit(1)
    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
    # seed setting
    random.seed(args.seed)
    np.random.seed(args.seed)
    logging.info("set random seed = %d" % args.seed)
    # validate rnn options: word-level and char-level LMs are exclusive
    if args.rnnlm is not None and args.word_rnnlm is not None:
        logging.error(
            "It seems that both --rnnlm and --word-rnnlm are specified. "
            "Please use either option."
        )
        sys.exit(1)
    # recog: imports are deferred so only the chosen backend is loaded
    logging.info("backend = " + args.backend)
    if args.num_spkrs == 1:
        if args.backend == "chainer":
            from espnet.asr.chainer_backend.asr import recog
            recog(args)
        elif args.backend == "pytorch":
            if args.num_encs == 1:
                # Experimental API that supports custom LMs
                if args.api == "v2":
                    from espnet.asr.pytorch_backend.recog import recog_v2
                    recog_v2(args)
                else:
                    from espnet.asr.pytorch_backend.asr import recog
                    if args.dtype != "float32":
                        raise NotImplementedError(
                            f"`--dtype {args.dtype}` is only available with `--api v2`"
                        )
                    recog(args)
            else:
                # multi-encoder decoding is v1-only
                if args.api == "v2":
                    raise NotImplementedError(
                        f"--num-encs {args.num_encs} > 1 is not supported in --api v2"
                    )
                else:
                    from espnet.asr.pytorch_backend.asr import recog
                    recog(args)
        else:
            raise ValueError("Only chainer and pytorch are supported.")
    elif args.num_spkrs == 2:
        # multi-speaker (2-mix) decoding
        if args.backend == "pytorch":
            from espnet.asr.pytorch_backend.asr_mix import recog
            recog(args)
        else:
            raise ValueError("Only pytorch is supported.")
# Entry point: forward the CLI arguments (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| 13,800 | 32.497573 | 88 | py |
espnet | espnet-master/espnet/bin/asr_train.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Automatic speech recognition model training script."""
import logging
import os
import random
import subprocess
import sys
import configargparse
import numpy as np
from espnet import __version__
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
    """Construct and return the argument parser for ASR training.

    Args:
        parser (configargparse.ArgumentParser or None): Existing parser to
            extend. A new one is created when ``None``.
        required (bool): Whether mandatory options (e.g. ``--outdir``,
            ``--dict``) are marked as required.

    Returns:
        configargparse.ArgumentParser: Parser populated with all options.
    """
    if parser is None:
        parser = configargparse.ArgumentParser(
            description="Train an automatic speech recognition (ASR) model on one CPU, "
            "one or multiple GPUs",
            config_file_parser_class=configargparse.YAMLConfigFileParser,
            formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
        )
    # general configuration
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites the settings in "
        "`--config` and `--config2`.",
    )
    parser.add_argument(
        "--ngpu",
        default=None,
        type=int,
        help="Number of GPUs. If not given, use all visible devices",
    )
    parser.add_argument(
        "--use-ddp",
        default=False,
        action="store_true",
        help="Enable process-based data parallel. "
        "--ngpu's GPUs will be used. "
        "If --ngpu is not given, this tries to identify "
        "how many GPUs can be used. But, if it fails, "
        "the application will abort. "
        "And, currently, single node multi GPUs job is only supported.",
    )
    parser.add_argument(
        "--train-dtype",
        default="float32",
        choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
        help="Data type for training (only pytorch backend). "
        "O0,O1,.. flags require apex. "
        "See https://nvidia.github.io/apex/amp.html#opt-levels",
    )
    parser.add_argument(
        "--backend",
        default="chainer",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument(
        "--outdir", type=str, required=required, help="Output directory"
    )
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument("--dict", required=required, help="Dictionary")
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument("--debugdir", type=str, help="Output directory for debugging")
    parser.add_argument(
        "--resume",
        "-r",
        default="",
        nargs="?",
        help="Resume the training from snapshot",
    )
    parser.add_argument(
        "--minibatches",
        "-N",
        type=int,
        default="-1",
        help="Process only N minibatches (for debug)",
    )
    parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
    parser.add_argument(
        "--tensorboard-dir",
        default=None,
        type=str,
        nargs="?",
        help="Tensorboard log dir path",
    )
    parser.add_argument(
        "--report-interval-iters",
        default=100,
        type=int,
        help="Report interval iterations",
    )
    parser.add_argument(
        "--save-interval-iters",
        default=0,
        type=int,
        help="Save snapshot interval iterations",
    )
    # task related
    parser.add_argument(
        "--train-json",
        type=str,
        default=None,
        help="Filename of train label data (json)",
    )
    parser.add_argument(
        "--valid-json",
        type=str,
        default=None,
        help="Filename of validation label data (json)",
    )
    # network architecture
    parser.add_argument(
        "--model-module",
        type=str,
        default=None,
        help="model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)",
    )
    # encoder
    parser.add_argument(
        "--num-encs", default=1, type=int, help="Number of encoders in the model."
    )
    # loss related
    parser.add_argument(
        "--ctc_type",
        default="builtin",
        type=str,
        choices=["builtin", "gtnctc", "cudnnctc"],
        help="Type of CTC implementation to calculate loss.",
    )
    parser.add_argument(
        "--mtlalpha",
        default=0.5,
        type=float,
        help="Multitask learning coefficient, "
        "alpha: alpha*ctc_loss + (1-alpha)*att_loss ",
    )
    parser.add_argument(
        "--lsm-weight", default=0.0, type=float, help="Label smoothing weight"
    )
    # recognition options to compute CER/WER
    parser.add_argument(
        "--report-cer",
        default=False,
        action="store_true",
        help="Compute CER on development set",
    )
    parser.add_argument(
        "--report-wer",
        default=False,
        action="store_true",
        help="Compute WER on development set",
    )
    parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    parser.add_argument("--beam-size", type=int, default=4, help="Beam size")
    # NOTE: fixed typo in the help text ("Incertion" -> "Insertion").
    parser.add_argument("--penalty", default=0.0, type=float, help="Insertion penalty")
    parser.add_argument(
        "--maxlenratio",
        default=0.0,
        type=float,
        help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0 (default), it uses an end-detect function
        to automatically find maximum hypothesis lengths""",
    )
    parser.add_argument(
        "--minlenratio",
        default=0.0,
        type=float,
        help="Input length ratio to obtain min output length",
    )
    parser.add_argument(
        "--ctc-weight", default=0.3, type=float, help="CTC weight in joint decoding"
    )
    parser.add_argument(
        "--rnnlm", type=str, default=None, help="RNNLM model file to read"
    )
    parser.add_argument(
        "--rnnlm-conf", type=str, default=None, help="RNNLM model config file to read"
    )
    parser.add_argument("--lm-weight", default=0.1, type=float, help="RNNLM weight.")
    parser.add_argument("--sym-space", default="<space>", type=str, help="Space symbol")
    parser.add_argument("--sym-blank", default="<blank>", type=str, help="Blank symbol")
    # minibatch related
    parser.add_argument(
        "--sortagrad",
        default=0,
        type=int,
        nargs="?",
        help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
    )
    parser.add_argument(
        "--batch-count",
        default="auto",
        choices=BATCH_COUNT_CHOICES,
        help="How to count batch_size. "
        "The default (auto) will find how to count by args.",
    )
    parser.add_argument(
        "--batch-size",
        "--batch-seqs",
        "-b",
        default=0,
        type=int,
        help="Maximum seqs in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-bins",
        default=0,
        type=int,
        help="Maximum bins in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-in",
        default=0,
        type=int,
        help="Maximum input frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-out",
        default=0,
        type=int,
        help="Maximum output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-inout",
        default=0,
        type=int,
        help="Maximum input+output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--maxlen-in",
        "--batch-seq-maxlen-in",
        default=800,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the input sequence length > ML.",
    )
    parser.add_argument(
        "--maxlen-out",
        "--batch-seq-maxlen-out",
        default=150,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the output sequence length > ML",
    )
    parser.add_argument(
        "--n-iter-processes",
        default=0,
        type=int,
        help="Number of processes of iterator",
    )
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        nargs="?",
        help="The configuration file for the pre-processing",
    )
    # optimization related
    parser.add_argument(
        "--opt",
        default="adadelta",
        type=str,
        choices=["adadelta", "adam", "noam"],
        help="Optimizer",
    )
    # NOTE: fixed typo in the help text ("accumuration" -> "accumulation").
    parser.add_argument(
        "--accum-grad", default=1, type=int, help="Number of gradient accumulation"
    )
    parser.add_argument(
        "--eps", default=1e-8, type=float, help="Epsilon constant for optimizer"
    )
    parser.add_argument(
        "--eps-decay", default=0.01, type=float, help="Decaying ratio of epsilon"
    )
    parser.add_argument(
        "--weight-decay", default=0.0, type=float, help="Weight decay ratio"
    )
    parser.add_argument(
        "--criterion",
        default="acc",
        type=str,
        choices=["loss", "loss_eps_decay_only", "acc"],
        help="Criterion to perform epsilon decay",
    )
    parser.add_argument(
        "--threshold", default=1e-4, type=float, help="Threshold to stop iteration"
    )
    parser.add_argument(
        "--epochs", "-e", default=30, type=int, help="Maximum number of epochs"
    )
    parser.add_argument(
        "--early-stop-criterion",
        default="validation/main/acc",
        type=str,
        nargs="?",
        help="Value to monitor to trigger an early stopping of the training",
    )
    parser.add_argument(
        "--patience",
        default=3,
        type=int,
        nargs="?",
        help="Number of epochs to wait without improvement "
        "before stopping the training",
    )
    parser.add_argument(
        "--grad-clip", default=5, type=float, help="Gradient norm threshold to clip"
    )
    parser.add_argument(
        "--num-save-attention",
        default=3,
        type=int,
        help="Number of samples of attention to be saved",
    )
    parser.add_argument(
        "--num-save-ctc",
        default=3,
        type=int,
        help="Number of samples of CTC probability to be saved",
    )
    parser.add_argument(
        "--grad-noise",
        type=strtobool,
        default=False,
        help="The flag to switch to use noise injection to gradients during training",
    )
    # asr_mix related
    parser.add_argument(
        "--num-spkrs",
        default=1,
        type=int,
        choices=[1, 2],
        help="Number of speakers in the speech.",
    )
    # decoder related
    parser.add_argument(
        "--context-residual",
        default=False,
        type=strtobool,
        nargs="?",
        help="The flag to switch to use context vector residual in the decoder network",
    )
    # finetuning related
    parser.add_argument(
        "--enc-init",
        default=None,
        type=str,
        help="Pre-trained ASR model to initialize encoder.",
    )
    parser.add_argument(
        "--enc-init-mods",
        default="enc.enc.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of encoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--dec-init",
        default=None,
        type=str,
        help="Pre-trained ASR, MT or LM model to initialize decoder.",
    )
    parser.add_argument(
        "--dec-init-mods",
        default="att.,dec.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of decoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--freeze-mods",
        default=None,
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of modules to freeze, separated by a comma.",
    )
    # front end related
    parser.add_argument(
        "--use-frontend",
        type=strtobool,
        default=False,
        help="The flag to switch to use frontend system.",
    )
    # WPE related
    parser.add_argument(
        "--use-wpe",
        type=strtobool,
        default=False,
        help="Apply Weighted Prediction Error",
    )
    parser.add_argument(
        "--wtype",
        default="blstmp",
        type=str,
        choices=[
            "lstm",
            "blstm",
            "lstmp",
            "blstmp",
            "vgglstmp",
            "vggblstmp",
            "vgglstm",
            "vggblstm",
            "gru",
            "bgru",
            "grup",
            "bgrup",
            "vgggrup",
            "vggbgrup",
            "vgggru",
            "vggbgru",
        ],
        help="Type of encoder network architecture "
        "of the mask estimator for WPE. "
        "",
    )
    parser.add_argument("--wlayers", type=int, default=2, help="")
    parser.add_argument("--wunits", type=int, default=300, help="")
    parser.add_argument("--wprojs", type=int, default=300, help="")
    parser.add_argument("--wdropout-rate", type=float, default=0.0, help="")
    parser.add_argument("--wpe-taps", type=int, default=5, help="")
    parser.add_argument("--wpe-delay", type=int, default=3, help="")
    parser.add_argument(
        "--use-dnn-mask-for-wpe",
        type=strtobool,
        default=False,
        help="Use DNN to estimate the power spectrogram. "
        "This option is experimental.",
    )
    # Beamformer related
    parser.add_argument("--use-beamformer", type=strtobool, default=True, help="")
    parser.add_argument(
        "--btype",
        default="blstmp",
        type=str,
        choices=[
            "lstm",
            "blstm",
            "lstmp",
            "blstmp",
            "vgglstmp",
            "vggblstmp",
            "vgglstm",
            "vggblstm",
            "gru",
            "bgru",
            "grup",
            "bgrup",
            "vgggrup",
            "vggbgrup",
            "vgggru",
            "vggbgru",
        ],
        help="Type of encoder network architecture "
        "of the mask estimator for Beamformer.",
    )
    parser.add_argument("--blayers", type=int, default=2, help="")
    parser.add_argument("--bunits", type=int, default=300, help="")
    parser.add_argument("--bprojs", type=int, default=300, help="")
    parser.add_argument("--badim", type=int, default=320, help="")
    parser.add_argument(
        "--bnmask",
        type=int,
        default=2,
        help="Number of beamforming masks, " "default is 2 for [speech, noise].",
    )
    parser.add_argument(
        "--ref-channel",
        type=int,
        default=-1,
        help="The reference channel used for beamformer. "
        "By default, the channel is estimated by DNN.",
    )
    parser.add_argument("--bdropout-rate", type=float, default=0.0, help="")
    # Feature transform: Normalization
    parser.add_argument(
        "--stats-file",
        type=str,
        default=None,
        help="The stats file for the feature normalization",
    )
    parser.add_argument(
        "--apply-uttmvn",
        type=strtobool,
        default=True,
        help="Apply utterance level mean " "variance normalization.",
    )
    parser.add_argument("--uttmvn-norm-means", type=strtobool, default=True, help="")
    parser.add_argument("--uttmvn-norm-vars", type=strtobool, default=False, help="")
    # Feature transform: Fbank
    parser.add_argument(
        "--fbank-fs",
        type=int,
        default=16000,
        help="The sample frequency used for " "the mel-fbank creation.",
    )
    parser.add_argument(
        "--n-mels", type=int, default=80, help="The number of mel-frequency bins."
    )
    parser.add_argument("--fbank-fmin", type=float, default=0.0, help="")
    parser.add_argument("--fbank-fmax", type=float, default=None, help="")
    return parser
def setup_logging(verbose):
    """Configure the root logger according to the verbosity level.

    Args:
        verbose (int): Verbosity; > 0 enables INFO-level logging,
            otherwise only WARN and above are shown.
    """
    fmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    if verbose > 0:
        logging.basicConfig(level=logging.INFO, format=fmt)
    else:
        logging.basicConfig(level=logging.WARN, format=fmt)
        logging.warning("Skip DEBUG/INFO messages")
def main(cmd_args):
    """Run the main training function."""
    parser = get_parser()
    # First pass: parse only the known (common) options so that the
    # model-specific options can be registered afterwards.
    args, _ = parser.parse_known_args(cmd_args)
    if args.backend == "chainer" and args.train_dtype != "float32":
        raise NotImplementedError(
            f"chainer backend does not support --train-dtype {args.train_dtype}."
            "Use --dtype float32."
        )
    if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
        raise ValueError(
            f"--train-dtype {args.train_dtype} does not support the CPU backend."
        )
    from espnet.utils.dynamic_import import dynamic_import
    # Resolve the model class: either an explicit --model-module, or the
    # default E2E class for the chosen backend and number of speakers.
    if args.model_module is None:
        if args.num_spkrs == 1:
            model_module = "espnet.nets." + args.backend + "_backend.e2e_asr:E2E"
        else:
            model_module = "espnet.nets." + args.backend + "_backend.e2e_asr_mix:E2E"
    else:
        model_module = args.model_module
    model_class = dynamic_import(model_module)
    # Let the model class register its own CLI options, then re-parse.
    model_class.add_arguments(parser)
    args = parser.parse_args(cmd_args)
    args.model_module = model_module
    # The backend is forced to match the resolved model module.
    if "chainer_backend" in args.model_module:
        args.backend = "chainer"
    if "pytorch_backend" in args.model_module:
        args.backend = "pytorch"
    # add version info in args
    args.version = __version__
    # logging info
    setup_logging(args.verbose)
    # If --ngpu is not given,
    # 1. if CUDA_VISIBLE_DEVICES is set, all visible devices
    # 2. if nvidia-smi exists, use all devices
    # 3. else ngpu=0
    if args.ngpu is None:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is not None:
            ngpu = len(cvd.split(","))
        else:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
            try:
                p = subprocess.run(
                    ["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
            except (subprocess.CalledProcessError, FileNotFoundError):
                ngpu = 0
            else:
                # NOTE(review): this counts lines on stderr; presumably the
                # intent is to count the "GPU N: ..." lines -- confirm which
                # stream nvidia-smi writes to in the target environment.
                ngpu = len(p.stderr.decode().split("\n")) - 1
    else:
        if args.ngpu != 1:
            logging.debug(
                "There are some bugs with multi-GPU processing in PyTorch 1.2+"
                + " (see https://github.com/pytorch/pytorch/issues/21108)"
            )
        ngpu = args.ngpu
    if args.use_ddp and ngpu <= 0:
        raise ValueError("DDP requires at least 1 GPU.")
    logging.info(f"ngpu: {ngpu}")
    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
    # set random seed
    logging.info("random seed = %d" % args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
    # load dictionary for debug log
    if args.dict is not None:
        with open(args.dict, "rb") as f:
            dictionary = f.readlines()
        # The dictionary maps "<token> <id>" per line; keep only the token.
        char_list = [entry.decode("utf-8").split(" ")[0] for entry in dictionary]
        char_list.insert(0, "<blank>")
        char_list.append("<eos>")
        # for non-autoregressive maskctc model
        if "maskctc" in args.model_module:
            char_list.append("<mask>")
        args.char_list = char_list
    else:
        args.char_list = None
    # train
    logging.info("backend = " + args.backend)
    if args.use_ddp:
        # When using DDP, only PyTorch is supported.
        # Chainer is out-of-scope.
        if args.num_spkrs == 1:
            if args.backend == "chainer":
                raise ValueError("Chainer with DDP is not supported.")
            from espnet.distributed.pytorch_backend.launch import (
                launch,
                set_start_method,
            )
            # NOTE: it's necessary to set "spawn" as a multiprocessing
            # start method. Because, in this use case, CUDA initialization
            # procedure has been already done, but CUDA context can't be
            # shared with processes.
            # By default, multiprocessing tries to launch a process with
            # "fork" method. But, it will make processes which share
            # memory address spaces with a parent process.
            # To ensure a separate memory space, "spawn" method is required.
            set_start_method("spawn")
            launch(_reinitialize_logging_and_call_train, args, args.ngpu)
        else:
            raise ValueError("Single speaker is only supported when using DDP.")
    else:
        if args.num_spkrs == 1:
            if args.backend == "chainer":
                from espnet.asr.chainer_backend.asr import train
                train(args)
            elif args.backend == "pytorch":
                from espnet.asr.pytorch_backend.asr import train
                train(args)
            else:
                raise ValueError("Only chainer and pytorch are supported.")
        else:
            # FIXME(kamo): Support --model-module
            if args.backend == "pytorch":
                from espnet.asr.pytorch_backend.asr_mix import train
                train(args)
            else:
                raise ValueError("Only pytorch is supported.")
def _reinitialize_logging_and_call_train(args):
    """Re-apply the logging setup inside a DDP worker, then train."""
    # NOTE: it looks like logging setting is cleared
    # by launching processes with "spawn" method.
    # Within each worker process,
    # logging configuration must be set again.
    from espnet.asr.pytorch_backend.asr import train
    setup_logging(args.verbose)
    train(args)
if __name__ == "__main__":
    # CLI entry point: forward the command-line arguments (without argv[0]).
    main(sys.argv[1:])
| 22,288 | 31.163059 | 88 | py |
espnet | espnet-master/espnet/bin/lm_train.py | #!/usr/bin/env python3
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# This code is ported from the following implementation written in Torch.
# https://github.com/chainer/chainer/blob/master/examples/ptb/train_ptb_custom_loop.py
"""Language model training script."""
import logging
import os
import random
import subprocess
import sys
import configargparse
import numpy as np
from espnet import __version__
from espnet.nets.lm_interface import dynamic_import_lm
from espnet.optimizer.factory import dynamic_import_optimizer
from espnet.scheduler.scheduler import dynamic_import_scheduler
# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
    """Construct and return the argument parser for LM training.

    Args:
        parser (configargparse.ArgumentParser or None): Existing parser to
            extend. A new one is created when ``None``.
        required (bool): Whether mandatory options (e.g. ``--outdir``,
            ``--dict``) are marked as required.

    Returns:
        configargparse.ArgumentParser: Parser populated with all options.
    """
    if parser is None:
        parser = configargparse.ArgumentParser(
            description="Train a new language model on one CPU or one GPU",
            config_file_parser_class=configargparse.YAMLConfigFileParser,
            formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
        )
    # general configuration
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites the settings "
        "in `--config` and `--config2`.",
    )
    parser.add_argument(
        "--ngpu",
        default=None,
        type=int,
        help="Number of GPUs. If not given, use all visible devices",
    )
    parser.add_argument(
        "--train-dtype",
        default="float32",
        choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
        help="Data type for training (only pytorch backend). "
        "O0,O1,.. flags require apex. "
        "See https://nvidia.github.io/apex/amp.html#opt-levels",
    )
    parser.add_argument(
        "--backend",
        default="chainer",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument(
        "--outdir", type=str, required=required, help="Output directory"
    )
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument("--dict", type=str, required=required, help="Dictionary")
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument(
        "--resume",
        "-r",
        default="",
        nargs="?",
        help="Resume the training from snapshot",
    )
    parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
    parser.add_argument(
        "--tensorboard-dir",
        default=None,
        type=str,
        nargs="?",
        help="Tensorboard log dir path",
    )
    parser.add_argument(
        "--report-interval-iters",
        default=100,
        type=int,
        help="Report interval iterations",
    )
    # task related
    parser.add_argument(
        "--train-label",
        type=str,
        required=required,
        help="Filename of train label data",
    )
    parser.add_argument(
        "--valid-label",
        type=str,
        required=required,
        help="Filename of validation label data",
    )
    parser.add_argument("--test-label", type=str, help="Filename of test label data")
    parser.add_argument(
        "--dump-hdf5-path",
        type=str,
        default=None,
        help="Path to dump a preprocessed dataset as hdf5",
    )
    # training configuration
    parser.add_argument("--opt", default="sgd", type=str, help="Optimizer")
    parser.add_argument(
        "--sortagrad",
        default=0,
        type=int,
        nargs="?",
        help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
    )
    parser.add_argument(
        "--batchsize",
        "-b",
        type=int,
        default=300,
        help="Number of examples in each mini-batch",
    )
    # NOTE: fixed typo in the help text ("accumueration" -> "accumulation").
    parser.add_argument(
        "--accum-grad", type=int, default=1, help="Number of gradient accumulation"
    )
    parser.add_argument(
        "--epoch",
        "-e",
        type=int,
        default=20,
        help="Number of sweeps over the dataset to train",
    )
    parser.add_argument(
        "--early-stop-criterion",
        default="validation/main/loss",
        type=str,
        nargs="?",
        help="Value to monitor to trigger an early stopping of the training",
    )
    parser.add_argument(
        "--patience",
        default=3,
        type=int,
        nargs="?",
        help="Number of epochs "
        "to wait without improvement before stopping the training",
    )
    # NOTE: fixed typo in the help text ("schduler" -> "scheduler").
    parser.add_argument(
        "--schedulers",
        default=None,
        action="append",
        type=lambda kv: kv.split("="),
        help="optimizer schedulers, you can configure params like:"
        " <optimizer-param>-<scheduler-name>-<scheduler-param>"
        ' e.g., "--schedulers lr=noam --lr-noam-warmup 1000".',
    )
    parser.add_argument(
        "--gradclip",
        "-c",
        type=float,
        default=5,
        help="Gradient norm threshold to clip",
    )
    parser.add_argument(
        "--maxlen",
        type=int,
        default=40,
        help="Batch size is reduced if the input sequence > ML",
    )
    parser.add_argument(
        "--model-module",
        type=str,
        default="default",
        help="model defined module "
        "(default: espnet.nets.xxx_backend.lm.default:DefaultRNNLM)",
    )
    return parser
def main(cmd_args):
    """Train LM."""
    parser = get_parser()
    # First pass: parse only the known (common) options so that the
    # model-/optimizer-/scheduler-specific options can be added afterwards.
    args, _ = parser.parse_known_args(cmd_args)
    if args.backend == "chainer" and args.train_dtype != "float32":
        raise NotImplementedError(
            f"chainer backend does not support --train-dtype {args.train_dtype}."
            "Use --dtype float32."
        )
    if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
        raise ValueError(
            f"--train-dtype {args.train_dtype} does not support the CPU backend."
        )
    # parse arguments dynamically
    model_class = dynamic_import_lm(args.model_module, args.backend)
    model_class.add_arguments(parser)
    if args.schedulers is not None:
        # Each entry is a (key, value) pair produced by splitting "k=v".
        for k, v in args.schedulers:
            scheduler_class = dynamic_import_scheduler(v)
            scheduler_class.add_arguments(k, parser)
    opt_class = dynamic_import_optimizer(args.opt, args.backend)
    opt_class.add_arguments(parser)
    args = parser.parse_args(cmd_args)
    # add version info in args
    args.version = __version__
    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # If --ngpu is not given,
    # 1. if CUDA_VISIBLE_DEVICES is set, all visible devices
    # 2. if nvidia-smi exists, use all devices
    # 3. else ngpu=0
    if args.ngpu is None:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is not None:
            ngpu = len(cvd.split(","))
        else:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
            try:
                p = subprocess.run(
                    ["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
            except (subprocess.CalledProcessError, FileNotFoundError):
                ngpu = 0
            else:
                # NOTE(review): counts lines on stderr; presumably meant to
                # count the "GPU N: ..." listing -- confirm which stream
                # nvidia-smi uses in the target environment.
                ngpu = len(p.stderr.decode().split("\n")) - 1
        args.ngpu = ngpu
    else:
        ngpu = args.ngpu
    logging.info(f"ngpu: {ngpu}")
    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
    # seed setting
    nseed = args.seed
    random.seed(nseed)
    np.random.seed(nseed)
    # load dictionary
    with open(args.dict, "rb") as f:
        dictionary = f.readlines()
    # The dictionary maps "<token> <id>" per line; keep only the token.
    char_list = [entry.decode("utf-8").split(" ")[0] for entry in dictionary]
    char_list.insert(0, "<blank>")
    char_list.append("<eos>")
    args.char_list_dict = {x: i for i, x in enumerate(char_list)}
    args.n_vocab = len(char_list)
    # train
    logging.info("backend = " + args.backend)
    if args.backend == "chainer":
        from espnet.lm.chainer_backend.lm import train
        train(args)
    elif args.backend == "pytorch":
        from espnet.lm.pytorch_backend.lm import train
        train(args)
    else:
        raise ValueError("Only chainer and pytorch are supported.")
if __name__ == "__main__":
    # CLI entry point: forward the command-line arguments (without argv[0]).
    main(sys.argv[1:])
| 8,939 | 29.934256 | 88 | py |
espnet | espnet-master/espnet/bin/tts_train.py | #!/usr/bin/env python3
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Text-to-speech model training script."""
import logging
import os
import random
import subprocess
import sys
import configargparse
import numpy as np
from espnet import __version__
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
# NOTE: you need this func to generate our sphinx doc
def get_parser():
    """Construct and return the argument parser for TTS training.

    Returns:
        configargparse.ArgumentParser: Parser populated with all options.
    """
    parser = configargparse.ArgumentParser(
        description="Train a new text-to-speech (TTS) model on one CPU, "
        "one or multiple GPUs",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    # general configuration
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites "
        "the settings in `--config` and `--config2`.",
    )
    parser.add_argument(
        "--ngpu",
        default=None,
        type=int,
        help="Number of GPUs. If not given, use all visible devices",
    )
    parser.add_argument(
        "--backend",
        default="pytorch",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument("--outdir", type=str, required=True, help="Output directory")
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument(
        "--resume",
        "-r",
        default="",
        type=str,
        nargs="?",
        help="Resume the training from snapshot",
    )
    parser.add_argument(
        "--minibatches",
        "-N",
        type=int,
        default="-1",
        help="Process only N minibatches (for debug)",
    )
    parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
    parser.add_argument(
        "--tensorboard-dir",
        default=None,
        type=str,
        nargs="?",
        help="Tensorboard log directory path",
    )
    parser.add_argument(
        "--eval-interval-epochs", default=1, type=int, help="Evaluation interval epochs"
    )
    parser.add_argument(
        "--save-interval-epochs", default=1, type=int, help="Save interval epochs"
    )
    parser.add_argument(
        "--report-interval-iters",
        default=100,
        type=int,
        help="Report interval iterations",
    )
    # task related
    parser.add_argument(
        "--train-json", type=str, required=True, help="Filename of training json"
    )
    parser.add_argument(
        "--valid-json", type=str, required=True, help="Filename of validation json"
    )
    # network architecture
    parser.add_argument(
        "--model-module",
        type=str,
        default="espnet.nets.pytorch_backend.e2e_tts_tacotron2:Tacotron2",
        help="model defined module",
    )
    # minibatch related
    parser.add_argument(
        "--sortagrad",
        default=0,
        type=int,
        nargs="?",
        help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
    )
    parser.add_argument(
        "--batch-sort-key",
        default="shuffle",
        type=str,
        choices=["shuffle", "output", "input"],
        nargs="?",
        help='Batch sorting key. "shuffle" only work with --batch-count "seq".',
    )
    parser.add_argument(
        "--batch-count",
        default="auto",
        choices=BATCH_COUNT_CHOICES,
        help="How to count batch_size. "
        "The default (auto) will find how to count by args.",
    )
    parser.add_argument(
        "--batch-size",
        "--batch-seqs",
        "-b",
        default=0,
        type=int,
        help="Maximum seqs in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-bins",
        default=0,
        type=int,
        help="Maximum bins in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-in",
        default=0,
        type=int,
        help="Maximum input frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-out",
        default=0,
        type=int,
        help="Maximum output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-inout",
        default=0,
        type=int,
        help="Maximum input+output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--maxlen-in",
        "--batch-seq-maxlen-in",
        default=100,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the input sequence length > ML.",
    )
    parser.add_argument(
        "--maxlen-out",
        "--batch-seq-maxlen-out",
        default=200,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the output sequence length > ML",
    )
    parser.add_argument(
        "--num-iter-processes",
        default=0,
        type=int,
        help="Number of processes of iterator",
    )
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        help="The configuration file for the pre-processing",
    )
    parser.add_argument(
        "--use-speaker-embedding",
        default=False,
        type=strtobool,
        help="Whether to use speaker embedding",
    )
    parser.add_argument(
        "--use-second-target",
        default=False,
        type=strtobool,
        help="Whether to use second target",
    )
    # optimization related
    parser.add_argument(
        "--opt", default="adam", type=str, choices=["adam", "noam"], help="Optimizer"
    )
    # NOTE: fixed typo in the help text ("accumuration" -> "accumulation").
    parser.add_argument(
        "--accum-grad", default=1, type=int, help="Number of gradient accumulation"
    )
    parser.add_argument(
        "--lr", default=1e-3, type=float, help="Learning rate for optimizer"
    )
    parser.add_argument("--eps", default=1e-6, type=float, help="Epsilon for optimizer")
    parser.add_argument(
        "--weight-decay",
        default=1e-6,
        type=float,
        help="Weight decay coefficient for optimizer",
    )
    parser.add_argument(
        "--epochs", "-e", default=30, type=int, help="Number of maximum epochs"
    )
    parser.add_argument(
        "--early-stop-criterion",
        default="validation/main/loss",
        type=str,
        nargs="?",
        help="Value to monitor to trigger an early stopping of the training",
    )
    parser.add_argument(
        "--patience",
        default=3,
        type=int,
        nargs="?",
        help="Number of epochs to wait "
        "without improvement before stopping the training",
    )
    parser.add_argument(
        "--grad-clip", default=1, type=float, help="Gradient norm threshold to clip"
    )
    parser.add_argument(
        "--num-save-attention",
        default=5,
        type=int,
        help="Number of samples of attention to be saved",
    )
    parser.add_argument(
        "--keep-all-data-on-mem",
        default=False,
        type=strtobool,
        help="Whether to keep all data on memory",
    )
    # finetuning related
    parser.add_argument(
        "--enc-init",
        default=None,
        type=str,
        help="Pre-trained TTS model path to initialize encoder.",
    )
    parser.add_argument(
        "--enc-init-mods",
        default="enc.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of encoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--dec-init",
        default=None,
        type=str,
        help="Pre-trained TTS model path to initialize decoder.",
    )
    parser.add_argument(
        "--dec-init-mods",
        default="dec.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of decoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--freeze-mods",
        default=None,
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of modules to freeze (not to train), separated by a comma.",
    )
    return parser
def main(cmd_args):
    """Run training."""
    parser = get_parser()
    # First pass: parse only the known (common) options so that the
    # model-specific options can be registered afterwards.
    args, _ = parser.parse_known_args(cmd_args)
    from espnet.utils.dynamic_import import dynamic_import
    model_class = dynamic_import(args.model_module)
    # The imported model must implement the TTS interface.
    assert issubclass(model_class, TTSInterface)
    model_class.add_arguments(parser)
    args = parser.parse_args(cmd_args)
    # add version info in args
    args.version = __version__
    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # If --ngpu is not given,
    # 1. if CUDA_VISIBLE_DEVICES is set, all visible devices
    # 2. if nvidia-smi exists, use all devices
    # 3. else ngpu=0
    if args.ngpu is None:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is not None:
            ngpu = len(cvd.split(","))
        else:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
            try:
                p = subprocess.run(
                    ["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
            except (subprocess.CalledProcessError, FileNotFoundError):
                ngpu = 0
            else:
                # NOTE(review): counts lines on stderr; presumably meant to
                # count the "GPU N: ..." listing -- confirm which stream
                # nvidia-smi uses in the target environment.
                ngpu = len(p.stderr.decode().split("\n")) - 1
        args.ngpu = ngpu
    else:
        ngpu = args.ngpu
    logging.info(f"ngpu: {ngpu}")
    # set random seed
    logging.info("random seed = %d" % args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
    if args.backend == "pytorch":
        from espnet.tts.pytorch_backend.tts import train
        train(args)
    else:
        raise NotImplementedError("Only pytorch is supported.")
if __name__ == "__main__":
main(sys.argv[1:])
| 10,680 | 28.669444 | 88 | py |
espnet | espnet-master/espnet/bin/asr_enhance.py | #!/usr/bin/env python3
import logging
import os
import random
import sys
from distutils.util import strtobool
import configargparse
import numpy as np
from espnet.asr.pytorch_backend.asr import enhance
# NOTE: you need this func to generate our sphinx doc
def get_parser():
    """Build the argument parser for the speech-enhancement front-end CLI.

    Returns:
        configargparse.ArgumentParser: Parser holding all enhancement options.
    """
    parser = configargparse.ArgumentParser(
        description="Enhance noisy speech for speech recognition",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    # general configuration
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites the settings "
        "in `--config` and `--config2`.",
    )
    parser.add_argument("--ngpu", default=0, type=int, help="Number of GPUs")
    parser.add_argument(
        "--backend",
        default="chainer",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument("--verbose", "-V", default=1, type=int, help="Verbose option")
    parser.add_argument(
        "--batchsize",
        default=1,
        type=int,
        help="Batch size for beam search (0: means no batch processing)",
    )
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        help="The configuration file for the pre-processing",
    )
    # task related
    parser.add_argument(
        "--recog-json", type=str, help="Filename of recognition data (json)"
    )
    # model (parameter) related
    parser.add_argument(
        "--model", type=str, required=True, help="Model file parameters to read"
    )
    parser.add_argument(
        "--model-conf", type=str, default=None, help="Model config file"
    )
    # Outputs configuration
    parser.add_argument(
        "--enh-wspecifier",
        type=str,
        default=None,
        help="Specify the output way for enhanced speech."
        "e.g. ark,scp:outdir,wav.scp",
    )
    parser.add_argument(
        "--enh-filetype",
        type=str,
        default="sound",
        choices=["mat", "hdf5", "sound.hdf5", "sound"],
        help="Specify the file format for enhanced speech. "
        '"mat" is the matrix format in kaldi',
    )
    parser.add_argument("--fs", type=int, default=16000, help="The sample frequency")
    parser.add_argument(
        "--keep-length",
        type=strtobool,
        default=True,
        help="Adjust the output length to match " "with the input for enhanced speech",
    )
    parser.add_argument(
        "--image-dir", type=str, default=None, help="The directory saving the images."
    )
    parser.add_argument(
        "--num-images",
        type=int,
        default=20,
        help="The number of images files to be saved. "
        "If negative, all samples are to be saved.",
    )
    # IStft
    parser.add_argument(
        "--apply-istft",
        type=strtobool,
        default=True,
        help="Apply istft to the output from the network",
    )
    parser.add_argument(
        "--istft-win-length",
        type=int,
        default=512,
        help="The window length for istft. "
        "This option is ignored "
        "if stft is found in the preprocess-conf",
    )
    parser.add_argument(
        "--istft-n-shift",
        # BUG FIX: was type=str, which made CLI-supplied values reach the istft
        # as strings while the default (256) was an int; a frame shift is an int.
        type=int,
        default=256,
        # BUG FIX: help text was a copy-paste of --istft-window
        # ("The window type for istft.") and described the wrong option.
        help="The number of shift points for istft. "
        "This option is ignored "
        "if stft is found in the preprocess-conf",
    )
    parser.add_argument(
        "--istft-window",
        type=str,
        default="hann",
        help="The window type for istft. "
        "This option is ignored "
        "if stft is found in the preprocess-conf",
    )
    return parser
def main(args):
    """Parse command-line arguments and run speech enhancement."""
    args = get_parser().parse_args(args)

    # logging info
    log_format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    verbose_to_level = {1: logging.INFO, 2: logging.DEBUG}
    if args.verbose in verbose_to_level:
        logging.basicConfig(level=verbose_to_level[args.verbose], format=log_format)
    else:
        logging.basicConfig(level=logging.WARN, format=log_format)
        logging.warning("Skip DEBUG/INFO messages")

    # check CUDA_VISIBLE_DEVICES
    if args.ngpu > 0:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is None:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
        elif args.ngpu != len(cvd.split(",")):
            logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
            sys.exit(1)

        # TODO(kamo): support of multiple GPUs
        if args.ngpu > 1:
            logging.error("The program only supports ngpu=1.")
            sys.exit(1)

    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))

    # seed setting
    random.seed(args.seed)
    np.random.seed(args.seed)
    logging.info("set random seed = %d" % args.seed)

    # recog
    logging.info("backend = " + args.backend)
    if args.backend != "pytorch":
        raise ValueError("Only pytorch is supported.")
    enhance(args)


if __name__ == "__main__":
    main(sys.argv[1:])
| 5,778 | 29.098958 | 87 | py |
espnet | espnet-master/espnet/bin/vc_train.py | #!/usr/bin/env python3
# Copyright 2020 Nagoya University (Wen-Chin Huang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Voice conversion model training script."""
import logging
import os
import random
import subprocess
import sys
import configargparse
import numpy as np
from espnet import __version__
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
# NOTE: you need this func to generate our sphinx doc
def get_parser():
    """Get parser of training arguments.

    Returns:
        configargparse.ArgumentParser: Parser holding all VC training options.
    """
    parser = configargparse.ArgumentParser(
        description="Train a new voice conversion (VC) model on one CPU, "
        "one or multiple GPUs",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    # general configuration
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites the settings "
        "in `--config` and `--config2`.",
    )
    parser.add_argument(
        "--ngpu",
        default=None,
        type=int,
        help="Number of GPUs. If not given, use all visible devices",
    )
    parser.add_argument(
        "--backend",
        default="pytorch",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument("--outdir", type=str, required=True, help="Output directory")
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument(
        "--resume",
        "-r",
        default="",
        type=str,
        nargs="?",
        help="Resume the training from snapshot",
    )
    parser.add_argument(
        "--minibatches",
        "-N",
        type=int,
        # FIX: was default="-1" (a string), which only worked because argparse
        # re-parses string defaults through `type`; use a real int instead.
        default=-1,
        help="Process only N minibatches (for debug)",
    )
    parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
    parser.add_argument(
        "--tensorboard-dir",
        default=None,
        type=str,
        nargs="?",
        help="Tensorboard log directory path",
    )
    parser.add_argument(
        "--eval-interval-epochs",
        default=100,
        type=int,
        help="Evaluation interval epochs",
    )
    parser.add_argument(
        "--save-interval-epochs", default=1, type=int, help="Save interval epochs"
    )
    parser.add_argument(
        "--report-interval-iters",
        default=10,
        type=int,
        help="Report interval iterations",
    )
    # task related
    parser.add_argument("--srcspk", type=str, help="Source speaker")
    parser.add_argument("--trgspk", type=str, help="Target speaker")
    parser.add_argument(
        "--train-json", type=str, required=True, help="Filename of training json"
    )
    parser.add_argument(
        "--valid-json", type=str, required=True, help="Filename of validation json"
    )
    # network architecture
    parser.add_argument(
        "--model-module",
        type=str,
        default="espnet.nets.pytorch_backend.e2e_tts_tacotron2:Tacotron2",
        help="model defined module",
    )
    # minibatch related
    parser.add_argument(
        "--sortagrad",
        default=0,
        type=int,
        nargs="?",
        help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
    )
    parser.add_argument(
        "--batch-sort-key",
        default="shuffle",
        type=str,
        choices=["shuffle", "output", "input"],
        nargs="?",
        help='Batch sorting key. "shuffle" only work with --batch-count "seq".',
    )
    parser.add_argument(
        "--batch-count",
        default="auto",
        choices=BATCH_COUNT_CHOICES,
        help="How to count batch_size. "
        "The default (auto) will find how to count by args.",
    )
    parser.add_argument(
        "--batch-size",
        "--batch-seqs",
        "-b",
        default=0,
        type=int,
        help="Maximum seqs in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-bins",
        default=0,
        type=int,
        help="Maximum bins in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-in",
        default=0,
        type=int,
        help="Maximum input frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-out",
        default=0,
        type=int,
        help="Maximum output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-inout",
        default=0,
        type=int,
        help="Maximum input+output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--maxlen-in",
        "--batch-seq-maxlen-in",
        default=100,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the input sequence length > ML.",
    )
    parser.add_argument(
        "--maxlen-out",
        "--batch-seq-maxlen-out",
        default=200,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the output sequence length > ML",
    )
    parser.add_argument(
        "--num-iter-processes",
        default=0,
        type=int,
        help="Number of processes of iterator",
    )
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        help="The configuration file for the pre-processing",
    )
    parser.add_argument(
        "--use-speaker-embedding",
        default=False,
        type=strtobool,
        help="Whether to use speaker embedding",
    )
    parser.add_argument(
        "--use-second-target",
        default=False,
        type=strtobool,
        help="Whether to use second target",
    )
    # optimization related
    parser.add_argument(
        "--opt",
        default="adam",
        type=str,
        choices=["adam", "noam", "lamb"],
        help="Optimizer",
    )
    parser.add_argument(
        "--accum-grad", default=1, type=int, help="Number of gradient accumuration"
    )
    parser.add_argument(
        "--lr", default=1e-3, type=float, help="Learning rate for optimizer"
    )
    parser.add_argument("--eps", default=1e-6, type=float, help="Epsilon for optimizer")
    parser.add_argument(
        "--weight-decay",
        default=1e-6,
        type=float,
        help="Weight decay coefficient for optimizer",
    )
    parser.add_argument(
        "--epochs", "-e", default=30, type=int, help="Number of maximum epochs"
    )
    parser.add_argument(
        "--early-stop-criterion",
        default="validation/main/loss",
        type=str,
        nargs="?",
        help="Value to monitor to trigger an early stopping of the training",
    )
    parser.add_argument(
        "--patience",
        default=3,
        type=int,
        nargs="?",
        help="Number of epochs to wait without improvement "
        "before stopping the training",
    )
    parser.add_argument(
        "--grad-clip", default=1, type=float, help="Gradient norm threshold to clip"
    )
    parser.add_argument(
        "--num-save-attention",
        default=5,
        type=int,
        help="Number of samples of attention to be saved",
    )
    parser.add_argument(
        "--keep-all-data-on-mem",
        default=False,
        type=strtobool,
        help="Whether to keep all data on memory",
    )
    parser.add_argument(
        "--enc-init",
        default=None,
        type=str,
        help="Pre-trained model path to initialize encoder.",
    )
    parser.add_argument(
        "--enc-init-mods",
        default="enc.",
        # BUG FIX: the filter previously tested `s != ""` (the whole argument)
        # instead of each element, so "enc.," produced ["enc.", ""] and the
        # empty-string module name could match everything downstream.
        type=lambda s: [str(mod) for mod in s.split(",") if mod != ""],
        help="List of encoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--dec-init",
        default=None,
        type=str,
        help="Pre-trained model path to initialize decoder.",
    )
    parser.add_argument(
        "--dec-init-mods",
        default="dec.",
        # BUG FIX: per-element emptiness check (see --enc-init-mods).
        type=lambda s: [str(mod) for mod in s.split(",") if mod != ""],
        help="List of decoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--freeze-mods",
        default=None,
        # BUG FIX: per-element emptiness check (see --enc-init-mods).
        type=lambda s: [str(mod) for mod in s.split(",") if mod != ""],
        help="List of modules to freeze (not to train), separated by a comma.",
    )
    return parser
def main(cmd_args):
    """Run training.

    Parses the CLI twice (a partial pass to resolve --model-module, then the
    full pass after the model class registers its own arguments), configures
    logging, resolves the GPU count, seeds RNGs, and launches VC training.
    """
    parser = get_parser()
    args, _ = parser.parse_known_args(cmd_args)
    from espnet.utils.dynamic_import import dynamic_import

    model_class = dynamic_import(args.model_module)
    assert issubclass(model_class, TTSInterface)
    model_class.add_arguments(parser)
    args = parser.parse_args(cmd_args)

    # add version info in args
    args.version = __version__

    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # If --ngpu is not given,
    # 1. if CUDA_VISIBLE_DEVICES is set, all visible devices
    # 2. if nvidia-smi exists, use all devices
    # 3. else ngpu=0
    if args.ngpu is None:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is not None:
            ngpu = len(cvd.split(","))
        else:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
            try:
                p = subprocess.run(
                    ["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
            except (subprocess.CalledProcessError, FileNotFoundError):
                ngpu = 0
            else:
                ngpu = len(p.stderr.decode().split("\n")) - 1
        # BUG FIX: write the auto-detected count back into args (as the
        # sibling training scripts do); previously args.ngpu stayed None
        # when --ngpu was omitted, so train(args) never saw the real count.
        args.ngpu = ngpu
    else:
        ngpu = args.ngpu
    logging.info(f"ngpu: {ngpu}")

    # set random seed
    logging.info("random seed = %d" % args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    if args.backend == "pytorch":
        from espnet.vc.pytorch_backend.vc import train

        train(args)
    else:
        raise NotImplementedError("Only pytorch is supported.")


if __name__ == "__main__":
    main(sys.argv[1:])
| 10,830 | 28.352304 | 88 | py |
espnet | espnet-master/espnet/bin/tts_decode.py | #!/usr/bin/env python3
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""TTS decoding script."""
import logging
import os
import subprocess
import sys
import configargparse
from espnet.utils.cli_utils import strtobool
# NOTE: you need this func to generate our sphinx doc
def get_parser():
    """Get parser of decoding arguments.

    Returns:
        configargparse.ArgumentParser: Parser holding all TTS decoding options.
    """
    parser = configargparse.ArgumentParser(
        description="Synthesize speech from text using a TTS model on one CPU",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    # general configuration
    # NOTE: config files (YAML) are layered; later --configN overrides earlier ones.
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites "
        "the settings in `--config` and `--config2`.",
    )
    parser.add_argument("--ngpu", default=0, type=int, help="Number of GPUs")
    parser.add_argument(
        "--backend",
        default="pytorch",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument("--out", type=str, required=True, help="Output filename")
    parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        help="The configuration file for the pre-processing",
    )
    # task related
    parser.add_argument(
        "--json", type=str, required=True, help="Filename of train label data (json)"
    )
    parser.add_argument(
        "--model", type=str, required=True, help="Model file parameters to read"
    )
    parser.add_argument(
        "--model-conf", type=str, default=None, help="Model config file"
    )
    # decoding related
    parser.add_argument(
        "--maxlenratio", type=float, default=5, help="Maximum length ratio in decoding"
    )
    parser.add_argument(
        "--minlenratio", type=float, default=0, help="Minimum length ratio in decoding"
    )
    parser.add_argument(
        "--threshold", type=float, default=0.5, help="Threshold value in decoding"
    )
    # attention-constraint options (presumably the monotonic constraint of
    # Tacotron-style autoregressive decoding — confirm against the decoder impl)
    parser.add_argument(
        "--use-att-constraint",
        type=strtobool,
        default=False,
        help="Whether to use the attention constraint",
    )
    parser.add_argument(
        "--backward-window",
        type=int,
        default=1,
        help="Backward window size in the attention constraint",
    )
    parser.add_argument(
        "--forward-window",
        type=int,
        default=3,
        help="Forward window size in the attention constraint",
    )
    parser.add_argument(
        "--fastspeech-alpha",
        type=float,
        default=1.0,
        help="Alpha to change the speed for FastSpeech",
    )
    # save related
    parser.add_argument(
        "--save-durations",
        default=False,
        type=strtobool,
        help="Whether to save durations converted from attentions",
    )
    parser.add_argument(
        "--save-focus-rates",
        default=False,
        type=strtobool,
        help="Whether to save focus rates of attentions",
    )
    return parser
def main(args):
    """Run decoding.

    Parses CLI arguments, configures logging, resolves GPU visibility,
    and dispatches to the pytorch TTS decoding backend.
    """
    parser = get_parser()
    args = parser.parse_args(args)
    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check CUDA_VISIBLE_DEVICES
    if args.ngpu > 0:
        # Special-case for the JHU CLSP grid: GPUs must be allocated via the
        # local free-gpu tool before CUDA_VISIBLE_DEVICES is consulted.
        if "clsp.jhu.edu" in subprocess.check_output(["hostname", "-f"]).decode():
            cvd = (
                subprocess.check_output(
                    ["/usr/local/bin/free-gpu", "-n", str(args.ngpu)]
                )
                .decode()
                .strip()
            )
            logging.info("CLSP: use gpu" + cvd)
            os.environ["CUDA_VISIBLE_DEVICES"] = cvd
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is None:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
        elif args.ngpu != len(cvd.split(",")):
            # Mismatch between requested and visible devices is fatal.
            logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
            sys.exit(1)
    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
    # extract
    logging.info("backend = " + args.backend)
    if args.backend == "pytorch":
        from espnet.tts.pytorch_backend.tts import decode
        decode(args)
    else:
        raise NotImplementedError("Only pytorch is supported.")
if __name__ == "__main__":
    main(sys.argv[1:])
| 5,267 | 29.807018 | 87 | py |
espnet | espnet-master/espnet/bin/mt_trans.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Neural machine translation model decoding script."""
import logging
import os
import random
import sys
import configargparse
import numpy as np
# NOTE: you need this func to generate our sphinx doc
def get_parser():
    """Get default arguments.

    Returns:
        configargparse.ArgumentParser: Parser holding all MT decoding options.
    """
    parser = configargparse.ArgumentParser(
        description="Translate text from speech "
        "using a speech translation model on one CPU or GPU",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    # general configuration
    parser.add("--config", is_config_file=True, help="Config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="Second config file path that overwrites the settings in `--config`",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="Third config file path "
        "that overwrites the settings in `--config` and `--config2`",
    )
    parser.add_argument("--ngpu", type=int, default=0, help="Number of GPUs")
    parser.add_argument(
        "--dtype",
        choices=("float16", "float32", "float64"),
        default="float32",
        help="Float precision (only available in --api v2)",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="chainer",
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument("--debugmode", type=int, default=1, help="Debugmode")
    parser.add_argument("--seed", type=int, default=1, help="Random seed")
    parser.add_argument("--verbose", "-V", type=int, default=1, help="Verbose option")
    parser.add_argument(
        "--batchsize",
        type=int,
        default=1,
        help="Batch size for beam search (0: means no batch processing)",
    )
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        help="The configuration file for the pre-processing",
    )
    parser.add_argument(
        "--api",
        default="v1",
        choices=["v1", "v2"],
        help="Beam search APIs "
        "v1: Default API. It only supports "
        "the ASRInterface.recognize method and DefaultRNNLM. "
        "v2: Experimental API. "
        "It supports any models that implements ScorerInterface.",
    )
    # task related
    parser.add_argument(
        "--trans-json", type=str, help="Filename of translation data (json)"
    )
    parser.add_argument(
        "--result-label",
        type=str,
        required=True,
        help="Filename of result label data (json)",
    )
    # model (parameter) related
    parser.add_argument(
        "--model", type=str, required=True, help="Model file parameters to read"
    )
    parser.add_argument(
        "--model-conf", type=str, default=None, help="Model config file"
    )
    # search related
    parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    parser.add_argument("--beam-size", type=int, default=1, help="Beam size")
    # FIX: help string typo "Incertion penalty" -> "Insertion penalty"
    parser.add_argument("--penalty", type=float, default=0.1, help="Insertion penalty")
    parser.add_argument(
        "--maxlenratio",
        type=float,
        default=3.0,
        # FIX: the help wrongly labelled 0.0 as the default (actual default is
        # 3.0, see above) and had a grammar slip ("a end-detect").
        help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0, it uses an end-detect function
        to automatically find maximum hypothesis lengths""",
    )
    parser.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    # multilingual related
    parser.add_argument(
        "--tgt-lang",
        default=False,
        type=str,
        help="target language ID (e.g., <en>, <de>, and <fr> etc.)",
    )
    return parser
def main(args):
    """Run the main decoding function."""
    args = get_parser().parse_args(args)

    # logging info
    log_format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    level_map = {1: logging.INFO, 2: logging.DEBUG}
    if args.verbose in level_map:
        logging.basicConfig(level=level_map[args.verbose], format=log_format)
    else:
        logging.basicConfig(level=logging.WARN, format=log_format)
        logging.warning("Skip DEBUG/INFO messages")

    # check CUDA_VISIBLE_DEVICES
    if args.ngpu > 0:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is None:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
        elif args.ngpu != len(cvd.split(",")):
            logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
            sys.exit(1)

        # TODO(mn5k): support of multiple GPUs
        if args.ngpu > 1:
            logging.error("The program only supports ngpu=1.")
            sys.exit(1)

    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))

    # seed setting
    random.seed(args.seed)
    np.random.seed(args.seed)
    logging.info("set random seed = %d" % args.seed)

    # trans
    logging.info("backend = " + args.backend)
    if args.backend != "pytorch":
        raise ValueError("Only pytorch are supported.")

    # Experimental API that supports custom LMs
    from espnet.mt.pytorch_backend.mt import trans

    if args.dtype != "float32":
        raise NotImplementedError(
            f"`--dtype {args.dtype}` is only available with `--api v2`"
        )
    trans(args)


if __name__ == "__main__":
    main(sys.argv[1:])
| 5,956 | 30.855615 | 88 | py |
espnet | espnet-master/espnet/bin/vc_decode.py | #!/usr/bin/env python3
# Copyright 2020 Nagoya University (Wen-Chin Huang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""VC decoding script."""
import logging
import os
import subprocess
import sys
import configargparse
from espnet.utils.cli_utils import strtobool
# NOTE: you need this func to generate our sphinx doc
def get_parser():
    """Get parser of decoding arguments.

    Returns:
        configargparse.ArgumentParser: Parser holding all VC decoding options.
    """
    parser = configargparse.ArgumentParser(
        description="Converting speech using a VC model on one CPU",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    # general configuration
    # NOTE: config files (YAML) are layered; later --configN overrides earlier ones.
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites the settings "
        "in `--config` and `--config2`.",
    )
    parser.add_argument("--ngpu", default=0, type=int, help="Number of GPUs")
    parser.add_argument(
        "--backend",
        default="pytorch",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument("--out", type=str, required=True, help="Output filename")
    parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        help="The configuration file for the pre-processing",
    )
    # task related
    parser.add_argument(
        "--json", type=str, required=True, help="Filename of train label data (json)"
    )
    parser.add_argument(
        "--model", type=str, required=True, help="Model file parameters to read"
    )
    parser.add_argument(
        "--model-conf", type=str, default=None, help="Model config file"
    )
    # decoding related
    parser.add_argument(
        "--maxlenratio", type=float, default=5, help="Maximum length ratio in decoding"
    )
    parser.add_argument(
        "--minlenratio", type=float, default=0, help="Minimum length ratio in decoding"
    )
    parser.add_argument(
        "--threshold", type=float, default=0.5, help="Threshold value in decoding"
    )
    parser.add_argument(
        "--use-att-constraint",
        type=strtobool,
        default=False,
        help="Whether to use the attention constraint",
    )
    parser.add_argument(
        "--backward-window",
        type=int,
        default=1,
        help="Backward window size in the attention constraint",
    )
    parser.add_argument(
        "--forward-window",
        type=int,
        default=3,
        help="Forward window size in the attention constraint",
    )
    # save related
    parser.add_argument(
        "--save-durations",
        default=False,
        type=strtobool,
        help="Whether to save durations converted from attentions",
    )
    parser.add_argument(
        "--save-focus-rates",
        default=False,
        type=strtobool,
        help="Whether to save focus rates of attentions",
    )
    return parser
def main(args):
    """Run decoding.

    Parses CLI arguments, configures logging, resolves GPU visibility,
    and dispatches to the pytorch VC decoding backend.
    """
    parser = get_parser()
    args = parser.parse_args(args)
    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check CUDA_VISIBLE_DEVICES
    if args.ngpu > 0:
        # Special-case for the JHU CLSP grid: GPUs must be allocated via the
        # local free-gpu tool before CUDA_VISIBLE_DEVICES is consulted.
        if "clsp.jhu.edu" in subprocess.check_output(["hostname", "-f"]).decode():
            cvd = (
                subprocess.check_output(
                    ["/usr/local/bin/free-gpu", "-n", str(args.ngpu)]
                )
                .decode()
                .strip()
            )
            logging.info("CLSP: use gpu" + cvd)
            os.environ["CUDA_VISIBLE_DEVICES"] = cvd
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is None:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
        elif args.ngpu != len(cvd.split(",")):
            # Mismatch between requested and visible devices is fatal.
            logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
            sys.exit(1)
    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
    # extract
    logging.info("backend = " + args.backend)
    if args.backend == "pytorch":
        from espnet.vc.pytorch_backend.vc import decode
        decode(args)
    else:
        raise NotImplementedError("Only pytorch is supported.")
if __name__ == "__main__":
    main(sys.argv[1:])
| 5,094 | 29.878788 | 87 | py |
espnet | espnet-master/espnet/bin/asr_align.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2020 Johns Hopkins University (Xuankai Chang)
# 2020, Technische Universität München; Dominik Winkelbauer, Ludwig Kürzinger
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
This program performs CTC segmentation to align utterances within audio files.
Inputs:
`--data-json`:
A json containing list of utterances and audio files
`--model`:
An already trained ASR model
Output:
`--output`:
A plain `segments` file with utterance positions in the audio files.
Selected parameters:
`--min-window-size`:
Minimum window size considered for a single utterance. The current default value
should be OK in most cases. Larger values might give better results; too large
values cause IndexErrors.
`--subsampling-factor`:
If the encoder sub-samples its input, the number of frames at the CTC layer is
reduced by this factor.
`--frame-duration`:
This is the non-overlapping duration of a single frame in milliseconds (the
inverse of frames per millisecond).
`--set-blank`:
In the rare case that the blank token has not the index 0 in the character
dictionary, this parameter sets the index of the blank token.
`--gratis-blank`:
Sets the transition cost for blank tokens to zero. Useful if there are longer
unrelated segments between segments.
`--replace-spaces-with-blanks`:
Spaces are replaced with blanks. Helps to model pauses between words. May
increase length of ground truth. May lead to misaligned segments when combined
with the option `--gratis-blank`.
"""
import json
import logging
import os
import sys
import configargparse
import torch
# imports for CTC segmentation
from ctc_segmentation import (
CtcSegmentationParameters,
ctc_segmentation,
determine_utterance_segments,
prepare_text,
)
# imports for inference
from espnet.asr.pytorch_backend.asr_init import load_trained_model
from espnet.nets.asr_interface import ASRInterface
from espnet.utils.io_utils import LoadInputsAndTargets
# NOTE: you need this func to generate our sphinx doc
def get_parser():
    """Build the command-line argument parser for CTC segmentation.

    Returns:
        configargparse.ArgumentParser: parser populated with general, task,
        model, and CTC-segmentation related options.
    """
    parser = configargparse.ArgumentParser(
        # FIX: the adjacent string fragments previously concatenated to
        # "...segmentation.using..." — a separating space was missing.
        description="Align text to audio using CTC segmentation "
        "using a pre-trained speech recognition model.",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    # general configuration
    parser.add("--config", is_config_file=True, help="Decoding config file path.")
    parser.add_argument(
        "--ngpu", type=int, default=0, help="Number of GPUs (max. 1 is supported)"
    )
    parser.add_argument(
        "--dtype",
        choices=("float16", "float32", "float64"),
        default="float32",
        help="Float precision (only available in --api v2)",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="pytorch",
        choices=["pytorch"],
        help="Backend library",
    )
    parser.add_argument("--debugmode", type=int, default=1, help="Debugmode")
    parser.add_argument("--verbose", "-V", type=int, default=1, help="Verbose option")
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        help="The configuration file for the pre-processing",
    )
    # task related
    parser.add_argument(
        "--data-json", type=str, help="Json of recognition data for audio and text"
    )
    parser.add_argument("--utt-text", type=str, help="Text separated into utterances")
    # model (parameter) related
    parser.add_argument(
        "--model", type=str, required=True, help="Model file parameters to read"
    )
    parser.add_argument(
        "--model-conf", type=str, default=None, help="Model config file"
    )
    parser.add_argument(
        "--num-encs", default=1, type=int, help="Number of encoders in the model."
    )
    # ctc-segmentation related
    parser.add_argument(
        "--subsampling-factor",
        type=int,
        default=None,
        help="Subsampling factor."
        " If the encoder sub-samples its input, the number of frames at the CTC layer"
        " is reduced by this factor. For example, a BLSTMP with subsampling 1_2_2_1_1"
        " has a subsampling factor of 4.",
    )
    parser.add_argument(
        "--frame-duration",
        type=int,
        default=None,
        help="Non-overlapping duration of a single frame in milliseconds.",
    )
    parser.add_argument(
        "--min-window-size",
        type=int,
        default=None,
        help="Minimum window size considered for utterance.",
    )
    parser.add_argument(
        "--max-window-size",
        type=int,
        default=None,
        help="Maximum window size considered for utterance.",
    )
    # kept for backwards compatibility; superseded by --set-blank
    parser.add_argument(
        "--use-dict-blank",
        type=int,
        default=None,
        help="DEPRECATED.",
    )
    parser.add_argument(
        "--set-blank",
        type=int,
        default=None,
        help="Index of model dictionary for blank token (default: 0).",
    )
    parser.add_argument(
        "--gratis-blank",
        type=int,
        default=None,
        help="Set the transition cost of the blank token to zero. Audio sections"
        " labeled with blank tokens can then be skipped without penalty. Useful"
        " if there are unrelated audio segments between utterances.",
    )
    parser.add_argument(
        "--replace-spaces-with-blanks",
        type=int,
        default=None,
        help="Fill blanks in between words to better model pauses between words."
        " Segments can be misaligned if this option is combined with --gratis-blank."
        " May increase length of ground truth.",
    )
    parser.add_argument(
        "--scoring-length",
        type=int,
        default=None,
        help="Changes partitioning length L for calculation of the confidence score.",
    )
    parser.add_argument(
        "--output",
        type=configargparse.FileType("w"),
        required=True,
        help="Output segments file",
    )
    return parser
def main(args):
    """Run the main decoding function."""
    parser = get_parser()
    args, extra = parser.parse_known_args(args)

    # logging info: one shared format, level selected by verbosity
    log_format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO, format=log_format)
    elif args.verbose == 2:
        logging.basicConfig(level=logging.DEBUG, format=log_format)
    else:
        logging.basicConfig(level=logging.WARN, format=log_format)
        logging.warning("Skip DEBUG/INFO messages")

    # float16 inference is GPU-only
    if args.ngpu == 0 and args.dtype == "float16":
        raise ValueError(f"--dtype {args.dtype} does not support the CPU backend.")

    # check CUDA_VISIBLE_DEVICES / select inference device
    if args.ngpu > 1:
        logging.error("Decoding only supports ngpu=1.")
        sys.exit(1)
    device = "cpu"
    if args.ngpu == 1:
        device = "cuda"
        if os.environ.get("CUDA_VISIBLE_DEVICES") is None:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")

    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))

    # recog
    logging.info("backend = " + args.backend)
    if args.backend != "pytorch":
        raise ValueError("Only pytorch is supported.")
    ctc_align(args, device)
    sys.exit(0)
def ctc_align(args, device):
    """ESPnet-specific interface for CTC segmentation.
    Parses configuration, infers the CTC posterior probabilities,
    and then aligns start and end of utterances using CTC segmentation.
    Results are written to the output file given in the args.
    :param args: given configuration
    :param device: for inference; one of ['cuda', 'cpu']
    :return: 0 on success
    """
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        sort_in_input_length=False,
        # CLI option overrides the preprocessing config stored with the model
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    logging.info(f"Decoding device={device}")
    # Warn for nets with high memory consumption on long audio files
    if hasattr(model, "enc"):
        encoder_module = model.enc.__class__.__module__
    elif hasattr(model, "encoder"):
        encoder_module = model.encoder.__class__.__module__
    else:
        encoder_module = "Unknown"
    logging.info(f"Encoder module: {encoder_module}")
    logging.info(f"CTC module: {model.ctc.__class__.__module__}")
    if "rnn" not in encoder_module:
        logging.warning("No BLSTM model detected; memory consumption may be high.")
    model.to(device=device).eval()
    # read audio and text json data
    with open(args.data_json, "rb") as f:
        js = json.load(f)["utts"]
    with open(args.utt_text, "r", encoding="utf-8") as f:
        lines = f.readlines()
        i = 0
        text = {}
        segment_names = {}
        # group utterance lines by the audio name they start with;
        # assumes utt-text lines are ordered by audio name (same order as js)
        for name in js.keys():
            text_per_audio = []
            segment_names_per_audio = []
            while i < len(lines) and lines[i].startswith(name):
                # each line is "<segment-name> <utterance text>"
                text_per_audio.append(lines[i][lines[i].find(" ") + 1 :])
                segment_names_per_audio.append(lines[i][: lines[i].find(" ")])
                i += 1
            text[name] = text_per_audio
            segment_names[name] = segment_names_per_audio
    # apply configuration
    config = CtcSegmentationParameters()
    subsampling_factor = 1
    frame_duration_ms = 10
    if args.subsampling_factor is not None:
        subsampling_factor = args.subsampling_factor
    if args.frame_duration is not None:
        frame_duration_ms = args.frame_duration
    # Backwards compatibility to ctc_segmentation <= 1.5.3
    if hasattr(config, "index_duration"):
        # newer API: one combined per-index duration in seconds
        config.index_duration = frame_duration_ms * subsampling_factor / 1000
    else:
        config.subsampling_factor = subsampling_factor
        config.frame_duration_ms = frame_duration_ms
    if args.min_window_size is not None:
        config.min_window_size = args.min_window_size
    if args.max_window_size is not None:
        config.max_window_size = args.max_window_size
    config.char_list = train_args.char_list
    if args.use_dict_blank is not None:
        logging.warning(
            "The option --use-dict-blank is deprecated. If needed,"
            " use --set-blank instead."
        )
    if args.set_blank is not None:
        config.blank = args.set_blank
    if args.replace_spaces_with_blanks is not None:
        if args.replace_spaces_with_blanks:
            config.replace_spaces_with_blanks = True
        else:
            config.replace_spaces_with_blanks = False
    if args.gratis_blank:
        config.blank_transition_cost_zero = True
    # combining these two options is known to produce misalignments
    if config.blank_transition_cost_zero and args.replace_spaces_with_blanks:
        logging.error(
            "Blanks are inserted between words, and also the transition cost of blank"
            " is zero. This configuration may lead to misalignments!"
        )
    if args.scoring_length is not None:
        config.score_min_mean_over_L = args.scoring_length
    logging.info(f"Frame timings: {frame_duration_ms}ms * {subsampling_factor}")
    # Iterate over audio files to decode and align
    for idx, name in enumerate(js.keys(), 1):
        logging.info("(%d/%d) Aligning " + name, idx, len(js.keys()))
        batch = [(name, js[name])]
        feat, label = load_inputs_and_targets(batch)
        feat = feat[0]
        with torch.no_grad():
            # Encode input frames
            enc_output = model.encode(torch.as_tensor(feat).to(device)).unsqueeze(0)
            # Apply ctc layer to obtain log character probabilities
            lpz = model.ctc.log_softmax(enc_output)[0].cpu().numpy()
        # Prepare the text for aligning
        ground_truth_mat, utt_begin_indices = prepare_text(config, text[name])
        # Align using CTC segmentation
        timings, char_probs, state_list = ctc_segmentation(
            config, lpz, ground_truth_mat
        )
        logging.debug(f"state_list = {state_list}")
        # Obtain list of utterances with time intervals and confidence score
        segments = determine_utterance_segments(
            config, utt_begin_indices, char_probs, timings, text[name]
        )
        # Write to "segments" file: name, audio, start, end, confidence
        for i, boundary in enumerate(segments):
            utt_segment = (
                f"{segment_names[name][i]} {name} {boundary[0]:.2f}"
                f" {boundary[1]:.2f} {boundary[2]:.9f}\n"
            )
            args.output.write(utt_segment)
    return 0
# Script entry point: forward CLI arguments (without the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| 13,207 | 35.893855 | 88 | py |
espnet | espnet-master/espnet/bin/st_train.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""End-to-end speech translation model training script."""
import logging
import os
import random
import subprocess
import sys
import configargparse
import numpy as np
from espnet import __version__
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
    """Build the command-line argument parser for ST model training.

    Args:
        parser: existing parser to extend; a new one is created when ``None``.
        required: whether mandatory options (e.g. ``--outdir``) are enforced.

    Returns:
        configargparse.ArgumentParser: parser populated with all ST options.
    """
    if parser is None:
        parser = configargparse.ArgumentParser(
            description="Train a speech translation (ST) model on one CPU, "
            "one or multiple GPUs",
            config_file_parser_class=configargparse.YAMLConfigFileParser,
            formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
        )
    # general configuration
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites the settings "
        "in `--config` and `--config2`.",
    )
    parser.add_argument(
        "--ngpu",
        default=None,
        type=int,
        help="Number of GPUs. If not given, use all visible devices",
    )
    parser.add_argument(
        "--train-dtype",
        default="float32",
        choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
        help="Data type for training (only pytorch backend). "
        "O0,O1,.. flags require apex. "
        "See https://nvidia.github.io/apex/amp.html#opt-levels",
    )
    parser.add_argument(
        "--backend",
        default="chainer",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument(
        "--outdir", type=str, required=required, help="Output directory"
    )
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument("--dict", required=required, help="Dictionary")
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument("--debugdir", type=str, help="Output directory for debugging")
    parser.add_argument(
        "--resume",
        "-r",
        default="",
        nargs="?",
        help="Resume the training from snapshot",
    )
    parser.add_argument(
        "--minibatches",
        "-N",
        type=int,
        default="-1",
        help="Process only N minibatches (for debug)",
    )
    parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
    parser.add_argument(
        "--tensorboard-dir",
        default=None,
        type=str,
        nargs="?",
        help="Tensorboard log dir path",
    )
    parser.add_argument(
        "--report-interval-iters",
        default=100,
        type=int,
        help="Report interval iterations",
    )
    parser.add_argument(
        "--save-interval-iters",
        default=0,
        type=int,
        help="Save snapshot interval iterations",
    )
    # task related
    parser.add_argument(
        "--train-json",
        type=str,
        default=None,
        help="Filename of train label data (json)",
    )
    parser.add_argument(
        "--valid-json",
        type=str,
        default=None,
        help="Filename of validation label data (json)",
    )
    # network architecture
    parser.add_argument(
        "--model-module",
        type=str,
        default=None,
        help="model defined module (default: espnet.nets.xxx_backend.e2e_st:E2E)",
    )
    # loss related
    parser.add_argument(
        "--ctc_type",
        default="builtin",
        type=str,
        choices=["builtin", "gtnctc", "cudnnctc"],
        help="Type of CTC implementation to calculate loss.",
    )
    parser.add_argument(
        "--mtlalpha",
        default=0.0,
        type=float,
        help="Multitask learning coefficient, alpha: \
                       alpha*ctc_loss + (1-alpha)*att_loss",
    )
    parser.add_argument(
        "--asr-weight",
        default=0.0,
        type=float,
        help="Multitask learning coefficient for ASR task, weight: "
        " asr_weight*(alpha*ctc_loss + (1-alpha)*att_loss)"
        " + (1-asr_weight-mt_weight)*st_loss",
    )
    parser.add_argument(
        "--mt-weight",
        default=0.0,
        type=float,
        help="Multitask learning coefficient for MT task, weight: \
                       mt_weight*mt_loss + (1-mt_weight-asr_weight)*st_loss",
    )
    parser.add_argument(
        "--lsm-weight", default=0.0, type=float, help="Label smoothing weight"
    )
    # recognition options to compute CER/WER
    parser.add_argument(
        "--report-cer",
        default=False,
        action="store_true",
        help="Compute CER on development set",
    )
    parser.add_argument(
        "--report-wer",
        default=False,
        action="store_true",
        help="Compute WER on development set",
    )
    # translations options to compute BLEU
    # NOTE(review): with action="store_true" and default=True this flag can
    # never be switched off from the command line; changing the default would
    # alter existing behavior, so it is only flagged here.
    parser.add_argument(
        "--report-bleu",
        default=True,
        action="store_true",
        help="Compute BLEU on development set",
    )
    parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    parser.add_argument("--beam-size", type=int, default=4, help="Beam size")
    # FIX: corrected help-text typo "Incertion" -> "Insertion"
    parser.add_argument("--penalty", default=0.0, type=float, help="Insertion penalty")
    parser.add_argument(
        "--maxlenratio",
        default=0.0,
        type=float,
        help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0 (default), it uses a end-detect function
        to automatically find maximum hypothesis lengths""",
    )
    parser.add_argument(
        "--minlenratio",
        default=0.0,
        type=float,
        help="Input length ratio to obtain min output length",
    )
    parser.add_argument(
        "--rnnlm", type=str, default=None, help="RNNLM model file to read"
    )
    parser.add_argument(
        "--rnnlm-conf", type=str, default=None, help="RNNLM model config file to read"
    )
    parser.add_argument("--lm-weight", default=0.0, type=float, help="RNNLM weight.")
    parser.add_argument("--sym-space", default="<space>", type=str, help="Space symbol")
    parser.add_argument("--sym-blank", default="<blank>", type=str, help="Blank symbol")
    # minibatch related
    parser.add_argument(
        "--sortagrad",
        default=0,
        type=int,
        nargs="?",
        help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
    )
    parser.add_argument(
        "--batch-count",
        default="auto",
        choices=BATCH_COUNT_CHOICES,
        help="How to count batch_size. "
        "The default (auto) will find how to count by args.",
    )
    parser.add_argument(
        "--batch-size",
        "--batch-seqs",
        "-b",
        default=0,
        type=int,
        help="Maximum seqs in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-bins",
        default=0,
        type=int,
        help="Maximum bins in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-in",
        default=0,
        type=int,
        help="Maximum input frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-out",
        default=0,
        type=int,
        help="Maximum output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-inout",
        default=0,
        type=int,
        help="Maximum input+output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--maxlen-in",
        "--batch-seq-maxlen-in",
        default=800,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, batch size is reduced "
        "if the input sequence length > ML.",
    )
    parser.add_argument(
        "--maxlen-out",
        "--batch-seq-maxlen-out",
        default=150,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the output sequence length > ML",
    )
    parser.add_argument(
        "--n-iter-processes",
        default=0,
        type=int,
        help="Number of processes of iterator",
    )
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        nargs="?",
        help="The configuration file for the pre-processing",
    )
    # optimization related
    parser.add_argument(
        "--opt",
        default="adadelta",
        type=str,
        choices=["adadelta", "adam", "noam"],
        help="Optimizer",
    )
    parser.add_argument(
        "--accum-grad", default=1, type=int, help="Number of gradient accumuration"
    )
    parser.add_argument(
        "--eps", default=1e-8, type=float, help="Epsilon constant for optimizer"
    )
    parser.add_argument(
        "--eps-decay", default=0.01, type=float, help="Decaying ratio of epsilon"
    )
    parser.add_argument(
        "--lr", default=1e-3, type=float, help="Learning rate for optimizer"
    )
    parser.add_argument(
        "--lr-decay", default=1.0, type=float, help="Decaying ratio of learning rate"
    )
    parser.add_argument(
        "--weight-decay", default=0.0, type=float, help="Weight decay ratio"
    )
    parser.add_argument(
        "--criterion",
        default="acc",
        type=str,
        choices=["loss", "acc"],
        help="Criterion to perform epsilon decay",
    )
    parser.add_argument(
        "--threshold", default=1e-4, type=float, help="Threshold to stop iteration"
    )
    parser.add_argument(
        "--epochs", "-e", default=30, type=int, help="Maximum number of epochs"
    )
    parser.add_argument(
        "--early-stop-criterion",
        default="validation/main/acc",
        type=str,
        nargs="?",
        help="Value to monitor to trigger an early stopping of the training",
    )
    parser.add_argument(
        "--patience",
        default=3,
        type=int,
        nargs="?",
        help="Number of epochs to wait "
        "without improvement before stopping the training",
    )
    parser.add_argument(
        "--grad-clip", default=5, type=float, help="Gradient norm threshold to clip"
    )
    parser.add_argument(
        "--num-save-attention",
        default=3,
        type=int,
        help="Number of samples of attention to be saved",
    )
    parser.add_argument(
        "--num-save-ctc",
        default=3,
        type=int,
        help="Number of samples of CTC probability to be saved",
    )
    parser.add_argument(
        "--grad-noise",
        type=strtobool,
        default=False,
        help="The flag to switch to use noise injection to gradients during training",
    )
    # speech translation related
    parser.add_argument(
        "--context-residual",
        default=False,
        type=strtobool,
        nargs="?",
        help="The flag to switch to use context vector residual in the decoder network",
    )
    # finetuning related
    parser.add_argument(
        "--enc-init",
        default=None,
        type=str,
        nargs="?",
        help="Pre-trained ASR model to initialize encoder.",
    )
    # NOTE(review): the lambda filters on the whole string `s`, not each
    # `mod` — presumably intended as `mod != ""`; kept as-is to preserve
    # behavior (an empty string still yields []).
    parser.add_argument(
        "--enc-init-mods",
        default="enc.enc.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of encoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--dec-init",
        default=None,
        type=str,
        nargs="?",
        help="Pre-trained ASR, MT or LM model to initialize decoder.",
    )
    parser.add_argument(
        "--dec-init-mods",
        default="att., dec.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of decoder modules to initialize, separated by a comma.",
    )
    # multilingual related
    parser.add_argument(
        "--multilingual",
        default=False,
        type=strtobool,
        help="Prepend target language ID to the source sentence. "
        " Both source/target language IDs must be prepend in the pre-processing stage.",
    )
    parser.add_argument(
        "--replace-sos",
        default=False,
        type=strtobool,
        help="Replace <sos> in the decoder with a target language ID \
                       (the first token in the target sequence)",
    )
    # Feature transform: Normalization
    parser.add_argument(
        "--stats-file",
        type=str,
        default=None,
        help="The stats file for the feature normalization",
    )
    parser.add_argument(
        "--apply-uttmvn",
        type=strtobool,
        default=True,
        help="Apply utterance level mean " "variance normalization.",
    )
    parser.add_argument("--uttmvn-norm-means", type=strtobool, default=True, help="")
    parser.add_argument("--uttmvn-norm-vars", type=strtobool, default=False, help="")
    # Feature transform: Fbank
    parser.add_argument(
        "--fbank-fs",
        type=int,
        default=16000,
        help="The sample frequency used for " "the mel-fbank creation.",
    )
    parser.add_argument(
        "--n-mels", type=int, default=80, help="The number of mel-frequency bins."
    )
    parser.add_argument("--fbank-fmin", type=float, default=0.0, help="")
    parser.add_argument("--fbank-fmax", type=float, default=None, help="")
    return parser
def main(cmd_args):
    """Run the main training function.

    Parses arguments (two-pass: general options first, then model-specific
    options contributed by the selected model class), configures logging,
    determines the number of GPUs, seeds RNGs, loads the dictionary, and
    dispatches to the backend trainer.

    Args:
        cmd_args: command-line arguments (without the program name).
    """
    parser = get_parser()
    args, _ = parser.parse_known_args(cmd_args)
    if args.backend == "chainer" and args.train_dtype != "float32":
        raise NotImplementedError(
            f"chainer backend does not support --train-dtype {args.train_dtype}."
            "Use --dtype float32."
        )
    if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
        raise ValueError(
            f"--train-dtype {args.train_dtype} does not support the CPU backend."
        )
    from espnet.utils.dynamic_import import dynamic_import

    if args.model_module is None:
        model_module = "espnet.nets." + args.backend + "_backend.e2e_st:E2E"
    else:
        model_module = args.model_module
    # second parsing pass: the model class adds its own arguments
    model_class = dynamic_import(model_module)
    model_class.add_arguments(parser)
    args = parser.parse_args(cmd_args)
    args.model_module = model_module
    if "chainer_backend" in args.model_module:
        args.backend = "chainer"
    if "pytorch_backend" in args.model_module:
        args.backend = "pytorch"
    # add version info in args
    args.version = __version__
    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # If --ngpu is not given,
    #   1. if CUDA_VISIBLE_DEVICES is set, all visible devices
    #   2. if nvidia-smi exists, use all devices
    #   3. else ngpu=0
    if args.ngpu is None:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is not None:
            ngpu = len(cvd.split(","))
        else:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
            try:
                p = subprocess.run(
                    ["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
            except (subprocess.CalledProcessError, FileNotFoundError):
                ngpu = 0
            else:
                # BUGFIX: `nvidia-smi -L` prints one GPU per line on *stdout*;
                # the previous code counted stderr lines, which always gave 0.
                ngpu = len(p.stdout.decode().split("\n")) - 1
        args.ngpu = ngpu
    else:
        if args.ngpu != 1:
            logging.debug(
                "There are some bugs with multi-GPU processing in PyTorch 1.2+"
                + " (see https://github.com/pytorch/pytorch/issues/21108)"
            )
        ngpu = args.ngpu
    logging.info(f"ngpu: {ngpu}")
    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
    # set random seed
    logging.info("random seed = %d" % args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
    # load dictionary for debug log
    if args.dict is not None:
        with open(args.dict, "rb") as f:
            dictionary = f.readlines()
        char_list = [entry.decode("utf-8").split(" ")[0] for entry in dictionary]
        # index 0 is reserved for the CTC blank; <eos> is appended last
        char_list.insert(0, "<blank>")
        char_list.append("<eos>")
        args.char_list = char_list
    else:
        args.char_list = None
    # train
    logging.info("backend = " + args.backend)
    if args.backend == "pytorch":
        from espnet.st.pytorch_backend.st import train

        train(args)
    else:
        raise ValueError("Only pytorch are supported.")
# Script entry point: forward CLI arguments (without the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| 17,332 | 30.687386 | 88 | py |
espnet | espnet-master/espnet/bin/mt_train.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Neural machine translation model training script."""
import logging
import os
import random
import subprocess
import sys
import configargparse
import numpy as np
from espnet import __version__
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
    """Build the command-line argument parser for NMT model training.

    Args:
        parser: existing parser to extend; a new one is created when ``None``.
        required: whether mandatory options (e.g. ``--outdir``) are enforced.

    Returns:
        configargparse.ArgumentParser: parser populated with all MT options.
    """
    if parser is None:
        parser = configargparse.ArgumentParser(
            description="Train a neural machine translation (NMT) model on one CPU, "
            "one or multiple GPUs",
            config_file_parser_class=configargparse.YAMLConfigFileParser,
            formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
        )
    # general configuration
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites the settings "
        "in `--config` and `--config2`.",
    )
    parser.add_argument(
        "--ngpu",
        default=None,
        type=int,
        help="Number of GPUs. If not given, use all visible devices",
    )
    parser.add_argument(
        "--train-dtype",
        default="float32",
        choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
        help="Data type for training (only pytorch backend). "
        "O0,O1,.. flags require apex. "
        "See https://nvidia.github.io/apex/amp.html#opt-levels",
    )
    parser.add_argument(
        "--backend",
        default="chainer",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument(
        "--outdir", type=str, required=required, help="Output directory"
    )
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument(
        "--dict", required=required, help="Dictionary for source/target languages"
    )
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument("--debugdir", type=str, help="Output directory for debugging")
    parser.add_argument(
        "--resume",
        "-r",
        default="",
        nargs="?",
        help="Resume the training from snapshot",
    )
    parser.add_argument(
        "--minibatches",
        "-N",
        type=int,
        default="-1",
        help="Process only N minibatches (for debug)",
    )
    parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
    parser.add_argument(
        "--tensorboard-dir",
        default=None,
        type=str,
        nargs="?",
        help="Tensorboard log dir path",
    )
    parser.add_argument(
        "--report-interval-iters",
        default=100,
        type=int,
        help="Report interval iterations",
    )
    parser.add_argument(
        "--save-interval-iters",
        default=0,
        type=int,
        help="Save snapshot interval iterations",
    )
    # task related
    parser.add_argument(
        "--train-json",
        type=str,
        default=None,
        help="Filename of train label data (json)",
    )
    parser.add_argument(
        "--valid-json",
        type=str,
        default=None,
        help="Filename of validation label data (json)",
    )
    # network architecture
    parser.add_argument(
        "--model-module",
        type=str,
        default=None,
        help="model defined module (default: espnet.nets.xxx_backend.e2e_mt:E2E)",
    )
    # loss related
    parser.add_argument(
        "--lsm-weight", default=0.0, type=float, help="Label smoothing weight"
    )
    # translations options to compute BLEU
    # NOTE(review): with action="store_true" and default=True this flag can
    # never be switched off from the command line; changing the default would
    # alter existing behavior, so it is only flagged here.
    parser.add_argument(
        "--report-bleu",
        default=True,
        action="store_true",
        help="Compute BLEU on development set",
    )
    parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    parser.add_argument("--beam-size", type=int, default=4, help="Beam size")
    # FIX: corrected help-text typo "Incertion" -> "Insertion"
    parser.add_argument("--penalty", default=0.0, type=float, help="Insertion penalty")
    parser.add_argument(
        "--maxlenratio",
        default=0.0,
        type=float,
        help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0 (default), it uses a end-detect function
        to automatically find maximum hypothesis lengths""",
    )
    parser.add_argument(
        "--minlenratio",
        default=0.0,
        type=float,
        help="Input length ratio to obtain min output length",
    )
    parser.add_argument(
        "--rnnlm", type=str, default=None, help="RNNLM model file to read"
    )
    parser.add_argument(
        "--rnnlm-conf", type=str, default=None, help="RNNLM model config file to read"
    )
    parser.add_argument("--lm-weight", default=0.0, type=float, help="RNNLM weight.")
    parser.add_argument("--sym-space", default="<space>", type=str, help="Space symbol")
    parser.add_argument("--sym-blank", default="<blank>", type=str, help="Blank symbol")
    # minibatch related
    parser.add_argument(
        "--sortagrad",
        default=0,
        type=int,
        nargs="?",
        help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
    )
    parser.add_argument(
        "--batch-count",
        default="auto",
        choices=BATCH_COUNT_CHOICES,
        help="How to count batch_size. "
        "The default (auto) will find how to count by args.",
    )
    parser.add_argument(
        "--batch-size",
        "--batch-seqs",
        "-b",
        default=0,
        type=int,
        help="Maximum seqs in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-bins",
        default=0,
        type=int,
        help="Maximum bins in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-in",
        default=0,
        type=int,
        help="Maximum input frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-out",
        default=0,
        type=int,
        help="Maximum output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-inout",
        default=0,
        type=int,
        help="Maximum input+output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--maxlen-in",
        "--batch-seq-maxlen-in",
        default=100,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the input sequence length > ML.",
    )
    parser.add_argument(
        "--maxlen-out",
        "--batch-seq-maxlen-out",
        default=100,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the output sequence length > ML",
    )
    parser.add_argument(
        "--n-iter-processes",
        default=0,
        type=int,
        help="Number of processes of iterator",
    )
    # optimization related
    parser.add_argument(
        "--opt",
        default="adadelta",
        type=str,
        choices=["adadelta", "adam", "noam"],
        help="Optimizer",
    )
    parser.add_argument(
        "--accum-grad", default=1, type=int, help="Number of gradient accumuration"
    )
    parser.add_argument(
        "--eps", default=1e-8, type=float, help="Epsilon constant for optimizer"
    )
    parser.add_argument(
        "--eps-decay", default=0.01, type=float, help="Decaying ratio of epsilon"
    )
    parser.add_argument(
        "--lr", default=1e-3, type=float, help="Learning rate for optimizer"
    )
    parser.add_argument(
        "--lr-decay", default=1.0, type=float, help="Decaying ratio of learning rate"
    )
    parser.add_argument(
        "--weight-decay", default=0.0, type=float, help="Weight decay ratio"
    )
    parser.add_argument(
        "--criterion",
        default="acc",
        type=str,
        choices=["loss", "acc"],
        help="Criterion to perform epsilon decay",
    )
    parser.add_argument(
        "--threshold", default=1e-4, type=float, help="Threshold to stop iteration"
    )
    parser.add_argument(
        "--epochs", "-e", default=30, type=int, help="Maximum number of epochs"
    )
    parser.add_argument(
        "--early-stop-criterion",
        default="validation/main/acc",
        type=str,
        nargs="?",
        help="Value to monitor to trigger an early stopping of the training",
    )
    parser.add_argument(
        "--patience",
        default=3,
        type=int,
        nargs="?",
        help="Number of epochs to wait "
        "without improvement before stopping the training",
    )
    parser.add_argument(
        "--grad-clip", default=5, type=float, help="Gradient norm threshold to clip"
    )
    parser.add_argument(
        "--num-save-attention",
        default=3,
        type=int,
        help="Number of samples of attention to be saved",
    )
    # decoder related
    parser.add_argument(
        "--context-residual",
        default=False,
        type=strtobool,
        nargs="?",
        help="The flag to switch to use context vector residual in the decoder network",
    )
    parser.add_argument(
        "--tie-src-tgt-embedding",
        default=False,
        type=strtobool,
        nargs="?",
        help="Tie parameters of source embedding and target embedding.",
    )
    parser.add_argument(
        "--tie-classifier",
        default=False,
        type=strtobool,
        nargs="?",
        help="Tie parameters of target embedding and output projection layer.",
    )
    # finetuning related
    parser.add_argument(
        "--enc-init",
        default=None,
        type=str,
        nargs="?",
        help="Pre-trained ASR model to initialize encoder.",
    )
    # NOTE(review): the lambda filters on the whole string `s`, not each
    # `mod` — presumably intended as `mod != ""`; kept as-is to preserve
    # behavior (an empty string still yields []).
    parser.add_argument(
        "--enc-init-mods",
        default="enc.enc.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of encoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--dec-init",
        default=None,
        type=str,
        nargs="?",
        help="Pre-trained ASR, MT or LM model to initialize decoder.",
    )
    parser.add_argument(
        "--dec-init-mods",
        default="att., dec.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of decoder modules to initialize, separated by a comma.",
    )
    # multilingual related
    parser.add_argument(
        "--multilingual",
        default=False,
        type=strtobool,
        help="Prepend target language ID to the source sentence. "
        "Both source/target language IDs must be prepend in the pre-processing stage.",
    )
    parser.add_argument(
        "--replace-sos",
        default=False,
        type=strtobool,
        help="Replace <sos> in the decoder with a target language ID "
        "(the first token in the target sequence)",
    )
    return parser
def main(cmd_args):
    """Run the main training function.

    The command line is parsed twice: a first pass with ``parse_known_args``
    determines the backend and model module, then the model class registers
    its own options and the full command line is re-parsed.

    Args:
        cmd_args (list[str]): Command line arguments (without the program name).
    """
    parser = get_parser()
    args, _ = parser.parse_known_args(cmd_args)
    if args.backend == "chainer" and args.train_dtype != "float32":
        raise NotImplementedError(
            f"chainer backend does not support --train-dtype {args.train_dtype}."
            "Use --dtype float32."
        )
    if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
        raise ValueError(
            f"--train-dtype {args.train_dtype} does not support the CPU backend."
        )
    from espnet.utils.dynamic_import import dynamic_import
    if args.model_module is None:
        # Default to the E2E MT model of the selected backend.
        model_module = "espnet.nets." + args.backend + "_backend.e2e_mt:E2E"
    else:
        model_module = args.model_module
    model_class = dynamic_import(model_module)
    # Let the model class add its own arguments, then re-parse the full
    # command line so those options are recognized.
    model_class.add_arguments(parser)
    args = parser.parse_args(cmd_args)
    args.model_module = model_module
    # Keep --backend consistent with the actually selected model module.
    if "chainer_backend" in args.model_module:
        args.backend = "chainer"
    if "pytorch_backend" in args.model_module:
        args.backend = "pytorch"
    # add version info in args
    args.version = __version__
    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # If --ngpu is not given,
    # 1. if CUDA_VISIBLE_DEVICES is set, all visible devices
    # 2. if nvidia-smi exists, use all devices
    # 3. else ngpu=0
    if args.ngpu is None:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is not None:
            ngpu = len(cvd.split(","))
        else:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
            try:
                p = subprocess.run(
                    ["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
            except (subprocess.CalledProcessError, FileNotFoundError):
                # nvidia-smi is unavailable: assume a CPU-only machine.
                ngpu = 0
            else:
                # NOTE(review): `nvidia-smi -L` lists one GPU per line on
                # stdout; counting lines of *stderr* here looks suspicious
                # (empty stderr yields ngpu=0) -- confirm intended behavior.
                ngpu = len(p.stderr.decode().split("\n")) - 1
        args.ngpu = ngpu
    else:
        if args.ngpu != 1:
            logging.debug(
                "There are some bugs with multi-GPU processing in PyTorch 1.2+"
                + " (see https://github.com/pytorch/pytorch/issues/21108)"
            )
        ngpu = args.ngpu
    logging.info(f"ngpu: {ngpu}")
    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
    # set random seed
    logging.info("random seed = %d" % args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
    # load dictionary for debug log
    if args.dict is not None:
        with open(args.dict, "rb") as f:
            dictionary = f.readlines()
        char_list = [entry.decode("utf-8").split(" ")[0] for entry in dictionary]
        # <blank> occupies index 0 and <eos> is appended, matching the
        # token id convention used elsewhere in ESPnet.
        char_list.insert(0, "<blank>")
        char_list.append("<eos>")
        args.char_list = char_list
    else:
        args.char_list = None
    # train
    logging.info("backend = " + args.backend)
    if args.backend == "pytorch":
        from espnet.mt.pytorch_backend.mt import train
        train(args)
    else:
        raise ValueError("Only pytorch are supported.")
| 14,923 | 30.352941 | 88 | py |
espnet | espnet-master/espnet/bin/st_trans.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""End-to-end speech translation model decoding script."""
import logging
import os
import random
import sys
import configargparse
import numpy as np
# NOTE: you need this func to generate our sphinx doc
def get_parser():
    """Get default arguments.

    Builds the ``configargparse`` parser for the speech-translation decoding
    script; options may come from the command line or from up to three
    layered YAML config files (``--config`` .. ``--config3``).

    Returns:
        configargparse.ArgumentParser: Parser with all decoding options.
    """
    parser = configargparse.ArgumentParser(
        description="Translate text from speech using a speech translation "
        "model on one CPU or GPU",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    # general configuration
    parser.add("--config", is_config_file=True, help="Config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="Second config file path that overwrites the settings in `--config`",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="Third config file path that overwrites "
        "the settings in `--config` and `--config2`",
    )
    parser.add_argument("--ngpu", type=int, default=0, help="Number of GPUs")
    parser.add_argument(
        "--dtype",
        choices=("float16", "float32", "float64"),
        default="float32",
        help="Float precision (only available in --api v2)",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="chainer",
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument("--debugmode", type=int, default=1, help="Debugmode")
    parser.add_argument("--seed", type=int, default=1, help="Random seed")
    parser.add_argument("--verbose", "-V", type=int, default=1, help="Verbose option")
    parser.add_argument(
        "--batchsize",
        type=int,
        default=1,
        help="Batch size for beam search (0: means no batch processing)",
    )
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        help="The configuration file for the pre-processing",
    )
    parser.add_argument(
        "--api",
        default="v1",
        choices=["v1", "v2"],
        help="Beam search APIs "
        "v1: Default API. "
        "It only supports the ASRInterface.recognize method and DefaultRNNLM. "
        "v2: Experimental API. "
        "It supports any models that implements ScorerInterface.",
    )
    # task related
    parser.add_argument(
        "--trans-json", type=str, help="Filename of translation data (json)"
    )
    parser.add_argument(
        "--result-label",
        type=str,
        required=True,
        help="Filename of result label data (json)",
    )
    # model (parameter) related
    parser.add_argument(
        "--model", type=str, required=True, help="Model file parameters to read"
    )
    # search related
    parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    parser.add_argument("--beam-size", type=int, default=1, help="Beam size")
    # NOTE(review): "Incertion" below looks like a typo for "Insertion" in the
    # user-visible help text; left untouched to avoid changing runtime strings.
    parser.add_argument("--penalty", type=float, default=0.0, help="Incertion penalty")
    parser.add_argument(
        "--maxlenratio",
        type=float,
        default=0.0,
        help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0 (default), it uses a end-detect function
        to automatically find maximum hypothesis lengths""",
    )
    parser.add_argument(
        "--minlenratio",
        type=float,
        default=0.0,
        help="Input length ratio to obtain min output length",
    )
    # multilingual related
    parser.add_argument(
        "--tgt-lang",
        default=False,
        type=str,
        help="target language ID (e.g., <en>, <de>, and <fr> etc.)",
    )
    return parser
def main(args):
    """Run the main decoding function.

    Parses the command line, configures logging and GPU/seed settings, and
    dispatches to the pytorch speech-translation decoder.
    """
    parser = get_parser()
    args = parser.parse_args(args)
    # Map the verbosity option onto a logging level; any value other than
    # 1 or 2 falls back to warnings only.
    log_format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO, format=log_format)
    elif args.verbose == 2:
        logging.basicConfig(level=logging.DEBUG, format=log_format)
    else:
        logging.basicConfig(level=logging.WARN, format=log_format)
        logging.warning("Skip DEBUG/INFO messages")
    # Validate the requested GPU count against CUDA_VISIBLE_DEVICES.
    if args.ngpu > 0:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is None:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
        elif args.ngpu != len(cvd.split(",")):
            logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
            sys.exit(1)
        # TODO(mn5k): support of multiple GPUs
        if args.ngpu > 1:
            logging.error("The program only supports ngpu=1.")
            sys.exit(1)
    # Show where Python modules are resolved from.
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
    # Fix random seeds for reproducible decoding.
    random.seed(args.seed)
    np.random.seed(args.seed)
    logging.info("set random seed = %d" % args.seed)
    logging.info("backend = " + args.backend)
    if args.backend != "pytorch":
        raise ValueError("Only pytorch are supported.")
    # Experimental API that supports custom LMs
    from espnet.st.pytorch_backend.st import trans
    if args.dtype != "float32":
        raise NotImplementedError(
            f"`--dtype {args.dtype}` is only available with `--api v2`"
        )
    trans(args)
| 5,855 | 30.826087 | 88 | py |
espnet | espnet-master/espnet/st/pytorch_backend/st.py | # Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech translation task."""
import itertools
import json
import logging
import os
import numpy as np
import torch
from chainer import training
from chainer.training import extensions
from espnet.asr.asr_utils import (
CompareValueTrigger,
adadelta_eps_decay,
adam_lr_decay,
add_results_to_json,
restore_snapshot,
snapshot_object,
torch_load,
torch_resume,
torch_snapshot,
)
from espnet.asr.pytorch_backend.asr import CustomConverter as ASRCustomConverter
from espnet.asr.pytorch_backend.asr import CustomEvaluator, CustomUpdater
from espnet.asr.pytorch_backend.asr_init import load_trained_model, load_trained_modules
from espnet.nets.pytorch_backend.e2e_asr import pad_list
from espnet.nets.st_interface import STInterface
from espnet.utils.dataset import ChainerDataLoader, TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop, set_early_stop
class CustomConverter(ASRCustomConverter):
    """Custom batch converter for Pytorch.

    Extends the base ASR converter so that, when auxiliary ASR/MT losses are
    enabled, the source-language transcriptions are padded as well.

    Args:
        subsampling_factor (int): The subsampling factor.
        dtype (torch.dtype): Data type to convert.
        use_source_text (bool): use source transcription.
    """

    def __init__(
        self, subsampling_factor=1, dtype=torch.float32, use_source_text=False
    ):
        """Construct a CustomConverter object."""
        super().__init__(subsampling_factor=subsampling_factor, dtype=dtype)
        self.use_source_text = use_source_text

    def __call__(self, batch, device=torch.device("cpu")):
        """Transform a batch and send it to a device.

        Args:
            batch (list): One-element list holding ``(xs, ys, ys_src)``.
            device (torch.device): The device to send to.

        Returns:
            tuple: ``(xs_pad, ilens, ys_pad, ys_pad_src)`` where
                ``ys_pad_src`` is ``None`` unless source text is used.
        """
        # The mini-batch itself is wrapped in a single-element list.
        assert len(batch) == 1
        xs, ys, ys_src = batch[0]
        # Lengths of the unpadded input feature sequences.
        ilens = torch.from_numpy(np.array([x.shape[0] for x in xs])).to(device)
        # Pad acoustic features with zeros and cast to the training dtype.
        xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(
            device, dtype=self.dtype
        )

        def _pad_tokens(token_seqs):
            # Pad integer token-id sequences with the ignore index.
            return pad_list(
                [torch.from_numpy(np.array(y, dtype=np.int64)) for y in token_seqs],
                self.ignore_id,
            ).to(device)

        ys_pad = _pad_tokens(ys)
        # Source-language targets are only needed for auxiliary ASR/MT tasks.
        ys_pad_src = _pad_tokens(ys_src) if self.use_source_text else None
        return xs_pad, ilens, ys_pad, ys_pad_src
def train(args):
    """Train with the given args.

    Builds the ST model, optimizer, data iterators and a chainer
    ``Trainer`` with evaluation/snapshot/plot extensions, then runs the
    training loop until ``args.epochs`` or early stopping.

    Args:
        args (namespace): The program arguments.
    """
    set_deterministic_pytorch(args)
    # check cuda availability
    if not torch.cuda.is_available():
        logging.warning("cuda is not available")
    # get input and output dimension info
    with open(args.valid_json, "rb") as f:
        valid_json = json.load(f)["utts"]
    utts = list(valid_json.keys())
    # Dimensions are taken from the first utterance of the validation set.
    idim = int(valid_json[utts[0]]["input"][0]["shape"][-1])
    odim = int(valid_json[utts[0]]["output"][0]["shape"][-1])
    logging.info("#input dims : " + str(idim))
    logging.info("#output dims: " + str(odim))
    # Initialize with pre-trained ASR encoder and MT decoder
    if args.enc_init is not None or args.dec_init is not None:
        model = load_trained_modules(idim, odim, args, interface=STInterface)
    else:
        model_class = dynamic_import(args.model_module)
        model = model_class(idim, odim, args)
    assert isinstance(model, STInterface)
    total_subsampling_factor = model.get_total_subsampling_factor()
    # write model config
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    model_conf = args.outdir + "/model.json"
    with open(model_conf, "wb") as f:
        logging.info("writing a model config file to " + model_conf)
        f.write(
            json.dumps(
                (idim, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
    for key in sorted(vars(args).keys()):
        logging.info("ARGS: " + key + ": " + str(vars(args)[key]))
    reporter = model.reporter
    # check the use of multi-gpu
    if args.ngpu > 1:
        if args.batch_size != 0:
            logging.warning(
                "batch size is automatically increased (%d -> %d)"
                % (args.batch_size, args.batch_size * args.ngpu)
            )
            args.batch_size *= args.ngpu
    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    if args.train_dtype in ("float16", "float32", "float64"):
        dtype = getattr(torch, args.train_dtype)
    else:
        # apex opt levels ("O0".."O3") keep the model in float32 here;
        # amp handles the mixed-precision casting below.
        dtype = torch.float32
    model = model.to(device=device, dtype=dtype)
    logging.warning(
        "num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format(
            sum(p.numel() for p in model.parameters()),
            sum(p.numel() for p in model.parameters() if p.requires_grad),
            sum(p.numel() for p in model.parameters() if p.requires_grad)
            * 100.0
            / sum(p.numel() for p in model.parameters()),
        )
    )
    # Setup an optimizer
    if args.opt == "adadelta":
        optimizer = torch.optim.Adadelta(
            model.parameters(), rho=0.95, eps=args.eps, weight_decay=args.weight_decay
        )
    elif args.opt == "adam":
        optimizer = torch.optim.Adam(
            model.parameters(), lr=args.lr, weight_decay=args.weight_decay
        )
    elif args.opt == "noam":
        from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
        optimizer = get_std_opt(
            model.parameters(),
            args.adim,
            args.transformer_warmup_steps,
            args.transformer_lr,
        )
    else:
        raise NotImplementedError("unknown optimizer: " + args.opt)
    # setup apex.amp
    if args.train_dtype in ("O0", "O1", "O2", "O3"):
        try:
            from apex import amp
        except ImportError as e:
            logging.error(
                f"You need to install apex for --train-dtype {args.train_dtype}. "
                "See https://github.com/NVIDIA/apex#linux"
            )
            raise e
        if args.opt == "noam":
            # The noam wrapper holds the real optimizer in `.optimizer`.
            model, optimizer.optimizer = amp.initialize(
                model, optimizer.optimizer, opt_level=args.train_dtype
            )
        else:
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=args.train_dtype
            )
        use_apex = True
    else:
        use_apex = False
    # FIXME: TOO DIRTY HACK
    # The chainer Trainer expects the optimizer to expose a `target` and a
    # `serialize` method; graft them onto the torch optimizer.
    setattr(optimizer, "target", reporter)
    setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
    # Setup a converter
    converter = CustomConverter(
        subsampling_factor=model.subsample[0],
        dtype=dtype,
        use_source_text=args.asr_weight > 0 or args.mt_weight > 0,
    )
    # read json data
    with open(args.train_json, "rb") as f:
        train_json = json.load(f)["utts"]
    with open(args.valid_json, "rb") as f:
        valid_json = json.load(f)["utts"]
    use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
    # make minibatch list (variable length)
    train = make_batchset(
        train_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=args.ngpu if args.ngpu > 1 else 1,
        shortest_first=use_sortagrad,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    valid = make_batchset(
        valid_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        min_batch_size=args.ngpu if args.ngpu > 1 else 1,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    load_tr = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={"train": True},  # Switch the mode of preprocessing
    )
    load_cv = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={"train": False},  # Switch the mode of preprocessing
    )
    # hack to make batchsize argument as 1
    # actual bathsize is included in a list
    # default collate function converts numpy array to pytorch tensor
    # we used an empty collate function instead which returns list
    train_iter = ChainerDataLoader(
        dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
        batch_size=1,
        num_workers=args.n_iter_processes,
        shuffle=not use_sortagrad,
        collate_fn=lambda x: x[0],
    )
    valid_iter = ChainerDataLoader(
        dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
        batch_size=1,
        shuffle=False,
        collate_fn=lambda x: x[0],
        num_workers=args.n_iter_processes,
    )
    # Set up a trainer
    updater = CustomUpdater(
        model,
        args.grad_clip,
        {"main": train_iter},
        optimizer,
        device,
        args.ngpu,
        args.grad_noise,
        args.accum_grad,
        use_apex=use_apex,
    )
    trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)
    if use_sortagrad:
        # Re-enable shuffling once the sortagrad warm-up epochs are over.
        trainer.extend(
            ShufflingEnabler([train_iter]),
            trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
        )
    # Resume from a snapshot
    if args.resume:
        logging.info("resumed from %s" % args.resume)
        torch_resume(args.resume, trainer)
    # Evaluate the model with the test dataset for each epoch
    if args.save_interval_iters > 0:
        trainer.extend(
            CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu),
            trigger=(args.save_interval_iters, "iteration"),
        )
    else:
        trainer.extend(
            CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu)
        )
    # Save attention weight at each epoch
    if args.num_save_attention > 0:
        # NOTE: sorted by input length (longest first) for plotting.
        data = sorted(
            list(valid_json.items())[: args.num_save_attention],
            key=lambda x: int(x[1]["input"][0]["shape"][1]),
            reverse=True,
        )
        if hasattr(model, "module"):
            # DataParallel wraps the real model in `.module`.
            att_vis_fn = model.module.calculate_all_attentions
            plot_class = model.module.attention_plot_class
        else:
            att_vis_fn = model.calculate_all_attentions
            plot_class = model.attention_plot_class
        att_reporter = plot_class(
            att_vis_fn,
            data,
            args.outdir + "/att_ws",
            converter=converter,
            transform=load_cv,
            device=device,
            subsampling_factor=total_subsampling_factor,
        )
        trainer.extend(att_reporter, trigger=(1, "epoch"))
    else:
        att_reporter = None
    # Save CTC prob at each epoch
    if (args.asr_weight > 0 and args.mtlalpha > 0) and args.num_save_ctc > 0:
        # NOTE: sort it by output lengths
        data = sorted(
            list(valid_json.items())[: args.num_save_ctc],
            key=lambda x: int(x[1]["output"][0]["shape"][0]),
            reverse=True,
        )
        if hasattr(model, "module"):
            ctc_vis_fn = model.module.calculate_all_ctc_probs
            plot_class = model.module.ctc_plot_class
        else:
            ctc_vis_fn = model.calculate_all_ctc_probs
            plot_class = model.ctc_plot_class
        ctc_reporter = plot_class(
            ctc_vis_fn,
            data,
            args.outdir + "/ctc_prob",
            converter=converter,
            transform=load_cv,
            device=device,
            subsampling_factor=total_subsampling_factor,
        )
        trainer.extend(ctc_reporter, trigger=(1, "epoch"))
    else:
        ctc_reporter = None
    # Make a plot for training and validation values
    trainer.extend(
        extensions.PlotReport(
            [
                "main/loss",
                "validation/main/loss",
                "main/loss_asr",
                "validation/main/loss_asr",
                "main/loss_mt",
                "validation/main/loss_mt",
                "main/loss_st",
                "validation/main/loss_st",
            ],
            "epoch",
            file_name="loss.png",
        )
    )
    trainer.extend(
        extensions.PlotReport(
            [
                "main/acc",
                "validation/main/acc",
                "main/acc_asr",
                "validation/main/acc_asr",
                "main/acc_mt",
                "validation/main/acc_mt",
            ],
            "epoch",
            file_name="acc.png",
        )
    )
    trainer.extend(
        extensions.PlotReport(
            ["main/bleu", "validation/main/bleu"], "epoch", file_name="bleu.png"
        )
    )
    # Save best models
    trainer.extend(
        snapshot_object(model, "model.loss.best"),
        trigger=training.triggers.MinValueTrigger("validation/main/loss"),
    )
    trainer.extend(
        snapshot_object(model, "model.acc.best"),
        trigger=training.triggers.MaxValueTrigger("validation/main/acc"),
    )
    # save snapshot which contains model and optimizer states
    if args.save_interval_iters > 0:
        trainer.extend(
            torch_snapshot(filename="snapshot.iter.{.updater.iteration}"),
            trigger=(args.save_interval_iters, "iteration"),
        )
    else:
        trainer.extend(torch_snapshot(), trigger=(1, "epoch"))
    # epsilon decay in the optimizer
    # When the monitored criterion degrades, restore the best model so far
    # and decay eps (adadelta) or lr (adam).
    if args.opt == "adadelta":
        if args.criterion == "acc":
            trainer.extend(
                restore_snapshot(
                    model, args.outdir + "/model.acc.best", load_fn=torch_load
                ),
                trigger=CompareValueTrigger(
                    "validation/main/acc",
                    lambda best_value, current_value: best_value > current_value,
                ),
            )
            trainer.extend(
                adadelta_eps_decay(args.eps_decay),
                trigger=CompareValueTrigger(
                    "validation/main/acc",
                    lambda best_value, current_value: best_value > current_value,
                ),
            )
        elif args.criterion == "loss":
            trainer.extend(
                restore_snapshot(
                    model, args.outdir + "/model.loss.best", load_fn=torch_load
                ),
                trigger=CompareValueTrigger(
                    "validation/main/loss",
                    lambda best_value, current_value: best_value < current_value,
                ),
            )
            trainer.extend(
                adadelta_eps_decay(args.eps_decay),
                trigger=CompareValueTrigger(
                    "validation/main/loss",
                    lambda best_value, current_value: best_value < current_value,
                ),
            )
    elif args.opt == "adam":
        if args.criterion == "acc":
            trainer.extend(
                restore_snapshot(
                    model, args.outdir + "/model.acc.best", load_fn=torch_load
                ),
                trigger=CompareValueTrigger(
                    "validation/main/acc",
                    lambda best_value, current_value: best_value > current_value,
                ),
            )
            trainer.extend(
                adam_lr_decay(args.lr_decay),
                trigger=CompareValueTrigger(
                    "validation/main/acc",
                    lambda best_value, current_value: best_value > current_value,
                ),
            )
        elif args.criterion == "loss":
            trainer.extend(
                restore_snapshot(
                    model, args.outdir + "/model.loss.best", load_fn=torch_load
                ),
                trigger=CompareValueTrigger(
                    "validation/main/loss",
                    lambda best_value, current_value: best_value < current_value,
                ),
            )
            trainer.extend(
                adam_lr_decay(args.lr_decay),
                trigger=CompareValueTrigger(
                    "validation/main/loss",
                    lambda best_value, current_value: best_value < current_value,
                ),
            )
    # Write a log of evaluation statistics for each epoch
    trainer.extend(
        extensions.LogReport(trigger=(args.report_interval_iters, "iteration"))
    )
    report_keys = [
        "epoch",
        "iteration",
        "main/loss",
        "main/loss_st",
        "main/loss_asr",
        "validation/main/loss",
        "validation/main/loss_st",
        "validation/main/loss_asr",
        "main/acc",
        "validation/main/acc",
    ]
    if args.asr_weight > 0:
        report_keys.append("main/acc_asr")
        report_keys.append("validation/main/acc_asr")
    report_keys += ["elapsed_time"]
    if args.opt == "adadelta":
        trainer.extend(
            extensions.observe_value(
                "eps",
                lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
                    "eps"
                ],
            ),
            trigger=(args.report_interval_iters, "iteration"),
        )
        report_keys.append("eps")
    elif args.opt in ["adam", "noam"]:
        trainer.extend(
            extensions.observe_value(
                "lr",
                lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
                    "lr"
                ],
            ),
            trigger=(args.report_interval_iters, "iteration"),
        )
        report_keys.append("lr")
    if args.asr_weight > 0:
        if args.mtlalpha > 0:
            report_keys.append("main/cer_ctc")
            report_keys.append("validation/main/cer_ctc")
        if args.mtlalpha < 1:
            if args.report_cer:
                report_keys.append("validation/main/cer")
            if args.report_wer:
                report_keys.append("validation/main/wer")
    if args.report_bleu:
        report_keys.append("main/bleu")
        report_keys.append("validation/main/bleu")
    trainer.extend(
        extensions.PrintReport(report_keys),
        trigger=(args.report_interval_iters, "iteration"),
    )
    trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
    set_early_stop(trainer, args)
    if args.tensorboard_dir is not None and args.tensorboard_dir != "":
        from torch.utils.tensorboard import SummaryWriter
        trainer.extend(
            TensorboardLogger(
                SummaryWriter(args.tensorboard_dir),
                att_reporter=att_reporter,
                ctc_reporter=ctc_reporter,
            ),
            trigger=(args.report_interval_iters, "iteration"),
        )
    # Run the training
    trainer.run()
    check_early_stop(trainer, args.epochs)
def trans(args):
    """Decode with the given args.

    Loads a trained ST model, decodes the utterances listed in
    ``args.trans_json`` (one by one or in sorted mini-batches depending on
    ``args.batchsize``), and writes the n-best results to
    ``args.result_label`` as JSON.

    Args:
        args (namespace): The program arguments.
    """
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, STInterface)
    model.trans_args = args
    # gpu
    if args.ngpu == 1:
        gpu_id = list(range(args.ngpu))
        logging.info("gpu id: " + str(gpu_id))
        model.cuda()
    # read json data
    with open(args.trans_json, "rb") as f:
        js = json.load(f)["utts"]
    new_js = {}
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        # The preprocessing config from training is used unless explicitly
        # overridden on the command line.
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    if args.batchsize == 0:
        # Utterance-by-utterance decoding.
        with torch.no_grad():
            for idx, name in enumerate(js.keys(), 1):
                logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
                batch = [(name, js[name])]
                feat = load_inputs_and_targets(batch)[0][0]
                nbest_hyps = model.translate(
                    feat,
                    args,
                    train_args.char_list,
                )
                new_js[name] = add_results_to_json(
                    js[name], nbest_hyps, train_args.char_list
                )
    else:
        def grouper(n, iterable, fillvalue=None):
            # Collect the iterable into fixed-length chunks, padding the
            # last chunk with `fillvalue`.
            kargs = [iter(iterable)] * n
            return itertools.zip_longest(*kargs, fillvalue=fillvalue)
        # sort data if batchsize > 1
        keys = list(js.keys())
        if args.batchsize > 1:
            # Longest utterances first, so each batch holds similar lengths.
            feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
            sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
            keys = [keys[i] for i in sorted_index]
        with torch.no_grad():
            for names in grouper(args.batchsize, keys, None):
                # Drop the None fill values of the final chunk.
                names = [name for name in names if name]
                batch = [(name, js[name]) for name in names]
                feats = load_inputs_and_targets(batch)[0]
                nbest_hyps = model.translate_batch(
                    feats,
                    args,
                    train_args.char_list,
                )
                for i, nbest_hyp in enumerate(nbest_hyps):
                    name = names[i]
                    new_js[name] = add_results_to_json(
                        js[name], nbest_hyp, train_args.char_list
                    )
    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
| 22,845 | 32.795858 | 88 | py |
espnet | espnet-master/espnet/distributed/pytorch_backend/launch.py | #
# SPDX-FileCopyrightText:
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""This is a helper module for distributed training.
The code uses an official implementation of
distributed data parallel launcher as just a reference.
https://github.com/pytorch/pytorch/blob/v1.8.2/torch/distributed/launch.py
One main difference is this code focuses on
launching simple function with given arguments.
"""
import multiprocessing
import os
import signal
import socket
import time
if hasattr(signal, "valid_signals"):
    # Python >= 3.8: derive the number -> name mapping from the signals the
    # running platform actually supports.
    _signalno_name_map = {
        s.value: s.name for s in signal.valid_signals() if isinstance(s, signal.Signals)
    }
else:
    # TODO(lazykyama): It should be deprecated
    # once Python 3.7 is removed from supported platform.
    # Static fallback table for platforms without signal.valid_signals().
    # A dict literal is used instead of dict([(k, v), ...]); it is the
    # idiomatic spelling and avoids building a throwaway list of tuples
    # (flake8-comprehensions C406).
    _signalno_name_map = {
        1: "SIGHUP",
        2: "SIGINT",
        3: "SIGQUIT",
        4: "SIGILL",
        5: "SIGTRAP",
        6: "SIGABRT",
        7: "SIGBUS",
        8: "SIGFPE",
        9: "SIGKILL",
        10: "SIGUSR1",
        11: "SIGSEGV",
        12: "SIGUSR2",
        13: "SIGPIPE",
        14: "SIGALRM",
        15: "SIGTERM",
        17: "SIGCHLD",
        18: "SIGCONT",
        19: "SIGSTOP",
        20: "SIGTSTP",
        21: "SIGTTIN",
        22: "SIGTTOU",
        23: "SIGURG",
        24: "SIGXCPU",
        25: "SIGXFSZ",
        26: "SIGVTALRM",
        27: "SIGPROF",
        28: "SIGWINCH",
        29: "SIGIO",
        30: "SIGPWR",
        31: "SIGSYS",
        34: "SIGRTMIN",
        64: "SIGRTMAX",
    }
class WorkerError(multiprocessing.ProcessError):
    """Raised when a launched worker process exits abnormally."""

    def __init__(self, *, msg, exitcode, worker_id):
        """Store the failing worker's id and its exit status."""
        super().__init__(msg)
        self._exitcode = exitcode
        self._worker_id = worker_id

    def __str__(self):
        """Summarize which worker died and with which exit code."""
        return "worker[{}] failed with exitcode={}".format(
            self._worker_id, self._exitcode
        )

    @property
    def exitcode(self):
        """Exit status reported by the failed worker process."""
        return self._exitcode

    @property
    def worker_id(self):
        """Index of the worker process that caused this error."""
        return self._worker_id
class MainProcessError(multiprocessing.ProcessError):
    """Raised when the main process is interrupted by a signal."""

    def __init__(self, *, signal_no):
        """Build the error message from the received signal number."""
        msg = "{} received, exiting due to {}.".format(
            _signalno_name_map[signal_no], signal.strsignal(signal_no)
        )
        super().__init__(msg)
        self._signal_no = signal_no
        self._msg = msg

    def __str__(self):
        """Return the precomputed error message."""
        return self._msg

    @property
    def signal_no(self):
        """Number of the signal that stopped the main process."""
        return self._signal_no
def set_start_method(method):
    """Set the multiprocessing start method.

    Args:
        method (str): One of ``"fork"``, ``"spawn"`` or ``"forkserver"``.

    Raises:
        ValueError: If ``method`` is not a supported start method.
            (Previously this was an ``assert``, which is silently stripped
            when Python runs with ``-O``; ``multiprocessing`` itself also
            raises ``ValueError`` for unknown methods, so callers see a
            consistent exception type either way.)
    """
    if method not in ("fork", "spawn", "forkserver"):
        raise ValueError(f"unsupported start method: {method}")
    return multiprocessing.set_start_method(method)
def free_port():
    """Pick a currently-unused TCP port via a transient ``bind()``.

    There is a window between finding this port and actually using it in
    which another process may grab it, so the port is not guaranteed to
    still be free when used.
    """
    # This method is copied from ESPnet v2's utility below.
    # https://github.com/espnet/espnet/blob/43ce0c69fb32961235534b348700dc6c74ad5792/espnet2/train/distributed_utils.py#L187-L198
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Binding to port 0 lets the OS assign an ephemeral free port.
        sock.bind(("", 0))
        _, port = sock.getsockname()
        return port
    finally:
        sock.close()
def _kill_processes(processes):
# TODO(lazykyama): This implementation can't stop all processes
# which have grandchildren processes launched
# within each child process directly forked from this script.
# Need improvement for more safe termination.
for p in processes:
try:
# NOTE: multiprocessing.Process.kill() was introduced in 3.7.
# https://docs.python.org/3.7/library/multiprocessing.html#multiprocessing.Process.kill
if not hasattr(p, "kill"):
p.terminate()
else:
p.kill()
except Exception: # noqa: E722
# NOTE: Ignore any exception happens during killing a process
# because this intends to send kill signal to *all* processes.
pass
def launch(func, args, nprocs, master_addr="localhost", master_port=None):
    """Launch processes with a given function and given arguments.

    Spawns ``nprocs`` worker processes running ``func(args)`` with PyTorch
    distributed environment variables (WORLD_SIZE/RANK/LOCAL_RANK/
    MASTER_ADDR/MASTER_PORT) set per worker, then monitors them until all
    exit successfully.

    Args:
        func (callable): Entry point executed in each worker process.
        args: Single argument object passed to ``func``.
        nprocs (int): Number of worker processes to launch.
        master_addr (str): Address for the distributed rendezvous.
        master_port (int or None): Port for the rendezvous; a free port is
            picked automatically when ``None``.

    Raises:
        WorkerError: If any worker exits with a non-zero exit code.
        MainProcessError: If SIGINT/SIGTERM is delivered to this process.

    .. note:: Current implementation supports only the single node case.
    """
    if master_port is None:
        master_port = free_port()
    # Set PyTorch distributed related environmental variables
    # NOTE: in contrast to subprocess.Popen,
    # explicit environment variables can not be specified.
    # It's necessary to add additional variables to
    # current environment variable list.
    original_env = os.environ.copy()
    # TODO(lazykyama): multi-node support
    os.environ["WORLD_SIZE"] = str(nprocs)
    os.environ["MASTER_ADDR"] = master_addr
    os.environ["MASTER_PORT"] = str(master_port)
    processes = []
    for local_rank in range(nprocs):
        # Each process's rank
        # TODO(lazykyama): multi-node support
        os.environ["RANK"] = str(local_rank)
        os.environ["LOCAL_RANK"] = str(local_rank)
        process = multiprocessing.Process(target=func, args=(args,))
        process.start()
        processes.append(process)
    # Set signal handler to capture signals sent to main process,
    # and ensure that all children processes will be terminated.
    def _handler(signal_no, _):
        _kill_processes(processes)
        raise MainProcessError(signal_no=signal_no)
    signal.signal(signal.SIGINT, _handler)
    signal.signal(signal.SIGTERM, _handler)
    # Recovery environment variables.
    os.environ.clear()
    os.environ.update(original_env)
    # Monitor all workers: poll once per second until every process has
    # finished cleanly or one of them fails.
    worker_error = None
    finished_process_ids = set()
    while len(processes) > len(finished_process_ids):
        for localrank, p in enumerate(processes):
            if p.pid in finished_process_ids:
                # Skip rest of checks because
                # this process has already finished.
                continue
            if p.is_alive():
                # This process is still running.
                continue
            elif p.exitcode == 0:
                # This process properly finished.
                finished_process_ids.add(p.pid)
            else:
                # An error happens in one process.
                # Will try to terminate all other processes.
                worker_error = WorkerError(
                    msg=(f"{func.__name__} failed with error code: {p.exitcode}"),
                    exitcode=p.exitcode,
                    worker_id=localrank,
                )
                break
        if worker_error is not None:
            # Go out of this while loop to terminate all processes.
            break
        time.sleep(1.0)
    if worker_error is not None:
        # Trying to stop all workers.
        _kill_processes(processes)
        raise worker_error
| 7,551 | 31.551724 | 129 | py |
espnet | espnet-master/espnet/asr/asr_utils.py | # Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import copy
import json
import logging
import os
import shutil
import tempfile
import numpy as np
import torch
# * -------------------- training iterator related -------------------- *
class CompareValueTrigger(object):
    """Trigger that fires when the monitored value stops improving.

    Args:
        key (str): Key of the observed value to monitor.
        compare_fn ((float, float) -> bool): Returns True when the previous
            best value is "better" than the current one, i.e. when the
            trigger should fire.
        trigger (tuple(int, str)): Trigger that decides the comparison
            interval.
    """

    def __init__(self, key, compare_fn, trigger=(1, "epoch")):
        """Initialize the trigger with a key and a comparison function."""
        from chainer import training

        self._key = key
        self._best_value = None
        self._interval_trigger = training.util.get_trigger(trigger)
        self._init_summary()
        self._compare_fn = compare_fn

    def __call__(self, trainer):
        """Accumulate the watched value and decide whether to fire."""
        observation = trainer.observation
        if self._key in observation:
            self._summary.add({self._key: observation[self._key]})
        if not self._interval_trigger(trainer):
            return False
        value = float(self._summary.compute_mean()[self._key])  # copy to CPU
        self._init_summary()
        if self._best_value is None:
            # First measurement: remember it and do not fire.
            self._best_value = value
            return False
        if self._compare_fn(self._best_value, value):
            # The value got worse than the best seen so far: fire without
            # overwriting the stored best value.
            return True
        # The value improved: record the new best and do not fire.
        self._best_value = value
        return False

    def _init_summary(self):
        """Reset the accumulated observation summary."""
        import chainer

        self._summary = chainer.reporter.DictSummary()
try:
    from chainer.training import extension
except ImportError:
    # chainer is optional; leave a None placeholder so importers can feature-test.
    PlotAttentionReport = None
else:
    class PlotAttentionReport(extension.Extension):
        """Plot attention reporter.
        Args:
            att_vis_fn (espnet.nets.*_backend.e2e_asr.E2E.calculate_all_attentions):
                Function of attention visualization.
            data (list[tuple(str, dict[str, list[Any]])]): List json utt key items.
            outdir (str): Directory to save figures.
            converter (espnet.asr.*_backend.asr.CustomConverter):
                Function to convert data.
            device (int | torch.device): Device.
            reverse (bool): If True, input and output length are reversed.
            ikey (str): Key to access input
                (for ASR/ST ikey="input", for MT ikey="output".)
            iaxis (int): Dimension to access input
                (for ASR/ST iaxis=0, for MT iaxis=1.)
            okey (str): Key to access output
                (for ASR/ST okey="input", for MT okey="output".)
            oaxis (int): Dimension to access output
                (for ASR/ST oaxis=0, for MT oaxis=0.)
            subsampling_factor (int): subsampling factor in encoder
        """
        def __init__(
            self,
            att_vis_fn,
            data,
            outdir,
            converter,
            transform,
            device,
            reverse=False,
            ikey="input",
            iaxis=0,
            okey="output",
            oaxis=0,
            subsampling_factor=1,
        ):
            self.att_vis_fn = att_vis_fn
            self.data = copy.deepcopy(data)
            self.data_dict = {k: v for k, v in copy.deepcopy(data)}
            # key is utterance ID
            self.outdir = outdir
            self.converter = converter
            self.transform = transform
            self.device = device
            self.reverse = reverse
            self.ikey = ikey
            self.iaxis = iaxis
            self.okey = okey
            self.oaxis = oaxis
            # encoder subsampling factor; input lengths are divided by it
            # when trimming attention maps (see trim_attention_weight)
            self.factor = subsampling_factor
            if not os.path.exists(self.outdir):
                os.makedirs(self.outdir)
        def __call__(self, trainer):
            """Plot and save image file of att_ws matrix."""
            att_ws, uttid_list = self.get_attention_weights()
            # A list means multi-encoder: the last entry holds the
            # hierarchical attention network (HAN) weights.
            if isinstance(att_ws, list):  # multi-encoder case
                num_encs = len(att_ws) - 1
                # atts
                for i in range(num_encs):
                    for idx, att_w in enumerate(att_ws[i]):
                        filename = "%s/%s.ep.{.updater.epoch}.att%d.png" % (
                            self.outdir,
                            uttid_list[idx],
                            i + 1,
                        )
                        att_w = self.trim_attention_weight(uttid_list[idx], att_w)
                        np_filename = "%s/%s.ep.{.updater.epoch}.att%d.npy" % (
                            self.outdir,
                            uttid_list[idx],
                            i + 1,
                        )
                        np.save(np_filename.format(trainer), att_w)
                        self._plot_and_save_attention(att_w, filename.format(trainer))
                # han
                for idx, att_w in enumerate(att_ws[num_encs]):
                    filename = "%s/%s.ep.{.updater.epoch}.han.png" % (
                        self.outdir,
                        uttid_list[idx],
                    )
                    att_w = self.trim_attention_weight(uttid_list[idx], att_w)
                    np_filename = "%s/%s.ep.{.updater.epoch}.han.npy" % (
                        self.outdir,
                        uttid_list[idx],
                    )
                    np.save(np_filename.format(trainer), att_w)
                    self._plot_and_save_attention(
                        att_w, filename.format(trainer), han_mode=True
                    )
            else:
                for idx, att_w in enumerate(att_ws):
                    filename = "%s/%s.ep.{.updater.epoch}.png" % (
                        self.outdir,
                        uttid_list[idx],
                    )
                    att_w = self.trim_attention_weight(uttid_list[idx], att_w)
                    np_filename = "%s/%s.ep.{.updater.epoch}.npy" % (
                        self.outdir,
                        uttid_list[idx],
                    )
                    np.save(np_filename.format(trainer), att_w)
                    self._plot_and_save_attention(att_w, filename.format(trainer))
        def log_attentions(self, logger, step):
            """Add image files of att_ws matrix to the tensorboard."""
            att_ws, uttid_list = self.get_attention_weights()
            if isinstance(att_ws, list):  # multi-encoder case
                num_encs = len(att_ws) - 1
                # atts
                for i in range(num_encs):
                    for idx, att_w in enumerate(att_ws[i]):
                        att_w = self.trim_attention_weight(uttid_list[idx], att_w)
                        plot = self.draw_attention_plot(att_w)
                        logger.add_figure(
                            "%s_att%d" % (uttid_list[idx], i + 1),
                            plot.gcf(),
                            step,
                        )
                # han
                for idx, att_w in enumerate(att_ws[num_encs]):
                    att_w = self.trim_attention_weight(uttid_list[idx], att_w)
                    plot = self.draw_han_plot(att_w)
                    logger.add_figure(
                        "%s_han" % (uttid_list[idx]),
                        plot.gcf(),
                        step,
                    )
            else:
                for idx, att_w in enumerate(att_ws):
                    att_w = self.trim_attention_weight(uttid_list[idx], att_w)
                    plot = self.draw_attention_plot(att_w)
                    logger.add_figure("%s" % (uttid_list[idx]), plot.gcf(), step)
        def get_attention_weights(self):
            """Return attention weights.
            Returns:
                numpy.ndarray: attention weights. float. Its shape would be
                    differ from backend.
                    * pytorch-> 1) multi-head case => (B, H, Lmax, Tmax), 2)
                        other case => (B, Lmax, Tmax).
                    * chainer-> (B, Lmax, Tmax)
            """
            return_batch, uttid_list = self.transform(self.data, return_uttid=True)
            batch = self.converter([return_batch], self.device)
            # The converter may yield positional or keyword arguments.
            if isinstance(batch, tuple):
                att_ws = self.att_vis_fn(*batch)
            else:
                att_ws = self.att_vis_fn(**batch)
            return att_ws, uttid_list
        def trim_attention_weight(self, uttid, att_w):
            """Transform attention matrix with regard to self.reverse."""
            if self.reverse:
                enc_key, enc_axis = self.okey, self.oaxis
                dec_key, dec_axis = self.ikey, self.iaxis
            else:
                enc_key, enc_axis = self.ikey, self.iaxis
                dec_key, dec_axis = self.okey, self.oaxis
            dec_len = int(self.data_dict[uttid][dec_key][dec_axis]["shape"][0])
            enc_len = int(self.data_dict[uttid][enc_key][enc_axis]["shape"][0])
            if self.factor > 1:
                # account for encoder frame subsampling
                enc_len //= self.factor
            if len(att_w.shape) == 3:
                # multi-head attention: (head, dec, enc)
                att_w = att_w[:, :dec_len, :enc_len]
            else:
                att_w = att_w[:dec_len, :enc_len]
            return att_w
        def draw_attention_plot(self, att_w):
            """Plot the att_w matrix.
            Returns:
                matplotlib.pyplot: pyplot object with attention matrix image.
            """
            import matplotlib
            matplotlib.use("Agg")
            import matplotlib.pyplot as plt
            plt.clf()
            att_w = att_w.astype(np.float32)
            if len(att_w.shape) == 3:
                # one subplot per attention head
                for h, aw in enumerate(att_w, 1):
                    plt.subplot(1, len(att_w), h)
                    plt.imshow(aw, aspect="auto")
                    plt.xlabel("Encoder Index")
                    plt.ylabel("Decoder Index")
            else:
                plt.imshow(att_w, aspect="auto")
                plt.xlabel("Encoder Index")
                plt.ylabel("Decoder Index")
            plt.tight_layout()
            return plt
        def draw_han_plot(self, att_w):
            """Plot the att_w matrix for hierarchical attention.
            Returns:
                matplotlib.pyplot: pyplot object with attention matrix image.
            """
            import matplotlib
            matplotlib.use("Agg")
            import matplotlib.pyplot as plt
            plt.clf()
            if len(att_w.shape) == 3:
                # one subplot per attention head
                for h, aw in enumerate(att_w, 1):
                    legends = []
                    plt.subplot(1, len(att_w), h)
                    for i in range(aw.shape[1]):
                        plt.plot(aw[:, i])
                        legends.append("Att{}".format(i))
                    plt.ylim([0, 1.0])
                    plt.xlim([0, aw.shape[0]])
                    plt.grid(True)
                    plt.ylabel("Attention Weight")
                    plt.xlabel("Decoder Index")
                    plt.legend(legends)
            else:
                legends = []
                for i in range(att_w.shape[1]):
                    plt.plot(att_w[:, i])
                    legends.append("Att{}".format(i))
                plt.ylim([0, 1.0])
                plt.xlim([0, att_w.shape[0]])
                plt.grid(True)
                plt.ylabel("Attention Weight")
                plt.xlabel("Decoder Index")
                plt.legend(legends)
            plt.tight_layout()
            return plt
        def _plot_and_save_attention(self, att_w, filename, han_mode=False):
            # Render with the HAN-specific line plot when requested.
            if han_mode:
                plt = self.draw_han_plot(att_w)
            else:
                plt = self.draw_attention_plot(att_w)
            plt.savefig(filename)
            plt.close()
try:
    from chainer.training import extension
except ImportError:
    # chainer is optional; leave a None placeholder so importers can feature-test.
    PlotCTCReport = None
else:
    class PlotCTCReport(extension.Extension):
        """Plot CTC reporter.
        Args:
            ctc_vis_fn (espnet.nets.*_backend.e2e_asr.E2E.calculate_all_ctc_probs):
                Function of CTC visualization.
            data (list[tuple(str, dict[str, list[Any]])]): List json utt key items.
            outdir (str): Directory to save figures.
            converter (espnet.asr.*_backend.asr.CustomConverter):
                Function to convert data.
            device (int | torch.device): Device.
            reverse (bool): If True, input and output length are reversed.
            ikey (str): Key to access input
                (for ASR/ST ikey="input", for MT ikey="output".)
            iaxis (int): Dimension to access input
                (for ASR/ST iaxis=0, for MT iaxis=1.)
            okey (str): Key to access output
                (for ASR/ST okey="input", for MT okey="output".)
            oaxis (int): Dimension to access output
                (for ASR/ST oaxis=0, for MT oaxis=0.)
            subsampling_factor (int): subsampling factor in encoder
        """
        def __init__(
            self,
            ctc_vis_fn,
            data,
            outdir,
            converter,
            transform,
            device,
            reverse=False,
            ikey="input",
            iaxis=0,
            okey="output",
            oaxis=0,
            subsampling_factor=1,
        ):
            self.ctc_vis_fn = ctc_vis_fn
            self.data = copy.deepcopy(data)
            self.data_dict = {k: v for k, v in copy.deepcopy(data)}
            # key is utterance ID
            self.outdir = outdir
            self.converter = converter
            self.transform = transform
            self.device = device
            self.reverse = reverse
            self.ikey = ikey
            self.iaxis = iaxis
            self.okey = okey
            self.oaxis = oaxis
            # encoder subsampling factor; input lengths are divided by it
            # when trimming CTC posteriors (see trim_ctc_prob)
            self.factor = subsampling_factor
            if not os.path.exists(self.outdir):
                os.makedirs(self.outdir)
        def __call__(self, trainer):
            """Plot and save image file of ctc prob."""
            ctc_probs, uttid_list = self.get_ctc_probs()
            # A list means multi-encoder: one CTC posterior set per encoder.
            if isinstance(ctc_probs, list):  # multi-encoder case
                num_encs = len(ctc_probs) - 1
                for i in range(num_encs):
                    for idx, ctc_prob in enumerate(ctc_probs[i]):
                        filename = "%s/%s.ep.{.updater.epoch}.ctc%d.png" % (
                            self.outdir,
                            uttid_list[idx],
                            i + 1,
                        )
                        ctc_prob = self.trim_ctc_prob(uttid_list[idx], ctc_prob)
                        np_filename = "%s/%s.ep.{.updater.epoch}.ctc%d.npy" % (
                            self.outdir,
                            uttid_list[idx],
                            i + 1,
                        )
                        np.save(np_filename.format(trainer), ctc_prob)
                        self._plot_and_save_ctc(ctc_prob, filename.format(trainer))
            else:
                for idx, ctc_prob in enumerate(ctc_probs):
                    filename = "%s/%s.ep.{.updater.epoch}.png" % (
                        self.outdir,
                        uttid_list[idx],
                    )
                    ctc_prob = self.trim_ctc_prob(uttid_list[idx], ctc_prob)
                    np_filename = "%s/%s.ep.{.updater.epoch}.npy" % (
                        self.outdir,
                        uttid_list[idx],
                    )
                    np.save(np_filename.format(trainer), ctc_prob)
                    self._plot_and_save_ctc(ctc_prob, filename.format(trainer))
        def log_ctc_probs(self, logger, step):
            """Add image files of ctc probs to the tensorboard."""
            ctc_probs, uttid_list = self.get_ctc_probs()
            if isinstance(ctc_probs, list):  # multi-encoder case
                num_encs = len(ctc_probs) - 1
                for i in range(num_encs):
                    for idx, ctc_prob in enumerate(ctc_probs[i]):
                        ctc_prob = self.trim_ctc_prob(uttid_list[idx], ctc_prob)
                        plot = self.draw_ctc_plot(ctc_prob)
                        logger.add_figure(
                            "%s_ctc%d" % (uttid_list[idx], i + 1),
                            plot.gcf(),
                            step,
                        )
            else:
                for idx, ctc_prob in enumerate(ctc_probs):
                    ctc_prob = self.trim_ctc_prob(uttid_list[idx], ctc_prob)
                    plot = self.draw_ctc_plot(ctc_prob)
                    logger.add_figure("%s" % (uttid_list[idx]), plot.gcf(), step)
        def get_ctc_probs(self):
            """Return CTC probs.
            Returns:
                numpy.ndarray: CTC probs. float. Its shape would be
                    differ from backend. (B, Tmax, vocab).
            """
            return_batch, uttid_list = self.transform(self.data, return_uttid=True)
            batch = self.converter([return_batch], self.device)
            # The converter may yield positional or keyword arguments.
            if isinstance(batch, tuple):
                probs = self.ctc_vis_fn(*batch)
            else:
                probs = self.ctc_vis_fn(**batch)
            return probs, uttid_list
        def trim_ctc_prob(self, uttid, prob):
            """Trim CTC posteriors according to input lengths."""
            enc_len = int(self.data_dict[uttid][self.ikey][self.iaxis]["shape"][0])
            if self.factor > 1:
                # account for encoder frame subsampling
                enc_len //= self.factor
            prob = prob[:enc_len]
            return prob
        def draw_ctc_plot(self, ctc_prob):
            """Plot the ctc_prob matrix.
            Returns:
                matplotlib.pyplot: pyplot object with CTC prob matrix image.
            """
            import matplotlib
            matplotlib.use("Agg")
            import matplotlib.pyplot as plt
            ctc_prob = ctc_prob.astype(np.float32)
            plt.clf()
            topk_ids = np.argsort(ctc_prob, axis=1)
            n_frames, vocab = ctc_prob.shape
            times_probs = np.arange(n_frames)
            plt.figure(figsize=(20, 8))
            # NOTE: index 0 is reserved for blank
            for idx in set(topk_ids.reshape(-1).tolist()):
                if idx == 0:
                    plt.plot(
                        times_probs, ctc_prob[:, 0], ":", label="<blank>", color="grey"
                    )
                else:
                    plt.plot(times_probs, ctc_prob[:, idx])
            plt.xlabel("Input [frame]", fontsize=12)
            plt.ylabel("Posteriors", fontsize=12)
            plt.xticks(list(range(0, int(n_frames) + 1, 10)))
            plt.yticks(list(range(0, 2, 1)))
            plt.tight_layout()
            return plt
        def _plot_and_save_ctc(self, ctc_prob, filename):
            # Render the posterior curves and write them to disk.
            plt = self.draw_ctc_plot(ctc_prob)
            plt.savefig(filename)
            plt.close()
def restore_snapshot(model, snapshot, load_fn=None):
    """Return a once-per-epoch extension that reloads ``model`` from ``snapshot``.

    Args:
        model: Model object whose parameters are restored.
        snapshot (str): Path of the snapshot file to load.
        load_fn (callable): Loader; defaults to ``chainer.serializers.load_npz``.

    Returns:
        An extension function.
    """
    import chainer
    from chainer import training

    if load_fn is None:
        load_fn = chainer.serializers.load_npz

    @training.make_extension(trigger=(1, "epoch"))
    def restore_snapshot(trainer):
        # Delegate to the module-level helper so it can be reused directly.
        _restore_snapshot(model, snapshot, load_fn)

    return restore_snapshot
def _restore_snapshot(model, snapshot, load_fn=None):
    """Load ``snapshot`` into ``model`` using ``load_fn`` (npz loader by default)."""
    if load_fn is None:
        import chainer

        load_fn = chainer.serializers.load_npz
    load_fn(snapshot, model)
    logging.info("restored from " + str(snapshot))
def adadelta_eps_decay(eps_decay):
    """Build a once-per-epoch extension that decays Adadelta's eps.

    Args:
        eps_decay (float): Decay rate of eps.

    Returns:
        An extension function.
    """
    from chainer import training

    @training.make_extension(trigger=(1, "epoch"))
    def adadelta_eps_decay(trainer):
        # Delegate to the module-level helper shared with direct callers.
        _adadelta_eps_decay(trainer, eps_decay)

    return adadelta_eps_decay
def _adadelta_eps_decay(trainer, eps_decay):
optimizer = trainer.updater.get_optimizer("main")
# for chainer
if hasattr(optimizer, "eps"):
current_eps = optimizer.eps
setattr(optimizer, "eps", current_eps * eps_decay)
logging.info("adadelta eps decayed to " + str(optimizer.eps))
# pytorch
else:
for p in optimizer.param_groups:
p["eps"] *= eps_decay
logging.info("adadelta eps decayed to " + str(p["eps"]))
def adam_lr_decay(eps_decay):
    """Build a once-per-epoch extension that decays Adam's learning rate.

    Args:
        eps_decay (float): Decay rate of lr.

    Returns:
        An extension function.
    """
    from chainer import training

    @training.make_extension(trigger=(1, "epoch"))
    def adam_lr_decay(trainer):
        # Delegate to the module-level helper shared with direct callers.
        _adam_lr_decay(trainer, eps_decay)

    return adam_lr_decay
def _adam_lr_decay(trainer, eps_decay):
optimizer = trainer.updater.get_optimizer("main")
# for chainer
if hasattr(optimizer, "lr"):
current_lr = optimizer.lr
setattr(optimizer, "lr", current_lr * eps_decay)
logging.info("adam lr decayed to " + str(optimizer.lr))
# pytorch
else:
for p in optimizer.param_groups:
p["lr"] *= eps_decay
logging.info("adam lr decayed to " + str(p["lr"]))
def torch_snapshot(savefun=torch.save, filename="snapshot.ep.{.updater.epoch}"):
    """Return an extension that snapshots the whole trainer state (pytorch).

    Args:
        savefun (callable): Function used to serialize the snapshot dict.
        filename (str): Output file name template, formatted with the trainer.

    Returns:
        An extension function.
    """
    from chainer.training import extension

    @extension.make_extension(trigger=(1, "epoch"), priority=-100)
    def torch_snapshot(trainer):
        # The trainer acts both as the serialization source and the target.
        _torch_snapshot_object(trainer, trainer, filename.format(trainer), savefun)

    return torch_snapshot
def _torch_snapshot_object(trainer, target, filename, savefun):
    """Serialize trainer, model and optimizer states atomically to disk.

    The snapshot dict is first written inside a temporary directory under
    ``trainer.out`` and then moved into place, so a crash never leaves a
    half-written snapshot file behind.
    """
    from chainer.serializers import DictionarySerializer

    # Serialize the trainer itself into a plain dictionary.
    serializer = DictionarySerializer()
    serializer.save(trainer)

    # Unwrap the network: TTS wraps it in `.model`, and DataParallel wraps
    # either variant in `.module`.
    net = trainer.updater.model
    if hasattr(net, "model"):
        net = net.model  # (for TTS)
    if hasattr(net, "module"):
        net = net.module
    model_state_dict = net.state_dict()

    snapshot_dict = {
        "trainer": serializer.target,
        "model": model_state_dict,
        "optimizer": trainer.updater.get_optimizer("main").state_dict(),
    }

    # Write into a temp dir, then move the file into trainer.out.
    fn = filename.format(trainer)
    tmpdir = tempfile.mkdtemp(prefix="tmp" + fn, dir=trainer.out)
    tmppath = os.path.join(tmpdir, fn)
    try:
        savefun(snapshot_dict, tmppath)
        shutil.move(tmppath, os.path.join(trainer.out, fn))
    finally:
        shutil.rmtree(tmpdir)
def add_gradient_noise(model, iteration, duration=100, eta=1.0, scale_factor=0.55):
    """Add annealed Gaussian noise to every existing gradient of ``model``.

    The noise std is ``sigma = eta / ((iteration // duration + 1) ** scale_factor)``,
    so it shrinks to zero as training progresses.

    Args:
        model (torch.nn.model): Model.
        iteration (int): Number of iterations.
        duration (int) {100, 1000}:
            Number of durations to control the interval of the `sigma` change.
        eta (float) {0.01, 0.3, 1.0}: The magnitude of `sigma`.
        scale_factor (float) {0.55}: The scale of `sigma`.
    """
    interval = iteration // duration + 1
    sigma = eta / interval**scale_factor
    for param in model.parameters():
        if param.grad is None:
            # parameters without gradients are left untouched
            continue
        noise = sigma * torch.randn(param.grad.size()).to(param.device)
        param.grad += noise
# * -------------------- general -------------------- *
def get_model_conf(model_path, conf_path=None):
    """Load model configuration from a json config file (model.json).

    Args:
        model_path (str): Model path; ``model.json`` next to it is read
            when ``conf_path`` is not given.
        conf_path (str): Optional explicit config path.

    Returns:
        argparse.Namespace for LM configs, or
        (idim, odim, argparse.Namespace) for ASR/TTS/MT configs.
    """
    if conf_path is None:
        model_conf = os.path.dirname(model_path) + "/model.json"
    else:
        model_conf = conf_path
    with open(model_conf, "rb") as f:
        logging.info("reading a config file from " + model_conf)
        confs = json.load(f)
    if isinstance(confs, dict):
        # LM config files store a single options dict.
        return argparse.Namespace(**confs)
    # ASR/TTS/MT config files store [idim, odim, options].
    idim, odim, args = confs
    return idim, odim, argparse.Namespace(**args)
def chainer_load(path, model):
    """Load chainer model parameters.

    Args:
        path (str): Model path or snapshot file path to be loaded.
        model (chainer.Chain): Chainer model.
    """
    import chainer

    # Trainer snapshots nest the model under "updater/model:main/".
    if "snapshot" in os.path.basename(path):
        chainer.serializers.load_npz(path, model, path="updater/model:main/")
    else:
        chainer.serializers.load_npz(path, model)
def torch_save(path, model):
    """Save torch model parameters to ``path``.

    Args:
        path (str): Model path to be saved.
        model (torch.nn.Module): Torch model (possibly DataParallel-wrapped).
    """
    # Unwrap DataParallel so the saved state dict has plain key names.
    target = model.module if hasattr(model, "module") else model
    torch.save(target.state_dict(), path)
def snapshot_object(target, filename):
    """Returns a trainer extension to take snapshots of a given object.

    Args:
        target (model): Object to serialize.
        filename (str): Name of the file into which the object is serialized.
            May be a format string receiving the trainer, e.g.
            ``'snapshot_{.updater.iteration}'`` becomes ``'snapshot_10000'``
            at the 10,000th iteration.

    Returns:
        An extension function.
    """
    from chainer.training import extension

    @extension.make_extension(trigger=(1, "epoch"), priority=-100)
    def snapshot_object(trainer):
        # Reuse torch_save so DataParallel unwrapping stays in one place.
        torch_save(os.path.join(trainer.out, filename.format(trainer)), target)

    return snapshot_object
def torch_load(path, model):
    """Load torch model parameters from a model file or trainer snapshot.

    Args:
        path (str): Model path or snapshot file path to be loaded.
        model (torch.nn.Module): Torch model receiving the state dict.
    """
    # Snapshots wrap the weights in a dict under the "model" key.
    state = torch.load(path, map_location=lambda storage, loc: storage)
    if "snapshot" in os.path.basename(path):
        state = state["model"]
    if hasattr(model, "module"):
        model.module.load_state_dict(state)
    else:
        model.load_state_dict(state)
    del state
def torch_resume(snapshot_path, trainer):
    """Resume trainer, model and optimizer states from a pytorch snapshot.

    Args:
        snapshot_path (str): Snapshot file path.
        trainer (chainer.training.Trainer): Chainer's trainer instance.
    """
    from chainer.serializers import NpzDeserializer

    # load snapshot
    snapshot_dict = torch.load(snapshot_path, map_location=lambda storage, loc: storage)

    # Restore trainer states (iteration counters, extension states, ...).
    NpzDeserializer(snapshot_dict["trainer"]).load(trainer)

    # Restore model states, unwrapping TTS `.model` and DataParallel `.module`.
    net = trainer.updater.model
    if hasattr(net, "model"):
        net = net.model  # (for TTS model)
    if hasattr(net, "module"):
        net = net.module
    net.load_state_dict(snapshot_dict["model"])

    # Restore optimizer states.
    trainer.updater.get_optimizer("main").load_state_dict(snapshot_dict["optimizer"])

    # delete opened snapshot
    del snapshot_dict
# * ------------------ recognition related ------------------ *
def parse_hypothesis(hyp, char_list):
    """Parse one recognition hypothesis into readable strings.

    Args:
        hyp (dict[str, Any]): Recognition hypothesis with "yseq" and "score".
        char_list (list[str]): List of characters.

    Returns:
        tuple(str, str, str, float): text, token, tokenid, score.
    """
    # Drop the leading <sos> symbol before decoding.
    token_ids = [int(idx) for idx in hyp["yseq"][1:]]
    tokens = [char_list[idx] for idx in token_ids]
    score = float(hyp["score"])

    # convert to string
    tokenid = " ".join(str(idx) for idx in token_ids)
    token = " ".join(tokens)
    text = "".join(tokens).replace("<space>", " ")

    return text, token, tokenid, score
def add_results_to_json(js, nbest_hyps, char_list):
    """Add N-best recognition results to a groundtruth utterance dict.

    Args:
        js (dict[str, Any]): Groundtruth utterance dict.
        nbest_hyps (list[dict[str, Any]]): N-best hypothesis list.
        char_list (list[str]): List of characters.

    Returns:
        dict[str, Any]: N-best results added utterance dict.
    """
    new_js = {"utt2spk": js["utt2spk"], "output": []}
    for rank, hyp in enumerate(nbest_hyps, 1):
        rec_text, rec_token, rec_tokenid, score = parse_hypothesis(hyp, char_list)

        # Start from the ground-truth entry when one exists; speech
        # translation may come without any reference.
        if js["output"]:
            out_dic = dict(js["output"][0].items())
        else:
            out_dic = {"name": ""}

        out_dic["name"] += "[%d]" % rank
        out_dic["rec_text"] = rec_text
        out_dic["rec_token"] = rec_token
        out_dic["rec_tokenid"] = rec_tokenid
        out_dic["score"] = score
        new_js["output"].append(out_dic)

        # show 1-best result
        if rank == 1:
            if "text" in out_dic.keys():
                logging.info("groundtruth: %s" % out_dic["text"])
            logging.info("prediction : %s" % out_dic["rec_text"])
    return new_js
def plot_spectrogram(
    plt,
    spec,
    mode="db",
    fs=None,
    frame_shift=None,
    bottom=True,
    left=True,
    right=True,
    top=False,
    labelbottom=True,
    labelleft=True,
    labelright=True,
    labeltop=False,
    cmap="inferno",
):
    """Plot spectrogram using matplotlib.
    Args:
        plt (matplotlib.pyplot): pyplot object.
        spec (numpy.ndarray): Input stft (Freq, Time)
        mode (str): db or linear.
        fs (int): Sample frequency. To convert y-axis to kHz unit.
        frame_shift (int): The frame shift of stft. To convert x-axis to second unit.
        bottom (bool):Whether to draw the respective ticks.
        left (bool):
        right (bool):
        top (bool):
        labelbottom (bool):Whether to draw the respective tick labels.
        labelleft (bool):
        labelright (bool):
        labeltop (bool):
        cmap (str): Colormap defined in matplotlib.
    """
    # Work on the magnitude spectrogram.
    spec = np.abs(spec)
    if mode == "db":
        # eps avoids log10(0) for silent bins
        x = 20 * np.log10(spec + np.finfo(spec.dtype).eps)
    elif mode == "linear":
        x = spec
    else:
        raise ValueError(mode)
    if fs is not None:
        ytop = fs / 2000
        ylabel = "kHz"
    else:
        ytop = x.shape[0]
        ylabel = "bin"
    if frame_shift is not None and fs is not None:
        xtop = x.shape[1] * frame_shift / fs
        xlabel = "s"
    else:
        xtop = x.shape[1]
        xlabel = "frame"
    # extent maps array indices to the physical axis units chosen above
    extent = (0, xtop, 0, ytop)
    plt.imshow(x[::-1], cmap=cmap, extent=extent)
    if labelbottom:
        plt.xlabel("time [{}]".format(xlabel))
    if labelleft:
        plt.ylabel("freq [{}]".format(ylabel))
    plt.colorbar().set_label("{}".format(mode))
    plt.tick_params(
        bottom=bottom,
        left=left,
        right=right,
        top=top,
        labelbottom=labelbottom,
        labelleft=labelleft,
        labelright=labelright,
        labeltop=labeltop,
    )
    plt.axis("auto")
# * ------------------ recognition related ------------------ *
def format_mulenc_args(args):
    """Format args for multi-encoder setup.

    It deals with following situations: (when args.num_encs=2):
    1. args.elayers = None -> args.elayers = [4, 4];
    2. args.elayers = 4 -> args.elayers = [4, 4];
    3. args.elayers = [4, 4, 4] -> args.elayers = [4, 4].
    """
    # Defaults used when an option was left unspecified (None / falsy).
    default_dict = {
        "etype": "blstmp",
        "elayers": 4,
        "eunits": 300,
        "subsample": "1",
        "dropout_rate": 0.0,
        "atype": "dot",
        "adim": 320,
        "awin": 5,
        "aheads": 4,
        "aconv_chans": -1,
        "aconv_filts": 100,
    }
    ns = vars(args)
    for name, default in default_dict.items():
        value = ns[name]
        if isinstance(value, list):
            # Truncate over-long lists to one entry per encoder.
            if len(value) != args.num_encs:
                truncated = value[: args.num_encs]
                logging.warning(
                    "Length mismatch {}: Convert {} to {}.".format(
                        name, value, truncated
                    )
                )
                ns[name] = truncated
        else:
            if not value:
                # assign default value if it is None
                ns[name] = default
                logging.warning(
                    "{} is not specified, use default value {}.".format(name, default)
                )
            # duplicate the scalar once per encoder
            duplicated = [ns[name] for _ in range(args.num_encs)]
            logging.warning(
                "Type mismatch {}: Convert {} to {}.".format(name, ns[name], duplicated)
            )
            ns[name] = duplicated
    return args
| 34,453 | 32.646484 | 88 | py |
espnet | espnet-master/espnet/asr/asr_mix_utils.py | #!/usr/bin/env python3
"""
This script is used to provide utility functions designed for multi-speaker ASR.
Copyright 2017 Johns Hopkins University (Shinji Watanabe)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
Most functions can be directly used as in asr_utils.py:
CompareValueTrigger, restore_snapshot, adadelta_eps_decay, chainer_load,
torch_snapshot, torch_save, torch_resume, AttributeDict, get_model_conf.
"""
import copy
import logging
import os
from chainer.training import extension
from espnet.asr.asr_utils import parse_hypothesis
# * -------------------- chainer extension related -------------------- *
class PlotAttentionReport(extension.Extension):
    """Plot attention reporter.
    Args:
        att_vis_fn (espnet.nets.*_backend.e2e_asr.calculate_all_attentions):
            Function of attention visualization.
        data (list[tuple(str, dict[str, dict[str, Any]])]): List json utt key items.
        outdir (str): Directory to save figures.
        converter (espnet.asr.*_backend.asr.CustomConverter):
            CustomConverter object. Function to convert data.
        device (torch.device): The destination device to send tensor.
        reverse (bool): If True, input and output length are reversed.
    """
    def __init__(self, att_vis_fn, data, outdir, converter, device, reverse=False):
        """Initialize PlotAttentionReport."""
        self.att_vis_fn = att_vis_fn
        self.data = copy.deepcopy(data)
        self.outdir = outdir
        self.converter = converter
        self.device = device
        self.reverse = reverse
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)
    def __call__(self, trainer):
        """Plot and save imaged matrix of att_ws."""
        # One attention-weight batch per output speaker.
        att_ws_sd = self.get_attention_weights()
        for ns, att_ws in enumerate(att_ws_sd):
            for idx, att_w in enumerate(att_ws):
                filename = "%s/%s.ep.{.updater.epoch}.output%d.png" % (
                    self.outdir,
                    self.data[idx][0],
                    ns + 1,
                )
                att_w = self.get_attention_weight(idx, att_w, ns)
                self._plot_and_save_attention(att_w, filename.format(trainer))
    def log_attentions(self, logger, step):
        """Add image files of attention matrix to tensorboard."""
        att_ws_sd = self.get_attention_weights()
        for ns, att_ws in enumerate(att_ws_sd):
            for idx, att_w in enumerate(att_ws):
                att_w = self.get_attention_weight(idx, att_w, ns)
                plot = self.draw_attention_plot(att_w)
                logger.add_figure("%s" % (self.data[idx][0]), plot.gcf(), step)
                plot.clf()
    def get_attention_weights(self):
        """Return attention weights.
        Returns:
            arr_ws_sd (numpy.ndarray): attention weights. It's shape would be
                differ from backend. dtype=float
                * pytorch-> 1) multi-head case => (B, H, Lmax, Tmax). 2)
                    other case => (B, Lmax, Tmax).
                * chainer-> attention weights (B, Lmax, Tmax).
        """
        batch = self.converter([self.converter.transform(self.data)], self.device)
        att_ws_sd = self.att_vis_fn(*batch)
        return att_ws_sd
    def get_attention_weight(self, idx, att_w, spkr_idx):
        """Transform attention weight in regard to self.reverse."""
        if self.reverse:
            dec_len = int(self.data[idx][1]["input"][0]["shape"][0])
            enc_len = int(self.data[idx][1]["output"][spkr_idx]["shape"][0])
        else:
            dec_len = int(self.data[idx][1]["output"][spkr_idx]["shape"][0])
            enc_len = int(self.data[idx][1]["input"][0]["shape"][0])
        if len(att_w.shape) == 3:
            # multi-head attention: (head, dec, enc)
            att_w = att_w[:, :dec_len, :enc_len]
        else:
            att_w = att_w[:dec_len, :enc_len]
        return att_w
    def draw_attention_plot(self, att_w):
        """Visualize attention weights matrix.
        Args:
            att_w(Tensor): Attention weight matrix.
        Returns:
            matplotlib.pyplot: pyplot object with attention matrix image.
        """
        import matplotlib
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
        if len(att_w.shape) == 3:
            # one subplot per attention head
            for h, aw in enumerate(att_w, 1):
                plt.subplot(1, len(att_w), h)
                plt.imshow(aw, aspect="auto")
                plt.xlabel("Encoder Index")
                plt.ylabel("Decoder Index")
        else:
            plt.imshow(att_w, aspect="auto")
            plt.xlabel("Encoder Index")
            plt.ylabel("Decoder Index")
        plt.tight_layout()
        return plt
    def _plot_and_save_attention(self, att_w, filename):
        # Render the attention map and write it to disk.
        plt = self.draw_attention_plot(att_w)
        plt.savefig(filename)
        plt.close()
def add_results_to_json(js, nbest_hyps_sd, char_list):
    """Add N-best results of every speaker to json.

    Args:
        js (dict[str, Any]): Groundtruth utterance dict.
        nbest_hyps_sd (list[dict[str, Any]]):
            List of hypothesis for multi_speakers (# Utts x # Spkrs).
        char_list (list[str]): List of characters.

    Returns:
        dict[str, Any]: N-best results added utterance dict.
    """
    new_js = {"utt2spk": js["utt2spk"], "output": []}
    for spkr_idx, nbest_hyps in enumerate(nbest_hyps_sd):
        spkr_results = []
        for rank, hyp in enumerate(nbest_hyps, 1):
            rec_text, rec_token, rec_tokenid, score = parse_hypothesis(hyp, char_list)

            # Start from this speaker's ground-truth entry.
            out_dic = dict(js["output"][spkr_idx].items())
            out_dic["name"] += "[%d]" % rank
            out_dic["rec_text"] = rec_text
            out_dic["rec_token"] = rec_token
            out_dic["rec_tokenid"] = rec_tokenid
            out_dic["score"] = score
            spkr_results.append(out_dic)

            # show 1-best result
            if rank == 1:
                logging.info("groundtruth: %s" % out_dic["text"])
                logging.info("prediction : %s" % out_dic["rec_text"])
        new_js["output"].append(spkr_results)
    return new_js
| 6,407 | 33.451613 | 86 | py |
espnet | espnet-master/espnet/asr/pytorch_backend/asr_init.py | """Finetuning methods."""
import logging
import os
import re
from collections import OrderedDict
import torch
from espnet.asr.asr_utils import get_model_conf, torch_load
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.mt_interface import MTInterface
from espnet.nets.pytorch_backend.transducer.utils import custom_torch_load
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.dynamic_import import dynamic_import
def freeze_modules(model, modules):
    """Freeze model parameters according to modules list.

    Args:
        model (torch.nn.Module): Main model.
        modules (List): Specified module(s) to freeze.

    Return:
        model (torch.nn.Module) : Updated main model.
        model_params (filter): Filtered model parameters.
    """
    for name, param in model.named_parameters():
        if any(name.startswith(prefix) for prefix in modules):
            logging.warning(f"Freezing {name}. It will not be updated during training.")
            param.requires_grad = False
    # Only the still-trainable parameters are handed to the optimizer.
    model_params = filter(lambda p: p.requires_grad, model.parameters())
    return model, model_params
def transfer_verification(model_state_dict, partial_state_dict, modules):
    """Verify tuples (key, shape) for input model modules match specified modules.

    Args:
        model_state_dict (Dict) : Main model state dict.
        partial_state_dict (Dict): Pre-trained model state dict.
        modules (List): Specified module(s) to transfer.

    Return:
        (bool): Whether transfer learning is allowed.
    """

    def _select(state_dict):
        # Collect sorted (key, shape) pairs for entries under any requested module.
        pairs = [
            (key, value.shape)
            for key, value in state_dict.items()
            if any(key.startswith(m) for m in modules)
        ]
        return sorted(pairs, key=lambda x: (x[0], x[1]))

    model_modules = _select(model_state_dict)
    partial_modules = _select(partial_state_dict)

    module_match = model_modules == partial_modules
    if not module_match:
        logging.error(
            "Some specified modules from the pre-trained model "
            "don't match with the new model modules:"
        )
        logging.error(f"Pre-trained: {set(partial_modules) - set(model_modules)}")
        logging.error(f"New model: {set(model_modules) - set(partial_modules)}")
        exit(1)
    return module_match
def get_partial_state_dict(model_state_dict, modules):
    """Create state dict with specified modules matching input model modules.

    Args:
        model_state_dict (Dict): Pre-trained model state dict.
        modules (List): Specified module(s) to transfer.

    Return:
        new_state_dict (Dict): State dict with specified modules weights.

    """
    # Keep only entries whose key starts with one of the requested prefixes,
    # preserving the original iteration order.
    matched = (
        (key, value)
        for key, value in model_state_dict.items()
        if any(key.startswith(prefix) for prefix in modules)
    )
    return OrderedDict(matched)
def get_lm_state_dict(lm_state_dict):
    """Create compatible ASR decoder state dict from LM state dict.

    Args:
        lm_state_dict (Dict): Pre-trained LM state dict.

    Return:
        new_state_dict (Dict): State dict with compatible key names.

    """
    new_state_dict = OrderedDict()

    for key, value in list(lm_state_dict.items()):
        if key == "predictor.embed.weight":
            # The LM embedding maps directly onto the decoder embedding.
            new_state_dict["dec.embed.weight"] = value
        elif key.startswith("predictor.rnn."):
            # "predictor.rnn.<idx>.<param>" -> "dec.decoder.<idx>.<param>_l0"
            parts = key.split(".")
            new_state_dict["dec.decoder." + parts[2] + "." + parts[3] + "_l0"] = value

    return new_state_dict
def filter_modules(model_state_dict, modules):
    """Filter non-matched modules in model state dict.

    Args:
        model_state_dict (Dict): Pre-trained model state dict.
        modules (List): Specified module(s) to transfer.

    Return:
        new_mods (List): Filtered module list.

    """
    available_keys = list(model_state_dict.keys())

    new_mods = []
    incorrect_mods = []
    for mod in modules:
        # A module prefix is valid if at least one state-dict key starts with it.
        if any(key.startswith(mod) for key in available_keys):
            new_mods.append(mod)
        else:
            incorrect_mods.append(mod)

    if incorrect_mods:
        logging.error(
            "Specified module(s) don't match or (partially match) "
            f"available modules in model. You specified: {incorrect_mods}."
        )
        logging.error("The existing modules in model are:")
        logging.error(f"{available_keys}")
        exit(1)

    return new_mods
def create_transducer_compatible_state_dict(
    model_state_dict, encoder_type, encoder_units
):
    """Create a compatible transducer model state dict for transfer learning.

    If RNN encoder modules from a non-Transducer model are found in
    the pre-trained model state dict, the corresponding modules keys are
    renamed for compatibility.

    Args:
        model_state_dict (Dict): Pre-trained model state dict
        encoder_type (str): Type of pre-trained encoder.
        encoder_units (int): Number of encoder units in pre-trained model.

    Returns:
        new_state_dict (Dict): Transducer compatible pre-trained model state dict.

    """
    # Only plain (B)LSTM/GRU encoders need renaming; projection encoders
    # ("...p") and non-RNN encoders are passed through untouched.
    is_plain_rnn = not encoder_type.endswith("p") and encoder_type.endswith(
        ("lstm", "gru")
    )
    if not is_plain_rnn:
        return model_state_dict

    rnn_key_name = "birnn" if "b" in encoder_type else "rnn"

    new_state_dict = OrderedDict()
    for key, value in list(model_state_dict.items()):
        if "l_last" in key or "nbrnn" in key:
            if "nbrnn" in key:
                # "...nbrnn..._l<N>" -> "...(bi)rnn<N>..._l0"
                layer_id = re.search("_l([0-9]+)", key).group(1)
                key = re.sub(
                    "_l([0-9]+)", "_l0", key.replace("nbrnn", rnn_key_name + layer_id)
                )
            if value.size(-1) == (encoder_units * 2):
                # Fold the doubled (bidirectional) last dimension back down
                # to encoder_units by summing the two halves.
                value = value[:, :encoder_units] + value[:, encoder_units:]
        new_state_dict[key] = value

    return new_state_dict
def load_trained_model(model_path, training=True):
    """Load the trained model for recognition.

    Args:
        model_path (str): Path to model.***.best
        training (bool): Training mode specification for transducer model.

    Returns:
        model (torch.nn.Module): Trained model.
        train_args (Namespace): Trained model arguments.

    """
    conf_path = os.path.join(os.path.dirname(model_path), "model.json")
    idim, odim, train_args = get_model_conf(model_path, conf_path)

    logging.info(f"Reading model parameters from {model_path}")

    # Older checkpoints may not record a model_module; fall back to the
    # default attention/CTC E2E class.
    model_module = getattr(
        train_args, "model_module", "espnet.nets.pytorch_backend.e2e_asr:E2E"
    )

    # CTC Loss is not needed, default to builtin to prevent import errors
    if hasattr(train_args, "ctc_type"):
        train_args.ctc_type = "builtin"

    model_class = dynamic_import(model_module)

    if "transducer" in model_module:
        # Transducer checkpoints have a dedicated loader and a training flag.
        model = model_class(idim, odim, train_args, training=training)
        custom_torch_load(model_path, model, training=training)
    else:
        model = model_class(idim, odim, train_args)
        torch_load(model_path, model)

    return model, train_args
def get_trained_model_state_dict(model_path, new_is_transducer):
    """Extract the trained model state dict for pre-initialization.

    Args:
        model_path (str): Path to trained model.
        new_is_transducer (bool): Whether the new model is Transducer-based.

    Return:
        (Dict): Trained model state dict.

    """
    logging.info(f"Reading model parameters from {model_path}")

    if "rnnlm" in model_path:
        # LM checkpoints need their keys remapped to the ASR decoder layout.
        return get_lm_state_dict(torch.load(model_path))

    conf_path = os.path.join(os.path.dirname(model_path), "model.json")
    idim, odim, args = get_model_conf(model_path, conf_path)

    # Older checkpoints may not record a model_module; fall back to the
    # default attention/CTC E2E class.
    model_module = getattr(
        args, "model_module", "espnet.nets.pytorch_backend.e2e_asr:E2E"
    )
    model = dynamic_import(model_module)(idim, odim, args)
    torch_load(model_path, model)

    assert isinstance(model, (MTInterface, ASRInterface, TTSInterface))

    if new_is_transducer and "transducer" not in args.model_module:
        # Rename RNN encoder keys so they can seed a Transducer encoder.
        return create_transducer_compatible_state_dict(
            model.state_dict(), args.etype, args.eunits
        )

    return model.state_dict()
def load_trained_modules(idim, odim, args, interface=ASRInterface):
    """Load ASR/MT/TTS model with pre-trained weights for specified modules.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        args (Namespace): Model arguments.
        interface (ASRInterface|MTInterface|TTSInterface): Model interface.

    Return:
        main_model (torch.nn.Module): Model with pre-initialized weights.

    """

    def print_new_keys(state_dict, modules, model_path):
        # Log every parameter about to be overridden by the pre-trained weights.
        logging.info(f"Loading {modules} from model: {model_path}")
        for k in state_dict.keys():
            logging.warning(f"Overriding module {k}")

    main_model = dynamic_import(args.model_module)(idim, odim, args)
    assert isinstance(main_model, interface)

    main_state_dict = main_model.state_dict()
    logging.warning("Model(s) found for pre-initialization.")

    # Encoder and decoder sides are initialized independently, each from its
    # own checkpoint and module prefix list.
    pretrain_specs = (
        (args.enc_init, args.enc_init_mods),
        (args.dec_init, args.dec_init_mods),
    )
    for model_path, modules in pretrain_specs:
        if model_path is None:
            continue
        if not os.path.isfile(model_path):
            logging.error(f"Specified model was not found: {model_path}")
            exit(1)
        model_state_dict = get_trained_model_state_dict(
            model_path, "transducer" in args.model_module
        )
        modules = filter_modules(model_state_dict, modules)
        partial_state_dict = get_partial_state_dict(model_state_dict, modules)
        if partial_state_dict and transfer_verification(
            main_state_dict, partial_state_dict, modules
        ):
            print_new_keys(partial_state_dict, modules, model_path)
            main_state_dict.update(partial_state_dict)

    main_model.load_state_dict(main_state_dict)

    return main_model
| 10,707 | 30.40176 | 87 | py |
espnet | espnet-master/espnet/asr/pytorch_backend/recog.py | """V2 backend for `asr_recog.py` using py:class:`espnet.nets.beam_search.BeamSearch`."""
import json
import logging
import torch
from packaging.version import parse as V
from espnet.asr.asr_utils import add_results_to_json, get_model_conf, torch_load
from espnet.asr.pytorch_backend.asr import load_trained_model
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.beam_search import BeamSearch
from espnet.nets.lm_interface import dynamic_import_lm
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.io_utils import LoadInputsAndTargets
def recog_v2(args):
    """Decode with custom models that implements ScorerInterface.

    Notes:
        The previous backend espnet.asr.pytorch_backend.asr.recog
        only supports E2E and RNNLM

    Args:
        args (namespace): The program arguments.
        See py:func:`espnet.bin.asr_recog.get_parser` for details

    """
    logging.warning("experimental API for custom LMs is selected by --api v2")
    if args.batchsize > 1:
        raise NotImplementedError("multi-utt batch decoding is not implemented")
    if args.streaming_mode is not None:
        raise NotImplementedError("streaming mode is not implemented")
    if args.word_rnnlm:
        raise NotImplementedError("word LM is not implemented")

    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)

    # Module types dynamic quantization is applied to (Linear by default).
    if args.quantize_config is not None:
        q_config = {getattr(torch.nn, q) for q in args.quantize_config}
    else:
        q_config = {torch.nn.Linear}

    if args.quantize_asr_model:
        logging.info("Use quantized asr model for decoding")

        # See https://github.com/espnet/espnet/pull/3616 for more information.
        if (
            V(torch.__version__) < V("1.4.0")
            and "lstm" in train_args.etype
            and torch.nn.LSTM in q_config
        ):
            raise ValueError(
                "Quantized LSTM in ESPnet is only supported with torch 1.4+."
            )

        if args.quantize_dtype == "float16" and V(torch.__version__) < V("1.5.0"):
            raise ValueError(
                "float16 dtype for dynamic quantization is not supported with torch "
                "version < 1.5.0. Switching to qint8 dtype instead."
            )

        dtype = getattr(torch, args.quantize_dtype)
        model = torch.quantization.quantize_dynamic(model, q_config, dtype=dtype)

    model.eval()

    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )

    if args.rnnlm:
        lm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        # NOTE: for a compatibility with less than 0.5.0 version models
        lm_model_module = getattr(lm_args, "model_module", "default")
        lm_class = dynamic_import_lm(lm_model_module, lm_args.backend)
        lm = lm_class(len(train_args.char_list), lm_args)
        torch_load(args.rnnlm, lm)
        if args.quantize_lm_model:
            logging.info("Use quantized lm model")
            dtype = getattr(torch, args.quantize_dtype)
            lm = torch.quantization.quantize_dynamic(lm, q_config, dtype=dtype)
        lm.eval()
    else:
        lm = None

    if args.ngram_model:
        from espnet.nets.scorers.ngram import NgramFullScorer, NgramPartScorer

        if args.ngram_scorer == "full":
            ngram = NgramFullScorer(args.ngram_model, train_args.char_list)
        else:
            ngram = NgramPartScorer(args.ngram_model, train_args.char_list)
    else:
        ngram = None

    # Assemble the scorers and their interpolation weights for beam search.
    scorers = model.scorers()
    scorers["lm"] = lm
    scorers["ngram"] = ngram
    scorers["length_bonus"] = LengthBonus(len(train_args.char_list))
    weights = dict(
        decoder=1.0 - args.ctc_weight,
        ctc=args.ctc_weight,
        lm=args.lm_weight,
        ngram=args.ngram_weight,
        length_bonus=args.penalty,
    )
    beam_search = BeamSearch(
        beam_size=args.beam_size,
        vocab_size=len(train_args.char_list),
        weights=weights,
        scorers=scorers,
        sos=model.sos,
        eos=model.eos,
        token_list=train_args.char_list,
        pre_beam_score_key=None if args.ctc_weight == 1.0 else "full",
    )

    # TODO(karita): make all scorers batchfied
    if args.batchsize == 1:
        non_batch = [
            k
            for k, v in beam_search.full_scorers.items()
            if not isinstance(v, BatchScorerInterface)
        ]
        if len(non_batch) == 0:
            beam_search.__class__ = BatchBeamSearch
            logging.info("BatchBeamSearch implementation is selected.")
        else:
            logging.warning(
                f"As non-batch scorers {non_batch} are found, "
                f"fall back to non-batch implementation."
            )

    if args.ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    if args.ngpu == 1:
        device = "cuda"
    else:
        device = "cpu"
    dtype = getattr(torch, args.dtype)
    logging.info(f"Decoding device={device}, dtype={dtype}")
    model.to(device=device, dtype=dtype).eval()
    beam_search.to(device=device, dtype=dtype).eval()

    # read json data
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]
    new_js = {}
    with torch.no_grad():
        for idx, name in enumerate(js.keys(), 1):
            # BUGFIX: pass the utterance id as a lazy %s argument instead of
            # concatenating it into the format string; an id containing "%"
            # would otherwise corrupt the logging format.
            logging.info("(%d/%d) decoding %s", idx, len(js.keys()), name)
            batch = [(name, js[name])]
            feat = load_inputs_and_targets(batch)[0][0]
            enc = model.encode(torch.as_tensor(feat).to(device=device, dtype=dtype))
            nbest_hyps = beam_search(
                x=enc, maxlenratio=args.maxlenratio, minlenratio=args.minlenratio
            )
            nbest_hyps = [
                h.asdict() for h in nbest_hyps[: min(len(nbest_hyps), args.nbest)]
            ]
            new_js[name] = add_results_to_json(
                js[name], nbest_hyps, train_args.char_list
            )

    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
| 6,635 | 35.262295 | 88 | py |
espnet | espnet-master/espnet/asr/pytorch_backend/asr.py | # Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech recognition task."""
import copy
import itertools
import json
import logging
import math
import os
import numpy as np
import torch
import torch.distributed as dist
from chainer import reporter as reporter_module
from chainer import training
from chainer.training import extensions
from chainer.training.updater import StandardUpdater
from packaging.version import parse as V
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.parallel import data_parallel
from torch.utils.data.distributed import DistributedSampler
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.asr.asr_utils import (
CompareValueTrigger,
adadelta_eps_decay,
add_results_to_json,
format_mulenc_args,
get_model_conf,
plot_spectrogram,
restore_snapshot,
snapshot_object,
torch_load,
torch_resume,
torch_snapshot,
)
from espnet.asr.pytorch_backend.asr_init import (
freeze_modules,
load_trained_model,
load_trained_modules,
)
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.e2e_asr import pad_list
from espnet.nets.pytorch_backend.streaming.segment import SegmentStreamingE2E
from espnet.nets.pytorch_backend.streaming.window import WindowStreamingE2E
from espnet.transform.spectrogram import IStft
from espnet.transform.transformation import Transformation
from espnet.utils.cli_writers import file_writer_helper
from espnet.utils.dataset import ChainerDataLoader, Transform, TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop, set_early_stop
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
class DistributedDictSummary:
    """Distributed version of DictSummary.

    This implementation is based on an official implementation below.
    https://github.com/chainer/chainer/blob/v6.7.0/chainer/reporter.py

    To gather stats information from all processes and calculate exact mean values,
    this class is running AllReduce operation in compute_mean().
    """

    def __init__(self, device=None):
        """Initialize the summary.

        Args:
            device: Device on which the AllReduce buffers are allocated;
                should be the process' own GPU device under NCCL.
        """
        # Per-process accumulator; cross-process reduction is deferred
        # until compute_mean() is called.
        self._local_summary = reporter_module.DictSummary()
        self._summary_names = None
        self._device = device

    def add(self, d):
        """Add an observation dict to the local summary."""
        if self._summary_names is None:
            # This assumes that `d` always includes the same name list,
            # and the name list is identical across all processes.
            self._summary_names = frozenset(d.keys())
        return self._local_summary.add(d)

    def compute_mean(self):
        """Return the exact per-name mean across all processes.

        Returns:
            dict: Mapping from observation name to its global mean; names
            never observed locally are omitted (same as DictSummary).
        """
        # Even if `self._local_summary` doesn't have a few keys
        # due to invalid observations like NaN, zero, etc,
        # `raw_values` can properly accumulate these entries
        # thanks to zero as an initial value.
        raw_values = {name: [0.0, 0] for name in self._summary_names}
        for name, summary in self._local_summary._summaries.items():
            raw_values[name][0] += summary._x
            raw_values[name][1] += summary._n
        # Flatten to fixed-order lists (sorted by name) so every process
        # reduces identically shaped tensors in the same element order.
        sum_list = []
        count_list = []
        for name in sorted(self._summary_names):
            sum_list.append(raw_values[name][0])
            count_list.append(raw_values[name][1])
        sum_tensor = torch.tensor(sum_list, device=self._device)
        count_tensor = torch.tensor(count_list, device=self._device)
        # AllReduce both of sum and count in parallel.
        sum_handle = dist.all_reduce(sum_tensor, async_op=True)
        count_handle = dist.all_reduce(count_tensor, async_op=True)
        sum_handle.wait()
        count_handle.wait()
        # Once both ops are enqueued, putting an op to calculate actual average value.
        mean_tensor = sum_tensor / count_tensor
        result_dict = {}
        for idx, name in enumerate(sorted(self._summary_names)):
            if name not in self._local_summary._summaries:
                # If an entry with a target name doesn't exist in `self._local_summary`,
                # this entry must be removed from `result_dict`.
                # This behavior is the same with original DictSummary.
                continue
            result_dict[name] = mean_tensor[idx].item()
        return result_dict
class CustomEvaluator(BaseEvaluator):
    """Custom Evaluator for Pytorch.

    Args:
        model (torch.nn.Module): The model to evaluate.
        iterator (chainer.dataset.Iterator) : The train iterator.
        target (link | dict[str, link]) :Link object or a dictionary of
            links to evaluate. If this is just a link object, the link is
            registered by the name ``'main'``.
        device (torch.device): The device used.
        ngpu (int): The number of GPUs.
        use_ddp (bool): The flag to use DDP.

    """

    def __init__(self, model, iterator, target, device, ngpu=None, use_ddp=False):
        """Initialize the evaluator."""
        super(CustomEvaluator, self).__init__(iterator, target)
        self.model = model
        self.device = device
        # When ngpu is not given explicitly, infer it from the device type:
        # CPU -> 0 GPUs, anything else -> assume a single GPU.
        if ngpu is not None:
            self.ngpu = ngpu
        elif device.type == "cpu":
            self.ngpu = 0
        else:
            self.ngpu = 1
        self.use_ddp = use_ddp

    # The core part of the update routine can be customized by overriding
    def evaluate(self):
        """Main evaluate routine for CustomEvaluator."""
        iterator = self._iterators["main"]

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, "reset"):
            iterator.reset()
            it = iterator
        else:
            # Iterators without reset() are shallow-copied so the original
            # iterator's position is left untouched.
            it = copy.copy(iterator)

        # Under DDP each worker only sees a shard of the data, so the stats
        # must be all-reduced to obtain exact global means.
        if self.use_ddp:
            summary = DistributedDictSummary(self.device)
        else:
            summary = reporter_module.DictSummary()

        self.model.eval()
        with torch.no_grad():
            for batch in it:
                x = _recursive_to(batch, self.device)
                observation = {}
                with reporter_module.report_scope(observation):
                    # read scp files
                    # x: original json with loaded features
                    # will be converted to chainer variable later
                    if self.ngpu == 0 or self.use_ddp:
                        self.model(*x)
                    else:
                        # apex does not support torch.nn.DataParallel
                        data_parallel(self.model, x, range(self.ngpu))
                summary.add(observation)
        self.model.train()

        return summary.compute_mean()
class CustomUpdater(StandardUpdater):
    """Custom Updater for Pytorch.

    Args:
        model (torch.nn.Module): The model to update.
        grad_clip_threshold (float): The gradient clipping value to use.
        train_iter (chainer.dataset.Iterator): The training iterator.
        optimizer (torch.optim.optimizer): The training optimizer.
        device (torch.device): The device to use.
        ngpu (int): The number of gpus to use.
        use_apex (bool): The flag to use Apex in backprop.
        use_ddp (bool): The flag to use DDP for multi-GPU training.

    """

    def __init__(
        self,
        model,
        grad_clip_threshold,
        train_iter,
        optimizer,
        device,
        ngpu,
        grad_noise=False,
        accum_grad=1,
        use_apex=False,
        use_ddp=False,
    ):
        """Initialize the updater and its gradient-accumulation counters."""
        super(CustomUpdater, self).__init__(train_iter, optimizer)
        self.model = model
        self.grad_clip_threshold = grad_clip_threshold
        self.device = device
        self.ngpu = ngpu
        self.accum_grad = accum_grad
        # Number of forward/backward passes since the last optimizer step.
        self.forward_count = 0
        self.grad_noise = grad_noise
        self.iteration = 0
        self.use_apex = use_apex
        self.use_ddp = use_ddp

    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        """Main update routine of the CustomUpdater."""
        # When we pass one iterator and optimizer to StandardUpdater.__init__,
        # they are automatically named 'main'.
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")
        epoch = train_iter.epoch

        # Get the next batch (a list of json files)
        batch = train_iter.next()
        # self.iteration += 1 # Increase may result in early report,
        # which is done in other place automatically.
        x = _recursive_to(batch, self.device)
        is_new_epoch = train_iter.epoch != epoch
        # When the last minibatch in the current epoch is given,
        # gradient accumulation is turned off in order to evaluate the model
        # on the validation set in every epoch.
        # see details in https://github.com/espnet/espnet/pull/1388

        # Compute the loss at this time step and accumulate it
        if self.ngpu == 0 or self.use_ddp:
            loss = self.model(*x).mean() / self.accum_grad
        else:
            # apex does not support torch.nn.DataParallel
            loss = (
                data_parallel(self.model, x, range(self.ngpu)).mean() / self.accum_grad
            )
        if self.use_apex:
            from apex import amp

            # NOTE: for a compatibility with noam optimizer
            opt = optimizer.optimizer if hasattr(optimizer, "optimizer") else optimizer
            with amp.scale_loss(loss, opt) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        # gradient noise injection
        if self.grad_noise:
            from espnet.asr.asr_utils import add_gradient_noise

            add_gradient_noise(
                self.model, self.iteration, duration=100, eta=1.0, scale_factor=0.55
            )

        # update parameters
        self.forward_count += 1
        # Keep accumulating gradients until accum_grad batches are seen,
        # unless the epoch just ended (which forces an optimizer step).
        if not is_new_epoch and self.forward_count != self.accum_grad:
            return
        self.forward_count = 0
        # compute the gradient norm to check if it is normal or not
        grad_norm = torch.nn.utils.clip_grad_norm_(
            self.model.parameters(), self.grad_clip_threshold
        )
        if self.use_ddp:
            # NOTE: assuming gradients have not been reduced yet here.
            # Try to gather the norm of gradients from all workers,
            # and calculate average grad norm.
            dist.all_reduce(grad_norm)
        logging.info("grad norm={}".format(grad_norm))
        if math.isnan(grad_norm):
            logging.warning("grad norm is nan. Do not update model.")
        else:
            optimizer.step()
        optimizer.zero_grad()

    def update(self):
        """Run one update step and advance the iteration counter."""
        self.update_core()
        # #iterations with accum_grad > 1
        # Ref.: https://github.com/espnet/espnet/issues/777
        if self.forward_count == 0:
            self.iteration += 1
class CustomConverter(object):
    """Custom batch converter for Pytorch.

    Args:
        subsampling_factor (int): The subsampling factor.
        dtype (torch.dtype): Data type to convert.

    """

    def __init__(self, subsampling_factor=1, dtype=torch.float32):
        """Construct a CustomConverter object."""
        self.subsampling_factor = subsampling_factor
        self.ignore_id = -1
        self.dtype = dtype

    def __call__(self, batch, device=torch.device("cpu")):
        """Transform a batch and send it to a device.

        Args:
            batch (list): The batch to transform.
            device (torch.device): The device to send to.

        Returns:
            tuple(torch.Tensor, torch.Tensor, torch.Tensor)

        """
        # A batch is always a single (inputs, targets) pair wrapped in a list.
        assert len(batch) == 1
        xs, ys = batch[0]

        # Subsample along the time axis when requested.
        if self.subsampling_factor > 1:
            xs = [feat[:: self.subsampling_factor, :] for feat in xs]

        # Lengths of the (possibly subsampled) input sequences.
        ilens = torch.from_numpy(np.array([feat.shape[0] for feat in xs])).to(device)

        if xs[0].dtype.kind == "c":
            # Complex features: pad real and imaginary parts separately.
            # Note(kamo):
            # {'real': ..., 'imag': ...} will be changed to ComplexTensor in E2E.
            # Don't create ComplexTensor and give it E2E here
            # because torch.nn.DataParellel can't handle it.
            xs_pad = {
                part: pad_list(
                    [torch.from_numpy(getattr(feat, part)).float() for feat in xs], 0
                ).to(device, dtype=self.dtype)
                for part in ("real", "imag")
            }
        else:
            xs_pad = pad_list([torch.from_numpy(feat).float() for feat in xs], 0).to(
                device, dtype=self.dtype
            )

        # NOTE: this is for multi-output (e.g., speech translation)
        ys_pad = pad_list(
            [
                torch.from_numpy(
                    np.array(y[0][:]) if isinstance(y, tuple) else y
                ).long()
                for y in ys
            ],
            self.ignore_id,
        ).to(device)

        return xs_pad, ilens, ys_pad
class CustomConverterMulEnc(object):
    """Custom batch converter for Pytorch in multi-encoder case.

    Args:
        subsampling_factors (list): List of subsampling factors for each encoder.
        dtype (torch.dtype): Data type to convert.

    """

    def __init__(self, subsampling_factors=None, dtype=torch.float32):
        """Initialize the converter.

        Note:
            ``subsampling_factors`` defaults to ``[1, 1]``; a ``None``
            sentinel replaces the previous mutable default argument so
            instances never share a single list object.
        """
        if subsampling_factors is None:
            subsampling_factors = [1, 1]
        self.subsampling_factors = subsampling_factors
        self.ignore_id = -1
        self.dtype = dtype
        # One encoder per subsampling factor.
        self.num_encs = len(subsampling_factors)

    def __call__(self, batch, device=torch.device("cpu")):
        """Transform a batch and send it to a device.

        Args:
            batch (list): The batch to transform.
            device (torch.device): The device to send to.

        Returns:
            tuple( list(torch.Tensor), list(torch.Tensor), torch.Tensor)

        """
        # batch should be located in list
        assert len(batch) == 1
        xs_list = batch[0][: self.num_encs]
        ys = batch[0][-1]

        # perform subsampling (skipped entirely when every factor is 1)
        if np.sum(self.subsampling_factors) > self.num_encs:
            xs_list = [
                [x[:: self.subsampling_factors[i], :] for x in xs_list[i]]
                for i in range(self.num_encs)
            ]

        # get batch of lengths of input sequences
        ilens_list = [
            np.array([x.shape[0] for x in xs_list[i]]) for i in range(self.num_encs)
        ]

        # perform padding and convert to tensor
        # currently only support real number
        xs_list_pad = [
            pad_list([torch.from_numpy(x).float() for x in xs_list[i]], 0).to(
                device, dtype=self.dtype
            )
            for i in range(self.num_encs)
        ]

        ilens_list = [
            torch.from_numpy(ilens_list[i]).to(device) for i in range(self.num_encs)
        ]
        # NOTE: this is for multi-task learning (e.g., speech translation)
        ys_pad = pad_list(
            [
                torch.from_numpy(np.array(y[0]) if isinstance(y, tuple) else y).long()
                for y in ys
            ],
            self.ignore_id,
        ).to(device)

        return xs_list_pad, ilens_list, ys_pad
def is_writable_process(args, worldsize, rank, localrank):
    """Return True if this process may write artifacts (non-DDP, or DDP rank 0)."""
    if not args.use_ddp:
        return True
    return rank == 0
def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
if args.use_ddp:
# initialize distributed environment.
# NOTE: current implementation supports
# only single-node training.
# get process information.
worldsize = os.environ.get("WORLD_SIZE", None)
assert worldsize is not None
worldsize = int(worldsize)
assert worldsize == args.ngpu
rank = os.environ.get("RANK", None)
assert rank is not None
rank = int(rank)
localrank = os.environ.get("LOCAL_RANK", None)
assert localrank is not None
localrank = int(localrank)
dist.init_process_group(
backend="nccl",
init_method="env://",
rank=rank,
world_size=worldsize,
)
if rank != 0:
# Disable all logs in non-master process.
logging.disable()
else:
worldsize = 1
rank = 0
localrank = 0
set_deterministic_pytorch(args)
if args.num_encs > 1:
args = format_mulenc_args(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning("cuda is not available")
# get input and output dimension info
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
utts = list(valid_json.keys())
idim_list = [
int(valid_json[utts[0]]["input"][i]["shape"][-1]) for i in range(args.num_encs)
]
odim = int(valid_json[utts[0]]["output"][0]["shape"][-1])
for i in range(args.num_encs):
logging.info("stream{}: input dims : {}".format(i + 1, idim_list[i]))
logging.info("#output dims: " + str(odim))
# specify attention, CTC, hybrid mode
if "transducer" in args.model_module:
if (
getattr(args, "etype", False) == "custom"
or getattr(args, "dtype", False) == "custom"
):
mtl_mode = "custom_transducer"
else:
mtl_mode = "transducer"
logging.info("Pure transducer mode")
elif args.mtlalpha == 1.0:
mtl_mode = "ctc"
logging.info("Pure CTC mode")
elif args.mtlalpha == 0.0:
mtl_mode = "att"
logging.info("Pure attention mode")
else:
mtl_mode = "mtl"
logging.info("Multitask learning mode")
if (args.enc_init is not None or args.dec_init is not None) and args.num_encs == 1:
model = load_trained_modules(idim_list[0], odim, args)
else:
model_class = dynamic_import(args.model_module)
model = model_class(
idim_list[0] if args.num_encs == 1 else idim_list, odim, args
)
assert isinstance(model, ASRInterface)
total_subsampling_factor = model.get_total_subsampling_factor()
logging.info(
" Total parameter of the model = "
+ str(sum(p.numel() for p in model.parameters()))
)
if args.rnnlm is not None:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(len(args.char_list), rnnlm_args.layer, rnnlm_args.unit)
)
torch_load(args.rnnlm, rnnlm)
model.rnnlm = rnnlm
if is_writable_process(args, worldsize, rank, localrank):
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + "/model.json"
with open(model_conf, "wb") as f:
logging.info("writing a model config file to " + model_conf)
f.write(
json.dumps(
(
idim_list[0] if args.num_encs == 1 else idim_list,
odim,
vars(args),
),
indent=4,
ensure_ascii=False,
sort_keys=True,
).encode("utf_8")
)
for key in sorted(vars(args).keys()):
logging.info("ARGS: " + key + ": " + str(vars(args)[key]))
reporter = model.reporter
if args.use_ddp:
if args.num_encs > 1:
# TODO(ruizhili): implement data parallel for multi-encoder setup.
raise NotImplementedError(
"Data parallel is not supported for multi-encoder setup."
)
else:
# check the use of multi-gpu
if args.ngpu > 1:
if args.batch_size != 0:
logging.warning(
"batch size is automatically increased (%d -> %d)"
% (args.batch_size, args.batch_size * args.ngpu)
)
args.batch_size *= args.ngpu
if args.num_encs > 1:
# TODO(ruizhili): implement data parallel for multi-encoder setup.
raise NotImplementedError(
"Data parallel is not supported for multi-encoder setup."
)
# set torch device
if args.use_ddp:
device = torch.device(f"cuda:{localrank}")
else:
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
if args.freeze_mods:
model, model_params = freeze_modules(model, args.freeze_mods)
else:
model_params = model.parameters()
logging.warning(
"num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
sum(p.numel() for p in model.parameters() if p.requires_grad)
* 100.0
/ sum(p.numel() for p in model.parameters()),
)
)
# Setup an optimizer
if args.opt == "adadelta":
optimizer = torch.optim.Adadelta(
model_params, rho=0.95, eps=args.eps, weight_decay=args.weight_decay
)
elif args.opt == "adam":
optimizer = torch.optim.Adam(model_params, weight_decay=args.weight_decay)
elif args.opt == "noam":
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
if "transducer" in mtl_mode:
if args.noam_adim > 0:
optimizer = get_std_opt(
model_params,
args.noam_adim,
args.optimizer_warmup_steps,
args.noam_lr,
)
else:
raise ValueError("noam-adim option should be set to use Noam scheduler")
else:
optimizer = get_std_opt(
model_params,
args.adim,
args.transformer_warmup_steps,
args.transformer_lr,
)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# setup apex.amp
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(
f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux"
)
raise e
if args.opt == "noam":
model, optimizer.optimizer = amp.initialize(
model, optimizer.optimizer, opt_level=args.train_dtype
)
else:
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.train_dtype
)
use_apex = True
from espnet.nets.pytorch_backend.ctc import CTC
amp.register_float_function(CTC, "loss_fn")
amp.init()
logging.warning("register ctc as float function")
else:
use_apex = False
# FIXME: TOO DIRTY HACK
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
# Setup a converter
if args.num_encs == 1:
converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)
else:
converter = CustomConverterMulEnc(
[i[0] for i in model.subsample_list], dtype=dtype
)
# read json data
with open(args.train_json, "rb") as f:
train_json = json.load(f)["utts"]
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
# make minibatch list (variable length)
if args.use_ddp:
# When using DDP, minimum batch size for each process is 1.
min_batch_size = 1
else:
min_batch_size = args.ngpu if args.ngpu > 1 else 1
train = make_batchset(
train_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=min_batch_size,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
valid = make_batchset(
valid_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=min_batch_size,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
load_tr = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": True}, # Switch the mode of preprocessing
)
load_cv = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": False}, # Switch the mode of preprocessing
)
# hack to make batchsize argument as 1
# actual bathsize is included in a list
# default collate function converts numpy array to pytorch tensor
# we used an empty collate function instead which returns list
train_ds = TransformDataset(train, Transform(converter, load_tr))
val_ds = TransformDataset(valid, Transform(converter, load_cv))
train_sampler = None
val_sampler = None
shuffle = not use_sortagrad
if args.use_ddp:
train_sampler = DistributedSampler(train_ds)
val_sampler = DistributedSampler(val_ds)
shuffle = False
train_iter = ChainerDataLoader(
dataset=train_ds,
batch_size=1,
num_workers=args.n_iter_processes,
shuffle=shuffle,
sampler=train_sampler,
collate_fn=ChainerDataLoader.get_first_element,
)
valid_iter = ChainerDataLoader(
dataset=val_ds,
batch_size=1,
shuffle=False,
sampler=val_sampler,
collate_fn=ChainerDataLoader.get_first_element,
num_workers=args.n_iter_processes,
)
# Set up a trainer
if args.use_ddp:
model = DDP(model, device_ids=[localrank])
updater = CustomUpdater(
model,
args.grad_clip,
{"main": train_iter},
optimizer,
device,
args.ngpu,
args.grad_noise,
args.accum_grad,
use_apex=use_apex,
use_ddp=args.use_ddp,
)
trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)
# call DistributedSampler.set_epoch at begining of each epoch.
if args.use_ddp:
        @training.make_extension(trigger=(1, "epoch"))
        def set_epoch_to_distributed_sampler(trainer):
            """Propagate the trainer's epoch counter to both DistributedSamplers.

            DistributedSampler derives its shuffling order from (seed, epoch),
            so without this call every epoch would reuse the same permutation.
            """
            # NOTE: at the first time when this fuction is called,
            # `sampler.epoch` should be 0, and a given trainer object
            # has 1 as a `trainer.updater.epoch`.
            # This means that, in the first epoch,
            # dataset is shuffled with random seed and a value 0,
            # and, in the second epoch, dataset is shuffled
            # with the same random seed and a value 1.
            #
            # See a link below for more details.
            # https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
            train_sampler.set_epoch(trainer.updater.epoch)
            val_sampler.set_epoch(trainer.updater.epoch)
trainer.extend(set_epoch_to_distributed_sampler)
if use_sortagrad:
trainer.extend(
ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
)
# Resume from a snapshot
if args.resume:
logging.info("resumed from %s" % args.resume)
torch_resume(args.resume, trainer)
# Evaluate the model with the test dataset for each epoch
if args.save_interval_iters > 0:
trainer.extend(
CustomEvaluator(
model, {"main": valid_iter}, reporter, device, args.ngpu, args.use_ddp
),
trigger=(args.save_interval_iters, "iteration"),
)
else:
trainer.extend(
CustomEvaluator(
model, {"main": valid_iter}, reporter, device, args.ngpu, args.use_ddp
)
)
if is_writable_process(args, worldsize, rank, localrank):
# Save attention weight each epoch
is_attn_plot = (
"transformer" in args.model_module
or "conformer" in args.model_module
or mtl_mode in ["att", "mtl", "custom_transducer"]
)
if args.num_save_attention > 0 and is_attn_plot:
data = sorted(
list(valid_json.items())[: args.num_save_attention],
key=lambda x: int(x[1]["input"][0]["shape"][1]),
reverse=True,
)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn,
data,
args.outdir + "/att_ws",
converter=converter,
transform=load_cv,
device=device,
subsampling_factor=total_subsampling_factor,
)
trainer.extend(att_reporter, trigger=(1, "epoch"))
else:
att_reporter = None
# Save CTC prob at each epoch
if mtl_mode in ["ctc", "mtl"] and args.num_save_ctc > 0:
# NOTE: sort it by output lengths
data = sorted(
list(valid_json.items())[: args.num_save_ctc],
key=lambda x: int(x[1]["output"][0]["shape"][0]),
reverse=True,
)
if hasattr(model, "module"):
ctc_vis_fn = model.module.calculate_all_ctc_probs
plot_class = model.module.ctc_plot_class
else:
ctc_vis_fn = model.calculate_all_ctc_probs
plot_class = model.ctc_plot_class
ctc_reporter = plot_class(
ctc_vis_fn,
data,
args.outdir + "/ctc_prob",
converter=converter,
transform=load_cv,
device=device,
subsampling_factor=total_subsampling_factor,
)
trainer.extend(ctc_reporter, trigger=(1, "epoch"))
else:
ctc_reporter = None
# Make a plot for training and validation values
if args.num_encs > 1:
report_keys_loss_ctc = [
"main/loss_ctc{}".format(i + 1) for i in range(model.num_encs)
] + [
"validation/main/loss_ctc{}".format(i + 1)
for i in range(model.num_encs)
]
report_keys_cer_ctc = [
"main/cer_ctc{}".format(i + 1) for i in range(model.num_encs)
] + [
"validation/main/cer_ctc{}".format(i + 1) for i in range(model.num_encs)
]
if hasattr(model, "is_transducer"):
trans_keys = [
"main/loss",
"validation/main/loss",
"main/loss_trans",
"validation/main/loss_trans",
]
ctc_keys = (
["main/loss_ctc", "validation/main/loss_ctc"]
if args.use_ctc_loss
else []
)
aux_trans_keys = (
[
"main/loss_aux_trans",
"validation/main/loss_aux_trans",
]
if args.use_aux_transducer_loss
else []
)
symm_kl_div_keys = (
[
"main/loss_symm_kl_div",
"validation/main/loss_symm_kl_div",
]
if args.use_symm_kl_div_loss
else []
)
lm_keys = (
[
"main/loss_lm",
"validation/main/loss_lm",
]
if args.use_lm_loss
else []
)
transducer_keys = (
trans_keys + ctc_keys + aux_trans_keys + symm_kl_div_keys + lm_keys
)
trainer.extend(
extensions.PlotReport(
transducer_keys,
"epoch",
file_name="loss.png",
)
)
else:
trainer.extend(
extensions.PlotReport(
[
"main/loss",
"validation/main/loss",
"main/loss_ctc",
"validation/main/loss_ctc",
"main/loss_att",
"validation/main/loss_att",
]
+ ([] if args.num_encs == 1 else report_keys_loss_ctc),
"epoch",
file_name="loss.png",
)
)
trainer.extend(
extensions.PlotReport(
["main/acc", "validation/main/acc"], "epoch", file_name="acc.png"
)
)
trainer.extend(
extensions.PlotReport(
["main/cer_ctc", "validation/main/cer_ctc"]
+ ([] if args.num_encs == 1 else report_keys_loss_ctc),
"epoch",
file_name="cer.png",
)
)
# Save best models
trainer.extend(
snapshot_object(model, "model.loss.best"),
trigger=training.triggers.MinValueTrigger("validation/main/loss"),
)
if mtl_mode not in ["ctc", "transducer", "custom_transducer"]:
trainer.extend(
snapshot_object(model, "model.acc.best"),
trigger=training.triggers.MaxValueTrigger("validation/main/acc"),
)
# save snapshot which contains model and optimizer states
if args.save_interval_iters > 0:
trainer.extend(
torch_snapshot(filename="snapshot.iter.{.updater.iteration}"),
trigger=(args.save_interval_iters, "iteration"),
)
# save snapshot at every epoch - for model averaging
trainer.extend(torch_snapshot(), trigger=(1, "epoch"))
# epsilon decay in the optimizer
if args.opt == "adadelta":
if args.criterion == "acc" and mtl_mode != "ctc":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.acc.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
elif args.criterion == "loss":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.loss.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
# NOTE: In some cases, it may take more than one epoch for the model's loss
# to escape from a local minimum.
# Thus, restore_snapshot extension is not used here.
# see details in https://github.com/espnet/espnet/pull/2171
elif args.criterion == "loss_eps_decay_only":
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
if is_writable_process(args, worldsize, rank, localrank):
# Write a log of evaluation statistics for each epoch
trainer.extend(
extensions.LogReport(trigger=(args.report_interval_iters, "iteration"))
)
if hasattr(model, "is_transducer"):
report_keys = (
[
"epoch",
"iteration",
]
+ transducer_keys
+ ["elapsed_time"]
)
else:
report_keys = [
"epoch",
"iteration",
"main/loss",
"main/loss_ctc",
"main/loss_att",
"validation/main/loss",
"validation/main/loss_ctc",
"validation/main/loss_att",
"main/acc",
"validation/main/acc",
"main/cer_ctc",
"validation/main/cer_ctc",
"elapsed_time",
] + (
[] if args.num_encs == 1 else report_keys_cer_ctc + report_keys_loss_ctc
)
if args.opt == "adadelta":
trainer.extend(
extensions.observe_value(
"eps",
lambda trainer: trainer.updater.get_optimizer("main").param_groups[
0
]["eps"],
),
trigger=(args.report_interval_iters, "iteration"),
)
report_keys.append("eps")
if args.report_cer:
report_keys.append("validation/main/cer")
if args.report_wer:
report_keys.append("validation/main/wer")
trainer.extend(
extensions.PrintReport(report_keys),
trigger=(args.report_interval_iters, "iteration"),
)
trainer.extend(
extensions.ProgressBar(update_interval=args.report_interval_iters)
)
set_early_stop(trainer, args)
if is_writable_process(args, worldsize, rank, localrank):
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
from torch.utils.tensorboard import SummaryWriter
trainer.extend(
TensorboardLogger(
SummaryWriter(args.tensorboard_dir),
att_reporter=att_reporter,
ctc_reporter=ctc_reporter,
),
trigger=(args.report_interval_iters, "iteration"),
)
if args.use_ddp:
# To avoid busy wait on non-main processes
# during a main process is writing plot, logs, etc,
# one additional extension must be added at the last.
# Within this additional extension,
# a main process will send a notification tensor
# to other processes when the main process finishes
# all operations like writing plot, log, etc.
src_rank = 0 # TODO(lazykyama): removing hard-coded value.
        @training.make_extension(trigger=(1, "epoch"))
        def barrier_extension_per_epoch(trainer):
            """Block non-main DDP ranks until the main rank finishes epoch-end work.

            Rank ``src_rank`` broadcasts a dummy tensor after it is done writing
            plots/logs/snapshots; the other ranks wait on the broadcast instead
            of busy-waiting into the next epoch (see the comment block above).
            """
            notification = torch.zeros(1, device=device)
            dist.broadcast(notification, src=src_rank)
            # Ensure the broadcast (an async CUDA op) has actually completed
            # on this device before the next epoch starts.
            torch.cuda.synchronize(device=device)
trainer.extend(barrier_extension_per_epoch)
# Run the training
trainer.run()
if is_writable_process(args, worldsize, rank, localrank):
check_early_stop(trainer, args.epochs)
def recog(args):
    """Decode with the given args.

    Loads a trained ASR model (optionally dynamically quantized) together
    with optional character-level and word-level RNN language models, then
    decodes every utterance in ``args.recog_json`` — one by one when
    ``args.batchsize == 0``, otherwise in batches — and writes the n-best
    results as JSON to ``args.result_label``.

    Args:
        args (namespace): The program arguments.
    """
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model, training=False)
    assert isinstance(model, ASRInterface)
    model.recog_args = args
    # Module classes to target with dynamic quantization (default: Linear only).
    if args.quantize_config is not None:
        q_config = set([getattr(torch.nn, q) for q in args.quantize_config])
    else:
        q_config = {torch.nn.Linear}
    if args.quantize_asr_model:
        logging.info("Use a quantized ASR model for decoding.")
        # It seems quantized LSTM only supports non-packed sequence before torch 1.4.0.
        # Reference issue: https://github.com/pytorch/pytorch/issues/27963
        if (
            V(torch.__version__) < V("1.4.0")
            and "lstm" in train_args.etype
            and torch.nn.LSTM in q_config
        ):
            raise ValueError(
                "Quantized LSTM in ESPnet is only supported with torch 1.4+."
            )
        # Dunno why but weight_observer from dynamic quantized module must have
        # dtype=torch.qint8 with torch < 1.5 although dtype=torch.float16 is supported.
        if args.quantize_dtype == "float16" and V(torch.__version__) < V("1.5.0"):
            raise ValueError(
                "float16 dtype for dynamic quantization is not supported with torch "
                "version < 1.5.0. Switching to qint8 dtype instead."
            )
        dtype = getattr(torch, args.quantize_dtype)
        model = torch.quantization.quantize_dynamic(model, q_config, dtype=dtype)
    if args.streaming_mode and "transformer" in train_args.model_module:
        raise NotImplementedError("streaming mode for transformer is not implemented")
    logging.info(
        " Total parameter of the model = "
        + str(sum(p.numel() for p in model.parameters()))
    )
    # read rnnlm
    if args.rnnlm:
        rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        if getattr(rnnlm_args, "model_module", "default") != "default":
            raise ValueError(
                "use '--api v2' option to decode with non-default language model"
            )
        rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(
                len(train_args.char_list),
                rnnlm_args.layer,
                rnnlm_args.unit,
                getattr(rnnlm_args, "embed_unit", None),  # for backward compatibility
            )
        )
        torch_load(args.rnnlm, rnnlm)
        if args.quantize_lm_model:
            dtype = getattr(torch, args.quantize_dtype)
            rnnlm = torch.quantization.quantize_dynamic(rnnlm, q_config, dtype=dtype)
        rnnlm.eval()
    else:
        rnnlm = None
    if args.word_rnnlm:
        rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
        word_dict = rnnlm_args.char_list_dict
        char_dict = {x: i for i, x in enumerate(train_args.char_list)}
        word_rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(
                len(word_dict),
                rnnlm_args.layer,
                rnnlm_args.unit,
                getattr(rnnlm_args, "embed_unit", None),  # for backward compatibility
            )
        )
        torch_load(args.word_rnnlm, word_rnnlm)
        word_rnnlm.eval()
        # Combine word-level and character-level LMs when both are given;
        # otherwise wrap the word LM for character-synchronous lookahead.
        if rnnlm is not None:
            rnnlm = lm_pytorch.ClassifierWithState(
                extlm_pytorch.MultiLevelLM(
                    word_rnnlm.predictor, rnnlm.predictor, word_dict, char_dict
                )
            )
        else:
            rnnlm = lm_pytorch.ClassifierWithState(
                extlm_pytorch.LookAheadWordLM(
                    word_rnnlm.predictor, word_dict, char_dict
                )
            )
    # gpu
    if args.ngpu == 1:
        gpu_id = list(range(args.ngpu))
        logging.info("gpu id: " + str(gpu_id))
        model.cuda()
        if rnnlm:
            rnnlm.cuda()
    # read json data
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]
    new_js = {}
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    # load transducer beam search
    if hasattr(model, "is_transducer"):
        if hasattr(model, "dec"):
            trans_decoder = model.dec
        else:
            trans_decoder = model.decoder
        joint_network = model.transducer_tasks.joint_network
        beam_search_transducer = BeamSearchTransducer(
            decoder=trans_decoder,
            joint_network=joint_network,
            beam_size=args.beam_size,
            lm=rnnlm,
            lm_weight=args.lm_weight,
            search_type=args.search_type,
            max_sym_exp=args.max_sym_exp,
            u_max=args.u_max,
            nstep=args.nstep,
            prefix_alpha=args.prefix_alpha,
            expansion_gamma=args.expansion_gamma,
            expansion_beta=args.expansion_beta,
            score_norm=args.score_norm,
            softmax_temperature=args.softmax_temperature,
            nbest=args.nbest,
            quantization=args.quantize_asr_model,
        )
    # batchsize == 0: decode each utterance individually
    if args.batchsize == 0:
        with torch.no_grad():
            for idx, name in enumerate(js.keys(), 1):
                # NOTE(review): ``name`` is concatenated into the logging
                # format string; a literal "%" in an utterance id would
                # break this call — confirm ids never contain "%".
                logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
                batch = [(name, js[name])]
                feat = load_inputs_and_targets(batch)
                feat = (
                    feat[0][0]
                    if args.num_encs == 1
                    else [feat[idx][0] for idx in range(model.num_encs)]
                )
                if args.streaming_mode == "window" and args.num_encs == 1:
                    logging.info(
                        "Using streaming recognizer with window size %d frames",
                        args.streaming_window,
                    )
                    se2e = WindowStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
                    for i in range(0, feat.shape[0], args.streaming_window):
                        logging.info(
                            "Feeding frames %d - %d", i, i + args.streaming_window
                        )
                        se2e.accept_input(feat[i : i + args.streaming_window])
                    logging.info("Running offline attention decoder")
                    se2e.decode_with_attention_offline()
                    logging.info("Offline attention decoder finished")
                    nbest_hyps = se2e.retrieve_recognition()
                elif args.streaming_mode == "segment" and args.num_encs == 1:
                    logging.info(
                        "Using streaming recognizer with threshold value %d",
                        args.streaming_min_blank_dur,
                    )
                    nbest_hyps = []
                    for n in range(args.nbest):
                        nbest_hyps.append({"yseq": [], "score": 0.0})
                    se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
                    # feed the input in chunks of the total subsampling factor
                    r = np.prod(model.subsample)
                    for i in range(0, feat.shape[0], r):
                        hyps = se2e.accept_input(feat[i : i + r])
                        if hyps is not None:
                            text = "".join(
                                [
                                    train_args.char_list[int(x)]
                                    for x in hyps[0]["yseq"][1:-1]
                                    if int(x) != -1
                                ]
                            )
                            text = text.replace(
                                "\u2581", " "
                            ).strip()  # for SentencePiece
                            text = text.replace(model.space, " ")
                            text = text.replace(model.blank, "")
                            logging.info(text)
                            # concatenate the per-segment n-best hypotheses
                            for n in range(args.nbest):
                                nbest_hyps[n]["yseq"].extend(hyps[n]["yseq"])
                                nbest_hyps[n]["score"] += hyps[n]["score"]
                elif hasattr(model, "is_transducer"):
                    nbest_hyps = model.recognize(feat, beam_search_transducer)
                else:
                    nbest_hyps = model.recognize(
                        feat, args, train_args.char_list, rnnlm
                    )
                new_js[name] = add_results_to_json(
                    js[name], nbest_hyps, train_args.char_list
                )
    else:
        def grouper(n, iterable, fillvalue=None):
            """Collect the iterable into length-n chunks (last one padded)."""
            kargs = [iter(iterable)] * n
            return itertools.zip_longest(*kargs, fillvalue=fillvalue)
        # sort data if batchsize > 1
        keys = list(js.keys())
        if args.batchsize > 1:
            feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
            sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
            keys = [keys[i] for i in sorted_index]
        with torch.no_grad():
            for names in grouper(args.batchsize, keys, None):
                # drop the None padding added to the last chunk by zip_longest
                names = [name for name in names if name]
                batch = [(name, js[name]) for name in names]
                feats = (
                    load_inputs_and_targets(batch)[0]
                    if args.num_encs == 1
                    else load_inputs_and_targets(batch)
                )
                if args.streaming_mode == "window" and args.num_encs == 1:
                    raise NotImplementedError
                elif args.streaming_mode == "segment" and args.num_encs == 1:
                    if args.batchsize > 1:
                        raise NotImplementedError
                    feat = feats[0]
                    nbest_hyps = []
                    for n in range(args.nbest):
                        nbest_hyps.append({"yseq": [], "score": 0.0})
                    se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
                    r = np.prod(model.subsample)
                    for i in range(0, feat.shape[0], r):
                        hyps = se2e.accept_input(feat[i : i + r])
                        if hyps is not None:
                            text = "".join(
                                [
                                    train_args.char_list[int(x)]
                                    for x in hyps[0]["yseq"][1:-1]
                                    if int(x) != -1
                                ]
                            )
                            text = text.replace(
                                "\u2581", " "
                            ).strip()  # for SentencePiece
                            text = text.replace(model.space, " ")
                            text = text.replace(model.blank, "")
                            logging.info(text)
                            for n in range(args.nbest):
                                nbest_hyps[n]["yseq"].extend(hyps[n]["yseq"])
                                nbest_hyps[n]["score"] += hyps[n]["score"]
                    nbest_hyps = [nbest_hyps]
                else:
                    nbest_hyps = model.recognize_batch(
                        feats, args, train_args.char_list, rnnlm=rnnlm
                    )
                for i, nbest_hyp in enumerate(nbest_hyps):
                    name = names[i]
                    new_js[name] = add_results_to_json(
                        js[name], nbest_hyp, train_args.char_list
                    )
    # write all n-best decoding results as one JSON file
    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
def enhance(args):
    """Dump enhanced speech and masks estimated by a trained model.

    Runs the enhancement frontend of a trained model over every utterance in
    ``args.recog_json``, optionally plots mask/noisy/masked/enhanced
    spectrograms to ``args.image_dir``, and writes the enhanced signals with
    ``file_writer_helper`` (optionally applying an inverse STFT first).

    Args:
        args (namespace): The program arguments.
    """
    set_deterministic_pytorch(args)
    # read training config
    idim, odim, train_args = get_model_conf(args.model, args.model_conf)
    # TODO(ruizhili): implement enhance for multi-encoder model
    assert args.num_encs == 1, "number of encoder should be 1 ({} is given)".format(
        args.num_encs
    )
    # load trained model parameters
    logging.info("reading model parameters from " + args.model)
    model_class = dynamic_import(train_args.model_module)
    model = model_class(idim, odim, train_args)
    assert isinstance(model, ASRInterface)
    torch_load(args.model, model)
    model.recog_args = args
    # gpu
    if args.ngpu == 1:
        gpu_id = list(range(args.ngpu))
        logging.info("gpu id: " + str(gpu_id))
        model.cuda()
    # read json data
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=None,  # Apply pre_process in outer func
    )
    if args.batchsize == 0:
        args.batchsize = 1
    # Creates writers for outputs from the network
    if args.enh_wspecifier is not None:
        enh_writer = file_writer_helper(args.enh_wspecifier, filetype=args.enh_filetype)
    else:
        enh_writer = None
    # Creates a Transformation instance
    preprocess_conf = (
        train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf
    )
    if preprocess_conf is not None:
        logging.info(f"Use preprocessing: {preprocess_conf}")
        transform = Transformation(preprocess_conf)
    else:
        transform = None
    # Creates a IStft instance
    istft = None
    frame_shift = args.istft_n_shift  # Used for plot the spectrogram
    if args.apply_istft:
        if preprocess_conf is not None:
            # Read the conffile and find stft setting
            with open(preprocess_conf) as f:
                # Json format: e.g.
                #    {"process": [{"type": "stft",
                #                  "win_length": 400,
                #                  "n_fft": 512, "n_shift": 160,
                #                  "window": "han"},
                #                 {"type": "foo", ...}, ...]}
                conf = json.load(f)
                assert "process" in conf, conf
                # Find stft setting
                for p in conf["process"]:
                    if p["type"] == "stft":
                        istft = IStft(
                            win_length=p["win_length"],
                            n_shift=p["n_shift"],
                            window=p.get("window", "hann"),
                        )
                        logging.info(
                            "stft is found in {}. "
                            "Setting istft config from it\n{}".format(
                                preprocess_conf, istft
                            )
                        )
                        frame_shift = p["n_shift"]
                        break
        if istft is None:
            # Set from command line arguments
            istft = IStft(
                win_length=args.istft_win_length,
                n_shift=args.istft_n_shift,
                window=args.istft_window,
            )
            logging.info(
                "Setting istft config from the command line args\n{}".format(istft)
            )
    # sort data by input length, longest first
    keys = list(js.keys())
    feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
    sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
    keys = [keys[i] for i in sorted_index]

    def grouper(n, iterable, fillvalue=None):
        """Collect the iterable into length-n chunks (last one padded)."""
        kargs = [iter(iterable)] * n
        return itertools.zip_longest(*kargs, fillvalue=fillvalue)

    num_images = 0
    if not os.path.exists(args.image_dir):
        os.makedirs(args.image_dir)
    for names in grouper(args.batchsize, keys, None):
        # BUGFIX: zip_longest pads the last chunk with the fill value (None)
        # whenever len(keys) is not a multiple of batchsize; drop the padding
        # so that js[None] is never looked up (same filtering as recog()).
        names = [name for name in names if name]
        batch = [(name, js[name]) for name in names]
        # May be in time region: (Batch, [Time, Channel])
        org_feats = load_inputs_and_targets(batch)[0]
        if transform is not None:
            # May be in time-freq region: : (Batch, [Time, Channel, Freq])
            feats = transform(org_feats, train=False)
        else:
            feats = org_feats
        with torch.no_grad():
            enhanced, mask, ilens = model.enhance(feats)
        for idx, name in enumerate(names):
            # Assuming mask, feats : [Batch, Time, Channel. Freq]
            # enhanced : [Batch, Time, Freq]
            enh = enhanced[idx][: ilens[idx]]
            mas = mask[idx][: ilens[idx]]
            feat = feats[idx]
            # Plot spectrogram
            if args.image_dir is not None and num_images < args.num_images:
                import matplotlib

                matplotlib.use("Agg")
                import matplotlib.pyplot as plt

                num_images += 1
                ref_ch = 0
                plt.figure(figsize=(20, 10))
                plt.subplot(4, 1, 1)
                plt.title("Mask [ref={}ch]".format(ref_ch))
                plot_spectrogram(
                    plt,
                    mas[:, ref_ch].T,
                    fs=args.fs,
                    mode="linear",
                    frame_shift=frame_shift,
                    bottom=False,
                    labelbottom=False,
                )
                plt.subplot(4, 1, 2)
                plt.title("Noisy speech [ref={}ch]".format(ref_ch))
                plot_spectrogram(
                    plt,
                    feat[:, ref_ch].T,
                    fs=args.fs,
                    mode="db",
                    frame_shift=frame_shift,
                    bottom=False,
                    labelbottom=False,
                )
                plt.subplot(4, 1, 3)
                plt.title("Masked speech [ref={}ch]".format(ref_ch))
                plot_spectrogram(
                    plt,
                    (feat[:, ref_ch] * mas[:, ref_ch]).T,
                    frame_shift=frame_shift,
                    fs=args.fs,
                    mode="db",
                    bottom=False,
                    labelbottom=False,
                )
                plt.subplot(4, 1, 4)
                plt.title("Enhanced speech")
                plot_spectrogram(
                    plt, enh.T, fs=args.fs, mode="db", frame_shift=frame_shift
                )
                plt.savefig(os.path.join(args.image_dir, name + ".png"))
                plt.clf()
            # Write enhanced wave files
            if enh_writer is not None:
                if istft is not None:
                    enh = istft(enh)
                if args.keep_length:
                    if len(org_feats[idx]) < len(enh):
                        # Truncate the frames added by stft padding
                        enh = enh[: len(org_feats[idx])]
                    elif len(org_feats[idx]) > len(enh):
                        # BUGFIX: compare against the per-utterance feature
                        # length (org_feats[idx]), not the batch size
                        # (len(org_feats)), matching the pad-width computed
                        # below from len(org_feats[idx]) - len(enh).
                        padwidth = [(0, (len(org_feats[idx]) - len(enh)))] + [
                            (0, 0)
                        ] * (enh.ndim - 1)
                        enh = np.pad(enh, padwidth, mode="constant")
                if args.enh_filetype in ("sound", "sound.hdf5"):
                    enh_writer[name] = (args.fs, enh)
                else:
                    # Hint: To dump stft_signal, mask or etc,
                    # enh_filetype='hdf5' might be convenient.
                    enh_writer[name] = enh
        if num_images >= args.num_images and enh_writer is None:
            logging.info("Breaking the process.")
            break
| 61,311 | 35.386944 | 103 | py |
espnet | espnet-master/espnet/asr/pytorch_backend/asr_mix.py | #!/usr/bin/env python3
"""
This script is used for multi-speaker speech recognition.
Copyright 2017 Johns Hopkins University (Shinji Watanabe)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import json
import logging
import os
from itertools import zip_longest as zip_longest
import numpy as np
import torch
# chainer related
from chainer import training
from chainer.training import extensions
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.asr.asr_mix_utils import add_results_to_json
from espnet.asr.asr_utils import (
CompareValueTrigger,
adadelta_eps_decay,
get_model_conf,
restore_snapshot,
snapshot_object,
torch_load,
torch_resume,
torch_snapshot,
)
from espnet.asr.pytorch_backend.asr import (
CustomEvaluator,
CustomUpdater,
load_trained_model,
)
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.pytorch_backend.e2e_asr_mix import pad_list
from espnet.utils.dataset import ChainerDataLoader, TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop, set_early_stop
class CustomConverter(object):
    """Custom batch converter for Pytorch (multi-speaker ASR).

    Pads a minibatch of input features and per-speaker target sequences and
    sends the resulting tensors to a device.

    Args:
        subsampling_factor (int): The subsampling factor.
        dtype (torch.dtype): Data type to convert.
        num_spkrs (int): Number of speakers (target transcriptions) per input.
    """

    def __init__(self, subsampling_factor=1, dtype=torch.float32, num_spkrs=2):
        """Initialize the converter."""
        self.subsampling_factor = subsampling_factor
        # padding value used for target sequences (ignored by the loss)
        self.ignore_id = -1
        self.dtype = dtype
        self.num_spkrs = num_spkrs

    def __call__(self, batch, device=torch.device("cpu")):
        """Transform a batch and send it to a device.

        Args:
            batch (list(tuple(str, dict[str, dict[str, Any]]))): The batch to transform.
            device (torch.device): The device to send to.

        Returns:
            tuple(torch.Tensor, torch.Tensor, torch.Tensor): Transformed batch
            of (padded inputs, input lengths, padded targets).
        """
        # batch should be located in list
        assert len(batch) == 1
        # assumes batch[0] is (xs, ys_spkr1, ..., ys_spkrN): input features
        # followed by one target list per speaker — matches make_batchset output
        xs, ys = batch[0][0], batch[0][-self.num_spkrs :]
        # perform subsampling
        if self.subsampling_factor > 1:
            xs = [x[:: self.subsampling_factor, :] for x in xs]
        # get batch of lengths of input sequences
        ilens = np.array([x.shape[0] for x in xs])
        # perform padding and convert to tensor
        # currently only support real number
        if xs[0].dtype.kind == "c":
            # complex (e.g. STFT) input: pad real and imaginary parts separately
            xs_pad_real = pad_list(
                [torch.from_numpy(x.real).float() for x in xs], 0
            ).to(device, dtype=self.dtype)
            xs_pad_imag = pad_list(
                [torch.from_numpy(x.imag).float() for x in xs], 0
            ).to(device, dtype=self.dtype)
            # Note(kamo):
            # {'real': ..., 'imag': ...} will be changed to ComplexTensor in E2E.
            # Don't create ComplexTensor and give it to E2E here
            # because torch.nn.DataParallel can't handle it.
            xs_pad = {"real": xs_pad_real, "imag": xs_pad_imag}
        else:
            xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(
                device, dtype=self.dtype
            )
        ilens = torch.from_numpy(ilens).to(device)
        if not isinstance(ys[0], np.ndarray):
            # per-speaker target lists: flatten across speakers, pad to one
            # common length, then reshape back to (B, num_spkrs, Tmax)
            ys_pad = []
            for i in range(len(ys)):  # speakers
                ys_pad += [torch.from_numpy(y).long() for y in ys[i]]
            ys_pad = pad_list(ys_pad, self.ignore_id)
            ys_pad = (
                ys_pad.view(self.num_spkrs, -1, ys_pad.size(1))
                .transpose(0, 1)
                .to(device)
            )  # (B, num_spkrs, Tmax)
        else:
            # single target array per utterance: plain padding
            ys_pad = pad_list(
                [torch.from_numpy(y).long() for y in ys], self.ignore_id
            ).to(device)
        return xs_pad, ilens, ys_pad
def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning("cuda is not available")
# get input and output dimension info
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
utts = list(valid_json.keys())
idim = int(valid_json[utts[0]]["input"][0]["shape"][-1])
odim = int(valid_json[utts[0]]["output"][0]["shape"][-1])
logging.info("#input dims : " + str(idim))
logging.info("#output dims: " + str(odim))
# specify attention, CTC, hybrid mode
if args.mtlalpha == 1.0:
mtl_mode = "ctc"
logging.info("Pure CTC mode")
elif args.mtlalpha == 0.0:
mtl_mode = "att"
logging.info("Pure attention mode")
else:
mtl_mode = "mtl"
logging.info("Multitask learning mode")
# specify model architecture
model_class = dynamic_import(args.model_module)
model = model_class(idim, odim, args)
assert isinstance(model, ASRInterface)
subsampling_factor = model.subsample[0]
if args.rnnlm is not None:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(args.char_list),
rnnlm_args.layer,
rnnlm_args.unit,
getattr(rnnlm_args, "embed_unit", None), # for backward compatibility
)
)
torch.load(args.rnnlm, rnnlm)
model.rnnlm = rnnlm
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + "/model.json"
with open(model_conf, "wb") as f:
logging.info("writing a model config file to " + model_conf)
f.write(
json.dumps(
(idim, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
for key in sorted(vars(args).keys()):
logging.info("ARGS: " + key + ": " + str(vars(args)[key]))
reporter = model.reporter
# check the use of multi-gpu
if args.ngpu > 1:
if args.batch_size != 0:
logging.warning(
"batch size is automatically increased (%d -> %d)"
% (args.batch_size, args.batch_size * args.ngpu)
)
args.batch_size *= args.ngpu
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
logging.warning(
"num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
sum(p.numel() for p in model.parameters() if p.requires_grad)
* 100.0
/ sum(p.numel() for p in model.parameters()),
)
)
# Setup an optimizer
if args.opt == "adadelta":
optimizer = torch.optim.Adadelta(
model.parameters(), rho=0.95, eps=args.eps, weight_decay=args.weight_decay
)
elif args.opt == "adam":
optimizer = torch.optim.Adam(model.parameters(), weight_decay=args.weight_decay)
elif args.opt == "noam":
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(
model.parameters(),
args.adim,
args.transformer_warmup_steps,
args.transformer_lr,
)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# setup apex.amp
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(
f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux"
)
raise e
if args.opt == "noam":
model, optimizer.optimizer = amp.initialize(
model, optimizer.optimizer, opt_level=args.train_dtype
)
else:
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.train_dtype
)
use_apex = True
else:
use_apex = False
# FIXME: TOO DIRTY HACK
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
# Setup a converter
converter = CustomConverter(
subsampling_factor=subsampling_factor, dtype=dtype, num_spkrs=args.num_spkrs
)
# read json data
with open(args.train_json, "rb") as f:
train_json = json.load(f)["utts"]
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
# make minibatch list (variable length)
train = make_batchset(
train_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=-1,
)
valid = make_batchset(
valid_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=-1,
)
load_tr = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": True}, # Switch the mode of preprocessing
)
load_cv = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": False}, # Switch the mode of preprocessing
)
# hack to make batchsize argument as 1
# actual bathsize is included in a list
# default collate function converts numpy array to pytorch tensor
# we used an empty collate function instead which returns list
train_iter = {
"main": ChainerDataLoader(
dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
batch_size=1,
num_workers=args.n_iter_processes,
shuffle=True,
collate_fn=lambda x: x[0],
)
}
valid_iter = {
"main": ChainerDataLoader(
dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
batch_size=1,
shuffle=False,
collate_fn=lambda x: x[0],
num_workers=args.n_iter_processes,
)
}
# Set up a trainer
updater = CustomUpdater(
model,
args.grad_clip,
train_iter,
optimizer,
device,
args.ngpu,
args.grad_noise,
args.accum_grad,
use_apex=use_apex,
)
trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)
if use_sortagrad:
trainer.extend(
ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
)
# Resume from a snapshot
if args.resume:
logging.info("resumed from %s" % args.resume)
torch_resume(args.resume, trainer)
# Evaluate the model with the test dataset for each epoch
trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu))
# Save attention weight each epoch
if args.num_save_attention > 0 and args.mtlalpha != 1.0:
data = sorted(
list(valid_json.items())[: args.num_save_attention],
key=lambda x: int(x[1]["input"][0]["shape"][1]),
reverse=True,
)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn,
data,
args.outdir + "/att_ws",
converter=converter,
transform=load_cv,
device=device,
)
trainer.extend(att_reporter, trigger=(1, "epoch"))
else:
att_reporter = None
# Make a plot for training and validation values
trainer.extend(
extensions.PlotReport(
[
"main/loss",
"validation/main/loss",
"main/loss_ctc",
"validation/main/loss_ctc",
"main/loss_att",
"validation/main/loss_att",
],
"epoch",
file_name="loss.png",
)
)
trainer.extend(
extensions.PlotReport(
["main/acc", "validation/main/acc"], "epoch", file_name="acc.png"
)
)
trainer.extend(
extensions.PlotReport(
["main/cer_ctc", "validation/main/cer_ctc"], "epoch", file_name="cer.png"
)
)
# Save best models
trainer.extend(
snapshot_object(model, "model.loss.best"),
trigger=training.triggers.MinValueTrigger("validation/main/loss"),
)
if mtl_mode != "ctc":
trainer.extend(
snapshot_object(model, "model.acc.best"),
trigger=training.triggers.MaxValueTrigger("validation/main/acc"),
)
# save snapshot which contains model and optimizer states
trainer.extend(torch_snapshot(), trigger=(1, "epoch"))
# epsilon decay in the optimizer
if args.opt == "adadelta":
if args.criterion == "acc" and mtl_mode != "ctc":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.acc.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
elif args.criterion == "loss":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.loss.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
# Write a log of evaluation statistics for each epoch
trainer.extend(
extensions.LogReport(trigger=(args.report_interval_iters, "iteration"))
)
report_keys = [
"epoch",
"iteration",
"main/loss",
"main/loss_ctc",
"main/loss_att",
"validation/main/loss",
"validation/main/loss_ctc",
"validation/main/loss_att",
"main/acc",
"validation/main/acc",
"main/cer_ctc",
"validation/main/cer_ctc",
"elapsed_time",
]
if args.opt == "adadelta":
trainer.extend(
extensions.observe_value(
"eps",
lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
"eps"
],
),
trigger=(args.report_interval_iters, "iteration"),
)
report_keys.append("eps")
if args.report_cer:
report_keys.append("validation/main/cer")
if args.report_wer:
report_keys.append("validation/main/wer")
trainer.extend(
extensions.PrintReport(report_keys),
trigger=(args.report_interval_iters, "iteration"),
)
trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
from torch.utils.tensorboard import SummaryWriter
trainer.extend(
TensorboardLogger(SummaryWriter(args.tensorboard_dir), att_reporter),
trigger=(args.report_interval_iters, "iteration"),
)
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
def recog(args):
    """Decode with the given args.

    Loads a trained ASR model and decodes the utterances listed in
    ``args.recog_json``, optionally rescoring with a character-level RNNLM
    and/or a word-level RNNLM, then writes the n-best hypotheses to
    ``args.result_label`` as JSON.

    Args:
        args (namespace): The program arguments.
    """
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    # keep the decoding options on the model so recognition code can read them
    model.recog_args = args
    # read rnnlm
    if args.rnnlm:
        rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        if getattr(rnnlm_args, "model_module", "default") != "default":
            raise ValueError(
                "use '--api v2' option to decode with non-default language model"
            )
        rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(
                len(train_args.char_list),
                rnnlm_args.layer,
                rnnlm_args.unit,
                getattr(rnnlm_args, "embed_unit", None),  # for backward compatibility
            )
        )
        torch_load(args.rnnlm, rnnlm)
        rnnlm.eval()
    else:
        rnnlm = None
    if args.word_rnnlm:
        rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
        word_dict = rnnlm_args.char_list_dict
        char_dict = {x: i for i, x in enumerate(train_args.char_list)}
        word_rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(len(word_dict), rnnlm_args.layer, rnnlm_args.unit)
        )
        torch_load(args.word_rnnlm, word_rnnlm)
        word_rnnlm.eval()
        if rnnlm is not None:
            # both char-level and word-level LMs given: combine them
            rnnlm = lm_pytorch.ClassifierWithState(
                extlm_pytorch.MultiLevelLM(
                    word_rnnlm.predictor, rnnlm.predictor, word_dict, char_dict
                )
            )
        else:
            # word-level LM only: score characters via look-ahead word LM
            rnnlm = lm_pytorch.ClassifierWithState(
                extlm_pytorch.LookAheadWordLM(
                    word_rnnlm.predictor, word_dict, char_dict
                )
            )
    # gpu (only single-GPU decoding is handled here)
    if args.ngpu == 1:
        gpu_id = list(range(args.ngpu))
        logging.info("gpu id: " + str(gpu_id))
        model.cuda()
        if rnnlm:
            rnnlm.cuda()
    # read json data
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]
    new_js = {}
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    if args.batchsize == 0:
        # batchsize == 0: decode one utterance at a time
        with torch.no_grad():
            for idx, name in enumerate(js.keys(), 1):
                logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
                batch = [(name, js[name])]
                feat = load_inputs_and_targets(batch)[0][0]
                nbest_hyps = model.recognize(feat, args, train_args.char_list, rnnlm)
                new_js[name] = add_results_to_json(
                    js[name], nbest_hyps, train_args.char_list
                )
    else:
        def grouper(n, iterable, fillvalue=None):
            # split ``iterable`` into chunks of size n, padding the last chunk
            kargs = [iter(iterable)] * n
            return zip_longest(*kargs, fillvalue=fillvalue)
        # sort data if batchsize > 1 (longest utterances first)
        keys = list(js.keys())
        if args.batchsize > 1:
            feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
            sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
            keys = [keys[i] for i in sorted_index]
        with torch.no_grad():
            for names in grouper(args.batchsize, keys, None):
                # drop the fill values added by grouper for the last chunk
                names = [name for name in names if name]
                batch = [(name, js[name]) for name in names]
                feats = load_inputs_and_targets(batch)[0]
                nbest_hyps = model.recognize_batch(
                    feats, args, train_args.char_list, rnnlm=rnnlm
                )
                for i, name in enumerate(names):
                    nbest_hyp = [hyp[i] for hyp in nbest_hyps]
                    new_js[name] = add_results_to_json(
                        js[name], nbest_hyp, train_args.char_list
                    )
    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
| 22,054 | 32.723242 | 88 | py |
espnet | espnet-master/espnet/lm/pytorch_backend/extlm.py | #!/usr/bin/env python3
# Copyright 2018 Mitsubishi Electric Research Laboratories (Takaaki Hori)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet.lm.lm_utils import make_lexical_tree
from espnet.nets.pytorch_backend.nets_utils import to_device
# Definition of a multi-level (subword/word) language model
class MultiLevelLM(nn.Module):
    """Multi-level (subword + word) language model.

    Scores subword sequences with a subword LM while substituting word-level
    probabilities from ``wordlm`` at word boundaries (``<space>``/``<eos>``),
    tracking the currently-spelled word with a lexical prefix tree.
    """

    logzero = -10000000000.0  # effectively -inf in the log domain
    zero = 1.0e-10  # smallest probability treated as non-zero

    def __init__(
        self,
        wordlm,
        subwordlm,
        word_dict,
        subword_dict,
        subwordlm_weight=0.8,
        oov_penalty=1.0,
        open_vocab=True,
    ):
        """Initialize MultiLevelLM.

        Args:
            wordlm: Word-level LM, callable as ``lm(state, x)`` -> (state, logits).
            subwordlm: Subword-level LM with the same calling convention.
            word_dict (dict): Word token -> id map; must contain <eos> and <unk>.
            subword_dict (dict): Subword token -> id map; must contain
                <space> and <eos>.
            subwordlm_weight (float): Weight applied to subword LM log-probs.
            oov_penalty (float): Probability penalty for out-of-vocabulary words.
            open_vocab (bool): If True, allow subword paths outside the
                lexical tree (open-vocabulary mode).
        """
        super(MultiLevelLM, self).__init__()
        self.wordlm = wordlm
        self.subwordlm = subwordlm
        self.word_eos = word_dict["<eos>"]
        self.word_unk = word_dict["<unk>"]
        self.var_word_eos = torch.LongTensor([self.word_eos])
        self.var_word_unk = torch.LongTensor([self.word_unk])
        self.space = subword_dict["<space>"]
        self.eos = subword_dict["<eos>"]
        self.lexroot = make_lexical_tree(word_dict, subword_dict, self.word_unk)
        self.log_oov_penalty = math.log(oov_penalty)
        self.open_vocab = open_vocab
        self.subword_dict_size = len(subword_dict)
        self.subwordlm_weight = subwordlm_weight
        self.normalized = True

    def forward(self, state, x):
        """Advance the LM state with input label ``x``; return (state, log-probs)."""
        # update state with input label x
        if state is None:  # make initial states and log-prob vectors
            self.var_word_eos = to_device(x, self.var_word_eos)
            # bugfix: this previously moved var_word_eos into var_word_unk,
            # which made <unk> words be scored as <eos> by the word LM
            self.var_word_unk = to_device(x, self.var_word_unk)
            wlm_state, z_wlm = self.wordlm(None, self.var_word_eos)
            wlm_logprobs = F.log_softmax(z_wlm, dim=1)
            clm_state, z_clm = self.subwordlm(None, x)
            log_y = F.log_softmax(z_clm, dim=1) * self.subwordlm_weight
            new_node = self.lexroot
            clm_logprob = 0.0
            xi = self.space
        else:
            clm_state, wlm_state, wlm_logprobs, node, log_y, clm_logprob = state
            xi = int(x)
            if xi == self.space:  # inter-word transition
                if node is not None and node[1] >= 0:  # check if the node is word end
                    w = to_device(x, torch.LongTensor([node[1]]))
                else:  # this node is not a word end, which means <unk>
                    w = self.var_word_unk
                # update wordlm state and log-prob vector
                wlm_state, z_wlm = self.wordlm(wlm_state, w)
                wlm_logprobs = F.log_softmax(z_wlm, dim=1)
                new_node = self.lexroot  # move to the tree root
                clm_logprob = 0.0
            elif node is not None and xi in node[0]:  # intra-word transition
                new_node = node[0][xi]
                clm_logprob += log_y[0, xi]
            elif self.open_vocab:  # if no path in the tree, enter open-vocabulary mode
                new_node = None
                clm_logprob += log_y[0, xi]
            else:  # if open_vocab flag is disabled, return 0 probabilities
                log_y = to_device(
                    x, torch.full((1, self.subword_dict_size), self.logzero)
                )
                return (clm_state, wlm_state, wlm_logprobs, None, log_y, 0.0), log_y
            clm_state, z_clm = self.subwordlm(clm_state, x)
            log_y = F.log_softmax(z_clm, dim=1) * self.subwordlm_weight
        # apply word-level probabilies for <space> and <eos> labels
        if xi != self.space:
            if new_node is not None and new_node[1] >= 0:  # if new node is word end
                # subtract accumulated subword log-prob so the word probability
                # replaces (not stacks on) the subword-level estimate
                wlm_logprob = wlm_logprobs[:, new_node[1]] - clm_logprob
            else:
                wlm_logprob = wlm_logprobs[:, self.word_unk] + self.log_oov_penalty
            log_y[:, self.space] = wlm_logprob
            log_y[:, self.eos] = wlm_logprob
        else:
            # forbid consecutive word boundaries
            log_y[:, self.space] = self.logzero
            log_y[:, self.eos] = self.logzero
        return (
            (clm_state, wlm_state, wlm_logprobs, new_node, log_y, float(clm_logprob)),
            log_y,
        )

    def final(self, state):
        """Return the final word-level <eos> log-probability for ``state``."""
        clm_state, wlm_state, wlm_logprobs, node, log_y, clm_logprob = state
        if node is not None and node[1] >= 0:  # check if the node is word end
            w = to_device(wlm_logprobs, torch.LongTensor([node[1]]))
        else:  # this node is not a word end, which means <unk>
            w = self.var_word_unk
        wlm_state, z_wlm = self.wordlm(wlm_state, w)
        return float(F.log_softmax(z_wlm, dim=1)[:, self.word_eos])
# Definition of a look-ahead word language model
class LookAheadWordLM(nn.Module):
    """Look-ahead word language model over subword units.

    Distributes word-level probability mass onto subword transitions using
    cumulative word probabilities and a lexical prefix tree, so a word LM can
    score hypotheses during subword-level beam search.
    """

    logzero = -10000000000.0  # effectively -inf in the log domain
    zero = 1.0e-10  # smallest probability treated as non-zero

    def __init__(
        self, wordlm, word_dict, subword_dict, oov_penalty=0.0001, open_vocab=True
    ):
        """Initialize LookAheadWordLM.

        Args:
            wordlm: Word-level LM, callable as ``lm(state, x)`` -> (state, logits).
            word_dict (dict): Word token -> id map; must contain <eos> and <unk>.
            subword_dict (dict): Subword token -> id map; must contain
                <space> and <eos>.
            oov_penalty (float): Probability penalty for out-of-vocabulary words.
            open_vocab (bool): If True, allow subword paths outside the
                lexical tree (open-vocabulary mode).
        """
        super(LookAheadWordLM, self).__init__()
        self.wordlm = wordlm
        self.word_eos = word_dict["<eos>"]
        self.word_unk = word_dict["<unk>"]
        self.var_word_eos = torch.LongTensor([self.word_eos])
        self.var_word_unk = torch.LongTensor([self.word_unk])
        self.space = subword_dict["<space>"]
        self.eos = subword_dict["<eos>"]
        self.lexroot = make_lexical_tree(word_dict, subword_dict, self.word_unk)
        self.oov_penalty = oov_penalty
        self.open_vocab = open_vocab
        self.subword_dict_size = len(subword_dict)
        self.zero_tensor = torch.FloatTensor([self.zero])
        self.normalized = True

    def forward(self, state, x):
        """Advance the LM state with input label ``x``; return (state, log-probs)."""
        # update state with input label x
        if state is None:  # make initial states and cumlative probability vector
            self.var_word_eos = to_device(x, self.var_word_eos)
            # bugfix: this previously moved var_word_eos into var_word_unk,
            # which made <unk> words be scored as <eos> by the word LM
            self.var_word_unk = to_device(x, self.var_word_unk)
            self.zero_tensor = to_device(x, self.zero_tensor)
            wlm_state, z_wlm = self.wordlm(None, self.var_word_eos)
            cumsum_probs = torch.cumsum(F.softmax(z_wlm, dim=1), dim=1)
            new_node = self.lexroot
            xi = self.space
        else:
            wlm_state, cumsum_probs, node = state
            xi = int(x)
            if xi == self.space:  # inter-word transition
                if node is not None and node[1] >= 0:  # check if the node is word end
                    w = to_device(x, torch.LongTensor([node[1]]))
                else:  # this node is not a word end, which means <unk>
                    w = self.var_word_unk
                # update wordlm state and cumlative probability vector
                wlm_state, z_wlm = self.wordlm(wlm_state, w)
                cumsum_probs = torch.cumsum(F.softmax(z_wlm, dim=1), dim=1)
                new_node = self.lexroot  # move to the tree root
            elif node is not None and xi in node[0]:  # intra-word transition
                new_node = node[0][xi]
            elif self.open_vocab:  # if no path in the tree, enter open-vocabulary mode
                new_node = None
            else:  # if open_vocab flag is disabled, return 0 probabilities
                log_y = to_device(
                    x, torch.full((1, self.subword_dict_size), self.logzero)
                )
                return (wlm_state, None, None), log_y
        if new_node is not None:
            succ, wid, wids = new_node
            # compute parent node probability (mass of all words whose ids
            # fall into this node's id range; 1.0 at the tree root)
            sum_prob = (
                (cumsum_probs[:, wids[1]] - cumsum_probs[:, wids[0]])
                if wids is not None
                else 1.0
            )
            if sum_prob < self.zero:
                log_y = to_device(
                    x, torch.full((1, self.subword_dict_size), self.logzero)
                )
                return (wlm_state, cumsum_probs, new_node), log_y
            # set <unk> probability as a default value
            unk_prob = (
                cumsum_probs[:, self.word_unk] - cumsum_probs[:, self.word_unk - 1]
            )
            y = to_device(
                x,
                torch.full(
                    (1, self.subword_dict_size), float(unk_prob) * self.oov_penalty
                ),
            )
            # compute transition probabilities to child nodes
            for cid, nd in succ.items():
                y[:, cid] = (
                    cumsum_probs[:, nd[2][1]] - cumsum_probs[:, nd[2][0]]
                ) / sum_prob
            # apply word-level probabilies for <space> and <eos> labels
            if wid >= 0:
                wlm_prob = (cumsum_probs[:, wid] - cumsum_probs[:, wid - 1]) / sum_prob
                y[:, self.space] = wlm_prob
                y[:, self.eos] = wlm_prob
            elif xi == self.space:
                # tree root after a boundary: forbid an immediate boundary
                y[:, self.space] = self.zero
                y[:, self.eos] = self.zero
            log_y = torch.log(torch.max(y, self.zero_tensor))  # clip to avoid log(0)
        else:  # if no path in the tree, transition probability is one
            log_y = to_device(x, torch.zeros(1, self.subword_dict_size))
        return (wlm_state, cumsum_probs, new_node), log_y

    def final(self, state):
        """Return the final word-level <eos> log-probability for ``state``."""
        wlm_state, cumsum_probs, node = state
        if node is not None and node[1] >= 0:  # check if the node is word end
            w = to_device(cumsum_probs, torch.LongTensor([node[1]]))
        else:  # this node is not a word end, which means <unk>
            w = self.var_word_unk
        wlm_state, z_wlm = self.wordlm(wlm_state, w)
        return float(F.log_softmax(z_wlm, dim=1)[:, self.word_eos])
| 9,546 | 42.593607 | 87 | py |
espnet | espnet-master/espnet/lm/pytorch_backend/lm.py | #!/usr/bin/env python3
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# This code is ported from the following implementation written in Torch.
# https://github.com/chainer/chainer/blob/master/examples/ptb/train_ptb_custom_loop.py
"""LM training in pytorch."""
import copy
import json
import logging
import numpy as np
import torch
import torch.nn as nn
from chainer import Chain, reporter, training
from chainer.dataset import convert
from chainer.training import extensions
from torch.nn.parallel import data_parallel
from espnet.asr.asr_utils import (
snapshot_object,
torch_load,
torch_resume,
torch_snapshot,
)
from espnet.lm.lm_utils import (
MakeSymlinkToBestModel,
ParallelSentenceIterator,
count_tokens,
load_dataset,
read_tokens,
)
from espnet.nets.lm_interface import LMInterface, dynamic_import_lm
from espnet.optimizer.factory import dynamic_import_optimizer
from espnet.scheduler.pytorch import PyTorchScheduler
from espnet.scheduler.scheduler import dynamic_import_scheduler
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop, set_early_stop
def compute_perplexity(result):
    """Attach perplexity values to a LogReport observation dict in place.

    Converts the accumulated negative log-likelihood / token-count pairs into
    ``perplexity`` (and ``val_perplexity`` when validation stats are present).

    :param dict result: The current observations
    """
    result["perplexity"] = np.exp(result["main/nll"] / result["main/count"])
    val_nll_key = "validation/main/nll"
    if val_nll_key in result:
        val_count = result["validation/main/count"]
        result["val_perplexity"] = np.exp(result[val_nll_key] / val_count)
class Reporter(Chain):
    """Placeholder chain so chainer's trainer machinery has a report target."""

    def report(self, loss):
        """Discard *loss*; reporting is handled elsewhere."""
        return None
def concat_examples(batch, device=None, padding=None):
    """Concatenate minibatch examples into padded input/target torch tensors.

    Delegates padding/stacking to chainer's ``convert.concat_examples`` and
    converts the resulting numpy arrays to torch tensors, moving them to the
    given GPU when ``device`` is a non-negative id.

    :param np.ndarray batch: The batch to concatenate
    :param int device: The device to send to
    :param Tuple[int,int] padding: The padding to use
    :return: (inputs, targets)
    :rtype (torch.Tensor, torch.Tensor)
    """
    inputs, targets = convert.concat_examples(batch, padding=padding)
    inputs = torch.from_numpy(inputs)
    targets = torch.from_numpy(targets)
    on_gpu = device is not None and device >= 0
    if on_gpu:
        inputs = inputs.cuda(device)
        targets = targets.cuda(device)
    return inputs, targets
class BPTTUpdater(training.StandardUpdater):
    """An updater for a pytorch LM."""
    def __init__(
        self,
        train_iter,
        model,
        optimizer,
        schedulers,
        device,
        gradclip=None,
        use_apex=False,
        accum_grad=1,
    ):
        """Initialize class.
        Args:
            train_iter (chainer.dataset.Iterator): The train iterator
            model (LMInterface) : The model to update
            optimizer (torch.optim.Optimizer): The optimizer for training
            schedulers (espnet.scheduler.scheduler.SchedulerInterface):
                The schedulers of `optimizer`
            device (list): The device ids (``[-1]`` means CPU)
            gradclip (float): The gradient clipping value to use
            use_apex (bool): The flag to use Apex in backprop.
            accum_grad (int): The number of gradient accumulation.
        """
        super(BPTTUpdater, self).__init__(train_iter, optimizer)
        self.model = model
        self.device = device
        self.gradclip = gradclip
        self.use_apex = use_apex
        # wrap the LR schedulers so they can be stepped per iteration
        self.scheduler = PyTorchScheduler(schedulers, optimizer)
        self.accum_grad = accum_grad
    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        """Update the model.

        Accumulates gradients over ``accum_grad`` minibatches, then performs a
        single (optionally clipped) optimizer step and advances the schedulers.
        """
        # When we pass one iterator and optimizer to StandardUpdater.__init__,
        # they are automatically named 'main'.
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")
        # Progress the dataset iterator for sentences at each iteration.
        self.model.zero_grad()  # Clear the parameter gradients
        accum = {"loss": 0.0, "nll": 0.0, "count": 0}
        for _ in range(self.accum_grad):
            batch = train_iter.__next__()
            # Concatenate the token IDs to matrices and send them to the device
            # self.converter does this job
            # (it is chainer.dataset.concat_examples by default)
            # targets are padded with -100 — presumably the loss ignore_index;
            # confirm against the model's loss function
            x, t = concat_examples(batch, device=self.device[0], padding=(0, -100))
            if self.device[0] == -1:
                # CPU path: call the model directly
                loss, nll, count = self.model(x, t)
            else:
                # apex does not support torch.nn.DataParallel
                loss, nll, count = data_parallel(self.model, (x, t), self.device)
            # backward
            loss = loss.mean() / self.accum_grad
            if self.use_apex:
                from apex import amp
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()  # Backprop
            # accumulate stats
            accum["loss"] += float(loss)
            accum["nll"] += float(nll.sum())
            accum["count"] += int(count.sum())
        for k, v in accum.items():
            reporter.report({k: v}, optimizer.target)
        if self.gradclip is not None:
            nn.utils.clip_grad_norm_(self.model.parameters(), self.gradclip)
        optimizer.step()  # Update the parameters
        self.scheduler.step(n_iter=self.iteration)
class LMEvaluator(BaseEvaluator):
    """A custom evaluator for a pytorch LM."""
    def __init__(self, val_iter, eval_model, reporter, device):
        """Initialize class.
        :param chainer.dataset.Iterator val_iter : The validation iterator
        :param LMInterface eval_model : The model to evaluate
        :param chainer.Reporter reporter : The observations reporter
        :param list device : The device ids to use (``[-1]`` means CPU)
        """
        # device=-1 keeps the chainer base class from moving batches itself;
        # actual device placement is done below via self.device
        super(LMEvaluator, self).__init__(val_iter, reporter, device=-1)
        self.model = eval_model
        self.device = device
    def evaluate(self):
        """Evaluate the model.

        Runs the model over the whole validation iterator without gradients
        and reports summed loss / negative log-likelihood / token count.
        """
        val_iter = self.get_iterator("main")
        loss = 0
        nll = 0
        count = 0
        self.model.eval()
        with torch.no_grad():
            # copy the iterator so evaluation does not consume the original
            for batch in copy.copy(val_iter):
                x, t = concat_examples(batch, device=self.device[0], padding=(0, -100))
                if self.device[0] == -1:
                    l, n, c = self.model(x, t)
                else:
                    # apex does not support torch.nn.DataParallel
                    l, n, c = data_parallel(self.model, (x, t), self.device)
                loss += float(l.sum())
                nll += float(n.sum())
                count += int(c.sum())
        self.model.train()
        # report validation loss
        observation = {}
        with reporter.report_scope(observation):
            reporter.report({"loss": loss}, self.model.reporter)
            reporter.report({"nll": nll}, self.model.reporter)
            reporter.report({"count": count}, self.model.reporter)
        return observation
def train(args):
    """Train an LM with the given args.

    Builds the model class named by ``args.model_module``, prepares train /
    validation iterators, sets up the optimizer, schedulers and chainer
    trainer extensions, runs training, and finally evaluates the best model
    on the test set if ``args.test_label`` is given.

    :param Namespace args: The program arguments
    """
    model_class = dynamic_import_lm(args.model_module, args.backend)
    assert issubclass(model_class, LMInterface), "model should implement LMInterface"
    # display torch version
    logging.info("torch version = " + torch.__version__)
    set_deterministic_pytorch(args)
    # check cuda and cudnn availability
    if not torch.cuda.is_available():
        logging.warning("cuda is not available")
    # get special label ids
    unk = args.char_list_dict["<unk>"]
    eos = args.char_list_dict["<eos>"]
    # read tokens as a sequence of sentences
    val, n_val_tokens, n_val_oovs = load_dataset(
        args.valid_label, args.char_list_dict, args.dump_hdf5_path
    )
    train, n_train_tokens, n_train_oovs = load_dataset(
        args.train_label, args.char_list_dict, args.dump_hdf5_path
    )
    logging.info("#vocab = " + str(args.n_vocab))
    logging.info("#sentences in the training data = " + str(len(train)))
    logging.info("#tokens in the training data = " + str(n_train_tokens))
    logging.info(
        "oov rate in the training data = %.2f %%"
        % (n_train_oovs / n_train_tokens * 100)
    )
    logging.info("#sentences in the validation data = " + str(len(val)))
    logging.info("#tokens in the validation data = " + str(n_val_tokens))
    logging.info(
        "oov rate in the validation data = %.2f %%" % (n_val_oovs / n_val_tokens * 100)
    )
    # sortagrad: -1 means "always on", >0 means "on for that many epochs"
    use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
    # Create the dataset iterators
    batch_size = args.batchsize * max(args.ngpu, 1)
    if batch_size * args.accum_grad > args.batchsize:
        logging.info(
            f"batch size is automatically increased "
            f"({args.batchsize} -> {batch_size * args.accum_grad})"
        )
    train_iter = ParallelSentenceIterator(
        train,
        batch_size,
        max_length=args.maxlen,
        sos=eos,
        eos=eos,
        shuffle=not use_sortagrad,
    )
    val_iter = ParallelSentenceIterator(
        val, batch_size, max_length=args.maxlen, sos=eos, eos=eos, repeat=False
    )
    epoch_iters = int(len(train_iter.batch_indices) / args.accum_grad)
    logging.info("#iterations per epoch = %d" % epoch_iters)
    logging.info("#total iterations = " + str(args.epoch * epoch_iters))
    # Prepare an RNNLM model
    if args.train_dtype in ("float16", "float32", "float64"):
        dtype = getattr(torch, args.train_dtype)
    else:
        # apex opt levels ("O0".."O3") keep float32 here; amp handles casting
        dtype = torch.float32
    model = model_class(args.n_vocab, args).to(dtype=dtype)
    if args.ngpu > 0:
        model.to("cuda")
        gpu_id = list(range(args.ngpu))
    else:
        gpu_id = [-1]
    # Save model conf to json
    model_conf = args.outdir + "/model.json"
    with open(model_conf, "wb") as f:
        logging.info("writing a model config file to " + model_conf)
        f.write(
            json.dumps(vars(args), indent=4, ensure_ascii=False, sort_keys=True).encode(
                "utf_8"
            )
        )
    logging.warning(
        "num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format(
            sum(p.numel() for p in model.parameters()),
            sum(p.numel() for p in model.parameters() if p.requires_grad),
            sum(p.numel() for p in model.parameters() if p.requires_grad)
            * 100.0
            / sum(p.numel() for p in model.parameters()),
        )
    )
    # Set up an optimizer
    opt_class = dynamic_import_optimizer(args.opt, args.backend)
    optimizer = opt_class.from_args(model.parameters(), args)
    if args.schedulers is None:
        schedulers = []
    else:
        schedulers = [dynamic_import_scheduler(v)(k, args) for k, v in args.schedulers]
    # setup apex.amp
    if args.train_dtype in ("O0", "O1", "O2", "O3"):
        try:
            from apex import amp
        except ImportError as e:
            logging.error(
                f"You need to install apex for --train-dtype {args.train_dtype}. "
                "See https://github.com/NVIDIA/apex#linux"
            )
            raise e
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.train_dtype)
        use_apex = True
    else:
        use_apex = False
    # FIXME: TOO DIRTY HACK
    # chainer's trainer expects these attributes on the optimizer/model
    reporter = Reporter()
    setattr(model, "reporter", reporter)
    setattr(optimizer, "target", reporter)
    setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
    updater = BPTTUpdater(
        train_iter,
        model,
        optimizer,
        schedulers,
        gpu_id,
        gradclip=args.gradclip,
        use_apex=use_apex,
        accum_grad=args.accum_grad,
    )
    trainer = training.Trainer(updater, (args.epoch, "epoch"), out=args.outdir)
    trainer.extend(LMEvaluator(val_iter, model, reporter, device=gpu_id))
    trainer.extend(
        extensions.LogReport(
            postprocess=compute_perplexity,
            trigger=(args.report_interval_iters, "iteration"),
        )
    )
    trainer.extend(
        extensions.PrintReport(
            [
                "epoch",
                "iteration",
                "main/loss",
                "perplexity",
                "val_perplexity",
                "elapsed_time",
            ]
        ),
        trigger=(args.report_interval_iters, "iteration"),
    )
    trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
    # Save best models
    trainer.extend(torch_snapshot(filename="snapshot.ep.{.updater.epoch}"))
    trainer.extend(snapshot_object(model, "rnnlm.model.{.updater.epoch}"))
    # T.Hori: MinValueTrigger should be used, but it fails when resuming
    trainer.extend(MakeSymlinkToBestModel("validation/main/loss", "rnnlm.model"))
    if use_sortagrad:
        # switch from sorted batches to shuffling after the sortagrad epochs
        trainer.extend(
            ShufflingEnabler([train_iter]),
            trigger=(args.sortagrad if args.sortagrad != -1 else args.epoch, "epoch"),
        )
    if args.resume:
        logging.info("resumed from %s" % args.resume)
        torch_resume(args.resume, trainer)
    set_early_stop(trainer, args, is_lm=True)
    if args.tensorboard_dir is not None and args.tensorboard_dir != "":
        from torch.utils.tensorboard import SummaryWriter
        writer = SummaryWriter(args.tensorboard_dir)
        trainer.extend(
            TensorboardLogger(writer), trigger=(args.report_interval_iters, "iteration")
        )
    trainer.run()
    check_early_stop(trainer, args.epoch)
    # compute perplexity for test set
    if args.test_label:
        logging.info("test the best model")
        torch_load(args.outdir + "/rnnlm.model.best", model)
        test = read_tokens(args.test_label, args.char_list_dict)
        n_test_tokens, n_test_oovs = count_tokens(test, unk)
        logging.info("#sentences in the test data = " + str(len(test)))
        logging.info("#tokens in the test data = " + str(n_test_tokens))
        logging.info(
            "oov rate in the test data = %.2f %%" % (n_test_oovs / n_test_tokens * 100)
        )
        test_iter = ParallelSentenceIterator(
            test, batch_size, max_length=args.maxlen, sos=eos, eos=eos, repeat=False
        )
        evaluator = LMEvaluator(test_iter, model, reporter, device=gpu_id)
        result = evaluator()
        compute_perplexity(result)
        logging.info(f"test perplexity: {result['perplexity']}")
| 14,856 | 35.414216 | 88 | py |
espnet | espnet-master/espnet/tts/pytorch_backend/tts.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""E2E-TTS training / decoding functions."""
import copy
import json
import logging
import math
import os
import time
import chainer
import kaldiio
import numpy as np
import torch
from chainer import training
from chainer.training import extensions
from espnet.asr.asr_utils import (
get_model_conf,
snapshot_object,
torch_load,
torch_resume,
torch_snapshot,
)
from espnet.asr.pytorch_backend.asr_init import load_trained_modules
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.dataset import ChainerDataLoader, TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop, set_early_stop
class CustomEvaluator(BaseEvaluator):
    """Custom evaluator."""
    def __init__(self, model, iterator, target, device):
        """Initialize module.
        Args:
            model (torch.nn.Module): Pytorch model instance.
            iterator (chainer.dataset.Iterator): Iterator for validation.
            target (chainer.Chain): Dummy chain instance.
            device (torch.device): The device to be used in evaluation.
        """
        super(CustomEvaluator, self).__init__(iterator, target)
        self.model = model
        self.device = device
    # The core part of the update routine can be customized by overriding.
    def evaluate(self):
        """Evaluate over validation iterator.

        Runs the model in eval mode without gradients and collects the
        observations reported inside the model's forward pass.
        """
        iterator = self._iterators["main"]
        if self.eval_hook:
            self.eval_hook(self)
        if hasattr(iterator, "reset"):
            # resettable iterators can be reused directly
            iterator.reset()
            it = iterator
        else:
            # otherwise iterate over a shallow copy so the original survives
            it = copy.copy(iterator)
        summary = chainer.reporter.DictSummary()
        self.model.eval()
        with torch.no_grad():
            for batch in it:
                # batches are either positional tuples or keyword dicts
                if isinstance(batch, tuple):
                    x = tuple(arr.to(self.device) for arr in batch)
                else:
                    x = batch
                    for key in x.keys():
                        x[key] = x[key].to(self.device)
                observation = {}
                with chainer.reporter.report_scope(observation):
                    # convert to torch tensor
                    if isinstance(x, tuple):
                        self.model(*x)
                    else:
                        self.model(**x)
                summary.add(observation)
        self.model.train()
        return summary.compute_mean()
class CustomUpdater(training.StandardUpdater):
    """Updater that trains a pytorch model with optional gradient accumulation."""

    def __init__(self, model, grad_clip, iterator, optimizer, device, accum_grad=1):
        """Initialize the updater.

        Args:
            model (torch.nn.Module): Pytorch model instance.
            grad_clip (float): The gradient clipping value.
            iterator (chainer.dataset.Iterator): Iterator for training.
            optimizer (torch.optim.Optimizer): Pytorch optimizer instance.
            device (torch.device): The device to be used in training.
            accum_grad (int): Number of forward passes per optimizer step.

        """
        super(CustomUpdater, self).__init__(iterator, optimizer)
        self.model = model
        self.grad_clip = grad_clip
        self.device = device
        self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
        self.accum_grad = accum_grad
        self.forward_count = 0

    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        """Run one forward/backward pass; step the optimizer every accum_grad calls."""
        # A single iterator/optimizer passed to StandardUpdater is named 'main'.
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")

        # Fetch the next minibatch and move it to the training device.
        batch = train_iter.next()
        if isinstance(batch, tuple):
            inputs = tuple(t.to(self.device) for t in batch)
            loss = self.model(*inputs).mean() / self.accum_grad
        else:
            inputs = batch
            for name in inputs.keys():
                inputs[name] = inputs[name].to(self.device)
            loss = self.model(**inputs).mean() / self.accum_grad
        loss.backward()

        # Accumulate gradients until enough forward passes have been made.
        self.forward_count += 1
        if self.forward_count != self.accum_grad:
            return
        self.forward_count = 0

        # Clip gradients and skip the step entirely when the norm is NaN.
        grad_norm = self.clip_grad_norm(self.model.parameters(), self.grad_clip)
        logging.debug("grad norm={}".format(grad_norm))
        if math.isnan(grad_norm):
            logging.warning("grad norm is nan. Do not update model.")
        else:
            optimizer.step()
        optimizer.zero_grad()

    def update(self):
        """Advance one training step, counting iterations per optimizer step."""
        self.update_core()
        if self.forward_count == 0:
            self.iteration += 1
class CustomConverter(object):
    """Convert a minibatch of (text, feats, spembs, extras) into model inputs."""

    def __init__(self):
        """Initialize the converter."""
        # NOTE: keep as class for future development
        pass

    def __call__(self, batch, device=torch.device("cpu")):
        """Convert a given batch.

        Args:
            batch (list): One-element list holding a tuple of
                (token id arrays, feature arrays, speaker embeddings, extras).
            device (torch.device): The device the tensors are sent to.

        Returns:
            dict: Dict with "xs", "ilens", "ys", "labels", "olens" and,
                when available, "spembs" and "extras".

        """
        # The actual minibatch is wrapped in a one-element list.
        assert len(batch) == 1
        xs, ys, spembs, extras = batch[0]

        # Sequence lengths must be tensors so DataParallel can scatter them.
        ilens = torch.from_numpy(np.array([x.shape[0] for x in xs])).long().to(device)
        olens = torch.from_numpy(np.array([y.shape[0] for y in ys])).long().to(device)

        # Zero-pad token ids and target features up to the longest sequence.
        xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)
        ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)

        # Stop-token targets: 1.0 from the last valid frame onwards.
        labels = ys.new_zeros(ys.size(0), ys.size(1))
        for idx, olen in enumerate(olens):
            labels[idx, olen - 1 :] = 1.0

        converted = {
            "xs": xs,
            "ilens": ilens,
            "ys": ys,
            "labels": labels,
            "olens": olens,
        }

        # Optional speaker embeddings.
        if spembs is not None:
            spembs = torch.from_numpy(np.array(spembs)).float()
            converted["spembs"] = spembs.to(device)

        # Optional second target (padded like ys).
        if extras is not None:
            extras = pad_list([torch.from_numpy(extra).float() for extra in extras], 0)
            converted["extras"] = extras.to(device)

        return converted
def train(args):
    """Train E2E-TTS model.

    Builds the model, data iterators, optimizer, and a chainer trainer with
    evaluation/snapshot/plotting extensions, then runs training.

    Args:
        args (Namespace): Parsed command-line options for training.

    """
    set_deterministic_pytorch(args)

    # check cuda availability
    if not torch.cuda.is_available():
        logging.warning("cuda is not available")

    # get input and output dimension info
    with open(args.valid_json, "rb") as f:
        valid_json = json.load(f)["utts"]
    utts = list(valid_json.keys())

    # reverse input and output dimension
    # (for TTS the data json stores text under "output" and feats under "input",
    # hence idim comes from "output" and odim from "input")
    idim = int(valid_json[utts[0]]["output"][0]["shape"][1])
    odim = int(valid_json[utts[0]]["input"][0]["shape"][1])
    logging.info("#input dims : " + str(idim))
    logging.info("#output dims: " + str(odim))

    # get extra input and output dimension
    if args.use_speaker_embedding:
        args.spk_embed_dim = int(valid_json[utts[0]]["input"][1]["shape"][0])
    else:
        args.spk_embed_dim = None
    if args.use_second_target:
        args.spc_dim = int(valid_json[utts[0]]["input"][1]["shape"][1])
    else:
        args.spc_dim = None

    # write model config
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    model_conf = args.outdir + "/model.json"
    with open(model_conf, "wb") as f:
        # NOTE(review): log message lacks a trailing space before the path;
        # left unchanged here since it is runtime output.
        logging.info("writing a model config file to" + model_conf)
        f.write(
            json.dumps(
                (idim, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
    for key in sorted(vars(args).keys()):
        logging.info("ARGS: " + key + ": " + str(vars(args)[key]))

    # specify model architecture
    if args.enc_init is not None or args.dec_init is not None:
        # warm-start selected modules from pretrained models
        model = load_trained_modules(idim, odim, args, TTSInterface)
    else:
        model_class = dynamic_import(args.model_module)
        model = model_class(idim, odim, args)
    assert isinstance(model, TTSInterface)
    logging.info(model)
    reporter = model.reporter

    # check the use of multi-gpu
    if args.ngpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.ngpu)))
        if args.batch_size != 0:
            logging.warning(
                "batch size is automatically increased (%d -> %d)"
                % (args.batch_size, args.batch_size * args.ngpu)
            )
            args.batch_size *= args.ngpu

    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    model = model.to(device)

    # freeze modules, if specified
    if args.freeze_mods:
        # DataParallel wraps parameters under a "module." prefix
        if hasattr(model, "module"):
            freeze_mods = ["module." + x for x in args.freeze_mods]
        else:
            freeze_mods = args.freeze_mods
        for mod, param in model.named_parameters():
            if any(mod.startswith(key) for key in freeze_mods):
                logging.info(f"{mod} is frozen not to be updated.")
                param.requires_grad = False
        model_params = filter(lambda x: x.requires_grad, model.parameters())
    else:
        model_params = model.parameters()
    logging.warning(
        "num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format(
            sum(p.numel() for p in model.parameters()),
            sum(p.numel() for p in model.parameters() if p.requires_grad),
            sum(p.numel() for p in model.parameters() if p.requires_grad)
            * 100.0
            / sum(p.numel() for p in model.parameters()),
        )
    )

    # Setup an optimizer
    if args.opt == "adam":
        optimizer = torch.optim.Adam(
            model_params, args.lr, eps=args.eps, weight_decay=args.weight_decay
        )
    elif args.opt == "noam":
        from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt

        optimizer = get_std_opt(
            model_params, args.adim, args.transformer_warmup_steps, args.transformer_lr
        )
    else:
        raise NotImplementedError("unknown optimizer: " + args.opt)

    # FIXME: TOO DIRTY HACK
    # chainer's trainer expects the optimizer to expose `target`/`serialize`.
    setattr(optimizer, "target", reporter)
    setattr(optimizer, "serialize", lambda s: reporter.serialize(s))

    # read json data
    with open(args.train_json, "rb") as f:
        train_json = json.load(f)["utts"]
    with open(args.valid_json, "rb") as f:
        valid_json = json.load(f)["utts"]

    # sortagrad: sort by length for the first epoch(s) (-1 means all epochs)
    use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
    if use_sortagrad:
        args.batch_sort_key = "input"

    # make minibatch list (variable length)
    train_batchset = make_batchset(
        train_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        batch_sort_key=args.batch_sort_key,
        min_batch_size=args.ngpu if args.ngpu > 1 else 1,
        shortest_first=use_sortagrad,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        swap_io=True,
        iaxis=0,
        oaxis=0,
    )
    valid_batchset = make_batchset(
        valid_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        batch_sort_key=args.batch_sort_key,
        min_batch_size=args.ngpu if args.ngpu > 1 else 1,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        swap_io=True,
        iaxis=0,
        oaxis=0,
    )

    load_tr = LoadInputsAndTargets(
        mode="tts",
        use_speaker_embedding=args.use_speaker_embedding,
        use_second_target=args.use_second_target,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={"train": True},  # Switch the mode of preprocessing
        keep_all_data_on_mem=args.keep_all_data_on_mem,
    )

    load_cv = LoadInputsAndTargets(
        mode="tts",
        use_speaker_embedding=args.use_speaker_embedding,
        use_second_target=args.use_second_target,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={"train": False},  # Switch the mode of preprocessing
        keep_all_data_on_mem=args.keep_all_data_on_mem,
    )

    converter = CustomConverter()

    # hack to make batchsize argument as 1
    # actual batch size is included in a list
    train_iter = {
        "main": ChainerDataLoader(
            dataset=TransformDataset(
                train_batchset, lambda data: converter([load_tr(data)])
            ),
            batch_size=1,
            num_workers=args.num_iter_processes,
            shuffle=not use_sortagrad,
            collate_fn=lambda x: x[0],
        )
    }
    valid_iter = {
        "main": ChainerDataLoader(
            dataset=TransformDataset(
                valid_batchset, lambda data: converter([load_cv(data)])
            ),
            batch_size=1,
            shuffle=False,
            collate_fn=lambda x: x[0],
            num_workers=args.num_iter_processes,
        )
    }

    # Set up a trainer
    updater = CustomUpdater(
        model, args.grad_clip, train_iter, optimizer, device, args.accum_grad
    )
    trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)

    # Resume from a snapshot
    if args.resume:
        logging.info("resumed from %s" % args.resume)
        torch_resume(args.resume, trainer)

    # set intervals
    eval_interval = (args.eval_interval_epochs, "epoch")
    save_interval = (args.save_interval_epochs, "epoch")
    report_interval = (args.report_interval_iters, "iteration")

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(
        CustomEvaluator(model, valid_iter, reporter, device), trigger=eval_interval
    )

    # Save snapshot for each epoch
    trainer.extend(torch_snapshot(), trigger=save_interval)

    # Save best models
    trainer.extend(
        snapshot_object(model, "model.loss.best"),
        trigger=training.triggers.MinValueTrigger(
            "validation/main/loss", trigger=eval_interval
        ),
    )

    # Save attention figure for each epoch
    if args.num_save_attention > 0:
        # pick the shortest utterances so the plots stay readable
        data = sorted(
            list(valid_json.items())[: args.num_save_attention],
            key=lambda x: int(x[1]["output"][0]["shape"][0]),
        )
        if hasattr(model, "module"):
            att_vis_fn = model.module.calculate_all_attentions
            plot_class = model.module.attention_plot_class
            reduction_factor = model.module.reduction_factor
        else:
            att_vis_fn = model.calculate_all_attentions
            plot_class = model.attention_plot_class
            reduction_factor = model.reduction_factor
        if reduction_factor > 1:
            # fix the length to crop attention weight plot correctly
            data = copy.deepcopy(data)
            for idx in range(len(data)):
                ilen = data[idx][1]["input"][0]["shape"][0]
                data[idx][1]["input"][0]["shape"][0] = ilen // reduction_factor
        att_reporter = plot_class(
            att_vis_fn,
            data,
            args.outdir + "/att_ws",
            converter=converter,
            transform=load_cv,
            device=device,
            reverse=True,
        )
        trainer.extend(att_reporter, trigger=eval_interval)
    else:
        att_reporter = None

    # Make a plot for training and validation values
    if hasattr(model, "module"):
        base_plot_keys = model.module.base_plot_keys
    else:
        base_plot_keys = model.base_plot_keys
    plot_keys = []
    for key in base_plot_keys:
        plot_key = ["main/" + key, "validation/main/" + key]
        trainer.extend(
            extensions.PlotReport(plot_key, "epoch", file_name=key + ".png"),
            trigger=eval_interval,
        )
        plot_keys += plot_key
    trainer.extend(
        extensions.PlotReport(plot_keys, "epoch", file_name="all_loss.png"),
        trigger=eval_interval,
    )

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(trigger=report_interval))
    report_keys = ["epoch", "iteration", "elapsed_time"] + plot_keys
    trainer.extend(extensions.PrintReport(report_keys), trigger=report_interval)
    trainer.extend(extensions.ProgressBar(), trigger=report_interval)

    set_early_stop(trainer, args)
    if args.tensorboard_dir is not None and args.tensorboard_dir != "":
        from torch.utils.tensorboard import SummaryWriter

        writer = SummaryWriter(args.tensorboard_dir)
        trainer.extend(TensorboardLogger(writer, att_reporter), trigger=report_interval)

    # switch from sorted to shuffled batches once sortagrad epochs are done
    if use_sortagrad:
        trainer.extend(
            ShufflingEnabler([train_iter]),
            trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
        )

    # Run the training
    trainer.run()
    check_early_stop(trainer, args.epochs)
@torch.no_grad()
def decode(args):
    """Decode with E2E-TTS model.

    Loads a trained model, synthesizes features for every utterance in the
    given json, and writes features (plus optional durations/focus rates and
    diagnostic plots) to disk.

    Args:
        args (Namespace): Parsed command-line options for decoding.

    """
    set_deterministic_pytorch(args)
    # read training config
    idim, odim, train_args = get_model_conf(args.model, args.model_conf)

    # show arguments
    for key in sorted(vars(args).keys()):
        logging.info("args: " + key + ": " + str(vars(args)[key]))

    # define model
    model_class = dynamic_import(train_args.model_module)
    model = model_class(idim, odim, train_args)
    assert isinstance(model, TTSInterface)
    logging.info(model)

    # load trained model parameters
    logging.info("reading model parameters from " + args.model)
    torch_load(args.model, model)
    model.eval()

    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    model = model.to(device)

    # read json data
    with open(args.json, "rb") as f:
        js = json.load(f)["utts"]

    # check directory
    outdir = os.path.dirname(args.out)
    if len(outdir) != 0 and not os.path.exists(outdir):
        os.makedirs(outdir)

    load_inputs_and_targets = LoadInputsAndTargets(
        mode="tts",
        load_input=False,
        sort_in_input_length=False,
        use_speaker_embedding=train_args.use_speaker_embedding,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},  # Switch the mode of preprocessing
    )

    # define function for plot prob and att_ws
    def _plot_and_save(array, figname, figsize=(6, 4), dpi=150):
        # matplotlib is imported lazily (and with the Agg backend) so decoding
        # works on headless machines and without a hard dependency at import time
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt

        shape = array.shape
        if len(shape) == 1:
            # for eos probability
            plt.figure(figsize=figsize, dpi=dpi)
            plt.plot(array)
            plt.xlabel("Frame")
            plt.ylabel("Probability")
            plt.ylim([0, 1])
        elif len(shape) == 2:
            # for tacotron 2 attention weights, whose shape is (out_length, in_length)
            plt.figure(figsize=figsize, dpi=dpi)
            plt.imshow(array, aspect="auto")
            plt.xlabel("Input")
            plt.ylabel("Output")
        elif len(shape) == 4:
            # for transformer attention weights,
            # whose shape is (#layers, #heads, out_length, in_length)
            plt.figure(figsize=(figsize[0] * shape[0], figsize[1] * shape[1]), dpi=dpi)
            for idx1, xs in enumerate(array):
                for idx2, x in enumerate(xs, 1):
                    plt.subplot(shape[0], shape[1], idx1 * shape[1] + idx2)
                    plt.imshow(x, aspect="auto")
                    plt.xlabel("Input")
                    plt.ylabel("Output")
        else:
            raise NotImplementedError("Support only from 1D to 4D array.")
        plt.tight_layout()
        if not os.path.exists(os.path.dirname(figname)):
            # NOTE: exist_ok = True is needed for parallel process decoding
            os.makedirs(os.path.dirname(figname), exist_ok=True)
        plt.savefig(figname)
        plt.close()

    # define function to calculate focus rate
    # (see section 3.3 in https://arxiv.org/abs/1905.09263)
    # NOTE(review): "rete" is a typo for "rate"; the name is local to this
    # function, so renaming it would be a safe cleanup.
    def _calculate_focus_rete(att_ws):
        if att_ws is None:
            # fastspeech case -> None
            return 1.0
        elif len(att_ws.shape) == 2:
            # tacotron 2 case -> (L, T)
            return float(att_ws.max(dim=-1)[0].mean())
        elif len(att_ws.shape) == 4:
            # transformer case -> (#layers, #heads, L, T)
            return float(att_ws.max(dim=-1)[0].mean(dim=-1).max())
        else:
            raise ValueError("att_ws should be 2 or 4 dimensional tensor.")

    # define function to convert attention to duration
    def _convert_att_to_duration(att_ws):
        if len(att_ws.shape) == 2:
            # tacotron 2 case -> (L, T)
            pass
        elif len(att_ws.shape) == 4:
            # transformer case -> (#layers, #heads, L, T)
            # get the most diagonal head according to focus rate
            att_ws = torch.cat(
                [att_w for att_w in att_ws], dim=0
            )  # (#heads * #layers, L, T)
            diagonal_scores = att_ws.max(dim=-1)[0].mean(dim=-1)  # (#heads * #layers,)
            diagonal_head_idx = diagonal_scores.argmax()
            att_ws = att_ws[diagonal_head_idx]  # (L, T)
        else:
            raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
        # calculate duration from 2d attention weight
        # (duration of input i = number of output frames attending mostly to i)
        durations = torch.stack(
            [att_ws.argmax(-1).eq(i).sum() for i in range(att_ws.shape[1])]
        )
        return durations.view(-1, 1).float()

    # define writer instances
    feat_writer = kaldiio.WriteHelper("ark,scp:{o}.ark,{o}.scp".format(o=args.out))
    if args.save_durations:
        dur_writer = kaldiio.WriteHelper(
            "ark,scp:{o}.ark,{o}.scp".format(o=args.out.replace("feats", "durations"))
        )
    if args.save_focus_rates:
        fr_writer = kaldiio.WriteHelper(
            "ark,scp:{o}.ark,{o}.scp".format(o=args.out.replace("feats", "focus_rates"))
        )

    # start decoding
    for idx, utt_id in enumerate(js.keys()):
        # setup inputs
        batch = [(utt_id, js[utt_id])]
        data = load_inputs_and_targets(batch)
        x = torch.LongTensor(data[0][0]).to(device)
        spemb = None
        if train_args.use_speaker_embedding:
            spemb = torch.FloatTensor(data[1][0]).to(device)

        # decode and write
        start_time = time.time()
        outs, probs, att_ws = model.inference(x, args, spemb=spemb)
        logging.info(
            "inference speed = %.1f frames / sec."
            % (int(outs.size(0)) / (time.time() - start_time))
        )
        if outs.size(0) == x.size(0) * args.maxlenratio:
            logging.warning("output length reaches maximum length (%s)." % utt_id)
        focus_rate = _calculate_focus_rete(att_ws)
        logging.info(
            "(%d/%d) %s (size: %d->%d, focus rate: %.3f)"
            % (idx + 1, len(js.keys()), utt_id, x.size(0), outs.size(0), focus_rate)
        )
        feat_writer[utt_id] = outs.cpu().numpy()
        if args.save_durations:
            ds = _convert_att_to_duration(att_ws)
            dur_writer[utt_id] = ds.cpu().numpy()
        if args.save_focus_rates:
            fr_writer[utt_id] = np.array(focus_rate).reshape(1, 1)

        # plot and save prob and att_ws
        if probs is not None:
            _plot_and_save(
                probs.cpu().numpy(),
                os.path.dirname(args.out) + "/probs/%s_prob.png" % utt_id,
            )
        if att_ws is not None:
            _plot_and_save(
                att_ws.cpu().numpy(),
                os.path.dirname(args.out) + "/att_ws/%s_att_ws.png" % utt_id,
            )

    # close file object
    feat_writer.close()
    if args.save_durations:
        dur_writer.close()
    if args.save_focus_rates:
        fr_writer.close()
| 26,532 | 34.614765 | 88 | py |
espnet | espnet-master/espnet/utils/spec_augment.py | # -*- coding: utf-8 -*-
"""
This implementation is modified from https://github.com/zcaceres/spec_augment
MIT License
Copyright (c) 2019 Zach Caceres
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
import torch
def specaug(
    spec, W=5, F=30, T=40, num_freq_masks=2, num_time_masks=2, replace_with_zero=False
):
    """SpecAugment

    Reference:
        SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition
        (https://arxiv.org/pdf/1904.08779.pdf)

    This implementation modified from https://github.com/zcaceres/spec_augment

    :param torch.Tensor spec: input tensor with the shape (T, dim)
    :param int W: time warp parameter
    :param int F: maximum width of each freq mask
    :param int T: maximum width of each time mask
    :param int num_freq_masks: number of frequency masks
    :param int num_time_masks: number of time masks
    :param bool replace_with_zero: if True, masked parts will be filled with 0,
        if False, filled with mean
    """
    # Apply the three augmentations in sequence: warp, then mask frequencies,
    # then mask time frames.
    warped = time_warp(spec, W=W)
    freq_masked = freq_mask(
        warped,
        F=F,
        num_masks=num_freq_masks,
        replace_with_zero=replace_with_zero,
    )
    return time_mask(
        freq_masked,
        T=T,
        num_masks=num_time_masks,
        replace_with_zero=replace_with_zero,
    )
def time_warp(spec, W=5):
    """Time warping

    :param torch.Tensor spec: input tensor with shape (T, dim)
    :param int W: time warp parameter
    """
    batched = spec.unsqueeze(0)
    num_frames = batched.shape[1]
    num_bins = batched.shape[2]
    device = batched.device

    # Pick a random anchor point on the center frequency bin, keeping a
    # margin of W frames from either edge.
    center_row = num_bins // 2
    center_line = batched[0, :, center_row]
    assert len(center_line) == num_frames
    point_to_warp = center_line[random.randrange(W, num_frames - W)]
    assert isinstance(point_to_warp, torch.Tensor)

    # Shift the anchor by a random offset in (-W, W).
    dist_to_warp = random.randrange(-W, W)
    src_pts = torch.tensor([[[point_to_warp, center_row]]], device=device)
    dest_pts = torch.tensor(
        [[[point_to_warp + dist_to_warp, center_row]]], device=device
    )
    warped_spectro, dense_flows = sparse_image_warp(batched, src_pts, dest_pts)
    return warped_spectro.squeeze(3).squeeze(0)
def freq_mask(spec, F=30, num_masks=1, replace_with_zero=False):
    """Frequency masking

    :param torch.Tensor spec: input tensor with shape (T, dim)
    :param int F: maximum width of each mask
    :param int num_masks: number of masks
    :param bool replace_with_zero: if True, masked parts will be filled with 0,
        if False, filled with mean
    :return torch.Tensor: masked copy of the input (the input is not modified)
    """
    cloned = spec.unsqueeze(0).clone()
    num_mel_channels = cloned.shape[2]

    for i in range(0, num_masks):
        f = random.randrange(0, F)
        f_zero = random.randrange(0, num_mel_channels - f)

        # Skip zero-width masks (randrange would fail on an empty range).
        # BUGFIX: this used to `return` early, silently dropping all the
        # remaining masks whenever a zero width was drawn.
        if f == 0:
            continue
        mask_end = random.randrange(f_zero, f_zero + f)
        if replace_with_zero:
            cloned[0][:, f_zero:mask_end] = 0
        else:
            cloned[0][:, f_zero:mask_end] = cloned.mean()
    return cloned.squeeze(0)
def time_mask(spec, T=40, num_masks=1, replace_with_zero=False):
    """Time masking

    :param torch.Tensor spec: input tensor with shape (T, dim)
    :param int T: maximum width of each mask
    :param int num_masks: number of masks
    :param bool replace_with_zero: if True, masked parts will be filled with 0,
        if False, filled with mean
    :return torch.Tensor: masked copy of the input (the input is not modified)
    """
    cloned = spec.unsqueeze(0).clone()
    len_spectro = cloned.shape[1]

    for i in range(0, num_masks):
        t = random.randrange(0, T)
        t_zero = random.randrange(0, len_spectro - t)

        # Skip zero-width masks (randrange would fail on an empty range).
        # BUGFIX: this used to `return` early, silently dropping all the
        # remaining masks whenever a zero width was drawn.
        if t == 0:
            continue
        mask_end = random.randrange(t_zero, t_zero + t)
        if replace_with_zero:
            cloned[0][t_zero:mask_end, :] = 0
        else:
            cloned[0][t_zero:mask_end, :] = cloned.mean()
    return cloned.squeeze(0)
def sparse_image_warp(
    img_tensor,
    source_control_point_locations,
    dest_control_point_locations,
    interpolation_order=2,
    regularization_weight=0.0,
    num_boundaries_points=0,
):
    """Warp an image so the source control points move to their destinations.

    The sparse flows at the control points are interpolated to a dense
    per-pixel flow field with a polyharmonic spline, then used to warp.

    Returns a tuple of (warped image, dense flow field).
    """
    device = img_tensor.device
    batch_size, image_height, image_width = img_tensor.shape

    # Displacement of each control point.
    control_point_flows = dest_control_point_locations - source_control_point_locations

    # Interpolate the sparse flows onto every pixel location.
    flattened_grid_locations = get_flat_grid_locations(
        image_height, image_width, device
    )
    flattened_flows = interpolate_spline(
        dest_control_point_locations,
        control_point_flows,
        flattened_grid_locations,
        interpolation_order,
        regularization_weight,
    )
    dense_flows = create_dense_flows(
        flattened_flows, batch_size, image_height, image_width
    )

    warped_image = dense_image_warp(img_tensor, dense_flows)
    return warped_image, dense_flows
def get_grid_locations(image_height, image_width, device):
    """Return an (H, W, 2) tensor of (y, x) pixel coordinates."""
    ys = torch.linspace(0, image_height - 1, image_height, device=device)
    xs = torch.linspace(0, image_width - 1, image_width, device=device)
    grid_y, grid_x = torch.meshgrid(ys, xs)
    return torch.stack((grid_y, grid_x), -1)
def flatten_grid_locations(grid_locations, image_height, image_width):
    """Reshape an (H, W, 2) coordinate grid to (H * W, 2)."""
    return grid_locations.reshape(image_height * image_width, 2)
def get_flat_grid_locations(image_height, image_width, device):
    """Return an (H * W, 2) tensor of (y, x) coordinates in row-major order."""
    ys = torch.linspace(0, image_height - 1, image_height, device=device)
    xs = torch.linspace(0, image_width - 1, image_width, device=device)
    grid_y, grid_x = torch.meshgrid(ys, xs)
    stacked = torch.stack((grid_y, grid_x), -1)
    return stacked.reshape(image_height * image_width, 2)
def create_dense_flows(flattened_flows, batch_size, image_height, image_width):
    """Reshape flattened per-pixel flows back to (B, H, W, 2)."""
    return flattened_flows.reshape(batch_size, image_height, image_width, 2)
def interpolate_spline(
    train_points,
    train_values,
    query_points,
    order,
    regularization_weight=0.0,
):
    """Interpolate values at query points with a polyharmonic spline."""
    # Fit the spline coefficients to the observed data...
    w, v = solve_interpolation(train_points, train_values, order, regularization_weight)
    # ...then evaluate the fitted spline at the query locations.
    return apply_interpolation(query_points, train_points, w, v, order)
def solve_interpolation(train_points, train_values, order, regularization_weight):
    """Solve for polyharmonic spline coefficients.

    Args:
        train_points: [b, n, d] interpolation centers
        train_values: [b, n, k] values observed at the centers
        order: interpolation order (see `phi`)
        regularization_weight: unused here; kept for interface compatibility

    Returns:
        tuple: (w, v) with w [b, n, k] weighting the rbf centers and
            v [b, d + 1, k] weighting the linear (affine) term.
    """
    device = train_points.device
    b, n, d = train_points.shape
    k = train_values.shape[-1]

    c = train_points
    f = train_values.float()

    matrix_a = phi(cross_squared_distance_matrix(c, c), order).unsqueeze(0)  # [b, n, n]

    # Append ones to the feature values for the bias term in the linear model.
    ones = torch.ones(1, dtype=train_points.dtype, device=device).view([-1, 1, 1])
    matrix_b = torch.cat((c, ones), 2).float()  # [b, n, d + 1]

    # [b, n + d + 1, n]
    left_block = torch.cat((matrix_a, torch.transpose(matrix_b, 2, 1)), 1)

    num_b_cols = matrix_b.shape[2]  # d + 1

    # In Tensorflow, zeros are used here. Pytorch solve fails with zeros
    # for some reason we don't understand.
    # So instead we use very tiny randn values (variance of one, zero mean)
    # on one side of our multiplication.
    lhs_zeros = torch.randn((b, num_b_cols, num_b_cols), device=device) / 1e10
    right_block = torch.cat((matrix_b, lhs_zeros), 1)  # [b, n + d + 1, d + 1]
    lhs = torch.cat((left_block, right_block), 2)  # [b, n + d + 1, n + d + 1]

    rhs_zeros = torch.zeros(
        (b, d + 1, k), dtype=train_points.dtype, device=device
    ).float()
    rhs = torch.cat((f, rhs_zeros), 1)  # [b, n + d + 1, k]

    # Then, solve the linear system and unpack the results.
    # BUGFIX: torch.gesv was removed from PyTorch (deprecated in 1.0, gone by
    # 1.3); torch.linalg.solve is the modern equivalent (solves lhs @ X = rhs).
    if hasattr(torch, "linalg") and hasattr(torch.linalg, "solve"):
        X = torch.linalg.solve(lhs, rhs)
    else:  # pragma: no cover - fallback for very old pytorch versions
        X, _ = torch.gesv(rhs, lhs)
    w = X[:, :n, :]
    v = X[:, n:, :]

    return w, v
def cross_squared_distance_matrix(x, y):
    """Pairwise squared distance between two (batch) matrices' rows (2nd dim).

    Computes the pairwise distances between rows of x and rows of y

    Args:
        x: [batch_size, n, d] float `Tensor`
        y: [batch_size, m, d] float `Tensor`

    Returns:
        squared_dists: [n, m] float `Tensor`, where
            squared_dists[i,j] = ||x[0,i,:] - y[0,j,:]||^2

    NOTE: only the first batch element is used; batch_size is assumed to be 1
    throughout this module (the 2D return shape matches the previous version).
    """
    x_flat = x.squeeze(0)  # [n, d]
    y_flat = y.squeeze(0)  # [m, d]

    # ||x_i - y_j||^2 = ||x_i||^2 - 2 x_i . y_j + ||y_j||^2, per row.
    # BUGFIX: the previous implementation summed the squared norms over *all*
    # rows (a scalar), which offset every distance by a constant and broke the
    # pairwise semantics documented above for n > 1 or m > 1.
    x_norm_squared = torch.sum(x_flat * x_flat, dim=1, keepdim=True)  # [n, 1]
    y_norm_squared = torch.sum(y_flat * y_flat, dim=1).unsqueeze(0)  # [1, m]
    x_y_transpose = torch.matmul(x_flat, y_flat.transpose(0, 1))  # [n, m]

    squared_dists = x_norm_squared - 2 * x_y_transpose + y_norm_squared
    # Guard against tiny negative values from floating point cancellation.
    squared_dists = torch.clamp(squared_dists, min=0.0)
    return squared_dists.float()
def phi(r, order):
    """Coordinate-wise nonlinearity used to define the order of the interpolation.

    See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.

    Args:
        r: input op
        order: interpolation order

    Returns:
        phi_k evaluated coordinate-wise on r, for k = r
    """
    # Clamping by a tiny epsilon prevents log(0) and sqrt(0);
    # sqrt(0) is well-defined, but its gradient is not.
    eps = torch.tensor(1e-10, device=r.device)
    if order == 1:
        return torch.sqrt(torch.max(r, eps))
    if order == 2:
        return 0.5 * r * torch.log(torch.max(r, eps))
    if order == 4:
        return 0.5 * torch.square(r) * torch.log(torch.max(r, eps))
    clamped = torch.max(r, eps)
    if order % 2 == 0:
        return 0.5 * torch.pow(clamped, 0.5 * order) * torch.log(clamped)
    return torch.pow(clamped, 0.5 * order)
def apply_interpolation(query_points, train_points, w, v, order):
    """Apply polyharmonic interpolation model to data.

    Notes:
        Given coefficients w and v for the interpolation model, we evaluate
        interpolated function values at query_points.

    Args:
        query_points: `[b, m, d]` x values to evaluate the interpolation at
        train_points: `[b, n, d]` x values that act as the interpolation centers
            ( the c variables in the wikipedia article)
        w: `[b, n, k]` weights on each interpolation center
        v: `[b, d, k]` weights on each input dimension
        order: order of the interpolation

    Returns:
        Polyharmonic interpolation evaluated at points defined in query_points.
    """
    query_points = query_points.unsqueeze(0)

    # RBF term: phi of distances to the interpolation centers, weighted by w.
    pairwise_dists = cross_squared_distance_matrix(
        query_points.float(), train_points.float()
    )
    rbf_term = torch.matmul(phi(pairwise_dists, order), w)

    # Linear term: affine model over the query coordinates
    # (the appended ones provide the bias).
    ones = torch.ones_like(query_points[..., :1])
    query_points_pad = torch.cat((query_points, ones), 2).float()
    linear_term = torch.matmul(query_points_pad, v)

    return rbf_term + linear_term
def dense_image_warp(image, flow):
    """Image warping using per-pixel flow vectors.

    The pixel value at output[b, j, i, c] is sampled (bilinearly) from
    images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c]; locations
    outside the image use the nearest boundary pixels.

    Args:
        image: 3-D float `Tensor` with shape `[batch, height, width]`
            (a channel dimension is added internally).
        flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.

    Returns:
        A 4-D float `Tensor` with shape `[batch, height, width, channels]`
        and same type as input image.
    """
    image = image.unsqueeze(3)  # add a single channel dimension to image tensor
    batch_size, height, width, channels = image.shape
    device = image.device

    # The flow is defined on the image grid. Build the (y, x) grid and offset
    # it by the flow to obtain the query points.
    grid_x, grid_y = torch.meshgrid(
        torch.arange(width, device=device), torch.arange(height, device=device)
    )
    stacked_grid = torch.stack((grid_y, grid_x), dim=2).float()
    batched_grid = stacked_grid.unsqueeze(-1).permute(3, 1, 0, 2)
    query_points_on_grid = batched_grid - flow
    query_points_flattened = query_points_on_grid.reshape(
        batch_size, height * width, 2
    )

    # Sample the image at the query points, then restore the grid shape.
    interpolated = interpolate_bilinear(image, query_points_flattened)
    return interpolated.reshape(batch_size, height, width, channels)
def interpolate_bilinear(
    grid, query_points, name="interpolate_bilinear", indexing="ij"
):
    """Similar to Matlab's interp2 function.

    Notes:
        Finds values for query points on a grid using bilinear interpolation.
        Query coordinates are clamped to the grid, so points outside the
        image reuse the nearest boundary pixels.

    Args:
        grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
        query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.
        name: a name for the operation (unused; kept for API compatibility).
        indexing: whether the query points are specified as row and column (ij),
            or Cartesian coordinates (xy).

    Returns:
        values: a 3-D `Tensor` with shape `[batch, N, channels]`

    Raises:
        ValueError: if the indexing mode is invalid, or if the shape of the inputs
            invalid.
    """
    if indexing != "ij" and indexing != "xy":
        raise ValueError("Indexing mode must be 'ij' or 'xy'")
    shape = grid.shape
    if len(shape) != 4:
        msg = "Grid must be 4 dimensional. Received size: "
        raise ValueError(msg + str(grid.shape))
    batch_size, height, width, channels = grid.shape
    shape = [batch_size, height, width, channels]
    query_type = query_points.dtype
    grid_type = grid.dtype
    grid_device = grid.device
    num_queries = query_points.shape[1]
    alphas = []
    floors = []
    ceils = []
    # For "ij" the first query coordinate indexes rows (height); for "xy"
    # it indexes columns (width).
    index_order = [0, 1] if indexing == "ij" else [1, 0]
    unstacked_query_points = query_points.unbind(2)
    for dim in index_order:
        queries = unstacked_query_points[dim]
        size_in_indexing_dimension = shape[dim + 1]
        # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
        # is still a valid index into the grid.
        max_floor = torch.tensor(
            size_in_indexing_dimension - 2, dtype=query_type, device=grid_device
        )
        min_floor = torch.tensor(0.0, dtype=query_type, device=grid_device)
        floor = torch.min(torch.max(min_floor, torch.floor(queries)), max_floor)
        int_floor = floor.long()
        floors.append(int_floor)
        ceils.append(int_floor + 1)
        # alpha has the same type as the grid, as we will directly use alpha
        # when taking linear combinations of pixel values from the image.
        # Bug fix: use ``.to()`` instead of ``torch.tensor(tensor)``, which
        # detaches gradients and raises a copy-construct UserWarning.
        alpha = (queries - floor).to(dtype=grid_type, device=grid_device)
        min_alpha = torch.tensor(0.0, dtype=grid_type, device=grid_device)
        max_alpha = torch.tensor(1.0, dtype=grid_type, device=grid_device)
        alpha = torch.min(torch.max(min_alpha, alpha), max_alpha)
        # Expand alpha to [b, n, 1] so we can use broadcasting
        # (since the alpha values don't depend on the channel).
        alphas.append(torch.unsqueeze(alpha, 2))
    flattened_grid = torch.reshape(grid, [batch_size * height * width, channels])
    batch_offsets = torch.reshape(
        torch.arange(batch_size, device=grid_device) * height * width, [batch_size, 1]
    )

    def gather(y_coords, x_coords, name):
        # Linear row index of each (b, y, x) into the flattened [B*H*W, C] grid.
        linear_coordinates = (batch_offsets + y_coords * width + x_coords).reshape(-1)
        # Bug fix: the previous ``torch.gather(flattened_grid.t(), 1, ...)``
        # only worked for batch_size == channels == 1 (gather requires
        # index.size(0) <= input.size(0)); selecting whole rows of the
        # flattened grid is correct for any batch size and channel count.
        gathered_values = flattened_grid.index_select(0, linear_coordinates)
        return torch.reshape(gathered_values, [batch_size, num_queries, channels])

    # grab the pixel values in the 4 corners around each query point
    top_left = gather(floors[0], floors[1], "top_left")
    top_right = gather(floors[0], ceils[1], "top_right")
    bottom_left = gather(ceils[0], floors[1], "bottom_left")
    bottom_right = gather(ceils[0], ceils[1], "bottom_right")
    # interpolate along x first, then along y
    interp_top = alphas[1] * (top_right - top_left) + top_left
    interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left
    interp = alphas[0] * (interp_bottom - interp_top) + interp_top
    return interp
| 18,458 | 35.844311 | 88 | py |
espnet | espnet-master/espnet/utils/dataset.py | #!/usr/bin/env python
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""pytorch dataset and dataloader implementation for chainer training."""
import torch
import torch.utils.data
class Transform:
    """Picklable pairing of a batch converter with a data loader.

    A plain ``lambda`` is not picklable, which breaks multi-process DDP
    training; wrapping the two callables in a class avoids that.

    Args:
        converter: batch converter applied to the loaded minibatch
        load: function object to load data and create minibatch
    """

    def __init__(self, converter, load):
        """Store the converter and loader callables."""
        self._converter = converter
        self._load = load

    def __call__(self, data):
        """Load ``data`` and convert the resulting singleton batch."""
        loaded = self._load(data)
        return self._converter([loaded])
class TransformDataset(torch.utils.data.Dataset):
    """Transform Dataset for pytorch backend.

    Applies ``transform`` lazily in ``__getitem__`` so minibatches are
    built on the fly by the DataLoader workers.

    Args:
        data: list object from make_batchset
        transform: transform function applied to each retrieved element
    """

    def __init__(self, data, transform):
        """Init function.

        Bug fix: the original ``super(TransformDataset).__init__()`` built an
        unbound super object and never ran the parent ``Dataset`` initializer;
        call it properly.
        """
        super().__init__()
        self.data = data
        self.transform = transform

    def __len__(self):
        """Return the number of items in the wrapped data list."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the transformed element at ``idx``."""
        return self.transform(self.data[idx])
class ChainerDataLoader(object):
    """Pytorch dataloader in chainer style.

    Wraps ``torch.utils.data.DataLoader`` and exposes the iterator protocol
    chainer's trainer expects (``next``, ``epoch_detail``, ``serialize``,
    ``finalize``), plus ``start_shuffle`` for SortaGrad-style training.

    Args:
        all args for torch.utils.data.dataloader.Dataloader
    """
    @staticmethod
    def get_first_element(x):
        """Get first element of a given array-like object."""
        # Used as a collate_fn so the already-batched dataset element is
        # passed through unchanged.
        return x[0]
    def __init__(self, **kwargs):
        """Init function."""
        self.loader = torch.utils.data.dataloader.DataLoader(**kwargs)
        if hasattr(self.loader, "__len__"):
            # To support DistributedSampler.
            # When using DDP, the size of dataset itself is different from
            # the size returned by DataLoader.
            # Unless using length of dataloader, at the end of iterations,
            # this loader class can't recognize the end of each epoch.
            self.len = len(self.loader)
        else:
            self.len = len(kwargs["dataset"])
        # Number of minibatches consumed within the current epoch.
        self.current_position = 0
        # Number of completed epochs.
        self.epoch = 0
        # Lazily created iterator over ``self.loader`` (see ``next``).
        self.iter = None
        # Kept so ``start_shuffle`` can rebuild the DataLoader with shuffle=True.
        self.kwargs = kwargs
    def next(self):
        """Implement next function."""
        if self.iter is None:
            self.iter = iter(self.loader)
        try:
            ret = next(self.iter)
        except StopIteration:
            # Loader exhausted: drop the iterator and recurse once to start
            # a fresh pass (epoch accounting happens below, not here).
            self.iter = None
            return self.next()
        self.current_position += 1
        if self.current_position == self.len:
            # A full pass over the data has been consumed.
            self.epoch = self.epoch + 1
            self.current_position = 0
        return ret
    def __iter__(self):
        """Implement iter function."""
        for batch in self.loader:
            yield batch
    @property
    def epoch_detail(self):
        """Epoch_detail required by chainer."""
        # Fractional epoch, e.g. 1.5 means halfway through the second epoch.
        return self.epoch + self.current_position / self.len
    def serialize(self, serializer):
        """Serialize and deserialize function."""
        # ``serializer`` both saves and restores depending on its direction;
        # assigning the returned values back covers the load case.
        epoch = serializer("epoch", self.epoch)
        current_position = serializer("current_position", self.current_position)
        self.epoch = epoch
        self.current_position = current_position
    def start_shuffle(self):
        """Shuffle function for sortagrad."""
        # Rebuild the underlying DataLoader with shuffling enabled from now on.
        self.kwargs["shuffle"] = True
        self.loader = torch.utils.data.dataloader.DataLoader(**self.kwargs)
    def finalize(self):
        """Implement finalize function."""
        # Release the DataLoader (and any worker processes it owns).
        del self.loader
| 3,703 | 27.9375 | 80 | py |
espnet | espnet-master/espnet/utils/fill_missing_args.py | # -*- coding: utf-8 -*-
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
def fill_missing_args(args, add_arguments):
    """Fill missing arguments in args.

    Defaults are discovered by running ``add_arguments`` on an empty parser;
    any attribute absent from ``args`` is filled in (mutating the namespace's
    underlying dict in place) and logged.

    Args:
        args (Namespace or None): Namespace containing hyperparameters.
        add_arguments (function): Function to add arguments.

    Returns:
        Namespace: Arguments whose missing ones are filled with default value.

    Examples:
        >>> from argparse import Namespace
        >>> from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import Tacotron2
        >>> args = Namespace()
        >>> fill_missing_args(args, Tacotron2.add_arguments_fn)
        Namespace(aconv_chans=32, aconv_filts=15, adim=512, atype='location', ...)
    """
    # check argument type
    assert args is None or isinstance(args, argparse.Namespace)
    assert callable(add_arguments)
    # Obtain defaults from a throwaway parser populated by ``add_arguments``.
    defaults, _ = add_arguments(argparse.ArgumentParser()).parse_known_args()
    given = {} if args is None else vars(args)
    for name, default in vars(defaults).items():
        if name in given:
            continue
        logging.info(
            'attribute "%s" does not exist. use default %s.' % (name, str(default))
        )
        given[name] = default
    return argparse.Namespace(**given)
| 1,426 | 29.361702 | 84 | py |
espnet | espnet-master/espnet/utils/deterministic_utils.py | import logging
import os
import chainer
import torch
def set_deterministic_pytorch(args):
    """Ensures pytorch produces deterministic results depending on the program arguments

    :param Namespace args: The program arguments
    """
    # Fix the RNG seed first so initialization and shuffling are reproducible.
    torch.manual_seed(args.seed)
    # Default to reproducible (but slower) cuDNN behaviour; debugmode 0
    # below trades determinism back for speed.
    torch.backends.cudnn.deterministic = True
    # https://github.com/pytorch/pytorch/issues/6351
    torch.backends.cudnn.benchmark = False
    if args.debugmode < 2:
        # remove type check (0 would be fastest, but 1 seems to be
        # reasonable considering reproducibility)
        chainer.config.type_check = False
        logging.info("torch type check is disabled")
    if args.debugmode < 1:
        # fastest mode: allow non-deterministic cudnn algorithm selection
        torch.backends.cudnn.deterministic = False
        torch.backends.cudnn.benchmark = True
        logging.info("torch cudnn deterministic is disabled")
def set_deterministic_chainer(args):
    """Ensures chainer produces deterministic results depending on the program arguments

    :param Namespace args: The program arguments
    """
    # chainer reads its seed from the environment (chainer seed may not need it)
    os.environ["CHAINER_SEED"] = str(args.seed)
    logging.info("chainer seed = " + os.environ["CHAINER_SEED"])
    if args.debugmode < 2:
        # remove type check (0 would be fastest, but 1 seems to be
        # reasonable considering reproducibility)
        chainer.config.type_check = False
        logging.info("chainer type check is disabled")
    # use deterministic computation or not
    use_deterministic = args.debugmode >= 1
    chainer.config.cudnn_deterministic = use_deterministic
    if not use_deterministic:
        logging.info("chainer cudnn deterministic is disabled")
| 1,794 | 31.053571 | 88 | py |
espnet | espnet-master/espnet/utils/training/iterators.py | import chainer
import numpy as np
from chainer.iterators import MultiprocessIterator, SerialIterator, ShuffleOrderSampler
from chainer.training.extension import Extension
class ShufflingEnabler(Extension):
    """An extension enabling shuffling on an Iterator"""

    def __init__(self, iterators):
        """Inits the ShufflingEnabler

        :param list[Iterator] iterators: The iterators to enable shuffling on
        """
        self.set = False
        self.iterators = iterators

    def __call__(self, trainer):
        """Calls the enabler on the given iterator

        :param trainer: The iterator
        """
        # Only trigger once; subsequent trainer calls are no-ops.
        if self.set:
            return
        for it in self.iterators:
            it.start_shuffle()
        self.set = True
class ToggleableShufflingSerialIterator(SerialIterator):
    """A SerialIterator having its shuffling property activated during training"""

    def __init__(self, dataset, batch_size, repeat=True, shuffle=True):
        """Init the Iterator

        :param torch.nn.Tensor dataset: The dataset to take batches from
        :param int batch_size: The batch size
        :param bool repeat: Whether to repeat data (allow multiple epochs)
        :param bool shuffle: Whether to shuffle the batches
        """
        super().__init__(dataset, batch_size, repeat, shuffle)

    def start_shuffle(self):
        """Starts shuffling (or reshuffles) the batches"""
        self._shuffle = True
        legacy_chainer = int(chainer._version.__version__[0]) <= 4
        if legacy_chainer:
            # chainer<=4 stores the order as a plain permutation array
            self._order = np.random.permutation(len(self.dataset))
            return
        # chainer>=5 delegates ordering to an order sampler
        self.order_sampler = ShuffleOrderSampler()
        self._order = self.order_sampler(np.arange(len(self.dataset)), 0)
class ToggleableShufflingMultiprocessIterator(MultiprocessIterator):
    """A MultiprocessIterator having its shuffling property activated during training"""

    def __init__(
        self,
        dataset,
        batch_size,
        repeat=True,
        shuffle=True,
        n_processes=None,
        n_prefetch=1,
        shared_mem=None,
        maxtasksperchild=20,
    ):
        """Init the iterator

        :param torch.nn.Tensor dataset: The dataset to take batches from
        :param int batch_size: The batch size
        :param bool repeat: Whether to repeat batches or not (enables multiple epochs)
        :param bool shuffle: Whether to shuffle the order of the batches
        :param int n_processes: How many processes to use
        :param int n_prefetch: The number of prefetch to use
        :param int shared_mem: How many memory to share between processes
        :param int maxtasksperchild: Maximum number of tasks per child
        """
        super().__init__(
            dataset=dataset,
            batch_size=batch_size,
            repeat=repeat,
            shuffle=shuffle,
            n_processes=n_processes,
            n_prefetch=n_prefetch,
            shared_mem=shared_mem,
            maxtasksperchild=maxtasksperchild,
        )

    def start_shuffle(self):
        """Starts shuffling (or reshuffles) the batches"""
        self.shuffle = True
        n = len(self.dataset)
        if int(chainer._version.__version__[0]) <= 4:
            # chainer<=4 stores the order as a plain permutation array
            self._order = np.random.permutation(n)
        else:
            # chainer>=5 delegates ordering to an order sampler
            self.order_sampler = ShuffleOrderSampler()
            self._order = self.order_sampler(np.arange(n), 0)
        # Propagate the new order to the prefetch worker processes.
        self._set_prefetch_state()
| 3,526 | 34.626263 | 88 | py |
espnet | espnet-master/espnet/optimizer/pytorch.py | """PyTorch optimizer builders."""
import argparse
import torch
from espnet.optimizer.factory import OptimizerFactoryInterface
from espnet.optimizer.parser import adadelta, adam, sgd
class AdamFactory(OptimizerFactoryInterface):
    """Factory constructing ``torch.optim.Adam`` optimizers."""

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register Adam-specific command-line args on the parser."""
        return adam(parser)

    @staticmethod
    def from_args(target, args: argparse.Namespace):
        """Initialize optimizer from argparse Namespace.

        Args:
            target: for pytorch `model.parameters()`,
                for chainer `model`
            args (argparse.Namespace): parsed command-line args
        """
        options = {
            "lr": args.lr,
            "weight_decay": args.weight_decay,
            "betas": (args.beta1, args.beta2),
        }
        return torch.optim.Adam(target, **options)
class SGDFactory(OptimizerFactoryInterface):
    """Factory constructing ``torch.optim.SGD`` optimizers."""

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register SGD-specific command-line args on the parser."""
        return sgd(parser)

    @staticmethod
    def from_args(target, args: argparse.Namespace):
        """Initialize optimizer from argparse Namespace.

        Args:
            target: for pytorch `model.parameters()`,
                for chainer `model`
            args (argparse.Namespace): parsed command-line args
        """
        options = {"lr": args.lr, "weight_decay": args.weight_decay}
        return torch.optim.SGD(target, **options)
class AdadeltaFactory(OptimizerFactoryInterface):
    """Factory constructing ``torch.optim.Adadelta`` optimizers."""

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register Adadelta-specific command-line args on the parser."""
        return adadelta(parser)

    @staticmethod
    def from_args(target, args: argparse.Namespace):
        """Initialize optimizer from argparse Namespace.

        Args:
            target: for pytorch `model.parameters()`,
                for chainer `model`
            args (argparse.Namespace): parsed command-line args
        """
        options = {
            "rho": args.rho,
            "eps": args.eps,
            "weight_decay": args.weight_decay,
        }
        return torch.optim.Adadelta(target, **options)
# Mapping from optimizer alias name to its factory class; consumed by
# espnet.optimizer.factory.dynamic_import_optimizer for the pytorch backend.
OPTIMIZER_FACTORY_DICT = {
    "adam": AdamFactory,
    "sgd": SGDFactory,
    "adadelta": AdadeltaFactory,
}
| 2,399 | 25.086957 | 82 | py |
espnet | espnet-master/espnet/optimizer/factory.py | """Import optimizer class dynamically."""
import argparse
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.fill_missing_args import fill_missing_args
class OptimizerFactoryInterface:
    """Optimizer adaptor.

    Subclasses implement ``from_args`` (build the optimizer) and optionally
    override ``add_arguments`` to register their hyperparameter flags.
    """

    @staticmethod
    def from_args(target, args: argparse.Namespace):
        """Initialize optimizer from argparse Namespace.

        Args:
            target: for pytorch `model.parameters()`,
                for chainer `model`
            args (argparse.Namespace): parsed command-line args
        """
        raise NotImplementedError()

    @staticmethod
    def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register args (default: no extra args)."""
        return parser

    @classmethod
    def build(cls, target, **kwargs):
        """Initialize optimizer with python-level args.

        Args:
            target: for pytorch `model.parameters()`,
                for chainer `model`

        Returns:
            new Optimizer
        """
        # Fill anything not given in kwargs with the parser defaults.
        ns = argparse.Namespace(**kwargs)
        ns = fill_missing_args(ns, cls.add_arguments)
        return cls.from_args(target, ns)
def dynamic_import_optimizer(name: str, backend: str) -> OptimizerFactoryInterface:
    """Import optimizer class dynamically.

    Args:
        name (str): alias name or dynamic import syntax `module:class`
        backend (str): backend name e.g., chainer or pytorch

    Returns:
        OptimizerFactoryInterface or FunctionalOptimizerAdaptor

    Raises:
        NotImplementedError: if the backend is neither pytorch nor chainer.
    """
    if backend == "pytorch":
        from espnet.optimizer.pytorch import OPTIMIZER_FACTORY_DICT
    elif backend == "chainer":
        from espnet.optimizer.chainer import OPTIMIZER_FACTORY_DICT
    else:
        raise NotImplementedError(f"unsupported backend: {backend}")
    # Bug fix: the ``module:class`` fallback below was unreachable because
    # both known backends returned from the dict unconditionally, so the
    # documented dynamic-import syntax raised KeyError instead. Only use the
    # alias table when the alias is actually registered.
    if name in OPTIMIZER_FACTORY_DICT:
        return OPTIMIZER_FACTORY_DICT[name]
    factory_class = dynamic_import(name)
    assert issubclass(factory_class, OptimizerFactoryInterface)
    return factory_class
| 2,013 | 27.771429 | 83 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.