repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
espnet | espnet-master/test/espnet2/asr/transducer/test_transducer_error_calculator.py | import pytest
import torch
from espnet2.asr.decoder.transducer_decoder import TransducerDecoder
from espnet2.asr.transducer.error_calculator import ErrorCalculatorTransducer
from espnet2.asr_transducer.joint_network import JointNetwork
@pytest.mark.parametrize(
    "report_opts",
    [
        {"report_cer": False, "report_wer": False},
        {"report_cer": True, "report_wer": True},
    ],
)
def test_transducer_error_calculator(report_opts):
    """Smoke-test ErrorCalculatorTransducer with CER/WER reporting on and off."""
    token_list = ["<blank>", "a", "b", "c", "<space>"]
    n_vocab = len(token_list)
    enc_size = 4
    dec_size = 4
    decoder = TransducerDecoder(n_vocab, hidden_size=dec_size)
    joint_net = JointNetwork(
        n_vocab, enc_size, dec_size, joint_space_size=2
    )
    error_calc = ErrorCalculatorTransducer(
        decoder,
        joint_net,
        token_list,
        "<space>",
        "<blank>",
        **report_opts,
    )
    enc_out = torch.randn(4, 30, enc_size)
    target = torch.randint(0, n_vocab, [4, 20], dtype=torch.int32)
    # Error computation is decode-only; no gradients required.
    with torch.no_grad():
        _, _ = error_calc(enc_out, target)
| 1,178 | 25.2 | 80 | py |
espnet | espnet-master/test/espnet2/asr/specaug/test_specaug.py | import pytest
import torch
from espnet2.asr.specaug.specaug import SpecAug
@pytest.mark.parametrize("apply_time_warp", [False, True])
@pytest.mark.parametrize("apply_freq_mask", [False, True])
@pytest.mark.parametrize("apply_time_mask", [False, True])
@pytest.mark.parametrize("time_mask_width_range", [None, 100, (0, 100)])
@pytest.mark.parametrize("time_mask_width_ratio_range", [None, 0.1, (0.0, 0.1)])
def test_SpecAuc(
    apply_time_warp,
    apply_freq_mask,
    apply_time_mask,
    time_mask_width_range,
    time_mask_width_ratio_range,
):
    """SpecAug must reject invalid option combinations and run on valid ones."""
    kwargs = dict(
        apply_time_warp=apply_time_warp,
        apply_freq_mask=apply_freq_mask,
        apply_time_mask=apply_time_mask,
        time_mask_width_range=time_mask_width_range,
        time_mask_width_ratio_range=time_mask_width_ratio_range,
    )
    # Invalid: no augmentation enabled at all, or time masking enabled with
    # its width specified both ways or not at all.
    no_aug = not (apply_time_warp or apply_time_mask or apply_freq_mask)
    neither_width = (
        apply_time_mask
        and time_mask_width_range is None
        and time_mask_width_ratio_range is None
    )
    both_widths = (
        apply_time_mask
        and time_mask_width_range is not None
        and time_mask_width_ratio_range is not None
    )
    if no_aug or neither_width or both_widths:
        with pytest.raises(ValueError):
            SpecAug(**kwargs)
    else:
        specaug = SpecAug(**kwargs)
        x = torch.randn(2, 1000, 80)
        specaug(x)
@pytest.mark.parametrize("apply_time_warp", [False, True])
@pytest.mark.parametrize("apply_freq_mask", [False, True])
@pytest.mark.parametrize("apply_time_mask", [False, True])
@pytest.mark.parametrize("time_mask_width_range", [None, 100, (0, 100)])
@pytest.mark.parametrize("time_mask_width_ratio_range", [None, 0.1, (0.0, 0.1)])
def test_SpecAuc_repr(
    apply_time_warp,
    apply_freq_mask,
    apply_time_mask,
    time_mask_width_range,
    time_mask_width_ratio_range,
):
    """print()/__repr__ of a validly-configured SpecAug must not raise."""
    no_aug = not (apply_time_warp or apply_time_mask or apply_freq_mask)
    # With time masking on, exactly one of the two width options must be set;
    # "both set" and "both unset" are rejected by the constructor.
    bad_widths = apply_time_mask and (
        (time_mask_width_range is None) == (time_mask_width_ratio_range is None)
    )
    if no_aug or bad_widths:
        # Skip combinations that SpecAug itself rejects.
        return
    specaug = SpecAug(
        apply_time_warp=apply_time_warp,
        apply_freq_mask=apply_freq_mask,
        apply_time_mask=apply_time_mask,
        time_mask_width_range=time_mask_width_range,
        time_mask_width_ratio_range=time_mask_width_ratio_range,
    )
    print(specaug)
| 2,869 | 32.372093 | 80 | py |
espnet | espnet-master/test/espnet2/asr/frontend/test_s3prl.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.asr.frontend.s3prl import S3prlFrontend
is_torch_1_8_plus = V(torch.__version__) >= V("1.8.0")
def test_frontend_init():
    """Constructing S3prlFrontend yields the right type and a positive dim."""
    if not is_torch_1_8_plus:
        return  # s3prl requires torch>=1.8
    frontend = S3prlFrontend(fs=16000, frontend_conf=dict(upstream="mel"))
    assert frontend.frontend_type == "s3prl"
    assert frontend.output_size() > 0
def test_frontend_output_size():
    """Feature dim of the forward output matches output_size()."""
    # Skip some testing cases
    if not is_torch_1_8_plus:
        return  # s3prl requires torch>=1.8
    frontend = S3prlFrontend(
        fs=16000,
        frontend_conf=dict(upstream="mel"),
        download_dir="./hub",
    )
    speech = torch.randn(2, 1600)
    speech_lens = torch.LongTensor([1600, 1600])
    feats, _ = frontend(speech, speech_lens)
    assert feats.shape[-1] == frontend.output_size()
@pytest.mark.parametrize(
    "fs, frontend_conf, multilayer_feature, layer",
    [
        (16000, dict(upstream="mel"), True, -1),
        (16000, dict(upstream="mel"), False, -1),
        (16000, dict(upstream="mel", tile_factor=1), False, -1),
        (16000, dict(upstream="mel"), False, 0),
    ],
)
def test_frontend_backward(fs, frontend_conf, multilayer_feature, layer):
    """Gradients flow from the frontend features back to the waveform input."""
    if not is_torch_1_8_plus:
        return  # s3prl requires torch>=1.8
    frontend = S3prlFrontend(
        fs=fs,
        frontend_conf=frontend_conf,
        download_dir="./hub",
        multilayer_feature=multilayer_feature,
        layer=layer,
    )
    speech = torch.randn(2, 1600, requires_grad=True)
    speech_lens = torch.LongTensor([1600, 1600])
    feats, feats_lens = frontend(speech, speech_lens)
    feats.sum().backward()
| 1,627 | 24.84127 | 73 | py |
espnet | espnet-master/test/espnet2/asr/frontend/test_whisper.py | import sys
import pytest
import torch
from packaging.version import parse as V
from espnet2.asr.frontend.whisper import WhisperFrontend
pytest.importorskip("whisper")
# NOTE(Shih-Lun): required by `return_complex` in torch.stft()
is_torch_1_7_plus = V(torch.__version__) >= V("1.7.0")
is_python_3_8_plus = sys.version_info >= (3, 8)
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.fixture()
def whisper_frontend(request):
    """Shared WhisperFrontend("tiny") instance, built without gradient tracking."""
    # NOTE(review): pytest marks such as skipif have no effect when applied to
    # a fixture; the skip guard only works via the marks on the tests that
    # request this fixture — confirm and consider removing the mark here.
    with torch.no_grad():
        return WhisperFrontend("tiny")
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_frontend_init():
    """The "tiny" whisper model exposes a 384-dim feature space."""
    assert WhisperFrontend("tiny").output_size() == 384
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
def test_frontend_invalid_init():
    """An unknown whisper model tag is rejected at construction."""
    with pytest.raises(AssertionError):
        WhisperFrontend("aaa")
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_frontend_forward_no_ilens(whisper_frontend):
    """Forward without input lengths still yields (batch, frames, dim)."""
    device = next(whisper_frontend.parameters()).device
    speech = torch.randn(4, 3200, device=device)
    feats, _ = whisper_frontend(speech, None)
    assert feats.size() == torch.Size([4, 10, 384])
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_frontend_forward_ilens(whisper_frontend):
    """Per-utterance lengths are downsampled consistently with the features."""
    device = next(whisper_frontend.parameters()).device
    speech = torch.randn(4, 3200, device=device)
    speech_lens = torch.tensor([500, 1000, 1600, 3200], device=device)
    feats, feats_lens = whisper_frontend(speech, speech_lens)
    assert feats.size() == torch.Size([4, 10, 384])
    assert torch.equal(feats_lens.cpu(), torch.tensor([2, 3, 5, 10]))
| 2,194 | 27.881579 | 82 | py |
espnet | espnet-master/test/espnet2/asr/frontend/test_windowing.py | import torch
from espnet2.asr.frontend.windowing import SlidingWindow
def test_frontend_output_size():
    """output_size() equals the configured window length."""
    n_win = 400
    frontend = SlidingWindow(win_length=n_win, hop_length=32, fs="16k")
    assert frontend.output_size() == n_win
def test_frontend_forward():
    """Forward produces windowed frames with correct lengths and shape."""
    frontend = SlidingWindow(fs=160, win_length=32, hop_length=32, padding=0)
    speech = torch.randn(2, 300, requires_grad=True)
    speech_lens = torch.LongTensor([300, 89])
    frames, frame_lens = frontend(speech, speech_lens)
    frames.sum().backward()
    # Expected frame counts assume no padding; update if padding is applied.
    assert all(frame_lens == torch.tensor([9, 2]))
    assert frames.shape == torch.Size([2, 9, 1, 32])
| 694 | 30.590909 | 77 | py |
espnet | espnet-master/test/espnet2/asr/frontend/test_fused.py | import torch
from espnet2.asr.frontend.fused import FusedFrontends
# Two "default" frontend configurations that the tests below fuse together.
frontend1 = {"frontend_type": "default", "n_mels": 80, "n_fft": 512}
frontend2 = {"frontend_type": "default", "hop_length": 128}
list_frontends = [frontend1, frontend2]
def test_frontend_init():
    """FusedFrontends builds one sub-frontend (and factor) per config."""
    frontend = FusedFrontends(
        fs="16k",
        align_method="linear_projection",
        proj_dim=100,
        frontends=list_frontends,
    )
    n_frontends = len(frontend.frontends)
    assert n_frontends == 2
    assert len(frontend.factors) == n_frontends
    assert frontend.frontends[0].frontend_type == "default"
def test_frontend_output_size():
    """Fused output dim is proj_dim times the number of frontends."""
    frontend = FusedFrontends(
        fs="16k",
        align_method="linear_projection",
        proj_dim=100,
        frontends=list_frontends,
    )
    expected = 100 * len(list_frontends)
    assert frontend.output_size() == expected
def test_frontend_backward():
    """Gradients propagate through the fused frontend."""
    frontend = FusedFrontends(
        fs="16k",
        align_method="linear_projection",
        proj_dim=100,
        frontends=list_frontends,
    )
    speech = torch.randn(2, 300, requires_grad=True)
    speech_lens = torch.LongTensor([300, 89])
    feats, feats_lens = frontend(speech, speech_lens)
    feats.sum().backward()
| 1,173 | 25.681818 | 68 | py |
espnet | espnet-master/test/espnet2/asr/frontend/test_frontend.py | import pytest
import torch
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
def test_frontend_repr():
    """__repr__ of DefaultFrontend must not raise."""
    print(DefaultFrontend(fs="16k"))
def test_frontend_output_size():
    """output_size() reflects the configured number of mel bins."""
    assert DefaultFrontend(fs="16k", n_mels=40).output_size() == 40
def test_frontend_backward():
    """Gradients flow through STFT + feature extraction."""
    frontend = DefaultFrontend(
        fs=160, n_fft=128, win_length=32, hop_length=32, frontend_conf=None
    )
    speech = torch.randn(2, 300, requires_grad=True)
    speech_lens = torch.LongTensor([300, 89])
    feats, feats_lens = frontend(speech, speech_lens)
    feats.sum().backward()
@pytest.mark.parametrize("use_wpe", [True, False])
@pytest.mark.parametrize("use_beamformer", [True, False])
@pytest.mark.parametrize("train", [True, False])
def test_frontend_backward_multi_channel(train, use_wpe, use_beamformer):
    """Multi-channel enhancement (WPE / beamformer) stays differentiable."""
    frontend = DefaultFrontend(
        fs=300,
        n_fft=128,
        win_length=128,
        frontend_conf={"use_wpe": use_wpe, "use_beamformer": use_beamformer},
    )
    if train:
        frontend.train()
    else:
        frontend.eval()
    # Fix the seed so the randomized input is reproducible across params.
    set_all_random_seed(14)
    speech = torch.randn(2, 1000, 2, requires_grad=True)
    speech_lens = torch.LongTensor([1000, 980])
    feats, feats_lens = frontend(speech, speech_lens)
    feats.sum().backward()
| 1,349 | 27.723404 | 77 | py |
espnet | espnet-master/test/espnet2/asr/preencoder/test_sinc.py | import torch
from espnet2.asr.preencoder.sinc import LightweightSincConvs, SpatialDropout
def test_spatial_dropout():
    """SpatialDropout preserves the input tensor's shape."""
    layer = SpatialDropout()
    inp = torch.randn([5, 20, 40], requires_grad=True)
    out = layer(inp)
    assert inp.shape == out.shape
def test_lightweight_sinc_convolutions_output_size():
    """output_size() matches the dim observed in an actual forward pass."""
    frontend = LightweightSincConvs()
    idim = 400
    # Probe with a (1, T, 1, idim) zero tensor; T == idim is arbitrary.
    probe = torch.zeros((1, idim, 1, idim))
    out, _ = frontend.forward(probe, [idim])
    assert frontend.output_size() == out.size(2)
def test_lightweight_sinc_convolutions_forward():
    """Forward/backward works and yields the expected (B, T, 256) shape."""
    frontend = LightweightSincConvs(fs="16000")
    speech = torch.randn([2, 50, 1, 400], requires_grad=True)
    speech_lens = torch.LongTensor([30, 9])
    feats, feats_lens = frontend(speech, speech_lens)
    feats.sum().backward()
    assert feats.shape == torch.Size([2, 50, 256])
| 1,003 | 30.375 | 76 | py |
espnet | espnet-master/test/espnet2/asr/preencoder/test_linear.py | import torch
from espnet2.asr.preencoder.linear import LinearProjection
def test_linear_projection_forward():
    """LinearProjection maps the feature dim and keeps lengths untouched."""
    idim, odim = 400, 80
    preencoder = LinearProjection(input_size=idim, output_size=odim)
    feats = torch.randn([2, 50, idim], requires_grad=True)
    feats_lens = torch.LongTensor([30, 15])
    out, out_lens = preencoder(feats, feats_lens)
    out.sum().backward()
    assert out.shape == torch.Size([2, 50, odim])
    assert torch.equal(out_lens, feats_lens)
| 469 | 28.375 | 68 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_rnn_encoder.py | import pytest
import torch
from espnet2.asr.encoder.rnn_encoder import RNNEncoder
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("use_projection", [True, False])
@pytest.mark.parametrize("subsample", [None, (2, 2, 1, 1)])
def test_Encoder_forward_backward(rnn_type, bidirectional, use_projection, subsample):
    """RNNEncoder runs forward and backward for all RNN variants."""
    encoder = RNNEncoder(
        5,
        rnn_type=rnn_type,
        bidirectional=bidirectional,
        use_projection=use_projection,
        subsample=subsample,
    )
    feats = torch.randn(2, 10, 5, requires_grad=True)
    feats_lens = torch.LongTensor([10, 8])
    out, _, _ = encoder(feats, feats_lens)
    out.sum().backward()
def test_Encoder_output_size():
    """output_size() echoes the configured projection size."""
    assert RNNEncoder(5, output_size=10).output_size() == 10
def test_Encoder_invalid_type():
    """Unknown rnn_type values are rejected."""
    with pytest.raises(ValueError):
        RNNEncoder(5, rnn_type="fff")
| 947 | 27.727273 | 86 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_hubert_encoder.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.asr.encoder.hubert_encoder import TorchAudioHuBERTPretrainEncoder
is_torch_1_12_1_plus = V(torch.__version__) >= V("1.12.1")
@pytest.mark.parametrize(
    "finetuning, eval, freeze_encoder_updates",
    [
        (False, False, 0),
        (True, False, 0),
        (True, False, 1),
        (True, True, 0),
    ],
)
def test_Encoder_forward_backward(finetuning, eval, freeze_encoder_updates):
    """Exercise the HuBERT pretrain encoder in pretrain/finetune/eval modes."""
    if not is_torch_1_12_1_plus:
        # torchaudio HuBERT requires torch>=1.12.1
        return
    encoder = TorchAudioHuBERTPretrainEncoder(
        20,
        extractor_conv_layer_config=[(3, 3, 2)],
        encoder_pos_conv_kernel=16,
        encoder_pos_conv_groups=4,
        encoder_embed_dim=4,
        encoder_num_layers=1,
        encoder_num_heads=1,
        encoder_ff_interm_features=4,
        num_classes=10,
        final_dim=10,
        finetuning=finetuning,
        freeze_encoder_updates=freeze_encoder_updates,
    )
    x = torch.randn(2, 32, requires_grad=True)
    y = torch.randint(low=0, high=10, size=(2, 15), dtype=torch.long)
    x_lens = torch.LongTensor([32, 16])
    # NOTE(review): `y` holds labels before this call and the encoder output
    # after it; the second forward in the eval branch below therefore passes
    # the previous output as the label argument — presumably unintended,
    # verify against the encoder's expected signature.
    y, _, p = encoder(x, x_lens, y)
    if not eval:
        encoder.train()
        if not finetuning:
            # Pretraining: the loss-like tensor `p` carries the gradient.
            p.sum().backward()
        else:
            if freeze_encoder_updates == 0:
                y.sum().backward()
            else:
                y.sum()  # requires_grad=False if freezing
    else:
        encoder.eval()
        y, _, p = encoder(x, x_lens, y)
        y.sum()
def test_Encoder_output_size():
    """output_size() equals encoder_embed_dim."""
    if not is_torch_1_12_1_plus:
        return  # torchaudio HuBERT requires torch>=1.12.1
    encoder = TorchAudioHuBERTPretrainEncoder(
        20,
        encoder_embed_dim=16,
        encoder_num_layers=1,
        encoder_num_heads=1,
        encoder_ff_interm_features=16,
        num_classes=10,
        final_dim=10,
    )
    assert encoder.output_size() == 16
def test_Encoder_reload_params():
    """reload_pretrained_parameters() runs without error on a fresh model."""
    if not is_torch_1_12_1_plus:
        return  # torchaudio HuBERT requires torch>=1.12.1
    encoder = TorchAudioHuBERTPretrainEncoder(
        20,
        encoder_embed_dim=16,
        encoder_num_layers=1,
        encoder_num_heads=1,
        encoder_ff_interm_features=16,
        num_classes=10,
        final_dim=10,
    )
    encoder.reload_pretrained_parameters()
def test_Encoder_invalid_type():
    """Invalid constructor arguments raise ValueError."""
    if not is_torch_1_12_1_plus:
        return  # torchaudio HuBERT requires torch>=1.12.1
    with pytest.raises(ValueError):
        TorchAudioHuBERTPretrainEncoder(
            20,
            encoder_embed_dim=10,
            encoder_num_layers=1,
            encoder_num_heads=1,
            encoder_ff_interm_features=10,
            num_classes=10,
            final_dim=10,
            freeze_encoder_updates=False,
        )
| 2,678 | 25.009709 | 78 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_contextual_block_transformer_encoder.py | import pytest
import torch
from espnet2.asr.encoder.contextual_block_transformer_encoder import ( # noqa: H301
ContextualBlockTransformerEncoder,
)
@pytest.mark.parametrize("input_layer", ["linear", "conv2d", "embed", None])
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d", "conv1d-linear"])
def test_Encoder_forward_backward(input_layer, positionwise_layer_type):
    """Streaming encoder works when input is longer or shorter than a block."""
    common = dict(
        output_size=40,
        input_layer=input_layer,
        positionwise_layer_type=positionwise_layer_type,
    )
    encoder = ContextualBlockTransformerEncoder(
        20, block_size=4, hop_size=2, look_ahead=1, **common
    )
    if input_layer == "embed":
        x = torch.randint(0, 10, [2, 10])
    elif input_layer is None:
        x = torch.randn(2, 10, 40, requires_grad=True)
    else:
        x = torch.randn(2, 10, 20, requires_grad=True)
    x_lens = torch.LongTensor([10, 8])
    out, _, _ = encoder(x, x_lens)
    out.sum().backward()
    # Utterances shorter than one block must also be handled.
    encoder_short = ContextualBlockTransformerEncoder(
        20, block_size=16, hop_size=8, look_ahead=4, **common
    )
    out2, _, _ = encoder_short(x, x_lens)
    out2.sum().backward()
def test_Encoder_output_size():
    """output_size() reflects the configured dimension."""
    encoder = ContextualBlockTransformerEncoder(20, output_size=256)
    assert encoder.output_size() == 256
def test_Encoder_invalid_type():
    """Unknown input_layer values are rejected."""
    with pytest.raises(ValueError):
        ContextualBlockTransformerEncoder(20, input_layer="fff")
| 1,574 | 28.716981 | 84 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_whisper_encoder.py | import sys
import pytest
import torch
from packaging.version import parse as V
from espnet2.asr.encoder.whisper_encoder import OpenAIWhisperEncoder
pytest.importorskip("whisper")
# NOTE(Shih-Lun): needed for `return_complex` param in torch.stft()
is_torch_1_7_plus = V(torch.__version__) >= V("1.7.0")
is_python_3_8_plus = sys.version_info >= (3, 8)
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.fixture()
def whisper_encoder(request):
    """Shared OpenAIWhisperEncoder("tiny") instance for the tests below."""
    # NOTE(review): pytest marks such as skipif have no effect when applied to
    # a fixture; the guard only takes effect via the marks on the requesting
    # tests — confirm and consider removing the mark here.
    encoder = OpenAIWhisperEncoder(whisper_model="tiny")
    return encoder
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_encoder_init(whisper_encoder):
    """The "tiny" whisper encoder has a 384-dim output."""
    assert whisper_encoder.output_size() == 384
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
def test_encoder_invalid_init():
    """An unknown whisper model tag is rejected at construction."""
    with pytest.raises(AssertionError):
        OpenAIWhisperEncoder(whisper_model="aaa")
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_encoder_forward_no_ilens(whisper_encoder):
    """Forward without lengths yields (batch, frames, dim)."""
    device = next(whisper_encoder.parameters()).device
    speech = torch.randn(4, 3200, device=device)
    xs_pad, _, _ = whisper_encoder(speech, None)
    assert xs_pad.size() == torch.Size([4, 10, 384])
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_encoder_forward_ilens(whisper_encoder):
    """Per-utterance lengths are downsampled consistently with the output."""
    device = next(whisper_encoder.parameters()).device
    speech = torch.randn(4, 3200, device=device)
    speech_lens = torch.tensor([500, 1000, 1600, 3200], device=device)
    xs_pad, olens, _ = whisper_encoder(speech, speech_lens)
    assert xs_pad.size() == torch.Size([4, 10, 384])
    assert torch.equal(olens.cpu(), torch.tensor([2, 3, 5, 10]))
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_encoder_backward(whisper_encoder):
    """Training-mode encoder output is differentiable."""
    whisper_encoder.train()
    device = next(whisper_encoder.parameters()).device
    speech = torch.randn(4, 3200, device=device)
    xs_pad, _, _ = whisper_encoder(speech, None)
    xs_pad.sum().backward()
| 2,635 | 27.967033 | 81 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_longformer_encoder.py | import pytest
import torch
from espnet2.asr.encoder.longformer_encoder import LongformerEncoder
pytest.importorskip("longformer")
@pytest.mark.parametrize(
    "input_layer",
    ["linear", "conv2d", "conv2d1", "conv2d2", "conv2d6", "conv2d8", "embed"],
)
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d", "conv1d-linear"])
@pytest.mark.parametrize(
    "rel_pos_type, pos_enc_layer_type, selfattention_layer_type",
    [
        ("legacy", "abs_pos", "lf_selfattn"),
    ],
)
def test_encoder_forward_backward(
    input_layer,
    positionwise_layer_type,
    rel_pos_type,
    pos_enc_layer_type,
    selfattention_layer_type,
):
    """Forward/backward smoke test over input layers and attention configs."""
    # Redundant with the module-level importorskip, but keeps the test
    # self-contained when run in isolation.
    pytest.importorskip("longformer")
    encoder = LongformerEncoder(
        20,
        output_size=2,
        attention_heads=2,
        linear_units=4,
        num_blocks=2,
        input_layer=input_layer,
        macaron_style=False,
        rel_pos_type=rel_pos_type,
        pos_enc_layer_type=pos_enc_layer_type,
        selfattention_layer_type=selfattention_layer_type,
        activation_type="swish",
        use_cnn_module=True,
        cnn_module_kernel=3,
        positionwise_layer_type=positionwise_layer_type,
        # One window/dilation entry per block (num_blocks=2).
        attention_windows=[10, 10],
        attention_dilation=[1, 1],
        attention_mode="sliding_chunks",
    )
    if input_layer == "embed":
        x = torch.randint(0, 10, [2, 32])
    else:
        x = torch.randn(2, 32, 20, requires_grad=True)
    x_lens = torch.LongTensor([32, 28])
    y, _, _ = encoder(x, x_lens)
    y.sum().backward()
def test_encoder_invalid_layer_type():
    """Unknown positional-encoding / attention names raise ValueError."""
    pytest.importorskip("longformer")
    bad_configs = [
        dict(pos_enc_layer_type="abc_pos"),
        dict(pos_enc_layer_type="dummy"),
        dict(pos_enc_layer_type="abc_pos", selfattention_layer_type="dummy"),
    ]
    for kwargs in bad_configs:
        with pytest.raises(ValueError):
            LongformerEncoder(20, **kwargs)
def test_encoder_invalid_windows_parameter():
    """Window/dilation lists whose length differs from num_blocks are rejected."""
    pytest.importorskip("longformer")
    with pytest.raises(ValueError):
        LongformerEncoder(20, attention_windows=[1, 1], num_blocks=4)
    with pytest.raises(ValueError):
        LongformerEncoder(20, attention_dilation=[1, 1], num_blocks=4)
def test_encoder_output_size():
    """output_size() reflects the configured dimension."""
    pytest.importorskip("longformer")
    assert LongformerEncoder(20, output_size=256).output_size() == 256
def test_encoder_invalid_type():
    """Unknown input_layer values are rejected."""
    pytest.importorskip("longformer")
    with pytest.raises(ValueError):
        LongformerEncoder(20, input_layer="fff")
| 2,575 | 28.953488 | 80 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_e_branchformer_encoder.py | import pytest
import torch
from espnet2.asr.ctc import CTC
from espnet2.asr.encoder.e_branchformer_encoder import EBranchformerEncoder
@pytest.mark.parametrize(
    "input_layer",
    [
        "linear",
        "conv1d2",
        "conv2d",
        "conv2d1",
        "conv2d2",
        "conv2d6",
        "conv2d8",
        "embed",
    ],
)
@pytest.mark.parametrize("use_linear_after_conv", [True, False])
@pytest.mark.parametrize(
    "rel_pos_type, pos_enc_layer_type, attention_layer_type",
    [
        ("legacy", "abs_pos", "selfattn"),
        ("latest", "rel_pos", "rel_selfattn"),
        ("legacy", "rel_pos", "rel_selfattn"),
        ("legacy", "legacy_rel_pos", "legacy_rel_selfattn"),
        ("legacy", "abs_pos", "fast_selfattn"),
    ],
)
@pytest.mark.parametrize("max_pos_emb_len", [128, 5000])
@pytest.mark.parametrize("use_ffn", [True, False])
@pytest.mark.parametrize("macaron_ffn", [True, False])
@pytest.mark.parametrize("linear_units", [1024, 2048])
@pytest.mark.parametrize("merge_conv_kernel", [3, 31])
@pytest.mark.parametrize("layer_drop_rate", [0.0, 0.1])
@pytest.mark.parametrize(
    "interctc_layer_idx, interctc_use_conditioning",
    [
        ([], False),
        ([1], False),
        ([1], True),
    ],
)
def test_encoder_forward_backward(
    input_layer,
    use_linear_after_conv,
    rel_pos_type,
    pos_enc_layer_type,
    attention_layer_type,
    max_pos_emb_len,
    use_ffn,
    macaron_ffn,
    linear_units,
    merge_conv_kernel,
    layer_drop_rate,
    interctc_layer_idx,
    interctc_use_conditioning,
):
    """Forward/backward smoke test for EBranchformerEncoder configurations."""
    encoder = EBranchformerEncoder(
        20,
        output_size=2,
        attention_heads=2,
        attention_layer_type=attention_layer_type,
        pos_enc_layer_type=pos_enc_layer_type,
        rel_pos_type=rel_pos_type,
        cgmlp_linear_units=4,
        cgmlp_conv_kernel=3,
        use_linear_after_conv=use_linear_after_conv,
        gate_activation="identity",
        num_blocks=2,
        input_layer=input_layer,
        max_pos_emb_len=max_pos_emb_len,
        use_ffn=use_ffn,
        macaron_ffn=macaron_ffn,
        linear_units=linear_units,
        merge_conv_kernel=merge_conv_kernel,
        layer_drop_rate=layer_drop_rate,
        interctc_layer_idx=interctc_layer_idx,
        interctc_use_conditioning=interctc_use_conditioning,
    )
    if input_layer == "embed":
        x = torch.randint(0, 10, [2, 32])
    else:
        x = torch.randn(2, 32, 20, requires_grad=True)
    x_lens = torch.LongTensor([32, 28])
    if len(interctc_layer_idx) > 0:  # intermediate CTC
        # Conditioning layer must be attached before the forward pass; the
        # encoder then also returns the intermediate-layer outputs.
        encoder.conditioning_layer = torch.nn.Linear(2, 2)
        y, _, _ = encoder(x, x_lens, ctc=CTC(odim=2, encoder_output_size=2))
        y, intermediate_outs = y
    else:
        y, _, _ = encoder(x, x_lens)
    y.sum().backward()
def test_encoder_invalid_layer_type():
    """Unknown layer/type names raise ValueError."""
    bad_configs = [
        dict(input_layer="dummy"),
        dict(rel_pos_type="dummy"),
        dict(pos_enc_layer_type="dummy"),
        dict(pos_enc_layer_type="abc_pos", attention_layer_type="dummy"),
        dict(positionwise_layer_type="dummy"),
    ]
    for kwargs in bad_configs:
        with pytest.raises(ValueError):
            EBranchformerEncoder(20, **kwargs)
def test_encoder_invalid_rel_pos_combination():
    """Mismatched pos-enc / attention pairs fail the internal asserts."""
    bad_combos = [
        dict(
            rel_pos_type="latest",
            pos_enc_layer_type="legacy_rel_pos",
            attention_layer_type="legacy_rel_sselfattn",
        ),
        dict(
            pos_enc_layer_type="rel_pos",
            attention_layer_type="legacy_rel_sselfattn",
        ),
        dict(
            pos_enc_layer_type="legacy_rel_pos",
            attention_layer_type="rel_sselfattn",
        ),
        dict(
            attention_layer_type="fast_selfattn",
            pos_enc_layer_type="rel_pos",
        ),
    ]
    for kwargs in bad_combos:
        with pytest.raises(AssertionError):
            EBranchformerEncoder(20, **kwargs)
def test_encoder_output_size():
    """output_size() reflects the configured dimension."""
    assert EBranchformerEncoder(20, output_size=256).output_size() == 256
| 4,375 | 29.17931 | 76 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_conformer_encoder.py | import pytest
import torch
from espnet2.asr.ctc import CTC
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
@pytest.mark.parametrize(
    "input_layer",
    ["linear", "conv2d", "conv2d1", "conv2d2", "conv2d6", "conv2d8", "embed"],
)
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d", "conv1d-linear"])
@pytest.mark.parametrize(
    "rel_pos_type, pos_enc_layer_type, selfattention_layer_type",
    [
        ("legacy", "abs_pos", "selfattn"),
        ("latest", "rel_pos", "rel_selfattn"),
        ("legacy", "rel_pos", "rel_selfattn"),
        ("legacy", "legacy_rel_pos", "legacy_rel_selfattn"),
    ],
)
@pytest.mark.parametrize(
    "interctc_layer_idx, interctc_use_conditioning",
    [
        ([], False),
        ([1], False),
        ([1], True),
    ],
)
@pytest.mark.parametrize("stochastic_depth_rate", [0.0, 0.1, [0.1, 0.1]])
def test_encoder_forward_backward(
    input_layer,
    positionwise_layer_type,
    rel_pos_type,
    pos_enc_layer_type,
    selfattention_layer_type,
    interctc_layer_idx,
    interctc_use_conditioning,
    stochastic_depth_rate,
):
    """Forward/backward smoke test for ConformerEncoder configurations."""
    encoder = ConformerEncoder(
        20,
        output_size=2,
        attention_heads=2,
        linear_units=4,
        num_blocks=2,
        input_layer=input_layer,
        macaron_style=False,
        rel_pos_type=rel_pos_type,
        pos_enc_layer_type=pos_enc_layer_type,
        selfattention_layer_type=selfattention_layer_type,
        activation_type="swish",
        use_cnn_module=True,
        cnn_module_kernel=3,
        positionwise_layer_type=positionwise_layer_type,
        interctc_layer_idx=interctc_layer_idx,
        interctc_use_conditioning=interctc_use_conditioning,
        stochastic_depth_rate=stochastic_depth_rate,
    )
    if input_layer == "embed":
        x = torch.randint(0, 10, [2, 32])
    else:
        x = torch.randn(2, 32, 20, requires_grad=True)
    x_lens = torch.LongTensor([32, 28])
    if len(interctc_layer_idx) > 0:
        ctc = None
        if interctc_use_conditioning:
            # Conditioning requires a CTC module and a conditioning layer
            # mapping vocab posteriors back to the encoder dimension.
            vocab_size = 5
            output_size = encoder.output_size()
            ctc = CTC(odim=vocab_size, encoder_output_size=output_size)
            encoder.conditioning_layer = torch.nn.Linear(vocab_size, output_size)
        y, _, _ = encoder(x, x_lens, ctc=ctc)
        # With intermediate CTC the first return element is a tuple; keep
        # only the final encoder output for the backward pass.
        y = y[0]
    else:
        y, _, _ = encoder(x, x_lens)
    y.sum().backward()
def test_encoder_invalid_layer_type():
    """Bad positional-encoding / attention type names raise ValueError."""
    bad_configs = [
        dict(rel_pos_type="dummy"),
        dict(pos_enc_layer_type="dummy"),
        dict(pos_enc_layer_type="abc_pos", selfattention_layer_type="dummy"),
    ]
    for kwargs in bad_configs:
        with pytest.raises(ValueError):
            ConformerEncoder(20, **kwargs)
def test_encoder_invalid_rel_pos_combination():
    """Mismatched pos-enc / self-attention pairs fail the internal asserts."""
    bad_combos = [
        dict(
            rel_pos_type="latest",
            pos_enc_layer_type="legacy_rel_pos",
            selfattention_layer_type="legacy_rel_sselfattn",
        ),
        dict(
            pos_enc_layer_type="rel_pos",
            selfattention_layer_type="legacy_rel_sselfattn",
        ),
        dict(
            pos_enc_layer_type="legacy_rel_pos",
            selfattention_layer_type="rel_sselfattn",
        ),
    ]
    for kwargs in bad_combos:
        with pytest.raises(AssertionError):
            ConformerEncoder(20, **kwargs)
def test_encoder_invalid_interctc_layer_idx():
    """Out-of-range intermediate-CTC layer indices are rejected."""
    for bad_idx in ([0, 1], [1, 2]):
        with pytest.raises(AssertionError):
            ConformerEncoder(20, num_blocks=2, interctc_layer_idx=bad_idx)
def test_encoder_output_size():
    """output_size() reflects the configured dimension."""
    assert ConformerEncoder(20, output_size=256).output_size() == 256
def test_encoder_invalid_type():
    """Unknown input_layer values are rejected."""
    with pytest.raises(ValueError):
        ConformerEncoder(20, input_layer="fff")
def test_encoder_invalid_stochastic_depth_rate():
    """A per-layer rate list must have exactly num_blocks entries."""
    for bad_rates in ([0.1], [0.1, 0.1, 0.1]):
        with pytest.raises(ValueError):
            ConformerEncoder(20, num_blocks=2, stochastic_depth_rate=bad_rates)
| 4,431 | 28.546667 | 81 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_transformer_encoder_multispkr.py | import pytest
import torch
from espnet2.asr.encoder.transformer_encoder_multispkr import TransformerEncoder
@pytest.mark.parametrize("input_layer", ["conv2d"])
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d"])
@pytest.mark.parametrize("num_inf", [1, 2, 3])
def test_Encoder_forward_backward(
    input_layer,
    positionwise_layer_type,
    num_inf,
):
    """Multi-speaker encoder emits one stream per speaker and backprops."""
    encoder = TransformerEncoder(
        20,
        output_size=40,
        input_layer=input_layer,
        positionwise_layer_type=positionwise_layer_type,
        num_blocks=1,
        num_blocks_sd=1,
        num_inf=num_inf,
    )
    if input_layer == "embed":
        feats = torch.randint(0, 10, [2, 10])
    else:
        feats = torch.randn(2, 10, 20, requires_grad=True)
    feats_lens = torch.LongTensor([10, 8])
    out, _, _ = encoder(feats, feats_lens)
    # Output leads with (batch, num_inf): one stream per inference head.
    assert out.shape[:2] == torch.Size((2, num_inf))
    out.sum().backward()
def test_Encoder_output_size():
    """output_size() reflects the configured dimension."""
    encoder = TransformerEncoder(
        20,
        output_size=256,
        num_blocks=1,
        num_blocks_sd=1,
        num_inf=2,
    )
    assert encoder.output_size() == 256
def test_Encoder_invalid_type():
    """Unknown input_layer values are rejected."""
    with pytest.raises(ValueError):
        TransformerEncoder(20, input_layer="fff")
| 1,231 | 24.666667 | 80 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_branchformer_encoder.py | import pytest
import torch
from espnet2.asr.encoder.branchformer_encoder import BranchformerEncoder
@pytest.mark.parametrize(
    "input_layer",
    ["linear", "conv2d", "conv2d1", "conv2d2", "conv2d6", "conv2d8", "embed"],
)
@pytest.mark.parametrize("use_linear_after_conv", [True, False])
@pytest.mark.parametrize(
    "rel_pos_type, pos_enc_layer_type, attention_layer_type",
    [
        ("legacy", "abs_pos", "selfattn"),
        ("latest", "rel_pos", "rel_selfattn"),
        ("legacy", "rel_pos", "rel_selfattn"),
        ("legacy", "legacy_rel_pos", "legacy_rel_selfattn"),
        ("legacy", "abs_pos", "fast_selfattn"),
    ],
)
@pytest.mark.parametrize(
    "merge_method, cgmlp_weight, attn_branch_drop_rate",
    [
        ("concat", 0.5, 0.0),
        ("learned_ave", 0.5, 0.0),
        ("learned_ave", 0.5, 0.1),
        ("learned_ave", 0.5, [0.1, 0.1]),
        ("fixed_ave", 0.5, 0.0),
        ("fixed_ave", [0.5, 0.5], 0.0),
        ("fixed_ave", 0.0, 0.0),
        ("fixed_ave", 1.0, 0.0),
    ],
)
@pytest.mark.parametrize("stochastic_depth_rate", [0.0, 0.1, [0.1, 0.1]])
def test_encoder_forward_backward(
    input_layer,
    use_linear_after_conv,
    rel_pos_type,
    pos_enc_layer_type,
    attention_layer_type,
    merge_method,
    cgmlp_weight,
    attn_branch_drop_rate,
    stochastic_depth_rate,
):
    """Forward/backward smoke test for BranchformerEncoder configurations."""
    # Both branches (attention + cgMLP) are enabled; the parametrization
    # sweeps merge strategies, branch weights, and drop rates.
    encoder = BranchformerEncoder(
        20,
        output_size=2,
        use_attn=True,
        attention_heads=2,
        attention_layer_type=attention_layer_type,
        pos_enc_layer_type=pos_enc_layer_type,
        rel_pos_type=rel_pos_type,
        use_cgmlp=True,
        cgmlp_linear_units=4,
        cgmlp_conv_kernel=3,
        use_linear_after_conv=use_linear_after_conv,
        gate_activation="identity",
        merge_method=merge_method,
        cgmlp_weight=cgmlp_weight,
        attn_branch_drop_rate=attn_branch_drop_rate,
        num_blocks=2,
        input_layer=input_layer,
        stochastic_depth_rate=stochastic_depth_rate,
    )
    if input_layer == "embed":
        x = torch.randint(0, 10, [2, 32])
    else:
        x = torch.randn(2, 32, 20, requires_grad=True)
    x_lens = torch.LongTensor([32, 28])
    y, _, _ = encoder(x, x_lens)
    y.sum().backward()
def test_encoder_invalid_layer_type():
    """Unknown positional-encoding/attention type names raise ValueError."""
    bad_configs = (
        {"rel_pos_type": "dummy"},
        {"pos_enc_layer_type": "dummy"},
        {"pos_enc_layer_type": "abc_pos", "attention_layer_type": "dummy"},
    )
    for kwargs in bad_configs:
        with pytest.raises(ValueError):
            BranchformerEncoder(20, **kwargs)
def test_encoder_invalid_rel_pos_combination():
    """Mismatched rel_pos_type / pos-enc / attention combinations are rejected."""
    bad_combos = (
        {
            "rel_pos_type": "latest",
            "pos_enc_layer_type": "legacy_rel_pos",
            "attention_layer_type": "legacy_rel_sselfattn",
        },
        {
            "pos_enc_layer_type": "rel_pos",
            "attention_layer_type": "legacy_rel_sselfattn",
        },
        {
            "pos_enc_layer_type": "legacy_rel_pos",
            "attention_layer_type": "rel_sselfattn",
        },
        {
            "attention_layer_type": "fast_selfattn",
            "pos_enc_layer_type": "rel_pos",
        },
    )
    for kwargs in bad_combos:
        with pytest.raises(AssertionError):
            BranchformerEncoder(20, **kwargs)
def test_encoder_output_size():
    """output_size() reports the configured model dimension."""
    model = BranchformerEncoder(20, output_size=256)
    assert model.output_size() == 256
def test_encoder_invalid_type():
    """An unsupported input_layer name raises ValueError."""
    with pytest.raises(ValueError):
        BranchformerEncoder(20, input_layer="fff")
def test_encoder_invalid_cgmlp_weight():
    """Out-of-range or wrongly-sized cgmlp_weight settings are rejected."""
    # fixed_ave: a negative scalar weight fails the range assertion
    with pytest.raises(AssertionError):
        BranchformerEncoder(20, merge_method="fixed_ave", cgmlp_weight=-1.0)
    # a per-layer weight list must have exactly num_blocks entries
    with pytest.raises(ValueError):
        BranchformerEncoder(20, num_blocks=2, cgmlp_weight=[0.1, 0.1, 0.1])
def test_encoder_invalid_merge_method():
    """An unknown branch-merge method name raises ValueError."""
    with pytest.raises(ValueError):
        BranchformerEncoder(20, merge_method="dummy")
def test_encoder_invalid_two_branches():
    """Disabling both the attention branch and the cgMLP branch is not allowed."""
    with pytest.raises(AssertionError):
        BranchformerEncoder(20, use_attn=False, use_cgmlp=False)
def test_encoder_invalid_attn_branch_drop_rate():
    """A per-layer attention-branch drop-rate list must match num_blocks."""
    with pytest.raises(ValueError):
        BranchformerEncoder(20, num_blocks=2, attn_branch_drop_rate=[0.1, 0.1, 0.1])
def test_encoder_invalid_stochastic_depth_rate():
    """A per-layer stochastic-depth list must have exactly num_blocks entries."""
    for bad_rates in ([0.1], [0.1, 0.1, 0.1]):
        with pytest.raises(ValueError):
            BranchformerEncoder(20, num_blocks=2, stochastic_depth_rate=bad_rates)
| 5,048 | 27.206704 | 78 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_vgg_rnn_encoder.py | import pytest
import torch
from espnet2.asr.encoder.vgg_rnn_encoder import VGGRNNEncoder
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("use_projection", [True, False])
def test_Encoder_forward_backward(rnn_type, bidirectional, use_projection):
    """Forward pass and backprop succeed for every VGGRNN configuration."""
    enc = VGGRNNEncoder(
        5, rnn_type=rnn_type, bidirectional=bidirectional, use_projection=use_projection
    )
    feats = torch.randn(2, 10, 5, requires_grad=True)
    feat_lens = torch.LongTensor([10, 8])
    out, _, _ = enc(feats, feat_lens)
    out.sum().backward()
def test_Encoder_output_size():
    """output_size() reports the configured model dimension."""
    enc = VGGRNNEncoder(5, output_size=10)
    assert enc.output_size() == 10
def test_Encoder_invalid_type():
    """An unsupported rnn_type name raises ValueError."""
    with pytest.raises(ValueError):
        VGGRNNEncoder(5, rnn_type="fff")
| 838 | 28.964286 | 88 | py |
espnet | espnet-master/test/espnet2/asr/encoder/test_transformer_encoder.py | import pytest
import torch
from espnet2.asr.ctc import CTC
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
@pytest.mark.parametrize("input_layer", ["linear", "conv2d", "embed", None])
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d", "conv1d-linear"])
@pytest.mark.parametrize(
    "interctc_layer_idx, interctc_use_conditioning",
    [
        ([], False),
        ([1], False),
        ([1], True),
    ],
)
def test_Encoder_forward_backward(
    input_layer,
    positionwise_layer_type,
    interctc_layer_idx,
    interctc_use_conditioning,
):
    """Smoke test: TransformerEncoder forward + backward, with and without
    intermediate CTC (and optional CTC-conditioned layers)."""
    encoder = TransformerEncoder(
        20,
        output_size=40,
        input_layer=input_layer,
        positionwise_layer_type=positionwise_layer_type,
        interctc_layer_idx=interctc_layer_idx,
        interctc_use_conditioning=interctc_use_conditioning,
    )
    # "embed" frontend expects token ids; the rest expect feature frames.
    if input_layer == "embed":
        x = torch.randint(0, 10, [2, 10])
    else:
        x = torch.randn(2, 10, 20, requires_grad=True)
    x_lens = torch.LongTensor([10, 8])
    if len(interctc_layer_idx) > 0:
        ctc = None
        if interctc_use_conditioning:
            # Conditioning needs a CTC head plus a projection back into the
            # encoder dimension.
            vocab_size = 5
            output_size = encoder.output_size()
            ctc = CTC(odim=vocab_size, encoder_output_size=output_size)
            encoder.conditioning_layer = torch.nn.Linear(vocab_size, output_size)
        y, _, _ = encoder(x, x_lens, ctc=ctc)
        # With intermediate CTC the first return slot holds (main, intermediate);
        # take the main encoder output.
        y = y[0]
    else:
        y, _, _ = encoder(x, x_lens)
    y.sum().backward()
def test_encoder_invalid_interctc_layer_idx():
    """Intermediate-CTC layer indices outside the valid layer range are rejected."""
    for bad_idx in ([0, 1], [1, 2]):
        with pytest.raises(AssertionError):
            TransformerEncoder(20, num_blocks=2, interctc_layer_idx=bad_idx)
def test_Encoder_output_size():
    """output_size() reports the configured model dimension."""
    enc = TransformerEncoder(20, output_size=256)
    assert enc.output_size() == 256
def test_Encoder_invalid_type():
    """An unsupported input_layer name raises ValueError."""
    with pytest.raises(ValueError):
        TransformerEncoder(20, input_layer="fff")
| 2,110 | 27.527027 | 81 | py |
espnet | espnet-master/test/espnet2/asr/decoder/test_mlm_decoder.py | import pytest
import torch
from espnet2.asr.decoder.mlm_decoder import MLMDecoder
@pytest.mark.parametrize("input_layer", ["linear", "embed"])
@pytest.mark.parametrize("normalize_before", [True, False])
@pytest.mark.parametrize("use_output_layer", [True, False])
def test_MLMDecoder_backward(input_layer, normalize_before, use_output_layer):
    """Smoke test: MLMDecoder forward + backward for all layer configurations."""
    vocab_size = 10
    decoder = MLMDecoder(
        vocab_size,
        12,
        linear_units=10,
        num_blocks=2,
        input_layer=input_layer,
        normalize_before=normalize_before,
        use_output_layer=use_output_layer,
    )
    x = torch.randn(2, 9, 12)
    x_lens = torch.tensor([9, 7], dtype=torch.long)
    # vocab_size + 1 — presumably reserves an extra id for the mask token;
    # TODO(review): confirm against MLMDecoder's embedding size.
    if input_layer == "embed":
        t = torch.randint(0, vocab_size + 1, [2, 4], dtype=torch.long)
    else:
        t = torch.randn(2, 4, vocab_size + 1)
    t_lens = torch.tensor([4, 3], dtype=torch.long)
    z_all, ys_in_lens = decoder(x, x_lens, t, t_lens)
    z_all.sum().backward()
def test_MLMDecoder_invalid_type():
    """An unsupported input_layer name raises ValueError."""
    with pytest.raises(ValueError):
        MLMDecoder(10, 12, input_layer="foo")
| 1,088 | 30.114286 | 78 | py |
espnet | espnet-master/test/espnet2/asr/decoder/test_hugging_face_transformers_decoder.py | import pytest
import torch
from espnet2.asr.decoder.hugging_face_transformers_decoder import (
HuggingFaceTransformersDecoder,
)
@pytest.mark.parametrize(
    "model_name_or_path",
    [
        "akreal/tiny-random-t5",
        "akreal/tiny-random-mbart",
    ],
)
@pytest.mark.parametrize("encoder_output_size", [16, 32])
@pytest.mark.execution_timeout(50)
def test_HuggingFaceTransformersDecoder_backward(
    encoder_output_size, model_name_or_path
):
    """Smoke test: teacher-forced decoding with tiny HF models backprops."""
    decoder = HuggingFaceTransformersDecoder(
        vocab_size=5000,  # not used
        encoder_output_size=encoder_output_size,
        model_name_or_path=model_name_or_path,
    )
    x = torch.randn(2, 9, encoder_output_size)
    x_lens = torch.tensor([9, 7], dtype=torch.long)
    t = torch.randint(0, 10, [2, 4], dtype=torch.long)
    t_lens = torch.tensor([4, 3], dtype=torch.long)
    z_all, ys_in_lens = decoder(x, x_lens, t, t_lens)
    z_all.sum().backward()
@pytest.mark.execution_timeout(30)
def test_reload_pretrained_parameters():
    """reload_pretrained_parameters() restores the original HF weights."""
    decoder = HuggingFaceTransformersDecoder(5000, 32, "akreal/tiny-random-mbart")
    first_param = next(iter(decoder.parameters()))
    pretrained = first_param.detach().clone()
    # Clobber the weights, verify they changed, then reload and verify restored.
    first_param.data *= 0
    assert not torch.equal(pretrained, first_param.detach().clone())
    decoder.reload_pretrained_parameters()
    restored = next(iter(decoder.parameters())).detach().clone()
    assert torch.equal(pretrained, restored)
| 1,481 | 31.217391 | 82 | py |
espnet | espnet-master/test/espnet2/asr/decoder/test_whisper_decoder.py | import sys
import pytest
import torch
from packaging.version import parse as V
from espnet2.asr.decoder.whisper_decoder import OpenAIWhisperDecoder
# Vocabulary size of OpenAI Whisper's multilingual tokenizer.
VOCAB_SIZE_WHISPER_MULTILINGUAL = 51865
# Skip the whole module when the whisper package is unavailable.
pytest.importorskip("whisper")
# NOTE(Shih-Lun): needed for `persistent` param in
# torch.nn.Module.register_buffer()
is_torch_1_7_plus = V(torch.__version__) >= V("1.7.0")
is_python_3_8_plus = sys.version_info >= (3, 8)
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.fixture()
def whisper_decoder(request):
    """Shared tiny multilingual Whisper decoder used by the tests below."""
    # NOTE(review): pytest marks (including skipif) have no effect on fixtures;
    # the per-test skipif marks are what actually guard execution here.
    return OpenAIWhisperDecoder(
        vocab_size=VOCAB_SIZE_WHISPER_MULTILINGUAL,
        encoder_output_size=384,
        whisper_model="tiny",
    )
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_decoder_init(whisper_decoder):
    """The default decoder keeps Whisper's full multilingual vocabulary."""
    emb = whisper_decoder.decoders.token_embedding
    assert emb.num_embeddings == VOCAB_SIZE_WHISPER_MULTILINGUAL
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_decoder_reinit_emb():
    """Passing a custom vocab_size resizes the token embedding table."""
    custom_vocab = 1000
    decoder = OpenAIWhisperDecoder(
        vocab_size=custom_vocab,
        encoder_output_size=384,
        whisper_model="tiny",
    )
    assert decoder.decoders.token_embedding.num_embeddings == custom_vocab
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
def test_decoder_invalid_init():
    """Constructing with an unknown Whisper model tag must fail."""
    with pytest.raises(AssertionError):
        OpenAIWhisperDecoder(
            vocab_size=VOCAB_SIZE_WHISPER_MULTILINGUAL,
            encoder_output_size=384,
            whisper_model="aaa",
        )
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_decoder_forward_backward(whisper_decoder):
    """Teacher-forced decoding yields vocab-sized logits and backprops."""
    device = next(whisper_decoder.parameters()).device
    enc_out = torch.randn(4, 100, 384, device=device)
    tokens = torch.randint(0, 3000, (4, 10), device=device)
    logits, _ = whisper_decoder(enc_out, None, tokens, None)
    assert logits.size() == torch.Size([4, 10, VOCAB_SIZE_WHISPER_MULTILINGUAL])
    logits.sum().backward()
@pytest.mark.skipif(
    not is_python_3_8_plus or not is_torch_1_7_plus,
    reason="whisper not supported on python<3.8, torch<1.7",
)
@pytest.mark.timeout(50)
def test_decoder_scoring(whisper_decoder):
    """batch_score() and score() emit vocabulary-sized score vectors."""
    device = next(whisper_decoder.parameters()).device
    # Batched scoring: one score vector per hypothesis.
    batch_enc = torch.randn(4, 100, 384, device=device)
    batch_tokens = torch.randint(0, 3000, (4, 10), device=device)
    scores, _ = whisper_decoder.batch_score(batch_tokens, None, batch_enc)
    assert scores.size() == torch.Size([4, VOCAB_SIZE_WHISPER_MULTILINGUAL])
    # Single-hypothesis scoring.
    single_enc = torch.randn(100, 384, device=device)
    single_tokens = torch.randint(0, 3000, (10,), device=device)
    score, _ = whisper_decoder.score(single_tokens, None, single_enc)
    assert score.size() == torch.Size([VOCAB_SIZE_WHISPER_MULTILINGUAL])
| 3,391 | 29.836364 | 87 | py |
espnet | espnet-master/test/espnet2/asr/decoder/test_s4_decoder.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.asr.decoder.s4_decoder import S4Decoder
from espnet.nets.batch_beam_search import BatchBeamSearch
# Check to have torch.linalg
is_torch_1_10_plus = V(torch.__version__) >= V("1.10.0")
@pytest.mark.parametrize("input_layer", ["embed"])
@pytest.mark.parametrize("prenorm", [True, False])
@pytest.mark.parametrize("n_layers", [3, 6])
@pytest.mark.parametrize("residual", ["residual", None])
@pytest.mark.parametrize("norm", ["layer", "batch"])
@pytest.mark.parametrize("drop_path", [0.0, 0.1])
def test_S4Decoder_backward(input_layer, prenorm, n_layers, norm, residual, drop_path):
    """Smoke test: S4Decoder forward + backward across layer configurations."""
    # Skip test for the lower pytorch versions
    if not is_torch_1_10_plus:
        return
    layer = [
        {"_name_": "s4", "keops": True},  # Do not use custom Cauchy kernel (CUDA)
        {"_name_": "mha", "n_head": 4},
        {"_name_": "ff"},
    ]
    decoder = S4Decoder(
        vocab_size=10,
        encoder_output_size=12,
        input_layer=input_layer,
        prenorm=prenorm,
        n_layers=n_layers,
        layer=layer,
        norm=norm,
        residual=residual,
        drop_path=drop_path,
    )
    x = torch.randn(2, 9, 12)
    x_lens = torch.tensor([9, 7], dtype=torch.long)
    t = torch.randint(0, 10, [2, 4], dtype=torch.long)
    t_lens = torch.tensor([4, 3], dtype=torch.long)
    z_all, ys_in_lens = decoder(x, x_lens, t, t_lens)
    z_all.sum().backward()
@pytest.mark.parametrize("input_layer", ["embed"])
@pytest.mark.parametrize("prenorm", [True, False])
@pytest.mark.parametrize("n_layers", [3, 6])
@pytest.mark.parametrize("residual", ["residual", None])
@pytest.mark.parametrize("norm", ["layer", "batch"])
@pytest.mark.parametrize("drop_path", [0.0, 0.1])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_S4Decoder_batch_beam_search(
    input_layer, prenorm, n_layers, norm, residual, drop_path, dtype
):
    """Smoke test: S4Decoder drives BatchBeamSearch end-to-end."""
    # Skip test for the lower pytorch versions
    if not is_torch_1_10_plus:
        return
    token_list = ["<blank>", "a", "b", "c", "unk", "<eos>"]
    vocab_size = len(token_list)
    encoder_output_size = 4
    layer = [
        {"_name_": "s4", "keops": True},  # Do not use custom Cauchy kernel
        {"_name_": "mha", "n_head": 4},
        {"_name_": "ff"},
    ]
    decoder = S4Decoder(
        vocab_size=vocab_size,
        encoder_output_size=encoder_output_size,
        input_layer=input_layer,
        prenorm=prenorm,
        n_layers=n_layers,
        layer=layer,
        norm=norm,
        residual=residual,
        drop_path=drop_path,
    )
    beam = BatchBeamSearch(
        beam_size=3,
        vocab_size=vocab_size,
        weights={"test": 1.0},
        scorers={"test": decoder},
        token_list=token_list,
        sos=vocab_size - 1,
        eos=vocab_size - 1,
        pre_beam_score_key=None,
    )
    beam.to(dtype=dtype).eval()
    # Call setup_step() on every submodule that provides it — presumably
    # switches S4 kernels into stepwise decoding mode; TODO(review) confirm.
    for module in beam.nn_dict.test.modules():
        if hasattr(module, "setup_step"):
            module.setup_step()
    enc = torch.randn(10, encoder_output_size).type(dtype)
    with torch.no_grad():
        beam(
            x=enc,
            maxlenratio=0.0,
            minlenratio=0.0,
        )
| 3,230 | 30.676471 | 87 | py |
espnet | espnet-master/test/espnet2/asr/decoder/test_transducer_decoder.py | import pytest
import torch
from espnet2.asr.decoder.transducer_decoder import TransducerDecoder
from espnet2.asr.transducer.beam_search_transducer import Hypothesis
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
def test_TransducerDecoder_forward(rnn_type):
    """A batch of label sequences decodes without error."""
    labels = torch.randint(0, 10, [4, 10], dtype=torch.long)
    decoder = TransducerDecoder(10, rnn_type=rnn_type)
    decoder.set_device(labels.device)
    _ = decoder(labels)
def test_TransducerDecoder_invalid_type():
    """An unsupported rnn_type name raises ValueError."""
    with pytest.raises(ValueError):
        TransducerDecoder(10, rnn_type="foo")
def test_TransducerDecoder_score():
    """score() runs on a fresh single-hypothesis state with an empty cache."""
    decoder = TransducerDecoder(10, rnn_type="lstm")
    state = decoder.init_state(1)
    hyp = Hypothesis(score=0.0, yseq=[0], dec_state=state)
    _, _, _ = decoder.score(hyp, {})
def test_TransducerDecoder_batch_score():
    """batch_score() runs with an empty cache."""
    decoder = TransducerDecoder(10, rnn_type="lstm")
    states = decoder.init_state(3)
    hyps = [
        Hypothesis(score=0.0, yseq=[0], dec_state=decoder.select_state(states, 0))
    ]
    _, _, _ = decoder.batch_score(hyps, states, {}, True)
def test_TransducerDecoder_cache_score():
    """score()/batch_score() accept pre-populated decoder-output caches."""
    decoder = TransducerDecoder(10, rnn_type="gru")
    batch_state = decoder.init_state(3)
    hyps = [
        Hypothesis(score=0.0, yseq=[0], dec_state=decoder.select_state(batch_state, 0))
    ]
    # Pre-seed the per-hypothesis cache for score(), then build the batched
    # cache (dec_out, state) from its output for batch_score().
    cache = {"0": hyps[0].dec_state}
    dec_out, _, _ = decoder.score(hyps[0], cache)
    batch_cache = {"0": (dec_out.view(1, 1, -1), hyps[0].dec_state)}
    _, _, _ = decoder.batch_score(hyps, batch_state, batch_cache, False)
| 1,576 | 28.203704 | 87 | py |
espnet | espnet-master/test/espnet2/asr/decoder/test_transformer_decoder.py | import pytest
import torch
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.transformer_decoder import ( # noqa: H301
DynamicConvolution2DTransformerDecoder,
DynamicConvolutionTransformerDecoder,
LightweightConvolution2DTransformerDecoder,
LightweightConvolutionTransformerDecoder,
TransformerDecoder,
)
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.batch_beam_search_online_sim import BatchBeamSearchOnlineSim
from espnet.nets.beam_search import BeamSearch
from espnet.nets.scorers.ctc import CTCPrefixScorer
@pytest.mark.parametrize("input_layer", ["linear", "embed"])
@pytest.mark.parametrize("normalize_before", [True, False])
@pytest.mark.parametrize("use_output_layer", [True, False])
@pytest.mark.parametrize(
    "decoder_class",
    [
        TransformerDecoder,
        LightweightConvolutionTransformerDecoder,
        LightweightConvolution2DTransformerDecoder,
        DynamicConvolutionTransformerDecoder,
        DynamicConvolution2DTransformerDecoder,
    ],
)
def test_TransformerDecoder_backward(
    input_layer, normalize_before, use_output_layer, decoder_class
):
    """Smoke test: teacher-forced decoding backprops for every decoder variant."""
    decoder = decoder_class(
        10,
        12,
        input_layer=input_layer,
        normalize_before=normalize_before,
        use_output_layer=use_output_layer,
        linear_units=10,
    )
    x = torch.randn(2, 9, 12)
    x_lens = torch.tensor([9, 7], dtype=torch.long)
    # "embed" frontend expects token ids; "linear" expects dense vectors.
    if input_layer == "embed":
        t = torch.randint(0, 10, [2, 4], dtype=torch.long)
    else:
        t = torch.randn(2, 4, 10)
    t_lens = torch.tensor([4, 3], dtype=torch.long)
    z_all, ys_in_lens = decoder(x, x_lens, t, t_lens)
    z_all.sum().backward()
@pytest.mark.parametrize(
    "decoder_class",
    [
        TransformerDecoder,
        LightweightConvolutionTransformerDecoder,
        LightweightConvolution2DTransformerDecoder,
        DynamicConvolutionTransformerDecoder,
        DynamicConvolution2DTransformerDecoder,
    ],
)
def test_TransformerDecoder_init_state(decoder_class):
    """init_state() yields a state that score() accepts for one hypothesis."""
    decoder = decoder_class(10, 12)
    enc_out = torch.randn(9, 12)
    initial_state = decoder.init_state(enc_out)
    hyp = torch.randint(0, 10, [4], dtype=torch.long)
    decoder.score(hyp, initial_state, enc_out)
@pytest.mark.parametrize(
    "decoder_class",
    [
        TransformerDecoder,
        LightweightConvolutionTransformerDecoder,
        LightweightConvolution2DTransformerDecoder,
        DynamicConvolutionTransformerDecoder,
        DynamicConvolution2DTransformerDecoder,
    ],
)
def test_TransformerDecoder_invalid_type(decoder_class):
    """An unsupported input_layer name raises ValueError for every variant."""
    with pytest.raises(ValueError):
        decoder_class(10, 12, input_layer="foo")
@pytest.mark.parametrize("input_layer", ["embed"])
@pytest.mark.parametrize("normalize_before", [True, False])
@pytest.mark.parametrize("use_output_layer", [True])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
@pytest.mark.parametrize("maxlenratio", [1.0, 0.0, -1.0])
@pytest.mark.parametrize(
    "decoder_class",
    [
        TransformerDecoder,
        LightweightConvolutionTransformerDecoder,
        LightweightConvolution2DTransformerDecoder,
        DynamicConvolutionTransformerDecoder,
        DynamicConvolution2DTransformerDecoder,
    ],
)
def test_TransformerDecoder_beam_search(
    input_layer, normalize_before, use_output_layer, dtype, maxlenratio, decoder_class
):
    """Smoke test: each decoder variant drives BeamSearch, covering positive,
    zero (auto), and negative maxlenratio settings."""
    token_list = ["<blank>", "a", "b", "c", "unk", "<eos>"]
    vocab_size = len(token_list)
    encoder_output_size = 4
    decoder = decoder_class(
        vocab_size=vocab_size,
        encoder_output_size=encoder_output_size,
        input_layer=input_layer,
        normalize_before=normalize_before,
        use_output_layer=use_output_layer,
        linear_units=10,
    )
    beam = BeamSearch(
        beam_size=3,
        vocab_size=vocab_size,
        weights={"test": 1.0},
        scorers={"test": decoder},
        token_list=token_list,
        sos=vocab_size - 1,
        eos=vocab_size - 1,
        pre_beam_score_key=None,
    )
    beam.to(dtype=dtype)
    enc = torch.randn(10, encoder_output_size).type(dtype)
    with torch.no_grad():
        beam(
            x=enc,
            maxlenratio=maxlenratio,
            minlenratio=0.0,
        )
@pytest.mark.parametrize("input_layer", ["embed"])
@pytest.mark.parametrize("normalize_before", [True, False])
@pytest.mark.parametrize("use_output_layer", [True])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
@pytest.mark.parametrize(
    "decoder_class",
    [
        TransformerDecoder,
        LightweightConvolutionTransformerDecoder,
        LightweightConvolution2DTransformerDecoder,
        DynamicConvolutionTransformerDecoder,
        DynamicConvolution2DTransformerDecoder,
    ],
)
def test_TransformerDecoder_batch_beam_search(
    input_layer, normalize_before, use_output_layer, dtype, decoder_class
):
    """Smoke test: each decoder variant drives the batched BeamSearch."""
    token_list = ["<blank>", "a", "b", "c", "unk", "<eos>"]
    vocab_size = len(token_list)
    encoder_output_size = 4
    decoder = decoder_class(
        vocab_size=vocab_size,
        encoder_output_size=encoder_output_size,
        input_layer=input_layer,
        normalize_before=normalize_before,
        use_output_layer=use_output_layer,
        linear_units=10,
    )
    beam = BatchBeamSearch(
        beam_size=3,
        vocab_size=vocab_size,
        weights={"test": 1.0},
        scorers={"test": decoder},
        token_list=token_list,
        sos=vocab_size - 1,
        eos=vocab_size - 1,
        pre_beam_score_key=None,
    )
    beam.to(dtype=dtype)
    enc = torch.randn(10, encoder_output_size).type(dtype)
    with torch.no_grad():
        beam(
            x=enc,
            maxlenratio=0.0,
            minlenratio=0.0,
        )
@pytest.mark.parametrize("input_layer", ["embed"])
@pytest.mark.parametrize("normalize_before", [True, False])
@pytest.mark.parametrize("use_output_layer", [True])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
@pytest.mark.parametrize(
    "decoder_class",
    [
        TransformerDecoder,
        LightweightConvolutionTransformerDecoder,
        LightweightConvolution2DTransformerDecoder,
        DynamicConvolutionTransformerDecoder,
        DynamicConvolution2DTransformerDecoder,
    ],
)
def test_TransformerDecoder_batch_beam_search_online(
    input_layer, normalize_before, use_output_layer, dtype, decoder_class, tmp_path
):
    """Smoke test: online-simulation beam search with a CTC prefix scorer,
    exercising all three ways of setting the streaming configuration."""
    token_list = ["<blank>", "a", "b", "c", "unk", "<eos>"]
    vocab_size = len(token_list)
    encoder_output_size = 8
    decoder = decoder_class(
        vocab_size=vocab_size,
        encoder_output_size=encoder_output_size,
        input_layer=input_layer,
        normalize_before=normalize_before,
        use_output_layer=use_output_layer,
        linear_units=10,
    )
    ctc = CTC(odim=vocab_size, encoder_output_size=encoder_output_size)
    ctc.to(dtype)
    ctc_scorer = CTCPrefixScorer(ctc=ctc, eos=vocab_size - 1)
    beam = BatchBeamSearchOnlineSim(
        beam_size=3,
        vocab_size=vocab_size,
        weights={"test": 0.7, "ctc": 0.3},
        scorers={"test": decoder, "ctc": ctc_scorer},
        token_list=token_list,
        sos=vocab_size - 1,
        eos=vocab_size - 1,
        pre_beam_score_key=None,
    )
    cp = tmp_path / "config.yaml"
    yp = tmp_path / "dummy.yaml"
    # 1) streaming config reached indirectly via a "config:" pointer file
    with cp.open("w") as f:
        f.write("config: " + str(yp) + "\n")
    with yp.open("w") as f:
        f.write("encoder_conf:\n")
        f.write("  block_size: 4\n")
        f.write("  hop_size: 2\n")
        f.write("  look_ahead: 1\n")
    beam.set_streaming_config(cp)
    # 2) the same settings written directly into the config file
    with cp.open("w") as f:
        f.write("encoder_conf:\n")
        f.write("  block_size: 4\n")
        f.write("  hop_size: 2\n")
        f.write("  look_ahead: 1\n")
    beam.set_streaming_config(cp)
    # 3) explicit setter methods
    beam.set_block_size(4)
    beam.set_hop_size(2)
    beam.set_look_ahead(1)
    beam.to(dtype=dtype)
    enc = torch.randn(10, encoder_output_size).type(dtype)
    with torch.no_grad():
        beam(
            x=enc,
            maxlenratio=0.0,
            minlenratio=0.0,
        )
| 8,092 | 30.490272 | 86 | py |
espnet | espnet-master/test/espnet2/asr/decoder/test_rnn_decoder.py | import pytest
import torch
from espnet2.asr.decoder.rnn_decoder import RNNDecoder
from espnet.nets.beam_search import BeamSearch
@pytest.mark.parametrize("context_residual", [True, False])
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
def test_RNNDecoder_backward(context_residual, rnn_type):
    """Teacher-forced decoding backprops for all RNNDecoder variants."""
    decoder = RNNDecoder(10, 12, context_residual=context_residual, rnn_type=rnn_type)
    enc_out = torch.randn(2, 9, 12)
    enc_lens = torch.tensor([9, 7], dtype=torch.long)
    tgt = torch.randint(0, 10, [2, 4], dtype=torch.long)
    tgt_lens = torch.tensor([4, 3], dtype=torch.long)
    logits, _ = decoder(enc_out, enc_lens, tgt, tgt_lens)
    logits.sum().backward()
@pytest.mark.parametrize("context_residual", [True, False])
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
def test_RNNDecoder_init_state(context_residual, rnn_type):
    """init_state() yields a state that score() accepts."""
    decoder = RNNDecoder(10, 12, context_residual=context_residual, rnn_type=rnn_type)
    enc_out = torch.randn(9, 12)
    initial_state = decoder.init_state(enc_out)
    hyp = torch.randint(0, 10, [4], dtype=torch.long)
    decoder.score(hyp, initial_state, enc_out)
def test_RNNDecoder_invalid_type():
    """An unsupported rnn_type name raises ValueError."""
    with pytest.raises(ValueError):
        RNNDecoder(10, 12, rnn_type="foo")
@pytest.mark.parametrize("context_residual", [True, False])
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_RNNDecoder_beam_search(context_residual, rnn_type, dtype):
    """Smoke test: RNNDecoder drives BeamSearch in both float precisions."""
    token_list = ["<blank>", "a", "b", "c", "unk", "<eos>"]
    vocab_size = len(token_list)
    encoder_output_size = 4
    decoder = RNNDecoder(
        vocab_size,
        encoder_output_size=encoder_output_size,
        context_residual=context_residual,
        rnn_type=rnn_type,
    )
    beam = BeamSearch(
        beam_size=3,
        vocab_size=vocab_size,
        weights={"test": 1.0},
        scorers={"test": decoder},
        token_list=token_list,
        sos=vocab_size - 1,
        eos=vocab_size - 1,
        pre_beam_score_key=None,
    )
    beam.to(dtype=dtype)
    enc = torch.randn(10, encoder_output_size).type(dtype)
    with torch.no_grad():
        beam(
            x=enc,
            maxlenratio=0.0,
            minlenratio=0.0,
        )
| 2,206 | 31.940299 | 86 | py |
espnet | espnet-master/test/espnet2/lm/test_seq_rnn_lm.py | import pytest
import torch
from espnet2.lm.seq_rnn_lm import SequentialRNNLM
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.beam_search import BeamSearch
@pytest.mark.parametrize("rnn_type", ["LSTM", "GRU", "RNN_TANH", "RNN_RELU"])
@pytest.mark.parametrize("tie_weights", [True, False])
def test_SequentialRNNLM_backward(rnn_type, tie_weights):
    """Two chained forward passes (carrying hidden state) backprop cleanly."""
    lm = SequentialRNNLM(10, rnn_type=rnn_type, tie_weights=tie_weights)
    tokens = torch.randint(0, 9, [2, 10])
    logits, hidden = lm(tokens, None)
    logits, hidden = lm(tokens, hidden)
    logits.sum().backward()
@pytest.mark.parametrize("rnn_type", ["LSTM", "GRU", "RNN_TANH", "RNN_RELU"])
@pytest.mark.parametrize("tie_weights", [True, False])
def test_SequentialRNNLM_score(rnn_type, tie_weights):
    """score() accepts a freshly initialized state."""
    lm = SequentialRNNLM(10, rnn_type=rnn_type, tie_weights=tie_weights)
    tokens = torch.randint(0, 9, (12,))
    lm.score(tokens, lm.init_state(None), None)
def test_SequentialRNNLM_invalid_type():
    """An unsupported rnn_type name raises ValueError."""
    with pytest.raises(ValueError):
        SequentialRNNLM(10, rnn_type="foooo")
def test_SequentialRNNLM_tie_weights_value_error():
    """tie_weights requires matching embedding and hidden sizes."""
    with pytest.raises(ValueError):
        SequentialRNNLM(10, tie_weights=True, unit=20, nhid=10)
@pytest.mark.parametrize("rnn_type", ["LSTM", "GRU", "RNN_TANH", "RNN_RELU"])
@pytest.mark.parametrize("tie_weights", [True, False])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_SequentialRNNLM_beam_search(rnn_type, tie_weights, dtype):
    """Smoke test: SequentialRNNLM serves as the sole BeamSearch scorer."""
    token_list = ["<blank>", "a", "b", "c", "unk", "<eos>"]
    vocab_size = len(token_list)
    model = SequentialRNNLM(
        vocab_size, nlayers=2, rnn_type=rnn_type, tie_weights=tie_weights
    )
    beam = BeamSearch(
        beam_size=3,
        vocab_size=vocab_size,
        weights={"test": 1.0},
        scorers={"test": model},
        token_list=token_list,
        sos=vocab_size - 1,
        eos=vocab_size - 1,
        pre_beam_score_key=None,
    )
    beam.to(dtype=dtype)
    enc = torch.randn(10, 20).type(dtype)
    with torch.no_grad():
        beam(
            x=enc,
            maxlenratio=0.0,
            minlenratio=0.0,
        )
@pytest.mark.parametrize("rnn_type", ["LSTM", "GRU", "RNN_TANH", "RNN_RELU"])
@pytest.mark.parametrize("tie_weights", [True, False])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_SequentialRNNLM_batch_beam_search(rnn_type, tie_weights, dtype):
    """Smoke test: SequentialRNNLM serves as the sole BatchBeamSearch scorer."""
    token_list = ["<blank>", "a", "b", "c", "unk", "<eos>"]
    vocab_size = len(token_list)
    model = SequentialRNNLM(
        vocab_size, nlayers=2, rnn_type=rnn_type, tie_weights=tie_weights
    )
    beam = BatchBeamSearch(
        beam_size=3,
        vocab_size=vocab_size,
        weights={"test": 1.0},
        scorers={"test": model},
        token_list=token_list,
        sos=vocab_size - 1,
        eos=vocab_size - 1,
        pre_beam_score_key=None,
    )
    beam.to(dtype=dtype)
    enc = torch.randn(10, 20).type(dtype)
    with torch.no_grad():
        beam(
            x=enc,
            maxlenratio=0.0,
            minlenratio=0.0,
        )
| 3,110 | 30.424242 | 77 | py |
espnet | espnet-master/test/espnet2/lm/test_transformer_lm.py | import pytest
import torch
from espnet2.lm.transformer_lm import TransformerLM
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.beam_search import BeamSearch
@pytest.mark.parametrize("pos_enc", ["sinusoidal", None])
def test_TransformerLM_backward(pos_enc):
    """Two consecutive forward passes (carrying state) backprop cleanly."""
    lm = TransformerLM(10, pos_enc=pos_enc, unit=10)
    tokens = torch.randint(0, 9, [2, 5])
    logits, state = lm(tokens, None)
    logits, state = lm(tokens, state)
    logits.sum().backward()
@pytest.mark.parametrize("pos_enc", ["sinusoidal", None])
def test_TransformerLM_score(pos_enc):
    """score() accepts a freshly initialized state."""
    lm = TransformerLM(10, pos_enc=pos_enc, unit=10)
    tokens = torch.randint(0, 9, (12,))
    lm.score(tokens, lm.init_state(None), None)
def test_TransformerLM_invalid_type():
    """An unsupported pos_enc name raises ValueError."""
    with pytest.raises(ValueError):
        TransformerLM(10, pos_enc="fooo")
@pytest.mark.parametrize("pos_enc", ["sinusoidal", None])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_TransformerLM_beam_search(pos_enc, dtype):
    """Smoke test: TransformerLM serves as the sole BeamSearch scorer."""
    token_list = ["<blank>", "a", "b", "c", "unk", "<eos>"]
    vocab_size = len(token_list)
    model = TransformerLM(vocab_size, pos_enc=pos_enc, unit=10)
    beam = BeamSearch(
        beam_size=3,
        vocab_size=vocab_size,
        weights={"test": 1.0},
        scorers={"test": model},
        token_list=token_list,
        sos=vocab_size - 1,
        eos=vocab_size - 1,
        pre_beam_score_key=None,
    )
    beam.to(dtype=dtype)
    enc = torch.randn(10, 20).type(dtype)
    with torch.no_grad():
        beam(
            x=enc,
            maxlenratio=0.0,
            minlenratio=0.0,
        )
@pytest.mark.parametrize("pos_enc", ["sinusoidal", None])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_TransformerLM_batch_beam_search(pos_enc, dtype):
    """Smoke test: TransformerLM serves as the sole BatchBeamSearch scorer."""
    token_list = ["<blank>", "a", "b", "c", "unk", "<eos>"]
    vocab_size = len(token_list)
    model = TransformerLM(vocab_size, pos_enc=pos_enc, unit=10)
    beam = BatchBeamSearch(
        beam_size=3,
        vocab_size=vocab_size,
        weights={"test": 1.0},
        scorers={"test": model},
        token_list=token_list,
        sos=vocab_size - 1,
        eos=vocab_size - 1,
        pre_beam_score_key=None,
    )
    beam.to(dtype=dtype)
    enc = torch.randn(10, 20).type(dtype)
    with torch.no_grad():
        beam(
            x=enc,
            maxlenratio=0.0,
            minlenratio=0.0,
        )
| 2,457 | 27.581395 | 65 | py |
espnet | espnet-master/test/espnet2/iterators/test_sequence_iter_factory.py | import pytest
import torch
from espnet2.iterators.sequence_iter_factory import SequenceIterFactory
class Dataset:
    """Identity dataset for iterator tests: indexing returns the index itself."""

    def __getitem__(self, item):
        return item
def collate_func(x):
    """Collate a batch of plain-Python items into a single tensor."""
    return torch.tensor(x)
@pytest.mark.parametrize("collate", [None, collate_func])
def test_SequenceIterFactory_larger_than_num_iters(collate):
    """With fewer iters per epoch than batches, each epoch resumes where the
    previous one left off, wrapping around the batch list."""
    dataset = Dataset()
    batches = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
    iter_factory = SequenceIterFactory(
        dataset=dataset, batches=batches, num_iters_per_epoch=3, collate_fn=collate
    )
    # Collect the concrete batch contents of epochs 1..4.
    seq = [
        [list(map(int, it)) for it in iter_factory.build_iter(i)] for i in range(1, 5)
    ]
    assert seq == [
        [[0, 1], [2, 3], [4, 5]],
        [[6, 7], [8, 9], [0, 1]],
        [[2, 3], [4, 5], [6, 7]],
        [[8, 9], [0, 1], [2, 3]],
    ]
@pytest.mark.parametrize("collate", [None, collate_func])
def test_SequenceIterFactory_smaller_than_num_iters(collate):
    """With more iters per epoch than batches, the batch list repeats within an
    epoch and consecutive epochs continue the cycle."""
    dataset = Dataset()
    batches = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
    iter_factory = SequenceIterFactory(
        dataset=dataset, batches=batches, num_iters_per_epoch=9, collate_fn=collate
    )
    # Collect the concrete batch contents of epochs 1..4.
    seq = [
        [list(map(int, it)) for it in iter_factory.build_iter(i)] for i in range(1, 5)
    ]
    assert seq == [
        [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [0, 1], [2, 3], [4, 5], [6, 7]],
        [[8, 9], [0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [0, 1], [2, 3], [4, 5]],
        [[6, 7], [8, 9], [0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [0, 1], [2, 3]],
        [[4, 5], [6, 7], [8, 9], [0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [0, 1]],
    ]
@pytest.mark.parametrize("collate", [None, collate_func])
@pytest.mark.parametrize("num_iters_per_epoch", [None, 3, 9])
def test_SequenceIterFactory_deterministic(collate, num_iters_per_epoch):
    """Rebuilding the iterator for the same epoch yields identical batches.

    With shuffle=True the ordering must be a pure function of the epoch
    number so training runs are reproducible and resumable.
    """
    dataset = Dataset()
    batches = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
    iter_factory = SequenceIterFactory(
        dataset=dataset,
        batches=batches,
        # Bug fix: this was hard-coded to 3, so the None/9 parametrizations
        # never actually changed the factory under test.
        num_iters_per_epoch=num_iters_per_epoch,
        shuffle=True,
        collate_fn=collate,
    )
    for epoch in range(1, 10):
        pair = zip(iter_factory.build_iter(epoch), iter_factory.build_iter(epoch))
        for v, v2 in pair:
            assert (v == v2).all()
| 2,206 | 30.528571 | 86 | py |
espnet | espnet-master/test/espnet2/tts/test_prodiff.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.tts.prodiff import ProDiff
from espnet2.tts.prodiff.loss import SSimLoss
is_torch_1_7_plus = V(torch.__version__) >= V("1.7.0")
@pytest.mark.parametrize("reduction_factor", [1])
@pytest.mark.parametrize(
"spk_embed_dim, spk_embed_integration_type",
[(None, "add"), (2, "add"), (2, "concat")],
)
@pytest.mark.parametrize("encoder_type", ["transformer", "conformer"])
@pytest.mark.parametrize("diffusion_scheduler", ["linear", "cosine", "vpsde"])
@pytest.mark.parametrize(
"spks, langs, use_gst",
[(-1, -1, False), (5, 2, True)],
)
@pytest.mark.skipif(
not is_torch_1_7_plus,
reason="Pytorch >= 1.7 is required.",
)
def test_prodiff(
reduction_factor,
spk_embed_dim,
spk_embed_integration_type,
diffusion_scheduler,
encoder_type,
use_gst,
spks,
langs,
):
model = ProDiff(
idim=10,
odim=5,
adim=4,
aheads=2,
elayers=1,
eunits=4,
reduction_factor=reduction_factor,
encoder_type=encoder_type,
duration_predictor_layers=2,
duration_predictor_chans=4,
duration_predictor_kernel_size=3,
energy_predictor_layers=2,
energy_predictor_chans=4,
energy_predictor_kernel_size=3,
energy_predictor_dropout=0.5,
energy_embed_kernel_size=9,
energy_embed_dropout=0.5,
pitch_predictor_layers=2,
pitch_predictor_chans=4,
pitch_predictor_kernel_size=3,
pitch_predictor_dropout=0.5,
pitch_embed_kernel_size=9,
pitch_embed_dropout=0.5,
spks=spks,
langs=langs,
spk_embed_dim=spk_embed_dim,
spk_embed_integration_type=spk_embed_integration_type,
use_gst=use_gst,
gst_tokens=2,
gst_heads=4,
gst_conv_layers=2,
gst_conv_chans_list=[2, 4],
gst_conv_kernel_size=3,
gst_conv_stride=2,
gst_gru_layers=1,
gst_gru_units=4,
use_masking=False,
use_weighted_masking=True,
denoiser_layers=1,
denoiser_channels=4,
diffusion_steps=1,
diffusion_scheduler=diffusion_scheduler,
)
inputs = dict(
text=torch.randint(1, 10, (2, 2)),
text_lengths=torch.tensor([2, 1], dtype=torch.long),
feats=torch.randn(2, 4 * reduction_factor, 5),
feats_lengths=torch.tensor([4, 2], dtype=torch.long) * reduction_factor,
durations=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.long),
pitch=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.float).unsqueeze(-1),
energy=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.float).unsqueeze(-1),
# NOTE(kan-bayashi): +1 for eos
durations_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
pitch_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
energy_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
)
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(2, spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (2, 1)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (2, 1)))
loss, *_ = model(**inputs)
loss.backward()
with torch.no_grad():
model.eval()
inputs = dict(
text=torch.randint(0, 10, (2,)),
feats=torch.randn(2, 4 * reduction_factor, 5),
durations=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.long),
)
if use_gst:
inputs.update(feats=torch.randn(5, 5))
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (1,)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (1,)))
model.inference(**inputs)
# teacher forcing
inputs.update(durations=torch.tensor([2, 2, 0], dtype=torch.long))
inputs.update(pitch=torch.tensor([2, 2, 0], dtype=torch.float).unsqueeze(-1))
inputs.update(energy=torch.tensor([2, 2, 0], dtype=torch.float).unsqueeze(-1))
model.inference(**inputs, use_teacher_forcing=True)
@pytest.mark.parametrize("reduction_type", ["none", "mean"])
@pytest.mark.skipif(
not is_torch_1_7_plus,
reason="Pytorch >= 1.7 is required.",
)
def test_ssim(reduction_type):
lossfun = SSimLoss(reduction=reduction_type)
feats = torch.randn(2, 4, 5)
lossfun(feats, feats)
| 4,549 | 32.211679 | 86 | py |
espnet | espnet-master/test/espnet2/tts/test_fastspeech.py | import pytest
import torch
from espnet2.tts.fastspeech import FastSpeech
@pytest.mark.parametrize("reduction_factor", [1, 3])
@pytest.mark.parametrize(
"spk_embed_dim, spk_embed_integration_type",
[(None, "add"), (2, "add"), (2, "concat")],
)
@pytest.mark.parametrize("encoder_type", ["transformer", "conformer"])
@pytest.mark.parametrize("decoder_type", ["transformer", "conformer"])
@pytest.mark.parametrize(
"spks, langs, use_gst",
[(-1, -1, False), (5, 2, True)],
)
def test_fastspeech(
reduction_factor,
encoder_type,
decoder_type,
spk_embed_dim,
spk_embed_integration_type,
spks,
langs,
use_gst,
):
model = FastSpeech(
idim=10,
odim=5,
adim=4,
aheads=2,
elayers=1,
eunits=4,
dlayers=1,
dunits=4,
postnet_layers=1,
postnet_chans=4,
postnet_filts=5,
reduction_factor=reduction_factor,
encoder_type=encoder_type,
decoder_type=decoder_type,
spks=spks,
langs=langs,
spk_embed_dim=spk_embed_dim,
spk_embed_integration_type=spk_embed_integration_type,
use_gst=use_gst,
gst_tokens=2,
gst_heads=4,
gst_conv_layers=2,
gst_conv_chans_list=[2, 4],
gst_conv_kernel_size=3,
gst_conv_stride=2,
gst_gru_layers=1,
gst_gru_units=4,
use_masking=True,
use_weighted_masking=False,
)
inputs = dict(
text=torch.randint(1, 10, (2, 2)),
text_lengths=torch.tensor([2, 1], dtype=torch.long),
feats=torch.randn(2, 4 * reduction_factor, 5),
feats_lengths=torch.tensor([4, 2], dtype=torch.long) * reduction_factor,
durations=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.long),
# NOTE(kan-bayashi): +1 for eos
durations_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
)
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(2, spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (2, 1)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (2, 1)))
loss, *_ = model(**inputs)
loss.backward()
with torch.no_grad():
model.eval()
inputs = dict(
text=torch.randint(0, 10, (2,)),
)
if use_gst:
inputs.update(feats=torch.randn(5, 5))
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (1,)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (1,)))
model.inference(**inputs)
# teacher forcing
inputs.update(durations=torch.tensor([2, 2, 1], dtype=torch.long))
model.inference(**inputs, use_teacher_forcing=True)
| 2,871 | 28.608247 | 80 | py |
espnet | espnet-master/test/espnet2/tts/test_tacotron2.py | import pytest
import torch
from espnet2.tts.tacotron2 import Tacotron2
@pytest.mark.parametrize("prenet_layers", [0, 1])
@pytest.mark.parametrize("postnet_layers", [0, 1])
@pytest.mark.parametrize("reduction_factor", [1, 3])
@pytest.mark.parametrize(
"spk_embed_dim, spk_embed_integration_type",
[(None, "add"), (2, "add"), (2, "concat")],
)
@pytest.mark.parametrize(
"spks, langs, use_gst",
[(-1, -1, False), (5, 2, True)],
)
@pytest.mark.parametrize("use_guided_attn_loss", [True, False])
def test_tacotron2(
prenet_layers,
postnet_layers,
reduction_factor,
spks,
langs,
spk_embed_dim,
spk_embed_integration_type,
use_gst,
use_guided_attn_loss,
):
model = Tacotron2(
idim=10,
odim=5,
adim=4,
embed_dim=4,
econv_layers=1,
econv_filts=5,
econv_chans=4,
elayers=1,
eunits=4,
dlayers=1,
dunits=4,
prenet_layers=prenet_layers,
prenet_units=4,
postnet_layers=postnet_layers,
postnet_chans=4,
postnet_filts=5,
reduction_factor=reduction_factor,
spks=spks,
langs=langs,
spk_embed_dim=spk_embed_dim,
spk_embed_integration_type=spk_embed_integration_type,
use_gst=use_gst,
gst_tokens=2,
gst_heads=4,
gst_conv_layers=2,
gst_conv_chans_list=[2, 4],
gst_conv_kernel_size=3,
gst_conv_stride=2,
gst_gru_layers=1,
gst_gru_units=4,
loss_type="L1+L2",
use_guided_attn_loss=use_guided_attn_loss,
)
inputs = dict(
text=torch.randint(0, 10, (2, 4)),
text_lengths=torch.tensor([4, 1], dtype=torch.long),
feats=torch.randn(2, 5, 5),
feats_lengths=torch.tensor([5, 3], dtype=torch.long),
)
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(2, spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (2, 1)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (2, 1)))
loss, *_ = model(**inputs)
loss.backward()
with torch.no_grad():
model.eval()
# free running
inputs = dict(
text=torch.randint(0, 10, (2,)),
)
if use_gst:
inputs.update(feats=torch.randn(5, 5))
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (1,)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (1,)))
model.inference(**inputs, maxlenratio=1.0)
# teacher forcing
inputs.update(feats=torch.randn(5, 5))
model.inference(**inputs, use_teacher_forcing=True)
| 2,798 | 26.99 | 63 | py |
espnet | espnet-master/test/espnet2/tts/test_fastspeech2.py | import pytest
import torch
from espnet2.tts.fastspeech2 import FastSpeech2
@pytest.mark.parametrize("reduction_factor", [1, 3])
@pytest.mark.parametrize(
"spk_embed_dim, spk_embed_integration_type",
[(None, "add"), (2, "add"), (2, "concat")],
)
@pytest.mark.parametrize("encoder_type", ["transformer", "conformer"])
@pytest.mark.parametrize("decoder_type", ["transformer", "conformer"])
@pytest.mark.parametrize(
"spks, langs, use_gst",
[(-1, -1, False), (5, 2, True)],
)
def test_fastspeech2(
reduction_factor,
spk_embed_dim,
spk_embed_integration_type,
encoder_type,
decoder_type,
use_gst,
spks,
langs,
):
model = FastSpeech2(
idim=10,
odim=5,
adim=4,
aheads=2,
elayers=1,
eunits=4,
dlayers=1,
dunits=4,
postnet_layers=1,
postnet_chans=4,
postnet_filts=5,
reduction_factor=reduction_factor,
encoder_type=encoder_type,
decoder_type=decoder_type,
duration_predictor_layers=2,
duration_predictor_chans=4,
duration_predictor_kernel_size=3,
energy_predictor_layers=2,
energy_predictor_chans=4,
energy_predictor_kernel_size=3,
energy_predictor_dropout=0.5,
energy_embed_kernel_size=9,
energy_embed_dropout=0.5,
pitch_predictor_layers=2,
pitch_predictor_chans=4,
pitch_predictor_kernel_size=3,
pitch_predictor_dropout=0.5,
pitch_embed_kernel_size=9,
pitch_embed_dropout=0.5,
spks=spks,
langs=langs,
spk_embed_dim=spk_embed_dim,
spk_embed_integration_type=spk_embed_integration_type,
use_gst=use_gst,
gst_tokens=2,
gst_heads=4,
gst_conv_layers=2,
gst_conv_chans_list=[2, 4],
gst_conv_kernel_size=3,
gst_conv_stride=2,
gst_gru_layers=1,
gst_gru_units=4,
use_masking=False,
use_weighted_masking=True,
)
inputs = dict(
text=torch.randint(1, 10, (2, 2)),
text_lengths=torch.tensor([2, 1], dtype=torch.long),
feats=torch.randn(2, 4 * reduction_factor, 5),
feats_lengths=torch.tensor([4, 2], dtype=torch.long) * reduction_factor,
durations=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.long),
pitch=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.float).unsqueeze(-1),
energy=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.float).unsqueeze(-1),
# NOTE(kan-bayashi): +1 for eos
durations_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
pitch_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
energy_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),
)
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(2, spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (2, 1)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (2, 1)))
loss, *_ = model(**inputs)
loss.backward()
with torch.no_grad():
model.eval()
inputs = dict(
text=torch.randint(0, 10, (2,)),
)
if use_gst:
inputs.update(feats=torch.randn(5, 5))
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (1,)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (1,)))
model.inference(**inputs)
# teacher forcing
inputs.update(durations=torch.tensor([2, 2, 0], dtype=torch.long))
inputs.update(pitch=torch.tensor([2, 2, 0], dtype=torch.float).unsqueeze(-1))
inputs.update(energy=torch.tensor([2, 2, 0], dtype=torch.float).unsqueeze(-1))
model.inference(**inputs, use_teacher_forcing=True)
| 3,903 | 32.084746 | 86 | py |
espnet | espnet-master/test/espnet2/tts/test_transformer.py | import pytest
import torch
from espnet2.tts.transformer import Transformer
@pytest.mark.parametrize("eprenet_conv_layers", [0, 1])
@pytest.mark.parametrize("dprenet_layers", [0, 1])
@pytest.mark.parametrize("postnet_layers", [0, 1])
@pytest.mark.parametrize("reduction_factor", [1, 3])
@pytest.mark.parametrize(
"spk_embed_dim, spk_embed_integration_type",
[(None, "add"), (2, "add"), (2, "concat")],
)
@pytest.mark.parametrize(
"spks, langs, use_gst",
[(-1, -1, False), (5, 2, True)],
)
@pytest.mark.parametrize(
"use_guided_attn_loss, modules_applied_guided_attn",
[
(False, ["encoder", "decoder", "encoder-decoder"]),
(True, ["encoder", "decoder", "encoder-decoder"]),
],
)
def test_tranformer(
eprenet_conv_layers,
dprenet_layers,
postnet_layers,
reduction_factor,
spks,
langs,
spk_embed_dim,
spk_embed_integration_type,
use_gst,
use_guided_attn_loss,
modules_applied_guided_attn,
):
model = Transformer(
idim=10,
odim=5,
embed_dim=4,
eprenet_conv_layers=eprenet_conv_layers,
eprenet_conv_filts=5,
dprenet_layers=dprenet_layers,
dprenet_units=4,
elayers=1,
eunits=6,
adim=4,
aheads=2,
dlayers=1,
dunits=4,
postnet_layers=postnet_layers,
postnet_chans=4,
postnet_filts=5,
positionwise_layer_type="conv1d",
positionwise_conv_kernel_size=1,
use_scaled_pos_enc=True,
use_batch_norm=True,
reduction_factor=reduction_factor,
spks=spks,
langs=langs,
spk_embed_dim=spk_embed_dim,
spk_embed_integration_type=spk_embed_integration_type,
use_gst=use_gst,
gst_tokens=2,
gst_heads=4,
gst_conv_layers=2,
gst_conv_chans_list=[2, 4],
gst_conv_kernel_size=3,
gst_conv_stride=2,
gst_gru_layers=1,
gst_gru_units=4,
loss_type="L1",
use_guided_attn_loss=use_guided_attn_loss,
modules_applied_guided_attn=modules_applied_guided_attn,
)
inputs = dict(
text=torch.randint(0, 10, (2, 4)),
text_lengths=torch.tensor([4, 1], dtype=torch.long),
feats=torch.randn(2, 5, 5),
feats_lengths=torch.tensor([5, 3], dtype=torch.long),
)
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(2, spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (2, 1)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (2, 1)))
loss, *_ = model(**inputs)
loss.backward()
with torch.no_grad():
model.eval()
# free running
inputs = dict(
text=torch.randint(0, 10, (2,)),
)
if use_gst:
inputs.update(feats=torch.randn(5, 5))
if spk_embed_dim is not None:
inputs.update(spembs=torch.randn(spk_embed_dim))
if spks > 0:
inputs.update(sids=torch.randint(0, spks, (1,)))
if langs > 0:
inputs.update(lids=torch.randint(0, langs, (1,)))
model.inference(**inputs, maxlenratio=1.0)
# teacher forcing
inputs.update(feats=torch.randn(5, 5))
model.inference(**inputs, use_teacher_forcing=True)
| 3,311 | 28.052632 | 64 | py |
espnet | espnet-master/test/espnet2/tts/feats_extract/test_dio.py | import pytest
import torch
from espnet2.tts.feats_extract.dio import Dio
@pytest.mark.parametrize("use_continuous_f0", [False, True])
@pytest.mark.parametrize("use_log_f0", [False, True])
@pytest.mark.parametrize(
"use_token_averaged_f0, reduction_factor", [(False, 1), (True, 1), (True, 3)]
)
def test_forward(
use_continuous_f0, use_log_f0, use_token_averaged_f0, reduction_factor
):
layer = Dio(
n_fft=128,
hop_length=64,
f0min=40,
f0max=800,
fs="16k",
use_continuous_f0=use_continuous_f0,
use_log_f0=use_log_f0,
use_token_averaged_f0=use_token_averaged_f0,
reduction_factor=reduction_factor,
)
xs = torch.randn(2, 384)
if not use_token_averaged_f0:
layer(xs, torch.LongTensor([384, 128]))
else:
ds = torch.LongTensor([[3, 3, 1], [3, 0, 0]]) // reduction_factor
dlens = torch.LongTensor([3, 1])
ps, _ = layer(
xs, torch.LongTensor([384, 128]), durations=ds, durations_lengths=dlens
)
assert torch.isnan(ps).sum() == 0
@pytest.mark.parametrize(
    "use_token_averaged_f0, reduction_factor", [(False, 1), (True, 1), (True, 3)]
)
def test_output_size(use_token_averaged_f0, reduction_factor):
    """output_size() is callable for every option combination."""
    extractor = Dio(
        n_fft=4,
        hop_length=1,
        f0min=40,
        f0max=800,
        fs="16k",
        use_token_averaged_f0=use_token_averaged_f0,
        reduction_factor=reduction_factor,
    )
    print(extractor.output_size())
@pytest.mark.parametrize(
    "use_token_averaged_f0, reduction_factor", [(False, 1), (True, 1), (True, 3)]
)
def test_get_parameters(use_token_averaged_f0, reduction_factor):
    """get_parameters() is callable for every option combination."""
    extractor = Dio(
        n_fft=4,
        hop_length=1,
        f0min=40,
        f0max=800,
        fs="16k",
        use_token_averaged_f0=use_token_averaged_f0,
        reduction_factor=reduction_factor,
    )
    print(extractor.get_parameters())
| 1,928 | 27.367647 | 83 | py |
espnet | espnet-master/test/espnet2/tts/feats_extract/test_linear_spectrogram.py | import numpy as np
import torch
from espnet2.tts.feats_extract.linear_spectrogram import LinearSpectrogram
from espnet2.tts.feats_extract.log_mel_fbank import LogMelFbank
def test_forward():
    """Forward pass yields the expected output shape."""
    extractor = LinearSpectrogram(n_fft=4, hop_length=1)
    inputs = torch.randn(2, 4, 9)
    outputs, _ = extractor(inputs, torch.LongTensor([4, 3]))
    assert outputs.shape == (2, 5, 9, 3)
def test_backward_leaf_in():
    """Gradients flow back to a leaf input tensor."""
    extractor = LinearSpectrogram(n_fft=4, hop_length=1)
    inputs = torch.randn(2, 4, 9, requires_grad=True)
    outputs, _ = extractor(inputs, torch.LongTensor([4, 3]))
    outputs.sum().backward()
def test_backward_not_leaf_in():
    """Gradients flow through a non-leaf input tensor."""
    extractor = LinearSpectrogram(n_fft=4, hop_length=1)
    leaf = torch.randn(2, 4, 9, requires_grad=True)
    # The addition makes the tensor passed in a non-leaf node.
    outputs, _ = extractor(leaf + 2, torch.LongTensor([4, 3]))
    outputs.sum().backward()
def test_output_size():
    """output_size() is callable and printable."""
    print(LinearSpectrogram(n_fft=4, hop_length=1).output_size())
def test_get_parameters():
    """get_parameters() is callable and printable."""
    print(LinearSpectrogram(n_fft=4, hop_length=1).get_parameters())
def test_log_mel_equal():
    """LogMelFbank equals LinearSpectrogram followed by its own log-mel stage."""
    layer1 = LinearSpectrogram(n_fft=4, hop_length=1)
    layer2 = LogMelFbank(n_fft=4, hop_length=1, n_mels=2)
    x = torch.randn(2, 4, 9)
    y1, y1_lens = layer1(x, torch.LongTensor([4, 3]))
    y2, _ = layer2(x, torch.LongTensor([4, 3]))
    # Apply only the log-mel sub-module of layer2 to the linear spectrogram.
    y1_2, _ = layer2.logmel(y1, y1_lens)
    np.testing.assert_array_equal(
        y2.detach().cpu().numpy(),
        y1_2.detach().cpu().numpy(),
    )
| 1,430 | 27.058824 | 74 | py |
espnet | espnet-master/test/espnet2/tts/feats_extract/test_log_mel_fbank.py | import numpy as np
import torch
from espnet2.tts.feats_extract.log_mel_fbank import LogMelFbank
from espnet.transform.spectrogram import logmelspectrogram
def test_forward():
    """Forward pass yields the expected output shape."""
    extractor = LogMelFbank(n_fft=4, hop_length=1, n_mels=2)
    inputs = torch.randn(2, 4, 9)
    outputs, _ = extractor(inputs, torch.LongTensor([4, 3]))
    assert outputs.shape == (2, 5, 9, 2)
def test_backward_leaf_in():
    """Gradients flow back to a leaf input tensor."""
    extractor = LogMelFbank(n_fft=4, hop_length=1, n_mels=2)
    inputs = torch.randn(2, 4, 9, requires_grad=True)
    outputs, _ = extractor(inputs, torch.LongTensor([4, 3]))
    outputs.sum().backward()
def test_backward_not_leaf_in():
    """Gradients flow through a non-leaf input tensor."""
    extractor = LogMelFbank(n_fft=4, hop_length=1, n_mels=2)
    leaf = torch.randn(2, 4, 9, requires_grad=True)
    # The addition makes the tensor passed in a non-leaf node.
    outputs, _ = extractor(leaf + 2, torch.LongTensor([4, 3]))
    outputs.sum().backward()
def test_output_size():
    """output_size() is callable and printable."""
    print(LogMelFbank(n_fft=4, hop_length=1, n_mels=2, fs="16k").output_size())
def test_get_parameters():
    """get_parameters() is callable and printable."""
    print(LogMelFbank(n_fft=4, hop_length=1, n_mels=2, fs="16k").get_parameters())
def test_compatible_with_espnet1():
    """Output matches ESPnet1's numpy-based log-mel implementation."""
    layer = LogMelFbank(n_fft=16, hop_length=4, n_mels=4, fs="16k", fmin=80, fmax=7600)
    x = torch.randn(1, 100)
    y, _ = layer(x, torch.LongTensor([100]))
    y = y.numpy()[0]
    # Reference: ESPnet1 transform with the same STFT/mel settings.
    y2 = logmelspectrogram(
        x[0].numpy(), n_fft=16, n_shift=4, n_mels=4, fs=16000, fmin=80, fmax=7600
    )
    np.testing.assert_allclose(y, y2, rtol=0, atol=1e-5)
| 1,422 | 28.040816 | 87 | py |
espnet | espnet-master/test/espnet2/tts/feats_extract/test_energy.py | import pytest
import torch
from espnet2.tts.feats_extract.energy import Energy
@pytest.mark.parametrize(
    "use_token_averaged_energy, reduction_factor", [(False, None), (True, 1), (True, 3)]
)
def test_forward(use_token_averaged_energy, reduction_factor):
    """Energy extraction runs forward with and without token averaging."""
    layer = Energy(
        n_fft=128,
        hop_length=64,
        fs="16k",
        use_token_averaged_energy=use_token_averaged_energy,
        reduction_factor=reduction_factor,
    )
    xs = torch.randn(2, 384)
    if not use_token_averaged_energy:
        es, elens = layer(xs, torch.LongTensor([384, 128]))
        # Frame-level output: time dimension matches the longest length.
        assert es.shape[1] == max(elens)
    else:
        # Durations are expressed in reduced frames, hence the division.
        ds = torch.LongTensor([[3, 3, 1], [3, 0, 0]]) // reduction_factor
        dlens = torch.LongTensor([3, 1])
        es, _ = layer(
            xs, torch.LongTensor([384, 128]), durations=ds, durations_lengths=dlens
        )
    assert torch.isnan(es).sum() == 0
@pytest.mark.parametrize(
    "use_token_averaged_energy, reduction_factor", [(False, None), (True, 1), (True, 3)]
)
def test_output_size(use_token_averaged_energy, reduction_factor):
    """output_size() is callable for every option combination."""
    extractor = Energy(
        n_fft=4,
        hop_length=1,
        fs="16k",
        use_token_averaged_energy=use_token_averaged_energy,
        reduction_factor=reduction_factor,
    )
    print(extractor.output_size())
@pytest.mark.parametrize(
    "use_token_averaged_energy, reduction_factor", [(False, None), (True, 1), (True, 3)]
)
def test_get_parameters(use_token_averaged_energy, reduction_factor):
    """get_parameters() is callable for every option combination."""
    extractor = Energy(
        n_fft=4,
        hop_length=1,
        fs="16k",
        use_token_averaged_energy=use_token_averaged_energy,
        reduction_factor=reduction_factor,
    )
    print(extractor.get_parameters())
| 1,718 | 29.157895 | 88 | py |
espnet | espnet-master/test/espnet2/tts/feats_extract/test_log_spectrogram.py | import numpy as np
import torch
from espnet2.tts.feats_extract.log_spectrogram import LogSpectrogram
from espnet.transform.spectrogram import spectrogram
def test_forward():
    """Forward pass yields the expected output shape."""
    extractor = LogSpectrogram(n_fft=4, hop_length=1)
    inputs = torch.randn(2, 4, 9)
    outputs, _ = extractor(inputs, torch.LongTensor([4, 3]))
    assert outputs.shape == (2, 5, 9, 3)
def test_backward_leaf_in():
    """Gradients flow back to a leaf input tensor."""
    extractor = LogSpectrogram(n_fft=4, hop_length=1)
    inputs = torch.randn(2, 4, 9, requires_grad=True)
    outputs, _ = extractor(inputs, torch.LongTensor([4, 3]))
    outputs.sum().backward()
def test_backward_not_leaf_in():
    """Gradients flow through a non-leaf input tensor."""
    extractor = LogSpectrogram(n_fft=4, hop_length=1)
    leaf = torch.randn(2, 4, 9, requires_grad=True)
    # The addition makes the tensor passed in a non-leaf node.
    outputs, _ = extractor(leaf + 2, torch.LongTensor([4, 3]))
    outputs.sum().backward()
def test_output_size():
    """output_size() is callable and printable."""
    print(LogSpectrogram(n_fft=4, hop_length=1).output_size())
def test_get_parameters():
    """get_parameters() is callable and printable."""
    print(LogSpectrogram(n_fft=4, hop_length=1).get_parameters())
def test_compatible_with_espnet1():
    """Output matches ESPnet1's numpy-based spectrogram implementation."""
    layer = LogSpectrogram(n_fft=16, hop_length=4)
    x = torch.randn(1, 100)
    y, _ = layer(x, torch.LongTensor([100]))
    y = y.numpy()[0]
    # ESPnet1's transform returns a linear spectrogram; log10 for comparison.
    y2 = np.log10(spectrogram(x[0].numpy(), n_fft=16, n_shift=4))
    np.testing.assert_allclose(y, y2, rtol=0, atol=1e-4)
| 1,279 | 26.234043 | 68 | py |
espnet | espnet-master/test/espnet2/optimizers/test_sgd.py | import torch
from espnet2.optimizers.sgd import SGD
def test_SGD():
    """A single SGD step runs on a minimal linear model."""
    model = torch.nn.Linear(1, 1)
    optimizer = SGD(model.parameters())
    loss = model(torch.randn(1, 1)).sum()
    loss.backward()
    optimizer.step()
espnet | espnet-master/test/espnet2/asr_transducer/test_activation.py | import pytest
import torch
from espnet2.asr_transducer.decoder.stateless_decoder import StatelessDecoder
from espnet2.asr_transducer.encoder.encoder import Encoder
from espnet2.asr_transducer.espnet_transducer_model import ESPnetASRTransducerModel
from espnet2.asr_transducer.joint_network import JointNetwork
def prepare(model, input_size, vocab_size, batch_size):
    """Generate random, padded features and labels for a Transducer test.

    Args:
        model: Model providing ``ignore_id`` used to pad features/labels.
        input_size: Feature dimension of the encoder input.
        vocab_size: Output vocabulary size (index 0 is reserved for <blank>).
        batch_size: Number of utterances (lengths are defined for up to 2).

    Returns:
        Tuple of (feats, labels, feat_lengths, label_lengths).
    """
    n_token = vocab_size - 1
    feat_len = [15, 11]
    label_len = [13, 9]
    feats = torch.randn(batch_size, max(feat_len), input_size)
    labels = (torch.rand(batch_size, max(label_len)) * n_token % n_token).long()
    # Pad each utterance past its true length with the model's ignore index.
    # (was a hard-coded `range(2)`; use batch_size so the helper stays
    # consistent with its own parameter)
    for i in range(batch_size):
        feats[i, feat_len[i] :] = model.ignore_id
        labels[i, label_len[i] :] = model.ignore_id
    # Remap label id 0 (reserved for <blank>) to a valid non-blank token.
    labels[labels == 0] = vocab_size - 2
    return feats, labels, torch.tensor(feat_len), torch.tensor(label_len)
@pytest.mark.parametrize(
    "act_type, act_params",
    [
        ("ftswish", {"ftswish_threshold": -0.25, "ftswish_mean_shift": -0.1}),
        ("hardtanh", {"hardtanh_min_val": -2, "hardtanh_max_val": 2}),
        ("leaky_relu", {"leakyrelu_neg_slope": 0.02}),
        ("mish", {"softplus_beta": 1.125, "softplus_threshold": 10}),
        ("relu", {}),
        ("selu", {}),
        ("smish", {"smish_alpha": 1.125, "smish_beta": 1.125}),
        ("swish", {}),
        ("swish", {"swish_beta": 1.125}),
        ("tanh", {}),
        ("identity", {}),
    ],
)
def test_activation(act_type, act_params):
    """Build a tiny Transducer model with each activation and run forward."""
    batch_size = 2
    input_size = 10
    token_list = ["<blank>", "a", "b", "c", "<space>"]
    vocab_size = len(token_list)
    # The activation parameters are fed both to the encoder (via main_conf)
    # and to the joint network (via joint_activation_type / **act_params).
    encoder = Encoder(
        input_size,
        [
            {
                "block_type": "conformer",
                "hidden_size": 8,
                "linear_size": 4,
                "conv_mod_kernel_size": 3,
            }
        ],
        main_conf=act_params,
    )
    decoder = StatelessDecoder(vocab_size, embed_size=4)
    joint_network = JointNetwork(
        vocab_size,
        encoder.output_size,
        decoder.output_size,
        joint_activation_type=act_type,
        **act_params,
    )
    model = ESPnetASRTransducerModel(
        vocab_size,
        token_list,
        frontend=None,
        specaug=None,
        normalize=None,
        encoder=encoder,
        decoder=decoder,
        joint_network=joint_network,
    )
    feats, labels, feat_len, label_len = prepare(
        model, input_size, vocab_size, batch_size
    )
    _ = model(feats, feat_len, labels, label_len)
espnet | espnet-master/test/espnet2/asr_transducer/test_error_calculator_transducer.py | import pytest
import torch
from espnet2.asr_transducer.decoder.rnn_decoder import RNNDecoder
from espnet2.asr_transducer.decoder.stateless_decoder import StatelessDecoder
from espnet2.asr_transducer.error_calculator import ErrorCalculator
from espnet2.asr_transducer.joint_network import JointNetwork
@pytest.mark.parametrize(
    "report_opts, decoder_class, decoder_opts",
    [
        ({"report_cer": False, "report_wer": False}, RNNDecoder, {"hidden_size": 4}),
        ({"report_cer": True, "report_wer": True}, RNNDecoder, {"hidden_size": 4}),
        ({"report_cer": False, "report_wer": False}, StatelessDecoder, {}),
        ({"report_cer": True, "report_wer": True}, StatelessDecoder, {}),
    ],
)
def test_error_calculator_transducer(report_opts, decoder_class, decoder_opts):
    """ErrorCalculator runs on random encoder output for both decoder types."""
    token_list = ["<blank>", "a", "b", "c", "<space>"]
    vocab_size = len(token_list)
    encoder_size = 4
    decoder = decoder_class(vocab_size, embed_size=4, **decoder_opts)
    joint_net = JointNetwork(vocab_size, encoder_size, 4, joint_space_size=2)
    error_calc = ErrorCalculator(
        decoder,
        joint_net,
        token_list,
        "<space>",
        "<blank>",
        **report_opts,
    )
    enc_out = torch.randn(4, 30, encoder_size)
    enc_out_lens = torch.tensor([30, 29, 28, 27])
    target = torch.randint(0, vocab_size, [4, 20], dtype=torch.int32)
    # Decoding only; no gradients needed for error-rate computation.
    with torch.no_grad():
        _, _ = error_calc(enc_out, target, enc_out_lens)
| 1,462 | 33.023256 | 85 | py |
espnet | espnet-master/test/espnet2/asr_transducer/test_beam_search_transducer.py | import numpy as np
import pytest
import torch
from espnet2.asr_transducer.beam_search_transducer import (
BeamSearchTransducer,
Hypothesis,
)
from espnet2.asr_transducer.decoder.mega_decoder import MEGADecoder
from espnet2.asr_transducer.decoder.rnn_decoder import RNNDecoder
from espnet2.asr_transducer.decoder.rwkv_decoder import RWKVDecoder
from espnet2.asr_transducer.decoder.stateless_decoder import StatelessDecoder
from espnet2.asr_transducer.joint_network import JointNetwork
from espnet2.lm.seq_rnn_lm import SequentialRNNLM
@pytest.mark.execution_timeout(5.0)
@pytest.mark.parametrize(
    "decoder_class, decoder_opts, search_opts",
    [
        (
            RNNDecoder,
            {"hidden_size": 4},
            {"search_type": "default", "score_norm": True},
        ),
        (RNNDecoder, {"hidden_size": 4}, {"search_type": "default", "lm": None}),
        (StatelessDecoder, {}, {"search_type": "default", "lm": None}),
        (StatelessDecoder, {}, {"search_type": "default"}),
        (MEGADecoder, {}, {"search_type": "default", "lm": None}),
        (MEGADecoder, {"chunk_size": 2}, {"search_type": "default"}),
        (RWKVDecoder, {"linear_size": 4}, {"search_type": "default", "lm": None}),
        (RWKVDecoder, {"linear_size": 4}, {"search_type": "default"}),
        (RNNDecoder, {"hidden_size": 4}, {"search_type": "alsd", "u_max": 10}),
        (
            RNNDecoder,
            {"hidden_size": 4},
            {"search_type": "alsd", "u_max": 10, "lm": None},
        ),
        (StatelessDecoder, {}, {"search_type": "alsd", "u_max": 10}),
        (StatelessDecoder, {}, {"search_type": "alsd", "u_max": 10, "lm": None}),
        (MEGADecoder, {}, {"search_type": "alsd", "u_max": 10}),
        (MEGADecoder, {}, {"search_type": "alsd", "u_max": 10, "lm": None}),
        (RWKVDecoder, {"linear_size": 4}, {"search_type": "alsd", "u_max": 10}),
        (
            RWKVDecoder,
            {"linear_size": 4},
            {"search_type": "alsd", "u_max": 10, "lm": None},
        ),
        (RNNDecoder, {"hidden_size": 4}, {"search_type": "tsd", "max_sym_exp": 3}),
        (
            RNNDecoder,
            {"hidden_size": 4},
            {"search_type": "tsd", "max_sym_exp": 3, "lm": None},
        ),
        (StatelessDecoder, {}, {"search_type": "tsd", "max_sym_exp": 3}),
        (StatelessDecoder, {}, {"search_type": "tsd", "max_sym_exp": 3, "lm": None}),
        (MEGADecoder, {}, {"search_type": "tsd", "max_sym_exp": 3}),
        (MEGADecoder, {}, {"search_type": "tsd", "max_sym_exp": 3, "lm": None}),
        (RWKVDecoder, {"linear_size": 4}, {"search_type": "tsd", "max_sym_exp": 3}),
        (
            RWKVDecoder,
            {"linear_size": 4},
            {"search_type": "tsd", "max_sym_exp": 3, "lm": None},
        ),
        (RNNDecoder, {"hidden_size": 4}, {"search_type": "maes", "nstep": 2}),
        (
            RNNDecoder,
            {"hidden_size": 4},
            {"search_type": "maes", "nstep": 2, "lm": None},
        ),
        (StatelessDecoder, {}, {"search_type": "maes", "nstep": 2}),
        (StatelessDecoder, {}, {"search_type": "maes", "nstep": 2, "lm": None}),
        (MEGADecoder, {}, {"search_type": "maes", "nstep": 2}),
        (
            MEGADecoder,
            {"chunk_size": 2},
            {"search_type": "maes", "nstep": 2, "lm": None},
        ),
        (RWKVDecoder, {"linear_size": 4}, {"search_type": "maes", "nstep": 2}),
        (
            RWKVDecoder,
            {"linear_size": 4},
            {"search_type": "maes", "nstep": 2, "lm": None},
        ),
    ],
)
def test_transducer_beam_search(decoder_class, decoder_opts, search_opts):
    """Run every beam-search algorithm with every decoder type (w/ and w/o LM)."""
    token_list = ["<blank>", "a", "b", "c"]
    vocab_size = len(token_list)
    encoder_size = 4
    # MEGA/RWKV decoders take block_size instead of embed_size.
    if decoder_class in (MEGADecoder, RWKVDecoder):
        if decoder_class == RWKVDecoder and not torch.cuda.is_available():
            pytest.skip("A GPU is required for WKV kernel computation")
        decoder = decoder_class(vocab_size, block_size=4, **decoder_opts)
    else:
        decoder = decoder_class(vocab_size, embed_size=4, **decoder_opts)
    joint_net = JointNetwork(vocab_size, encoder_size, 4, joint_space_size=2)
    # "lm": None in search_opts disables LM fusion; otherwise a small RNNLM
    # is used as the default.
    lm = search_opts.pop(
        "lm", SequentialRNNLM(vocab_size, unit=8, nlayers=1, rnn_type="lstm")
    )
    beam = BeamSearchTransducer(
        decoder,
        joint_net,
        beam_size=2,
        lm=lm,
        **search_opts,
    )
    enc_out = torch.randn(30, encoder_size)
    with torch.no_grad():
        _ = beam(enc_out)
@pytest.mark.parametrize(
    "search_opts",
    [
        {"beam_size": 5},
        {"beam_size": 2, "search_type": "tsd", "max_sym_exp": 1},
        {"beam_size": 2, "search_type": "alsd", "u_max": -2},
        {"beam_size": 2, "search_type": "maes", "expansion_beta": 2.3},
    ],
)
def test_integer_parameters_limits(search_opts):
    """Out-of-range search parameters are rejected at construction time."""
    vocab_size = 4
    decoder = StatelessDecoder(vocab_size, embed_size=4)
    joint_net = JointNetwork(vocab_size, 4, 4, joint_space_size=2)
    with pytest.raises(AssertionError):
        BeamSearchTransducer(decoder, joint_net, **search_opts)
def test_recombine_hyps():
    """Hypotheses with identical label sequences merge via log-sum-exp."""
    decoder = StatelessDecoder(4, embed_size=4)
    joint_net = JointNetwork(4, 4, 4, joint_space_size=2)
    beam_search = BeamSearchTransducer(decoder, joint_net, 2)
    duplicates = [
        Hypothesis(score=0.0, yseq=[0, 1, 2], dec_state=None),
        Hypothesis(score=12.0, yseq=[0, 1, 2], dec_state=None),
    ]
    merged = beam_search.recombine_hyps(duplicates)
    assert len(merged) == 1
    assert merged[0].score == np.logaddexp(0.0, 12.0)
| 5,704 | 34.880503 | 85 | py |
espnet | espnet-master/test/espnet2/asr_transducer/test_decoder.py | import pytest
import torch
from espnet2.asr_transducer.decoder.mega_decoder import MEGADecoder
from espnet2.asr_transducer.decoder.rnn_decoder import RNNDecoder
from espnet2.asr_transducer.decoder.rwkv_decoder import RWKVDecoder
from espnet2.asr_transducer.decoder.stateless_decoder import StatelessDecoder
def prepare():
    """Generate a small batch of random, zero-padded label sequences.

    Returns:
        Tuple of (vocab_size, labels) where labels is (batch, max_label_len)
        with values in [0, vocab_size - 2] and zero padding past each length.
    """
    batch_size = 2
    vocab_size = 4
    n_token = vocab_size - 1
    label_len = [13, 9]
    labels = (torch.rand(batch_size, max(label_len)) * n_token % n_token).long()
    # Pad past each utterance's true length (was a hard-coded `range(2)`;
    # iterate over batch_size for consistency with the local variable).
    for i in range(batch_size):
        labels[i, label_len[i] :] = 0
    return vocab_size, labels
@pytest.mark.parametrize(
    "params",
    [
        {},
        {"embed_size": 2, "hidden_size": 4, "rnn_type": "gru", "num_layers": 2},
        {"rnn_type": "lstm", "num_layers": 2, "dropout_rate": 0.1},
    ],
)
def test_rnn_decoder(params):
    """RNNDecoder forward pass runs for several configurations."""
    vocab_size, labels = prepare()
    RNNDecoder(vocab_size, **params)(labels)
@pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="A GPU is required for WKV kernel computation.",
)
@pytest.mark.parametrize(
    "params",
    [
        {"block_size": 4, "num_blocks": 2},
        {"block_size": 4, "num_blocks": 2, "attention_size": 8, "linear_size": 8},
    ],
)
@pytest.mark.execution_timeout(20)
def test_rwkv_decoder(params):
    """RWKVDecoder forward pass runs for several configurations (GPU only)."""
    vocab_size, labels = prepare()
    RWKVDecoder(vocab_size, **params)(labels)
def test_stateless_decoder():
    """StatelessDecoder forward pass runs on random labels."""
    vocab_size, labels = prepare()
    StatelessDecoder(vocab_size, embed_size=2)(labels)
@pytest.mark.parametrize(
    "params",
    [
        {},
        {"rel_pos_bias_type": "rotary"},
        {"chunk_size": 8},
        {"chunk_size": 16},
    ],
)
def test_mega_decoder(params):
    """MEGADecoder forward pass runs for several configurations."""
    vocab_size, labels = prepare()
    MEGADecoder(vocab_size, **params)(labels)
def test_mega_rel_pos_bias_type():
    """An unknown rel_pos_bias_type is rejected with ValueError.

    Cleanup: drop the unused ``decoder`` binding (construction is expected to
    raise, so nothing is ever assigned), matching the ``_ =`` style used by
    the sibling tests.
    """
    vocab_size, labels = prepare()
    with pytest.raises(ValueError):
        _ = MEGADecoder(vocab_size, rel_pos_bias_type="foo")
@pytest.mark.parametrize(
"rel_pos_bias_type",
["simple", "rotary"],
)
def test_mega_rel_pos_bias(rel_pos_bias_type):
    """'simple' bias with max_positions=1 must fail on longer inputs; 'rotary' must not."""
    vocab_size, labels = prepare()
    decoder = MEGADecoder(
        vocab_size, max_positions=1, rel_pos_bias_type=rel_pos_bias_type
    )

    if rel_pos_bias_type != "simple":
        _ = decoder(labels)
    else:
        # Labels are longer than max_positions, so the simple bias cannot cover them.
        with pytest.raises(ValueError):
            _ = decoder(labels)
def test_rnn_type():
    """An unsupported rnn_type must raise ValueError."""
    n_vocab, _labels = prepare()
    with pytest.raises(ValueError):
        _ = RNNDecoder(n_vocab, rnn_type="foo")
| 2,615 | 22.781818 | 82 | py |
espnet | espnet-master/test/espnet2/asr_transducer/test_encoder.py | import pytest
import torch
from espnet2.asr_transducer.encoder.encoder import Encoder
from espnet2.asr_transducer.utils import TooShortUttError
@pytest.mark.parametrize(
"input_conf, body_conf, main_conf",
[
(
{"vgg_like": True, "susbsampling_factor": 4, "conv_size": 8},
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 2,
"conv_mod_kernel_size": 1,
"num_blocks": 2,
"avg_eps": 1e-8,
}
],
{},
),
(
{"vgg_like": True, "susbsampling_factor": 6, "conv_size": 8},
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 2,
"conv_mod_kernel_size": 1,
"num_blocks": 2,
"avg_eps": 1e-8,
}
],
{},
),
(
{"vgg_like": True, "subsampling_factor": 4},
[
{
"block_type": "conv1d",
"output_size": 4,
"kernel_size": 1,
"batch_norm": True,
"relu": True,
}
],
{},
),
(
{"vgg_like": True, "subsampling_factor": 6},
[
{
"block_type": "conv1d",
"output_size": 4,
"kernel_size": 1,
"batch_norm": True,
"relu": True,
}
],
{},
),
(
{},
[
{
"block_type": "conv1d",
"output_size": 8,
"kernel_size": 2,
"dilation": 2,
},
{
"block_type": "conformer",
"hidden_size": 8,
"linear_size": 2,
"conv_mod_kernel_size": 1,
"num_blocks": 2,
},
{"block_type": "conv1d", "output_size": 4, "kernel_size": 1},
],
{},
),
(
{"conv_size": (8, 4)},
[{"block_type": "conv1d", "output_size": 4, "kernel_size": 1}],
{},
),
(
{"conv_size": 4},
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 2,
"conv_mod_kernel_size": 1,
}
],
{},
),
(
{"conv_size": 2},
[
{
"block_type": "conv1d",
"output_size": 4,
"kernel_size": 2,
"dilation": 2,
"batch_norm": True,
"relu": True,
},
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 2,
"conv_mod_kernel_size": 1,
"num_blocks": 2,
},
],
{},
),
(
{"conv_size": 2},
[
{
"block_type": "conv1d",
"output_size": 8,
"kernel_size": 2,
},
{
"block_type": "conformer",
"hidden_size": 8,
"linear_size": 2,
"conv_mod_kernel_size": 1,
"num_blocks": 2,
},
],
{
"dynamic_chunk_training": True,
"norm_type": "scale_norm",
"short_chunk_size": 30,
"short_chunk_threshold": 0.01,
"num_left_chunks": 2,
},
),
(
{},
[
{
"block_type": "conformer",
"hidden_size": 8,
"linear_size": 2,
"conv_mod_kernel_size": 1,
"norm_eps": 1e-5,
"norm_partial": 0.8,
"conv_mod_norm_eps": 0.4,
"num_blocks": 2,
},
],
{
"simplified_att_score": True,
"norm_type": "rms_norm",
"conv_mod_norm_type": "basic_norm",
"dynamic_chunk_training": True,
"short_chunk_size": 1,
"num_left_chunks": 0,
},
),
(
{},
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 2,
"conv_mod_kernel_size": 1,
}
],
{"norm_type": "rms_norm"},
),
],
)
def test_encoder(input_conf, body_conf, main_conf):
    """Forward a random batch through the encoder built from each config.

    Conformer-based configs are additionally re-run with Branchformer and
    E-Branchformer blocks substituted in place of each Conformer block.
    """
    input_size = 8

    encoder = Encoder(input_size, body_conf, input_conf=input_conf, main_conf=main_conf)

    sequence = torch.randn(2, 30, input_size, requires_grad=True)
    sequence_len = torch.tensor([30, 18], dtype=torch.long)

    _ = encoder(sequence, sequence_len)

    # Note (b-flo): For each test with Conformer blocks, we do the same testing with
    # Branchformer and E-Branchformer blocks instead.
    # The tests will be redesigned, for now we avoid writing too many configs.
    #
    # Copy each block dict: the previous shallow list copies (body_conf[:]) shared
    # the inner dicts, so writing "block_type" mutated the parametrize fixture and
    # made the branchformer and ebranchformer configs overwrite each other (the
    # "branchformer" encoder was actually built with "ebranchformer" blocks).
    branchformer_conf = [dict(block) for block in body_conf]
    ebranchformer_conf = [dict(block) for block in body_conf]

    has_conformer = False
    for i, block in enumerate(body_conf):
        if block["block_type"] == "conformer":
            branchformer_conf[i]["block_type"] = "branchformer"
            ebranchformer_conf[i]["block_type"] = "ebranchformer"
            has_conformer = True

    if has_conformer:
        branchformer_encoder = Encoder(
            input_size, branchformer_conf, input_conf=input_conf, main_conf=main_conf
        )
        _ = branchformer_encoder(sequence, sequence_len)

        ebranchformer_encoder = Encoder(
            input_size, ebranchformer_conf, input_conf=input_conf, main_conf=main_conf
        )
        _ = ebranchformer_encoder(sequence, sequence_len)
@pytest.mark.parametrize(
"input_conf, body_conf",
[
({}, [{}]),
({}, [{"block_type": "foo"}]),
],
)
def test_block_type(input_conf, body_conf):
    """Missing or unknown block types must raise ValueError at build time."""
    with pytest.raises(ValueError):
        Encoder(8, body_conf, input_conf=input_conf)
@pytest.mark.parametrize(
"body_conf",
[
[{"block_type": "branchformer", "hidden_size": 4}],
[{"block_type": "branchformer", "hidden_size": 4, "linear_size": 2}],
[{"block_type": "conformer", "hidden_size": 4}],
[{"block_type": "conformer", "hidden_size": 4, "linear_size": 2}],
[{"block_type": "conv1d"}],
[{"block_type": "conv1d", "output_size": 8}, {}],
[{"block_type": "ebranchformer", "hidden_size": 4}],
[{"block_type": "ebranchformer", "hidden_size": 4, "linear_size": 2}],
],
)
def test_wrong_block_arguments(body_conf):
    """Configs with invalid or missing block parameters are rejected."""
    with pytest.raises(ValueError):
        Encoder(8, body_conf)
@pytest.mark.parametrize(
"input_conf, inputs",
[
({"subsampling_factor": 2}, [2, 2]),
({"subsampling_factor": 4}, [6, 6]),
({"subsampling_factor": 6}, [10, 5]),
({"vgg_like": True}, [6, 6]),
({"vgg_like": True, "subsampling_factor": 6}, [10, 5]),
],
)
def test_too_short_utterance(input_conf, inputs):
    """Inputs shorter than the subsampling front-end allows raise TooShortUttError."""
    input_size = 20
    body_conf = [
        {
            "block_type": "conformer",
            "hidden_size": 4,
            "linear_size": 2,
            "conv_mod_kernel_size": 3,
        }
    ]
    encoder = Encoder(input_size, body_conf, input_conf=input_conf)

    batch, max_len = len(inputs), inputs[0]
    sequence = torch.randn(batch, max_len, input_size, requires_grad=True)
    sequence_len = torch.tensor(inputs, dtype=torch.long)

    with pytest.raises(TooShortUttError):
        _ = encoder(sequence, sequence_len)
@pytest.mark.parametrize(
"input_conf, body_conf",
[
(
{"subsampling_factor": 8},
[
{
"block_type": "branchformer",
"hidden_size": 4,
"linear_size": 2,
"conv_mod_kernel_size": 1,
}
],
),
(
{"vgg_like": True, "subsampling_factor": 8},
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 2,
"conv_mod_kernel_size": 1,
}
],
),
],
)
def test_wrong_subsampling_factor(input_conf, body_conf):
    """An unsupported subsampling factor must raise ValueError."""
    with pytest.raises(ValueError):
        Encoder(8, body_conf, input_conf=input_conf)
@pytest.mark.parametrize(
"body_conf",
[
[
{"block_type": "conv1d", "output_size": 8, "kernel_size": 1},
{
"block_type": "branchformer",
"hidden_size": 4,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
],
[
{"block_type": "conv1d", "output_size": 8, "kernel_size": 1},
{
"block_type": "conformer",
"hidden_size": 4,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
],
[
{"block_type": "conv1d", "output_size": 8, "kernel_size": 1},
{
"block_type": "ebranchformer",
"hidden_size": 4,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
],
[
{
"block_type": "branchformer",
"hidden_size": 8,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
{
"block_type": "branchformer",
"hidden_size": 4,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
],
[
{
"block_type": "ebranchformer",
"hidden_size": 8,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
{
"block_type": "ebranchformer",
"hidden_size": 4,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
],
[
{
"block_type": "branchformer",
"hidden_size": 8,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
{
"block_type": "conformer",
"hidden_size": 4,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
],
[
{
"block_type": "conformer",
"hidden_size": 8,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
{
"block_type": "conformer",
"hidden_size": 4,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
],
[
{
"block_type": "conformer",
"hidden_size": 8,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
{
"block_type": "ebranchformer",
"hidden_size": 4,
"conv_mod_kernel_size": 2,
"linear_size": 2,
},
],
],
)
def test_wrong_block_io(body_conf):
    """Mismatched input/output sizes between consecutive blocks are rejected."""
    with pytest.raises(ValueError):
        Encoder(8, body_conf)
| 12,111 | 27.838095 | 88 | py |
espnet | espnet-master/test/espnet2/asr_transducer/test_espnet_transducer_model.py | from pathlib import Path
import numpy as np
import pytest
import torch
from espnet2.asr.specaug.specaug import SpecAug
from espnet2.asr_transducer.decoder.mega_decoder import MEGADecoder
from espnet2.asr_transducer.decoder.rnn_decoder import RNNDecoder
from espnet2.asr_transducer.decoder.rwkv_decoder import RWKVDecoder
from espnet2.asr_transducer.decoder.stateless_decoder import StatelessDecoder
from espnet2.asr_transducer.encoder.encoder import Encoder
from espnet2.asr_transducer.espnet_transducer_model import ESPnetASRTransducerModel
from espnet2.asr_transducer.joint_network import JointNetwork
from espnet2.layers.global_mvn import GlobalMVN
from espnet2.layers.utterance_mvn import UtteranceMVN
@pytest.fixture
def stats_file(tmp_path: Path):
    """Write a GlobalMVN-style stats file (sum / sum-of-squares) and return its path."""
    path = tmp_path / "stats.npy"
    count = 5
    feats = np.random.randn(count, 10)

    # Row 0: per-dimension sum with the frame count appended as the last entry.
    # Row 1: per-dimension sum of squares with a trailing 0 placeholder.
    row_sum = np.pad(feats.sum(0), [0, 1], mode="constant", constant_values=count)
    row_sq = np.pad((feats**2).sum(0), [0, 1], mode="constant", constant_values=0.0)

    np.save(path, np.stack([row_sum, row_sq]))
    return path
def prepare(model, input_size, vocab_size, batch_size, use_k2_modified_loss=False):
    """Create random padded (feats, labels) plus length tensors for model tests."""
    n_token = vocab_size - 1
    label_len = [13, 9]

    # (b-flo): For k2 "modified", we need to ensure that T >= U after subsampling.
    if use_k2_modified_loss:
        feat_len = [length * 5 for length in label_len]
    else:
        feat_len = [15, 11]

    feats = torch.randn(batch_size, max(feat_len), input_size)
    labels = (torch.rand(batch_size, max(label_len)) * n_token % n_token).long()

    # Pad both utterances with the model's ignore id past their true lengths.
    for utt in range(2):
        feats[utt, feat_len[utt] :] = model.ignore_id
        labels[utt, label_len[utt] :] = model.ignore_id

    # Remap token id 0 (blank in these tests' token lists) to the last regular id.
    labels[labels == 0] = vocab_size - 2

    return feats, labels, torch.tensor(feat_len), torch.tensor(label_len)
def get_decoder(vocab_size, params):
    """Instantiate the decoder type selected by the keys present in ``params``.

    Dispatch: "is_rwkv" -> RWKVDecoder, "rnn_type" -> RNNDecoder,
    "block_size" -> MEGADecoder, otherwise StatelessDecoder.

    ``params`` is copied before use: the previous version deleted "is_rwkv"
    from the caller's dict, mutating pytest's shared parametrize configs so a
    repeated run would dispatch the same config to a different decoder.
    """
    params = dict(params)
    if "is_rwkv" in params:
        # Marker key only; not a real RWKVDecoder argument.
        del params["is_rwkv"]
        decoder = RWKVDecoder(vocab_size, **params)
    elif "rnn_type" in params:
        decoder = RNNDecoder(vocab_size, **params)
    elif "block_size" in params:
        decoder = MEGADecoder(vocab_size, **params)
    else:
        decoder = StatelessDecoder(vocab_size, **params)

    return decoder
def get_specaug():
    """Return a SpecAug module with time warp and frequency masking enabled."""
    conf = {
        "apply_time_warp": True,
        "apply_freq_mask": True,
        "apply_time_mask": False,
    }
    return SpecAug(**conf)
@pytest.mark.parametrize(
"enc_params, enc_gen_params, dec_params, joint_net_params, main_params",
[
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
}
],
{},
{"rnn_type": "lstm", "num_layers": 2},
{"joint_space_size": 4},
{"report_cer": True, "report_wer": True},
),
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
}
],
{},
{"embed_size": 4},
{"joint_space_size": 4},
{"specaug": True},
),
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
}
],
{},
{"embed_size": 4},
{"joint_space_size": 4},
{
"auxiliary_ctc_weight": 0.1,
"auxiliary_lm_loss_weight": 0.1,
"normalize": "global",
},
),
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
}
],
{},
{"embed_size": 4},
{"joint_space_size": 4},
{"specaug": True, "normalize": "utterance"},
),
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
},
{"block_type": "conv1d", "kernel_size": 1, "output_size": 2},
],
{},
{"embed_size": 4},
{"joint_space_size": 4},
{"transducer_weight": 1.0},
),
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
},
{"block_type": "conv1d", "kernel_size": 1, "output_size": 2},
],
{
"dynamic_chunk_training": True,
"short_chunk_size": 1,
"num_left_chunks": 1,
},
{"embed_size": 4},
{"joint_space_size": 4},
{"transducer_weight": 1.0},
),
(
[
{
"block_type": "branchformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
}
],
{},
{"rnn_type": "lstm", "num_layers": 2},
{"joint_space_size": 4},
{"report_cer": True, "report_wer": True},
),
(
[
{
"block_type": "branchformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
},
{"block_type": "conv1d", "kernel_size": 1, "output_size": 2},
],
{
"dynamic_chunk_training": True,
"short_chunk_size": 1,
"num_left_chunks": 1,
},
{"embed_size": 4},
{"joint_space_size": 4},
{"transducer_weight": 1.0},
),
(
[
{
"block_type": "ebranchformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
}
],
{},
{"rnn_type": "lstm", "num_layers": 2},
{"joint_space_size": 4},
{"report_cer": True, "report_wer": True},
),
(
[
{
"block_type": "ebranchformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
},
{"block_type": "conv1d", "kernel_size": 1, "output_size": 2},
],
{
"dynamic_chunk_training": True,
"short_chunk_size": 1,
"num_left_chunks": 1,
},
{"embed_size": 4},
{"joint_space_size": 4},
{"transducer_weight": 1.0},
),
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
},
{"block_type": "conv1d", "kernel_size": 1, "output_size": 2},
],
{
"dynamic_chunk_training": True,
"short_chunk_size": 1,
"num_left_chunks": 1,
},
{"block_size": 4, "chunk_size": 3},
{"joint_space_size": 4},
{"transducer_weight": 1.0},
),
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
}
],
{},
{"block_size": 4, "rel_pos_bias_type": "rotary"},
{"joint_space_size": 4},
{"report_cer": True, "report_wer": True},
),
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
},
{"block_type": "conv1d", "kernel_size": 1, "output_size": 2},
],
{
"dynamic_chunk_training": True,
"short_chunk_size": 1,
"num_left_chunks": 1,
},
{"block_size": 4, "linear_size": 4, "is_rwkv": True},
{"joint_space_size": 4},
{"transducer_weight": 1.0},
),
(
[
{
"block_type": "conformer",
"hidden_size": 4,
"linear_size": 4,
"conv_mod_kernel_size": 3,
}
],
{},
{"block_size": 4, "linear_size": 4, "is_rwkv": True},
{"joint_space_size": 4},
{"report_cer": True, "report_wer": True},
),
],
)
def test_model_training(
    enc_params, enc_gen_params, dec_params, joint_net_params, main_params, stats_file
):
    """Build a full transducer model from the parametrized configs and run it."""
    batch_size = 2
    input_size = 10

    token_list = ["<blank>", "a", "b", "c", "<space>"]
    vocab_size = len(token_list)

    # RWKV relies on a custom CUDA kernel, so skip those configs without a GPU.
    if dec_params.get("is_rwkv") is not None and not torch.cuda.is_available():
        pytest.skip("A GPU is required for WKV kernel computation")

    encoder = Encoder(input_size, enc_params, main_conf=enc_gen_params)
    decoder = get_decoder(vocab_size, dec_params)

    joint_network = JointNetwork(
        vocab_size, encoder.output_size, decoder.output_size, **joint_net_params
    )

    # "specaug" and "normalize" are consumed here; all remaining keys in
    # main_params are passed straight through to the model constructor.
    specaug = get_specaug() if main_params.pop("specaug", False) else None

    normalize = main_params.pop("normalize", None)
    if normalize is not None:
        if normalize == "utterance":
            normalize = UtteranceMVN(norm_means=True, norm_vars=True, eps=1e-13)
        else:
            # Any other value selects global MVN backed by the stats fixture.
            normalize = GlobalMVN(stats_file, norm_means=True, norm_vars=True)

    model = ESPnetASRTransducerModel(
        vocab_size,
        token_list,
        frontend=None,
        specaug=specaug,
        normalize=normalize,
        encoder=encoder,
        decoder=decoder,
        joint_network=joint_network,
        **main_params,
    )

    feats, labels, feat_len, label_len = prepare(
        model, input_size, vocab_size, batch_size
    )

    _ = model(feats, feat_len, labels, label_len)

    # CER/WER reporting only kicks in at validation time, so re-run once more
    # with the training flag cleared.
    if main_params.get("report_cer") or main_params.get("report_wer"):
        # NOTE(review): this sets only the top-level ``training`` flag, unlike
        # model.eval() which would also flip submodules — confirm intended.
        model.training = False
        _ = model(feats, feat_len, labels, label_len)
@pytest.mark.parametrize(
"k2_params",
[
{},
{"lm_scale": 0.25, "am_scale": 0.5},
{"loss_type": "modified"},
],
)
def test_model_training_with_k2(k2_params):
    """Run forward training and validation passes with the k2 pruned loss."""
    # Skip entirely when the optional k2 dependency is not installed.
    pytest.importorskip("k2")

    batch_size = 2
    input_size = 10

    token_list = ["<blank>", "a", "b", "c", "<space>"]
    vocab_size = len(token_list)

    encoder = Encoder(
        input_size,
        [
            {
                "block_type": "conformer",
                "hidden_size": 4,
                "linear_size": 4,
                "conv_mod_kernel_size": 3,
            }
        ],
    )
    decoder = RNNDecoder(vocab_size, embed_size=8, hidden_size=8)

    joint_network = JointNetwork(
        vocab_size,
        encoder.output_size,
        decoder.output_size,
    )

    model = ESPnetASRTransducerModel(
        vocab_size,
        token_list,
        frontend=None,
        normalize=None,
        specaug=None,
        encoder=encoder,
        decoder=decoder,
        joint_network=joint_network,
        use_k2_pruned_loss=True,
        k2_pruned_loss_args=k2_params,
        report_cer=True,
        report_wer=True,
    )

    # use_k2_modified_loss=True makes the features long enough that T >= U
    # still holds after subsampling (a k2 "modified" loss requirement).
    feats, labels, feat_len, label_len = prepare(
        model,
        input_size,
        vocab_size,
        batch_size,
        use_k2_modified_loss=True,
    )

    _ = model(feats, feat_len, labels, label_len)

    # Second pass with the training flag cleared exercises CER/WER reporting.
    # NOTE(review): sets only the top-level flag, not model.eval() — confirm intended.
    model.training = False
    _ = model(feats, feat_len, labels, label_len)
@pytest.mark.parametrize("extract_feats", [True, False])
def test_collect_feats(extract_feats):
    """collect_feats must return a dict with 'feats' and 'feats_lengths' keys,
    whether or not features are actually extracted during stats collection."""
    token_list = ["<blank>", "a", "b", "c", "<space>"]
    vocab_size = len(token_list)

    encoder = Encoder(
        20,
        [
            {
                "block_type": "conformer",
                "hidden_size": 4,
                "linear_size": 4,
                "conv_mod_kernel_size": 3,
            }
        ],
    )
    decoder = StatelessDecoder(vocab_size, embed_size=4)
    joint_network = JointNetwork(
        vocab_size, encoder.output_size, decoder.output_size, 8
    )

    model = ESPnetASRTransducerModel(
        vocab_size,
        token_list,
        frontend=None,
        specaug=None,
        normalize=None,
        encoder=encoder,
        decoder=decoder,
        joint_network=joint_network,
    )
    # Toggle the feature-extraction branch inside collect_feats.
    model.extract_feats_in_collect_stats = extract_feats

    # Inputs: raw speech (2 x 12) + lengths, and text labels (2 x 8) + lengths.
    feats_dict = model.collect_feats(
        torch.randn(2, 12),
        torch.tensor([12, 11]),
        torch.randn(2, 8),
        torch.tensor([8, 8]),
    )

    assert set(("feats", "feats_lengths")) == feats_dict.keys()
| 13,503 | 27.670913 | 85 | py |
espnet | espnet-master/test/espnet2/schedulers/test_warmup_lr.py | import numpy as np
import torch
from espnet2.schedulers.noam_lr import NoamLR
from espnet2.schedulers.warmup_lr import WarmupLR
def test_WarumupLR():
    """One warmup-scheduler step must change the optimizer's learning rate."""
    layer = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(layer.parameters(), 0.1)
    scheduler = WarmupLR(optimizer)

    lr_before = optimizer.param_groups[0]["lr"]
    optimizer.step()
    scheduler.step()

    assert optimizer.param_groups[0]["lr"] != lr_before
def test_WarumupLR_is_compatible_with_NoamLR():
    """WarmupLR seeded via NoamLR.lr_for_WarmupLR must track NoamLR exactly."""
    base_lr, model_size, warmup_steps = 10, 32, 250

    noam_opt = torch.optim.SGD(torch.nn.Linear(2, 2).parameters(), base_lr)
    noam = NoamLR(noam_opt, model_size=model_size, warmup_steps=warmup_steps)

    # Seed the warmup scheduler with the Noam-equivalent base learning rate.
    warmup_opt = torch.optim.SGD(
        torch.nn.Linear(2, 2).parameters(), noam.lr_for_WarmupLR(base_lr)
    )
    warmup = WarmupLR(warmup_opt, warmup_steps=warmup_steps)

    # Compare the two schedules step by step, well past the warmup phase.
    for _ in range(3 * warmup_steps):
        warmup_opt.step()
        warmup.step()
        noam_opt.step()
        noam.step()
        np.testing.assert_almost_equal(
            noam_opt.param_groups[0]["lr"], warmup_opt.param_groups[0]["lr"]
        )
| 1,134 | 24.222222 | 77 | py |
espnet | espnet-master/test/espnet2/schedulers/test_warmup_reducelronplateau.py | import numpy as np
import torch
from espnet2.schedulers.warmup_reducelronplateau import WarmupReduceLROnPlateau
def test_WarmupReduceLROnPlateau():
    """Warmup ramps the LR; afterwards plateau detection decays it by ``factor``."""
    layer = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(layer.parameters(), 0.1)
    scheduler = WarmupReduceLROnPlateau(
        optimizer, mode="min", factor=0.1, patience=1, cooldown=0
    )

    initial_lr = optimizer.param_groups[0]["lr"]
    optimizer.step()
    scheduler.step()
    warm_lr = optimizer.param_groups[0]["lr"]
    assert warm_lr != initial_lr

    # Jump past warmup so the metric-driven plateau logic takes over.
    scheduler.step_num = scheduler.warmup_steps + 1

    optimizer.step()
    scheduler.step(2.5)
    assert optimizer.param_groups[0]["lr"] == warm_lr

    # First worsening metric: still within patience=1, LR unchanged.
    optimizer.step()
    scheduler.step(3.5)
    assert optimizer.param_groups[0]["lr"] == warm_lr

    # Second worsening metric exceeds patience, so LR is scaled by factor=0.1.
    optimizer.step()
    scheduler.step(4.5)
    assert optimizer.param_groups[0]["lr"] == warm_lr * 0.1
| 770 | 22.363636 | 86 | py |
espnet | espnet-master/test/espnet2/schedulers/test_warmup_step_lr.py | import numpy as np
import torch
from espnet2.schedulers.warmup_step_lr import WarmupStepLR
def test_WarmupStepLR():
    """LR changes during warmup and keeps changing in the step-decay phase."""
    optimizer = torch.optim.SGD(torch.nn.Linear(2, 2).parameters(), 0.1)
    scheduler = WarmupStepLR(optimizer)

    lr_start = optimizer.param_groups[0]["lr"]
    optimizer.step()
    scheduler.step()
    lr_warm = optimizer.param_groups[0]["lr"]
    assert lr_start != lr_warm

    # Force the scheduler past warmup into the step-decay regime.
    scheduler.step_num = scheduler.warmup_steps + 1
    optimizer.step()
    scheduler.step()
    assert optimizer.param_groups[0]["lr"] != lr_warm
| 486 | 20.173913 | 58 | py |
espnet | espnet-master/test/espnet2/schedulers/test_noam_lr.py | import torch
from espnet2.schedulers.noam_lr import NoamLR
def test_NoamLR():
    """One Noam scheduler step must update the optimizer's learning rate."""
    optimizer = torch.optim.SGD(torch.nn.Linear(2, 2).parameters(), 0.1)
    scheduler = NoamLR(optimizer)

    lr_before = optimizer.param_groups[0]["lr"]
    optimizer.step()
    scheduler.step()

    assert optimizer.param_groups[0]["lr"] != lr_before
| 313 | 18.625 | 51 | py |
espnet | espnet-master/test/espnet2/utils/test_sized_dict.py | import multiprocessing
import sys
import numpy as np
import pytest
import torch.multiprocessing
from espnet2.utils.sized_dict import SizedDict, get_size
def test_get_size():
    """get_size(dict) = shallow dict size + recursive size of each key and value."""
    container = {}
    value = np.random.randn(10)
    container["a"] = value
    shallow = sys.getsizeof(container)
    assert shallow + get_size(value) + get_size("a") == get_size(container)
def test_SizedDict_size():
    """SizedDict tracks the byte size of keys and values, including overwrites."""
    sized = SizedDict()
    assert sized.size == 0

    first = np.random.randn(10)
    sized["a"] = first
    assert sized.size == get_size(first) + sys.getsizeof("a")

    second = np.random.randn(10)
    sized["b"] = second
    expected = (
        get_size(first) + get_size(second) + sys.getsizeof("a") + sys.getsizeof("b")
    )
    assert sized.size == expected

    # Overwriting a key replaces the old value's contribution with the new one.
    third = np.random.randn(10)
    sized["b"] = third
    expected = (
        get_size(first) + get_size(third) + sys.getsizeof("a") + sys.getsizeof("b")
    )
    assert sized.size == expected
def _set(d):
    # Helper run in a child process: mutate the stored tensor in place so the
    # parent can check whether the memory is actually shared.
    d["a"][0] = 10
@pytest.mark.execution_timeout(5)
def test_SizedDict_shared():
    """A shared SizedDict must expose child-process tensor mutations to the parent."""
    d = SizedDict(shared=True)
    x = torch.randn(10)
    d["a"] = x
    # NOTE(review): "forkserver" presumably avoids fork-related issues with
    # torch state in the parent — see torch.multiprocessing docs to confirm.
    mp = multiprocessing.get_context("forkserver")
    p = mp.Process(target=_set, args=(d,))
    p.start()
    p.join()
    # The child's in-place write is visible here only if the tensor is shared.
    assert d["a"][0] == 10
def test_SizedDict_getitem():
    """Subscript access returns the stored value."""
    sized = SizedDict(data={"a": 2, "b": 5, "c": 10})
    assert sized["a"] == 2
def test_SizedDict_iter():
    """Iteration yields the keys in insertion order."""
    sized = SizedDict(data={"a": 2, "b": 5, "c": 10})
    assert [key for key in sized] == ["a", "b", "c"]
def test_SizedDict_contains():
    """Membership tests work against the stored keys."""
    sized = SizedDict(data={"a": 2, "b": 5, "c": 10})
    assert "a" in sized
def test_SizedDict_len():
    """len() reports the number of stored keys."""
    sized = SizedDict(data={"a": 2, "b": 5, "c": 10})
    assert len(sized) == 3
| 1,525 | 20.194444 | 88 | py |
espnet | espnet-master/test/espnet2/tasks/test_abs_task.py | import configargparse
import pytest
import torch
from espnet2.tasks.abs_task import AbsTask
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.collate_fn import CommonCollateFn
class DummyModel(AbsESPnetModel):
    """Minimal two-layer model used to exercise the AbsTask training loop."""

    def __init__(self):
        super().__init__()
        self.layer1 = torch.nn.Linear(1, 1)
        self.layer2 = torch.nn.Linear(1, 1)

    def collect_feats(self):
        # Feature statistics are irrelevant for these tests.
        return {}

    def forward(self, x, x_lengths, **kwargs):
        """Return the loss dict the trainer expects (loss/stats/weight/optim_idx)."""
        x = self.layer1(x)
        x = self.layer2(x)
        retval = {
            "loss": x.mean(),
            "stats": {"loss": x.mean()},
            "weight": len(x),
            # Random index in {0, 1}: selects which of the two optimizers
            # (TestTask.num_optimizers == 2) applies this step.
            "optim_idx": torch.randint(0, 2, [1]),
        }
        # Make the dict safe to gather across devices/processes.
        return force_gatherable(retval, device=x.device)
class TestTask(AbsTask):
    """Concrete AbsTask implementation used by the training-loop integration test."""

    # Two optimizers so the multi-optimizer code path is exercised.
    num_optimizers: int = 2

    @classmethod
    def add_task_arguments(cls, parser):
        # No task-specific CLI arguments are needed for the test.
        pass

    @classmethod
    def build_collate_fn(cls, args, train):
        return CommonCollateFn()

    @classmethod
    def build_preprocess_fn(cls, args, train):
        return None

    @classmethod
    def required_data_names(cls, train=True, inference=False):
        # A single input stream "x" in both training and inference.
        if not inference:
            retval = ("x",)
        else:
            # Recognition mode
            retval = ("x",)
        return retval

    @classmethod
    def optional_data_names(cls, train=True, inference=False):
        retval = ()
        return retval

    @classmethod
    def build_model(cls, args):
        model = DummyModel()
        return model

    @classmethod
    def build_optimizers(cls, args, model):
        # One optimizer per layer, matching num_optimizers above.
        optim = torch.optim.Adam(model.layer1.parameters())
        optim2 = torch.optim.Adam(model.layer2.parameters())
        optimizers = [optim, optim2]
        return optimizers
@pytest.mark.parametrize("parser", [configargparse.ArgumentParser(), None])
def test_add_arguments(parser):
    # NOTE(review): the parametrized ``parser`` is never used — presumably this
    # was meant to be forwarded to get_parser(); confirm intent before changing.
    AbsTask.get_parser()
def test_add_arguments_help():
    """--help on the task parser prints usage and exits."""
    with pytest.raises(SystemExit):
        AbsTask.get_parser().parse_args(["--help"])
def test_main_help():
    """main(--help) prints usage and exits."""
    with pytest.raises(SystemExit):
        AbsTask.main(cmd=["--help"])
def test_main_print_config():
    """main(--print_config) dumps the default config and exits."""
    with pytest.raises(SystemExit):
        AbsTask.main(cmd=["--print_config"])
def test_main_with_no_args():
    """main() without arguments exits (required arguments are missing)."""
    with pytest.raises(SystemExit):
        AbsTask.main(cmd=[])
def test_print_config_and_load_it(tmp_path):
    """A dumped default config must be loadable back through --config."""
    config_file = tmp_path / "config.yaml"
    with config_file.open("w") as f:
        AbsTask.print_config(f)
    AbsTask.get_parser().parse_args(["--config", str(config_file)])
# FIXME(kamo): This is an integration test, so it's hard to reduce time
@pytest.mark.execution_timeout(50)
def test_main(tmp_path):
    """End-to-end: run one training epoch of TestTask through the CLI entry point."""
    # Single-utterance scp-style file: utterance id "a", value "10,1"
    # (consumed as a rand_float shape spec by the data loader below).
    train_text = tmp_path / "train.txt"
    with train_text.open("w") as f:
        f.write("a 10,1\n")

    # The same tiny file serves as train/valid data and as the shape file.
    TestTask.main(
        cmd=[
            "--output_dir",
            str(tmp_path / "out"),
            "--train_data_path_and_name_and_type",
            f"{train_text},x,rand_float",
            "--train_shape_file",
            str(train_text),
            "--valid_data_path_and_name_and_type",
            f"{train_text},x,rand_float",
            "--valid_shape_file",
            str(train_text),
            "--batch_size",
            "1",
            "--batch_type",
            "unsorted",
            "--max_epoch",
            "1",
        ]
    )
| 3,469 | 24.703704 | 75 | py |
espnet | espnet-master/espnet/scheduler/pytorch.py | """PyTorch optimizer schdulers."""
from typing import List
from torch.optim import Optimizer
from espnet.scheduler.scheduler import SchedulerInterface
class PyTorchScheduler:
    """PyTorch optimizer scheduler.

    Drives a list of espnet ``SchedulerInterface`` objects against a torch
    optimizer: each scheduler scales the param-group entry named by its
    ``key`` (e.g. "lr") relative to the value that group started with.
    """

    def __init__(self, schedulers: List[SchedulerInterface], optimizer: Optimizer):
        """Initialize class."""
        self.schedulers = schedulers
        self.optimizer = optimizer
        for s in self.schedulers:
            for group in optimizer.param_groups:
                # Remember the starting value (e.g. "initial_lr") so later
                # scaling stays relative to it instead of compounding.
                group.setdefault("initial_" + s.key, group[s.key])

    def step(self, n_iter: int):
        """Update optimizer by scheduling."""
        for s in self.schedulers:
            for group in self.optimizer.param_groups:
                # Absolute update: initial value times the schedule's scale.
                group[s.key] = group["initial_" + s.key] * s.scale(n_iter)
| 801 | 29.846154 | 83 | py |
espnet | espnet-master/espnet/vc/pytorch_backend/vc.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Nagoya University (Wen-Chin Huang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""E2E VC training / decoding functions."""
import copy
import json
import logging
import math
import os
import time
import chainer
import kaldiio
import numpy as np
import torch
from chainer import training
from chainer.training import extensions
from espnet.asr.asr_utils import (
get_model_conf,
snapshot_object,
torch_load,
torch_resume,
torch_snapshot,
)
from espnet.asr.pytorch_backend.asr_init import load_trained_modules
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.dataset import ChainerDataLoader, TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop, set_early_stop
class CustomEvaluator(BaseEvaluator):
    """Custom evaluator that runs a PyTorch model inside a Chainer trainer."""

    def __init__(self, model, iterator, target, device):
        """Initialize module.

        Args:
            model (torch.nn.Module): Pytorch model instance.
            iterator (chainer.dataset.Iterator): Iterator for validation.
            target (chainer.Chain): Dummy chain instance.
            device (torch.device): The device to be used in evaluation.

        """
        super(CustomEvaluator, self).__init__(iterator, target)
        self.model = model
        self.device = device

    # The core part of the update routine can be customized by overriding.
    def evaluate(self):
        """Evaluate over validation iterator."""
        iterator = self._iterators["main"]

        if self.eval_hook:
            self.eval_hook(self)
        # Reusable iterators can be reset in place; otherwise copy so the
        # original iterator's position is left untouched.
        if hasattr(iterator, "reset"):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = chainer.reporter.DictSummary()

        # Switch to eval mode and disable autograd for the whole pass.
        self.model.eval()
        with torch.no_grad():
            for batch in it:
                # Batches arrive either as positional tuples or keyword dicts;
                # move every tensor onto the evaluation device.
                if isinstance(batch, tuple):
                    x = tuple(arr.to(self.device) for arr in batch)
                else:
                    x = batch
                    for key in x.keys():
                        x[key] = x[key].to(self.device)
                # Values reported inside this scope are collected per batch.
                observation = {}
                with chainer.reporter.report_scope(observation):
                    # convert to torch tensor
                    if isinstance(x, tuple):
                        self.model(*x)
                    else:
                        self.model(**x)
                summary.add(observation)
        # Restore training mode before handing control back to the trainer.
        self.model.train()

        return summary.compute_mean()
class CustomUpdater(training.StandardUpdater):
    """Custom updater that trains a PyTorch model inside a Chainer trainer."""

    def __init__(self, model, grad_clip, iterator, optimizer, device, accum_grad=1):
        """Initialize module.

        Args:
            model (torch.nn.Module) model: Pytorch model instance.
            grad_clip (float) grad_clip : The gradient clipping value.
            iterator (chainer.dataset.Iterator): Iterator for training.
            optimizer (torch.optim.Optimizer) : Pytorch optimizer instance.
            device (torch.device): The device to be used in training.
            accum_grad (int): Number of forward passes to accumulate gradients
                over before each optimizer update.

        """
        super(CustomUpdater, self).__init__(iterator, optimizer)
        self.model = model
        self.grad_clip = grad_clip
        self.device = device
        self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
        self.accum_grad = accum_grad
        # Counts forward passes since the last optimizer update.
        self.forward_count = 0

    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        """Update model one step."""
        # When we pass one iterator and optimizer to StandardUpdater.__init__,
        # they are automatically named 'main'.
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")

        # Get the next batch (a list of json files)
        batch = train_iter.next()
        # Batches arrive either as positional tuples or keyword dicts; move
        # every tensor onto the training device.
        if isinstance(batch, tuple):
            x = tuple(arr.to(self.device) for arr in batch)
        else:
            x = batch
            for key in x.keys():
                x[key] = x[key].to(self.device)

        # compute loss and gradient (scaled so accumulated grads average out)
        if isinstance(x, tuple):
            loss = self.model(*x).mean() / self.accum_grad
        else:
            loss = self.model(**x).mean() / self.accum_grad
        loss.backward()

        # update parameters only every accum_grad forward passes
        self.forward_count += 1
        if self.forward_count != self.accum_grad:
            return
        self.forward_count = 0

        # compute the gradient norm to check if it is normal or not
        grad_norm = self.clip_grad_norm(self.model.parameters(), self.grad_clip)
        logging.debug("grad norm={}".format(grad_norm))
        if math.isnan(grad_norm):
            logging.warning("grad norm is nan. Do not update model.")
        else:
            optimizer.step()
        # Gradients are cleared even when the step was skipped due to NaN.
        optimizer.zero_grad()

    def update(self):
        """Run update function."""
        self.update_core()
        # The trainer's iteration count advances only on real optimizer
        # updates (i.e. once per accum_grad forward passes).
        if self.forward_count == 0:
            self.iteration += 1
class CustomConverter(object):
    """Custom converter: pads a raw batch and moves it to the target device."""

    def __init__(self):
        """Initialize module."""
        # NOTE: keep as class for future development
        pass

    def __call__(self, batch, device=torch.device("cpu")):
        """Convert a given batch.

        Args:
            batch (list): List of ndarrays.
            device (torch.device): The device to be send.

        Returns:
            dict: Dict of converted tensors.

        Examples:
            >>> batch = [([np.arange(5), np.arange(3)],
                          [np.random.randn(8, 2), np.random.randn(4, 2)],
                          None, None)]
            >>> conveter = CustomConverter()
            >>> conveter(batch, torch.device("cpu"))
            {'xs': tensor([[0, 1, 2, 3, 4],
                           [0, 1, 2, 0, 0]]),
             'ilens': tensor([5, 3]),
             'ys': tensor([[[-0.4197, -1.1157],
                            [-1.5837, -0.4299],
                            [-2.0491,  0.9215],
                            [-2.4326,  0.8891],
                            [ 1.2323,  1.7388],
                            [-0.3228,  0.6656],
                            [-0.6025,  1.3693],
                            [-1.0778,  1.3447]],
                           [[ 0.1768, -0.3119],
                            [ 0.4386,  2.5354],
                            [-1.2181, -0.5918],
                            [-0.6858, -0.8843],
                            [ 0.0000,  0.0000],
                            [ 0.0000,  0.0000],
                            [ 0.0000,  0.0000],
                            [ 0.0000,  0.0000]]]),
             'labels': tensor([[0., 0., 0., 0., 0., 0., 0., 1.],
                               [0., 0., 0., 1., 1., 1., 1., 1.]]),
             'olens': tensor([8, 4])}

        """
        # batch should be located in list
        assert len(batch) == 1
        xs, ys, spembs, extras = batch[0]

        # get list of lengths (must be tensor for DataParallel)
        ilens = torch.from_numpy(np.array([x.shape[0] for x in xs])).long().to(device)
        olens = torch.from_numpy(np.array([y.shape[0] for y in ys])).long().to(device)

        # perform padding and conversion to tensor
        xs = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(device)
        ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)

        # make labels for stop prediction: 1 from the last real frame onward
        labels = ys.new_zeros(ys.size(0), ys.size(1))
        for i, l in enumerate(olens):
            labels[i, l - 1 :] = 1.0

        # prepare dict
        new_batch = {
            "xs": xs,
            "ilens": ilens,
            "ys": ys,
            "labels": labels,
            "olens": olens,
        }

        # load speaker embedding
        if spembs is not None:
            spembs = torch.from_numpy(np.array(spembs)).float()
            new_batch["spembs"] = spembs.to(device)

        # load second target
        if extras is not None:
            extras = pad_list([torch.from_numpy(extra).float() for extra in extras], 0)
            new_batch["extras"] = extras.to(device)

        return new_batch
def train(args):
    """Train E2E VC model.

    Args:
        args (Namespace): Parsed command-line arguments providing the data
            JSON paths, model/optimizer hyperparameters and trainer options.

    Side effects:
        Creates ``args.outdir`` and writes ``model.json``, periodic
        snapshots, the best model, attention plots and training logs there.
        Also mutates ``args`` (``spk_embed_dim``, ``spc_dim``,
        ``batch_size``, ``batch_sort_key``).
    """
    set_deterministic_pytorch(args)

    # check cuda availability
    if not torch.cuda.is_available():
        logging.warning("cuda is not available")

    # get input and output dimension info from the first validation utterance
    with open(args.valid_json, "rb") as f:
        valid_json = json.load(f)["utts"]
    utts = list(valid_json.keys())

    # In TTS, this is reversed, but not in VC. See `espnet.utils.training.batchfy`
    idim = int(valid_json[utts[0]]["input"][0]["shape"][1])
    odim = int(valid_json[utts[0]]["output"][0]["shape"][1])
    logging.info("#input dims : " + str(idim))
    logging.info("#output dims: " + str(odim))

    # get extra input and output dimension (speaker embedding / second target)
    if args.use_speaker_embedding:
        args.spk_embed_dim = int(valid_json[utts[0]]["input"][1]["shape"][0])
    else:
        args.spk_embed_dim = None
    if args.use_second_target:
        args.spc_dim = int(valid_json[utts[0]]["input"][1]["shape"][1])
    else:
        args.spc_dim = None

    # write model config
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    model_conf = args.outdir + "/model.json"
    with open(model_conf, "wb") as f:
        logging.info("writing a model config file to" + model_conf)
        f.write(
            json.dumps(
                (idim, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
    for key in sorted(vars(args).keys()):
        logging.info("ARGS: " + key + ": " + str(vars(args)[key]))

    # specify model architecture (optionally initializing from trained modules)
    if args.enc_init is not None or args.dec_init is not None:
        model = load_trained_modules(idim, odim, args, TTSInterface)
    else:
        model_class = dynamic_import(args.model_module)
        model = model_class(idim, odim, args)
    assert isinstance(model, TTSInterface)
    logging.info(model)
    reporter = model.reporter

    # freeze modules, if specified
    if args.freeze_mods:
        for mod, param in model.named_parameters():
            if any(mod.startswith(key) for key in args.freeze_mods):
                logging.info("freezing %s" % mod)
                param.requires_grad = False
    for mod, param in model.named_parameters():
        if not param.requires_grad:
            logging.info("Frozen module %s" % mod)

    # check the use of multi-gpu
    if args.ngpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.ngpu)))
        if args.batch_size != 0:
            logging.warning(
                "batch size is automatically increased (%d -> %d)"
                % (args.batch_size, args.batch_size * args.ngpu)
            )
            args.batch_size *= args.ngpu

    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    model = model.to(device)

    logging.warning(
        "num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format(
            sum(p.numel() for p in model.parameters()),
            sum(p.numel() for p in model.parameters() if p.requires_grad),
            sum(p.numel() for p in model.parameters() if p.requires_grad)
            * 100.0
            / sum(p.numel() for p in model.parameters()),
        )
    )

    # Setup an optimizer
    if args.opt == "adam":
        optimizer = torch.optim.Adam(
            model.parameters(), args.lr, eps=args.eps, weight_decay=args.weight_decay
        )
    elif args.opt == "noam":
        from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt

        optimizer = get_std_opt(
            model.parameters(),
            args.adim,
            args.transformer_warmup_steps,
            args.transformer_lr,
        )
    elif args.opt == "lamb":
        from pytorch_lamb import Lamb

        optimizer = Lamb(
            model.parameters(), lr=args.lr, weight_decay=0.01, betas=(0.9, 0.999)
        )
    else:
        raise NotImplementedError("unknown optimizer: " + args.opt)

    # FIXME: TOO DIRTY HACK
    # NOTE(review): the reporter appears to be attached so the snapshot
    # machinery can (de)serialize it together with the optimizer — confirm.
    setattr(optimizer, "target", reporter)
    setattr(optimizer, "serialize", lambda s: reporter.serialize(s))

    # read json data
    with open(args.train_json, "rb") as f:
        train_json = json.load(f)["utts"]
    with open(args.valid_json, "rb") as f:
        valid_json = json.load(f)["utts"]

    # sortagrad: feed shortest utterances first for the initial epochs
    use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
    if use_sortagrad:
        args.batch_sort_key = "input"
    # make minibatch list (variable length)
    train_batchset = make_batchset(
        train_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        batch_sort_key=args.batch_sort_key,
        min_batch_size=args.ngpu if args.ngpu > 1 else 1,
        shortest_first=use_sortagrad,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        swap_io=False,
        iaxis=0,
        oaxis=0,
    )
    valid_batchset = make_batchset(
        valid_json,
        args.batch_size,
        args.maxlen_in,
        args.maxlen_out,
        args.minibatches,
        batch_sort_key=args.batch_sort_key,
        min_batch_size=args.ngpu if args.ngpu > 1 else 1,
        count=args.batch_count,
        batch_bins=args.batch_bins,
        batch_frames_in=args.batch_frames_in,
        batch_frames_out=args.batch_frames_out,
        batch_frames_inout=args.batch_frames_inout,
        swap_io=False,
        iaxis=0,
        oaxis=0,
    )

    load_tr = LoadInputsAndTargets(
        mode="vc",
        use_speaker_embedding=args.use_speaker_embedding,
        use_second_target=args.use_second_target,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={"train": True},  # Switch the mode of preprocessing
        keep_all_data_on_mem=args.keep_all_data_on_mem,
    )
    load_cv = LoadInputsAndTargets(
        mode="vc",
        use_speaker_embedding=args.use_speaker_embedding,
        use_second_target=args.use_second_target,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={"train": False},  # Switch the mode of preprocessing
        keep_all_data_on_mem=args.keep_all_data_on_mem,
    )

    converter = CustomConverter()

    # hack to make batchsize argument as 1
    # actual batchsize is included in a list
    train_iter = {
        "main": ChainerDataLoader(
            dataset=TransformDataset(
                train_batchset, lambda data: converter([load_tr(data)])
            ),
            batch_size=1,
            num_workers=args.num_iter_processes,
            shuffle=not use_sortagrad,
            collate_fn=lambda x: x[0],
        )
    }
    valid_iter = {
        "main": ChainerDataLoader(
            dataset=TransformDataset(
                valid_batchset, lambda data: converter([load_cv(data)])
            ),
            batch_size=1,
            shuffle=False,
            collate_fn=lambda x: x[0],
            num_workers=args.num_iter_processes,
        )
    }

    # Set up a trainer
    updater = CustomUpdater(
        model, args.grad_clip, train_iter, optimizer, device, args.accum_grad
    )
    trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)

    # Resume from a snapshot
    if args.resume:
        logging.info("resumed from %s" % args.resume)
        torch_resume(args.resume, trainer)

    # set intervals
    eval_interval = (args.eval_interval_epochs, "epoch")
    save_interval = (args.save_interval_epochs, "epoch")
    report_interval = (args.report_interval_iters, "iteration")

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(
        CustomEvaluator(model, valid_iter, reporter, device), trigger=eval_interval
    )

    # Save snapshot for each epoch
    trainer.extend(torch_snapshot(), trigger=save_interval)

    # Save best models (lowest validation loss)
    trainer.extend(
        snapshot_object(model, "model.loss.best"),
        trigger=training.triggers.MinValueTrigger(
            "validation/main/loss", trigger=eval_interval
        ),
    )

    # Save attention figure for each epoch (longest utterances first)
    if args.num_save_attention > 0:
        data = sorted(
            list(valid_json.items())[: args.num_save_attention],
            key=lambda x: int(x[1]["input"][0]["shape"][1]),
            reverse=True,
        )
        if hasattr(model, "module"):
            # DataParallel wraps the real model in .module
            att_vis_fn = model.module.calculate_all_attentions
            plot_class = model.module.attention_plot_class
        else:
            att_vis_fn = model.calculate_all_attentions
            plot_class = model.attention_plot_class
        att_reporter = plot_class(
            att_vis_fn,
            data,
            args.outdir + "/att_ws",
            converter=converter,
            transform=load_cv,
            device=device,
            reverse=True,
        )
        trainer.extend(att_reporter, trigger=eval_interval)
    else:
        att_reporter = None

    # Make a plot for training and validation values
    if hasattr(model, "module"):
        base_plot_keys = model.module.base_plot_keys
    else:
        base_plot_keys = model.base_plot_keys
    plot_keys = []
    for key in base_plot_keys:
        plot_key = ["main/" + key, "validation/main/" + key]
        trainer.extend(
            extensions.PlotReport(plot_key, "epoch", file_name=key + ".png"),
            trigger=eval_interval,
        )
        plot_keys += plot_key
    trainer.extend(
        extensions.PlotReport(plot_keys, "epoch", file_name="all_loss.png"),
        trigger=eval_interval,
    )

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(trigger=report_interval))
    report_keys = ["epoch", "iteration", "elapsed_time"] + plot_keys
    trainer.extend(extensions.PrintReport(report_keys), trigger=report_interval)
    trainer.extend(extensions.ProgressBar(), trigger=report_interval)

    set_early_stop(trainer, args)
    if args.tensorboard_dir is not None and args.tensorboard_dir != "":
        from torch.utils.tensorboard import SummaryWriter

        writer = SummaryWriter(args.tensorboard_dir)
        trainer.extend(TensorboardLogger(writer, att_reporter), trigger=report_interval)

    # switch back to shuffled batches once the sortagrad epochs are done
    if use_sortagrad:
        trainer.extend(
            ShufflingEnabler([train_iter]),
            trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
        )

    # Run the training
    trainer.run()
    check_early_stop(trainer, args.epochs)
@torch.no_grad()
def decode(args):
    """Decode with E2E VC model.

    Args:
        args (Namespace): Command-line arguments with the trained model path,
            the data JSON to decode and output options.

    Side effects:
        Writes converted features (and optionally durations / focus rates)
        as kaldi ark/scp files under ``args.out``, plus probability and
        attention-weight figures.
    """
    set_deterministic_pytorch(args)
    # read training config
    idim, odim, train_args = get_model_conf(args.model, args.model_conf)

    # show arguments
    for key in sorted(vars(args).keys()):
        logging.info("args: " + key + ": " + str(vars(args)[key]))

    # define model
    model_class = dynamic_import(train_args.model_module)
    model = model_class(idim, odim, train_args)
    assert isinstance(model, TTSInterface)
    logging.info(model)

    # load trained model parameters
    logging.info("reading model parameters from " + args.model)
    torch_load(args.model, model)
    model.eval()

    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    model = model.to(device)

    # read json data
    with open(args.json, "rb") as f:
        js = json.load(f)["utts"]

    # check directory
    outdir = os.path.dirname(args.out)
    if len(outdir) != 0 and not os.path.exists(outdir):
        os.makedirs(outdir)

    load_inputs_and_targets = LoadInputsAndTargets(
        mode="vc",
        load_output=False,
        sort_in_input_length=False,
        use_speaker_embedding=train_args.use_speaker_embedding,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},  # Switch the mode of preprocessing
    )

    # define function for plot prob and att_ws
    def _plot_and_save(array, figname, figsize=(6, 4), dpi=150):
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt

        shape = array.shape
        if len(shape) == 1:
            # for eos probability
            plt.figure(figsize=figsize, dpi=dpi)
            plt.plot(array)
            plt.xlabel("Frame")
            plt.ylabel("Probability")
            plt.ylim([0, 1])
        elif len(shape) == 2:
            # for tacotron 2 attention weights, whose shape is (out_length, in_length)
            plt.figure(figsize=figsize, dpi=dpi)
            plt.imshow(array, aspect="auto")
            plt.xlabel("Input")
            plt.ylabel("Output")
        elif len(shape) == 4:
            # for transformer attention weights,
            # whose shape is (#layers, #heads, out_length, in_length)
            plt.figure(figsize=(figsize[0] * shape[0], figsize[1] * shape[1]), dpi=dpi)
            for idx1, xs in enumerate(array):
                for idx2, x in enumerate(xs, 1):
                    plt.subplot(shape[0], shape[1], idx1 * shape[1] + idx2)
                    plt.imshow(x, aspect="auto")
                    plt.xlabel("Input")
                    plt.ylabel("Output")
        else:
            raise NotImplementedError("Support only from 1D to 4D array.")
        plt.tight_layout()
        if not os.path.exists(os.path.dirname(figname)):
            # NOTE: exist_ok = True is needed for parallel process decoding
            os.makedirs(os.path.dirname(figname), exist_ok=True)
        plt.savefig(figname)
        plt.close()

    # define function to calculate focus rate
    # (see section 3.3 in https://arxiv.org/abs/1905.09263)
    # NOTE(review): "rete" is a long-standing typo for "rate"; the name is
    # kept unchanged for compatibility.
    def _calculate_focus_rete(att_ws):
        if att_ws is None:
            # fastspeech case -> None
            return 1.0
        elif len(att_ws.shape) == 2:
            # tacotron 2 case -> (L, T)
            return float(att_ws.max(dim=-1)[0].mean())
        elif len(att_ws.shape) == 4:
            # transformer case -> (#layers, #heads, L, T)
            return float(att_ws.max(dim=-1)[0].mean(dim=-1).max())
        else:
            raise ValueError("att_ws should be 2 or 4 dimensional tensor.")

    # define function to convert attention to duration
    def _convert_att_to_duration(att_ws):
        if len(att_ws.shape) == 2:
            # tacotron 2 case -> (L, T)
            pass
        elif len(att_ws.shape) == 4:
            # transformer case -> (#layers, #heads, L, T)
            # get the most diagonal head according to focus rate
            att_ws = torch.cat(
                [att_w for att_w in att_ws], dim=0
            )  # (#heads * #layers, L, T)
            diagonal_scores = att_ws.max(dim=-1)[0].mean(dim=-1)  # (#heads * #layers,)
            diagonal_head_idx = diagonal_scores.argmax()
            att_ws = att_ws[diagonal_head_idx]  # (L, T)
        else:
            raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
        # calculate duration from 2d attention weight
        durations = torch.stack(
            [att_ws.argmax(-1).eq(i).sum() for i in range(att_ws.shape[1])]
        )
        return durations.view(-1, 1).float()

    # define writer instances
    feat_writer = kaldiio.WriteHelper("ark,scp:{o}.ark,{o}.scp".format(o=args.out))
    if args.save_durations:
        dur_writer = kaldiio.WriteHelper(
            "ark,scp:{o}.ark,{o}.scp".format(o=args.out.replace("feats", "durations"))
        )
    if args.save_focus_rates:
        fr_writer = kaldiio.WriteHelper(
            "ark,scp:{o}.ark,{o}.scp".format(o=args.out.replace("feats", "focus_rates"))
        )

    # start decoding
    for idx, utt_id in enumerate(js.keys()):
        # setup inputs
        batch = [(utt_id, js[utt_id])]
        data = load_inputs_and_targets(batch)
        x = torch.FloatTensor(data[0][0]).to(device)
        spemb = None
        if train_args.use_speaker_embedding:
            spemb = torch.FloatTensor(data[1][0]).to(device)

        # decode and write
        start_time = time.time()
        outs, probs, att_ws = model.inference(x, args, spemb=spemb)
        logging.info(
            "inference speed = %.1f frames / sec."
            % (int(outs.size(0)) / (time.time() - start_time))
        )
        if outs.size(0) == x.size(0) * args.maxlenratio:
            logging.warning("output length reaches maximum length (%s)." % utt_id)
        focus_rate = _calculate_focus_rete(att_ws)
        logging.info(
            "(%d/%d) %s (size: %d->%d, focus rate: %.3f)"
            % (idx + 1, len(js.keys()), utt_id, x.size(0), outs.size(0), focus_rate)
        )
        feat_writer[utt_id] = outs.cpu().numpy()
        if args.save_durations:
            ds = _convert_att_to_duration(att_ws)
            dur_writer[utt_id] = ds.cpu().numpy()
        if args.save_focus_rates:
            fr_writer[utt_id] = np.array(focus_rate).reshape(1, 1)

        # plot and save prob and att_ws
        if probs is not None:
            _plot_and_save(
                probs.cpu().numpy(),
                os.path.dirname(args.out) + "/probs/%s_prob.png" % utt_id,
            )
        if att_ws is not None:
            _plot_and_save(
                att_ws.cpu().numpy(),
                os.path.dirname(args.out) + "/att_ws/%s_att_ws.png" % utt_id,
            )

    # close file object
    feat_writer.close()
    if args.save_durations:
        dur_writer.close()
    if args.save_focus_rates:
        fr_writer.close()
| 26,226 | 34.346361 | 88 | py |
espnet | espnet-master/espnet/nets/st_interface.py | """ST Interface module."""
from espnet.nets.asr_interface import ASRInterface
from espnet.utils.dynamic_import import dynamic_import
class STInterface(ASRInterface):
    """ST Interface for ESPnet model implementation.

    NOTE: This class is inherited from ASRInterface to enable joint translation
    and recognition when performing multi-task learning with the ASR task.
    """

    def translate(self, x, trans_args, char_list=None, rnnlm=None, ensemble_models=[]):
        """Translate x for evaluation.

        :param ndarray x: input acoustic feature (B, T, D) or (T, D)
        :param namespace trans_args: argument namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :param list ensemble_models: models for ensemble decoding
            (element type not constrained here — see implementations)
        :return: N-best decoding results
        :rtype: list
        """
        # NOTE(review): the mutable default `[]` is shared across calls; it is
        # never mutated in this stub, but overriding implementations must not
        # append to it in place.
        raise NotImplementedError("translate method is not implemented")

    def translate_batch(self, x, trans_args, char_list=None, rnnlm=None):
        """Beam search implementation for batch.

        :param torch.Tensor x: encoder hidden state sequences (B, Tmax, Henc)
        :param namespace trans_args: argument namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        raise NotImplementedError("Batch decoding is not supported yet.")
# Registry of predefined ST model aliases, resolved by dynamic_import_st():
# backend name -> alias -> "module_path:ClassName".
predefined_st = {
    "pytorch": {
        "rnn": "espnet.nets.pytorch_backend.e2e_st:E2E",
        "transformer": "espnet.nets.pytorch_backend.e2e_st_transformer:E2E",
    },
    # "chainer": {
    #     "rnn": "espnet.nets.chainer_backend.e2e_st:E2E",
    #     "transformer": "espnet.nets.chainer_backend.e2e_st_transformer:E2E",
    # }
}
def dynamic_import_st(module, backend):
    """Resolve an ST model class from a module path or a predefined alias.

    Args:
        module (str): "module_name:class_name" or an alias in `predefined_st`.
        backend (str): NN backend name, e.g. "pytorch".

    Returns:
        type: The resolved ST model class.
    """
    aliases = predefined_st.get(backend, dict())
    cls = dynamic_import(module, aliases)
    # Every ST model must fulfil the STInterface contract.
    assert issubclass(cls, STInterface), f"{module} does not implement STInterface"
    return cls
| 2,271 | 32.411765 | 87 | py |
espnet | espnet-master/espnet/nets/transducer_decoder_interface.py | """Transducer decoder interface module."""
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
@dataclass
class Hypothesis:
    """Default hypothesis definition for Transducer search algorithms.

    Attributes:
        score: Hypothesis score (higher is better).
        yseq: Label sequence as a list of token IDs.
        dec_state: Decoder hidden state; the layout depends on the decoder
            implementation (RNN (h, c) tuple, per-layer list, or tensor).
        lm_state: Language-model state, or None when no LM is used.
    """

    score: float
    yseq: List[int]
    dec_state: Union[
        Tuple[torch.Tensor, Optional[torch.Tensor]],
        List[Optional[torch.Tensor]],
        torch.Tensor,
    ]
    # Optional[...] because the default is None (no LM fused).
    lm_state: Optional[Union[Dict[str, Any], List[Any]]] = None
@dataclass
class ExtendedHypothesis(Hypothesis):
    """Extended hypothesis definition for NSC beam search and mAES.

    Attributes:
        dec_out: Decoder output sequence for this hypothesis, if computed.
        lm_scores: Label scores from the language model, if computed.
    """

    # Optional[...] because both fields default to None until populated.
    dec_out: Optional[List[torch.Tensor]] = None
    lm_scores: Optional[torch.Tensor] = None
class TransducerDecoderInterface:
    """Decoder interface for Transducer models.

    All methods raise NotImplementedError; concrete decoders override them.
    """

    def init_state(
        self,
        batch_size: int,
    ) -> Union[
        Tuple[torch.Tensor, Optional[torch.Tensor]], List[Optional[torch.Tensor]]
    ]:
        """Initialize decoder states.

        Args:
            batch_size: Batch size.

        Returns:
            state: Initial decoder hidden states.
        """
        raise NotImplementedError("init_state(...) is not implemented")

    def score(
        self,
        hyp: Hypothesis,
        cache: Dict[str, Any],
    ) -> Tuple[
        torch.Tensor,
        Union[
            Tuple[torch.Tensor, Optional[torch.Tensor]], List[Optional[torch.Tensor]]
        ],
        torch.Tensor,
    ]:
        """One-step forward hypothesis.

        Args:
            hyp: Hypothesis.
            cache: Pairs of (dec_out, dec_state) keyed by token sequence.

        Returns:
            dec_out: Decoder output sequence.
            new_state: Decoder hidden states.
            lm_tokens: Label ID for LM.
        """
        raise NotImplementedError("score(...) is not implemented")

    def batch_score(
        self,
        hyps: Union[List[Hypothesis], List[ExtendedHypothesis]],
        dec_states: Union[
            Tuple[torch.Tensor, Optional[torch.Tensor]], List[Optional[torch.Tensor]]
        ],
        cache: Dict[str, Any],
        use_lm: bool,
    ) -> Tuple[
        torch.Tensor,
        Union[
            Tuple[torch.Tensor, Optional[torch.Tensor]], List[Optional[torch.Tensor]]
        ],
        torch.Tensor,
    ]:
        """One-step forward hypotheses.

        Args:
            hyps: Hypotheses.
            dec_states: Decoder hidden states.
            cache: Pairs of (dec_out, dec_states) keyed by label sequence.
            use_lm: Whether to compute label ID sequences for LM.

        Returns:
            dec_out: Decoder output sequences.
            dec_states: Decoder hidden states.
            lm_labels: Label ID sequences for LM.
        """
        raise NotImplementedError("batch_score(...) is not implemented")

    def select_state(
        self,
        batch_states: Union[
            Tuple[torch.Tensor, Optional[torch.Tensor]], List[torch.Tensor]
        ],
        idx: int,
    ) -> Union[
        Tuple[torch.Tensor, Optional[torch.Tensor]], List[Optional[torch.Tensor]]
    ]:
        """Get specified ID state from decoder hidden states.

        Args:
            batch_states: Decoder hidden states.
            idx: State ID to extract.

        Returns:
            state_idx: Decoder hidden state for given ID.
        """
        raise NotImplementedError("select_state(...) is not implemented")

    def create_batch_states(
        self,
        states: Union[
            Tuple[torch.Tensor, Optional[torch.Tensor]], List[Optional[torch.Tensor]]
        ],
        new_states: List[
            Union[
                Tuple[torch.Tensor, Optional[torch.Tensor]],
                List[Optional[torch.Tensor]],
            ]
        ],
        l_tokens: List[List[int]],
    ) -> Union[
        Tuple[torch.Tensor, Optional[torch.Tensor]], List[Optional[torch.Tensor]]
    ]:
        """Create batch of decoder hidden states from per-hypothesis states.

        Args:
            states: Batch of decoder states to fill in.
            new_states: List of decoder states, one per hypothesis.
            l_tokens: List of token sequences for the input batch.

        Returns:
            batch_states: Batch of decoder states.
        """
        raise NotImplementedError("create_batch_states(...) is not implemented")
| 4,282 | 26.632258 | 85 | py |
espnet | espnet-master/espnet/nets/batch_beam_search_online_sim.py | """Parallel beam search module for online simulation."""
import logging
from pathlib import Path
from typing import List
import torch
import yaml
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.beam_search import Hypothesis
from espnet.nets.e2e_asr_common import end_detect
class BatchBeamSearchOnlineSim(BatchBeamSearch):
    """Online beam search implementation.

    This simulates streaming decoding.
    It requires encoded features of entire utterance and
    extracts block by block from it as it should be done
    in streaming processing.
    This is based on Tsunoo et al, "STREAMING TRANSFORMER ASR
    WITH BLOCKWISE SYNCHRONOUS BEAM SEARCH"
    (https://arxiv.org/abs/2006.14941).
    """

    def set_streaming_config(self, asr_config: str):
        """Set config file for streaming decoding.

        Reads ``block_size`` / ``hop_size`` / ``look_ahead`` from the
        "encoder_conf" section of the training config; if that section is
        absent, falls back to a nested config file referenced by the
        "config" key.

        Args:
            asr_config (str): The config file for asr training
        """
        train_config_file = Path(asr_config)
        self.block_size = None
        self.hop_size = None
        self.look_ahead = None
        config = None
        with train_config_file.open("r", encoding="utf-8") as f:
            args = yaml.safe_load(f)
            if "encoder_conf" in args.keys():
                if "block_size" in args["encoder_conf"].keys():
                    self.block_size = args["encoder_conf"]["block_size"]
                if "hop_size" in args["encoder_conf"].keys():
                    self.hop_size = args["encoder_conf"]["hop_size"]
                if "look_ahead" in args["encoder_conf"].keys():
                    self.look_ahead = args["encoder_conf"]["look_ahead"]
            elif "config" in args.keys():
                config = args["config"]
        # NOTE(review): when "encoder_conf" existed, config stays None and we
        # return here with whatever sizes were found above; the log message
        # only matters when none were found (forward() then falls back to
        # non-streaming batch beam search).
        if config is None:
            logging.info(
                "Cannot find config file for streaming decoding: "
                + "apply batch beam search instead."
            )
            return
        if (
            self.block_size is None or self.hop_size is None or self.look_ahead is None
        ) and config is not None:
            config_file = Path(config)
            with config_file.open("r", encoding="utf-8") as f:
                args = yaml.safe_load(f)
            if "encoder_conf" in args.keys():
                enc_args = args["encoder_conf"]
                if enc_args and "block_size" in enc_args:
                    self.block_size = enc_args["block_size"]
                if enc_args and "hop_size" in enc_args:
                    self.hop_size = enc_args["hop_size"]
                if enc_args and "look_ahead" in enc_args:
                    self.look_ahead = enc_args["look_ahead"]

    def set_block_size(self, block_size: int):
        """Set block size for streaming decoding.

        Args:
            block_size (int): The block size of encoder
        """
        self.block_size = block_size

    def set_hop_size(self, hop_size: int):
        """Set hop size for streaming decoding.

        Args:
            hop_size (int): The hop size of encoder
        """
        self.hop_size = hop_size

    def set_look_ahead(self, look_ahead: int):
        """Set look ahead size for streaming decoding.

        Args:
            look_ahead (int): The look ahead size of encoder
        """
        self.look_ahead = look_ahead

    def forward(
        self, x: torch.Tensor, maxlenratio: float = 0.0, minlenratio: float = 0.0
    ) -> List[Hypothesis]:
        """Perform beam search.

        Args:
            x (torch.Tensor): Encoded speech feature (T, D)
            maxlenratio (float): Input length ratio to obtain max output length.
                If maxlenratio=0.0 (default), it uses a end-detect function
                to automatically find maximum hypothesis lengths
            minlenratio (float): Input length ratio to obtain min output length.

        Returns:
            list[Hypothesis]: N-best decoding results
        """
        self.conservative = True  # always true

        # When the block sizes are configured, start with only the first
        # block visible; otherwise decode over the whole utterance at once.
        if self.block_size and self.hop_size and self.look_ahead:
            cur_end_frame = int(self.block_size - self.look_ahead)
        else:
            cur_end_frame = x.shape[0]
        process_idx = 0
        if cur_end_frame < x.shape[0]:
            h = x.narrow(0, 0, cur_end_frame)
        else:
            h = x

        # set length bounds
        if maxlenratio == 0:
            maxlen = x.shape[0]
        else:
            maxlen = max(1, int(maxlenratio * x.size(0)))
        minlen = int(minlenratio * x.size(0))
        logging.info("decoder input length: " + str(x.shape[0]))
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))

        # main loop of prefix search
        running_hyps = self.init_hyp(h)
        prev_hyps = []
        ended_hyps = []
        prev_repeat = False

        continue_decode = True

        while continue_decode:
            move_to_next_block = False
            if cur_end_frame < x.shape[0]:
                h = x.narrow(0, 0, cur_end_frame)
            else:
                h = x

            # extend states for ctc
            self.extend(h, running_hyps)

            while process_idx < maxlen:
                logging.debug("position " + str(process_idx))
                best = self.search(running_hyps, h)

                if process_idx == maxlen - 1:
                    # end decoding
                    running_hyps = self.post_process(
                        process_idx, maxlen, maxlenratio, best, ended_hyps
                    )
                n_batch = best.yseq.shape[0]
                local_ended_hyps = []
                is_local_eos = (
                    best.yseq[torch.arange(n_batch), best.length - 1] == self.eos
                )
                for i in range(is_local_eos.shape[0]):
                    if is_local_eos[i]:
                        hyp = self._select(best, i)
                        local_ended_hyps.append(hyp)
                    # NOTE(tsunoo): check repetitions here
                    # This is a implicit implementation of
                    # Eq (11) in https://arxiv.org/abs/2006.14941
                    # A flag prev_repeat is used instead of using set
                    elif (
                        not prev_repeat
                        and best.yseq[i, -1] in best.yseq[i, :-1]
                        and cur_end_frame < x.shape[0]
                    ):
                        move_to_next_block = True
                        prev_repeat = True
                if maxlenratio == 0.0 and end_detect(
                    [lh.asdict() for lh in local_ended_hyps], process_idx
                ):
                    logging.info(f"end detected at {process_idx}")
                    continue_decode = False
                    break
                if len(local_ended_hyps) > 0 and cur_end_frame < x.shape[0]:
                    move_to_next_block = True

                if move_to_next_block:
                    if (
                        self.hop_size
                        and cur_end_frame + int(self.hop_size) + int(self.look_ahead)
                        < x.shape[0]
                    ):
                        cur_end_frame += int(self.hop_size)
                    else:
                        cur_end_frame = x.shape[0]
                    logging.debug("Going to next block: %d", cur_end_frame)
                    # Conservatively roll back one step so the last decision
                    # is re-evaluated with the newly revealed frames.
                    if process_idx > 1 and len(prev_hyps) > 0 and self.conservative:
                        running_hyps = prev_hyps
                        process_idx -= 1
                        prev_hyps = []
                    break

                prev_repeat = False
                prev_hyps = running_hyps
                running_hyps = self.post_process(
                    process_idx, maxlen, maxlenratio, best, ended_hyps
                )

                # Hypotheses may only end for good once the full input is seen.
                if cur_end_frame >= x.shape[0]:
                    for hyp in local_ended_hyps:
                        ended_hyps.append(hyp)

                if len(running_hyps) == 0:
                    logging.info("no hypothesis. Finish decoding.")
                    continue_decode = False
                    break
                else:
                    logging.debug(f"remained hypotheses: {len(running_hyps)}")
                # increment number
                process_idx += 1

        nbest_hyps = sorted(ended_hyps, key=lambda x: x.score, reverse=True)
        # check the number of hypotheses reaching to eos
        if len(nbest_hyps) == 0:
            logging.warning(
                "there is no N-best results, perform recognition "
                "again with smaller minlenratio."
            )
            return (
                []
                if minlenratio < 0.1
                else self.forward(x, maxlenratio, max(0.0, minlenratio - 0.1))
            )

        # report the best result
        best = nbest_hyps[0]
        for k, v in best.scores.items():
            logging.info(
                f"{v:6.2f} * {self.weights[k]:3} = {v * self.weights[k]:6.2f} for {k}"
            )
        logging.info(f"total log probability: {best.score:.2f}")
        logging.info(f"normalized log probability: {best.score / len(best.yseq):.2f}")
        logging.info(f"total number of ended hypotheses: {len(nbest_hyps)}")
        if self.token_list is not None:
            logging.info(
                "best hypo: "
                + "".join([self.token_list[x] for x in best.yseq[1:-1]])
                + "\n"
            )
        if best.yseq[1:-1].shape[0] == x.shape[0]:
            logging.warning(
                "best hypo length: {} == max output length: {}".format(
                    best.yseq[1:-1].shape[0], maxlen
                )
            )
            logging.warning(
                "decoding may be stopped by the max output length limitation, "
                + "please consider to increase the maxlenratio."
            )
        return nbest_hyps

    def extend(self, x: torch.Tensor, hyps: Hypothesis) -> List[Hypothesis]:
        """Extend probabilities and states with more encoded chunks.

        Args:
            x (torch.Tensor): The extended encoder output feature
            hyps (Hypothesis): Current list of hypothesis

        Returns:
            Hypothesis: The extended hypothesis
        """
        # Only scorers that support streaming (e.g. CTC prefix scorers with
        # extend_prob/extend_state) are updated; others are left untouched.
        for k, d in self.scorers.items():
            if hasattr(d, "extend_prob"):
                d.extend_prob(x)
            if hasattr(d, "extend_state"):
                hyps.states[k] = d.extend_state(hyps.states[k])
| 10,567 | 36.742857 | 87 | py |
espnet | espnet-master/espnet/nets/e2e_asr_common.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Common functions for ASR."""
import json
import logging
import sys
from itertools import groupby
import numpy as np
def end_detect(ended_hyps, i, M=3, D_end=np.log(1 * np.exp(-10))):
    """Detect whether decoding can be terminated at output position *i*.

    Implements Eq. (50) of S. Watanabe et al.,
    "Hybrid CTC/Attention Architecture for End-to-End Speech Recognition".

    Args:
        ended_hyps (list): Hypotheses that already ended; each is a dict with
            at least "score" and "yseq" entries.
        i (int): Current output position.
        M (int): Number of most recent length bins to inspect.
        D_end (float): Score-gap threshold (log domain).

    Returns:
        bool: True if the best hypothesis in each of the last M length bins
            falls more than ``D_end`` below the overall best score.
    """
    if not ended_hyps:
        return False

    global_best = max(ended_hyps, key=lambda h: h["score"])
    num_far_below = 0
    for offset in range(M):
        # hypotheses whose output length equals i - offset
        same_length = [h for h in ended_hyps if len(h["yseq"]) == i - offset]
        if same_length:
            local_best = max(same_length, key=lambda h: h["score"])
            if local_best["score"] - global_best["score"] < D_end:
                num_far_below += 1
    return num_far_below == M
# TODO(takaaki-hori): add different smoothing methods
def label_smoothing_dist(odim, lsm_type, transcript=None, blank=0):
    """Obtain label distribution for loss smoothing.

    Args:
        odim (int): Output (vocabulary) dimension, <eos> being the last ID.
        lsm_type (str): Label smoothing type; only "unigram" is supported.
        transcript (str): Path to a data JSON file with an "utts" dictionary.
        blank (int): Index of the blank token, excluded from the distribution.

    Returns:
        numpy.ndarray: Normalized unigram label distribution of shape (odim,).
    """
    if transcript is not None:
        with open(transcript, "rb") as f:
            trans_json = json.load(f)["utts"]

    if lsm_type == "unigram":
        assert transcript is not None, (
            "transcript is required for %s label smoothing" % lsm_type
        )
        labelcount = np.zeros(odim)
        for k, v in trans_json.items():
            ids = np.array([int(n) for n in v["output"][0]["tokenid"].split()])
            # to avoid an error when there is no text in an utterance
            if len(ids) > 0:
                labelcount[ids] += 1
        # Each utterance ends with exactly one <eos>, so count one per
        # utterance.  (Bug fix: this previously used len(transcript), i.e.
        # the character length of the file *path*, not the utterance count.)
        labelcount[odim - 1] = len(trans_json)  # count <eos>
        labelcount[labelcount == 0] = 1  # flooring to avoid zero probability
        labelcount[blank] = 0  # remove counts for blank
        labeldist = labelcount.astype(np.float32) / np.sum(labelcount)
    else:
        logging.error("Error: unexpected label smoothing type: %s" % lsm_type)
        sys.exit()

    return labeldist
def get_vgg2l_odim(idim, in_channel=3, out_channel=128):
    """Return the output size of the VGG frontend.

    Args:
        idim (int): Flattened input feature dimension.
        in_channel (int): Input channel size.
        out_channel (int): Output channel size.

    Returns:
        int: Output size after the two max-pooling layers.
    """
    freq = idim / in_channel
    # Two max-pooling layers, each halving the frequency axis (ceil mode).
    for _ in range(2):
        freq = np.ceil(np.array(freq, dtype=np.float32) / 2)
    return int(freq) * out_channel  # number of channels
class ErrorCalculator(object):
"""Calculate CER and WER for E2E_ASR and CTC models during training.
:param y_hats: numpy array with predicted text
:param y_pads: numpy array with true (target) text
:param char_list:
:param sym_space:
:param sym_blank:
:return:
"""
def __init__(
self, char_list, sym_space, sym_blank, report_cer=False, report_wer=False
):
"""Construct an ErrorCalculator object."""
super(ErrorCalculator, self).__init__()
self.report_cer = report_cer
self.report_wer = report_wer
self.char_list = char_list
self.space = sym_space
self.blank = sym_blank
# NOTE (Shih-Lun): else case is for OpenAI Whisper ASR model,
# which doesn't use <blank> token
if self.blank in self.char_list:
self.idx_blank = self.char_list.index(self.blank)
else:
self.idx_blank = None
if self.space in self.char_list:
self.idx_space = self.char_list.index(self.space)
else:
self.idx_space = None
def __call__(self, ys_hat, ys_pad, is_ctc=False):
"""Calculate sentence-level WER/CER score.
:param torch.Tensor ys_hat: prediction (batch, seqlen)
:param torch.Tensor ys_pad: reference (batch, seqlen)
:param bool is_ctc: calculate CER score for CTC
:return: sentence-level WER score
:rtype float
:return: sentence-level CER score
:rtype float
"""
cer, wer = None, None
if is_ctc:
return self.calculate_cer_ctc(ys_hat, ys_pad)
elif not self.report_cer and not self.report_wer:
return cer, wer
seqs_hat, seqs_true = self.convert_to_char(ys_hat, ys_pad)
if self.report_cer:
cer = self.calculate_cer(seqs_hat, seqs_true)
if self.report_wer:
wer = self.calculate_wer(seqs_hat, seqs_true)
return cer, wer
def calculate_cer_ctc(self, ys_hat, ys_pad):
"""Calculate sentence-level CER score for CTC.
:param torch.Tensor ys_hat: prediction (batch, seqlen)
:param torch.Tensor ys_pad: reference (batch, seqlen)
:return: average sentence-level CER score
:rtype float
"""
import editdistance
cers, char_ref_lens = [], []
for i, y in enumerate(ys_hat):
y_hat = [x[0] for x in groupby(y)]
y_true = ys_pad[i]
seq_hat, seq_true = [], []
for idx in y_hat:
idx = int(idx)
if idx != -1 and idx != self.idx_blank and idx != self.idx_space:
seq_hat.append(self.char_list[int(idx)])
for idx in y_true:
idx = int(idx)
if idx != -1 and idx != self.idx_blank and idx != self.idx_space:
seq_true.append(self.char_list[int(idx)])
hyp_chars = "".join(seq_hat)
ref_chars = "".join(seq_true)
if len(ref_chars) > 0:
cers.append(editdistance.eval(hyp_chars, ref_chars))
char_ref_lens.append(len(ref_chars))
cer_ctc = float(sum(cers)) / sum(char_ref_lens) if cers else None
return cer_ctc
def convert_to_char(self, ys_hat, ys_pad):
    """Convert index sequences into hypothesis/reference strings.

    :param torch.Tensor ys_hat: prediction (batch, seqlen)
    :param torch.Tensor ys_pad: reference (batch, seqlen)
    :return: list of hypothesis strings
    :rtype list
    :return: list of reference strings
    :rtype list
    """
    hyp_texts, ref_texts = [], []
    for batch_idx, hyp_ids in enumerate(ys_hat):
        ref_ids = ys_pad[batch_idx]
        # The first padding index (-1) in the reference marks how far
        # the hypothesis should be read.
        pad_pos = np.where(ref_ids == -1)[0]
        hyp_len = pad_pos[0] if len(pad_pos) > 0 else len(ref_ids)
        hyp_tokens = [self.char_list[int(t)] for t in hyp_ids[:hyp_len]]
        ref_tokens = [self.char_list[int(t)] for t in ref_ids if int(t) != -1]
        hyp_text = "".join(hyp_tokens).replace(self.space, " ")
        hyp_text = hyp_text.replace(self.blank, "")
        ref_text = "".join(ref_tokens).replace(self.space, " ")
        hyp_texts.append(hyp_text)
        ref_texts.append(ref_text)
    return hyp_texts, ref_texts
def calculate_cer(self, seqs_hat, seqs_true):
    """Calculate sentence-level CER score.

    :param list seqs_hat: prediction
    :param list seqs_true: reference
    :return: average sentence-level CER score, or None when the
        references contain no characters at all
    :rtype float
    """
    import editdistance

    char_eds, char_ref_lens = [], []
    for i, seq_hat_text in enumerate(seqs_hat):
        seq_true_text = seqs_true[i]
        # Spaces do not count toward character error rate.
        hyp_chars = seq_hat_text.replace(" ", "")
        ref_chars = seq_true_text.replace(" ", "")
        char_eds.append(editdistance.eval(hyp_chars, ref_chars))
        char_ref_lens.append(len(ref_chars))
    total_ref_len = sum(char_ref_lens)
    # Guard against empty references: mirror calculate_cer_ctc, which
    # returns None instead of raising ZeroDivisionError.
    if total_ref_len == 0:
        return None
    return float(sum(char_eds)) / total_ref_len
def calculate_wer(self, seqs_hat, seqs_true):
    """Calculate sentence-level WER score.

    :param list seqs_hat: prediction
    :param list seqs_true: reference
    :return: average sentence-level WER score, or None when the
        references contain no words at all
    :rtype float
    """
    import editdistance

    word_eds, word_ref_lens = [], []
    for i, seq_hat_text in enumerate(seqs_hat):
        seq_true_text = seqs_true[i]
        hyp_words = seq_hat_text.split()
        ref_words = seq_true_text.split()
        word_eds.append(editdistance.eval(hyp_words, ref_words))
        word_ref_lens.append(len(ref_words))
    total_ref_len = sum(word_ref_lens)
    # Guard against empty references: mirror calculate_cer_ctc, which
    # returns None instead of raising ZeroDivisionError.
    if total_ref_len == 0:
        return None
    return float(sum(word_eds)) / total_ref_len
| 8,773 | 33.543307 | 85 | py |
espnet | espnet-master/espnet/nets/ctc_prefix_score.py | #!/usr/bin/env python3
# Copyright 2018 Mitsubishi Electric Research Labs (Takaaki Hori)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import numpy as np
import torch
class CTCPrefixScoreTH(object):
"""Batch processing of CTCPrefixScore
which is based on Algorithm 2 in WATANABE et al.
"HYBRID CTC/ATTENTION ARCHITECTURE FOR END-TO-END SPEECH RECOGNITION,"
but extended to efficiently compute the label probablities for multiple
hypotheses simultaneously
See also Seki et al. "Vectorized Beam Search for CTC-Attention-Based
Speech Recognition," In INTERSPEECH (pp. 3825-3829), 2019.
"""
def __init__(self, x, xlens, blank, eos, margin=0):
"""Construct CTC prefix scorer
:param torch.Tensor x: input label posterior sequences (B, T, O)
:param torch.Tensor xlens: input lengths (B,)
:param int blank: blank label id
:param int eos: end-of-sequence id
:param int margin: margin parameter for windowing (0 means no windowing)
"""
# In the comment lines,
# we assume T: input_length, B: batch size, W: beam width, O: output dim.
self.logzero = -10000000000.0
self.blank = blank
self.eos = eos
self.batch = x.size(0)
self.input_length = x.size(1)
self.odim = x.size(2)
self.dtype = x.dtype
self.device = (
torch.device("cuda:%d" % x.get_device())
if x.is_cuda
else torch.device("cpu")
)
# Pad the rest of posteriors in the batch
# TODO(takaaki-hori): need a better way without for-loops
for i, l in enumerate(xlens):
if l < self.input_length:
x[i, l:, :] = self.logzero
x[i, l:, blank] = 0
# Reshape input x
xn = x.transpose(0, 1) # (B, T, O) -> (T, B, O)
xb = xn[:, :, self.blank].unsqueeze(2).expand(-1, -1, self.odim)
self.x = torch.stack([xn, xb]) # (2, T, B, O)
self.end_frames = torch.as_tensor(xlens) - 1
# Setup CTC windowing
self.margin = margin
if margin > 0:
self.frame_ids = torch.arange(
self.input_length, dtype=self.dtype, device=self.device
)
# Base indices for index conversion
self.idx_bh = None
self.idx_b = torch.arange(self.batch, device=self.device)
self.idx_bo = (self.idx_b * self.odim).unsqueeze(1)
def __call__(self, y, state, scoring_ids=None, att_w=None):
"""Compute CTC prefix scores for next labels
:param list y: prefix label sequences
:param tuple state: previous CTC state
:param torch.Tensor pre_scores: scores for pre-selection of hypotheses (BW, O)
:param torch.Tensor att_w: attention weights to decide CTC window
:return new_state, ctc_local_scores (BW, O)
"""
output_length = len(y[0]) - 1 # ignore sos
last_ids = [yi[-1] for yi in y] # last output label ids
n_bh = len(last_ids) # batch * hyps
n_hyps = n_bh // self.batch # assuming each utterance has the same # of hyps
self.scoring_num = scoring_ids.size(-1) if scoring_ids is not None else 0
# prepare state info
if state is None:
r_prev = torch.full(
(self.input_length, 2, self.batch, n_hyps),
self.logzero,
dtype=self.dtype,
device=self.device,
)
r_prev[:, 1] = torch.cumsum(self.x[0, :, :, self.blank], 0).unsqueeze(2)
r_prev = r_prev.view(-1, 2, n_bh)
s_prev = 0.0
f_min_prev = 0
f_max_prev = 1
else:
r_prev, s_prev, f_min_prev, f_max_prev = state
# select input dimensions for scoring
if self.scoring_num > 0:
scoring_idmap = torch.full(
(n_bh, self.odim), -1, dtype=torch.long, device=self.device
)
snum = self.scoring_num
if self.idx_bh is None or n_bh > len(self.idx_bh):
self.idx_bh = torch.arange(n_bh, device=self.device).view(-1, 1)
scoring_idmap[self.idx_bh[:n_bh], scoring_ids] = torch.arange(
snum, device=self.device
)
scoring_idx = (
scoring_ids + self.idx_bo.repeat(1, n_hyps).view(-1, 1)
).view(-1)
x_ = torch.index_select(
self.x.view(2, -1, self.batch * self.odim), 2, scoring_idx
).view(2, -1, n_bh, snum)
else:
scoring_ids = None
scoring_idmap = None
snum = self.odim
x_ = self.x.unsqueeze(3).repeat(1, 1, 1, n_hyps, 1).view(2, -1, n_bh, snum)
# new CTC forward probs are prepared as a (T x 2 x BW x S) tensor
# that corresponds to r_t^n(h) and r_t^b(h) in a batch.
r = torch.full(
(self.input_length, 2, n_bh, snum),
self.logzero,
dtype=self.dtype,
device=self.device,
)
if output_length == 0:
r[0, 0] = x_[0, 0]
r_sum = torch.logsumexp(r_prev, 1)
log_phi = r_sum.unsqueeze(2).repeat(1, 1, snum)
if scoring_ids is not None:
for idx in range(n_bh):
pos = scoring_idmap[idx, last_ids[idx]]
if pos >= 0:
log_phi[:, idx, pos] = r_prev[:, 1, idx]
else:
for idx in range(n_bh):
log_phi[:, idx, last_ids[idx]] = r_prev[:, 1, idx]
# decide start and end frames based on attention weights
if att_w is not None and self.margin > 0:
f_arg = torch.matmul(att_w, self.frame_ids)
f_min = max(int(f_arg.min().cpu()), f_min_prev)
f_max = max(int(f_arg.max().cpu()), f_max_prev)
start = min(f_max_prev, max(f_min - self.margin, output_length, 1))
end = min(f_max + self.margin, self.input_length)
else:
f_min = f_max = 0
start = max(output_length, 1)
end = self.input_length
# compute forward probabilities log(r_t^n(h)) and log(r_t^b(h))
for t in range(start, end):
rp = r[t - 1]
rr = torch.stack([rp[0], log_phi[t - 1], rp[0], rp[1]]).view(
2, 2, n_bh, snum
)
r[t] = torch.logsumexp(rr, 1) + x_[:, t]
# compute log prefix probabilities log(psi)
log_phi_x = torch.cat((log_phi[0].unsqueeze(0), log_phi[:-1]), dim=0) + x_[0]
if scoring_ids is not None:
log_psi = torch.full(
(n_bh, self.odim), self.logzero, dtype=self.dtype, device=self.device
)
log_psi_ = torch.logsumexp(
torch.cat((log_phi_x[start:end], r[start - 1, 0].unsqueeze(0)), dim=0),
dim=0,
)
for si in range(n_bh):
log_psi[si, scoring_ids[si]] = log_psi_[si]
else:
log_psi = torch.logsumexp(
torch.cat((log_phi_x[start:end], r[start - 1, 0].unsqueeze(0)), dim=0),
dim=0,
)
for si in range(n_bh):
log_psi[si, self.eos] = r_sum[self.end_frames[si // n_hyps], si]
# exclude blank probs
log_psi[:, self.blank] = self.logzero
return (log_psi - s_prev), (r, log_psi, f_min, f_max, scoring_idmap)
def index_select_state(self, state, best_ids):
"""Select CTC states according to best ids
:param state : CTC state
:param best_ids : index numbers selected by beam pruning (B, W)
:return selected_state
"""
r, s, f_min, f_max, scoring_idmap = state
# convert ids to BHO space
n_bh = len(s)
n_hyps = n_bh // self.batch
vidx = (best_ids + (self.idx_b * (n_hyps * self.odim)).view(-1, 1)).view(-1)
# select hypothesis scores
s_new = torch.index_select(s.view(-1), 0, vidx)
s_new = s_new.view(-1, 1).repeat(1, self.odim).view(n_bh, self.odim)
# convert ids to BHS space (S: scoring_num)
if scoring_idmap is not None:
snum = self.scoring_num
hyp_idx = (best_ids // self.odim + (self.idx_b * n_hyps).view(-1, 1)).view(
-1
)
label_ids = torch.fmod(best_ids, self.odim).view(-1)
score_idx = scoring_idmap[hyp_idx, label_ids]
score_idx[score_idx == -1] = 0
vidx = score_idx + hyp_idx * snum
else:
snum = self.odim
# select forward probabilities
r_new = torch.index_select(r.view(-1, 2, n_bh * snum), 2, vidx).view(
-1, 2, n_bh
)
return r_new, s_new, f_min, f_max
def extend_prob(self, x):
"""Extend CTC prob.
:param torch.Tensor x: input label posterior sequences (B, T, O)
"""
if self.x.shape[1] < x.shape[1]: # self.x (2,T,B,O); x (B,T,O)
# Pad the rest of posteriors in the batch
# TODO(takaaki-hori): need a better way without for-loops
xlens = [x.size(1)]
for i, l in enumerate(xlens):
if l < self.input_length:
x[i, l:, :] = self.logzero
x[i, l:, self.blank] = 0
tmp_x = self.x
xn = x.transpose(0, 1) # (B, T, O) -> (T, B, O)
xb = xn[:, :, self.blank].unsqueeze(2).expand(-1, -1, self.odim)
self.x = torch.stack([xn, xb]) # (2, T, B, O)
self.x[:, : tmp_x.shape[1], :, :] = tmp_x
self.input_length = x.size(1)
self.end_frames = torch.as_tensor(xlens) - 1
def extend_state(self, state):
"""Compute CTC prefix state.
:param state : CTC state
:return ctc_state
"""
if state is None:
# nothing to do
return state
else:
r_prev, s_prev, f_min_prev, f_max_prev = state
r_prev_new = torch.full(
(self.input_length, 2),
self.logzero,
dtype=self.dtype,
device=self.device,
)
start = max(r_prev.shape[0], 1)
r_prev_new[0:start] = r_prev
for t in range(start, self.input_length):
r_prev_new[t, 1] = r_prev_new[t - 1, 1] + self.x[0, t, :, self.blank]
return (r_prev_new, s_prev, f_min_prev, f_max_prev)
class CTCPrefixScore(object):
"""Compute CTC label sequence scores
which is based on Algorithm 2 in WATANABE et al.
"HYBRID CTC/ATTENTION ARCHITECTURE FOR END-TO-END SPEECH RECOGNITION,"
but extended to efficiently compute the probablities of multiple labels
simultaneously
"""
def __init__(self, x, blank, eos, xp):
self.xp = xp
self.logzero = -10000000000.0
self.blank = blank
self.eos = eos
self.input_length = len(x)
self.x = x
def initial_state(self):
"""Obtain an initial CTC state
:return: CTC state
"""
# initial CTC state is made of a frame x 2 tensor that corresponds to
# r_t^n(<sos>) and r_t^b(<sos>), where 0 and 1 of axis=1 represent
# superscripts n and b (non-blank and blank), respectively.
r = self.xp.full((self.input_length, 2), self.logzero, dtype=np.float32)
r[0, 1] = self.x[0, self.blank]
for i in range(1, self.input_length):
r[i, 1] = r[i - 1, 1] + self.x[i, self.blank]
return r
def __call__(self, y, cs, r_prev):
"""Compute CTC prefix scores for next labels
:param y : prefix label sequence
:param cs : array of next labels
:param r_prev: previous CTC state
:return ctc_scores, ctc_states
"""
# initialize CTC states
output_length = len(y) - 1 # ignore sos
# new CTC states are prepared as a frame x (n or b) x n_labels tensor
# that corresponds to r_t^n(h) and r_t^b(h).
r = self.xp.ndarray((self.input_length, 2, len(cs)), dtype=np.float32)
xs = self.x[:, cs]
if output_length == 0:
r[0, 0] = xs[0]
r[0, 1] = self.logzero
else:
r[output_length - 1] = self.logzero
# prepare forward probabilities for the last label
r_sum = self.xp.logaddexp(
r_prev[:, 0], r_prev[:, 1]
) # log(r_t^n(g) + r_t^b(g))
last = y[-1]
if output_length > 0 and last in cs:
log_phi = self.xp.ndarray((self.input_length, len(cs)), dtype=np.float32)
for i in range(len(cs)):
log_phi[:, i] = r_sum if cs[i] != last else r_prev[:, 1]
else:
log_phi = r_sum
# compute forward probabilities log(r_t^n(h)), log(r_t^b(h)),
# and log prefix probabilities log(psi)
start = max(output_length, 1)
log_psi = r[start - 1, 0]
for t in range(start, self.input_length):
r[t, 0] = self.xp.logaddexp(r[t - 1, 0], log_phi[t - 1]) + xs[t]
r[t, 1] = (
self.xp.logaddexp(r[t - 1, 0], r[t - 1, 1]) + self.x[t, self.blank]
)
log_psi = self.xp.logaddexp(log_psi, log_phi[t - 1] + xs[t])
# get P(...eos|X) that ends with the prefix itself
eos_pos = self.xp.where(cs == self.eos)[0]
if len(eos_pos) > 0:
log_psi[eos_pos] = r_sum[-1] # log(r_T^n(g) + r_T^b(g))
# exclude blank probs
blank_pos = self.xp.where(cs == self.blank)[0]
if len(blank_pos) > 0:
log_psi[blank_pos] = self.logzero
# return the log prefix probability and CTC states, where the label axis
# of the CTC states is moved to the first axis to slice it easily
return log_psi, self.xp.rollaxis(r, 2)
| 13,899 | 37.826816 | 87 | py |
espnet | espnet-master/espnet/nets/beam_search_transducer.py | """Search algorithms for Transducer models."""
import logging
from typing import List, Union
import numpy as np
import torch
from espnet.nets.pytorch_backend.transducer.custom_decoder import CustomDecoder
from espnet.nets.pytorch_backend.transducer.joint_network import JointNetwork
from espnet.nets.pytorch_backend.transducer.rnn_decoder import RNNDecoder
from espnet.nets.pytorch_backend.transducer.utils import (
create_lm_batch_states,
init_lm_state,
is_prefix,
recombine_hyps,
select_k_expansions,
select_lm_state,
subtract,
)
from espnet.nets.transducer_decoder_interface import ExtendedHypothesis, Hypothesis
class BeamSearchTransducer:
    """Beam search implementation for Transducer."""

    def __init__(
        self,
        decoder: Union[RNNDecoder, CustomDecoder],
        joint_network: JointNetwork,
        beam_size: int,
        lm: torch.nn.Module = None,
        lm_weight: float = 0.1,
        search_type: str = "default",
        max_sym_exp: int = 2,
        u_max: int = 50,
        nstep: int = 1,
        prefix_alpha: int = 1,
        expansion_gamma: float = 2.3,
        expansion_beta: int = 2,
        score_norm: bool = True,
        softmax_temperature: float = 1.0,
        nbest: int = 1,
        quantization: bool = False,
    ):
        """Initialize Transducer search module.

        Args:
            decoder: Decoder module.
            joint_network: Joint network module.
            beam_size: Beam size.
            lm: LM class.
            lm_weight: LM weight for soft fusion.
            search_type: Search algorithm to use during inference.
            max_sym_exp: Number of maximum symbol expansions at each time step. (TSD)
            u_max: Maximum output sequence length. (ALSD)
            nstep: Number of maximum expansion steps at each time step. (NSC/mAES)
            prefix_alpha: Maximum prefix length in prefix search. (NSC/mAES)
            expansion_beta:
                Number of additional candidates for expanded hypotheses selection. (mAES)
            expansion_gamma: Allowed logp difference for prune-by-value method. (mAES)
            score_norm: Normalize final scores by length. ("default")
            softmax_temperature: Penalization term for softmax function.
            nbest: Number of final hypothesis.
            quantization: Whether dynamic quantization is used.
        """
        self.decoder = decoder
        self.joint_network = joint_network

        self.beam_size = beam_size
        self.hidden_size = decoder.dunits
        self.vocab_size = decoder.odim
        self.blank_id = decoder.blank_id

        # Dispatch the search strategy once at construction time.
        if self.beam_size <= 1:
            self.search_algorithm = self.greedy_search
        elif search_type == "default":
            self.search_algorithm = self.default_beam_search
        elif search_type == "tsd":
            self.max_sym_exp = max_sym_exp

            self.search_algorithm = self.time_sync_decoding
        elif search_type == "alsd":
            self.u_max = u_max

            self.search_algorithm = self.align_length_sync_decoding
        elif search_type == "nsc":
            self.nstep = nstep
            self.prefix_alpha = prefix_alpha

            self.search_algorithm = self.nsc_beam_search
        elif search_type == "maes":
            # mAES requires at least two expansion steps per frame.
            self.nstep = nstep if nstep > 1 else 2
            self.prefix_alpha = prefix_alpha
            self.expansion_gamma = expansion_gamma

            assert self.vocab_size >= beam_size + expansion_beta, (
                "beam_size (%d) + expansion_beta (%d) "
                "should be smaller or equal to vocabulary size (%d)."
                % (beam_size, expansion_beta, self.vocab_size)
            )
            self.max_candidates = beam_size + expansion_beta

            self.search_algorithm = self.modified_adaptive_expansion_search
        else:
            raise NotImplementedError

        if lm is not None:
            self.use_lm = True
            self.lm = lm
            self.is_wordlm = True if hasattr(lm.predictor, "wordlm") else False
            self.lm_predictor = lm.predictor.wordlm if self.is_wordlm else lm.predictor
            self.lm_layers = len(self.lm_predictor.rnn)

            self.lm_weight = lm_weight
        else:
            self.use_lm = False

        # Softmax temperature is ignored when fusing with an LM.
        if softmax_temperature > 1.0 and lm is not None:
            logging.warning(
                "Softmax temperature is not supported with LM decoding."
                "Setting softmax-temperature value to 1.0."
            )

            self.softmax_temperature = 1.0
        else:
            self.softmax_temperature = softmax_temperature

        self.quantization = quantization

        self.score_norm = score_norm
        self.nbest = nbest

    def __call__(
        self, enc_out: torch.Tensor
    ) -> Union[List[Hypothesis], List[ExtendedHypothesis]]:
        """Perform beam search.

        Args:
            enc_out: Encoder output sequence. (T, D_enc)

        Returns:
            nbest_hyps: N-best decoding results

        """
        self.decoder.set_device(enc_out.device)

        nbest_hyps = self.search_algorithm(enc_out)

        return nbest_hyps

    def sort_nbest(
        self, hyps: Union[List[Hypothesis], List[ExtendedHypothesis]]
    ) -> Union[List[Hypothesis], List[ExtendedHypothesis]]:
        """Sort hypotheses by score or score given sequence length.

        Args:
            hyps: Hypothesis.

        Return:
            hyps: Sorted hypothesis.

        """
        if self.score_norm:
            hyps.sort(key=lambda x: x.score / len(x.yseq), reverse=True)
        else:
            hyps.sort(key=lambda x: x.score, reverse=True)

        return hyps[: self.nbest]

    def prefix_search(
        self, hyps: List[ExtendedHypothesis], enc_out_t: torch.Tensor
    ) -> List[ExtendedHypothesis]:
        """Prefix search for NSC and mAES strategies.

        Folds the probability of each hypothesis that is a prefix of a
        longer one into the longer hypothesis' score.

        Based on https://arxiv.org/pdf/1211.3711.pdf

        """
        for j, hyp_j in enumerate(hyps[:-1]):
            for hyp_i in hyps[(j + 1) :]:
                curr_id = len(hyp_j.yseq)
                pref_id = len(hyp_i.yseq)

                if (
                    is_prefix(hyp_j.yseq, hyp_i.yseq)
                    and (curr_id - pref_id) <= self.prefix_alpha
                ):
                    logp = torch.log_softmax(
                        self.joint_network(
                            enc_out_t, hyp_i.dec_out[-1], quantization=self.quantization
                        )
                        / self.softmax_temperature,
                        dim=-1,
                    )

                    curr_score = hyp_i.score + float(logp[hyp_j.yseq[pref_id]])

                    # Chain the remaining labels of the longer hypothesis.
                    for k in range(pref_id, (curr_id - 1)):
                        logp = torch.log_softmax(
                            self.joint_network(
                                enc_out_t,
                                hyp_j.dec_out[k],
                                quantization=self.quantization,
                            )
                            / self.softmax_temperature,
                            dim=-1,
                        )

                        curr_score += float(logp[hyp_j.yseq[k + 1]])

                    hyp_j.score = np.logaddexp(hyp_j.score, curr_score)

        return hyps

    def greedy_search(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Greedy search implementation.

        Args:
            enc_out: Encoder output sequence. (T, D_enc)

        Returns:
            hyp: 1-best hypotheses.

        """
        dec_state = self.decoder.init_state(1)

        hyp = Hypothesis(score=0.0, yseq=[self.blank_id], dec_state=dec_state)
        cache = {}

        dec_out, state, _ = self.decoder.score(hyp, cache)

        for enc_out_t in enc_out:
            logp = torch.log_softmax(
                self.joint_network(enc_out_t, dec_out, quantization=self.quantization)
                / self.softmax_temperature,
                dim=-1,
            )
            top_logp, pred = torch.max(logp, dim=-1)

            # Only advance the decoder when a non-blank label is emitted.
            if pred != self.blank_id:
                hyp.yseq.append(int(pred))
                hyp.score += float(top_logp)

                hyp.dec_state = state

                dec_out, state, _ = self.decoder.score(hyp, cache)

        return [hyp]

    def default_beam_search(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Beam search implementation.

        Modified from https://arxiv.org/pdf/1211.3711.pdf

        Args:
            enc_out: Encoder output sequence. (T, D)

        Returns:
            nbest_hyps: N-best hypothesis.

        """
        beam = min(self.beam_size, self.vocab_size)
        beam_k = min(beam, (self.vocab_size - 1))

        dec_state = self.decoder.init_state(1)

        kept_hyps = [Hypothesis(score=0.0, yseq=[self.blank_id], dec_state=dec_state)]
        cache = {}

        for enc_out_t in enc_out:
            hyps = kept_hyps
            kept_hyps = []

            while True:
                # Always expand the current best hypothesis first.
                max_hyp = max(hyps, key=lambda x: x.score)
                hyps.remove(max_hyp)

                dec_out, state, lm_tokens = self.decoder.score(max_hyp, cache)

                logp = torch.log_softmax(
                    self.joint_network(
                        enc_out_t, dec_out, quantization=self.quantization
                    )
                    / self.softmax_temperature,
                    dim=-1,
                )
                top_k = logp[1:].topk(beam_k, dim=-1)

                # Blank extension keeps the label sequence unchanged.
                kept_hyps.append(
                    Hypothesis(
                        score=(max_hyp.score + float(logp[0:1])),
                        yseq=max_hyp.yseq[:],
                        dec_state=max_hyp.dec_state,
                        lm_state=max_hyp.lm_state,
                    )
                )

                if self.use_lm:
                    lm_state, lm_scores = self.lm.predict(max_hyp.lm_state, lm_tokens)
                else:
                    lm_state = max_hyp.lm_state

                for logp, k in zip(*top_k):
                    score = max_hyp.score + float(logp)

                    if self.use_lm:
                        score += self.lm_weight * lm_scores[0][k + 1]

                    hyps.append(
                        Hypothesis(
                            score=score,
                            yseq=max_hyp.yseq[:] + [int(k + 1)],
                            dec_state=state,
                            lm_state=lm_state,
                        )
                    )

                # Stop once `beam` finished hypotheses outscore every
                # hypothesis still awaiting expansion.
                hyps_max = float(max(hyps, key=lambda x: x.score).score)
                kept_most_prob = sorted(
                    [hyp for hyp in kept_hyps if hyp.score > hyps_max],
                    key=lambda x: x.score,
                )
                if len(kept_most_prob) >= beam:
                    kept_hyps = kept_most_prob

                    break

        return self.sort_nbest(kept_hyps)

    def time_sync_decoding(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Time synchronous beam search implementation.

        Based on https://ieeexplore.ieee.org/document/9053040

        Args:
            enc_out: Encoder output sequence. (T, D)

        Returns:
            nbest_hyps: N-best hypothesis.

        """
        beam = min(self.beam_size, self.vocab_size)

        beam_state = self.decoder.init_state(beam)

        B = [
            Hypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=self.decoder.select_state(beam_state, 0),
            )
        ]
        cache = {}

        if self.use_lm and not self.is_wordlm:
            B[0].lm_state = init_lm_state(self.lm_predictor)

        for enc_out_t in enc_out:
            A = []
            C = B

            enc_out_t = enc_out_t.unsqueeze(0)

            # Allow up to max_sym_exp label emissions per frame.
            for v in range(self.max_sym_exp):
                D = []

                beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
                    C,
                    beam_state,
                    cache,
                    self.use_lm,
                )

                beam_logp = torch.log_softmax(
                    self.joint_network(enc_out_t, beam_dec_out)
                    / self.softmax_temperature,
                    dim=-1,
                )
                beam_topk = beam_logp[:, 1:].topk(beam, dim=-1)

                # Blank extensions are merged into A (recombination of
                # hypotheses sharing the same label sequence).
                seq_A = [h.yseq for h in A]

                for i, hyp in enumerate(C):
                    if hyp.yseq not in seq_A:
                        A.append(
                            Hypothesis(
                                score=(hyp.score + float(beam_logp[i, 0])),
                                yseq=hyp.yseq[:],
                                dec_state=hyp.dec_state,
                                lm_state=hyp.lm_state,
                            )
                        )
                    else:
                        dict_pos = seq_A.index(hyp.yseq)

                        A[dict_pos].score = np.logaddexp(
                            A[dict_pos].score, (hyp.score + float(beam_logp[i, 0]))
                        )

                if v < (self.max_sym_exp - 1):
                    if self.use_lm:
                        beam_lm_states = create_lm_batch_states(
                            [c.lm_state for c in C], self.lm_layers, self.is_wordlm
                        )

                        beam_lm_states, beam_lm_scores = self.lm.buff_predict(
                            beam_lm_states, beam_lm_tokens, len(C)
                        )

                    for i, hyp in enumerate(C):
                        for logp, k in zip(beam_topk[0][i], beam_topk[1][i] + 1):
                            new_hyp = Hypothesis(
                                score=(hyp.score + float(logp)),
                                yseq=(hyp.yseq + [int(k)]),
                                dec_state=self.decoder.select_state(beam_state, i),
                                lm_state=hyp.lm_state,
                            )

                            if self.use_lm:
                                new_hyp.score += self.lm_weight * beam_lm_scores[i, k]

                                new_hyp.lm_state = select_lm_state(
                                    beam_lm_states, i, self.lm_layers, self.is_wordlm
                                )

                            D.append(new_hyp)

                C = sorted(D, key=lambda x: x.score, reverse=True)[:beam]

            B = sorted(A, key=lambda x: x.score, reverse=True)[:beam]

        return self.sort_nbest(B)

    def align_length_sync_decoding(self, enc_out: torch.Tensor) -> List[Hypothesis]:
        """Alignment-length synchronous beam search implementation.

        Based on https://ieeexplore.ieee.org/document/9053040

        Args:
            h: Encoder output sequences. (T, D)

        Returns:
            nbest_hyps: N-best hypothesis.

        """
        beam = min(self.beam_size, self.vocab_size)

        t_max = int(enc_out.size(0))
        u_max = min(self.u_max, (t_max - 1))

        beam_state = self.decoder.init_state(beam)

        B = [
            Hypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=self.decoder.select_state(beam_state, 0),
            )
        ]
        final = []
        cache = {}

        if self.use_lm and not self.is_wordlm:
            B[0].lm_state = init_lm_state(self.lm_predictor)

        # i indexes the alignment length: t (frames consumed) + u (labels).
        for i in range(t_max + u_max):
            A = []

            B_ = []
            B_enc_out = []
            for hyp in B:
                u = len(hyp.yseq) - 1
                t = i - u

                if t > (t_max - 1):
                    continue

                B_.append(hyp)
                B_enc_out.append((t, enc_out[t]))

            if B_:
                beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
                    B_,
                    beam_state,
                    cache,
                    self.use_lm,
                )

                beam_enc_out = torch.stack([x[1] for x in B_enc_out])

                beam_logp = torch.log_softmax(
                    self.joint_network(beam_enc_out, beam_dec_out)
                    / self.softmax_temperature,
                    dim=-1,
                )
                beam_topk = beam_logp[:, 1:].topk(beam, dim=-1)

                if self.use_lm:
                    beam_lm_states = create_lm_batch_states(
                        [b.lm_state for b in B_], self.lm_layers, self.is_wordlm
                    )

                    beam_lm_states, beam_lm_scores = self.lm.buff_predict(
                        beam_lm_states, beam_lm_tokens, len(B_)
                    )

                for i, hyp in enumerate(B_):
                    new_hyp = Hypothesis(
                        score=(hyp.score + float(beam_logp[i, 0])),
                        yseq=hyp.yseq[:],
                        dec_state=hyp.dec_state,
                        lm_state=hyp.lm_state,
                    )

                    A.append(new_hyp)

                    # A blank at the last frame finalizes the hypothesis.
                    if B_enc_out[i][0] == (t_max - 1):
                        final.append(new_hyp)

                    for logp, k in zip(beam_topk[0][i], beam_topk[1][i] + 1):
                        new_hyp = Hypothesis(
                            score=(hyp.score + float(logp)),
                            yseq=(hyp.yseq[:] + [int(k)]),
                            dec_state=self.decoder.select_state(beam_state, i),
                            lm_state=hyp.lm_state,
                        )

                        if self.use_lm:
                            new_hyp.score += self.lm_weight * beam_lm_scores[i, k]

                            new_hyp.lm_state = select_lm_state(
                                beam_lm_states, i, self.lm_layers, self.is_wordlm
                            )

                        A.append(new_hyp)

                B = sorted(A, key=lambda x: x.score, reverse=True)[:beam]
                B = recombine_hyps(B)

        if final:
            return self.sort_nbest(final)
        else:
            return B

    def nsc_beam_search(self, enc_out: torch.Tensor) -> List[ExtendedHypothesis]:
        """N-step constrained beam search implementation.

        Based on/Modified from https://arxiv.org/pdf/2002.03577.pdf.
        Please reference ESPnet (b-flo, PR #2444) for any usage outside ESPnet
        until further modifications.

        Args:
            enc_out: Encoder output sequence. (T, D_enc)

        Returns:
            nbest_hyps: N-best hypothesis.

        """
        beam = min(self.beam_size, self.vocab_size)
        beam_k = min(beam, (self.vocab_size - 1))

        beam_state = self.decoder.init_state(beam)

        init_tokens = [
            ExtendedHypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=self.decoder.select_state(beam_state, 0),
            )
        ]

        cache = {}

        beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
            init_tokens,
            beam_state,
            cache,
            self.use_lm,
        )

        state = self.decoder.select_state(beam_state, 0)

        if self.use_lm:
            beam_lm_states, beam_lm_scores = self.lm.buff_predict(
                None, beam_lm_tokens, 1
            )
            lm_state = select_lm_state(
                beam_lm_states, 0, self.lm_layers, self.is_wordlm
            )
            lm_scores = beam_lm_scores[0]
        else:
            lm_state = None
            lm_scores = None

        kept_hyps = [
            ExtendedHypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=state,
                dec_out=[beam_dec_out[0]],
                lm_state=lm_state,
                lm_scores=lm_scores,
            )
        ]

        for enc_out_t in enc_out:
            # Fold prefix probabilities before expanding (see prefix_search).
            hyps = self.prefix_search(
                sorted(kept_hyps, key=lambda x: len(x.yseq), reverse=True),
                enc_out_t,
            )
            kept_hyps = []

            beam_enc_out = enc_out_t.unsqueeze(0)

            # S: blank-terminated hypotheses; V: expanded candidates.
            S = []
            V = []
            for n in range(self.nstep):
                beam_dec_out = torch.stack([hyp.dec_out[-1] for hyp in hyps])

                beam_logp = torch.log_softmax(
                    self.joint_network(beam_enc_out, beam_dec_out)
                    / self.softmax_temperature,
                    dim=-1,
                )
                beam_topk = beam_logp[:, 1:].topk(beam_k, dim=-1)

                for i, hyp in enumerate(hyps):
                    S.append(
                        ExtendedHypothesis(
                            yseq=hyp.yseq[:],
                            score=hyp.score + float(beam_logp[i, 0:1]),
                            dec_out=hyp.dec_out[:],
                            dec_state=hyp.dec_state,
                            lm_state=hyp.lm_state,
                            lm_scores=hyp.lm_scores,
                        )
                    )

                    for logp, k in zip(beam_topk[0][i], beam_topk[1][i] + 1):
                        score = hyp.score + float(logp)

                        if self.use_lm:
                            score += self.lm_weight * float(hyp.lm_scores[k])

                        V.append(
                            ExtendedHypothesis(
                                yseq=hyp.yseq[:] + [int(k)],
                                score=score,
                                dec_out=hyp.dec_out[:],
                                dec_state=hyp.dec_state,
                                lm_state=hyp.lm_state,
                                lm_scores=hyp.lm_scores,
                            )
                        )

                V.sort(key=lambda x: x.score, reverse=True)
                V = subtract(V, hyps)[:beam]

                beam_state = self.decoder.create_batch_states(
                    beam_state,
                    [v.dec_state for v in V],
                    [v.yseq for v in V],
                )
                beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
                    V,
                    beam_state,
                    cache,
                    self.use_lm,
                )

                if self.use_lm:
                    beam_lm_states = create_lm_batch_states(
                        [v.lm_state for v in V], self.lm_layers, self.is_wordlm
                    )
                    beam_lm_states, beam_lm_scores = self.lm.buff_predict(
                        beam_lm_states, beam_lm_tokens, len(V)
                    )

                if n < (self.nstep - 1):
                    for i, v in enumerate(V):
                        v.dec_out.append(beam_dec_out[i])

                        v.dec_state = self.decoder.select_state(beam_state, i)

                        if self.use_lm:
                            v.lm_state = select_lm_state(
                                beam_lm_states, i, self.lm_layers, self.is_wordlm
                            )
                            v.lm_scores = beam_lm_scores[i]

                    hyps = V[:]
                else:
                    # Final expansion step also accounts for the blank prob.
                    beam_logp = torch.log_softmax(
                        self.joint_network(beam_enc_out, beam_dec_out)
                        / self.softmax_temperature,
                        dim=-1,
                    )

                    for i, v in enumerate(V):
                        if self.nstep != 1:
                            v.score += float(beam_logp[i, 0])

                        v.dec_out.append(beam_dec_out[i])

                        v.dec_state = self.decoder.select_state(beam_state, i)

                        if self.use_lm:
                            v.lm_state = select_lm_state(
                                beam_lm_states, i, self.lm_layers, self.is_wordlm
                            )
                            v.lm_scores = beam_lm_scores[i]

            kept_hyps = sorted((S + V), key=lambda x: x.score, reverse=True)[:beam]

        return self.sort_nbest(kept_hyps)

    def modified_adaptive_expansion_search(
        self, enc_out: torch.Tensor
    ) -> List[ExtendedHypothesis]:
        """It's the modified Adaptive Expansion Search (mAES) implementation.

        Based on/modified from https://ieeexplore.ieee.org/document/9250505 and NSC.

        Args:
            enc_out: Encoder output sequence. (T, D_enc)

        Returns:
            nbest_hyps: N-best hypothesis.

        """
        beam = min(self.beam_size, self.vocab_size)

        beam_state = self.decoder.init_state(beam)

        init_tokens = [
            ExtendedHypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=self.decoder.select_state(beam_state, 0),
            )
        ]

        cache = {}

        beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
            init_tokens,
            beam_state,
            cache,
            self.use_lm,
        )

        state = self.decoder.select_state(beam_state, 0)

        if self.use_lm:
            beam_lm_states, beam_lm_scores = self.lm.buff_predict(
                None, beam_lm_tokens, 1
            )
            lm_state = select_lm_state(
                beam_lm_states, 0, self.lm_layers, self.is_wordlm
            )
            lm_scores = beam_lm_scores[0]
        else:
            lm_state = None
            lm_scores = None

        kept_hyps = [
            ExtendedHypothesis(
                yseq=[self.blank_id],
                score=0.0,
                dec_state=state,
                dec_out=[beam_dec_out[0]],
                lm_state=lm_state,
                lm_scores=lm_scores,
            )
        ]

        for enc_out_t in enc_out:
            # Fold prefix probabilities before expanding (see prefix_search).
            hyps = self.prefix_search(
                sorted(kept_hyps, key=lambda x: len(x.yseq), reverse=True),
                enc_out_t,
            )
            kept_hyps = []

            beam_enc_out = enc_out_t.unsqueeze(0)

            # list_b collects blank-terminated hypotheses for this frame.
            list_b = []
            duplication_check = [hyp.yseq for hyp in hyps]

            for n in range(self.nstep):
                beam_dec_out = torch.stack([h.dec_out[-1] for h in hyps])

                beam_logp, beam_idx = torch.log_softmax(
                    self.joint_network(beam_enc_out, beam_dec_out)
                    / self.softmax_temperature,
                    dim=-1,
                ).topk(self.max_candidates, dim=-1)

                # Prune-by-value: keep candidates within expansion_gamma
                # of the best log-prob.
                k_expansions = select_k_expansions(
                    hyps,
                    beam_idx,
                    beam_logp,
                    self.expansion_gamma,
                )

                list_exp = []
                for i, hyp in enumerate(hyps):
                    for k, new_score in k_expansions[i]:
                        new_hyp = ExtendedHypothesis(
                            yseq=hyp.yseq[:],
                            score=new_score,
                            dec_out=hyp.dec_out[:],
                            dec_state=hyp.dec_state,
                            lm_state=hyp.lm_state,
                            lm_scores=hyp.lm_scores,
                        )

                        if k == 0:
                            list_b.append(new_hyp)
                        else:
                            if new_hyp.yseq + [int(k)] not in duplication_check:
                                new_hyp.yseq.append(int(k))

                                if self.use_lm:
                                    new_hyp.score += self.lm_weight * float(
                                        hyp.lm_scores[k]
                                    )

                                list_exp.append(new_hyp)

                if not list_exp:
                    kept_hyps = sorted(list_b, key=lambda x: x.score, reverse=True)[
                        :beam
                    ]

                    break
                else:
                    beam_state = self.decoder.create_batch_states(
                        beam_state,
                        [hyp.dec_state for hyp in list_exp],
                        [hyp.yseq for hyp in list_exp],
                    )

                    beam_dec_out, beam_state, beam_lm_tokens = self.decoder.batch_score(
                        list_exp,
                        beam_state,
                        cache,
                        self.use_lm,
                    )

                    if self.use_lm:
                        beam_lm_states = create_lm_batch_states(
                            [hyp.lm_state for hyp in list_exp],
                            self.lm_layers,
                            self.is_wordlm,
                        )

                        beam_lm_states, beam_lm_scores = self.lm.buff_predict(
                            beam_lm_states, beam_lm_tokens, len(list_exp)
                        )

                    if n < (self.nstep - 1):
                        for i, hyp in enumerate(list_exp):
                            hyp.dec_out.append(beam_dec_out[i])
                            hyp.dec_state = self.decoder.select_state(beam_state, i)

                            if self.use_lm:
                                hyp.lm_state = select_lm_state(
                                    beam_lm_states, i, self.lm_layers, self.is_wordlm
                                )
                                hyp.lm_scores = beam_lm_scores[i]

                        hyps = list_exp[:]
                    else:
                        # Final step: fold in the blank probability.
                        beam_logp = torch.log_softmax(
                            self.joint_network(beam_enc_out, beam_dec_out)
                            / self.softmax_temperature,
                            dim=-1,
                        )

                        for i, hyp in enumerate(list_exp):
                            hyp.score += float(beam_logp[i, 0])

                            hyp.dec_out.append(beam_dec_out[i])
                            hyp.dec_state = self.decoder.select_state(beam_state, i)

                            if self.use_lm:
                                hyp.lm_state = select_lm_state(
                                    beam_lm_states, i, self.lm_layers, self.is_wordlm
                                )
                                hyp.lm_scores = beam_lm_scores[i]

                        kept_hyps = sorted(
                            list_b + list_exp, key=lambda x: x.score, reverse=True
                        )[:beam]

        return self.sort_nbest(kept_hyps)
| 30,731 | 33.26087 | 88 | py |
espnet | espnet-master/espnet/nets/batch_beam_search.py | """Parallel beam search module."""
import logging
from typing import Any, Dict, List, NamedTuple, Tuple
import torch
from packaging.version import parse as V
from torch.nn.utils.rnn import pad_sequence
from espnet.nets.beam_search import BeamSearch, Hypothesis
# torch >= 1.9 flag; gates the `rounding_mode` keyword of `torch.div` used in
# `BatchBeamSearch.batch_beam` (older torch uses `//` instead).
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class BatchHypothesis(NamedTuple):
    """Batchfied/Vectorized hypothesis data type."""
    yseq: torch.Tensor = torch.tensor([])  # (batch, maxlen)
    score: torch.Tensor = torch.tensor([])  # (batch,)
    length: torch.Tensor = torch.tensor([])  # (batch,)
    scores: Dict[str, torch.Tensor] = dict()  # values: (batch,)
    states: Dict[str, Dict] = dict()  # per-scorer states; each value is a list, one entry per hypothesis
    def __len__(self) -> int:
        """Return a batch size."""
        return len(self.length)
class BatchBeamSearch(BeamSearch):
"""Batch beam search implementation."""
def batchfy(self, hyps: List[Hypothesis]) -> BatchHypothesis:
"""Convert list to batch."""
if len(hyps) == 0:
return BatchHypothesis()
return BatchHypothesis(
yseq=pad_sequence(
[h.yseq for h in hyps], batch_first=True, padding_value=self.eos
),
length=torch.tensor([len(h.yseq) for h in hyps], dtype=torch.int64),
score=torch.tensor([h.score for h in hyps]),
scores={k: torch.tensor([h.scores[k] for h in hyps]) for k in self.scorers},
states={k: [h.states[k] for h in hyps] for k in self.scorers},
)
def _batch_select(self, hyps: BatchHypothesis, ids: List[int]) -> BatchHypothesis:
return BatchHypothesis(
yseq=hyps.yseq[ids],
score=hyps.score[ids],
length=hyps.length[ids],
scores={k: v[ids] for k, v in hyps.scores.items()},
states={
k: [self.scorers[k].select_state(v, i) for i in ids]
for k, v in hyps.states.items()
},
)
def _select(self, hyps: BatchHypothesis, i: int) -> Hypothesis:
return Hypothesis(
yseq=hyps.yseq[i, : hyps.length[i]],
score=hyps.score[i],
scores={k: v[i] for k, v in hyps.scores.items()},
states={
k: self.scorers[k].select_state(v, i) for k, v in hyps.states.items()
},
)
def unbatchfy(self, batch_hyps: BatchHypothesis) -> List[Hypothesis]:
"""Revert batch to list."""
return [
Hypothesis(
yseq=batch_hyps.yseq[i][: batch_hyps.length[i]],
score=batch_hyps.score[i],
scores={k: batch_hyps.scores[k][i] for k in self.scorers},
states={
k: v.select_state(batch_hyps.states[k], i)
for k, v in self.scorers.items()
},
)
for i in range(len(batch_hyps.length))
]
    def batch_beam(
        self, weighted_scores: torch.Tensor, ids: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Batch-compute topk full token ids and partial token ids.
        Args:
            weighted_scores (torch.Tensor): The weighted sum scores for each tokens.
            Its shape is `(n_beam, self.vocab_size)`.
            ids (torch.Tensor): The partial token ids to compute topk.
            Its shape is `(n_beam, self.pre_beam_size)`.
        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
                The topk full (prev_hyp, new_token) ids
                and partial (prev_hyp, new_token) ids.
                Their shapes are all `(self.beam_size,)`
        """
        # topk over the flattened (hyp, token) grid selects the beam_size best pairs
        top_ids = weighted_scores.view(-1).topk(self.beam_size)[1]
        # Because of the flatten above, `top_ids` is organized as:
        # [hyp1 * V + token1, hyp2 * V + token2, ..., hypK * V + tokenK],
        # where V is `self.n_vocab` and K is `self.beam_size`
        if is_torch_1_9_plus:
            # explicit truncating integer division (torch >= 1.9 API)
            prev_hyp_ids = torch.div(top_ids, self.n_vocab, rounding_mode="trunc")
        else:
            prev_hyp_ids = top_ids // self.n_vocab
        new_token_ids = top_ids % self.n_vocab
        # full and partial selections coincide in this implementation,
        # so the same (prev_hyp, new_token) pair is returned twice
        return prev_hyp_ids, new_token_ids, prev_hyp_ids, new_token_ids
def init_hyp(self, x: torch.Tensor) -> BatchHypothesis:
"""Get an initial hypothesis data.
Args:
x (torch.Tensor): The encoder output feature
Returns:
Hypothesis: The initial hypothesis.
"""
init_states = dict()
init_scores = dict()
for k, d in self.scorers.items():
init_states[k] = d.batch_init_state(x)
init_scores[k] = 0.0
# NOTE (Shih-Lun): added for OpenAI Whisper ASR
primer = [self.sos] if self.hyp_primer is None else self.hyp_primer
return self.batchfy(
[
Hypothesis(
score=0.0,
scores=init_scores,
states=init_states,
yseq=torch.tensor(primer, device=x.device),
)
]
)
def score_full(
self, hyp: BatchHypothesis, x: torch.Tensor
) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
"""Score new hypothesis by `self.full_scorers`.
Args:
hyp (Hypothesis): Hypothesis with prefix tokens to score
x (torch.Tensor): Corresponding input feature
Returns:
Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: Tuple of
score dict of `hyp` that has string keys of `self.full_scorers`
and tensor score values of shape: `(self.n_vocab,)`,
and state dict that has string keys
and state values of `self.full_scorers`
"""
scores = dict()
states = dict()
for k, d in self.full_scorers.items():
scores[k], states[k] = d.batch_score(hyp.yseq, hyp.states[k], x)
return scores, states
    def score_partial(
        self, hyp: BatchHypothesis, ids: torch.Tensor, x: torch.Tensor
    ) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
        """Score new hypothesis by `self.part_scorers`.
        Args:
            hyp (Hypothesis): Hypothesis with prefix tokens to score
            ids (torch.Tensor): 2D tensor of new partial tokens to score
            x (torch.Tensor): Corresponding input feature
        Returns:
            Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: Tuple of
                score dict of `hyp` that has string keys of `self.part_scorers`
                and tensor score values (full vocabulary sized, zero outside
                `ids` — see the NOTE in `search`),
                and state dict that has string keys
                and state values of `self.part_scorers`
        """
        scores = dict()
        states = dict()
        for k, d in self.part_scorers.items():
            scores[k], states[k] = d.batch_score_partial(
                hyp.yseq, ids, hyp.states[k], x
            )
        return scores, states
def merge_states(self, states: Any, part_states: Any, part_idx: int) -> Any:
"""Merge states for new hypothesis.
Args:
states: states of `self.full_scorers`
part_states: states of `self.part_scorers`
part_idx (int): The new token id for `part_scores`
Returns:
Dict[str, torch.Tensor]: The new score dict.
Its keys are names of `self.full_scorers` and `self.part_scorers`.
Its values are states of the scorers.
"""
new_states = dict()
for k, v in states.items():
new_states[k] = v
for k, v in part_states.items():
new_states[k] = v
return new_states
    def search(self, running_hyps: BatchHypothesis, x: torch.Tensor) -> BatchHypothesis:
        """Search new tokens for running hypotheses and encoded speech x.
        Args:
            running_hyps (BatchHypothesis): Running hypotheses on beam
            x (torch.Tensor): Encoded speech feature (T, D)
        Returns:
            BatchHypothesis: Best sorted hypotheses
        """
        n_batch = len(running_hyps)
        part_ids = None  # no pre-beam
        # batch scoring
        weighted_scores = torch.zeros(
            n_batch, self.n_vocab, dtype=x.dtype, device=x.device
        )
        # x is broadcast so every hypothesis in the batch scores against the
        # same encoder output
        scores, states = self.score_full(running_hyps, x.expand(n_batch, *x.shape))
        for k in self.full_scorers:
            weighted_scores += self.weights[k] * scores[k]
        # partial scoring
        if self.do_pre_beam:
            pre_beam_scores = (
                weighted_scores
                if self.pre_beam_score_key == "full"
                else scores[self.pre_beam_score_key]
            )
            part_ids = torch.topk(pre_beam_scores, self.pre_beam_size, dim=-1)[1]
        # NOTE(takaaki-hori): Unlike BeamSearch, we assume that score_partial returns
        # full-size score matrices, which has non-zero scores for part_ids and zeros
        # for others.
        part_scores, part_states = self.score_partial(running_hyps, part_ids, x)
        for k in self.part_scorers:
            weighted_scores += self.weights[k] * part_scores[k]
        # add previous hyp scores
        weighted_scores += running_hyps.score.to(
            dtype=x.dtype, device=x.device
        ).unsqueeze(1)
        # TODO(karita): do not use list. use batch instead
        # see also https://github.com/espnet/espnet/pull/1402#discussion_r354561029
        # update hyps
        best_hyps = []
        prev_hyps = self.unbatchfy(running_hyps)
        # batch_beam yields, for each surviving beam slot, the index of the
        # parent hypothesis and the new token id (full and partial variants)
        for (
            full_prev_hyp_id,
            full_new_token_id,
            part_prev_hyp_id,
            part_new_token_id,
        ) in zip(*self.batch_beam(weighted_scores, part_ids)):
            prev_hyp = prev_hyps[full_prev_hyp_id]
            best_hyps.append(
                Hypothesis(
                    score=weighted_scores[full_prev_hyp_id, full_new_token_id],
                    yseq=self.append_token(prev_hyp.yseq, full_new_token_id),
                    scores=self.merge_scores(
                        prev_hyp.scores,
                        {k: v[full_prev_hyp_id] for k, v in scores.items()},
                        full_new_token_id,
                        {k: v[part_prev_hyp_id] for k, v in part_scores.items()},
                        part_new_token_id,
                    ),
                    states=self.merge_states(
                        {
                            k: self.full_scorers[k].select_state(v, full_prev_hyp_id)
                            for k, v in states.items()
                        },
                        {
                            k: self.part_scorers[k].select_state(
                                v, part_prev_hyp_id, part_new_token_id
                            )
                            for k, v in part_states.items()
                        },
                        part_new_token_id,
                    ),
                )
            )
        return self.batchfy(best_hyps)
    def post_process(
        self,
        i: int,
        maxlen: int,
        maxlenratio: float,
        running_hyps: BatchHypothesis,
        ended_hyps: List[Hypothesis],
    ) -> BatchHypothesis:
        """Perform post-processing of beam search iterations.
        Args:
            i (int): The length of hypothesis tokens.
            maxlen (int): The maximum length of tokens in beam search.
            maxlenratio (int): The maximum length ratio in beam search.
            running_hyps (BatchHypothesis): The running hypotheses in beam search.
            ended_hyps (List[Hypothesis]): The ended hypotheses in beam search.
                Finished hypotheses are appended to this list in place.
        Returns:
            BatchHypothesis: The new running hypotheses.
        """
        n_batch = running_hyps.yseq.shape[0]
        logging.debug(f"the number of running hypothes: {n_batch}")
        if self.token_list is not None:
            logging.debug(
                "best hypo: "
                + "".join(
                    [
                        self.token_list[x]
                        for x in running_hyps.yseq[0, 1 : running_hyps.length[0]]
                    ]
                )
            )
        # add eos in the final loop to avoid that there are no ended hyps
        if i == maxlen - 1:
            logging.info("adding <eos> in the last position in the loop")
            yseq_eos = torch.cat(
                (
                    running_hyps.yseq,
                    torch.full(
                        (n_batch, 1),
                        self.eos,
                        device=running_hyps.yseq.device,
                        dtype=torch.int64,
                    ),
                ),
                1,
            )
            # NOTE: BatchHypothesis is a NamedTuple, so the fields cannot be
            # rebound; the yseq/length tensors are grown and overwritten in place
            running_hyps.yseq.resize_as_(yseq_eos)
            running_hyps.yseq[:] = yseq_eos
            running_hyps.length[:] = yseq_eos.shape[1]
        # add ended hypotheses to a final list, and removed them from current hypotheses
        # (this will be a problem, number of hyps < beam)
        is_eos = (
            running_hyps.yseq[torch.arange(n_batch), running_hyps.length - 1]
            == self.eos
        )
        for b in torch.nonzero(is_eos, as_tuple=False).view(-1):
            hyp = self._select(running_hyps, b)
            ended_hyps.append(hyp)
        remained_ids = torch.nonzero(is_eos == 0, as_tuple=False).view(-1).cpu()
        return self._batch_select(running_hyps, remained_ids)
| 13,532 | 37.228814 | 88 | py |
espnet | espnet-master/espnet/nets/beam_search.py | """Beam search module."""
import logging
from itertools import chain
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import torch
from espnet.nets.e2e_asr_common import end_detect
from espnet.nets.scorer_interface import PartialScorerInterface, ScorerInterface
class Hypothesis(NamedTuple):
"""Hypothesis data type."""
yseq: torch.Tensor
score: Union[float, torch.Tensor] = 0
scores: Dict[str, Union[float, torch.Tensor]] = dict()
states: Dict[str, Any] = dict()
def asdict(self) -> dict:
"""Convert data to JSON-friendly dict."""
return self._replace(
yseq=self.yseq.tolist(),
score=float(self.score),
scores={k: float(v) for k, v in self.scores.items()},
)._asdict()
class BeamSearch(torch.nn.Module):
"""Beam search implementation."""
    def __init__(
        self,
        scorers: Dict[str, ScorerInterface],
        weights: Dict[str, float],
        beam_size: int,
        vocab_size: int,
        sos: int,
        eos: int,
        token_list: List[str] = None,
        pre_beam_ratio: float = 1.5,
        pre_beam_score_key: str = None,
        hyp_primer: List[int] = None,
    ):
        """Initialize beam search.
        Args:
            scorers (dict[str, ScorerInterface]): Dict of decoder modules
                e.g., Decoder, CTCPrefixScorer, LM
                The scorer will be ignored if it is `None`
            weights (dict[str, float]): Dict of weights for each scorers
                The scorer will be ignored if its weight is 0
            beam_size (int): The number of hypotheses kept during search
            vocab_size (int): The number of vocabulary
            sos (int): Start of sequence id
            eos (int): End of sequence id
            token_list (list[str]): List of tokens for debug log
            pre_beam_score_key (str): key of scores to perform pre-beam search
            pre_beam_ratio (float): beam size in the pre-beam search
                will be `int(pre_beam_ratio * beam_size)`
            hyp_primer (list[int]): Optional token sequence replacing `[sos]`
                as the decoding prefix (used for OpenAI Whisper decoding)
        """
        super().__init__()
        # set scorers
        self.weights = weights
        self.scorers = dict()
        self.full_scorers = dict()
        self.part_scorers = dict()
        # this module dict is required for recursive cast
        # `self.to(device, dtype)` in `recog.py`
        self.nn_dict = torch.nn.ModuleDict()
        # split scorers into full-vocabulary scorers and partial scorers,
        # dropping any scorer that is None or has zero weight
        for k, v in scorers.items():
            w = weights.get(k, 0)
            if w == 0 or v is None:
                continue
            assert isinstance(
                v, ScorerInterface
            ), f"{k} ({type(v)}) does not implement ScorerInterface"
            self.scorers[k] = v
            if isinstance(v, PartialScorerInterface):
                self.part_scorers[k] = v
            else:
                self.full_scorers[k] = v
            if isinstance(v, torch.nn.Module):
                self.nn_dict[k] = v
        # set configurations
        self.sos = sos
        self.eos = eos
        # added for OpenAI Whisper decoding
        self.hyp_primer = hyp_primer
        self.token_list = token_list
        self.pre_beam_size = int(pre_beam_ratio * beam_size)
        self.beam_size = beam_size
        self.n_vocab = vocab_size
        if (
            pre_beam_score_key is not None
            and pre_beam_score_key != "full"
            and pre_beam_score_key not in self.full_scorers
        ):
            raise KeyError(f"{pre_beam_score_key} is not found in {self.full_scorers}")
        self.pre_beam_score_key = pre_beam_score_key
        # pre-beam pruning is only active when a score key is set, the pre-beam
        # is actually smaller than the vocabulary, and partial scorers exist
        self.do_pre_beam = (
            self.pre_beam_score_key is not None
            and self.pre_beam_size < self.n_vocab
            and len(self.part_scorers) > 0
        )
    def set_hyp_primer(self, hyp_primer: List[int] = None) -> None:
        """Set the primer sequence for decoding.
        Used for OpenAI Whisper models. When None, decoding starts from
        `self.sos` (see `init_hyp`).
        """
        self.hyp_primer = hyp_primer
def init_hyp(self, x: torch.Tensor) -> List[Hypothesis]:
"""Get an initial hypothesis data.
Args:
x (torch.Tensor): The encoder output feature
Returns:
Hypothesis: The initial hypothesis.
"""
init_states = dict()
init_scores = dict()
for k, d in self.scorers.items():
init_states[k] = d.init_state(x)
init_scores[k] = 0.0
# NOTE (Shih-Lun): added for OpenAI Whisper ASR
primer = [self.sos] if self.hyp_primer is None else self.hyp_primer
return [
Hypothesis(
score=0.0,
scores=init_scores,
states=init_states,
yseq=torch.tensor(primer, device=x.device),
)
]
@staticmethod
def append_token(xs: torch.Tensor, x: int) -> torch.Tensor:
"""Append new token to prefix tokens.
Args:
xs (torch.Tensor): The prefix token
x (int): The new token to append
Returns:
torch.Tensor: New tensor contains: xs + [x] with xs.dtype and xs.device
"""
x = torch.tensor([x], dtype=xs.dtype, device=xs.device)
return torch.cat((xs, x))
def score_full(
self, hyp: Hypothesis, x: torch.Tensor
) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
"""Score new hypothesis by `self.full_scorers`.
Args:
hyp (Hypothesis): Hypothesis with prefix tokens to score
x (torch.Tensor): Corresponding input feature
Returns:
Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: Tuple of
score dict of `hyp` that has string keys of `self.full_scorers`
and tensor score values of shape: `(self.n_vocab,)`,
and state dict that has string keys
and state values of `self.full_scorers`
"""
scores = dict()
states = dict()
for k, d in self.full_scorers.items():
scores[k], states[k] = d.score(hyp.yseq, hyp.states[k], x)
return scores, states
def score_partial(
self, hyp: Hypothesis, ids: torch.Tensor, x: torch.Tensor
) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
"""Score new hypothesis by `self.part_scorers`.
Args:
hyp (Hypothesis): Hypothesis with prefix tokens to score
ids (torch.Tensor): 1D tensor of new partial tokens to score
x (torch.Tensor): Corresponding input feature
Returns:
Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: Tuple of
score dict of `hyp` that has string keys of `self.part_scorers`
and tensor score values of shape: `(len(ids),)`,
and state dict that has string keys
and state values of `self.part_scorers`
"""
scores = dict()
states = dict()
for k, d in self.part_scorers.items():
scores[k], states[k] = d.score_partial(hyp.yseq, ids, hyp.states[k], x)
return scores, states
    def beam(
        self, weighted_scores: torch.Tensor, ids: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute topk full token ids and partial token ids.
        Args:
            weighted_scores (torch.Tensor): The weighted sum scores for each tokens.
            Its shape is `(self.n_vocab,)`.
            ids (torch.Tensor): The partial token ids to compute topk
        Returns:
            Tuple[torch.Tensor, torch.Tensor]:
                The topk full token ids and partial token ids.
                Their shapes are `(self.beam_size,)`
        """
        # no pre beam performed
        if weighted_scores.size(0) == ids.size(0):
            top_ids = weighted_scores.topk(self.beam_size)[1]
            return top_ids, top_ids
        # mask pruned in pre-beam not to select in topk
        # NOTE: `weighted_scores` is modified in place here — every entry
        # outside `ids` becomes -inf so topk can only pick pre-beam survivors
        tmp = weighted_scores[ids]
        weighted_scores[:] = -float("inf")
        weighted_scores[ids] = tmp
        top_ids = weighted_scores.topk(self.beam_size)[1]
        # local_ids index into `ids`, i.e. partial-scorer coordinates
        local_ids = weighted_scores[ids].topk(self.beam_size)[1]
        return top_ids, local_ids
@staticmethod
def merge_scores(
prev_scores: Dict[str, float],
next_full_scores: Dict[str, torch.Tensor],
full_idx: int,
next_part_scores: Dict[str, torch.Tensor],
part_idx: int,
) -> Dict[str, torch.Tensor]:
"""Merge scores for new hypothesis.
Args:
prev_scores (Dict[str, float]):
The previous hypothesis scores by `self.scorers`
next_full_scores (Dict[str, torch.Tensor]): scores by `self.full_scorers`
full_idx (int): The next token id for `next_full_scores`
next_part_scores (Dict[str, torch.Tensor]):
scores of partial tokens by `self.part_scorers`
part_idx (int): The new token id for `next_part_scores`
Returns:
Dict[str, torch.Tensor]: The new score dict.
Its keys are names of `self.full_scorers` and `self.part_scorers`.
Its values are scalar tensors by the scorers.
"""
new_scores = dict()
for k, v in next_full_scores.items():
new_scores[k] = prev_scores[k] + v[full_idx]
for k, v in next_part_scores.items():
new_scores[k] = prev_scores[k] + v[part_idx]
return new_scores
def merge_states(self, states: Any, part_states: Any, part_idx: int) -> Any:
"""Merge states for new hypothesis.
Args:
states: states of `self.full_scorers`
part_states: states of `self.part_scorers`
part_idx (int): The new token id for `part_scores`
Returns:
Dict[str, torch.Tensor]: The new score dict.
Its keys are names of `self.full_scorers` and `self.part_scorers`.
Its values are states of the scorers.
"""
new_states = dict()
for k, v in states.items():
new_states[k] = v
for k, d in self.part_scorers.items():
new_states[k] = d.select_state(part_states[k], part_idx)
return new_states
    def search(
        self, running_hyps: List[Hypothesis], x: torch.Tensor
    ) -> List[Hypothesis]:
        """Search new tokens for running hypotheses and encoded speech x.
        Args:
            running_hyps (List[Hypothesis]): Running hypotheses on beam
            x (torch.Tensor): Encoded speech feature (T, D)
        Returns:
            List[Hypotheses]: Best sorted hypotheses
        """
        best_hyps = []
        part_ids = torch.arange(self.n_vocab, device=x.device)  # no pre-beam
        for hyp in running_hyps:
            # scoring
            weighted_scores = torch.zeros(self.n_vocab, dtype=x.dtype, device=x.device)
            scores, states = self.score_full(hyp, x)
            for k in self.full_scorers:
                weighted_scores += self.weights[k] * scores[k]
            # partial scoring
            if self.do_pre_beam:
                pre_beam_scores = (
                    weighted_scores
                    if self.pre_beam_score_key == "full"
                    else scores[self.pre_beam_score_key]
                )
                part_ids = torch.topk(pre_beam_scores, self.pre_beam_size)[1]
            part_scores, part_states = self.score_partial(hyp, part_ids, x)
            # partial scores are only added at the surviving candidate ids
            for k in self.part_scorers:
                weighted_scores[part_ids] += self.weights[k] * part_scores[k]
            # add previous hyp score
            weighted_scores += hyp.score
            # update hyps
            for j, part_j in zip(*self.beam(weighted_scores, part_ids)):
                # will be (2 x beam at most)
                best_hyps.append(
                    Hypothesis(
                        score=weighted_scores[j],
                        yseq=self.append_token(hyp.yseq, j),
                        scores=self.merge_scores(
                            hyp.scores, scores, j, part_scores, part_j
                        ),
                        states=self.merge_states(states, part_states, part_j),
                    )
                )
            # sort and prune 2 x beam -> beam
            best_hyps = sorted(best_hyps, key=lambda x: x.score, reverse=True)[
                : min(len(best_hyps), self.beam_size)
            ]
        return best_hyps
    def forward(
        self, x: torch.Tensor, maxlenratio: float = 0.0, minlenratio: float = 0.0
    ) -> List[Hypothesis]:
        """Perform beam search.
        Args:
            x (torch.Tensor): Encoded speech feature (T, D)
            maxlenratio (float): Input length ratio to obtain max output length.
                If maxlenratio=0.0 (default), it uses a end-detect function
                to automatically find maximum hypothesis lengths
                If maxlenratio<0.0, its absolute value is interpreted
                as a constant max output length.
            minlenratio (float): Input length ratio to obtain min output length.
                If minlenratio<0.0, its absolute value is interpreted
                as a constant min output length.
        Returns:
            list[Hypothesis]: N-best decoding results
        """
        # set length bounds
        if maxlenratio == 0:
            maxlen = x.shape[0]
        elif maxlenratio < 0:
            maxlen = -1 * int(maxlenratio)
        else:
            maxlen = max(1, int(maxlenratio * x.size(0)))
        if minlenratio < 0:
            minlen = -1 * int(minlenratio)
        else:
            minlen = int(minlenratio * x.size(0))
        # NOTE(review): minlen is only logged below; no minimum-length
        # constraint is applied inside the loop — short results instead
        # trigger the recursive retry with a reduced minlenratio
        logging.info("decoder input length: " + str(x.shape[0]))
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))
        # main loop of prefix search
        running_hyps = self.init_hyp(x)
        ended_hyps = []
        for i in range(maxlen):
            logging.debug("position " + str(i))
            best = self.search(running_hyps, x)
            # post process of one iteration
            running_hyps = self.post_process(i, maxlen, maxlenratio, best, ended_hyps)
            # end detection
            if maxlenratio == 0.0 and end_detect([h.asdict() for h in ended_hyps], i):
                logging.info(f"end detected at {i}")
                break
            if len(running_hyps) == 0:
                logging.info("no hypothesis. Finish decoding.")
                break
            else:
                logging.debug(f"remained hypotheses: {len(running_hyps)}")
        nbest_hyps = sorted(ended_hyps, key=lambda x: x.score, reverse=True)
        # check the number of hypotheses reaching to eos
        if len(nbest_hyps) == 0:
            logging.warning(
                "there is no N-best results, perform recognition "
                "again with smaller minlenratio."
            )
            # retry with a relaxed minlenratio; give up once it reaches 0
            return (
                []
                if minlenratio < 0.1
                else self.forward(x, maxlenratio, max(0.0, minlenratio - 0.1))
            )
        # report the best result
        best = nbest_hyps[0]
        for k, v in best.scores.items():
            logging.info(
                f"{v:6.2f} * {self.weights[k]:3} = {v * self.weights[k]:6.2f} for {k}"
            )
        logging.info(f"total log probability: {best.score:.2f}")
        logging.info(f"normalized log probability: {best.score / len(best.yseq):.2f}")
        logging.info(f"total number of ended hypotheses: {len(nbest_hyps)}")
        if self.token_list is not None:
            logging.info(
                "best hypo: "
                + "".join([self.token_list[x] for x in best.yseq[1:-1]])
                + "\n"
            )
        if best.yseq[1:-1].shape[0] == maxlen:
            logging.warning(
                "best hypo length: {} == max output length: {}".format(
                    best.yseq[1:-1].shape[0], maxlen
                )
            )
            logging.warning(
                "decoding may be stopped by the max output length limitation, "
                + "please consider to increase the maxlenratio."
            )
        return nbest_hyps
    def post_process(
        self,
        i: int,
        maxlen: int,
        maxlenratio: float,
        running_hyps: List[Hypothesis],
        ended_hyps: List[Hypothesis],
    ) -> List[Hypothesis]:
        """Perform post-processing of beam search iterations.
        Args:
            i (int): The length of hypothesis tokens.
            maxlen (int): The maximum length of tokens in beam search.
            maxlenratio (int): The maximum length ratio in beam search.
                NOTE(review): unused in this method; kept for interface
                parity with subclass overrides.
            running_hyps (List[Hypothesis]): The running hypotheses in beam search.
            ended_hyps (List[Hypothesis]): The ended hypotheses in beam search.
                Finished hypotheses are appended to this list in place.
        Returns:
            List[Hypothesis]: The new running hypotheses.
        """
        logging.debug(f"the number of running hypotheses: {len(running_hyps)}")
        if self.token_list is not None:
            logging.debug(
                "best hypo: "
                + "".join([self.token_list[x] for x in running_hyps[0].yseq[1:]])
            )
        # add eos in the final loop to avoid that there are no ended hyps
        if i == maxlen - 1:
            logging.info("adding <eos> in the last position in the loop")
            running_hyps = [
                h._replace(yseq=self.append_token(h.yseq, self.eos))
                for h in running_hyps
            ]
        # add ended hypotheses to a final list, and removed them from current hypotheses
        # (this will be a problem, number of hyps < beam)
        remained_hyps = []
        for hyp in running_hyps:
            if hyp.yseq[-1] == self.eos:
                # e.g., Word LM needs to add final <eos> score
                for k, d in chain(self.full_scorers.items(), self.part_scorers.items()):
                    s = d.final_score(hyp.states[k])
                    # hyp.scores is a dict, mutated in place; only the total
                    # score needs _replace because Hypothesis is a NamedTuple
                    hyp.scores[k] += s
                    hyp = hyp._replace(score=hyp.score + self.weights[k] * s)
                ended_hyps.append(hyp)
            else:
                remained_hyps.append(hyp)
        return remained_hyps
def beam_search(
    x: torch.Tensor,
    sos: int,
    eos: int,
    beam_size: int,
    vocab_size: int,
    scorers: Dict[str, ScorerInterface],
    weights: Dict[str, float],
    token_list: List[str] = None,
    maxlenratio: float = 0.0,
    minlenratio: float = 0.0,
    pre_beam_ratio: float = 1.5,
    pre_beam_score_key: str = "full",
) -> list:
    """Perform beam search with scorers.
    Args:
        x (torch.Tensor): Encoded speech feature (T, D)
        sos (int): Start of sequence id
        eos (int): End of sequence id
        beam_size (int): The number of hypotheses kept during search
        vocab_size (int): The number of vocabulary
        scorers (dict[str, ScorerInterface]): Dict of decoder modules
            e.g., Decoder, CTCPrefixScorer, LM
            The scorer will be ignored if it is `None`
        weights (dict[str, float]): Dict of weights for each scorers
            The scorer will be ignored if its weight is 0
        token_list (list[str]): List of tokens for debug log
        maxlenratio (float): Input length ratio to obtain max output length.
            If maxlenratio=0.0 (default), it uses a end-detect function
            to automatically find maximum hypothesis lengths
        minlenratio (float): Input length ratio to obtain min output length.
        pre_beam_score_key (str): key of scores to perform pre-beam search
        pre_beam_ratio (float): beam size in the pre-beam search
            will be `int(pre_beam_ratio * beam_size)`
    Returns:
        list: N-best decoding results
    """
    searcher = BeamSearch(
        scorers,
        weights,
        beam_size=beam_size,
        vocab_size=vocab_size,
        pre_beam_ratio=pre_beam_ratio,
        pre_beam_score_key=pre_beam_score_key,
        sos=sos,
        eos=eos,
        token_list=token_list,
    )
    nbest = searcher.forward(x=x, maxlenratio=maxlenratio, minlenratio=minlenratio)
    # return JSON-friendly dicts instead of Hypothesis tuples
    return [hyp.asdict() for hyp in nbest]
| 20,199 | 36.269373 | 88 | py |
espnet | espnet-master/espnet/nets/e2e_mt_common.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Common functions for ST and MT."""
import nltk
import numpy as np
class ErrorCalculator(object):
    """Calculate BLEU for ST and MT models during training.

    :param char_list: vocabulary list
    :param sym_space: space symbol
    :param sym_pad: pad symbol
    :param report_bleu: report BLEU score if True
    """

    def __init__(self, char_list, sym_space, sym_pad, report_bleu=False):
        """Construct an ErrorCalculator object."""
        super(ErrorCalculator, self).__init__()
        self.char_list = char_list
        self.space = sym_space
        self.pad = sym_pad
        self.report_bleu = report_bleu
        # cache the vocabulary index of the space symbol, if present
        self.idx_space = (
            self.char_list.index(self.space) if self.space in self.char_list else None
        )

    def __call__(self, ys_hat, ys_pad):
        """Calculate corpus-level BLEU score.

        :param torch.Tensor ys_hat: prediction (batch, seqlen)
        :param torch.Tensor ys_pad: reference (batch, seqlen)
        :return: corpus-level BLEU score in a mini-batch, or None when
            reporting is disabled
        :rtype float
        """
        if not self.report_bleu:
            return None
        return self.calculate_corpus_bleu(ys_hat, ys_pad)

    def calculate_corpus_bleu(self, ys_hat, ys_pad):
        """Calculate corpus-level BLEU score in a mini-batch.

        :param torch.Tensor seqs_hat: prediction (batch, seqlen)
        :param torch.Tensor seqs_true: reference (batch, seqlen)
        :return: corpus-level BLEU score
        :rtype float
        """
        hyp_texts, ref_texts = [], []
        for i, y_hat in enumerate(ys_hat):
            y_true = ys_pad[i]
            # -1 is the padding index in y_true; the first -1 bounds the
            # usable length of y_hat as well, since y_hat is not padded
            pad_positions = np.where(y_true == -1)[0]
            ymax = pad_positions[0] if len(pad_positions) > 0 else len(y_true)
            hyp_tokens = [self.char_list[int(idx)] for idx in y_hat[:ymax]]
            ref_tokens = [self.char_list[int(idx)] for idx in y_true if int(idx) != -1]
            hyp_text = "".join(hyp_tokens).replace(self.space, " ")
            hyp_text = hyp_text.replace(self.pad, "")
            ref_text = "".join(ref_tokens).replace(self.space, " ")
            hyp_texts.append(hyp_text)
            ref_texts.append(ref_text)
        bleu = nltk.bleu_score.corpus_bleu([[ref] for ref in ref_texts], hyp_texts)
        return bleu * 100
| 2,731 | 35.426667 | 85 | py |
espnet | espnet-master/espnet/nets/lm_interface.py | """Language model interface."""
import argparse
from espnet.nets.scorer_interface import ScorerInterface
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.fill_missing_args import fill_missing_args
class LMInterface(ScorerInterface):
    """LM Interface for ESPnet model implementation."""
    @staticmethod
    def add_arguments(parser):
        """Add model-specific arguments to the command line parser."""
        return parser
    @classmethod
    def build(cls, n_vocab: int, **kwargs):
        """Construct an LM instance from python-level keyword arguments.
        Args:
            n_vocab (int): The number of vocabulary.
        Returns:
            LMinterface: A new instance of LMInterface.
        """
        # local import to avoid cyclic import in lm_train
        from espnet.bin.lm_train import get_parser
        args = argparse.Namespace(**kwargs)
        # fill defaults from the lm_train parser first, then from this class
        args = fill_missing_args(args, lambda parser: get_parser(parser, required=False))
        args = fill_missing_args(args, cls.add_arguments)
        return cls(n_vocab, args)
    def forward(self, x, t):
        """Compute LM loss value from buffer sequences.
        Args:
            x (torch.Tensor): Input ids. (batch, len)
            t (torch.Tensor): Target ids. (batch, len)
        Returns:
            tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Tuple of
                loss to backward (scalar),
                negative log-likelihood of t: -log p(t) (scalar) and
                the number of elements in x (scalar)
        Notes:
            The last two return values are used
            in perplexity: p(t)^{-n} = exp(-log p(t) / n)
        """
        raise NotImplementedError("forward method is not implemented")
# Backend-specific alias table used by `dynamic_import_lm`: maps a short
# alias to a "module:class" path of an LMInterface implementation.
predefined_lms = {
    "pytorch": {
        "default": "espnet.nets.pytorch_backend.lm.default:DefaultRNNLM",
        "seq_rnn": "espnet.nets.pytorch_backend.lm.seq_rnn:SequentialRNNLM",
        "transformer": "espnet.nets.pytorch_backend.lm.transformer:TransformerLM",
    },
    "chainer": {"default": "espnet.lm.chainer_backend.lm:DefaultRNNLM"},
}
def dynamic_import_lm(module, backend):
    """Import LM class dynamically.
    Args:
        module (str): module_name:class_name or alias in `predefined_lms`
        backend (str): NN backend. e.g., pytorch, chainer
    Returns:
        type: LM class
    """
    # unknown backends simply provide no aliases; `module` must then be a full path
    aliases = predefined_lms.get(backend, dict())
    lm_class = dynamic_import(module, aliases)
    assert issubclass(
        lm_class, LMInterface
    ), f"{module} does not implement LMInterface"
    return lm_class
| 2,590 | 28.781609 | 82 | py |
espnet | espnet-master/espnet/nets/scorer_interface.py | """Scorer interface module."""
import warnings
from typing import Any, List, Tuple
import torch
class ScorerInterface:
    """Scorer interface for beam search.
    A scorer assigns a score to every token in the vocabulary given a
    hypothesis prefix.
    Examples:
        * Search heuristics
            * :class:`espnet.nets.scorers.length_bonus.LengthBonus`
        * Decoder networks of the sequence-to-sequence models
            * :class:`espnet.nets.pytorch_backend.nets.transformer.decoder.Decoder`
            * :class:`espnet.nets.pytorch_backend.nets.rnn.decoders.Decoder`
        * Neural language models
            * :class:`espnet.nets.pytorch_backend.lm.transformer.TransformerLM`
            * :class:`espnet.nets.pytorch_backend.lm.default.DefaultRNNLM`
            * :class:`espnet.nets.pytorch_backend.lm.seq_rnn.SequentialRNNLM`
    """
    def init_state(self, x: torch.Tensor) -> Any:
        """Get an initial state for decoding (optional).
        Args:
            x (torch.Tensor): The encoded feature tensor
        Returns: initial state
        """
        return None
    def select_state(self, state: Any, i: int, new_id: int = None) -> Any:
        """Select state with relative ids in the main beam search.
        Args:
            state: Decoder state for prefix tokens
            i (int): Index to select a state in the main beam search
            new_id (int): New label index to select a state if necessary
        Returns:
            state: pruned state
        """
        if state is None:
            return None
        return state[i]
    def score(
        self, y: torch.Tensor, state: Any, x: torch.Tensor
    ) -> Tuple[torch.Tensor, Any]:
        """Score new token (required).
        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens
            x (torch.Tensor): The encoder feature that generates ys.
        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                scores for next token that has a shape of `(n_vocab)`
                and next state for ys
        """
        raise NotImplementedError
    def final_score(self, state: Any) -> float:
        """Score eos (optional).
        Args:
            state: Scorer state for prefix tokens
        Returns:
            float: final score
        """
        return 0.0
class BatchScorerInterface(ScorerInterface):
    """Batch scorer interface."""
    def batch_init_state(self, x: torch.Tensor) -> Any:
        """Get an initial state for batched decoding (optional).
        Args:
            x (torch.Tensor): The encoded feature tensor
        Returns: initial state
        """
        return self.init_state(x)
    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch (required).
        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).
        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.
        """
        # default fallback: loop over the batch and call the per-item scorer
        warnings.warn(
            "{} batch score is implemented through for loop not parallelized".format(
                self.__class__.__name__
            )
        )
        score_list = []
        state_list = []
        for y, state, x in zip(ys, states, xs):
            one_score, one_state = self.score(y, state, x)
            score_list.append(one_score)
            state_list.append(one_state)
        stacked = torch.cat(score_list, 0).view(ys.shape[0], -1)
        return stacked, state_list
class PartialScorerInterface(ScorerInterface):
    """Interface for scorers that only score a pre-pruned token subset.

    A partial scorer runs after the non-partial scorers and receives the
    already-pruned candidate tokens, because scoring the whole vocabulary
    would be too expensive for it.

    Examples:
        * Prefix search for connectionist-temporal-classification models
            * :class:`espnet.nets.scorers.ctc.CTCPrefixScorer`
    """

    def score_partial(
        self, y: torch.Tensor, next_tokens: torch.Tensor, state: Any, x: torch.Tensor
    ) -> Tuple[torch.Tensor, Any]:
        """Score the given candidate tokens (must be overridden).

        Args:
            y (torch.Tensor): 1D prefix token
            next_tokens (torch.Tensor): torch.int64 next token to score
            state: decoder state for prefix tokens
            x (torch.Tensor): The encoder feature that generates ys

        Returns:
            tuple[torch.Tensor, Any]: scores shaped ``(len(next_tokens),)``
            and the next state for ``y``.
        """
        raise NotImplementedError
class BatchPartialScorerInterface(BatchScorerInterface, PartialScorerInterface):
    """Partial scorer interface with batched scoring for beam search."""

    def batch_score_partial(
        self,
        ys: torch.Tensor,
        next_tokens: torch.Tensor,
        states: List[Any],
        xs: torch.Tensor,
    ) -> Tuple[torch.Tensor, Any]:
        """Score a batch of candidate tokens (must be overridden).

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            next_tokens (torch.Tensor): torch.int64 tokens to score (n_batch, n_token).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, Any]: scores shaped ``(n_batch, n_vocab)``
            and the next states for ``ys``.
        """
        raise NotImplementedError
| 5,902 | 30.566845 | 87 | py |
espnet | espnet-master/espnet/nets/asr_interface.py | """ASR Interface module."""
import argparse
from espnet.bin.asr_train import get_parser
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.fill_missing_args import fill_missing_args
class ASRInterface:
    """ASR Interface for ESPnet model implementation."""

    @staticmethod
    def add_arguments(parser):
        """Add model-specific arguments to the parser (no-op by default)."""
        return parser

    @classmethod
    def build(cls, idim: int, odim: int, **kwargs):
        """Initialize this class with python-level args.

        Args:
            idim (int): The number of an input feature dim.
            odim (int): The number of output vocab.

        Returns:
            ASRinterface: A new instance of ASRInterface.
        """
        args = argparse.Namespace(**kwargs)
        # Fill defaults first from the common training parser, then from the
        # model-specific arguments.
        args = fill_missing_args(args, lambda parser: get_parser(parser, required=False))
        args = fill_missing_args(args, cls.add_arguments)
        return cls(idim, odim, args)

    def forward(self, xs, ilens, ys):
        """Compute loss for training.

        :param xs:
            For pytorch, batch of padded source sequences torch.Tensor (B, Tmax, idim)
            For chainer, list of source sequences chainer.Variable
        :param ilens: batch of lengths of source sequences (B)
            For pytorch, torch.Tensor
            For chainer, list of int
        :param ys:
            For pytorch, batch of padded source sequences torch.Tensor (B, Lmax)
            For chainer, list of source sequences chainer.Variable
        :return: loss value
        :rtype: torch.Tensor for pytorch, chainer.Variable for chainer
        """
        raise NotImplementedError("forward method is not implemented")

    def recognize(self, x, recog_args, char_list=None, rnnlm=None):
        """Recognize a single utterance for evaluation.

        :param ndarray x: input acouctic feature (B, T, D) or (T, D)
        :param namespace recog_args: argment namespace contraining options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        raise NotImplementedError("recognize method is not implemented")

    def recognize_batch(self, x, recog_args, char_list=None, rnnlm=None):
        """Run batched beam search.

        :param torch.Tensor x: encoder hidden state sequences (B, Tmax, Henc)
        :param namespace recog_args: argument namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        raise NotImplementedError("Batch decoding is not supported yet.")

    def calculate_all_attentions(self, xs, ilens, ys):
        """Compute attention weights for visualization.

        :param list xs: list of padded input sequences [(T1, idim), (T2, idim), ...]
        :param ndarray ilens: batch of lengths of input sequences (B)
        :param list ys: list of character id sequence tensor [(L1), (L2), (L3), ...]
        :return: attention weights (B, Lmax, Tmax)
        :rtype: float ndarray
        """
        raise NotImplementedError("calculate_all_attentions method is not implemented")

    def calculate_all_ctc_probs(self, xs, ilens, ys):
        """Compute CTC posterior probabilities.

        :param list xs_pad: list of padded input sequences [(T1, idim), (T2, idim), ...]
        :param ndarray ilens: batch of lengths of input sequences (B)
        :param list ys: list of character id sequence tensor [(L1), (L2), (L3), ...]
        :return: CTC probabilities (B, Tmax, vocab)
        :rtype: float ndarray
        """
        raise NotImplementedError("calculate_all_ctc_probs method is not implemented")

    @property
    def attention_plot_class(self):
        """Return the reporter class used to plot attention weights."""
        from espnet.asr.asr_utils import PlotAttentionReport

        return PlotAttentionReport

    @property
    def ctc_plot_class(self):
        """Return the reporter class used to plot CTC posteriors."""
        from espnet.asr.asr_utils import PlotCTCReport

        return PlotCTCReport

    def get_total_subsampling_factor(self):
        """Return the model's total temporal subsampling factor."""
        raise NotImplementedError(
            "get_total_subsampling_factor method is not implemented"
        )

    def encode(self, feat):
        """Encode one feature sequence for `beam_search` (optional).

        Args:
            x (numpy.ndarray): input feature (T, D)

        Returns:
            torch.Tensor for pytorch, chainer.Variable for chainer:
                encoded feature (T, D)
        """
        raise NotImplementedError("encode method is not implemented")

    def scorers(self):
        """Return scorers used in `beam_search` (optional).

        Returns:
            dict[str, ScorerInterface]: dict of `ScorerInterface` objects
        """
        # NOTE: the historical error message says "decoders" — kept for
        # backward compatibility with callers matching on it.
        raise NotImplementedError("decoders method is not implemented")
# Alias table: per NN backend ("pytorch", "chainer"), maps a short model name
# (e.g. "rnn", "transformer") to the "module:class" import path of the
# corresponding E2E implementation. Consumed by dynamic_import_asr() below.
predefined_asr = {
    "pytorch": {
        "rnn": "espnet.nets.pytorch_backend.e2e_asr:E2E",
        "transducer": "espnet.nets.pytorch_backend.e2e_asr_transducer:E2E",
        "transformer": "espnet.nets.pytorch_backend.e2e_asr_transformer:E2E",
        "conformer": "espnet.nets.pytorch_backend.e2e_asr_conformer:E2E",
    },
    "chainer": {
        "rnn": "espnet.nets.chainer_backend.e2e_asr:E2E",
        "transformer": "espnet.nets.chainer_backend.e2e_asr_transformer:E2E",
    },
}
def dynamic_import_asr(module, backend):
    """Import ASR models dynamically.

    Args:
        module (str): module_name:class_name or alias in `predefined_asr`
        backend (str): NN backend. e.g., pytorch, chainer

    Returns:
        type: ASR class
    """
    # Resolve short aliases for the requested backend; unknown backends
    # simply provide no aliases and `module` must be a full path.
    aliases = predefined_asr.get(backend, {})
    asr_class = dynamic_import(module, aliases)
    assert issubclass(
        asr_class, ASRInterface
    ), f"{module} does not implement ASRInterface"
    return asr_class
| 5,957 | 33.439306 | 88 | py |
espnet | espnet-master/espnet/nets/batch_beam_search_online.py | """Parallel beam search module for online simulation."""
import logging
from typing import Any # noqa: H301
from typing import Dict # noqa: H301
from typing import List # noqa: H301
from typing import Tuple # noqa: H301
import torch
from espnet.nets.batch_beam_search import BatchBeamSearch # noqa: H301
from espnet.nets.batch_beam_search import BatchHypothesis # noqa: H301
from espnet.nets.beam_search import Hypothesis
from espnet.nets.e2e_asr_common import end_detect
class BatchBeamSearchOnline(BatchBeamSearch):
    """Online beam search implementation.
    This simulates streaming decoding.
    It requires encoded features of entire utterance and
    extracts block by block from it as it should be done
    in streaming processing.
    This is based on Tsunoo et al, "STREAMING TRANSFORMER ASR
    WITH BLOCKWISE SYNCHRONOUS BEAM SEARCH"
    (https://arxiv.org/abs/2006.14941).
    """
    def __init__(
        self,
        *args,
        block_size=40,
        hop_size=16,
        look_ahead=16,
        disable_repetition_detection=False,
        encoded_feat_length_limit=0,
        decoder_text_length_limit=0,
        **kwargs,
    ):
        """Initialize beam search.
        Args:
            block_size: Number of encoder frames visible in a block.
            hop_size: Number of frames the block window advances per block.
            look_ahead: Number of look-ahead frames within a block.
            disable_repetition_detection: Turn off the repetition criterion
                (Eq. (11) of https://arxiv.org/abs/2006.14941).
            encoded_feat_length_limit: If > 0, keep only this many of the
                newest encoded frames when scoring.
            decoder_text_length_limit: If > 0, truncate the token prefix fed
                to the decoder scorers to this length.
        """
        super().__init__(*args, **kwargs)
        self.block_size = block_size
        self.hop_size = hop_size
        self.look_ahead = look_ahead
        self.disable_repetition_detection = disable_repetition_detection
        self.encoded_feat_length_limit = encoded_feat_length_limit
        self.decoder_text_length_limit = decoder_text_length_limit
        self.reset()
    def reset(self):
        """Reset all per-utterance state so a new utterance can be decoded."""
        self.encbuffer = None
        self.running_hyps = None
        self.prev_hyps = []
        self.ended_hyps = []
        self.processed_block = 0
        self.process_idx = 0
        self.prev_output = None
    def score_full(
        self, hyp: BatchHypothesis, x: torch.Tensor
    ) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
        """Score new hypothesis by `self.full_scorers`.
        Args:
            hyp (BatchHypothesis): Hypothesis with prefix tokens to score
            x (torch.Tensor): Corresponding input feature
        Returns:
            Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: Tuple of
                score dict of `hyp` that has string keys of `self.full_scorers`
                and tensor score values of shape: `(self.n_vocab,)`,
                and state dict that has string keys
                and state values of `self.full_scorers`
        """
        scores = dict()
        states = dict()
        for k, d in self.full_scorers.items():
            if (
                self.decoder_text_length_limit > 0
                and len(hyp.yseq) > 0
                and len(hyp.yseq[0]) > self.decoder_text_length_limit
            ):
                # Truncate the prefix to the newest tokens, re-prime it with
                # sos, and drop the cached decoder states (which no longer
                # match the truncated prefix).
                temp_yseq = hyp.yseq.narrow(
                    1, -self.decoder_text_length_limit, self.decoder_text_length_limit
                ).clone()
                temp_yseq[:, 0] = self.sos
                self.running_hyps.states["decoder"] = [
                    None for _ in self.running_hyps.states["decoder"]
                ]
                scores[k], states[k] = d.batch_score(temp_yseq, hyp.states[k], x)
            else:
                scores[k], states[k] = d.batch_score(hyp.yseq, hyp.states[k], x)
        return scores, states
    def forward(
        self,
        x: torch.Tensor,
        maxlenratio: float = 0.0,
        minlenratio: float = 0.0,
        is_final: bool = True,
    ) -> List[Hypothesis]:
        """Perform beam search.
        Args:
            x (torch.Tensor): Encoded speech feature (T, D); appended to an
                internal buffer, so successive calls stream in new frames.
            maxlenratio (float): Input length ratio to obtain max output length.
                If maxlenratio=0.0 (default), it uses a end-detect function
                to automatically find maximum hypothesis lengths
            minlenratio (float): Input length ratio to obtain min output length.
            is_final (bool): Whether this call carries the last chunk of the
                utterance; if False, the trailing partial block is deferred.
        Returns:
            list[Hypothesis]: N-best decoding results
        """
        if self.encbuffer is None:
            self.encbuffer = x
        else:
            self.encbuffer = torch.cat([self.encbuffer, x], axis=0)
        x = self.encbuffer
        # set length bounds
        if maxlenratio == 0:
            maxlen = x.shape[0]
        else:
            maxlen = max(1, int(maxlenratio * x.size(0)))
        ret = None
        while True:
            # End frame of the current block within the buffered features.
            cur_end_frame = (
                self.block_size - self.look_ahead + self.hop_size * self.processed_block
            )
            if cur_end_frame < x.shape[0]:
                h = x.narrow(0, 0, cur_end_frame)
                block_is_final = False
            else:
                if is_final:
                    h = x
                    block_is_final = True
                else:
                    # Not enough frames buffered yet; wait for the next call.
                    break
            logging.debug("Start processing block: %d", self.processed_block)
            logging.debug(
                " Feature length: {}, current position: {}".format(
                    h.shape[0], self.process_idx
                )
            )
            if (
                self.encoded_feat_length_limit > 0
                and h.shape[0] > self.encoded_feat_length_limit
            ):
                # Keep only the newest frames to bound scorer cost.
                h = h.narrow(
                    0,
                    h.shape[0] - self.encoded_feat_length_limit,
                    self.encoded_feat_length_limit,
                )
            if self.running_hyps is None:
                self.running_hyps = self.init_hyp(h)
            ret = self.process_one_block(h, block_is_final, maxlen, maxlenratio)
            logging.debug("Finished processing block: %d", self.processed_block)
            self.processed_block += 1
            if block_is_final:
                return ret
        if ret is None:
            if self.prev_output is None:
                return []
            else:
                return self.prev_output
        else:
            self.prev_output = ret
        # N-best results
        return ret
    def process_one_block(self, h, is_final, maxlen, maxlenratio):
        """Recognize one block."""
        # extend states for ctc
        self.extend(h, self.running_hyps)
        # NOTE(review): if this loop body never executes (process_idx already
        # at maxlen) and is_final is False, the `local_ended_hyps` reference
        # below would raise NameError — confirm callers always enter the loop.
        while self.process_idx < maxlen:
            logging.debug("position " + str(self.process_idx))
            best = self.search(self.running_hyps, h)
            if self.process_idx == maxlen - 1:
                # end decoding
                self.running_hyps = self.post_process(
                    self.process_idx, maxlen, maxlenratio, best, self.ended_hyps
                )
            n_batch = best.yseq.shape[0]
            local_ended_hyps = []
            is_local_eos = best.yseq[torch.arange(n_batch), best.length - 1] == self.eos
            prev_repeat = False
            for i in range(is_local_eos.shape[0]):
                if is_local_eos[i]:
                    hyp = self._select(best, i)
                    local_ended_hyps.append(hyp)
                # NOTE(tsunoo): check repetitions here
                # This is a implicit implementation of
                # Eq (11) in https://arxiv.org/abs/2006.14941
                # A flag prev_repeat is used instead of using set
                # NOTE(fujihara): I made it possible to turned off
                # the below lines using disable_repetition_detection flag,
                # because this criteria is too sensitive that the beam
                # search starts only after the entire inputs are available.
                # Empirically, this flag didn't affect the performance.
                elif (
                    not self.disable_repetition_detection
                    and not prev_repeat
                    and best.yseq[i, -1] in best.yseq[i, :-1]
                    and not is_final
                ):
                    prev_repeat = True
            if prev_repeat:
                logging.info("Detected repetition.")
                break
            if (
                is_final
                and maxlenratio == 0.0
                and end_detect(
                    [lh.asdict() for lh in self.ended_hyps], self.process_idx
                )
            ):
                logging.info(f"end detected at {self.process_idx}")
                return self.assemble_hyps(self.ended_hyps)
            if len(local_ended_hyps) > 0 and not is_final:
                logging.info("Detected hyp(s) reaching EOS in this block.")
                break
            # Keep the previous beam so a non-final block can roll back one
            # position before the next block arrives.
            self.prev_hyps = self.running_hyps
            self.running_hyps = self.post_process(
                self.process_idx, maxlen, maxlenratio, best, self.ended_hyps
            )
            if is_final:
                for hyp in local_ended_hyps:
                    self.ended_hyps.append(hyp)
            if len(self.running_hyps) == 0:
                logging.info("no hypothesis. Finish decoding.")
                return self.assemble_hyps(self.ended_hyps)
            else:
                logging.debug(f"remained hypotheses: {len(self.running_hyps)}")
            # increment number
            self.process_idx += 1
        if is_final:
            return self.assemble_hyps(self.ended_hyps)
        else:
            for hyp in self.ended_hyps:
                local_ended_hyps.append(hyp)
            rets = self.assemble_hyps(local_ended_hyps)
            # Roll back one position so the next block re-scores it with
            # more context.
            if self.process_idx > 1 and len(self.prev_hyps) > 0:
                self.running_hyps = self.prev_hyps
                self.process_idx -= 1
                self.prev_hyps = []
            # N-best results
            return rets
    def assemble_hyps(self, ended_hyps):
        """Sort ended hypotheses by score, log the best one, and return them."""
        nbest_hyps = sorted(ended_hyps, key=lambda x: x.score, reverse=True)
        # check the number of hypotheses reaching to eos
        if len(nbest_hyps) == 0:
            logging.warning(
                "there is no N-best results, perform recognition "
                "again with smaller minlenratio."
            )
            return []
        # report the best result
        best = nbest_hyps[0]
        for k, v in best.scores.items():
            logging.info(
                f"{v:6.2f} * {self.weights[k]:3} = {v * self.weights[k]:6.2f} for {k}"
            )
        logging.info(f"total log probability: {best.score:.2f}")
        logging.info(f"normalized log probability: {best.score / len(best.yseq):.2f}")
        logging.info(f"total number of ended hypotheses: {len(nbest_hyps)}")
        if self.token_list is not None:
            logging.info(
                "best hypo: "
                + "".join([self.token_list[x] for x in best.yseq[1:-1]])
                + "\n"
            )
        return nbest_hyps
    def extend(self, x: torch.Tensor, hyps: Hypothesis) -> List[Hypothesis]:
        """Extend probabilities and states with more encoded chunks.
        Args:
            x (torch.Tensor): The extended encoder output feature
            hyps (Hypothesis): Current list of hypothesis
        Returns:
            Hypothesis: The extended hypothesis
        """
        # Only scorers that implement the streaming hooks are extended.
        for k, d in self.scorers.items():
            if hasattr(d, "extend_prob"):
                d.extend_prob(x)
            if hasattr(d, "extend_state"):
                hyps.states[k] = d.extend_state(hyps.states[k])
| 11,320 | 35.519355 | 88 | py |
espnet | espnet-master/espnet/nets/mt_interface.py | """MT Interface module."""
import argparse
from espnet.bin.asr_train import get_parser
from espnet.utils.fill_missing_args import fill_missing_args
class MTInterface:
    """MT Interface for ESPnet model implementation."""

    @staticmethod
    def add_arguments(parser):
        """Add model-specific arguments to the parser (no-op by default)."""
        return parser

    @classmethod
    def build(cls, idim: int, odim: int, **kwargs):
        """Initialize this class with python-level args.

        Args:
            idim (int): The number of an input feature dim.
            odim (int): The number of output vocab.

        Returns:
            ASRinterface: A new instance of ASRInterface.
        """
        args = argparse.Namespace(**kwargs)
        # Fill defaults first from the common training parser, then from the
        # model-specific arguments.
        args = fill_missing_args(args, lambda parser: get_parser(parser, required=False))
        args = fill_missing_args(args, cls.add_arguments)
        return cls(idim, odim, args)

    def forward(self, xs, ilens, ys):
        """Compute loss for training.

        :param xs:
            For pytorch, batch of padded source sequences torch.Tensor (B, Tmax, idim)
            For chainer, list of source sequences chainer.Variable
        :param ilens: batch of lengths of source sequences (B)
            For pytorch, torch.Tensor
            For chainer, list of int
        :param ys:
            For pytorch, batch of padded source sequences torch.Tensor (B, Lmax)
            For chainer, list of source sequences chainer.Variable
        :return: loss value
        :rtype: torch.Tensor for pytorch, chainer.Variable for chainer
        """
        raise NotImplementedError("forward method is not implemented")

    def translate(self, x, trans_args, char_list=None, rnnlm=None):
        """Translate a single utterance for evaluation.

        :param ndarray x: input acouctic feature (B, T, D) or (T, D)
        :param namespace trans_args: argment namespace contraining options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        raise NotImplementedError("translate method is not implemented")

    def translate_batch(self, x, trans_args, char_list=None, rnnlm=None):
        """Run batched beam search.

        :param torch.Tensor x: encoder hidden state sequences (B, Tmax, Henc)
        :param namespace trans_args: argument namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        raise NotImplementedError("Batch decoding is not supported yet.")

    def calculate_all_attentions(self, xs, ilens, ys):
        """Compute attention weights for visualization.

        :param list xs: list of padded input sequences [(T1, idim), (T2, idim), ...]
        :param ndarray ilens: batch of lengths of input sequences (B)
        :param list ys: list of character id sequence tensor [(L1), (L2), (L3), ...]
        :return: attention weights (B, Lmax, Tmax)
        :rtype: float ndarray
        """
        raise NotImplementedError("calculate_all_attentions method is not implemented")

    @property
    def attention_plot_class(self):
        """Return the reporter class used to plot attention weights."""
        from espnet.asr.asr_utils import PlotAttentionReport

        return PlotAttentionReport
| 3,428 | 35.094737 | 87 | py |
espnet | espnet-master/espnet/nets/tts_interface.py | # -*- coding: utf-8 -*-
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""TTS Interface realted modules."""
from espnet.asr.asr_utils import torch_load
# chainer is an optional dependency: when it is not installed, ``Reporter``
# is set to ``None`` and TTSInterface.__init__ must not be called.
try:
    import chainer
except ImportError:
    Reporter = None
else:
    class Reporter(chainer.Chain):
        """Reporter module."""
        def report(self, dicts):
            """Report values from a given dict."""
            # Forward every dict in the iterable to chainer's global reporter,
            # attributing the values to this chain.
            for d in dicts:
                chainer.reporter.report(d, self)
class TTSInterface(object):
    """TTS Interface for ESPnet model implementation."""

    @staticmethod
    def add_arguments(parser):
        """Add model-specific arguments to the parser (no-op by default)."""
        return parser

    def __init__(self):
        """Initialize the TTS module and its training reporter."""
        # Requires chainer to be installed (otherwise Reporter is None).
        self.reporter = Reporter()

    def forward(self, *args, **kwargs):
        """Calculate TTS forward propagation.

        Returns:
            Tensor: Loss value.
        """
        raise NotImplementedError("forward method is not implemented")

    def inference(self, *args, **kwargs):
        """Generate the sequence of features given the sequences of characters.

        Returns:
            Tensor: The sequence of generated features (L, odim).
            Tensor: The sequence of stop probabilities (L,).
            Tensor: The sequence of attention weights (L, T).
        """
        raise NotImplementedError("inference method is not implemented")

    def calculate_all_attentions(self, *args, **kwargs):
        """Calculate TTS attention weights.

        Args:
            Tensor: Batch of attention weights (B, Lmax, Tmax).
        """
        raise NotImplementedError("calculate_all_attentions method is not implemented")

    def load_pretrained_model(self, model_path):
        """Load pretrained model parameters from ``model_path`` into self."""
        torch_load(model_path, self)

    @property
    def attention_plot_class(self):
        """Return the reporter class used to plot attention weights."""
        from espnet.asr.asr_utils import PlotAttentionReport

        return PlotAttentionReport

    @property
    def base_plot_keys(self):
        """Return base key names to plot during training.

        The keys should match what `chainer.reporter` reports:
        adding the key `loss` makes the reporter emit `main/loss` and
        `validation/main/loss`, and `loss.png` visualizes both.

        Returns:
            list[str]: Base keys to plot during training.
        """
        return ["loss"]
| 2,582 | 27.076087 | 87 | py |
espnet | espnet-master/espnet/nets/beam_search_timesync.py | """
Time Synchronous One-Pass Beam Search.
Implements joint CTC/attention decoding where
hypotheses are expanded along the time (input) axis,
as described in https://arxiv.org/abs/2210.05200.
Supports CPU and GPU inference.
References: https://arxiv.org/abs/1408.2873 for CTC beam search
Author: Brian Yan
"""
import logging
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple
import numpy as np
import torch
from espnet.nets.beam_search import Hypothesis
from espnet.nets.scorer_interface import ScorerInterface
@dataclass
class CacheItem:
    """For caching attentional decoder and LM states."""

    state: Any  # scorer state after consuming the cached prefix
    scores: Any  # per-token scores produced at that state
    log_sum: float  # accumulated log-probability of the cached prefix
class BeamSearchTimeSync(torch.nn.Module):
    """Time synchronous beam search algorithm.

    Hypotheses are expanded along the time (input) axis, combining CTC
    prefix scores with attention-decoder and LM scores
    (https://arxiv.org/abs/2210.05200). Decoder/LM evaluations are cached
    per prefix in :class:`CacheItem` objects.
    """

    def __init__(
        self,
        sos: int,
        beam_size: int,
        scorers: Dict[str, ScorerInterface],
        weights: Dict[str, float],
        token_list=dict,
        pre_beam_ratio: float = 1.5,
        blank: int = 0,
        force_lid: bool = False,
        temp: float = 1.0,
    ):
        """Initialize beam search.

        Args:
            sos: sos index
            beam_size: num hyps
            scorers: dict with "ctc" and "decoder" scorers ("lm" optional)
            weights: per-scorer weights plus "length_bonus"
            token_list: id-to-token mapping used for logging.
                NOTE(review): the default is the ``dict`` *type*, not an
                instance — callers are expected to pass a real mapping.
            pre_beam_ratio: pre_beam_ratio * beam_size = pre_beam
                pre_beam is used to select candidates from vocab to
                extend hypotheses
            blank: blank index
            force_lid: stored but not used in this class
            temp: stored but not used in this class
        """
        super().__init__()
        self.ctc = scorers["ctc"]
        self.decoder = scorers["decoder"]
        self.lm = scorers["lm"] if "lm" in scorers else None
        self.beam_size = beam_size
        self.pre_beam_size = int(pre_beam_ratio * beam_size)
        self.ctc_weight = weights["ctc"]
        self.lm_weight = weights["lm"]
        self.decoder_weight = weights["decoder"]
        self.penalty = weights["length_bonus"]
        self.sos = sos
        self.sos_th = torch.tensor([self.sos])
        self.blank = blank
        self.attn_cache = dict()  # cache for p_attn(Y|X)
        self.lm_cache = dict()  # cache for p_lm(Y)
        self.enc_output = None  # log p_ctc(Z|X)
        self.force_lid = force_lid
        self.temp = temp
        self.token_list = token_list

    def reset(self, enc_output: torch.Tensor):
        """Reset caches and prime them with the sos prefix for a new utterance."""
        self.attn_cache = dict()
        self.lm_cache = dict()
        self.enc_output = enc_output
        self.sos_th = self.sos_th.to(enc_output.device)
        if self.decoder is not None:
            init_decoder_state = self.decoder.init_state(enc_output)
            decoder_scores, decoder_state = self.decoder.score(
                self.sos_th, init_decoder_state, enc_output
            )
            self.attn_cache[(self.sos,)] = CacheItem(
                state=decoder_state,
                scores=decoder_scores,
                log_sum=0.0,
            )
        if self.lm is not None:
            init_lm_state = self.lm.init_state(enc_output)
            lm_scores, lm_state = self.lm.score(self.sos_th, init_lm_state, enc_output)
            self.lm_cache[(self.sos,)] = CacheItem(
                state=lm_state,
                scores=lm_scores,
                log_sum=0.0,
            )

    def cached_score(self, h: Tuple[int], cache: dict, scorer: ScorerInterface) -> Any:
        """Retrieve decoder/LM scores which may be cached.

        The cache is keyed by prefix; on a miss the scorer is advanced one
        step from the grandparent prefix's cached state.
        """
        root = h[:-1]  # prefix
        if root in cache:
            root_scores = cache[root].scores
            root_state = cache[root].state
            root_log_sum = cache[root].log_sum
        else:  # run decoder fwd one step and update cache
            root_root = root[:-1]
            root_root_state = cache[root_root].state
            root_scores, root_state = scorer.score(
                torch.tensor(root, device=self.enc_output.device).long(),
                root_root_state,
                self.enc_output,
            )
            root_log_sum = cache[root_root].log_sum + float(
                cache[root_root].scores[root[-1]]
            )
            cache[root] = CacheItem(
                state=root_state, scores=root_scores, log_sum=root_log_sum
            )
        cand_score = float(root_scores[h[-1]])
        score = root_log_sum + cand_score
        return score

    def joint_score(self, hyps: Any, ctc_score_dp: Any) -> Any:
        """Calculate the weighted joint CTC/decoder/LM score for each hyp."""
        scores = dict()
        for h in hyps:
            score = self.ctc_weight * np.logaddexp(*ctc_score_dp[h])  # ctc score
            if len(h) > 1 and self.decoder_weight > 0 and self.decoder is not None:
                score += (
                    self.cached_score(h, self.attn_cache, self.decoder)
                    * self.decoder_weight
                )  # attn score
            if len(h) > 1 and self.lm is not None and self.lm_weight > 0:
                score += (
                    self.cached_score(h, self.lm_cache, self.lm) * self.lm_weight
                )  # lm score
            score += self.penalty * (len(h) - 1)  # penalty score
            scores[h] = score
        return scores

    def time_step(self, p_ctc: Any, ctc_score_dp: Any, hyps: Any) -> Any:
        """Execute a single time step.

        Args:
            p_ctc: log CTC posterior for this frame (vocab-sized ndarray).
            ctc_score_dp: per-prefix (p_nonblank, p_blank) log-prob pairs.
            hyps: current beam of prefixes (tuples of token ids).

        Returns:
            Updated (ctc_score_dp, hyps, scores).
        """
        # Pre-prune the vocabulary to the pre_beam_size most likely tokens.
        pre_beam_threshold = np.sort(p_ctc)[-self.pre_beam_size]
        cands = set(np.where(p_ctc >= pre_beam_threshold)[0])
        if len(cands) == 0:
            cands = {np.argmax(p_ctc)}
        new_hyps = set()
        ctc_score_dp_next = defaultdict(
            lambda: (float("-inf"), float("-inf"))
        )  # (p_nb, p_b)
        for hyp_l in hyps:
            p_prev_l = np.logaddexp(*ctc_score_dp[hyp_l])
            for c in cands:
                if c == self.blank:
                    logging.debug("blank cand, hypothesis is " + str(hyp_l))
                    p_nb, p_b = ctc_score_dp_next[hyp_l]
                    p_b = np.logaddexp(p_b, p_ctc[c] + p_prev_l)
                    ctc_score_dp_next[hyp_l] = (p_nb, p_b)
                    new_hyps.add(hyp_l)
                else:
                    l_plus = hyp_l + (int(c),)
                    logging.debug("non-blank cand, hypothesis is " + str(l_plus))
                    p_nb, p_b = ctc_score_dp_next[l_plus]
                    if c == hyp_l[-1]:
                        # Repeated label: extending requires an intervening
                        # blank; staying on hyp_l absorbs the repeat.
                        logging.debug("repeat cand, hypothesis is " + str(hyp_l))
                        p_nb_prev, p_b_prev = ctc_score_dp[hyp_l]
                        p_nb = np.logaddexp(p_nb, p_ctc[c] + p_b_prev)
                        p_nb_l, p_b_l = ctc_score_dp_next[hyp_l]
                        p_nb_l = np.logaddexp(p_nb_l, p_ctc[c] + p_nb_prev)
                        ctc_score_dp_next[hyp_l] = (p_nb_l, p_b_l)
                    else:
                        p_nb = np.logaddexp(p_nb, p_ctc[c] + p_prev_l)
                    # Merge mass from a previously pruned identical prefix.
                    if l_plus not in hyps and l_plus in ctc_score_dp:
                        p_b = np.logaddexp(
                            p_b, p_ctc[self.blank] + np.logaddexp(*ctc_score_dp[l_plus])
                        )
                        p_nb = np.logaddexp(p_nb, p_ctc[c] + ctc_score_dp[l_plus][0])
                    ctc_score_dp_next[l_plus] = (p_nb, p_b)
                    new_hyps.add(l_plus)
        scores = self.joint_score(new_hyps, ctc_score_dp_next)
        hyps = sorted(new_hyps, key=lambda ll: scores[ll], reverse=True)[
            : self.beam_size
        ]
        ctc_score_dp = ctc_score_dp_next.copy()
        return ctc_score_dp, hyps, scores

    def forward(
        self, x: torch.Tensor, maxlenratio: float = 0.0, minlenratio: float = 0.0
    ) -> List[Hypothesis]:
        """Perform beam search.

        Args:
            x (torch.Tensor): Encoded speech feature (T, D)
            maxlenratio: unused here (kept for interface compatibility)
            minlenratio: unused here (kept for interface compatibility)

        Returns:
            list[Hypothesis]: N-best decoding results
        """
        logging.info("decoder input lengths: " + str(x.shape[0]))
        lpz = self.ctc.log_softmax(x.unsqueeze(0))
        lpz = lpz.squeeze(0)
        lpz = lpz.cpu().detach().numpy()
        self.reset(x)
        hyps = [(self.sos,)]
        ctc_score_dp = defaultdict(
            lambda: (float("-inf"), float("-inf"))
        )  # (p_nb, p_b) - dp object tracking p_ctc
        ctc_score_dp[(self.sos,)] = (float("-inf"), 0.0)
        for t in range(lpz.shape[0]):
            logging.debug("position " + str(t))
            ctc_score_dp, hyps, scores = self.time_step(lpz[t, :], ctc_score_dp, hyps)
        ret = [
            Hypothesis(yseq=torch.tensor(list(h) + [self.sos]), score=scores[h])
            for h in hyps
        ]
        best_hyp = "".join([self.token_list[x] for x in ret[0].yseq.tolist()])
        best_hyp_len = len(ret[0].yseq)
        best_score = ret[0].score
        logging.info(f"output length: {best_hyp_len}")
        logging.info(f"total log probability: {best_score:.2f}")
        logging.info(f"best hypo: {best_hyp}")
        return ret
| 8,986 | 36.445833 | 88 | py |
espnet | espnet-master/espnet/nets/chainer_backend/e2e_asr.py | # Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""RNN sequence-to-sequence speech recognition model (chainer)."""
import logging
import math
import chainer
import numpy as np
from chainer import reporter
from espnet.nets.chainer_backend.asr_interface import ChainerASRInterface
from espnet.nets.chainer_backend.ctc import ctc_for
from espnet.nets.chainer_backend.rnn.attentions import att_for
from espnet.nets.chainer_backend.rnn.decoders import decoder_for
from espnet.nets.chainer_backend.rnn.encoders import encoder_for
from espnet.nets.e2e_asr_common import label_smoothing_dist
from espnet.nets.pytorch_backend.e2e_asr import E2E as E2E_pytorch
from espnet.nets.pytorch_backend.nets_utils import get_subsample
CTC_LOSS_THRESHOLD = 10000
class E2E(ChainerASRInterface):
"""E2E module for chainer backend.
Args:
idim (int): Dimension of the inputs.
odim (int): Dimension of the outputs.
args (parser.args): Training config.
flag_return (bool): If True, train() would return
additional metrics in addition to the training
loss.
"""
    @staticmethod
    def add_arguments(parser):
        """Add arguments."""
        # The chainer model shares its CLI options with the pytorch E2E model.
        return E2E_pytorch.add_arguments(parser)
    def get_total_subsampling_factor(self):
        """Get total subsampling factor."""
        # Convolutional subsampling in the encoder times the configured
        # frame-level subsampling factors.
        return self.enc.conv_subsampling_factor * int(np.prod(self.subsample))
    def __init__(self, idim, odim, args, flag_return=True):
        """Construct an E2E object.
        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        :param bool flag_return: if True, forward() also returns the
            CTC/attention losses and accuracy besides the total loss
        """
        chainer.Chain.__init__(self)
        # CTC/attention multi-task weight: 0 = attention only, 1 = CTC only.
        self.mtlalpha = args.mtlalpha
        assert 0 <= self.mtlalpha <= 1, "mtlalpha must be [0,1]"
        self.etype = args.etype
        self.verbose = args.verbose
        self.char_list = args.char_list
        self.outdir = args.outdir
        # below means the last number becomes eos/sos ID
        # note that sos/eos IDs are identical
        self.sos = odim - 1
        self.eos = odim - 1
        # subsample info
        self.subsample = get_subsample(args, mode="asr", arch="rnn")
        # label smoothing info
        if args.lsm_type:
            logging.info("Use label smoothing with " + args.lsm_type)
            labeldist = label_smoothing_dist(
                odim, args.lsm_type, transcript=args.train_json
            )
        else:
            labeldist = None
        with self.init_scope():
            # encoder
            self.enc = encoder_for(args, idim, self.subsample)
            # ctc
            self.ctc = ctc_for(args, odim)
            # attention
            self.att = att_for(args)
            # decoder
            self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)
        self.acc = None
        self.loss = None
        self.flag_return = flag_return
    def forward(self, xs, ilens, ys):
        """E2E forward propagation.
        Args:
            xs (chainer.Variable): Batch of padded character ids. (B, Tmax)
            ilens (chainer.Variable): Batch of length of each input batch. (B,)
            ys (chainer.Variable): Batch of padded target features. (B, Lmax, odim)
        Returns:
            float: Loss that calculated by attention and ctc loss.
            float (optional): Ctc loss.
            float (optional): Attention loss.
            float (optional): Accuracy.
        """
        # 1. encoder
        hs, ilens = self.enc(xs, ilens)
        # 3. CTC loss
        # mtlalpha == 0 disables the CTC branch entirely.
        if self.mtlalpha == 0:
            loss_ctc = None
        else:
            loss_ctc = self.ctc(hs, ys)
        # 4. attention loss
        # mtlalpha == 1 disables the attention branch entirely.
        if self.mtlalpha == 1:
            loss_att = None
            acc = None
        else:
            loss_att, acc = self.dec(hs, ys)
            self.acc = acc
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = loss_att
        elif alpha == 1:
            self.loss = loss_ctc
        else:
            self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
        # Only report finite, reasonably-sized losses to the chainer reporter;
        # diverged CTC losses are logged as a warning instead.
        if self.loss.data < CTC_LOSS_THRESHOLD and not math.isnan(self.loss.data):
            reporter.report({"loss_ctc": loss_ctc}, self)
            reporter.report({"loss_att": loss_att}, self)
            reporter.report({"acc": acc}, self)
            logging.info("mtl loss:" + str(self.loss.data))
            reporter.report({"loss": self.loss}, self)
        else:
            logging.warning("loss (=%f) is not correct", self.loss.data)
        if self.flag_return:
            return self.loss, loss_ctc, loss_att, acc
        else:
            return self.loss
    def recognize(self, x, recog_args, char_list, rnnlm=None):
        """E2E greedy/beam search.

        Args:
            x (chainer.Variable): Input acoustic feature sequence (T, idim)
                for a single utterance.
            recog_args (parser.args): Arguments of config file.
            char_list (List[str]): List of Characters.
            rnnlm (Module): RNNLM module defined at `espnet.lm.chainer_backend.lm`.

        Returns:
            List[Dict[str, Any]]: Result of recognition.
        """
        # subsample frame
        x = x[:: self.subsample[0], :]
        ilen = self.xp.array(x.shape[0], dtype=np.int32)
        h = chainer.Variable(self.xp.array(x, dtype=np.float32))
        # inference only: no graph construction, eval-mode configuration
        with chainer.no_backprop_mode(), chainer.using_config("train", False):
            # 1. encoder
            # make a utt list (1) to use the same interface for encoder
            h, _ = self.enc([h], [ilen])
            # calculate log P(z_t|X) for CTC scores
            if recog_args.ctc_weight > 0.0:
                lpz = self.ctc.log_softmax(h).data[0]
            else:
                lpz = None
            # 2. decoder
            # decode the first utterance
            y = self.dec.recognize_beam(h[0], lpz, recog_args, char_list, rnnlm)
            return y
def calculate_all_attentions(self, xs, ilens, ys):
"""E2E attention calculation.
Args:
xs (List): List of padded input sequences. [(T1, idim), (T2, idim), ...]
ilens (np.ndarray): Batch of lengths of input sequences. (B)
ys (List): List of character id sequence tensor. [(L1), (L2), (L3), ...]
Returns:
float np.ndarray: Attention weights. (B, Lmax, Tmax)
"""
hs, ilens = self.enc(xs, ilens)
att_ws = self.dec.calculate_all_attentions(hs, ys)
return att_ws
@staticmethod
def custom_converter(subsampling_factor=0):
"""Get customconverter of the model."""
from espnet.nets.chainer_backend.rnn.training import CustomConverter
return CustomConverter(subsampling_factor=subsampling_factor)
@staticmethod
def custom_updater(iters, optimizer, converter, device=-1, accum_grad=1):
"""Get custom_updater of the model."""
from espnet.nets.chainer_backend.rnn.training import CustomUpdater
return CustomUpdater(
iters, optimizer, converter=converter, device=device, accum_grad=accum_grad
)
@staticmethod
def custom_parallel_updater(iters, optimizer, converter, devices, accum_grad=1):
"""Get custom_parallel_updater of the model."""
from espnet.nets.chainer_backend.rnn.training import CustomParallelUpdater
return CustomParallelUpdater(
iters,
optimizer,
converter=converter,
devices=devices,
accum_grad=accum_grad,
)
| 7,619 | 32.568282 | 87 | py |
espnet | espnet-master/espnet/nets/chainer_backend/e2e_asr_transformer.py | # encoding: utf-8
"""Transformer-based model for End-to-end ASR."""
import logging
import math
from argparse import Namespace
from distutils.util import strtobool
import chainer
import chainer.functions as F
import numpy as np
from chainer import reporter
from espnet.nets.chainer_backend.asr_interface import ChainerASRInterface
from espnet.nets.chainer_backend.transformer import ctc
from espnet.nets.chainer_backend.transformer.attention import MultiHeadAttention
from espnet.nets.chainer_backend.transformer.decoder import Decoder
from espnet.nets.chainer_backend.transformer.encoder import Encoder
from espnet.nets.chainer_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
from espnet.nets.chainer_backend.transformer.training import ( # noqa: H301
CustomConverter,
CustomParallelUpdater,
CustomUpdater,
)
from espnet.nets.ctc_prefix_score import CTCPrefixScore
from espnet.nets.e2e_asr_common import ErrorCalculator, end_detect
from espnet.nets.pytorch_backend.nets_utils import get_subsample
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
CTC_SCORING_RATIO = 1.5
MAX_DECODER_OUTPUT = 5
class E2E(ChainerASRInterface):
    """Transformer-based E2E ASR module (Chainer backend).

    Args:
        idim (int): Input dimensions.
        odim (int): Output dimensions.
        args (Namespace): Training config.
        ignore_id (int, optional): Id for ignoring a character.
        flag_return (bool, optional): If true, return a list with (loss,
            loss_ctc, loss_att, acc) in forward. Otherwise, return loss.
    """
    @staticmethod
    def add_arguments(parser):
        """Customize flags for transformer setup.
        Args:
            parser (Namespace): Training config.
        """
        group = parser.add_argument_group("transformer model setting")
        group.add_argument(
            "--transformer-init",
            type=str,
            default="pytorch",
            help="how to initialize transformer parameters",
        )
        group.add_argument(
            "--transformer-input-layer",
            type=str,
            default="conv2d",
            choices=["conv2d", "linear", "embed"],
            help="transformer input layer type",
        )
        group.add_argument(
            "--transformer-attn-dropout-rate",
            default=None,
            type=float,
            help="dropout in transformer attention. use --dropout-rate if None is set",
        )
        group.add_argument(
            "--transformer-lr",
            default=10.0,
            type=float,
            help="Initial value of learning rate",
        )
        group.add_argument(
            "--transformer-warmup-steps",
            default=25000,
            type=int,
            help="optimizer warmup steps",
        )
        group.add_argument(
            "--transformer-length-normalized-loss",
            default=True,
            type=strtobool,
            help="normalize loss by length",
        )
        group.add_argument(
            "--dropout-rate",
            default=0.0,
            type=float,
            help="Dropout rate for the encoder",
        )
        # Encoder
        group.add_argument(
            "--elayers",
            default=4,
            type=int,
            help="Number of encoder layers (for shared recognition part "
            "in multi-speaker asr mode)",
        )
        group.add_argument(
            "--eunits",
            "-u",
            default=300,
            type=int,
            help="Number of encoder hidden units",
        )
        # Attention
        group.add_argument(
            "--adim",
            default=320,
            type=int,
            help="Number of attention transformation dimensions",
        )
        group.add_argument(
            "--aheads",
            default=4,
            type=int,
            help="Number of heads for multi head attention",
        )
        # Decoder
        group.add_argument(
            "--dlayers", default=1, type=int, help="Number of decoder layers"
        )
        group.add_argument(
            "--dunits", default=320, type=int, help="Number of decoder hidden units"
        )
        return parser
    def get_total_subsampling_factor(self):
        """Get total subsampling factor."""
        return self.encoder.conv_subsampling_factor * int(np.prod(self.subsample))
    def __init__(self, idim, odim, args, ignore_id=-1, flag_return=True):
        """Initialize the transformer."""
        chainer.Chain.__init__(self)
        self.mtlalpha = args.mtlalpha
        assert 0 <= self.mtlalpha <= 1, "mtlalpha must be [0,1]"
        if args.transformer_attn_dropout_rate is None:
            args.transformer_attn_dropout_rate = args.dropout_rate
        self.use_label_smoothing = False
        self.char_list = args.char_list
        self.space = args.sym_space
        self.blank = args.sym_blank
        # NOTE(review): scale_emb is stored but not referenced elsewhere in this
        # class — presumably consumed by the embedding layer; confirm.
        self.scale_emb = args.adim**0.5
        # sos and eos share the last vocabulary index
        self.sos = odim - 1
        self.eos = odim - 1
        self.subsample = get_subsample(args, mode="asr", arch="transformer")
        self.ignore_id = ignore_id
        self.reset_parameters(args)
        with self.init_scope():
            self.encoder = Encoder(
                idim=idim,
                attention_dim=args.adim,
                attention_heads=args.aheads,
                linear_units=args.eunits,
                input_layer=args.transformer_input_layer,
                dropout_rate=args.dropout_rate,
                positional_dropout_rate=args.dropout_rate,
                attention_dropout_rate=args.transformer_attn_dropout_rate,
                initialW=self.initialW,
                initial_bias=self.initialB,
            )
            self.decoder = Decoder(
                odim, args, initialW=self.initialW, initial_bias=self.initialB
            )
            self.criterion = LabelSmoothingLoss(
                args.lsm_weight,
                len(args.char_list),
                args.transformer_length_normalized_loss,
            )
            # CTC branch only exists when the CTC weight is non-zero
            if args.mtlalpha > 0.0:
                if args.ctc_type == "builtin":
                    logging.info("Using chainer CTC implementation")
                    self.ctc = ctc.CTC(odim, args.adim, args.dropout_rate)
                else:
                    raise ValueError(
                        'ctc_type must be "builtin": {}'.format(args.ctc_type)
                    )
            else:
                self.ctc = None
        self.dims = args.adim
        self.odim = odim
        self.flag_return = flag_return
        if args.report_cer or args.report_wer:
            self.error_calculator = ErrorCalculator(
                args.char_list,
                args.sym_space,
                args.sym_blank,
                args.report_cer,
                args.report_wer,
            )
        else:
            self.error_calculator = None
        if "Namespace" in str(type(args)):
            self.verbose = 0 if "verbose" not in args else args.verbose
        else:
            self.verbose = 0 if args.verbose is None else args.verbose
    def reset_parameters(self, args):
        """Initialize the Weight according to the give initialize-type.

        Args:
            args (Namespace): Transformer config.
        """
        # NOTE: "gorot_*" (sic) are the accepted option strings; keep the
        # spelling for config compatibility.
        type_init = args.transformer_init
        if type_init == "lecun_uniform":
            logging.info("Using LeCunUniform as Parameter initializer")
            self.initialW = chainer.initializers.LeCunUniform
        elif type_init == "lecun_normal":
            logging.info("Using LeCunNormal as Parameter initializer")
            self.initialW = chainer.initializers.LeCunNormal
        elif type_init == "gorot_uniform":
            logging.info("Using GlorotUniform as Parameter initializer")
            self.initialW = chainer.initializers.GlorotUniform
        elif type_init == "gorot_normal":
            logging.info("Using GlorotNormal as Parameter initializer")
            self.initialW = chainer.initializers.GlorotNormal
        elif type_init == "he_uniform":
            logging.info("Using HeUniform as Parameter initializer")
            self.initialW = chainer.initializers.HeUniform
        elif type_init == "he_normal":
            logging.info("Using HeNormal as Parameter initializer")
            self.initialW = chainer.initializers.HeNormal
        elif type_init == "pytorch":
            logging.info("Using Pytorch initializer")
            self.initialW = chainer.initializers.Uniform
        else:
            logging.info("Using Chainer default as Parameter initializer")
            self.initialW = chainer.initializers.Uniform
        self.initialB = chainer.initializers.Uniform
    def forward(self, xs, ilens, ys_pad, calculate_attentions=False):
        """E2E forward propagation.

        Args:
            xs (chainer.Variable): Batch of padded input sequences fed to the
                encoder.
            ilens (chainer.Variable): Batch of length of each input batch. (B,)
            ys_pad (chainer.Variable): Batch of padded target label id sequences.
            calculate_attentions (bool): If true, only run the decoder to
                populate attention maps (no loss is returned for that call path).

        Returns:
            float: Training loss.
            float (optional): Training loss for ctc.
            float (optional): Training loss for attention.
            float (optional): Accuracy.
            chainer.Variable (Optional): Output of the encoder.
        """
        alpha = self.mtlalpha
        # 1. Encoder
        xs, x_mask, ilens = self.encoder(xs, ilens)
        # 2. CTC loss
        cer_ctc = None
        if alpha == 0.0:
            loss_ctc = None
        else:
            _ys = [y.astype(np.int32) for y in ys_pad]
            loss_ctc = self.ctc(xs, _ys)
            if self.error_calculator is not None:
                with chainer.no_backprop_mode():
                    ys_hat = chainer.backends.cuda.to_cpu(self.ctc.argmax(xs).data)
                cer_ctc = self.error_calculator(ys_hat, ys_pad, is_ctc=True)
        # 3. Decoder
        if calculate_attentions:
            self.calculate_attentions(xs, x_mask, ys_pad)
        ys = self.decoder(ys_pad, xs, x_mask)
        # 4. Attention Loss
        cer, wer = None, None
        if alpha == 1:
            loss_att = None
            acc = None
        else:
            # Make target: append eos to each reference, pad with the
            # criterion's ignore label (-1)
            eos = np.array([self.eos], "i")
            with chainer.no_backprop_mode():
                ys_pad_out = [np.concatenate([y, eos], axis=0) for y in ys_pad]
                ys_pad_out = F.pad_sequence(ys_pad_out, padding=-1).data
                ys_pad_out = self.xp.array(ys_pad_out)
            loss_att = self.criterion(ys, ys_pad_out)
            acc = F.accuracy(
                ys.reshape(-1, self.odim), ys_pad_out.reshape(-1), ignore_label=-1
            )
            if (not chainer.config.train) and (self.error_calculator is not None):
                cer, wer = self.error_calculator(ys, ys_pad)
        # interpolate the two losses by mtlalpha
        if alpha == 0.0:
            self.loss = loss_att
            loss_att_data = loss_att.data
            loss_ctc_data = None
        elif alpha == 1.0:
            self.loss = loss_ctc
            loss_att_data = None
            loss_ctc_data = loss_ctc.data
        else:
            self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
            loss_att_data = loss_att.data
            loss_ctc_data = loss_ctc.data
        loss_data = self.loss.data
        if not math.isnan(loss_data):
            reporter.report({"loss_ctc": loss_ctc_data}, self)
            reporter.report({"loss_att": loss_att_data}, self)
            reporter.report({"acc": acc}, self)
            reporter.report({"cer_ctc": cer_ctc}, self)
            reporter.report({"cer": cer}, self)
            reporter.report({"wer": wer}, self)
            logging.info("mtl loss:" + str(loss_data))
            reporter.report({"loss": loss_data}, self)
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        if self.flag_return:
            # NOTE(review): loss_ctc is overwritten with None here, so callers
            # using flag_return never receive the CTC loss — looks unintended;
            # confirm against the RNN backend, which returns the real loss_ctc.
            loss_ctc = None
            return self.loss, loss_ctc, loss_att, acc
        else:
            return self.loss
    def calculate_attentions(self, xs, x_mask, ys_pad):
        """Run the decoder only so its attention maps are populated."""
        self.decoder(ys_pad, xs, x_mask)
    def recognize(self, x_block, recog_args, char_list=None, rnnlm=None):
        """E2E recognition function.

        Args:
            x_block (ndarray): Input acoustic feature (T, D) for one utterance.
            recog_args (Namespace): Argument namespace containing options.
            char_list (List[str]): List of characters.
            rnnlm (chainer.Chain): Language model module defined at
                `espnet.lm.chainer_backend.lm`.

        Returns:
            List: N-best decoding results.
        """
        with chainer.no_backprop_mode(), chainer.using_config("train", False):
            # 1. encoder
            ilens = [x_block.shape[0]]
            batch = len(ilens)
            xs, _, _ = self.encoder(x_block[None, :, :], ilens)
            # calculate log P(z_t|X) for CTC scores
            if recog_args.ctc_weight > 0.0:
                lpz = self.ctc.log_softmax(xs.reshape(batch, -1, self.dims)).data[0]
            else:
                lpz = None
            # 2. decoder
            if recog_args.lm_weight == 0.0:
                rnnlm = None
            y = self.recognize_beam(xs, lpz, recog_args, char_list, rnnlm)
        return y
    def recognize_beam(self, h, lpz, recog_args, char_list=None, rnnlm=None):
        """E2E beam search.

        Joint CTC/attention one-pass beam search (optionally fused with an
        external RNN language model).

        Args:
            h (ndarray): Encoder output features (B, T, D) or (T, D).
            lpz (ndarray): Log probabilities from CTC.
            recog_args (Namespace): Argument namespace containing options.
            char_list (List[str]): List of characters.
            rnnlm (chainer.Chain): Language model module defined at
                `espnet.lm.chainer_backend.lm`.

        Returns:
            List: N-best decoding results.
        """
        logging.info("input lengths: " + str(h.shape[1]))
        # initialization
        n_len = h.shape[1]
        xp = self.xp
        h_mask = xp.ones((1, n_len))
        # search parms
        beam = recog_args.beam_size
        penalty = recog_args.penalty
        ctc_weight = recog_args.ctc_weight
        # prepare sos
        y = self.sos
        if recog_args.maxlenratio == 0:
            maxlen = n_len
        else:
            maxlen = max(1, int(recog_args.maxlenratio * n_len))
        minlen = int(recog_args.minlenratio * n_len)
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))
        # initialize hypothesis
        if rnnlm:
            hyp = {"score": 0.0, "yseq": [y], "rnnlm_prev": None}
        else:
            hyp = {"score": 0.0, "yseq": [y]}
        if lpz is not None:
            ctc_prefix_score = CTCPrefixScore(lpz, 0, self.eos, self.xp)
            hyp["ctc_state_prev"] = ctc_prefix_score.initial_state()
            hyp["ctc_score_prev"] = 0.0
            if ctc_weight != 1.0:
                # pre-pruning based on attention scores
                ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
            else:
                ctc_beam = lpz.shape[-1]
        hyps = [hyp]
        ended_hyps = []
        for i in range(maxlen):
            logging.debug("position " + str(i))
            hyps_best_kept = []
            for hyp in hyps:
                ys = F.expand_dims(xp.array(hyp["yseq"]), axis=0).data
                out = self.decoder(ys, h, h_mask)
                # get nbest local scores and their ids
                local_att_scores = F.log_softmax(out[:, -1], axis=-1).data
                if rnnlm:
                    rnnlm_state, local_lm_scores = rnnlm.predict(
                        hyp["rnnlm_prev"], hyp["yseq"][i]
                    )
                    local_scores = (
                        local_att_scores + recog_args.lm_weight * local_lm_scores
                    )
                else:
                    local_scores = local_att_scores
                if lpz is not None:
                    # combine attention and incremental CTC prefix scores for
                    # the pre-pruned candidate set only
                    local_best_ids = xp.argsort(local_scores, axis=1)[0, ::-1][
                        :ctc_beam
                    ]
                    ctc_scores, ctc_states = ctc_prefix_score(
                        hyp["yseq"], local_best_ids, hyp["ctc_state_prev"]
                    )
                    local_scores = (1.0 - ctc_weight) * local_att_scores[
                        :, local_best_ids
                    ] + ctc_weight * (ctc_scores - hyp["ctc_score_prev"])
                    if rnnlm:
                        local_scores += (
                            recog_args.lm_weight * local_lm_scores[:, local_best_ids]
                        )
                    joint_best_ids = xp.argsort(local_scores, axis=1)[0, ::-1][:beam]
                    local_best_scores = local_scores[:, joint_best_ids]
                    local_best_ids = local_best_ids[joint_best_ids]
                else:
                    local_best_ids = self.xp.argsort(local_scores, axis=1)[0, ::-1][
                        :beam
                    ]
                    local_best_scores = local_scores[:, local_best_ids]
                for j in range(beam):
                    new_hyp = {}
                    new_hyp["score"] = hyp["score"] + float(local_best_scores[0, j])
                    new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"]))
                    new_hyp["yseq"][: len(hyp["yseq"])] = hyp["yseq"]
                    new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[j])
                    if rnnlm:
                        new_hyp["rnnlm_prev"] = rnnlm_state
                    if lpz is not None:
                        new_hyp["ctc_state_prev"] = ctc_states[joint_best_ids[j]]
                        new_hyp["ctc_score_prev"] = ctc_scores[joint_best_ids[j]]
                    hyps_best_kept.append(new_hyp)
                hyps_best_kept = sorted(
                    hyps_best_kept, key=lambda x: x["score"], reverse=True
                )[:beam]
            # sort and get nbest
            hyps = hyps_best_kept
            logging.debug("number of pruned hypothesis: " + str(len(hyps)))
            if char_list is not None:
                logging.debug(
                    "best hypo: "
                    + "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]])
                    + " score: "
                    + str(hyps[0]["score"])
                )
            # add eos in the final loop to avoid that there are no ended hyps
            if i == maxlen - 1:
                logging.info("adding <eos> in the last position in the loop")
                for hyp in hyps:
                    hyp["yseq"].append(self.eos)
            # add ended hypothes to a final list, and removed them from current hypothes
            # (this will be a probmlem, number of hyps < beam)
            remained_hyps = []
            for hyp in hyps:
                if hyp["yseq"][-1] == self.eos:
                    # only store the sequence that has more than minlen outputs
                    # also add penalty
                    if len(hyp["yseq"]) > minlen:
                        hyp["score"] += (i + 1) * penalty
                        if rnnlm:  # Word LM needs to add final <eos> score
                            hyp["score"] += recog_args.lm_weight * rnnlm.final(
                                hyp["rnnlm_prev"]
                            )
                        ended_hyps.append(hyp)
                else:
                    remained_hyps.append(hyp)
            # end detection
            if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
                logging.info("end detected at %d", i)
                break
            hyps = remained_hyps
            if len(hyps) > 0:
                logging.debug("remained hypothes: " + str(len(hyps)))
            else:
                logging.info("no hypothesis. Finish decoding.")
                break
            if char_list is not None:
                for hyp in hyps:
                    logging.debug(
                        "hypo: " + "".join([char_list[int(x)] for x in hyp["yseq"][1:]])
                    )
        logging.debug("number of ended hypothes: " + str(len(ended_hyps)))
        nbest_hyps = sorted(
            ended_hyps, key=lambda x: x["score"], reverse=True
        )  # [:min(len(ended_hyps), recog_args.nbest)]
        logging.debug(nbest_hyps)
        # check number of hypotheis
        if len(nbest_hyps) == 0:
            logging.warn(
                "there is no N-best results, perform recognition "
                "again with smaller minlenratio."
            )
            # should copy becasuse Namespace will be overwritten globally
            recog_args = Namespace(**vars(recog_args))
            recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
            # retry recursively with a relaxed minimum-length constraint
            return self.recognize_beam(h, lpz, recog_args, char_list, rnnlm)
        logging.info("total log probability: " + str(nbest_hyps[0]["score"]))
        logging.info(
            "normalized log probability: "
            + str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"]))
        )
        # remove sos
        return nbest_hyps
    def calculate_all_attentions(self, xs, ilens, ys):
        """E2E attention calculation.

        Args:
            xs (List[tuple()]): List of padded input sequences.
                [(T1, idim), (T2, idim), ...]
            ilens (ndarray): Batch of lengths of input sequences. (B)
            ys (List): List of character id sequence tensor. [(L1), (L2), (L3), ...]

        Returns:
            float ndarray: Attention weights. (B, Lmax, Tmax)
        """
        with chainer.no_backprop_mode():
            # run forward once just to populate the attention buffers
            self(xs, ilens, ys, calculate_attentions=True)
        ret = dict()
        # collect the attention map of every MultiHeadAttention sub-link
        for name, m in self.namedlinks():
            if isinstance(m, MultiHeadAttention):
                var = m.attn
                var.to_cpu()
                _name = name[1:].replace("/", "_")
                ret[_name] = var.data
        return ret
    @property
    def attention_plot_class(self):
        """Attention plot function.

        Redirects to PlotAttentionReport

        Returns:
            PlotAttentionReport
        """
        return PlotAttentionReport
    @staticmethod
    def custom_converter(subsampling_factor=0):
        """Get customconverter of the model."""
        return CustomConverter()
    @staticmethod
    def custom_updater(iters, optimizer, converter, device=-1, accum_grad=1):
        """Get custom_updater of the model."""
        return CustomUpdater(
            iters, optimizer, converter=converter, device=device, accum_grad=accum_grad
        )
    @staticmethod
    def custom_parallel_updater(iters, optimizer, converter, devices, accum_grad=1):
        """Get custom_parallel_updater of the model."""
        return CustomParallelUpdater(
            iters,
            optimizer,
            converter=converter,
            devices=devices,
            accum_grad=accum_grad,
        )
| 23,226 | 36.282504 | 88 | py |
espnet | espnet-master/espnet/nets/chainer_backend/rnn/training.py | # Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import collections
import logging
import math
import numpy as np
# chainer related
from chainer import Variable, cuda, training
from chainer.training.updaters.multiprocess_parallel_updater import (
gather_grads,
gather_params,
scatter_grads,
)
# copied from https://github.com/chainer/chainer/blob/master/chainer/optimizer.py
def sum_sqnorm(arr):
    """Calculate the norm of the array.

    Args:
        arr (numpy.ndarray)

    Returns:
        Float: Sum of the norm calculated from the given array.
    """
    # Accumulate the squared L2 norm per device, then total them on the host.
    per_device = collections.defaultdict(float)
    for grad in arr:
        with cuda.get_device_from_array(grad) as dev:
            if grad is not None:
                flat = grad.ravel()
                per_device[int(dev)] += flat.dot(flat)
    return sum(float(v) for v in per_device.values())
class CustomUpdater(training.StandardUpdater):
    """Custom updater for chainer.

    Args:
        train_iter (iterator | dict[str, iterator]): Dataset iterator for the
            training dataset. It can also be a dictionary that maps strings to
            iterators. If this is just an iterator, then the iterator is
            registered by the name ``'main'``.
        optimizer (optimizer | dict[str, optimizer]): Optimizer to update
            parameters. It can also be a dictionary that maps strings to
            optimizers. If this is just an optimizer, then the optimizer is
            registered by the name ``'main'``.
        converter (espnet.asr.chainer_backend.asr.CustomConverter): Converter
            function to build input arrays. Each batch extracted by the main
            iterator and the ``device`` option are passed to this function.
            :func:`chainer.dataset.concat_examples` is used by default.
        device (int or dict): The destination device info to send variables. In the
            case of cpu or single gpu, `device=-1 or 0`, respectively.
            In the case of multi-gpu, `device={"main":0, "sub_1": 1, ...}`.
        accum_grad (int):The number of gradient accumulation. if set to 2, the network
            parameters will be updated once in twice,
            i.e. actual batchsize will be doubled.
    """
    def __init__(self, train_iter, optimizer, converter, device, accum_grad=1):
        super(CustomUpdater, self).__init__(
            train_iter, optimizer, converter=converter, device=device
        )
        # counts forward passes since the last parameter update
        self.forward_count = 0
        self.accum_grad = accum_grad
        # True until the first update_core call; used to clear stale gradients once
        self.start = True
        # To solve #1091, it is required to set the variable inside this class.
        self.device = device
    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        """Main update routine for Custom Updater."""
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")
        # Get batch and convert into variables
        batch = train_iter.next()
        x = self.converter(batch, self.device)
        if self.start:
            optimizer.target.cleargrads()
            self.start = False
        # Compute the loss at this time step and accumulate it
        loss = optimizer.target(*x) / self.accum_grad
        loss.backward()  # Backprop
        loss.unchain_backward()  # Truncate the graph
        # update parameters
        self.forward_count += 1
        # defer the optimizer step until accum_grad forwards have accumulated
        if self.forward_count != self.accum_grad:
            return
        self.forward_count = 0
        # compute the gradient norm to check if it is normal or not
        grad_norm = np.sqrt(
            sum_sqnorm([p.grad for p in optimizer.target.params(False)])
        )
        logging.info("grad norm={}".format(grad_norm))
        if math.isnan(grad_norm):
            logging.warning("grad norm is nan. Do not update model.")
        else:
            optimizer.update()
        optimizer.target.cleargrads()  # Clear the parameter gradients
    def update(self):
        """Run one update step; the iteration advances only after a real update."""
        self.update_core()
        if self.forward_count == 0:
            self.iteration += 1
class CustomParallelUpdater(training.updaters.MultiprocessParallelUpdater):
    """Custom Parallel Updater for chainer.

    Defines the main update routine.

    Args:
        train_iter (iterator | dict[str, iterator]): Dataset iterator for the
            training dataset. It can also be a dictionary that maps strings to
            iterators. If this is just an iterator, then the iterator is
            registered by the name ``'main'``.
        optimizer (optimizer | dict[str, optimizer]): Optimizer to update
            parameters. It can also be a dictionary that maps strings to
            optimizers. If this is just an optimizer, then the optimizer is
            registered by the name ``'main'``.
        converter (espnet.asr.chainer_backend.asr.CustomConverter): Converter
            function to build input arrays. Each batch extracted by the main
            iterator and the ``device`` option are passed to this function.
            :func:`chainer.dataset.concat_examples` is used by default.
        device (torch.device): Device to which the training data is sent.
            Negative value
            indicates the host memory (CPU).
        accum_grad (int):The number of gradient accumulation. if set to 2,
            the network parameters will be updated once in twice,
            i.e. actual batchsize will be doubled.
    """
    def __init__(self, train_iters, optimizer, converter, devices, accum_grad=1):
        super(CustomParallelUpdater, self).__init__(
            train_iters, optimizer, converter=converter, devices=devices
        )
        # imported lazily so CPU-only environments can still import this module
        from cupy.cuda import nccl
        self.accum_grad = accum_grad
        self.forward_count = 0
        self.nccl = nccl
    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        """Main Update routine of the custom parallel updater."""
        self.setup_workers()
        self._send_message(("update", None))
        with cuda.Device(self._devices[0]):
            # For reducing memory
            optimizer = self.get_optimizer("main")
            batch = self.get_iterator("main").next()
            x = self.converter(batch, self._devices[0])
            loss = self._master(*x) / self.accum_grad
            loss.backward()
            loss.unchain_backward()
            # NCCL: reduce grads across workers onto the master device
            null_stream = cuda.Stream.null
            if self.comm is not None:
                gg = gather_grads(self._master)
                self.comm.reduce(
                    gg.data.ptr,
                    gg.data.ptr,
                    gg.size,
                    self.nccl.NCCL_FLOAT,
                    self.nccl.NCCL_SUM,
                    0,
                    null_stream.ptr,
                )
                scatter_grads(self._master, gg)
                del gg
            # update parameters
            self.forward_count += 1
            # defer the optimizer step until accum_grad forwards have accumulated
            if self.forward_count != self.accum_grad:
                return
            self.forward_count = 0
            # check gradient value
            grad_norm = np.sqrt(
                sum_sqnorm([p.grad for p in optimizer.target.params(False)])
            )
            logging.info("grad norm={}".format(grad_norm))
            # update
            if math.isnan(grad_norm):
                logging.warning("grad norm is nan. Do not update model.")
            else:
                optimizer.update()
                self._master.cleargrads()
            # broadcast the updated parameters back to every worker
            if self.comm is not None:
                gp = gather_params(self._master)
                self.comm.bcast(
                    gp.data.ptr, gp.size, self.nccl.NCCL_FLOAT, 0, null_stream.ptr
                )
    def update(self):
        """Run one update step; the iteration advances only after a real update."""
        self.update_core()
        if self.forward_count == 0:
            self.iteration += 1
class CustomConverter(object):
    """Batch converter that applies frame subsampling before training.

    Args:
        subsampling_factor (int): The subsampling factor.
    """
    def __init__(self, subsampling_factor=1):
        self.subsampling_factor = subsampling_factor
    def __call__(self, batch, device):
        """Perform subsampling and convert a mini-batch to model inputs.

        Args:
            batch (list): Single-element list holding an (inputs, targets) pair.
            device (device): GPU device id; -1 selects the CPU.

        Returns:
            chainer.Variable: Subsampled input feature sequences.
            xp.array: Lengths of the (subsampled) input sequences.
            chainer.Variable: Target label sequences.
        """
        # pick numpy or cupy depending on the requested device
        xp = np if device == -1 else cuda.cupy
        # batch should be located in list
        assert len(batch) == 1
        feats, labels = batch[0]
        # drop frames according to the subsampling factor
        if self.subsampling_factor > 1:
            feats = [f[:: self.subsampling_factor, :] for f in feats]
        # lengths are taken after subsampling
        input_lengths = xp.array([f.shape[0] for f in feats], dtype=xp.int32)
        # wrap everything as chainer Variables on the target device
        feats = [Variable(xp.array(f, dtype=xp.float32)) for f in feats]
        labels = [Variable(xp.array(t, dtype=xp.int32)) for t in labels]
        return feats, input_lengths, labels
| 9,245 | 34.561538 | 86 | py |
espnet | espnet-master/espnet/nets/chainer_backend/transformer/training.py | # Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Class Declaration of Transformer's Training Subprocess."""
import collections
import logging
import math
import numpy as np
from chainer import cuda
from chainer import functions as F
from chainer import training
from chainer.training import extension
from chainer.training.updaters.multiprocess_parallel_updater import (
gather_grads,
gather_params,
scatter_grads,
)
# copied from https://github.com/chainer/chainer/blob/master/chainer/optimizer.py
def sum_sqnorm(arr):
    """Calculate the norm of the array.

    Args:
        arr (numpy.ndarray)

    Returns:
        Float: Sum of the norm calculated from the given array.
    """
    # Accumulate the squared L2 norm per device, then total them on the host.
    per_device = collections.defaultdict(float)
    for grad in arr:
        with cuda.get_device_from_array(grad) as dev:
            if grad is not None:
                flat = grad.ravel()
                per_device[int(dev)] += flat.dot(flat)
    return sum(float(v) for v in per_device.values())
class CustomUpdater(training.StandardUpdater):
    """Custom updater for chainer.

    Args:
        train_iter (iterator | dict[str, iterator]): Dataset iterator for the
            training dataset. It can also be a dictionary that maps strings to
            iterators. If this is just an iterator, then the iterator is
            registered by the name ``'main'``.
        optimizer (optimizer | dict[str, optimizer]): Optimizer to update
            parameters. It can also be a dictionary that maps strings to
            optimizers. If this is just an optimizer, then the optimizer is
            registered by the name ``'main'``.
        converter (espnet.asr.chainer_backend.asr.CustomConverter): Converter
            function to build input arrays. Each batch extracted by the main
            iterator and the ``device`` option are passed to this function.
            :func:`chainer.dataset.concat_examples` is used by default.
        device (int or dict): The destination device info to send variables. In the
            case of cpu or single gpu, `device=-1 or 0`, respectively.
            In the case of multi-gpu, `device={"main":0, "sub_1": 1, ...}`.
        accum_grad (int):The number of gradient accumulation. if set to 2, the network
            parameters will be updated once in twice,
            i.e. actual batchsize will be doubled.
    """
    def __init__(self, train_iter, optimizer, converter, device, accum_grad=1):
        """Initialize Custom Updater."""
        super(CustomUpdater, self).__init__(
            train_iter, optimizer, converter=converter, device=device
        )
        self.accum_grad = accum_grad
        # counts forward passes since the last parameter update
        self.forward_count = 0
        # True until the first update_core call; used to clear stale gradients once
        self.start = True
        self.device = device
        logging.debug("using custom converter for transformer")
    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        """Process main update routine for Custom Updater."""
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")
        # Get batch and convert into variables
        batch = train_iter.next()
        x = self.converter(batch, self.device)
        if self.start:
            optimizer.target.cleargrads()
            self.start = False
        # Compute the loss at this time step and accumulate it
        loss = optimizer.target(*x) / self.accum_grad
        loss.backward()  # Backprop
        self.forward_count += 1
        # defer the optimizer step until accum_grad forwards have accumulated
        if self.forward_count != self.accum_grad:
            return
        self.forward_count = 0
        # compute the gradient norm to check if it is normal or not
        grad_norm = np.sqrt(
            sum_sqnorm([p.grad for p in optimizer.target.params(False)])
        )
        logging.info("grad norm={}".format(grad_norm))
        if math.isnan(grad_norm):
            logging.warning("grad norm is nan. Do not update model.")
        else:
            optimizer.update()
        optimizer.target.cleargrads()  # Clear the parameter gradients
    def update(self):
        """Update step for Custom Updater."""
        self.update_core()
        # advance the iteration counter only after a real optimizer update
        if self.forward_count == 0:
            self.iteration += 1
class CustomParallelUpdater(training.updaters.MultiprocessParallelUpdater):
    """Custom Parallel Updater for chainer.

    Defines the main update routine.

    Args:
        train_iter (iterator | dict[str, iterator]): Dataset iterator for the
            training dataset. It can also be a dictionary that maps strings to
            iterators. If this is just an iterator, then the iterator is
            registered by the name ``'main'``.
        optimizer (optimizer | dict[str, optimizer]): Optimizer to update
            parameters. It can also be a dictionary that maps strings to
            optimizers. If this is just an optimizer, then the optimizer is
            registered by the name ``'main'``.
        converter (espnet.asr.chainer_backend.asr.CustomConverter): Converter
            function to build input arrays. Each batch extracted by the main
            iterator and the ``device`` option are passed to this function.
            :func:`chainer.dataset.concat_examples` is used by default.
        device (torch.device): Device to which the training data is sent. Negative value
            indicates the host memory (CPU).
        accum_grad (int):The number of gradient accumulation. if set to 2, the network
            parameters will be updated once in twice,
            i.e. actual batchsize will be doubled.
    """
    def __init__(self, train_iters, optimizer, converter, devices, accum_grad=1):
        """Initialize custom parallel updater."""
        # imported lazily so CPU-only environments can still import this module
        from cupy.cuda import nccl
        super(CustomParallelUpdater, self).__init__(
            train_iters, optimizer, converter=converter, devices=devices
        )
        self.accum_grad = accum_grad
        # counts forward passes since the last parameter update
        self.forward_count = 0
        self.nccl = nccl
        logging.debug("using custom parallel updater for transformer")
    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        """Process main update routine for Custom Parallel Updater."""
        self.setup_workers()
        self._send_message(("update", None))
        with cuda.Device(self._devices[0]):
            # For reducing memory
            optimizer = self.get_optimizer("main")
            batch = self.get_iterator("main").next()
            x = self.converter(batch, self._devices[0])
            loss = self._master(*x) / self.accum_grad
            loss.backward()
            # NCCL: reduce grads across workers onto the master device
            null_stream = cuda.Stream.null
            if self.comm is not None:
                gg = gather_grads(self._master)
                self.comm.reduce(
                    gg.data.ptr,
                    gg.data.ptr,
                    gg.size,
                    self.nccl.NCCL_FLOAT,
                    self.nccl.NCCL_SUM,
                    0,
                    null_stream.ptr,
                )
                scatter_grads(self._master, gg)
                del gg
            # update parameters
            self.forward_count += 1
            # defer the optimizer step until accum_grad forwards have accumulated
            if self.forward_count != self.accum_grad:
                return
            self.forward_count = 0
            # check gradient value
            grad_norm = np.sqrt(
                sum_sqnorm([p.grad for p in optimizer.target.params(False)])
            )
            logging.info("grad norm={}".format(grad_norm))
            # update
            if math.isnan(grad_norm):
                logging.warning("grad norm is nan. Do not update model.")
            else:
                optimizer.update()
                self._master.cleargrads()
            # broadcast the updated parameters back to every worker
            if self.comm is not None:
                gp = gather_params(self._master)
                self.comm.bcast(
                    gp.data.ptr, gp.size, self.nccl.NCCL_FLOAT, 0, null_stream.ptr
                )
    def update(self):
        """Update step for Custom Parallel Updater."""
        self.update_core()
        # advance the iteration counter only after a real optimizer update
        if self.forward_count == 0:
            self.iteration += 1
class VaswaniRule(extension.Extension):
    """Trainer extension to shift an optimizer attribute magically by Vaswani.

    Implements the "Noam" schedule from "Attention Is All You Need":
    ``value = scale * d^-0.5 * min(t^-0.5, t * warmup_steps^-1.5)``.

    Args:
        attr (str): Name of the attribute to shift.
        rate (float): Rate of the exponential shift. This value is multiplied
            to the attribute at each call.
        init (float): Initial value of the attribute. If it is ``None``, the
            extension extracts the attribute at the first call and uses it as
            the initial value.
        target (float): Target value of the attribute. If the attribute reaches
            this value, the shift stops.
        optimizer (~chainer.Optimizer): Target optimizer to adjust the
            attribute. If it is ``None``, the main optimizer of the updater is
            used.
    """

    def __init__(
        self,
        attr,
        d,
        warmup_steps=4000,
        init=None,
        target=None,
        optimizer=None,
        scale=1.0,
    ):
        """Initialize Vaswani rule extension."""
        self._attr = attr
        # Pre-compute the constant factors of the schedule.
        self._d_inv05 = scale * d ** (-0.5)
        self._warmup_steps_inv15 = warmup_steps ** (-1.5)
        self._init = init
        self._target = target
        self._optimizer = optimizer
        self._t = 0
        self._last_value = None

    def initialize(self, trainer):
        """Initialize Optimizer values."""
        optimizer = self._get_optimizer(trainer)
        # Default initial value is the schedule evaluated at step t == 1.
        if self._init is None:
            self._init = self._d_inv05 * (1.0 * self._warmup_steps_inv15)
        if self._last_value is None:
            self._update_value(optimizer, self._init)
        else:
            # A non-None last value means we are resuming from a snapshot.
            self._update_value(optimizer, self._last_value)

    def __call__(self, trainer):
        """Advance one step and set the scheduled attribute value."""
        self._t += 1
        new_value = self._d_inv05 * min(
            self._t ** (-0.5), self._t * self._warmup_steps_inv15
        )
        self._update_value(self._get_optimizer(trainer), new_value)

    def serialize(self, serializer):
        """Serialize / deserialize the step counter and last value."""
        self._t = serializer("_t", self._t)
        self._last_value = serializer("_last_value", self._last_value)

    def _get_optimizer(self, trainer):
        """Return the configured optimizer, defaulting to the trainer's main one."""
        return self._optimizer or trainer.updater.get_optimizer("main")

    def _update_value(self, optimizer, value):
        """Assign ``value`` to the scheduled attribute and remember it."""
        setattr(optimizer, self._attr, value)
        self._last_value = value
class CustomConverter(object):
    """Batch converter for the chainer transformer training loop.

    Pads the input sequences of a one-element batch and derives their
    lengths; targets are passed through unchanged.
    """

    def __init__(self):
        """Initialize converter (stateless)."""
        pass

    def __call__(self, batch, device):
        """Pad a mini-batch and compute input lengths.

        Args:
            batch (list): One-element list wrapping an (inputs, targets) pair.
            device (chainer.backend.Device): CPU or GPU device.

        Returns:
            chainer.Variable: Padded input array.
            np.ndarray: int32 lengths of the (padded) input sequences.
            chainer.Variable: Target sequences, unchanged.
        """
        # For the transformer, data is processed on CPU; the batch must be a
        # single (inputs, targets) pair wrapped in a list.
        assert len(batch) == 1
        inputs, targets = batch[0]
        padded = F.pad_sequence(inputs, padding=-1).data
        # NOTE(review): lengths are taken from the rows of the *padded* array,
        # so every entry equals the padded length — kept as in the original.
        lengths = np.array([row.shape[0] for row in padded], dtype=np.int32)
        return padded, lengths, targets
| 11,798 | 35.642857 | 88 | py |
espnet | espnet-master/espnet/nets/scorers/length_bonus.py | """Length bonus module."""
from typing import Any, List, Tuple
import torch
from espnet.nets.scorer_interface import BatchScorerInterface
class LengthBonus(BatchScorerInterface):
    """Length bonus scorer that adds a uniform +1.0 per emitted token."""

    def __init__(self, n_vocab: int):
        """Initialize class.

        Args:
            n_vocab (int): The number of tokens in vocabulary for beam search
        """
        self.n = n_vocab

    def score(self, y, state, x):
        """Score new token.

        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens
            x (torch.Tensor): 2D encoder feature that generates ys.

        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                torch.float32 scores for next token (n_vocab)
                and None

        """
        # Every candidate token receives the same constant bonus.
        bonus = torch.tensor([1.0], device=x.device, dtype=x.dtype)
        return bonus.expand(self.n), None

    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.

        """
        n_batch = ys.shape[0]
        bonus = torch.tensor([1.0], device=xs.device, dtype=xs.dtype)
        return bonus.expand(n_batch, self.n), None
| 1,740 | 28.016667 | 87 | py |
espnet | espnet-master/espnet/nets/scorers/ctc.py | """ScorerInterface implementation for CTC."""
import numpy as np
import torch
from espnet.nets.ctc_prefix_score import CTCPrefixScore, CTCPrefixScoreTH
from espnet.nets.scorer_interface import BatchPartialScorerInterface
class CTCPrefixScorer(BatchPartialScorerInterface):
    """Decoder interface wrapper for CTCPrefixScore.

    Wraps the numpy-based :class:`CTCPrefixScore` (single-hypothesis path)
    and the batched :class:`CTCPrefixScoreTH` behind the scorer interface
    used by beam search.
    """

    def __init__(self, ctc: torch.nn.Module, eos: int):
        """Initialize class.

        Args:
            ctc (torch.nn.Module): The CTC implementation.
                For example, :class:`espnet.nets.pytorch_backend.ctc.CTC`
            eos (int): The end-of-sequence id.

        """
        self.ctc = ctc
        self.eos = eos
        # Set lazily by init_state / batch_init_state per utterance.
        self.impl = None

    def init_state(self, x: torch.Tensor):
        """Get an initial state for decoding.

        Args:
            x (torch.Tensor): The encoded feature tensor

        Returns: initial state

        """
        logp = self.ctc.log_softmax(x.unsqueeze(0)).detach().squeeze(0).cpu().numpy()
        # TODO(karita): use CTCPrefixScoreTH
        self.impl = CTCPrefixScore(logp, 0, self.eos, np)
        # State is (accumulated prefix score, CTCPrefixScore internal state).
        return 0, self.impl.initial_state()

    def select_state(self, state, i, new_id=None):
        """Select state with relative ids in the main beam search.

        Args:
            state: Decoder state for prefix tokens
            i (int): Index to select a state in the main beam search
            new_id (int): New label id to select a state if necessary

        Returns:
            state: pruned state

        """
        # Two tuple layouts are handled: a 2-tuple from CTCPrefixScore and a
        # 5-tuple from CTCPrefixScoreTH.
        if type(state) == tuple:
            if len(state) == 2:  # for CTCPrefixScore
                sc, st = state
                return sc[i], st[i]
            else:  # for CTCPrefixScoreTH (need new_id > 0)
                r, log_psi, f_min, f_max, scoring_idmap = state
                s = log_psi[i, new_id].expand(log_psi.size(1))
                # scoring_idmap maps full vocab ids to pruned scoring indices
                # when only a subset of tokens was scored.
                if scoring_idmap is not None:
                    return r[:, :, i, scoring_idmap[i, new_id]], s, f_min, f_max
                else:
                    return r[:, :, i, new_id], s, f_min, f_max
        return None if state is None else state[i]

    def score_partial(self, y, ids, state, x):
        """Score new token.

        Args:
            y (torch.Tensor): 1D prefix token
            next_tokens (torch.Tensor): torch.int64 next token to score
            state: decoder state for prefix tokens
            x (torch.Tensor): 2D encoder feature that generates ys

        Returns:
            tuple[torch.Tensor, Any]:
                Tuple of a score tensor for y that has a shape `(len(next_tokens),)`
                and next state for ys

        """
        prev_score, state = state
        presub_score, new_st = self.impl(y.cpu(), ids.cpu(), state)
        # CTCPrefixScore returns cumulative scores; subtract the previous
        # cumulative score to get this step's incremental contribution.
        tscore = torch.as_tensor(
            presub_score - prev_score, device=x.device, dtype=x.dtype
        )
        return tscore, (presub_score, new_st)

    def batch_init_state(self, x: torch.Tensor):
        """Get an initial state for decoding.

        Args:
            x (torch.Tensor): The encoded feature tensor

        Returns: initial state

        """
        logp = self.ctc.log_softmax(x.unsqueeze(0))  # assuming batch_size = 1
        xlen = torch.tensor([logp.size(1)])
        self.impl = CTCPrefixScoreTH(logp, xlen, 0, self.eos)
        # CTCPrefixScoreTH keeps its own state; nothing to return here.
        return None

    def batch_score_partial(self, y, ids, state, x):
        """Score new token.

        Args:
            y (torch.Tensor): 1D prefix token
            ids (torch.Tensor): torch.int64 next token to score
            state: decoder state for prefix tokens
            x (torch.Tensor): 2D encoder feature that generates ys

        Returns:
            tuple[torch.Tensor, Any]:
                Tuple of a score tensor for y that has a shape `(len(next_tokens),)`
                and next state for ys

        """
        # Re-batch the per-hypothesis 4-tuples produced by select_state into
        # the stacked layout CTCPrefixScoreTH expects; f_min/f_max are shared.
        batch_state = (
            (
                torch.stack([s[0] for s in state], dim=2),
                torch.stack([s[1] for s in state]),
                state[0][2],
                state[0][3],
            )
            if state[0] is not None
            else None
        )
        return self.impl(y, batch_state, ids)

    def extend_prob(self, x: torch.Tensor):
        """Extend probs for decoding.

        This extension is for streaming decoding
        as in Eq (14) in https://arxiv.org/abs/2006.14941

        Args:
            x (torch.Tensor): The encoded feature tensor

        """
        logp = self.ctc.log_softmax(x.unsqueeze(0))
        self.impl.extend_prob(logp)

    def extend_state(self, state):
        """Extend state for decoding.

        This extension is for streaming decoding
        as in Eq (14) in https://arxiv.org/abs/2006.14941

        Args:
            state: The states of hyps

        Returns: exteded state

        """
        new_state = []
        for s in state:
            new_state.append(self.impl.extend_state(s))

        return new_state
| 4,953 | 30.35443 | 85 | py |
espnet | espnet-master/espnet/nets/scorers/ngram.py | """Ngram lm implement."""
from abc import ABC
import kenlm
import torch
from espnet.nets.scorer_interface import BatchScorerInterface, PartialScorerInterface
class Ngrambase(ABC):
    """Base class wrapping a KenLM n-gram model behind the scorer interface."""

    def __init__(self, ngram_model, token_list):
        """Initialize Ngrambase.

        Args:
            ngram_model: ngram model path
            token_list: token list from dict or model.json

        """
        # KenLM uses "</s>" as its end-of-sentence symbol instead of "<eos>".
        self.chardict = ["</s>" if token == "<eos>" else token for token in token_list]
        self.charlen = len(self.chardict)
        self.lm = kenlm.LanguageModel(ngram_model)
        # Scratch state reused for each candidate-token query.
        self.tmpkenlmstate = kenlm.State()

    def init_state(self, x):
        """Return a fresh null-context KenLM state."""
        state = kenlm.State()
        self.lm.NullContextWrite(state)
        return state

    def score_partial_(self, y, next_token, state, x):
        """Score interface for both full and partial scorer.

        Args:
            y: previous char
            next_token: next token need to be score
            state: previous state
            x: encoded feature

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.

        """
        out_state = kenlm.State()
        # Use the sentence-begin symbol on the very first step, otherwise
        # the most recently emitted token.
        if y.shape[0] > 1:
            prev_token = self.chardict[y[-1]]
        else:
            prev_token = "<s>"
        self.lm.BaseScore(state, prev_token, out_state)
        scores = torch.empty_like(next_token, dtype=x.dtype, device=y.device)
        for idx, token_id in enumerate(next_token):
            scores[idx] = self.lm.BaseScore(
                out_state, self.chardict[token_id], self.tmpkenlmstate
            )
        return scores, out_state
class NgramFullScorer(Ngrambase, BatchScorerInterface):
    """Full scorer for ngram: scores every token in the vocabulary."""

    def score(self, y, state, x):
        """Score interface for both full and partial scorer.

        Args:
            y: previous char
            state: previous state
            x: encoded feature

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.

        """
        # Full scoring: every id in the vocabulary is a candidate.
        all_ids = torch.tensor(range(self.charlen))
        return self.score_partial_(y, all_ids, state, x)
class NgramPartScorer(Ngrambase, PartialScorerInterface):
    """Partial scorer for ngram: scores only pre-selected candidate tokens."""

    def score_partial(self, y, next_token, state, x):
        """Score interface for both full and partial scorer.

        Args:
            y: previous char
            next_token: next token need to be score
            state: previous state
            x: encoded feature

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.

        """
        # Delegates directly to the shared base-class implementation.
        return self.score_partial_(y, next_token, state, x)

    def select_state(self, state, i):
        """Return the state unchanged (no per-hypothesis selection needed)."""
        return state
| 3,080 | 29.205882 | 85 | py |
espnet | espnet-master/espnet/nets/scorers/uasr.py | """ScorerInterface implementation for UASR."""
import numpy as np
import torch
from espnet.nets.ctc_prefix_score import CTCPrefixScore, CTCPrefixScoreTH
from espnet.nets.scorers.ctc import CTCPrefixScorer
class UASRPrefixScorer(CTCPrefixScorer):
    """Decoder interface wrapper for CTCPrefixScore.

    Unlike :class:`CTCPrefixScorer`, this variant takes raw logits directly
    (no CTC module) and suppresses the blank symbol before scoring.
    """

    def __init__(self, eos: int):
        """Initialize class.

        Args:
            eos (int): The end-of-sequence id.
        """
        self.eos = eos

    def init_state(self, x: torch.Tensor):
        """Get an initial state for decoding.

        Args:
            x (torch.Tensor): The encoded feature tensor

        Returns: initial state

        """
        # NOTE(review): mutates ``x`` in place — index 0 (blank) is pushed to
        # effectively -inf so the CTC machinery never emits blank.
        x[:, 0] = x[:, 0] - 100000000000  # simulate a no-blank CTC
        self.logp = (
            torch.nn.functional.log_softmax(x, dim=1).detach().squeeze(0).cpu().numpy()
        )
        # TODO(karita): use CTCPrefixScoreTH
        self.impl = CTCPrefixScore(self.logp, 0, self.eos, np)
        return 0, self.impl.initial_state()

    def batch_init_state(self, x: torch.Tensor):
        """Get an initial state for decoding.

        Args:
            x (torch.Tensor): The encoded feature tensor

        Returns: initial state

        """
        # NOTE(review): same in-place blank suppression as init_state.
        x[:, 0] = x[:, 0] - 100000000000  # simulate a no-blank CTC
        logp = torch.nn.functional.log_softmax(x, dim=1).unsqueeze(
            0
        )  # assuming batch_size = 1
        xlen = torch.tensor([logp.size(1)])
        self.impl = CTCPrefixScoreTH(logp, xlen, 0, self.eos)
        return None
| 1,469 | 28.4 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_asr.py | # Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""RNN sequence-to-sequence speech recognition model (pytorch)."""
import argparse
import logging
import math
import os
from itertools import groupby
import chainer
import numpy as np
import torch
from chainer import reporter
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.e2e_asr_common import label_smoothing_dist
from espnet.nets.pytorch_backend.ctc import ctc_for
from espnet.nets.pytorch_backend.frontends.feature_transform import ( # noqa: H301
feature_transform_for,
)
from espnet.nets.pytorch_backend.frontends.frontend import frontend_for
from espnet.nets.pytorch_backend.initialization import (
lecun_normal_init_parameters,
set_forget_bias_to_one,
)
from espnet.nets.pytorch_backend.nets_utils import (
get_subsample,
pad_list,
to_device,
to_torch_tensor,
)
from espnet.nets.pytorch_backend.rnn.argument import ( # noqa: H301
add_arguments_rnn_attention_common,
add_arguments_rnn_decoder_common,
add_arguments_rnn_encoder_common,
)
from espnet.nets.pytorch_backend.rnn.attentions import att_for
from espnet.nets.pytorch_backend.rnn.decoders import decoder_for
from espnet.nets.pytorch_backend.rnn.encoders import encoder_for
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.utils.fill_missing_args import fill_missing_args
# Loss values above this threshold are treated as divergent and not reported.
CTC_LOSS_THRESHOLD = 10000
class Reporter(chainer.Chain):
    """A chainer reporter wrapper."""

    def report(self, loss_ctc, loss_att, acc, cer_ctc, cer, wer, mtl_loss):
        """Report training statistics for the current step."""
        observations = (
            ("loss_ctc", loss_ctc),
            ("loss_att", loss_att),
            ("acc", acc),
            ("cer_ctc", cer_ctc),
            ("cer", cer),
            ("wer", wer),
        )
        for name, value in observations:
            reporter.report({name: value}, self)
        logging.info("mtl loss:" + str(mtl_loss))
        reporter.report({"loss": mtl_loss}, self)
class E2E(ASRInterface, torch.nn.Module):
    """E2E module.

    Hybrid CTC/attention RNN encoder-decoder ASR model with optional neural
    frontend (enhancement + feature transform).

    :param int idim: dimension of inputs
    :param int odim: dimension of outputs
    :param Namespace args: argument Namespace containing options

    """

    @staticmethod
    def add_arguments(parser):
        """Add arguments."""
        E2E.encoder_add_arguments(parser)
        E2E.attention_add_arguments(parser)
        E2E.decoder_add_arguments(parser)
        return parser

    @staticmethod
    def encoder_add_arguments(parser):
        """Add arguments for the encoder."""
        group = parser.add_argument_group("E2E encoder setting")
        group = add_arguments_rnn_encoder_common(group)
        return parser

    @staticmethod
    def attention_add_arguments(parser):
        """Add arguments for the attention."""
        group = parser.add_argument_group("E2E attention setting")
        group = add_arguments_rnn_attention_common(group)
        return parser

    @staticmethod
    def decoder_add_arguments(parser):
        """Add arguments for the decoder."""
        group = parser.add_argument_group("E2E decoder setting")
        group = add_arguments_rnn_decoder_common(group)
        return parser

    def get_total_subsampling_factor(self):
        """Get total subsampling factor."""
        if isinstance(self.enc, torch.nn.ModuleList):
            return self.enc[0].conv_subsampling_factor * int(np.prod(self.subsample))
        else:
            return self.enc.conv_subsampling_factor * int(np.prod(self.subsample))

    def __init__(self, idim, odim, args):
        """Construct an E2E object.

        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        """
        super(E2E, self).__init__()
        torch.nn.Module.__init__(self)

        # fill missing arguments for compatibility
        args = fill_missing_args(args, self.add_arguments)

        # mtlalpha weights CTC vs attention loss: 0 = pure attention, 1 = pure CTC.
        self.mtlalpha = args.mtlalpha
        assert 0.0 <= self.mtlalpha <= 1.0, "mtlalpha should be [0.0, 1.0]"
        self.etype = args.etype
        self.verbose = args.verbose
        # NOTE: for self.build method
        args.char_list = getattr(args, "char_list", None)
        self.char_list = args.char_list
        self.outdir = args.outdir
        self.space = args.sym_space
        self.blank = args.sym_blank
        self.reporter = Reporter()

        # below means the last number becomes eos/sos ID
        # note that sos/eos IDs are identical
        self.sos = odim - 1
        self.eos = odim - 1

        # subsample info
        self.subsample = get_subsample(args, mode="asr", arch="rnn")

        # label smoothing info
        if args.lsm_type and os.path.isfile(args.train_json):
            logging.info("Use label smoothing with " + args.lsm_type)
            labeldist = label_smoothing_dist(
                odim, args.lsm_type, transcript=args.train_json
            )
        else:
            labeldist = None

        if getattr(args, "use_frontend", False):  # use getattr to keep compatibility
            self.frontend = frontend_for(args, idim)
            self.feature_transform = feature_transform_for(args, (idim - 1) * 2)
            idim = args.n_mels
        else:
            self.frontend = None

        # encoder
        self.enc = encoder_for(args, idim, self.subsample)
        # ctc
        self.ctc = ctc_for(args, odim)
        # attention
        self.att = att_for(args)
        # decoder
        self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)

        # weight initialization
        self.init_like_chainer()

        # options for beam search
        if args.report_cer or args.report_wer:
            recog_args = {
                "beam_size": args.beam_size,
                "penalty": args.penalty,
                "ctc_weight": args.ctc_weight,
                "maxlenratio": args.maxlenratio,
                "minlenratio": args.minlenratio,
                "lm_weight": args.lm_weight,
                "rnnlm": args.rnnlm,
                "nbest": args.nbest,
                "space": args.sym_space,
                "blank": args.sym_blank,
            }

            self.recog_args = argparse.Namespace(**recog_args)
            self.report_cer = args.report_cer
            self.report_wer = args.report_wer
        else:
            self.report_cer = False
            self.report_wer = False
        self.rnnlm = None

        self.logzero = -10000000000.0
        self.loss = None
        self.acc = None

    def init_like_chainer(self):
        """Initialize weight like chainer.

        chainer basically uses LeCun way: W ~ Normal(0, fan_in ** -0.5), b = 0
        pytorch basically uses W, b ~ Uniform(-fan_in**-0.5, fan_in**-0.5)
        however, there are two exceptions as far as I know.
        - EmbedID.W ~ Normal(0, 1)
        - LSTM.upward.b[forget_gate_range] = 1 (but not used in NStepLSTM)
        """
        lecun_normal_init_parameters(self)
        # exceptions
        # embed weight ~ Normal(0, 1)
        self.dec.embed.weight.data.normal_(0, 1)
        # forget-bias = 1.0
        # https://discuss.pytorch.org/t/set-forget-gate-bias-of-lstm/1745
        for i in range(len(self.dec.decoder)):
            set_forget_bias_to_one(self.dec.decoder[i].bias_ih)

    def forward(self, xs_pad, ilens, ys_pad):
        """E2E forward.

        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
        :return: loss value
        :rtype: torch.Tensor
        """
        import editdistance

        # 0. Frontend
        if self.frontend is not None:
            hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
            hs_pad, hlens = self.feature_transform(hs_pad, hlens)
        else:
            hs_pad, hlens = xs_pad, ilens

        # 1. Encoder
        hs_pad, hlens, _ = self.enc(hs_pad, hlens)

        # 2. CTC loss
        if self.mtlalpha == 0:
            self.loss_ctc = None
        else:
            self.loss_ctc = self.ctc(hs_pad, hlens, ys_pad)

        # 3. attention loss
        if self.mtlalpha == 1:
            self.loss_att, acc = None, None
        else:
            self.loss_att, acc, _ = self.dec(hs_pad, hlens, ys_pad)
        self.acc = acc

        # 4. compute cer without beam search
        # (greedy CTC argmax path; -1 marks padding in the targets)
        if self.mtlalpha == 0 or self.char_list is None:
            cer_ctc = None
        else:
            cers = []

            y_hats = self.ctc.argmax(hs_pad).data
            for i, y in enumerate(y_hats):
                # groupby collapses repeated CTC labels into single tokens
                y_hat = [x[0] for x in groupby(y)]
                y_true = ys_pad[i]

                seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
                seq_true = [
                    self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                ]
                seq_hat_text = "".join(seq_hat).replace(self.space, " ")
                seq_hat_text = seq_hat_text.replace(self.blank, "")
                seq_true_text = "".join(seq_true).replace(self.space, " ")
                hyp_chars = seq_hat_text.replace(" ", "")
                ref_chars = seq_true_text.replace(" ", "")
                if len(ref_chars) > 0:
                    cers.append(
                        editdistance.eval(hyp_chars, ref_chars) / len(ref_chars)
                    )

            cer_ctc = sum(cers) / len(cers) if cers else None

        # 5. compute cer/wer
        # (full beam-search decoding; only done at eval time when requested)
        if self.training or not (self.report_cer or self.report_wer):
            cer, wer = 0.0, 0.0
            # oracle_cer, oracle_wer = 0.0, 0.0
        else:
            if self.recog_args.ctc_weight > 0.0:
                lpz = self.ctc.log_softmax(hs_pad).data
            else:
                lpz = None

            word_eds, word_ref_lens, char_eds, char_ref_lens = [], [], [], []
            nbest_hyps = self.dec.recognize_beam_batch(
                hs_pad,
                torch.tensor(hlens),
                lpz,
                self.recog_args,
                self.char_list,
                self.rnnlm,
            )
            # remove <sos> and <eos>
            y_hats = [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps]
            for i, y_hat in enumerate(y_hats):
                y_true = ys_pad[i]

                seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
                seq_true = [
                    self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                ]
                seq_hat_text = "".join(seq_hat).replace(self.recog_args.space, " ")
                seq_hat_text = seq_hat_text.replace(self.recog_args.blank, "")
                seq_true_text = "".join(seq_true).replace(self.recog_args.space, " ")

                hyp_words = seq_hat_text.split()
                ref_words = seq_true_text.split()
                word_eds.append(editdistance.eval(hyp_words, ref_words))
                word_ref_lens.append(len(ref_words))
                hyp_chars = seq_hat_text.replace(" ", "")
                ref_chars = seq_true_text.replace(" ", "")
                char_eds.append(editdistance.eval(hyp_chars, ref_chars))
                char_ref_lens.append(len(ref_chars))

            wer = (
                0.0
                if not self.report_wer
                else float(sum(word_eds)) / sum(word_ref_lens)
            )
            cer = (
                0.0
                if not self.report_cer
                else float(sum(char_eds)) / sum(char_ref_lens)
            )

        # Interpolate the two losses by mtlalpha.
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = self.loss_att
            loss_att_data = float(self.loss_att)
            loss_ctc_data = None
        elif alpha == 1:
            self.loss = self.loss_ctc
            loss_att_data = None
            loss_ctc_data = float(self.loss_ctc)
        else:
            self.loss = alpha * self.loss_ctc + (1 - alpha) * self.loss_att
            loss_att_data = float(self.loss_att)
            loss_ctc_data = float(self.loss_ctc)

        loss_data = float(self.loss)
        # Only report sane losses; large or NaN values indicate divergence.
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_ctc_data, loss_att_data, acc, cer_ctc, cer, wer, loss_data
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss

    def scorers(self):
        """Scorers."""
        return dict(decoder=self.dec, ctc=CTCPrefixScorer(self.ctc, self.eos))

    def encode(self, x):
        """Encode acoustic features.

        :param ndarray x: input acoustic feature (T, D)
        :return: encoder outputs
        :rtype: torch.Tensor
        """
        self.eval()
        ilens = [x.shape[0]]

        # subsample frame
        x = x[:: self.subsample[0], :]
        p = next(self.parameters())
        h = torch.as_tensor(x, device=p.device, dtype=p.dtype)
        # make a utt list (1) to use the same interface for encoder
        hs = h.contiguous().unsqueeze(0)

        # 0. Frontend
        if self.frontend is not None:
            enhanced, hlens, mask = self.frontend(hs, ilens)
            hs, hlens = self.feature_transform(enhanced, hlens)
        else:
            hs, hlens = hs, ilens

        # 1. encoder
        hs, _, _ = self.enc(hs, hlens)
        return hs.squeeze(0)

    def recognize(self, x, recog_args, char_list, rnnlm=None):
        """E2E beam search.

        :param ndarray x: input acoustic feature (T, D)
        :param Namespace recog_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        hs = self.encode(x).unsqueeze(0)
        # calculate log P(z_t|X) for CTC scores
        if recog_args.ctc_weight > 0.0:
            lpz = self.ctc.log_softmax(hs)[0]
        else:
            lpz = None

        # 2. Decoder
        # decode the first utterance
        y = self.dec.recognize_beam(hs[0], lpz, recog_args, char_list, rnnlm)
        return y

    def recognize_batch(self, xs, recog_args, char_list, rnnlm=None):
        """E2E batch beam search.

        :param list xs: list of input acoustic feature arrays [(T_1, D), (T_2, D), ...]
        :param Namespace recog_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        prev = self.training
        self.eval()
        ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)

        # subsample frame
        xs = [xx[:: self.subsample[0], :] for xx in xs]
        xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
        xs_pad = pad_list(xs, 0.0)

        # 0. Frontend
        if self.frontend is not None:
            enhanced, hlens, mask = self.frontend(xs_pad, ilens)
            hs_pad, hlens = self.feature_transform(enhanced, hlens)
        else:
            hs_pad, hlens = xs_pad, ilens

        # 1. Encoder
        hs_pad, hlens, _ = self.enc(hs_pad, hlens)

        # calculate log P(z_t|X) for CTC scores
        if recog_args.ctc_weight > 0.0:
            lpz = self.ctc.log_softmax(hs_pad)
            normalize_score = False
        else:
            lpz = None
            normalize_score = True

        # 2. Decoder
        hlens = torch.tensor(list(map(int, hlens)))  # make sure hlens is tensor
        y = self.dec.recognize_beam_batch(
            hs_pad,
            hlens,
            lpz,
            recog_args,
            char_list,
            rnnlm,
            normalize_score=normalize_score,
        )

        # restore the original training mode
        if prev:
            self.train()
        return y

    def enhance(self, xs):
        """Forward only in the frontend stage.

        :param ndarray xs: input acoustic feature (T, C, F)
        :return: enhaned feature
        :rtype: torch.Tensor
        """
        if self.frontend is None:
            # NOTE(review): typo "does't" kept — runtime message, not a comment.
            raise RuntimeError("Frontend does't exist")
        prev = self.training
        self.eval()
        ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)

        # subsample frame
        xs = [xx[:: self.subsample[0], :] for xx in xs]
        xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
        xs_pad = pad_list(xs, 0.0)
        enhanced, hlensm, mask = self.frontend(xs_pad, ilens)
        if prev:
            self.train()
        return enhanced.cpu().numpy(), mask.cpu().numpy(), ilens

    def calculate_all_attentions(self, xs_pad, ilens, ys_pad):
        """E2E attention calculation.

        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
        :return: attention weights with the following shape,
            1) multi-head case => attention weights (B, H, Lmax, Tmax),
            2) other case => attention weights (B, Lmax, Tmax).
        :rtype: float ndarray
        """
        self.eval()
        with torch.no_grad():
            # 0. Frontend
            if self.frontend is not None:
                hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
                hs_pad, hlens = self.feature_transform(hs_pad, hlens)
            else:
                hs_pad, hlens = xs_pad, ilens

            # 1. Encoder
            hpad, hlens, _ = self.enc(hs_pad, hlens)

            # 2. Decoder
            att_ws = self.dec.calculate_all_attentions(hpad, hlens, ys_pad)
        self.train()
        return att_ws

    def calculate_all_ctc_probs(self, xs_pad, ilens, ys_pad):
        """E2E CTC probability calculation.

        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
        :return: CTC probability (B, Tmax, vocab)
        :rtype: float ndarray
        """
        probs = None
        # No CTC branch when mtlalpha == 0.
        if self.mtlalpha == 0:
            return probs

        self.eval()
        with torch.no_grad():
            # 0. Frontend
            if self.frontend is not None:
                hs_pad, hlens, mask = self.frontend(to_torch_tensor(xs_pad), ilens)
                hs_pad, hlens = self.feature_transform(hs_pad, hlens)
            else:
                hs_pad, hlens = xs_pad, ilens

            # 1. Encoder
            hpad, hlens, _ = self.enc(hs_pad, hlens)

            # 2. CTC probs
            probs = self.ctc.softmax(hpad).cpu().numpy()
        self.train()
        return probs

    def subsample_frames(self, x):
        """Subsample speeh frames in the encoder."""
        # subsample frame
        x = x[:: self.subsample[0], :]
        ilen = [x.shape[0]]
        h = to_device(self, torch.from_numpy(np.array(x, dtype=np.float32)))
        h.contiguous()
        return h, ilen
| 19,358 | 34.456044 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_tts_fastspeech.py | # Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""FastSpeech related modules."""
import logging
import torch
import torch.nn.functional as F
from espnet.asr.asr_utils import get_model_conf, torch_load
from espnet.nets.pytorch_backend.fastspeech.duration_calculator import ( # noqa: H301
DurationCalculator,
)
from espnet.nets.pytorch_backend.fastspeech.duration_predictor import ( # noqa: H301
DurationPredictor,
DurationPredictorLoss,
)
from espnet.nets.pytorch_backend.fastspeech.length_regulator import LengthRegulator
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask, make_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.cli_utils import strtobool
from espnet.utils.fill_missing_args import fill_missing_args
class FeedForwardTransformerLoss(torch.nn.Module):
    """Loss function module for feed-forward Transformer."""

    def __init__(self, use_masking=True, use_weighted_masking=False):
        """Initialize feed-forward Transformer loss module.

        Args:
            use_masking (bool):
                Whether to apply masking for padded part in loss calculation.
            use_weighted_masking (bool):
                Whether to weighted masking in loss calculation.

        """
        super(FeedForwardTransformerLoss, self).__init__()
        # The two masking modes are mutually exclusive (both False is allowed).
        assert (use_masking != use_weighted_masking) or not use_masking
        self.use_masking = use_masking
        self.use_weighted_masking = use_weighted_masking

        # define criterions
        # With weighted masking, reduction is deferred so per-element losses
        # can be reweighted before summation.
        reduction = "none" if self.use_weighted_masking else "mean"
        self.l1_criterion = torch.nn.L1Loss(reduction=reduction)
        self.duration_criterion = DurationPredictorLoss(reduction=reduction)

    def forward(self, after_outs, before_outs, d_outs, ys, ds, ilens, olens):
        """Calculate forward propagation.

        Args:
            after_outs (Tensor): Batch of outputs after postnets (B, Lmax, odim).
            before_outs (Tensor): Batch of outputs before postnets (B, Lmax, odim).
            d_outs (Tensor): Batch of outputs of duration predictor (B, Tmax).
            ys (Tensor): Batch of target features (B, Lmax, odim).
            ds (Tensor): Batch of durations (B, Tmax).
            ilens (LongTensor): Batch of the lengths of each input (B,).
            olens (LongTensor): Batch of the lengths of each target (B,).

        Returns:
            Tensor: L1 loss value.
            Tensor: Duration predictor loss value.

        """
        # apply mask to remove padded part
        if self.use_masking:
            duration_masks = make_non_pad_mask(ilens).to(ys.device)
            d_outs = d_outs.masked_select(duration_masks)
            ds = ds.masked_select(duration_masks)
            out_masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            before_outs = before_outs.masked_select(out_masks)
            # after_outs is None when no postnet is used.
            after_outs = (
                after_outs.masked_select(out_masks) if after_outs is not None else None
            )
            ys = ys.masked_select(out_masks)

        # calculate loss
        l1_loss = self.l1_criterion(before_outs, ys)
        if after_outs is not None:
            l1_loss += self.l1_criterion(after_outs, ys)
        duration_loss = self.duration_criterion(d_outs, ds)

        # make weighted mask and apply it
        # (each sequence contributes equally regardless of its length)
        if self.use_weighted_masking:
            out_masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            out_weights = out_masks.float() / out_masks.sum(dim=1, keepdim=True).float()
            out_weights /= ys.size(0) * ys.size(2)
            duration_masks = make_non_pad_mask(ilens).to(ys.device)
            duration_weights = (
                duration_masks.float() / duration_masks.sum(dim=1, keepdim=True).float()
            )
            duration_weights /= ds.size(0)

            # apply weight
            l1_loss = l1_loss.mul(out_weights).masked_select(out_masks).sum()
            duration_loss = (
                duration_loss.mul(duration_weights).masked_select(duration_masks).sum()
            )

        return l1_loss, duration_loss
class FeedForwardTransformer(TTSInterface, torch.nn.Module):
    """Feed Forward Transformer for TTS a.k.a. FastSpeech.
    This is a module of FastSpeech,
    feed-forward Transformer with duration predictor described in
    `FastSpeech: Fast, Robust and Controllable Text to Speech`_,
    which does not require any auto-regressive
    processing during inference,
    resulting in fast decoding compared with auto-regressive Transformer.
    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf
    """
    @staticmethod
    def add_arguments(parser):
        """Add model-specific arguments to the parser."""
        group = parser.add_argument_group("feed-forward transformer model setting")
        # network structure related
        group.add_argument(
            "--adim",
            default=384,
            type=int,
            help="Number of attention transformation dimensions",
        )
        group.add_argument(
            "--aheads",
            default=4,
            type=int,
            help="Number of heads for multi head attention",
        )
        group.add_argument(
            "--elayers", default=6, type=int, help="Number of encoder layers"
        )
        group.add_argument(
            "--eunits", default=1536, type=int, help="Number of encoder hidden units"
        )
        group.add_argument(
            "--dlayers", default=6, type=int, help="Number of decoder layers"
        )
        group.add_argument(
            "--dunits", default=1536, type=int, help="Number of decoder hidden units"
        )
        group.add_argument(
            "--positionwise-layer-type",
            default="linear",
            type=str,
            choices=["linear", "conv1d", "conv1d-linear"],
            help="Positionwise layer type.",
        )
        group.add_argument(
            "--positionwise-conv-kernel-size",
            default=3,
            type=int,
            help="Kernel size of positionwise conv1d layer",
        )
        group.add_argument(
            "--postnet-layers", default=0, type=int, help="Number of postnet layers"
        )
        group.add_argument(
            "--postnet-chans", default=256, type=int, help="Number of postnet channels"
        )
        group.add_argument(
            "--postnet-filts", default=5, type=int, help="Filter size of postnet"
        )
        group.add_argument(
            "--use-batch-norm",
            default=True,
            type=strtobool,
            help="Whether to use batch normalization",
        )
        group.add_argument(
            "--use-scaled-pos-enc",
            default=True,
            type=strtobool,
            help="Use trainable scaled positional encoding "
            "instead of the fixed scale one",
        )
        group.add_argument(
            "--encoder-normalize-before",
            default=False,
            type=strtobool,
            help="Whether to apply layer norm before encoder block",
        )
        group.add_argument(
            "--decoder-normalize-before",
            default=False,
            type=strtobool,
            help="Whether to apply layer norm before decoder block",
        )
        group.add_argument(
            "--encoder-concat-after",
            default=False,
            type=strtobool,
            help="Whether to concatenate attention layer's input and output in encoder",
        )
        group.add_argument(
            "--decoder-concat-after",
            default=False,
            type=strtobool,
            help="Whether to concatenate attention layer's input and output in decoder",
        )
        group.add_argument(
            "--duration-predictor-layers",
            default=2,
            type=int,
            help="Number of layers in duration predictor",
        )
        group.add_argument(
            "--duration-predictor-chans",
            default=384,
            type=int,
            help="Number of channels in duration predictor",
        )
        group.add_argument(
            "--duration-predictor-kernel-size",
            default=3,
            type=int,
            help="Kernel size in duration predictor",
        )
        group.add_argument(
            "--teacher-model",
            default=None,
            type=str,
            nargs="?",
            help="Teacher model file path",
        )
        group.add_argument(
            "--reduction-factor", default=1, type=int, help="Reduction factor"
        )
        group.add_argument(
            "--spk-embed-dim",
            default=None,
            type=int,
            help="Number of speaker embedding dimensions",
        )
        group.add_argument(
            "--spk-embed-integration-type",
            type=str,
            default="add",
            choices=["add", "concat"],
            help="How to integrate speaker embedding",
        )
        # training related
        group.add_argument(
            "--transformer-init",
            type=str,
            default="pytorch",
            choices=[
                "pytorch",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
            ],
            help="How to initialize transformer parameters",
        )
        group.add_argument(
            "--initial-encoder-alpha",
            type=float,
            default=1.0,
            help="Initial alpha value in encoder's ScaledPositionalEncoding",
        )
        group.add_argument(
            "--initial-decoder-alpha",
            type=float,
            default=1.0,
            help="Initial alpha value in decoder's ScaledPositionalEncoding",
        )
        group.add_argument(
            "--transformer-lr",
            default=1.0,
            type=float,
            help="Initial value of learning rate",
        )
        group.add_argument(
            "--transformer-warmup-steps",
            default=4000,
            type=int,
            help="Optimizer warmup steps",
        )
        group.add_argument(
            "--transformer-enc-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder except for attention",
        )
        group.add_argument(
            "--transformer-enc-positional-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder positional encoding",
        )
        group.add_argument(
            "--transformer-enc-attn-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder self-attention",
        )
        group.add_argument(
            "--transformer-dec-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer decoder except "
            "for attention and pos encoding",
        )
        group.add_argument(
            "--transformer-dec-positional-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer decoder positional encoding",
        )
        group.add_argument(
            "--transformer-dec-attn-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer decoder self-attention",
        )
        group.add_argument(
            "--transformer-enc-dec-attn-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder-decoder attention",
        )
        group.add_argument(
            "--duration-predictor-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for duration predictor",
        )
        group.add_argument(
            "--postnet-dropout-rate",
            default=0.5,
            type=float,
            help="Dropout rate in postnet",
        )
        group.add_argument(
            "--transfer-encoder-from-teacher",
            default=True,
            type=strtobool,
            help="Whether to transfer teacher's parameters",
        )
        group.add_argument(
            "--transferred-encoder-module",
            default="all",
            type=str,
            choices=["all", "embed"],
            help="Encoder modeules to be trasferred from teacher",
        )
        # loss related
        group.add_argument(
            "--use-masking",
            default=True,
            type=strtobool,
            help="Whether to use masking in calculation of loss",
        )
        group.add_argument(
            "--use-weighted-masking",
            default=False,
            type=strtobool,
            help="Whether to use weighted masking in calculation of loss",
        )
        return parser
    def __init__(self, idim, odim, args=None):
        """Initialize feed-forward Transformer module.
        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            args (Namespace, optional):
                - elayers (int): Number of encoder layers.
                - eunits (int): Number of encoder hidden units.
                - adim (int): Number of attention transformation dimensions.
                - aheads (int): Number of heads for multi head attention.
                - dlayers (int): Number of decoder layers.
                - dunits (int): Number of decoder hidden units.
                - use_scaled_pos_enc (bool):
                    Whether to use trainable scaled positional encoding.
                - encoder_normalize_before (bool):
                    Whether to perform layer normalization before encoder block.
                - decoder_normalize_before (bool):
                    Whether to perform layer normalization before decoder block.
                - encoder_concat_after (bool): Whether to concatenate attention
                    layer's input and output in encoder.
                - decoder_concat_after (bool): Whether to concatenate attention
                    layer's input and output in decoder.
                - duration_predictor_layers (int): Number of duration predictor layers.
                - duration_predictor_chans (int): Number of duration predictor channels.
                - duration_predictor_kernel_size (int):
                    Kernel size of duration predictor.
                - spk_embed_dim (int): Number of speaker embedding dimensions.
                - spk_embed_integration_type: How to integrate speaker embedding.
                - teacher_model (str): Teacher auto-regressive transformer model path.
                - reduction_factor (int): Reduction factor.
                - transformer_init (float): How to initialize transformer parameters.
                - transformer_lr (float): Initial value of learning rate.
                - transformer_warmup_steps (int): Optimizer warmup steps.
                - transformer_enc_dropout_rate (float):
                    Dropout rate in encoder except attention & positional encoding.
                - transformer_enc_positional_dropout_rate (float):
                    Dropout rate after encoder positional encoding.
                - transformer_enc_attn_dropout_rate (float):
                    Dropout rate in encoder self-attention module.
                - transformer_dec_dropout_rate (float):
                    Dropout rate in decoder except attention & positional encoding.
                - transformer_dec_positional_dropout_rate (float):
                    Dropout rate after decoder positional encoding.
                - transformer_dec_attn_dropout_rate (float):
                    Dropout rate in decoder self-attention module.
                - transformer_enc_dec_attn_dropout_rate (float):
                    Dropout rate in encoder-decoder attention module.
                - use_masking (bool):
                    Whether to apply masking for padded part in loss calculation.
                - use_weighted_masking (bool):
                    Whether to apply weighted masking in loss calculation.
                - transfer_encoder_from_teacher:
                    Whether to transfer encoder using teacher encoder parameters.
                - transferred_encoder_module:
                    Encoder module to be initialized using teacher parameters.
        """
        # initialize base classes
        TTSInterface.__init__(self)
        torch.nn.Module.__init__(self)
        # fill missing arguments
        args = fill_missing_args(args, self.add_arguments)
        # store hyperparameters
        self.idim = idim
        self.odim = odim
        self.reduction_factor = args.reduction_factor
        self.use_scaled_pos_enc = args.use_scaled_pos_enc
        self.spk_embed_dim = args.spk_embed_dim
        if self.spk_embed_dim is not None:
            self.spk_embed_integration_type = args.spk_embed_integration_type
        # use idx 0 as padding idx
        padding_idx = 0
        # get positional encoding class
        pos_enc_class = (
            ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
        )
        # define encoder
        encoder_input_layer = torch.nn.Embedding(
            num_embeddings=idim, embedding_dim=args.adim, padding_idx=padding_idx
        )
        self.encoder = Encoder(
            idim=idim,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            linear_units=args.eunits,
            num_blocks=args.elayers,
            input_layer=encoder_input_layer,
            dropout_rate=args.transformer_enc_dropout_rate,
            positional_dropout_rate=args.transformer_enc_positional_dropout_rate,
            attention_dropout_rate=args.transformer_enc_attn_dropout_rate,
            pos_enc_class=pos_enc_class,
            normalize_before=args.encoder_normalize_before,
            concat_after=args.encoder_concat_after,
            positionwise_layer_type=args.positionwise_layer_type,
            positionwise_conv_kernel_size=args.positionwise_conv_kernel_size,
        )
        # define additional projection for speaker embedding
        if self.spk_embed_dim is not None:
            if self.spk_embed_integration_type == "add":
                self.projection = torch.nn.Linear(self.spk_embed_dim, args.adim)
            else:
                self.projection = torch.nn.Linear(
                    args.adim + self.spk_embed_dim, args.adim
                )
        # define duration predictor
        self.duration_predictor = DurationPredictor(
            idim=args.adim,
            n_layers=args.duration_predictor_layers,
            n_chans=args.duration_predictor_chans,
            kernel_size=args.duration_predictor_kernel_size,
            dropout_rate=args.duration_predictor_dropout_rate,
        )
        # define length regulator
        self.length_regulator = LengthRegulator()
        # define decoder
        # NOTE: we use encoder as decoder
        # because fastspeech's decoder is the same as encoder
        self.decoder = Encoder(
            idim=0,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            linear_units=args.dunits,
            num_blocks=args.dlayers,
            input_layer=None,
            dropout_rate=args.transformer_dec_dropout_rate,
            positional_dropout_rate=args.transformer_dec_positional_dropout_rate,
            attention_dropout_rate=args.transformer_dec_attn_dropout_rate,
            pos_enc_class=pos_enc_class,
            normalize_before=args.decoder_normalize_before,
            concat_after=args.decoder_concat_after,
            positionwise_layer_type=args.positionwise_layer_type,
            positionwise_conv_kernel_size=args.positionwise_conv_kernel_size,
        )
        # define final projection
        # (r frames are predicted at once when reduction_factor == r)
        self.feat_out = torch.nn.Linear(args.adim, odim * args.reduction_factor)
        # define postnet
        self.postnet = (
            None
            if args.postnet_layers == 0
            else Postnet(
                idim=idim,
                odim=odim,
                n_layers=args.postnet_layers,
                n_chans=args.postnet_chans,
                n_filts=args.postnet_filts,
                use_batch_norm=args.use_batch_norm,
                dropout_rate=args.postnet_dropout_rate,
            )
        )
        # initialize parameters
        self._reset_parameters(
            init_type=args.transformer_init,
            init_enc_alpha=args.initial_encoder_alpha,
            init_dec_alpha=args.initial_decoder_alpha,
        )
        # define teacher model (frozen auto-regressive model used to
        # provide groundtruth durations for training the duration predictor)
        if args.teacher_model is not None:
            self.teacher = self._load_teacher_model(args.teacher_model)
        else:
            self.teacher = None
        # define duration calculator
        if self.teacher is not None:
            self.duration_calculator = DurationCalculator(self.teacher)
        else:
            self.duration_calculator = None
        # transfer teacher parameters
        if self.teacher is not None and args.transfer_encoder_from_teacher:
            self._transfer_from_teacher(args.transferred_encoder_module)
        # define criterions
        self.criterion = FeedForwardTransformerLoss(
            use_masking=args.use_masking, use_weighted_masking=args.use_weighted_masking
        )
    def _forward(
        self,
        xs,
        ilens,
        ys=None,
        olens=None,
        spembs=None,
        ds=None,
        is_inference=False,
        alpha=1.0,
    ):
        # Shared forward pass for training and inference.
        # forward encoder
        x_masks = self._source_mask(ilens).to(xs.device)
        hs, _ = self.encoder(xs, x_masks)  # (B, Tmax, adim)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)
        # forward duration predictor and length regulator
        d_masks = make_pad_mask(ilens).to(xs.device)
        if is_inference:
            # predict durations and expand encoder states accordingly;
            # alpha controls global speech speed
            d_outs = self.duration_predictor.inference(hs, d_masks)  # (B, Tmax)
            hs = self.length_regulator(hs, d_outs, alpha)  # (B, Lmax, adim)
        else:
            if ds is None:
                # extract groundtruth durations from the teacher model on the fly
                with torch.no_grad():
                    ds = self.duration_calculator(
                        xs, ilens, ys, olens, spembs
                    )  # (B, Tmax)
            d_outs = self.duration_predictor(hs, d_masks)  # (B, Tmax)
            hs = self.length_regulator(hs, ds)  # (B, Lmax, adim)
        # forward decoder
        if olens is not None:
            if self.reduction_factor > 1:
                olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            else:
                olens_in = olens
            h_masks = self._source_mask(olens_in).to(xs.device)
        else:
            h_masks = None
        zs, _ = self.decoder(hs, h_masks)  # (B, Lmax, adim)
        before_outs = self.feat_out(zs).view(
            zs.size(0), -1, self.odim
        )  # (B, Lmax, odim)
        # postnet -> (B, Lmax//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)
        if is_inference:
            return before_outs, after_outs, d_outs
        else:
            return before_outs, after_outs, ds, d_outs
    def forward(self, xs, ilens, ys, olens, spembs=None, extras=None, *args, **kwargs):
        """Calculate forward propagation.
        Args:
            xs (Tensor): Batch of padded character ids (B, Tmax).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            olens (LongTensor): Batch of the lengths of each target (B,).
            spembs (Tensor, optional):
                Batch of speaker embedding vectors (B, spk_embed_dim).
            extras (Tensor, optional): Batch of precalculated durations (B, Tmax, 1).
        Returns:
            Tensor: Loss value.
        """
        # remove unnecessary padded part (for multi-gpus)
        xs = xs[:, : max(ilens)]
        ys = ys[:, : max(olens)]
        if extras is not None:
            extras = extras[:, : max(ilens)].squeeze(-1)
        # forward propagation
        before_outs, after_outs, ds, d_outs = self._forward(
            xs, ilens, ys, olens, spembs=spembs, ds=extras, is_inference=False
        )
        # modify mod part of groundtruth
        # (trim targets to a multiple of the reduction factor)
        if self.reduction_factor > 1:
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
            max_olen = max(olens)
            ys = ys[:, :max_olen]
        # calculate loss
        if self.postnet is None:
            # no postnet: after_outs aliases before_outs, so skip its loss term
            l1_loss, duration_loss = self.criterion(
                None, before_outs, d_outs, ys, ds, ilens, olens
            )
        else:
            l1_loss, duration_loss = self.criterion(
                after_outs, before_outs, d_outs, ys, ds, ilens, olens
            )
        loss = l1_loss + duration_loss
        report_keys = [
            {"l1_loss": l1_loss.item()},
            {"duration_loss": duration_loss.item()},
            {"loss": loss.item()},
        ]
        # report extra information
        if self.use_scaled_pos_enc:
            report_keys += [
                {"encoder_alpha": self.encoder.embed[-1].alpha.data.item()},
                {"decoder_alpha": self.decoder.embed[-1].alpha.data.item()},
            ]
        self.reporter.report(report_keys)
        return loss
    def calculate_all_attentions(
        self, xs, ilens, ys, olens, spembs=None, extras=None, *args, **kwargs
    ):
        """Calculate all of the attention weights.
        Args:
            xs (Tensor): Batch of padded character ids (B, Tmax).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            olens (LongTensor): Batch of the lengths of each target (B,).
            spembs (Tensor, optional):
                Batch of speaker embedding vectors (B, spk_embed_dim).
            extras (Tensor, optional): Batch of precalculated durations (B, Tmax, 1).
        Returns:
            dict: Dict of attention weights and outputs.
        """
        with torch.no_grad():
            # remove unnecessary padded part (for multi-gpus)
            xs = xs[:, : max(ilens)]
            ys = ys[:, : max(olens)]
            if extras is not None:
                extras = extras[:, : max(ilens)].squeeze(-1)
            # forward propagation
            outs = self._forward(
                xs, ilens, ys, olens, spembs=spembs, ds=extras, is_inference=False
            )[1]
        att_ws_dict = dict()
        # collect attention maps from every MultiHeadedAttention module,
        # trimming each map to the valid (unpadded) lengths of its utterance
        for name, m in self.named_modules():
            if isinstance(m, MultiHeadedAttention):
                attn = m.attn.cpu().numpy()
                if "encoder" in name:
                    attn = [a[:, :l, :l] for a, l in zip(attn, ilens.tolist())]
                elif "decoder" in name:
                    if "src" in name:
                        attn = [
                            a[:, :ol, :il]
                            for a, il, ol in zip(attn, ilens.tolist(), olens.tolist())
                        ]
                    elif "self" in name:
                        attn = [a[:, :l, :l] for a, l in zip(attn, olens.tolist())]
                    else:
                        logging.warning("unknown attention module: " + name)
                else:
                    logging.warning("unknown attention module: " + name)
                att_ws_dict[name] = attn
        att_ws_dict["predicted_fbank"] = [
            m[:l].T for m, l in zip(outs.cpu().numpy(), olens.tolist())
        ]
        return att_ws_dict
    def inference(self, x, inference_args, spemb=None, *args, **kwargs):
        """Generate the sequence of features given the sequences of characters.
        Args:
            x (Tensor): Input sequence of characters (T,).
            inference_args (Namespace): Dummy for compatibility.
            spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim).
        Returns:
            Tensor: Output sequence of features (L, odim).
            None: Dummy for compatibility.
            None: Dummy for compatibility.
        """
        # setup batch axis
        ilens = torch.tensor([x.shape[0]], dtype=torch.long, device=x.device)
        xs = x.unsqueeze(0)
        if spemb is not None:
            spembs = spemb.unsqueeze(0)
        else:
            spembs = None
        # get option
        alpha = getattr(inference_args, "fastspeech_alpha", 1.0)
        # inference
        _, outs, _ = self._forward(
            xs,
            ilens,
            spembs=spembs,
            is_inference=True,
            alpha=alpha,
        )  # (1, L, odim)
        return outs[0], None, None
    def _integrate_with_spk_embed(self, hs, spembs):
        """Integrate speaker embedding with hidden states.
        Args:
            hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).
            spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
        Returns:
            Tensor: Batch of integrated hidden state sequences (B, Tmax, adim)
        """
        if self.spk_embed_integration_type == "add":
            # apply projection and then add to hidden states
            spembs = self.projection(F.normalize(spembs))
            hs = hs + spembs.unsqueeze(1)
        elif self.spk_embed_integration_type == "concat":
            # concat hidden states with spk embeds and then apply projection
            spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
            hs = self.projection(torch.cat([hs, spembs], dim=-1))
        else:
            raise NotImplementedError("support only add or concat.")
        return hs
    def _source_mask(self, ilens):
        """Make masks for self-attention.
        Args:
            ilens (LongTensor or List): Batch of lengths (B,).
        Returns:
            Tensor: Mask tensor for self-attention.
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)
        Examples:
            >>> ilens = [5, 3]
            >>> self._source_mask(ilens)
            tensor([[[1, 1, 1, 1, 1],
                     [1, 1, 1, 0, 0]]], dtype=torch.uint8)
        """
        x_masks = make_non_pad_mask(ilens)
        return x_masks.unsqueeze(-2)
    def _load_teacher_model(self, model_path):
        # get teacher model config
        idim, odim, args = get_model_conf(model_path)
        # assert dimension is the same between teacher and student
        assert idim == self.idim
        assert odim == self.odim
        assert args.reduction_factor == self.reduction_factor
        # load teacher model
        from espnet.utils.dynamic_import import dynamic_import
        model_class = dynamic_import(args.model_module)
        model = model_class(idim, odim, args)
        torch_load(model_path, model)
        # freeze teacher model parameters so it is never updated during training
        for p in model.parameters():
            p.requires_grad = False
        return model
    def _reset_parameters(self, init_type, init_enc_alpha=1.0, init_dec_alpha=1.0):
        # initialize parameters
        initialize(self, init_type)
        # initialize alpha in scaled positional encoding
        if self.use_scaled_pos_enc:
            self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
            self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
    def _transfer_from_teacher(self, transferred_encoder_module):
        # Copy encoder weights from the teacher (all layers or embedding only).
        if transferred_encoder_module == "all":
            for (n1, p1), (n2, p2) in zip(
                self.encoder.named_parameters(), self.teacher.encoder.named_parameters()
            ):
                assert n1 == n2, "It seems that encoder structure is different."
                assert p1.shape == p2.shape, "It seems that encoder size is different."
                p1.data.copy_(p2.data)
        elif transferred_encoder_module == "embed":
            student_shape = self.encoder.embed[0].weight.data.shape
            teacher_shape = self.teacher.encoder.embed[0].weight.data.shape
            assert (
                student_shape == teacher_shape
            ), "It seems that embed dimension is different."
            self.encoder.embed[0].weight.data.copy_(
                self.teacher.encoder.embed[0].weight.data
            )
        else:
            raise NotImplementedError("Support only all or embed.")
    @property
    def attention_plot_class(self):
        """Return plot class for attention weight plot."""
        # Lazy import to avoid chainer dependency
        from espnet.nets.pytorch_backend.e2e_tts_transformer import TTSPlot
        return TTSPlot
    @property
    def base_plot_keys(self):
        """Return base key names to plot during training.
        keys should match what `chainer.reporter` reports.
        If you add the key `loss`,
        the reporter will report `main/loss` and `validation/main/loss` values.
        also `loss.png` will be created as a figure visulizing `main/loss`
        and `validation/main/loss` values.
        Returns:
            list: List of strings which are base keys to plot during training.
        """
        plot_keys = ["loss", "l1_loss", "duration_loss"]
        if self.use_scaled_pos_enc:
            plot_keys += ["encoder_alpha", "decoder_alpha"]
        return plot_keys
| 34,348 | 37.165556 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_mt.py | # Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""RNN sequence-to-sequence text translation model (pytorch)."""
import argparse
import logging
import math
import os
import chainer
import nltk
import numpy as np
import torch
from chainer import reporter
from espnet.nets.e2e_asr_common import label_smoothing_dist
from espnet.nets.mt_interface import MTInterface
from espnet.nets.pytorch_backend.initialization import uniform_init_parameters
from espnet.nets.pytorch_backend.nets_utils import get_subsample, pad_list, to_device
from espnet.nets.pytorch_backend.rnn.argument import ( # noqa: H301
add_arguments_rnn_attention_common,
add_arguments_rnn_decoder_common,
add_arguments_rnn_encoder_common,
)
from espnet.nets.pytorch_backend.rnn.attentions import att_for
from espnet.nets.pytorch_backend.rnn.decoders import decoder_for
from espnet.nets.pytorch_backend.rnn.encoders import encoder_for
from espnet.utils.fill_missing_args import fill_missing_args
class Reporter(chainer.Chain):
    """A chainer reporter wrapper."""

    def report(self, loss, acc, ppl, bleu):
        """Report loss, accuracy, perplexity, and BLEU for the current step."""
        # Report each metric under its own key, in a fixed order.
        for key, value in (("loss", loss), ("acc", acc), ("ppl", ppl), ("bleu", bleu)):
            reporter.report({key: value}, self)
class E2E(MTInterface, torch.nn.Module):
    """E2E module.
    :param int idim: dimension of inputs
    :param int odim: dimension of outputs
    :param Namespace args: argument Namespace containing options
    """
    @staticmethod
    def add_arguments(parser):
        """Add arguments."""
        E2E.encoder_add_arguments(parser)
        E2E.attention_add_arguments(parser)
        E2E.decoder_add_arguments(parser)
        return parser
    @staticmethod
    def encoder_add_arguments(parser):
        """Add arguments for the encoder."""
        group = parser.add_argument_group("E2E encoder setting")
        group = add_arguments_rnn_encoder_common(group)
        return parser
    @staticmethod
    def attention_add_arguments(parser):
        """Add arguments for the attention."""
        group = parser.add_argument_group("E2E attention setting")
        group = add_arguments_rnn_attention_common(group)
        return parser
    @staticmethod
    def decoder_add_arguments(parser):
        """Add arguments for the decoder."""
        group = parser.add_argument_group("E2E decoder setting")
        group = add_arguments_rnn_decoder_common(group)
        return parser
    def __init__(self, idim, odim, args):
        """Construct an E2E object.
        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        """
        super(E2E, self).__init__()
        torch.nn.Module.__init__(self)
        # fill missing arguments for compatibility
        args = fill_missing_args(args, self.add_arguments)
        self.etype = args.etype
        self.verbose = args.verbose
        # NOTE: for self.build method
        args.char_list = getattr(args, "char_list", None)
        self.char_list = args.char_list
        self.outdir = args.outdir
        self.space = args.sym_space
        self.blank = args.sym_blank
        self.reporter = Reporter()
        # below means the last number becomes eos/sos ID
        # note that sos/eos IDs are identical
        self.sos = odim - 1
        self.eos = odim - 1
        self.pad = 0
        # NOTE: we reserve index:0 for <pad> although this is reserved for a blank class
        # in ASR. However, blank labels are not used in MT.
        # To keep the vocabulary size,
        # we use index:0 for padding instead of adding one more class.
        # subsample info
        self.subsample = get_subsample(args, mode="mt", arch="rnn")
        # label smoothing info
        if args.lsm_type and os.path.isfile(args.train_json):
            logging.info("Use label smoothing with " + args.lsm_type)
            labeldist = label_smoothing_dist(
                odim, args.lsm_type, transcript=args.train_json
            )
        else:
            labeldist = None
        # multilingual related
        self.multilingual = getattr(args, "multilingual", False)
        self.replace_sos = getattr(args, "replace_sos", False)
        # encoder
        self.embed = torch.nn.Embedding(idim, args.eunits, padding_idx=self.pad)
        self.dropout = torch.nn.Dropout(p=args.dropout_rate)
        self.enc = encoder_for(args, args.eunits, self.subsample)
        # attention
        self.att = att_for(args)
        # decoder
        self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)
        # tie source and target embeddings
        if args.tie_src_tgt_embedding:
            if idim != odim:
                raise ValueError(
                    "When using tie_src_tgt_embedding, idim and odim must be equal."
                )
            if args.eunits != args.dunits:
                raise ValueError(
                    "When using tie_src_tgt_embedding, eunits and dunits must be equal."
                )
            self.embed.weight = self.dec.embed.weight
        # tie embeddings and the classifier
        if args.tie_classifier:
            if args.context_residual:
                raise ValueError(
                    "When using tie_classifier, context_residual must be turned off."
                )
            self.dec.output.weight = self.dec.embed.weight
        # weight initialization
        self.init_like_fairseq()
        # options for beam search
        if args.report_bleu:
            trans_args = {
                "beam_size": args.beam_size,
                "penalty": args.penalty,
                "ctc_weight": 0,
                "maxlenratio": args.maxlenratio,
                "minlenratio": args.minlenratio,
                "lm_weight": args.lm_weight,
                "rnnlm": args.rnnlm,
                "nbest": args.nbest,
                "space": args.sym_space,
                "blank": args.sym_blank,
                "tgt_lang": False,
            }
            self.trans_args = argparse.Namespace(**trans_args)
            self.report_bleu = args.report_bleu
        else:
            self.report_bleu = False
        self.rnnlm = None
        self.logzero = -10000000000.0
        self.loss = None
        self.acc = None
    def init_like_fairseq(self):
        """Initialize weight like Fairseq.
        Fairseq basically uses W, b, EmbedID.W ~ Uniform(-0.1, 0.1),
        """
        uniform_init_parameters(self)
        # exceptions
        # embed weight ~ Uniform(-0.1, 0.1), padding embedding zeroed out
        torch.nn.init.uniform_(self.embed.weight, -0.1, 0.1)
        torch.nn.init.constant_(self.embed.weight[self.pad], 0)
        torch.nn.init.uniform_(self.dec.embed.weight, -0.1, 0.1)
        torch.nn.init.constant_(self.dec.embed.weight[self.pad], 0)
    def forward(self, xs_pad, ilens, ys_pad):
        """E2E forward.
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
        :return: loss value
        :rtype: torch.Tensor
        """
        # 1. Encoder
        xs_pad, ys_pad = self.target_language_biasing(xs_pad, ilens, ys_pad)
        hs_pad, hlens, _ = self.enc(self.dropout(self.embed(xs_pad)), ilens)
        # 3. attention loss
        self.loss, self.acc, self.ppl = self.dec(hs_pad, hlens, ys_pad)
        # 4. compute bleu
        if self.training or not self.report_bleu:
            self.bleu = 0.0
        else:
            # run beam-search decoding and score hypotheses against references
            lpz = None
            nbest_hyps = self.dec.recognize_beam_batch(
                hs_pad,
                torch.tensor(hlens),
                lpz,
                self.trans_args,
                self.char_list,
                self.rnnlm,
            )
            # remove <sos> and <eos>
            list_of_refs = []
            hyps = []
            y_hats = [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps]
            for i, y_hat in enumerate(y_hats):
                y_true = ys_pad[i]
                # idx == -1 marks padding in the target tensor
                seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
                seq_true = [
                    self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                ]
                seq_hat_text = "".join(seq_hat).replace(self.trans_args.space, " ")
                seq_hat_text = seq_hat_text.replace(self.trans_args.blank, "")
                seq_true_text = "".join(seq_true).replace(self.trans_args.space, " ")
                hyps += [seq_hat_text.split(" ")]
                list_of_refs += [[seq_true_text.split(" ")]]
            self.bleu = nltk.bleu_score.corpus_bleu(list_of_refs, hyps) * 100
        loss_data = float(self.loss)
        if not math.isnan(loss_data):
            self.reporter.report(loss_data, self.acc, self.ppl, self.bleu)
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
    def target_language_biasing(self, xs_pad, ilens, ys_pad):
        """Prepend target language IDs to source sentences for multilingual MT.
        These tags are prepended in source/target sentences as pre-processing.
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :return: source text with its source language ID replaced by
            the target language ID (unchanged when not multilingual)
        :rtype: torch.Tensor
        :return: target text without language IDs
        :rtype: torch.Tensor
        """
        if self.multilingual:
            # remove language ID in the beginning
            tgt_lang_ids = ys_pad[:, 0].unsqueeze(1)
            xs_pad = xs_pad[:, 1:]  # remove source language IDs here
            ys_pad = ys_pad[:, 1:]
            # prepend target language ID to source sentences
            xs_pad = torch.cat([tgt_lang_ids, xs_pad], dim=1)
        return xs_pad, ys_pad
    def translate(self, x, trans_args, char_list, rnnlm=None):
        """E2E beam search.
        :param ndarray x: input source token id sequence; only x[0] is used
        :param Namespace trans_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        prev = self.training
        self.eval()
        # 1. encoder
        # make a utt list (1) to use the same interface for encoder
        if self.multilingual:
            # drop the leading language ID token
            ilen = [len(x[0][1:])]
            h = to_device(
                self, torch.from_numpy(np.fromiter(map(int, x[0][1:]), dtype=np.int64))
            )
        else:
            ilen = [len(x[0])]
            h = to_device(
                self, torch.from_numpy(np.fromiter(map(int, x[0]), dtype=np.int64))
            )
        hs, _, _ = self.enc(self.dropout(self.embed(h.unsqueeze(0))), ilen)
        # 2. decoder
        # decode the first utterance
        y = self.dec.recognize_beam(hs[0], None, trans_args, char_list, rnnlm)
        if prev:
            # restore training mode if it was on before decoding
            self.train()
        return y
    def translate_batch(self, xs, trans_args, char_list, rnnlm=None):
        """E2E batch beam search.
        :param list xs:
            list of input source text feature arrays [(T_1, D), (T_2, D), ...]
        :param Namespace trans_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        prev = self.training
        self.eval()
        # 1. Encoder
        if self.multilingual:
            # drop the leading language ID token of each utterance
            ilens = np.fromiter((len(xx[1:]) for xx in xs), dtype=np.int64)
            hs = [to_device(self, torch.from_numpy(xx[1:])) for xx in xs]
        else:
            ilens = np.fromiter((len(xx) for xx in xs), dtype=np.int64)
            hs = [to_device(self, torch.from_numpy(xx)) for xx in xs]
        xpad = pad_list(hs, self.pad)
        hs_pad, hlens, _ = self.enc(self.dropout(self.embed(xpad)), ilens)
        # 2. Decoder
        hlens = torch.tensor(list(map(int, hlens)))  # make sure hlens is tensor
        y = self.dec.recognize_beam_batch(
            hs_pad, hlens, None, trans_args, char_list, rnnlm
        )
        if prev:
            # restore training mode if it was on before decoding
            self.train()
        return y
    def calculate_all_attentions(self, xs_pad, ilens, ys_pad):
        """E2E attention calculation.
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
        :return: attention weights with the following shape,
            1) multi-head case => attention weights (B, H, Lmax, Tmax),
            2) other case => attention weights (B, Lmax, Tmax).
        :rtype: float ndarray
        """
        self.eval()
        with torch.no_grad():
            # 1. Encoder
            xs_pad, ys_pad = self.target_language_biasing(xs_pad, ilens, ys_pad)
            hpad, hlens, _ = self.enc(self.dropout(self.embed(xs_pad)), ilens)
            # 2. Decoder
            att_ws = self.dec.calculate_all_attentions(hpad, hlens, ys_pad)
        self.train()
        return att_ws
| 13,630 | 35.642473 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_vc_transformer.py | # Copyright 2020 Nagoya University (Wen-Chin Huang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Voice Transformer Network (Transformer-VC) related modules."""
import logging
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.e2e_asr_transformer import subsequent_mask
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import (
Tacotron2Loss as TransformerLoss,
)
from espnet.nets.pytorch_backend.e2e_tts_transformer import TTSPlot # noqa: H301
from espnet.nets.pytorch_backend.e2e_tts_transformer import ( # noqa: H301
GuidedMultiHeadAttentionLoss,
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as DecoderPrenet
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.cli_utils import strtobool
from espnet.utils.fill_missing_args import fill_missing_args
class Transformer(TTSInterface, torch.nn.Module):
    """VC Transformer module.

    This is a module of the Voice Transformer Network
    (a.k.a. VTN or Transformer-VC) described in
    `Voice Transformer Network: Sequence-to-Sequence
    Voice Conversion Using Transformer with
    Text-to-Speech Pretraining`_,
    which converts a sequence of source acoustic features
    into a sequence of target acoustic features
    (sequence-to-sequence voice conversion).

    .. _`Voice Transformer Network: Sequence-to-Sequence
        Voice Conversion Using Transformer with
        Text-to-Speech Pretraining`:
        https://arxiv.org/pdf/1912.06813.pdf

    """
    @staticmethod
    def add_arguments(parser):
        """Add model-specific arguments to the parser.

        Args:
            parser (argparse.ArgumentParser): Parser to which a
                "transformer model setting" argument group is added.

        Returns:
            argparse.ArgumentParser: The same parser, for chaining.
        """
        group = parser.add_argument_group("transformer model setting")
        # network structure related
        group.add_argument(
            "--eprenet-conv-layers",
            default=0,
            type=int,
            help="Number of encoder prenet convolution layers",
        )
        group.add_argument(
            "--eprenet-conv-chans",
            default=0,
            type=int,
            help="Number of encoder prenet convolution channels",
        )
        group.add_argument(
            "--eprenet-conv-filts",
            default=0,
            type=int,
            help="Filter size of encoder prenet convolution",
        )
        group.add_argument(
            "--transformer-input-layer",
            default="linear",
            type=str,
            help="Type of input layer (linear or conv2d)",
        )
        group.add_argument(
            "--dprenet-layers",
            default=2,
            type=int,
            help="Number of decoder prenet layers",
        )
        group.add_argument(
            "--dprenet-units",
            default=256,
            type=int,
            help="Number of decoder prenet hidden units",
        )
        group.add_argument(
            "--elayers", default=3, type=int, help="Number of encoder layers"
        )
        group.add_argument(
            "--eunits", default=1536, type=int, help="Number of encoder hidden units"
        )
        group.add_argument(
            "--adim",
            default=384,
            type=int,
            help="Number of attention transformation dimensions",
        )
        group.add_argument(
            "--aheads",
            default=4,
            type=int,
            help="Number of heads for multi head attention",
        )
        group.add_argument(
            "--dlayers", default=3, type=int, help="Number of decoder layers"
        )
        group.add_argument(
            "--dunits", default=1536, type=int, help="Number of decoder hidden units"
        )
        group.add_argument(
            "--positionwise-layer-type",
            default="linear",
            type=str,
            choices=["linear", "conv1d", "conv1d-linear"],
            help="Positionwise layer type.",
        )
        group.add_argument(
            "--positionwise-conv-kernel-size",
            default=1,
            type=int,
            help="Kernel size of positionwise conv1d layer",
        )
        group.add_argument(
            "--postnet-layers", default=5, type=int, help="Number of postnet layers"
        )
        group.add_argument(
            "--postnet-chans", default=256, type=int, help="Number of postnet channels"
        )
        group.add_argument(
            "--postnet-filts", default=5, type=int, help="Filter size of postnet"
        )
        # boolean flags are parsed with strtobool so string values work on the CLI
        group.add_argument(
            "--use-scaled-pos-enc",
            default=True,
            type=strtobool,
            help="Use trainable scaled positional encoding"
            "instead of the fixed scale one.",
        )
        group.add_argument(
            "--use-batch-norm",
            default=True,
            type=strtobool,
            help="Whether to use batch normalization",
        )
        group.add_argument(
            "--encoder-normalize-before",
            default=False,
            type=strtobool,
            help="Whether to apply layer norm before encoder block",
        )
        group.add_argument(
            "--decoder-normalize-before",
            default=False,
            type=strtobool,
            help="Whether to apply layer norm before decoder block",
        )
        group.add_argument(
            "--encoder-concat-after",
            default=False,
            type=strtobool,
            help="Whether to concatenate attention layer's input and output in encoder",
        )
        group.add_argument(
            "--decoder-concat-after",
            default=False,
            type=strtobool,
            help="Whether to concatenate attention layer's input and output in decoder",
        )
        group.add_argument(
            "--reduction-factor",
            default=1,
            type=int,
            help="Reduction factor (for decoder)",
        )
        group.add_argument(
            "--encoder-reduction-factor",
            default=1,
            type=int,
            help="Reduction factor (for encoder)",
        )
        group.add_argument(
            "--spk-embed-dim",
            default=None,
            type=int,
            help="Number of speaker embedding dimensions",
        )
        group.add_argument(
            "--spk-embed-integration-type",
            type=str,
            default="add",
            choices=["add", "concat"],
            help="How to integrate speaker embedding",
        )
        # training related
        group.add_argument(
            "--transformer-init",
            type=str,
            default="pytorch",
            choices=[
                "pytorch",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
            ],
            help="How to initialize transformer parameters",
        )
        group.add_argument(
            "--initial-encoder-alpha",
            type=float,
            default=1.0,
            help="Initial alpha value in encoder's ScaledPositionalEncoding",
        )
        group.add_argument(
            "--initial-decoder-alpha",
            type=float,
            default=1.0,
            help="Initial alpha value in decoder's ScaledPositionalEncoding",
        )
        group.add_argument(
            "--transformer-lr",
            default=1.0,
            type=float,
            help="Initial value of learning rate",
        )
        group.add_argument(
            "--transformer-warmup-steps",
            default=4000,
            type=int,
            help="Optimizer warmup steps",
        )
        group.add_argument(
            "--transformer-enc-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder except for attention",
        )
        group.add_argument(
            "--transformer-enc-positional-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder positional encoding",
        )
        group.add_argument(
            "--transformer-enc-attn-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder self-attention",
        )
        group.add_argument(
            "--transformer-dec-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer decoder "
            "except for attention and pos encoding",
        )
        group.add_argument(
            "--transformer-dec-positional-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer decoder positional encoding",
        )
        group.add_argument(
            "--transformer-dec-attn-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer decoder self-attention",
        )
        group.add_argument(
            "--transformer-enc-dec-attn-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder-decoder attention",
        )
        group.add_argument(
            "--eprenet-dropout-rate",
            default=0.5,
            type=float,
            help="Dropout rate in encoder prenet",
        )
        group.add_argument(
            "--dprenet-dropout-rate",
            default=0.5,
            type=float,
            help="Dropout rate in decoder prenet",
        )
        group.add_argument(
            "--postnet-dropout-rate",
            default=0.5,
            type=float,
            help="Dropout rate in postnet",
        )
        group.add_argument(
            "--pretrained-model", default=None, type=str, help="Pretrained model path"
        )
        # loss related
        group.add_argument(
            "--use-masking",
            default=True,
            type=strtobool,
            help="Whether to use masking in calculation of loss",
        )
        group.add_argument(
            "--use-weighted-masking",
            default=False,
            type=strtobool,
            help="Whether to use weighted masking in calculation of loss",
        )
        group.add_argument(
            "--loss-type",
            default="L1",
            choices=["L1", "L2", "L1+L2"],
            help="How to calc loss",
        )
        group.add_argument(
            "--bce-pos-weight",
            default=5.0,
            type=float,
            help="Positive sample weight in BCE calculation "
            "(only for use-masking=True)",
        )
        group.add_argument(
            "--use-guided-attn-loss",
            default=False,
            type=strtobool,
            help="Whether to use guided attention loss",
        )
        group.add_argument(
            "--guided-attn-loss-sigma",
            default=0.4,
            type=float,
            help="Sigma in guided attention loss",
        )
        group.add_argument(
            "--guided-attn-loss-lambda",
            default=1.0,
            type=float,
            help="Lambda in guided attention loss",
        )
        group.add_argument(
            "--num-heads-applied-guided-attn",
            default=2,
            type=int,
            help="Number of heads in each layer to be applied guided attention loss"
            "if set -1, all of the heads will be applied.",
        )
        group.add_argument(
            "--num-layers-applied-guided-attn",
            default=2,
            type=int,
            help="Number of layers to be applied guided attention loss"
            "if set -1, all of the layers will be applied.",
        )
        group.add_argument(
            "--modules-applied-guided-attn",
            type=str,
            nargs="+",
            default=["encoder-decoder"],
            help="Module name list to be applied guided attention loss",
        )
        return parser
    @property
    def attention_plot_class(self):
        """Return plot class for attention weight plot (used by the trainer)."""
        return TTSPlot
    def __init__(self, idim, odim, args=None):
        """Initialize Transformer-VC module.

        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            args (Namespace, optional):
                - eprenet_conv_layers (int):
                    Number of encoder prenet convolution layers.
                - eprenet_conv_chans (int):
                    Number of encoder prenet convolution channels.
                - eprenet_conv_filts (int):
                    Filter size of encoder prenet convolution.
                - transformer_input_layer (str): Input layer before the encoder.
                - dprenet_layers (int): Number of decoder prenet layers.
                - dprenet_units (int): Number of decoder prenet hidden units.
                - elayers (int): Number of encoder layers.
                - eunits (int): Number of encoder hidden units.
                - adim (int): Number of attention transformation dimensions.
                - aheads (int): Number of heads for multi head attention.
                - dlayers (int): Number of decoder layers.
                - dunits (int): Number of decoder hidden units.
                - postnet_layers (int): Number of postnet layers.
                - postnet_chans (int): Number of postnet channels.
                - postnet_filts (int): Filter size of postnet.
                - use_scaled_pos_enc (bool):
                    Whether to use trainable scaled positional encoding.
                - use_batch_norm (bool):
                    Whether to use batch normalization in encoder prenet.
                - encoder_normalize_before (bool):
                    Whether to perform layer normalization before encoder block.
                - decoder_normalize_before (bool):
                    Whether to perform layer normalization before decoder block.
                - encoder_concat_after (bool): Whether to concatenate
                    attention layer's input and output in encoder.
                - decoder_concat_after (bool): Whether to concatenate
                    attention layer's input and output in decoder.
                - reduction_factor (int): Reduction factor (for decoder).
                - encoder_reduction_factor (int): Reduction factor (for encoder).
                - spk_embed_dim (int): Number of speaker embedding dimensions.
                - spk_embed_integration_type: How to integrate speaker embedding.
                - transformer_init (float): How to initialize transformer parameters.
                - transformer_lr (float): Initial value of learning rate.
                - transformer_warmup_steps (int): Optimizer warmup steps.
                - transformer_enc_dropout_rate (float):
                    Dropout rate in encoder except attention & positional encoding.
                - transformer_enc_positional_dropout_rate (float):
                    Dropout rate after encoder positional encoding.
                - transformer_enc_attn_dropout_rate (float):
                    Dropout rate in encoder self-attention module.
                - transformer_dec_dropout_rate (float):
                    Dropout rate in decoder except attention & positional encoding.
                - transformer_dec_positional_dropout_rate (float):
                    Dropout rate after decoder positional encoding.
                - transformer_dec_attn_dropout_rate (float):
                    Dropout rate in decoder self-attention module.
                - transformer_enc_dec_attn_dropout_rate (float):
                    Dropout rate in encoder-decoder attention module.
                - eprenet_dropout_rate (float): Dropout rate in encoder prenet.
                - dprenet_dropout_rate (float): Dropout rate in decoder prenet.
                - postnet_dropout_rate (float): Dropout rate in postnet.
                - use_masking (bool):
                    Whether to apply masking for padded part in loss calculation.
                - use_weighted_masking (bool):
                    Whether to apply weighted masking in loss calculation.
                - bce_pos_weight (float): Positive sample weight in bce calculation
                    (only for use_masking=true).
                - loss_type (str): How to calculate loss.
                - use_guided_attn_loss (bool): Whether to use guided attention loss.
                - num_heads_applied_guided_attn (int):
                    Number of heads in each layer to apply guided attention loss.
                - num_layers_applied_guided_attn (int):
                    Number of layers to apply guided attention loss.
                - modules_applied_guided_attn (list):
                    List of module names to apply guided attention loss.
                - guided-attn-loss-sigma (float) Sigma in guided attention loss.
                - guided-attn-loss-lambda (float): Lambda in guided attention loss.
        """
        # initialize base classes
        TTSInterface.__init__(self)
        torch.nn.Module.__init__(self)
        # fill missing arguments with add_arguments defaults
        args = fill_missing_args(args, self.add_arguments)
        # store hyperparameters
        self.idim = idim
        self.odim = odim
        self.spk_embed_dim = args.spk_embed_dim
        if self.spk_embed_dim is not None:
            self.spk_embed_integration_type = args.spk_embed_integration_type
        self.use_scaled_pos_enc = args.use_scaled_pos_enc
        self.reduction_factor = args.reduction_factor
        self.encoder_reduction_factor = args.encoder_reduction_factor
        self.transformer_input_layer = args.transformer_input_layer
        self.loss_type = args.loss_type
        self.use_guided_attn_loss = args.use_guided_attn_loss
        if self.use_guided_attn_loss:
            # -1 means "apply to all layers/heads"
            if args.num_layers_applied_guided_attn == -1:
                self.num_layers_applied_guided_attn = args.elayers
            else:
                self.num_layers_applied_guided_attn = (
                    args.num_layers_applied_guided_attn
                )
            if args.num_heads_applied_guided_attn == -1:
                self.num_heads_applied_guided_attn = args.aheads
            else:
                self.num_heads_applied_guided_attn = args.num_heads_applied_guided_attn
            self.modules_applied_guided_attn = args.modules_applied_guided_attn
        # use idx 0 as padding idx
        padding_idx = 0
        # get positional encoding class
        pos_enc_class = (
            ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
        )
        # define transformer encoder
        if args.eprenet_conv_layers != 0:
            # encoder prenet (conv stack) followed by a projection to adim
            encoder_input_layer = torch.nn.Sequential(
                EncoderPrenet(
                    idim=idim,
                    elayers=0,
                    econv_layers=args.eprenet_conv_layers,
                    econv_chans=args.eprenet_conv_chans,
                    econv_filts=args.eprenet_conv_filts,
                    use_batch_norm=args.use_batch_norm,
                    dropout_rate=args.eprenet_dropout_rate,
                    padding_idx=padding_idx,
                    input_layer=torch.nn.Linear(
                        idim * args.encoder_reduction_factor, idim
                    ),
                ),
                torch.nn.Linear(args.eprenet_conv_chans, args.adim),
            )
        elif args.transformer_input_layer == "linear":
            # stacked frames (idim * encoder_reduction_factor) projected to adim
            encoder_input_layer = torch.nn.Linear(
                idim * args.encoder_reduction_factor, args.adim
            )
        else:
            encoder_input_layer = args.transformer_input_layer
        self.encoder = Encoder(
            idim=idim,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            linear_units=args.eunits,
            num_blocks=args.elayers,
            input_layer=encoder_input_layer,
            dropout_rate=args.transformer_enc_dropout_rate,
            positional_dropout_rate=args.transformer_enc_positional_dropout_rate,
            attention_dropout_rate=args.transformer_enc_attn_dropout_rate,
            pos_enc_class=pos_enc_class,
            normalize_before=args.encoder_normalize_before,
            concat_after=args.encoder_concat_after,
            positionwise_layer_type=args.positionwise_layer_type,
            positionwise_conv_kernel_size=args.positionwise_conv_kernel_size,
        )
        # define projection layer (for speaker-embedding integration)
        if self.spk_embed_dim is not None:
            if self.spk_embed_integration_type == "add":
                self.projection = torch.nn.Linear(self.spk_embed_dim, args.adim)
            else:
                self.projection = torch.nn.Linear(
                    args.adim + self.spk_embed_dim, args.adim
                )
        # define transformer decoder
        if args.dprenet_layers != 0:
            # decoder prenet
            decoder_input_layer = torch.nn.Sequential(
                DecoderPrenet(
                    idim=odim,
                    n_layers=args.dprenet_layers,
                    n_units=args.dprenet_units,
                    dropout_rate=args.dprenet_dropout_rate,
                ),
                torch.nn.Linear(args.dprenet_units, args.adim),
            )
        else:
            decoder_input_layer = "linear"
        # odim=-1 with use_output_layer=False: features are produced by
        # feat_out/prob_out below, not by the decoder's own output layer
        self.decoder = Decoder(
            odim=-1,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            linear_units=args.dunits,
            num_blocks=args.dlayers,
            dropout_rate=args.transformer_dec_dropout_rate,
            positional_dropout_rate=args.transformer_dec_positional_dropout_rate,
            self_attention_dropout_rate=args.transformer_dec_attn_dropout_rate,
            src_attention_dropout_rate=args.transformer_enc_dec_attn_dropout_rate,
            input_layer=decoder_input_layer,
            use_output_layer=False,
            pos_enc_class=pos_enc_class,
            normalize_before=args.decoder_normalize_before,
            concat_after=args.decoder_concat_after,
        )
        # define final projection (features and stop-token logits)
        self.feat_out = torch.nn.Linear(args.adim, odim * args.reduction_factor)
        self.prob_out = torch.nn.Linear(args.adim, args.reduction_factor)
        # define postnet
        self.postnet = (
            None
            if args.postnet_layers == 0
            else Postnet(
                idim=idim,
                odim=odim,
                n_layers=args.postnet_layers,
                n_chans=args.postnet_chans,
                n_filts=args.postnet_filts,
                use_batch_norm=args.use_batch_norm,
                dropout_rate=args.postnet_dropout_rate,
            )
        )
        # define loss function
        self.criterion = TransformerLoss(
            use_masking=args.use_masking,
            use_weighted_masking=args.use_weighted_masking,
            bce_pos_weight=args.bce_pos_weight,
        )
        if self.use_guided_attn_loss:
            self.attn_criterion = GuidedMultiHeadAttentionLoss(
                sigma=args.guided_attn_loss_sigma,
                alpha=args.guided_attn_loss_lambda,
            )
        # initialize parameters
        self._reset_parameters(
            init_type=args.transformer_init,
            init_enc_alpha=args.initial_encoder_alpha,
            init_dec_alpha=args.initial_decoder_alpha,
        )
        # load pretrained model
        if args.pretrained_model is not None:
            self.load_pretrained_model(args.pretrained_model)
    def _reset_parameters(self, init_type, init_enc_alpha=1.0, init_dec_alpha=1.0):
        """Initialize network weights and positional-encoding alphas.

        Args:
            init_type (str): Initialization scheme passed to ``initialize``.
            init_enc_alpha (float): Initial alpha of the encoder's
                ScaledPositionalEncoding (only used with use_scaled_pos_enc).
            init_dec_alpha (float): Initial alpha of the decoder's
                ScaledPositionalEncoding (only used with use_scaled_pos_enc).
        """
        # initialize parameters
        initialize(self, init_type)
        # initialize alpha in scaled positional encoding
        if self.use_scaled_pos_enc:
            self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
            self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
def _add_first_frame_and_remove_last_frame(self, ys):
ys_in = torch.cat(
[ys.new_zeros((ys.shape[0], 1, ys.shape[2])), ys[:, :-1]], dim=1
)
return ys_in
    def forward(self, xs, ilens, ys, labels, olens, spembs=None, *args, **kwargs):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            labels (Tensor): Batch of stop-token labels (B, Lmax); forced to 1.0
                at the last valid frame below (see #3388).
            olens (LongTensor): Batch of the lengths of each target (B,).
            spembs (Tensor, optional): Batch of speaker embedding vectors
                (B, spk_embed_dim).

        Returns:
            Tensor: Loss value.
        """
        # remove unnecessary padded part (for multi-gpus)
        max_ilen = max(ilens)
        max_olen = max(olens)
        if max_ilen != xs.shape[1]:
            xs = xs[:, :max_ilen]
        if max_olen != ys.shape[1]:
            ys = ys[:, :max_olen]
            labels = labels[:, :max_olen]
        # thin out input frames for reduction factor
        # (B, Lmax, idim) -> (B, Lmax // r, idim * r)
        if self.encoder_reduction_factor > 1:
            B, Lmax, idim = xs.shape
            # trim frames that do not fill a complete group of r frames
            if Lmax % self.encoder_reduction_factor != 0:
                xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
            xs_ds = xs.contiguous().view(
                B,
                int(Lmax / self.encoder_reduction_factor),
                idim * self.encoder_reduction_factor,
            )
            ilens_ds = ilens.new(
                [ilen // self.encoder_reduction_factor for ilen in ilens]
            )
        else:
            xs_ds, ilens_ds = xs, ilens
        # forward encoder
        x_masks = self._source_mask(ilens_ds).to(xs.device)
        hs, hs_masks = self.encoder(xs_ds, x_masks)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs_int = self._integrate_with_spk_embed(hs, spembs)
        else:
            hs_int = hs
        # thin out frames for reduction factor (B, Lmax, odim) -> (B, Lmax//r, odim)
        if self.reduction_factor > 1:
            ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
            olens_in = olens.new([olen // self.reduction_factor for olen in olens])
        else:
            ys_in, olens_in = ys, olens
        # add first zero frame and remove last frame for auto-regressive
        ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
        # if conv2d, modify mask. Use ceiling division here
        # (two stride-2 convolutions in the conv2d subsampling front-end)
        if "conv2d" in self.transformer_input_layer:
            ilens_ds_st = ilens_ds.new(
                [((ilen - 2 + 1) // 2 - 2 + 1) // 2 for ilen in ilens_ds]
            )
        else:
            ilens_ds_st = ilens_ds
        # forward decoder
        y_masks = self._target_mask(olens_in).to(xs.device)
        zs, _ = self.decoder(ys_in, y_masks, hs_int, hs_masks)
        # (B, Lmax//r, odim * r) -> (B, Lmax//r * r, odim)
        before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
        # (B, Lmax//r, r) -> (B, Lmax//r * r)
        logits = self.prob_out(zs).view(zs.size(0), -1)
        # postnet -> (B, Lmax//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)
        # modify mod part of groundtruth to match the reduced output length
        if self.reduction_factor > 1:
            assert olens.ge(
                self.reduction_factor
            ).all(), "Output length must be greater than or equal to reduction factor."
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
            max_olen = max(olens)
            ys = ys[:, :max_olen]
            labels = labels[:, :max_olen]
            # force the stop label at the (new) last frame
            labels = torch.scatter(
                labels, 1, (olens - 1).unsqueeze(1), 1.0
            )  # see #3388
        # calculate loss values
        l1_loss, l2_loss, bce_loss = self.criterion(
            after_outs, before_outs, logits, ys, labels, olens
        )
        if self.loss_type == "L1":
            loss = l1_loss + bce_loss
        elif self.loss_type == "L2":
            loss = l2_loss + bce_loss
        elif self.loss_type == "L1+L2":
            loss = l1_loss + l2_loss + bce_loss
        else:
            raise ValueError("unknown --loss-type " + self.loss_type)
        report_keys = [
            {"l1_loss": l1_loss.item()},
            {"l2_loss": l2_loss.item()},
            {"bce_loss": bce_loss.item()},
            {"loss": loss.item()},
        ]
        # calculate guided attention loss
        # (layers are taken from the last one backwards, up to
        #  num_layers_applied_guided_attn; heads are truncated per layer)
        if self.use_guided_attn_loss:
            # calculate for encoder
            if "encoder" in self.modules_applied_guided_attn:
                att_ws = []
                for idx, layer_idx in enumerate(
                    reversed(range(len(self.encoder.encoders)))
                ):
                    att_ws += [
                        self.encoder.encoders[layer_idx].self_attn.attn[
                            :, : self.num_heads_applied_guided_attn
                        ]
                    ]
                    if idx + 1 == self.num_layers_applied_guided_attn:
                        break
                att_ws = torch.cat(att_ws, dim=1)  # (B, H*L, T_in, T_in)
                enc_attn_loss = self.attn_criterion(
                    att_ws, ilens_ds_st, ilens_ds_st
                )  # TODO(unilight): is changing to ilens_ds_st right?
                loss = loss + enc_attn_loss
                report_keys += [{"enc_attn_loss": enc_attn_loss.item()}]
            # calculate for decoder
            if "decoder" in self.modules_applied_guided_attn:
                att_ws = []
                for idx, layer_idx in enumerate(
                    reversed(range(len(self.decoder.decoders)))
                ):
                    att_ws += [
                        self.decoder.decoders[layer_idx].self_attn.attn[
                            :, : self.num_heads_applied_guided_attn
                        ]
                    ]
                    if idx + 1 == self.num_layers_applied_guided_attn:
                        break
                att_ws = torch.cat(att_ws, dim=1)  # (B, H*L, T_out, T_out)
                dec_attn_loss = self.attn_criterion(att_ws, olens_in, olens_in)
                loss = loss + dec_attn_loss
                report_keys += [{"dec_attn_loss": dec_attn_loss.item()}]
            # calculate for encoder-decoder
            if "encoder-decoder" in self.modules_applied_guided_attn:
                att_ws = []
                for idx, layer_idx in enumerate(
                    reversed(range(len(self.decoder.decoders)))
                ):
                    att_ws += [
                        self.decoder.decoders[layer_idx].src_attn.attn[
                            :, : self.num_heads_applied_guided_attn
                        ]
                    ]
                    if idx + 1 == self.num_layers_applied_guided_attn:
                        break
                att_ws = torch.cat(att_ws, dim=1)  # (B, H*L, T_out, T_in)
                enc_dec_attn_loss = self.attn_criterion(
                    att_ws, ilens_ds_st, olens_in
                )  # TODO(unilight): is changing to ilens_ds_st right?
                loss = loss + enc_dec_attn_loss
                report_keys += [{"enc_dec_attn_loss": enc_dec_attn_loss.item()}]
        # report extra information
        if self.use_scaled_pos_enc:
            report_keys += [
                {"encoder_alpha": self.encoder.embed[-1].alpha.data.item()},
                {"decoder_alpha": self.decoder.embed[-1].alpha.data.item()},
            ]
        self.reporter.report(report_keys)
        return loss
    def inference(self, x, inference_args, spemb=None, *args, **kwargs):
        """Generate the sequence of features given the sequences of acoustic features.

        Args:
            x (Tensor): Input sequence of acoustic features (T, idim).
            inference_args (Namespace):
                - threshold (float): Threshold in inference.
                - minlenratio (float): Minimum length ratio in inference.
                - maxlenratio (float): Maximum length ratio in inference.
            spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim).

        Returns:
            Tensor: Output sequence of features (L, odim).
            Tensor: Output sequence of stop probabilities (L,).
            Tensor: Encoder-decoder (source) attention weights (#layers, #heads, L, T).
        """
        # get options
        threshold = inference_args.threshold
        minlenratio = inference_args.minlenratio
        maxlenratio = inference_args.maxlenratio
        use_att_constraint = getattr(
            inference_args, "use_att_constraint", False
        )  # keep compatibility
        if use_att_constraint:
            logging.warning(
                "Attention constraint is not yet supported in Transformer. Not enabled."
            )
        # thin out input frames for reduction factor
        # (B, Lmax, idim) -> (B, Lmax // r, idim * r)
        if self.encoder_reduction_factor > 1:
            Lmax, idim = x.shape
            # trim frames that do not fill a complete group of r frames
            if Lmax % self.encoder_reduction_factor != 0:
                x = x[: -(Lmax % self.encoder_reduction_factor), :]
            x_ds = x.contiguous().view(
                int(Lmax / self.encoder_reduction_factor),
                idim * self.encoder_reduction_factor,
            )
        else:
            x_ds = x
        # forward encoder
        x_ds = x_ds.unsqueeze(0)
        hs, _ = self.encoder(x_ds, None)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            spembs = spemb.unsqueeze(0)
            hs = self._integrate_with_spk_embed(hs, spembs)
        # set limits of length
        maxlen = int(hs.size(1) * maxlenratio / self.reduction_factor)
        minlen = int(hs.size(1) * minlenratio / self.reduction_factor)
        # initialize
        idx = 0
        ys = hs.new_zeros(1, 1, self.odim)
        outs, probs = [], []
        # forward decoder step-by-step
        z_cache = self.decoder.init_state(x)
        while True:
            # update index
            idx += 1
            # calculate output and stop prob at idx-th step
            y_masks = subsequent_mask(idx).unsqueeze(0).to(x.device)
            z, z_cache = self.decoder.forward_one_step(
                ys, y_masks, hs, cache=z_cache
            )  # (B, adim)
            outs += [
                self.feat_out(z).view(self.reduction_factor, self.odim)
            ]  # [(r, odim), ...]
            probs += [torch.sigmoid(self.prob_out(z))[0]]  # [(r), ...]
            # update next inputs (feed back the last generated frame)
            ys = torch.cat(
                (ys, outs[-1][-1].view(1, 1, self.odim)), dim=1
            )  # (1, idx + 1, odim)
            # get attention weights from every source-attention module
            att_ws_ = []
            for name, m in self.named_modules():
                if isinstance(m, MultiHeadedAttention) and "src" in name:
                    att_ws_ += [m.attn[0, :, -1].unsqueeze(1)]  # [(#heads, 1, T),...]
            if idx == 1:
                att_ws = att_ws_
            else:
                # [(#heads, l, T), ...]
                att_ws = [
                    torch.cat([att_w, att_w_], dim=1)
                    for att_w, att_w_ in zip(att_ws, att_ws_)
                ]
            # check whether to finish generation
            if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
                # check minimum length
                if idx < minlen:
                    continue
                outs = (
                    torch.cat(outs, dim=0).unsqueeze(0).transpose(1, 2)
                )  # (L, odim) -> (1, L, odim) -> (1, odim, L)
                if self.postnet is not None:
                    outs = outs + self.postnet(outs)  # (1, odim, L)
                outs = outs.transpose(2, 1).squeeze(0)  # (L, odim)
                probs = torch.cat(probs, dim=0)
                break
        # concatenate attention weights -> (#layers, #heads, L, T)
        att_ws = torch.stack(att_ws, dim=0)
        return outs, probs, att_ws
    def calculate_all_attentions(
        self,
        xs,
        ilens,
        ys,
        olens,
        spembs=None,
        skip_output=False,
        keep_tensor=False,
        *args,
        **kwargs
    ):
        """Calculate all of the attention weights.

        Args:
            xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            olens (LongTensor): Batch of the lengths of each target (B,).
            spembs (Tensor, optional): Batch of speaker embedding vectors
                (B, spk_embed_dim).
            skip_output (bool, optional): Whether to skip calculate the final output.
            keep_tensor (bool, optional): Whether to keep original tensor
                (otherwise weights are converted to length-trimmed numpy arrays).

        Returns:
            dict: Dict of attention weights and outputs.
        """
        with torch.no_grad():
            # thin out input frames for reduction factor
            # (B, Lmax, idim) -> (B, Lmax // r, idim * r)
            if self.encoder_reduction_factor > 1:
                B, Lmax, idim = xs.shape
                if Lmax % self.encoder_reduction_factor != 0:
                    xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
                xs_ds = xs.contiguous().view(
                    B,
                    int(Lmax / self.encoder_reduction_factor),
                    idim * self.encoder_reduction_factor,
                )
                ilens_ds = ilens.new(
                    [ilen // self.encoder_reduction_factor for ilen in ilens]
                )
            else:
                xs_ds, ilens_ds = xs, ilens
            # forward encoder
            x_masks = self._source_mask(ilens_ds).to(xs.device)
            hs, hs_masks = self.encoder(xs_ds, x_masks)
            # integrate speaker embedding
            if self.spk_embed_dim is not None:
                hs = self._integrate_with_spk_embed(hs, spembs)
            # thin out frames for reduction factor
            # (B, Lmax, odim) -> (B, Lmax//r, odim)
            if self.reduction_factor > 1:
                ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
                olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            else:
                ys_in, olens_in = ys, olens
            # add first zero frame and remove last frame for auto-regressive
            ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
            # forward decoder
            y_masks = self._target_mask(olens_in).to(xs.device)
            zs, _ = self.decoder(ys_in, y_masks, hs, hs_masks)
            # calculate final outputs
            if not skip_output:
                before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
                if self.postnet is None:
                    after_outs = before_outs
                else:
                    after_outs = before_outs + self.postnet(
                        before_outs.transpose(1, 2)
                    ).transpose(1, 2)
        # modify mod part of output lengths due to reduction factor > 1
        if self.reduction_factor > 1:
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
        # store into dict
        att_ws_dict = dict()
        if keep_tensor:
            for name, m in self.named_modules():
                if isinstance(m, MultiHeadedAttention):
                    att_ws_dict[name] = m.attn
            if not skip_output:
                att_ws_dict["before_postnet_fbank"] = before_outs
                att_ws_dict["after_postnet_fbank"] = after_outs
        else:
            # convert to numpy and trim each weight matrix to its valid lengths
            for name, m in self.named_modules():
                if isinstance(m, MultiHeadedAttention):
                    attn = m.attn.cpu().numpy()
                    if "encoder" in name:
                        attn = [a[:, :l, :l] for a, l in zip(attn, ilens.tolist())]
                    elif "decoder" in name:
                        if "src" in name:
                            attn = [
                                a[:, :ol, :il]
                                for a, il, ol in zip(
                                    attn, ilens.tolist(), olens_in.tolist()
                                )
                            ]
                        elif "self" in name:
                            attn = [
                                a[:, :l, :l] for a, l in zip(attn, olens_in.tolist())
                            ]
                        else:
                            logging.warning("unknown attention module: " + name)
                    else:
                        logging.warning("unknown attention module: " + name)
                    att_ws_dict[name] = attn
            if not skip_output:
                before_outs = before_outs.cpu().numpy()
                after_outs = after_outs.cpu().numpy()
                att_ws_dict["before_postnet_fbank"] = [
                    m[:l].T for m, l in zip(before_outs, olens.tolist())
                ]
                att_ws_dict["after_postnet_fbank"] = [
                    m[:l].T for m, l in zip(after_outs, olens.tolist())
                ]
        return att_ws_dict
def _integrate_with_spk_embed(self, hs, spembs):
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, Tmax, adim)
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds and then apply projection
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
def _source_mask(self, ilens):
"""Make masks for self-attention.
Args:
ilens (LongTensor or List): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for self-attention.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
>>> ilens = [5, 3]
>>> self._source_mask(ilens)
tensor([[[1, 1, 1, 1, 1],
[[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
x_masks = make_non_pad_mask(ilens)
return x_masks.unsqueeze(-2)
def _target_mask(self, olens):
"""Make masks for masked self-attention.
Args:
olens (LongTensor or List): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for masked self-attention.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
>>> olens = [5, 3]
>>> self._target_mask(olens)
tensor([[[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]],
[[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
y_masks = make_non_pad_mask(olens)
s_masks = subsequent_mask(y_masks.size(-1), device=y_masks.device).unsqueeze(0)
return y_masks.unsqueeze(-2) & s_masks
@property
def base_plot_keys(self):
"""Return base key names to plot during training.
keys should match what `chainer.reporter` reports.
If you add the key `loss`, the reporter will report `main/loss`
and `validation/main/loss` values.
also `loss.png` will be created as a figure visulizing `main/loss`
and `validation/main/loss` values.
Returns:
list: List of strings which are base keys to plot during training.
"""
plot_keys = ["loss", "l1_loss", "l2_loss", "bce_loss"]
if self.use_scaled_pos_enc:
plot_keys += ["encoder_alpha", "decoder_alpha"]
if self.use_guided_attn_loss:
if "encoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_attn_loss"]
if "decoder" in self.modules_applied_guided_attn:
plot_keys += ["dec_attn_loss"]
if "encoder-decoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_dec_attn_loss"]
return plot_keys
| 46,109 | 38.647463 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_vc_tacotron2.py | # Copyright 2020 Nagoya University (Wen-Chin Huang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Tacotron2-VC related modules."""
import logging
from distutils.util import strtobool
import numpy as np
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import Tacotron2Loss # noqa: H301
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import ( # noqa: H301
GuidedAttentionLoss,
)
from espnet.nets.pytorch_backend.rnn.attentions import AttForward, AttForwardTA, AttLoc
from espnet.nets.pytorch_backend.tacotron2.cbhg import CBHG, CBHGLoss
from espnet.nets.pytorch_backend.tacotron2.decoder import Decoder
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.fill_missing_args import fill_missing_args
class Tacotron2(TTSInterface, torch.nn.Module):
"""VC Tacotron2 module for VC.
This is a module of Tacotron2-based VC model,
which convert the sequence of acoustic features
into the sequence of acoustic features.
"""
@staticmethod
def add_arguments(parser):
"""Add model-specific arguments to the parser."""
group = parser.add_argument_group("tacotron 2 model setting")
# encoder
group.add_argument(
"--elayers", default=1, type=int, help="Number of encoder layers"
)
group.add_argument(
"--eunits",
"-u",
default=512,
type=int,
help="Number of encoder hidden units",
)
group.add_argument(
"--econv-layers",
default=3,
type=int,
help="Number of encoder convolution layers",
)
group.add_argument(
"--econv-chans",
default=512,
type=int,
help="Number of encoder convolution channels",
)
group.add_argument(
"--econv-filts",
default=5,
type=int,
help="Filter size of encoder convolution",
)
# attention
group.add_argument(
"--atype",
default="location",
type=str,
choices=["forward_ta", "forward", "location"],
help="Type of attention mechanism",
)
group.add_argument(
"--adim",
default=512,
type=int,
help="Number of attention transformation dimensions",
)
group.add_argument(
"--aconv-chans",
default=32,
type=int,
help="Number of attention convolution channels",
)
group.add_argument(
"--aconv-filts",
default=15,
type=int,
help="Filter size of attention convolution",
)
group.add_argument(
"--cumulate-att-w",
default=True,
type=strtobool,
help="Whether or not to cumulate attention weights",
)
# decoder
group.add_argument(
"--dlayers", default=2, type=int, help="Number of decoder layers"
)
group.add_argument(
"--dunits", default=1024, type=int, help="Number of decoder hidden units"
)
group.add_argument(
"--prenet-layers", default=2, type=int, help="Number of prenet layers"
)
group.add_argument(
"--prenet-units",
default=256,
type=int,
help="Number of prenet hidden units",
)
group.add_argument(
"--postnet-layers", default=5, type=int, help="Number of postnet layers"
)
group.add_argument(
"--postnet-chans", default=512, type=int, help="Number of postnet channels"
)
group.add_argument(
"--postnet-filts", default=5, type=int, help="Filter size of postnet"
)
group.add_argument(
"--output-activation",
default=None,
type=str,
nargs="?",
help="Output activation function",
)
# cbhg
group.add_argument(
"--use-cbhg",
default=False,
type=strtobool,
help="Whether to use CBHG module",
)
group.add_argument(
"--cbhg-conv-bank-layers",
default=8,
type=int,
help="Number of convoluional bank layers in CBHG",
)
group.add_argument(
"--cbhg-conv-bank-chans",
default=128,
type=int,
help="Number of convoluional bank channles in CBHG",
)
group.add_argument(
"--cbhg-conv-proj-filts",
default=3,
type=int,
help="Filter size of convoluional projection layer in CBHG",
)
group.add_argument(
"--cbhg-conv-proj-chans",
default=256,
type=int,
help="Number of convoluional projection channels in CBHG",
)
group.add_argument(
"--cbhg-highway-layers",
default=4,
type=int,
help="Number of highway layers in CBHG",
)
group.add_argument(
"--cbhg-highway-units",
default=128,
type=int,
help="Number of highway units in CBHG",
)
group.add_argument(
"--cbhg-gru-units",
default=256,
type=int,
help="Number of GRU units in CBHG",
)
# model (parameter) related
group.add_argument(
"--use-batch-norm",
default=True,
type=strtobool,
help="Whether to use batch normalization",
)
group.add_argument(
"--use-concate",
default=True,
type=strtobool,
help="Whether to concatenate encoder embedding with decoder outputs",
)
group.add_argument(
"--use-residual",
default=True,
type=strtobool,
help="Whether to use residual connection in conv layer",
)
group.add_argument(
"--dropout-rate", default=0.5, type=float, help="Dropout rate"
)
group.add_argument(
"--zoneout-rate", default=0.1, type=float, help="Zoneout rate"
)
group.add_argument(
"--reduction-factor",
default=1,
type=int,
help="Reduction factor (for decoder)",
)
group.add_argument(
"--encoder-reduction-factor",
default=1,
type=int,
help="Reduction factor (for encoder)",
)
group.add_argument(
"--spk-embed-dim",
default=None,
type=int,
help="Number of speaker embedding dimensions",
)
group.add_argument(
"--spc-dim", default=None, type=int, help="Number of spectrogram dimensions"
)
group.add_argument(
"--pretrained-model", default=None, type=str, help="Pretrained model path"
)
# loss related
group.add_argument(
"--use-masking",
default=False,
type=strtobool,
help="Whether to use masking in calculation of loss",
)
group.add_argument(
"--bce-pos-weight",
default=20.0,
type=float,
help="Positive sample weight in BCE calculation "
"(only for use-masking=True)",
)
group.add_argument(
"--use-guided-attn-loss",
default=False,
type=strtobool,
help="Whether to use guided attention loss",
)
group.add_argument(
"--guided-attn-loss-sigma",
default=0.4,
type=float,
help="Sigma in guided attention loss",
)
group.add_argument(
"--guided-attn-loss-lambda",
default=1.0,
type=float,
help="Lambda in guided attention loss",
)
group.add_argument(
"--src-reconstruction-loss-lambda",
default=1.0,
type=float,
help="Lambda in source reconstruction loss",
)
group.add_argument(
"--trg-reconstruction-loss-lambda",
default=1.0,
type=float,
help="Lambda in target reconstruction loss",
)
return parser
    def __init__(self, idim, odim, args=None):
        """Initialize Tacotron2 module.

        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            args (Namespace, optional):
                - spk_embed_dim (int): Dimension of the speaker embedding.
                - elayers (int): The number of encoder blstm layers.
                - eunits (int): The number of encoder blstm units.
                - econv_layers (int): The number of encoder conv layers.
                - econv_filts (int): The number of encoder conv filter size.
                - econv_chans (int): The number of encoder conv filter channels.
                - dlayers (int): The number of decoder lstm layers.
                - dunits (int): The number of decoder lstm units.
                - prenet_layers (int): The number of prenet layers.
                - prenet_units (int): The number of prenet units.
                - postnet_layers (int): The number of postnet layers.
                - postnet_filts (int): The number of postnet filter size.
                - postnet_chans (int): The number of postnet filter channels.
                - output_activation (int): The name of activation function for outputs.
                - adim (int): The number of dimension of mlp in attention.
                - aconv_chans (int): The number of attention conv filter channels.
                - aconv_filts (int): The number of attention conv filter size.
                - cumulate_att_w (bool): Whether to cumulate previous attention weight.
                - use_batch_norm (bool): Whether to use batch normalization.
                - use_concate (int):
                    Whether to concatenate encoder embedding with decoder lstm outputs.
                - dropout_rate (float): Dropout rate.
                - zoneout_rate (float): Zoneout rate.
                - reduction_factor (int): Reduction factor.
                - spk_embed_dim (int): Number of speaker embedding dimensions.
                - spc_dim (int): Number of spectrogram embedding dimensions
                    (only for use_cbhg=True).
                - use_cbhg (bool): Whether to use CBHG module.
                - cbhg_conv_bank_layers (int):
                    The number of convolutional banks in CBHG.
                - cbhg_conv_bank_chans (int):
                    The number of channels of convolutional bank in CBHG.
                - cbhg_proj_filts (int):
                    The number of filter size of projection layer in CBHG.
                - cbhg_proj_chans (int):
                    The number of channels of projection layer in CBHG.
                - cbhg_highway_layers (int):
                    The number of layers of highway network in CBHG.
                - cbhg_highway_units (int):
                    The number of units of highway network in CBHG.
                - cbhg_gru_units (int): The number of units of GRU in CBHG.
                - use_masking (bool): Whether to mask padded part in loss calculation.
                - bce_pos_weight (float): Weight of positive sample of stop token
                    (only for use_masking=True).
                - use-guided-attn-loss (bool): Whether to use guided attention loss.
                - guided-attn-loss-sigma (float) Sigma in guided attention loss.
                - guided-attn-loss-lambda (float): Lambda in guided attention loss.

        """
        # initialize base classes
        TTSInterface.__init__(self)
        torch.nn.Module.__init__(self)
        # fill missing arguments with the defaults declared in add_arguments
        args = fill_missing_args(args, self.add_arguments)
        # store hyperparameters
        self.idim = idim
        self.odim = odim
        self.adim = args.adim
        self.spk_embed_dim = args.spk_embed_dim
        self.cumulate_att_w = args.cumulate_att_w
        self.reduction_factor = args.reduction_factor
        self.encoder_reduction_factor = args.encoder_reduction_factor
        self.use_cbhg = args.use_cbhg
        self.use_guided_attn_loss = args.use_guided_attn_loss
        self.src_reconstruction_loss_lambda = args.src_reconstruction_loss_lambda
        self.trg_reconstruction_loss_lambda = args.trg_reconstruction_loss_lambda
        # define activation function for the final output
        # (resolved by name from torch.nn.functional, e.g. "tanh")
        if args.output_activation is None:
            self.output_activation_fn = None
        elif hasattr(F, args.output_activation):
            self.output_activation_fn = getattr(F, args.output_activation)
        else:
            raise ValueError(
                "there is no such an activation function. (%s)" % args.output_activation
            )
        # define network modules
        # NOTE: the encoder input dim is multiplied by the encoder reduction
        # factor because consecutive input frames are stacked in forward()
        self.enc = Encoder(
            idim=idim * args.encoder_reduction_factor,
            input_layer="linear",
            elayers=args.elayers,
            eunits=args.eunits,
            econv_layers=args.econv_layers,
            econv_chans=args.econv_chans,
            econv_filts=args.econv_filts,
            use_batch_norm=args.use_batch_norm,
            use_residual=args.use_residual,
            dropout_rate=args.dropout_rate,
        )
        # decoder input dim grows by spk_embed_dim when speaker embeddings
        # are concatenated to the encoder states
        dec_idim = (
            args.eunits
            if args.spk_embed_dim is None
            else args.eunits + args.spk_embed_dim
        )
        if args.atype == "location":
            att = AttLoc(
                dec_idim, args.dunits, args.adim, args.aconv_chans, args.aconv_filts
            )
        elif args.atype == "forward":
            att = AttForward(
                dec_idim, args.dunits, args.adim, args.aconv_chans, args.aconv_filts
            )
            # forward attention is incompatible with cumulated weights
            if self.cumulate_att_w:
                logging.warning(
                    "cumulation of attention weights is disabled in forward attention."
                )
                self.cumulate_att_w = False
        elif args.atype == "forward_ta":
            att = AttForwardTA(
                dec_idim,
                args.dunits,
                args.adim,
                args.aconv_chans,
                args.aconv_filts,
                odim,
            )
            # forward attention is incompatible with cumulated weights
            if self.cumulate_att_w:
                logging.warning(
                    "cumulation of attention weights is disabled in forward attention."
                )
                self.cumulate_att_w = False
        else:
            raise NotImplementedError("Support only location or forward")
        self.dec = Decoder(
            idim=dec_idim,
            odim=odim,
            att=att,
            dlayers=args.dlayers,
            dunits=args.dunits,
            prenet_layers=args.prenet_layers,
            prenet_units=args.prenet_units,
            postnet_layers=args.postnet_layers,
            postnet_chans=args.postnet_chans,
            postnet_filts=args.postnet_filts,
            output_activation_fn=self.output_activation_fn,
            cumulate_att_w=self.cumulate_att_w,
            use_batch_norm=args.use_batch_norm,
            use_concate=args.use_concate,
            dropout_rate=args.dropout_rate,
            zoneout_rate=args.zoneout_rate,
            reduction_factor=args.reduction_factor,
        )
        self.taco2_loss = Tacotron2Loss(
            use_masking=args.use_masking, bce_pos_weight=args.bce_pos_weight
        )
        if self.use_guided_attn_loss:
            self.attn_loss = GuidedAttentionLoss(
                sigma=args.guided_attn_loss_sigma,
                alpha=args.guided_attn_loss_lambda,
            )
        # optional CBHG post-network converting mel to linear spectrogram
        if self.use_cbhg:
            self.cbhg = CBHG(
                idim=odim,
                odim=args.spc_dim,
                conv_bank_layers=args.cbhg_conv_bank_layers,
                conv_bank_chans=args.cbhg_conv_bank_chans,
                conv_proj_filts=args.cbhg_conv_proj_filts,
                conv_proj_chans=args.cbhg_conv_proj_chans,
                highway_layers=args.cbhg_highway_layers,
                highway_units=args.cbhg_highway_units,
                gru_units=args.cbhg_gru_units,
            )
            self.cbhg_loss = CBHGLoss(use_masking=args.use_masking)
        # optional reconstructor predicting the source features back from
        # the encoder states (context-preservation loss)
        if self.src_reconstruction_loss_lambda > 0:
            self.src_reconstructor = Encoder(
                idim=dec_idim,
                input_layer="linear",
                elayers=args.elayers,
                eunits=args.eunits,
                econv_layers=args.econv_layers,
                econv_chans=args.econv_chans,
                econv_filts=args.econv_filts,
                use_batch_norm=args.use_batch_norm,
                use_residual=args.use_residual,
                dropout_rate=args.dropout_rate,
            )
            self.src_reconstructor_linear = torch.nn.Linear(
                args.econv_chans, idim * args.encoder_reduction_factor
            )
            self.src_reconstruction_loss = CBHGLoss(use_masking=args.use_masking)
        # optional reconstructor predicting the target features from the
        # attention-weighted encoder states (context-preservation loss)
        if self.trg_reconstruction_loss_lambda > 0:
            self.trg_reconstructor = Encoder(
                idim=dec_idim,
                input_layer="linear",
                elayers=args.elayers,
                eunits=args.eunits,
                econv_layers=args.econv_layers,
                econv_chans=args.econv_chans,
                econv_filts=args.econv_filts,
                use_batch_norm=args.use_batch_norm,
                use_residual=args.use_residual,
                dropout_rate=args.dropout_rate,
            )
            self.trg_reconstructor_linear = torch.nn.Linear(
                args.econv_chans, odim * args.reduction_factor
            )
            self.trg_reconstruction_loss = CBHGLoss(use_masking=args.use_masking)
        # load pretrained model
        if args.pretrained_model is not None:
            self.load_pretrained_model(args.pretrained_model)
    def forward(
        self, xs, ilens, ys, labels, olens, spembs=None, spcs=None, *args, **kwargs
    ):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            labels (Tensor): Batch of padded stop token labels (B, Lmax);
                the frame at each sequence's last index is forced to 1 below.
            olens (LongTensor): Batch of the lengths of each target (B,).
            spembs (Tensor, optional):
                Batch of speaker embedding vectors (B, spk_embed_dim).
            spcs (Tensor, optional):
                Batch of groundtruth spectrograms (B, Lmax, spc_dim).

        Returns:
            Tensor: Loss value.

        """
        # remove unnecessary padded part (for multi-gpus)
        max_in = max(ilens)
        max_out = max(olens)
        if max_in != xs.shape[1]:
            xs = xs[:, :max_in]
        if max_out != ys.shape[1]:
            ys = ys[:, :max_out]
            labels = labels[:, :max_out]
        # thin out input frames for reduction factor
        # (B, Lmax, idim) -> (B, Lmax // r, idim * r)
        if self.encoder_reduction_factor > 1:
            B, Lmax, idim = xs.shape
            # drop trailing frames so the length divides evenly
            if Lmax % self.encoder_reduction_factor != 0:
                xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
            xs_ds = xs.contiguous().view(
                B,
                int(Lmax / self.encoder_reduction_factor),
                idim * self.encoder_reduction_factor,
            )
            ilens_ds = ilens.new(
                [ilen // self.encoder_reduction_factor for ilen in ilens]
            )
        else:
            xs_ds, ilens_ds = xs, ilens
        # calculate tacotron2 outputs
        hs, hlens = self.enc(xs_ds, ilens_ds)
        if self.spk_embed_dim is not None:
            # tile the normalized speaker embedding along time and concat
            spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
            hs = torch.cat([hs, spembs], dim=-1)
        after_outs, before_outs, logits, att_ws = self.dec(hs, hlens, ys)
        # calculate src reconstruction
        if self.src_reconstruction_loss_lambda > 0:
            B, _in_length, _adim = hs.shape
            xt, xtlens = self.src_reconstructor(hs, hlens)
            xt = self.src_reconstructor_linear(xt)
            # unstack frames back to (B, Lmax, idim) when frames were stacked
            if self.encoder_reduction_factor > 1:
                xt = xt.view(B, -1, self.idim)
        # calculate trg reconstruction
        if self.trg_reconstruction_loss_lambda > 0:
            olens_trg_cp = olens.new(
                sorted([olen // self.reduction_factor for olen in olens], reverse=True)
            )
            B, _in_length, _adim = hs.shape
            _, _out_length, _ = att_ws.shape
            # att_R should be [B, out_length / r_d, adim]
            # (attention-weighted sum of encoder states per output step)
            att_R = torch.sum(
                hs.view(B, 1, _in_length, _adim)
                * att_ws.view(B, _out_length, _in_length, 1),
                dim=2,
            )
            yt, ytlens = self.trg_reconstructor(
                att_R, olens_trg_cp
            )  # is using olens correct?
            yt = self.trg_reconstructor_linear(yt)
            if self.reduction_factor > 1:
                yt = yt.view(
                    B, -1, self.odim
                )  # now att_R should be [B, out_length, adim]
        # modify mod part of groundtruth: trim target lengths to a multiple
        # of the decoder reduction factor
        if self.reduction_factor > 1:
            assert olens.ge(
                self.reduction_factor
            ).all(), "Output length must be greater than or equal to reduction factor."
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
            max_out = max(olens)
            ys = ys[:, :max_out]
            labels = labels[:, :max_out]
            # force the stop label at the (possibly trimmed) last frame
            labels = torch.scatter(
                labels, 1, (olens - 1).unsqueeze(1), 1.0
            )  # see #3388
        if self.encoder_reduction_factor > 1:
            # likewise trim input lengths to a multiple of the encoder factor
            ilens = ilens.new(
                [ilen - ilen % self.encoder_reduction_factor for ilen in ilens]
            )
            max_in = max(ilens)
            xs = xs[:, :max_in]
        # calculate taco2 loss
        l1_loss, mse_loss, bce_loss = self.taco2_loss(
            after_outs, before_outs, logits, ys, labels, olens
        )
        loss = l1_loss + mse_loss + bce_loss
        report_keys = [
            {"l1_loss": l1_loss.item()},
            {"mse_loss": mse_loss.item()},
            {"bce_loss": bce_loss.item()},
        ]
        # calculate context_preservation loss
        if self.src_reconstruction_loss_lambda > 0:
            src_recon_l1_loss, src_recon_mse_loss = self.src_reconstruction_loss(
                xt, xs, ilens
            )
            # NOTE(review): only the L1 part is added to the total loss; the
            # MSE part is reported but not optimized (matches trg branch below)
            loss = loss + src_recon_l1_loss
            report_keys += [
                {"src_recon_l1_loss": src_recon_l1_loss.item()},
                {"src_recon_mse_loss": src_recon_mse_loss.item()},
            ]
        if self.trg_reconstruction_loss_lambda > 0:
            trg_recon_l1_loss, trg_recon_mse_loss = self.trg_reconstruction_loss(
                yt, ys, olens
            )
            loss = loss + trg_recon_l1_loss
            report_keys += [
                {"trg_recon_l1_loss": trg_recon_l1_loss.item()},
                {"trg_recon_mse_loss": trg_recon_mse_loss.item()},
            ]
        # calculate attention loss
        if self.use_guided_attn_loss:
            # NOTE(kan-bayashi): length of output for auto-regressive input
            # will be changed when r > 1
            if self.encoder_reduction_factor > 1:
                ilens_in = ilens.new(
                    [ilen // self.encoder_reduction_factor for ilen in ilens]
                )
            else:
                ilens_in = ilens
            if self.reduction_factor > 1:
                olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            else:
                olens_in = olens
            attn_loss = self.attn_loss(att_ws, ilens_in, olens_in)
            loss = loss + attn_loss
            report_keys += [
                {"attn_loss": attn_loss.item()},
            ]
        # calculate cbhg loss
        if self.use_cbhg:
            # remove unnecessary padded part (for multi-gpus)
            if max_out != spcs.shape[1]:
                spcs = spcs[:, :max_out]
            # calculate cbhg outputs & loss and report them
            cbhg_outs, _ = self.cbhg(after_outs, olens)
            cbhg_l1_loss, cbhg_mse_loss = self.cbhg_loss(cbhg_outs, spcs, olens)
            loss = loss + cbhg_l1_loss + cbhg_mse_loss
            report_keys += [
                {"cbhg_l1_loss": cbhg_l1_loss.item()},
                {"cbhg_mse_loss": cbhg_mse_loss.item()},
            ]
        report_keys += [{"loss": loss.item()}]
        self.reporter.report(report_keys)
        return loss
def inference(self, x, inference_args, spemb=None, *args, **kwargs):
"""Generate the sequence of features given the sequences of characters.
Args:
x (Tensor): Input sequence of acoustic features (T, idim).
inference_args (Namespace):
- threshold (float): Threshold in inference.
- minlenratio (float): Minimum length ratio in inference.
- maxlenratio (float): Maximum length ratio in inference.
spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim).
Returns:
Tensor: Output sequence of features (L, odim).
Tensor: Output sequence of stop probabilities (L,).
Tensor: Attention weights (L, T).
"""
# get options
threshold = inference_args.threshold
minlenratio = inference_args.minlenratio
maxlenratio = inference_args.maxlenratio
# thin out input frames for reduction factor
# (B, Lmax, idim) -> (B, Lmax // r, idim * r)
if self.encoder_reduction_factor > 1:
Lmax, idim = x.shape
if Lmax % self.encoder_reduction_factor != 0:
x = x[: -(Lmax % self.encoder_reduction_factor), :]
x_ds = x.contiguous().view(
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
else:
x_ds = x
# inference
h = self.enc.inference(x_ds)
if self.spk_embed_dim is not None:
spemb = F.normalize(spemb, dim=0).unsqueeze(0).expand(h.size(0), -1)
h = torch.cat([h, spemb], dim=-1)
outs, probs, att_ws = self.dec.inference(h, threshold, minlenratio, maxlenratio)
if self.use_cbhg:
cbhg_outs = self.cbhg.inference(outs)
return cbhg_outs, probs, att_ws
else:
return outs, probs, att_ws
    def calculate_all_attentions(self, xs, ilens, ys, spembs=None, *args, **kwargs):
        """Calculate all of the attention weights.

        Args:
            xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            spembs (Tensor, optional):
                Batch of speaker embedding vectors (B, spk_embed_dim).

        Returns:
            numpy.ndarray: Batch of attention weights (B, Lmax, Tmax).

        """
        # check ilens type (should be list of int)
        if isinstance(ilens, torch.Tensor) or isinstance(ilens, np.ndarray):
            ilens = list(map(int, ilens))
        # temporarily switch to eval mode (restored before returning)
        self.eval()
        with torch.no_grad():
            # thin out input frames for reduction factor
            # (B, Lmax, idim) -> (B, Lmax // r, idim * r)
            if self.encoder_reduction_factor > 1:
                B, Lmax, idim = xs.shape
                if Lmax % self.encoder_reduction_factor != 0:
                    xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
                xs_ds = xs.contiguous().view(
                    B,
                    int(Lmax / self.encoder_reduction_factor),
                    idim * self.encoder_reduction_factor,
                )
                ilens_ds = [ilen // self.encoder_reduction_factor for ilen in ilens]
            else:
                xs_ds, ilens_ds = xs, ilens
            hs, hlens = self.enc(xs_ds, ilens_ds)
            if self.spk_embed_dim is not None:
                # tile the normalized speaker embedding along time and concat
                spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
                hs = torch.cat([hs, spembs], dim=-1)
            att_ws = self.dec.calculate_all_attentions(hs, hlens, ys)
        self.train()
        return att_ws.cpu().numpy()
@property
def base_plot_keys(self):
"""Return base key names to plot during training.
keys should match what `chainer.reporter` reports.
If you add the key `loss`, the reporter will report `main/loss`
and `validation/main/loss` values.
also `loss.png` will be created as a figure visulizing `main/loss`
and `validation/main/loss` values.
Returns:
list: List of strings which are base keys to plot during training.
"""
plot_keys = ["loss", "l1_loss", "mse_loss", "bce_loss"]
if self.use_guided_attn_loss:
plot_keys += ["attn_loss"]
if self.use_cbhg:
plot_keys += ["cbhg_l1_loss", "cbhg_mse_loss"]
if self.src_reconstruction_loss_lambda > 0:
plot_keys += ["src_recon_l1_loss", "src_recon_mse_loss"]
if self.trg_reconstruction_loss_lambda > 0:
plot_keys += ["trg_recon_l1_loss", "trg_recon_mse_loss"]
return plot_keys
def _sort_by_length(self, xs, ilens):
sort_ilens, sort_idx = ilens.sort(0, descending=True)
return xs[sort_idx], ilens[sort_idx], sort_idx
def _revert_sort_by_length(self, xs, ilens, sort_idx):
_, revert_idx = sort_idx.sort(0)
return xs[revert_idx], ilens[revert_idx]
| 30,409 | 37.788265 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_asr_conformer.py | # Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
Conformer speech recognition model (pytorch).
It is a fusion of `e2e_asr_transformer.py`
Refer to: https://arxiv.org/abs/2005.08100
"""
from espnet.nets.pytorch_backend.conformer.argument import ( # noqa: H301
add_arguments_conformer_common,
verify_rel_pos_type,
)
from espnet.nets.pytorch_backend.conformer.encoder import Encoder
from espnet.nets.pytorch_backend.e2e_asr_transformer import E2E as E2ETransformer
class E2E(E2ETransformer):
    """E2E module.

    :param int idim: dimension of inputs
    :param int odim: dimension of outputs
    :param Namespace args: argument Namespace containing options
    """

    @staticmethod
    def add_arguments(parser):
        """Add arguments (Transformer common + Conformer specific)."""
        E2ETransformer.add_arguments(parser)
        E2E.add_conformer_arguments(parser)
        return parser

    @staticmethod
    def add_conformer_arguments(parser):
        """Add arguments for conformer model."""
        group = parser.add_argument_group("conformer model specific setting")
        group = add_arguments_conformer_common(group)
        return parser

    def __init__(self, idim, odim, args, ignore_id=-1):
        """Construct an E2E object.

        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        """
        super().__init__(idim, odim, args, ignore_id)
        if args.transformer_attn_dropout_rate is None:
            # fall back to the generic dropout rate when no attention-specific
            # rate was given
            args.transformer_attn_dropout_rate = args.dropout_rate

        # Check the relative positional encoding type
        args = verify_rel_pos_type(args)

        # rebuild the encoder as a Conformer encoder, replacing the one set
        # up by the parent Transformer constructor (presumably — the parent
        # __init__ is not visible here)
        self.encoder = Encoder(
            idim=idim,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            linear_units=args.eunits,
            num_blocks=args.elayers,
            input_layer=args.transformer_input_layer,
            dropout_rate=args.dropout_rate,
            positional_dropout_rate=args.dropout_rate,
            attention_dropout_rate=args.transformer_attn_dropout_rate,
            pos_enc_layer_type=args.transformer_encoder_pos_enc_layer_type,
            selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
            activation_type=args.transformer_encoder_activation_type,
            macaron_style=args.macaron_style,
            use_cnn_module=args.use_cnn_module,
            zero_triu=args.zero_triu,
            cnn_module_kernel=args.cnn_module_kernel,
            stochastic_depth_rate=args.stochastic_depth_rate,
            intermediate_layers=self.intermediate_ctc_layers,
            ctc_softmax=self.ctc.softmax if args.self_conditioning else None,
            conditioning_layer_dim=odim,
        )
        # re-initialize parameters since the encoder was replaced
        self.reset_parameters(args)
| 2,955 | 35.493827 | 82 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_tts_transformer.py | # Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""TTS-Transformer related modules."""
import logging
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import GuidedAttentionLoss
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import (
Tacotron2Loss as TransformerLoss,
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as DecoderPrenet
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.embedding import (
PositionalEncoding,
ScaledPositionalEncoding,
)
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.cli_utils import strtobool
from espnet.utils.fill_missing_args import fill_missing_args
class GuidedMultiHeadAttentionLoss(GuidedAttentionLoss):
    """Guided attention loss function module for multi head attention.

    Args:
        sigma (float, optional): Standard deviation to control
            how close attention to a diagonal.
        alpha (float, optional): Scaling coefficient (lambda).
        reset_always (bool, optional): Whether to always reset masks.

    """

    def forward(self, att_ws, ilens, olens):
        """Calculate forward propagation.

        Args:
            att_ws (Tensor):
                Batch of multi head attention weights (B, H, T_max_out, T_max_in).
            ilens (LongTensor): Batch of input lengths (B,).
            olens (LongTensor): Batch of output lengths (B,).

        Returns:
            Tensor: Guided attention loss value.

        """
        # lazily (re)build the cached masks; an extra axis is inserted so
        # they broadcast over the attention-head dimension
        if self.guided_attn_masks is None:
            guided = self._make_guided_attention_masks(ilens, olens)
            self.guided_attn_masks = guided.to(att_ws.device).unsqueeze(1)
        if self.masks is None:
            valid = self._make_masks(ilens, olens)
            self.masks = valid.to(att_ws.device).unsqueeze(1)
        # penalize attention mass that falls far from the diagonal,
        # averaged over the non-padded positions only
        penalties = self.guided_attn_masks * att_ws
        loss = torch.mean(penalties.masked_select(self.masks))
        if self.reset_always:
            self._reset_masks()
        return self.alpha * loss
# Define TTSPlot only when PlotAttentionReport is importable; otherwise fall
# back to None so callers can feature-test it.
# NOTE(review): TypeError is also caught here — presumably it can be raised
# during the plotting module's import-time setup; confirm before narrowing.
try:
    from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
except (ImportError, TypeError):
    TTSPlot = None
else:

    class TTSPlot(PlotAttentionReport):
        """Attention plot module for TTS-Transformer."""

        def plotfn(
            self, data_dict, uttid_list, attn_dict, outdir, suffix="png", savefn=None
        ):
            """Plot multi head attentions.

            Args:
                data_dict (dict): Utts info from json file.
                uttid_list (list): List of utt_id.
                attn_dict (dict): Multi head attention dict.
                    Values should be numpy.ndarray (H, L, T)
                outdir (str): Directory name to save figures.
                suffix (str): Filename suffix including image type (e.g., png).
                savefn (function): Function to save figures.

            """
            # imported lazily so matplotlib is only required when plotting
            import matplotlib.pyplot as plt

            from espnet.nets.pytorch_backend.transformer.plot import (  # noqa: H301
                _plot_and_save_attention,
            )

            for name, att_ws in attn_dict.items():
                for utt_id, att_w in zip(uttid_list, att_ws):
                    filename = "%s/%s.%s.%s" % (outdir, utt_id, name, suffix)
                    if "fbank" in name:
                        # feature outputs are drawn as spectrogram-like images
                        # rather than attention matrices
                        fig = plt.Figure()
                        ax = fig.subplots(1, 1)
                        ax.imshow(att_w, aspect="auto")
                        ax.set_xlabel("frames")
                        ax.set_ylabel("fbank coeff")
                        fig.tight_layout()
                    else:
                        fig = _plot_and_save_attention(att_w, filename)
                    savefn(fig, filename)
class Transformer(TTSInterface, torch.nn.Module):
    """Text-to-Speech Transformer module.
    This is a module of text-to-speech Transformer described
    in `Neural Speech Synthesis with Transformer Network`_,
    which converts the sequence of characters
    or phonemes into the sequence of Mel-filterbanks.
    .. _`Neural Speech Synthesis with Transformer Network`:
        https://arxiv.org/pdf/1809.08895.pdf
    """
    @staticmethod
    def add_arguments(parser):
        """Add model-specific arguments to the parser.

        Args:
            parser (argparse.ArgumentParser): Parser on which the
                "transformer model setting" argument group is registered.

        Returns:
            argparse.ArgumentParser: The same parser with added arguments.
        """
        group = parser.add_argument_group("transformer model setting")
        # network structure related
        group.add_argument(
            "--embed-dim",
            default=512,
            type=int,
            help="Dimension of character embedding in encoder prenet",
        )
        group.add_argument(
            "--eprenet-conv-layers",
            default=3,
            type=int,
            help="Number of encoder prenet convolution layers",
        )
        group.add_argument(
            "--eprenet-conv-chans",
            default=256,
            type=int,
            help="Number of encoder prenet convolution channels",
        )
        group.add_argument(
            "--eprenet-conv-filts",
            default=5,
            type=int,
            help="Filter size of encoder prenet convolution",
        )
        group.add_argument(
            "--dprenet-layers",
            default=2,
            type=int,
            help="Number of decoder prenet layers",
        )
        group.add_argument(
            "--dprenet-units",
            default=256,
            type=int,
            help="Number of decoder prenet hidden units",
        )
        group.add_argument(
            "--elayers", default=3, type=int, help="Number of encoder layers"
        )
        group.add_argument(
            "--eunits", default=1536, type=int, help="Number of encoder hidden units"
        )
        group.add_argument(
            "--adim",
            default=384,
            type=int,
            help="Number of attention transformation dimensions",
        )
        group.add_argument(
            "--aheads",
            default=4,
            type=int,
            help="Number of heads for multi head attention",
        )
        group.add_argument(
            "--dlayers", default=3, type=int, help="Number of decoder layers"
        )
        group.add_argument(
            "--dunits", default=1536, type=int, help="Number of decoder hidden units"
        )
        group.add_argument(
            "--positionwise-layer-type",
            default="linear",
            type=str,
            choices=["linear", "conv1d", "conv1d-linear"],
            help="Positionwise layer type.",
        )
        group.add_argument(
            "--positionwise-conv-kernel-size",
            default=1,
            type=int,
            help="Kernel size of positionwise conv1d layer",
        )
        group.add_argument(
            "--postnet-layers", default=5, type=int, help="Number of postnet layers"
        )
        group.add_argument(
            "--postnet-chans", default=256, type=int, help="Number of postnet channels"
        )
        group.add_argument(
            "--postnet-filts", default=5, type=int, help="Filter size of postnet"
        )
        group.add_argument(
            "--use-scaled-pos-enc",
            default=True,
            type=strtobool,
            help="Use trainable scaled positional encoding "
            "instead of the fixed scale one.",
        )
        group.add_argument(
            "--use-batch-norm",
            default=True,
            type=strtobool,
            help="Whether to use batch normalization",
        )
        group.add_argument(
            "--encoder-normalize-before",
            default=False,
            type=strtobool,
            help="Whether to apply layer norm before encoder block",
        )
        group.add_argument(
            "--decoder-normalize-before",
            default=False,
            type=strtobool,
            help="Whether to apply layer norm before decoder block",
        )
        group.add_argument(
            "--encoder-concat-after",
            default=False,
            type=strtobool,
            help="Whether to concatenate attention layer's input and output in encoder",
        )
        group.add_argument(
            "--decoder-concat-after",
            default=False,
            type=strtobool,
            help="Whether to concatenate attention layer's input and output in decoder",
        )
        group.add_argument(
            "--reduction-factor", default=1, type=int, help="Reduction factor"
        )
        group.add_argument(
            "--spk-embed-dim",
            default=None,
            type=int,
            help="Number of speaker embedding dimensions",
        )
        group.add_argument(
            "--spk-embed-integration-type",
            type=str,
            default="add",
            choices=["add", "concat"],
            help="How to integrate speaker embedding",
        )
        # training related
        group.add_argument(
            "--transformer-init",
            type=str,
            default="pytorch",
            choices=[
                "pytorch",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
            ],
            help="How to initialize transformer parameters",
        )
        group.add_argument(
            "--initial-encoder-alpha",
            type=float,
            default=1.0,
            help="Initial alpha value in encoder's ScaledPositionalEncoding",
        )
        group.add_argument(
            "--initial-decoder-alpha",
            type=float,
            default=1.0,
            help="Initial alpha value in decoder's ScaledPositionalEncoding",
        )
        group.add_argument(
            "--transformer-lr",
            default=1.0,
            type=float,
            help="Initial value of learning rate",
        )
        group.add_argument(
            "--transformer-warmup-steps",
            default=4000,
            type=int,
            help="Optimizer warmup steps",
        )
        group.add_argument(
            "--transformer-enc-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder except for attention",
        )
        group.add_argument(
            "--transformer-enc-positional-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder positional encoding",
        )
        group.add_argument(
            "--transformer-enc-attn-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder self-attention",
        )
        group.add_argument(
            "--transformer-dec-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer decoder "
            "except for attention and pos encoding",
        )
        group.add_argument(
            "--transformer-dec-positional-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer decoder positional encoding",
        )
        group.add_argument(
            "--transformer-dec-attn-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer decoder self-attention",
        )
        group.add_argument(
            "--transformer-enc-dec-attn-dropout-rate",
            default=0.1,
            type=float,
            help="Dropout rate for transformer encoder-decoder attention",
        )
        group.add_argument(
            "--eprenet-dropout-rate",
            default=0.5,
            type=float,
            help="Dropout rate in encoder prenet",
        )
        group.add_argument(
            "--dprenet-dropout-rate",
            default=0.5,
            type=float,
            help="Dropout rate in decoder prenet",
        )
        group.add_argument(
            "--postnet-dropout-rate",
            default=0.5,
            type=float,
            help="Dropout rate in postnet",
        )
        group.add_argument(
            "--pretrained-model", default=None, type=str, help="Pretrained model path"
        )
        # loss related
        group.add_argument(
            "--use-masking",
            default=True,
            type=strtobool,
            help="Whether to use masking in calculation of loss",
        )
        group.add_argument(
            "--use-weighted-masking",
            default=False,
            type=strtobool,
            help="Whether to use weighted masking in calculation of loss",
        )
        group.add_argument(
            "--loss-type",
            default="L1",
            choices=["L1", "L2", "L1+L2"],
            help="How to calc loss",
        )
        group.add_argument(
            "--bce-pos-weight",
            default=5.0,
            type=float,
            help="Positive sample weight in BCE calculation "
            "(only for use-masking=True)",
        )
        group.add_argument(
            "--use-guided-attn-loss",
            default=False,
            type=strtobool,
            help="Whether to use guided attention loss",
        )
        group.add_argument(
            "--guided-attn-loss-sigma",
            default=0.4,
            type=float,
            help="Sigma in guided attention loss",
        )
        group.add_argument(
            "--guided-attn-loss-lambda",
            default=1.0,
            type=float,
            help="Lambda in guided attention loss",
        )
        group.add_argument(
            "--num-heads-applied-guided-attn",
            default=2,
            type=int,
            help="Number of heads in each layer to be applied guided attention loss"
            "if set -1, all of the heads will be applied.",
        )
        group.add_argument(
            "--num-layers-applied-guided-attn",
            default=2,
            type=int,
            help="Number of layers to be applied guided attention loss"
            "if set -1, all of the layers will be applied.",
        )
        group.add_argument(
            "--modules-applied-guided-attn",
            type=str,
            nargs="+",
            default=["encoder-decoder"],
            help="Module name list to be applied guided attention loss",
        )
        return parser
    @property
    def attention_plot_class(self):
        """Return plot class for attention weight plot."""
        # TTSPlot is None when matplotlib/PlotAttentionReport is unavailable
        return TTSPlot
    def __init__(self, idim, odim, args=None):
        """Initialize TTS-Transformer module.
        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            args (Namespace, optional):
                - embed_dim (int): Dimension of character embedding.
                - eprenet_conv_layers (int):
                    Number of encoder prenet convolution layers.
                - eprenet_conv_chans (int):
                    Number of encoder prenet convolution channels.
                - eprenet_conv_filts (int): Filter size of encoder prenet convolution.
                - dprenet_layers (int): Number of decoder prenet layers.
                - dprenet_units (int): Number of decoder prenet hidden units.
                - elayers (int): Number of encoder layers.
                - eunits (int): Number of encoder hidden units.
                - adim (int): Number of attention transformation dimensions.
                - aheads (int): Number of heads for multi head attention.
                - dlayers (int): Number of decoder layers.
                - dunits (int): Number of decoder hidden units.
                - postnet_layers (int): Number of postnet layers.
                - postnet_chans (int): Number of postnet channels.
                - postnet_filts (int): Filter size of postnet.
                - use_scaled_pos_enc (bool):
                    Whether to use trainable scaled positional encoding.
                - use_batch_norm (bool):
                    Whether to use batch normalization in encoder prenet.
                - encoder_normalize_before (bool):
                    Whether to perform layer normalization before encoder block.
                - decoder_normalize_before (bool):
                    Whether to perform layer normalization before decoder block.
                - encoder_concat_after (bool): Whether to concatenate attention
                    layer's input and output in encoder.
                - decoder_concat_after (bool): Whether to concatenate attention
                    layer's input and output in decoder.
                - reduction_factor (int): Reduction factor.
                - spk_embed_dim (int): Number of speaker embedding dimenstions.
                - spk_embed_integration_type: How to integrate speaker embedding.
                - transformer_init (float): How to initialize transformer parameters.
                - transformer_lr (float): Initial value of learning rate.
                - transformer_warmup_steps (int): Optimizer warmup steps.
                - transformer_enc_dropout_rate (float):
                    Dropout rate in encoder except attention & positional encoding.
                - transformer_enc_positional_dropout_rate (float):
                    Dropout rate after encoder positional encoding.
                - transformer_enc_attn_dropout_rate (float):
                    Dropout rate in encoder self-attention module.
                - transformer_dec_dropout_rate (float):
                    Dropout rate in decoder except attention & positional encoding.
                - transformer_dec_positional_dropout_rate (float):
                    Dropout rate after decoder positional encoding.
                - transformer_dec_attn_dropout_rate (float):
                    Dropout rate in deocoder self-attention module.
                - transformer_enc_dec_attn_dropout_rate (float):
                    Dropout rate in encoder-deocoder attention module.
                - eprenet_dropout_rate (float): Dropout rate in encoder prenet.
                - dprenet_dropout_rate (float): Dropout rate in decoder prenet.
                - postnet_dropout_rate (float): Dropout rate in postnet.
                - use_masking (bool):
                    Whether to apply masking for padded part in loss calculation.
                - use_weighted_masking (bool):
                    Whether to apply weighted masking in loss calculation.
                - bce_pos_weight (float): Positive sample weight in bce calculation
                    (only for use_masking=true).
                - loss_type (str): How to calculate loss.
                - use_guided_attn_loss (bool): Whether to use guided attention loss.
                - num_heads_applied_guided_attn (int):
                    Number of heads in each layer to apply guided attention loss.
                - num_layers_applied_guided_attn (int):
                    Number of layers to apply guided attention loss.
                - modules_applied_guided_attn (list):
                    List of module names to apply guided attention loss.
                - guided-attn-loss-sigma (float) Sigma in guided attention loss.
                - guided-attn-loss-lambda (float): Lambda in guided attention loss.
        """
        # initialize base classes
        TTSInterface.__init__(self)
        torch.nn.Module.__init__(self)
        # fill missing arguments
        args = fill_missing_args(args, self.add_arguments)
        # store hyperparameters
        self.idim = idim
        self.odim = odim
        self.spk_embed_dim = args.spk_embed_dim
        if self.spk_embed_dim is not None:
            self.spk_embed_integration_type = args.spk_embed_integration_type
        self.use_scaled_pos_enc = args.use_scaled_pos_enc
        self.reduction_factor = args.reduction_factor
        self.loss_type = args.loss_type
        self.use_guided_attn_loss = args.use_guided_attn_loss
        if self.use_guided_attn_loss:
            # -1 means "apply to all layers/heads"
            if args.num_layers_applied_guided_attn == -1:
                self.num_layers_applied_guided_attn = args.elayers
            else:
                self.num_layers_applied_guided_attn = (
                    args.num_layers_applied_guided_attn
                )
            if args.num_heads_applied_guided_attn == -1:
                self.num_heads_applied_guided_attn = args.aheads
            else:
                self.num_heads_applied_guided_attn = args.num_heads_applied_guided_attn
            self.modules_applied_guided_attn = args.modules_applied_guided_attn
        # use idx 0 as padding idx
        padding_idx = 0
        # get positional encoding class
        pos_enc_class = (
            ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
        )
        # define transformer encoder
        if args.eprenet_conv_layers != 0:
            # encoder prenet (conv stack) followed by a projection to adim
            encoder_input_layer = torch.nn.Sequential(
                EncoderPrenet(
                    idim=idim,
                    embed_dim=args.embed_dim,
                    elayers=0,
                    econv_layers=args.eprenet_conv_layers,
                    econv_chans=args.eprenet_conv_chans,
                    econv_filts=args.eprenet_conv_filts,
                    use_batch_norm=args.use_batch_norm,
                    dropout_rate=args.eprenet_dropout_rate,
                    padding_idx=padding_idx,
                ),
                torch.nn.Linear(args.eprenet_conv_chans, args.adim),
            )
        else:
            encoder_input_layer = torch.nn.Embedding(
                num_embeddings=idim, embedding_dim=args.adim, padding_idx=padding_idx
            )
        self.encoder = Encoder(
            idim=idim,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            linear_units=args.eunits,
            num_blocks=args.elayers,
            input_layer=encoder_input_layer,
            dropout_rate=args.transformer_enc_dropout_rate,
            positional_dropout_rate=args.transformer_enc_positional_dropout_rate,
            attention_dropout_rate=args.transformer_enc_attn_dropout_rate,
            pos_enc_class=pos_enc_class,
            normalize_before=args.encoder_normalize_before,
            concat_after=args.encoder_concat_after,
            positionwise_layer_type=args.positionwise_layer_type,
            positionwise_conv_kernel_size=args.positionwise_conv_kernel_size,
        )
        # define projection layer (for speaker embedding integration)
        if self.spk_embed_dim is not None:
            if self.spk_embed_integration_type == "add":
                self.projection = torch.nn.Linear(self.spk_embed_dim, args.adim)
            else:
                self.projection = torch.nn.Linear(
                    args.adim + self.spk_embed_dim, args.adim
                )
        # define transformer decoder
        if args.dprenet_layers != 0:
            # decoder prenet followed by a projection to adim
            decoder_input_layer = torch.nn.Sequential(
                DecoderPrenet(
                    idim=odim,
                    n_layers=args.dprenet_layers,
                    n_units=args.dprenet_units,
                    dropout_rate=args.dprenet_dropout_rate,
                ),
                torch.nn.Linear(args.dprenet_units, args.adim),
            )
        else:
            decoder_input_layer = "linear"
        # odim=-1 / use_output_layer=False: outputs are projected by
        # self.feat_out and self.prob_out below instead of inside the decoder
        self.decoder = Decoder(
            odim=-1,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            linear_units=args.dunits,
            num_blocks=args.dlayers,
            dropout_rate=args.transformer_dec_dropout_rate,
            positional_dropout_rate=args.transformer_dec_positional_dropout_rate,
            self_attention_dropout_rate=args.transformer_dec_attn_dropout_rate,
            src_attention_dropout_rate=args.transformer_enc_dec_attn_dropout_rate,
            input_layer=decoder_input_layer,
            use_output_layer=False,
            pos_enc_class=pos_enc_class,
            normalize_before=args.decoder_normalize_before,
            concat_after=args.decoder_concat_after,
        )
        # define final projection (r frames of features + r stop logits per step)
        self.feat_out = torch.nn.Linear(args.adim, odim * args.reduction_factor)
        self.prob_out = torch.nn.Linear(args.adim, args.reduction_factor)
        # define postnet
        self.postnet = (
            None
            if args.postnet_layers == 0
            else Postnet(
                idim=idim,
                odim=odim,
                n_layers=args.postnet_layers,
                n_chans=args.postnet_chans,
                n_filts=args.postnet_filts,
                use_batch_norm=args.use_batch_norm,
                dropout_rate=args.postnet_dropout_rate,
            )
        )
        # define loss function
        self.criterion = TransformerLoss(
            use_masking=args.use_masking,
            use_weighted_masking=args.use_weighted_masking,
            bce_pos_weight=args.bce_pos_weight,
        )
        if self.use_guided_attn_loss:
            self.attn_criterion = GuidedMultiHeadAttentionLoss(
                sigma=args.guided_attn_loss_sigma,
                alpha=args.guided_attn_loss_lambda,
            )
        # initialize parameters
        self._reset_parameters(
            init_type=args.transformer_init,
            init_enc_alpha=args.initial_encoder_alpha,
            init_dec_alpha=args.initial_decoder_alpha,
        )
        # load pretrained model
        if args.pretrained_model is not None:
            self.load_pretrained_model(args.pretrained_model)
    def _reset_parameters(self, init_type, init_enc_alpha=1.0, init_dec_alpha=1.0):
        """Reset network parameters.

        Args:
            init_type (str): Initialization method name passed to ``initialize``.
            init_enc_alpha (float): Initial alpha of the encoder's
                ScaledPositionalEncoding.
            init_dec_alpha (float): Initial alpha of the decoder's
                ScaledPositionalEncoding.
        """
        # initialize parameters
        initialize(self, init_type)
        # initialize alpha in scaled positional encoding
        if self.use_scaled_pos_enc:
            self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
            self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
def _add_first_frame_and_remove_last_frame(self, ys):
ys_in = torch.cat(
[ys.new_zeros((ys.shape[0], 1, ys.shape[2])), ys[:, :-1]], dim=1
)
return ys_in
    def forward(self, xs, ilens, ys, labels, olens, spembs=None, *args, **kwargs):
        """Calculate forward propagation.
        Args:
            xs (Tensor): Batch of padded character ids (B, Tmax).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            labels (Tensor): Batch of stop token labels (B, Lmax).
            olens (LongTensor): Batch of the lengths of each target (B,).
            spembs (Tensor, optional):
                Batch of speaker embedding vectors (B, spk_embed_dim).
        Returns:
            Tensor: Loss value.
        """
        # remove unnecessary padded part (for multi-gpus)
        max_ilen = max(ilens)
        max_olen = max(olens)
        if max_ilen != xs.shape[1]:
            xs = xs[:, :max_ilen]
        if max_olen != ys.shape[1]:
            ys = ys[:, :max_olen]
            labels = labels[:, :max_olen]
        # forward encoder
        x_masks = self._source_mask(ilens).to(xs.device)
        hs, h_masks = self.encoder(xs, x_masks)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)
        # thin out frames for reduction factor (B, Lmax, odim) -> (B, Lmax//r, odim)
        if self.reduction_factor > 1:
            ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
            olens_in = olens.new([olen // self.reduction_factor for olen in olens])
        else:
            ys_in, olens_in = ys, olens
        # add first zero frame and remove last frame for auto-regressive
        ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
        # forward decoder
        y_masks = self._target_mask(olens_in).to(xs.device)
        zs, _ = self.decoder(ys_in, y_masks, hs, h_masks)
        # (B, Lmax//r, odim * r) -> (B, Lmax//r * r, odim)
        before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
        # (B, Lmax//r, r) -> (B, Lmax//r * r)
        logits = self.prob_out(zs).view(zs.size(0), -1)
        # postnet -> (B, Lmax//r * r, odim)
        if self.postnet is None:
            after_outs = before_outs
        else:
            after_outs = before_outs + self.postnet(
                before_outs.transpose(1, 2)
            ).transpose(1, 2)
        # modifiy mod part of groundtruth so its length matches the outputs
        if self.reduction_factor > 1:
            assert olens.ge(
                self.reduction_factor
            ).all(), "Output length must be greater than or equal to reduction factor."
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
            max_olen = max(olens)
            ys = ys[:, :max_olen]
            labels = labels[:, :max_olen]
            # make sure the last valid frame is marked as a stop position
            labels = torch.scatter(
                labels, 1, (olens - 1).unsqueeze(1), 1.0
            )  # see #3388
        # calculate loss values
        l1_loss, l2_loss, bce_loss = self.criterion(
            after_outs, before_outs, logits, ys, labels, olens
        )
        if self.loss_type == "L1":
            loss = l1_loss + bce_loss
        elif self.loss_type == "L2":
            loss = l2_loss + bce_loss
        elif self.loss_type == "L1+L2":
            loss = l1_loss + l2_loss + bce_loss
        else:
            raise ValueError("unknown --loss-type " + self.loss_type)
        report_keys = [
            {"l1_loss": l1_loss.item()},
            {"l2_loss": l2_loss.item()},
            {"bce_loss": bce_loss.item()},
            {"loss": loss.item()},
        ]
        # calculate guided attention loss (attention weights are collected from
        # the last N layers, restricted to the first
        # num_heads_applied_guided_attn heads of each layer)
        if self.use_guided_attn_loss:
            # calculate for encoder
            if "encoder" in self.modules_applied_guided_attn:
                att_ws = []
                for idx, layer_idx in enumerate(
                    reversed(range(len(self.encoder.encoders)))
                ):
                    att_ws += [
                        self.encoder.encoders[layer_idx].self_attn.attn[
                            :, : self.num_heads_applied_guided_attn
                        ]
                    ]
                    if idx + 1 == self.num_layers_applied_guided_attn:
                        break
                att_ws = torch.cat(att_ws, dim=1)  # (B, H*L, T_in, T_in)
                enc_attn_loss = self.attn_criterion(att_ws, ilens, ilens)
                loss = loss + enc_attn_loss
                report_keys += [{"enc_attn_loss": enc_attn_loss.item()}]
            # calculate for decoder
            if "decoder" in self.modules_applied_guided_attn:
                att_ws = []
                for idx, layer_idx in enumerate(
                    reversed(range(len(self.decoder.decoders)))
                ):
                    att_ws += [
                        self.decoder.decoders[layer_idx].self_attn.attn[
                            :, : self.num_heads_applied_guided_attn
                        ]
                    ]
                    if idx + 1 == self.num_layers_applied_guided_attn:
                        break
                att_ws = torch.cat(att_ws, dim=1)  # (B, H*L, T_out, T_out)
                dec_attn_loss = self.attn_criterion(att_ws, olens_in, olens_in)
                loss = loss + dec_attn_loss
                report_keys += [{"dec_attn_loss": dec_attn_loss.item()}]
            # calculate for encoder-decoder
            if "encoder-decoder" in self.modules_applied_guided_attn:
                att_ws = []
                for idx, layer_idx in enumerate(
                    reversed(range(len(self.decoder.decoders)))
                ):
                    att_ws += [
                        self.decoder.decoders[layer_idx].src_attn.attn[
                            :, : self.num_heads_applied_guided_attn
                        ]
                    ]
                    if idx + 1 == self.num_layers_applied_guided_attn:
                        break
                att_ws = torch.cat(att_ws, dim=1)  # (B, H*L, T_out, T_in)
                enc_dec_attn_loss = self.attn_criterion(att_ws, ilens, olens_in)
                loss = loss + enc_dec_attn_loss
                report_keys += [{"enc_dec_attn_loss": enc_dec_attn_loss.item()}]
        # report extra information
        if self.use_scaled_pos_enc:
            report_keys += [
                {"encoder_alpha": self.encoder.embed[-1].alpha.data.item()},
                {"decoder_alpha": self.decoder.embed[-1].alpha.data.item()},
            ]
        self.reporter.report(report_keys)
        return loss
    def inference(self, x, inference_args, spemb=None, *args, **kwargs):
        """Generate the sequence of features given the sequences of characters.
        Args:
            x (Tensor): Input sequence of characters (T,).
            inference_args (Namespace):
                - threshold (float): Threshold in inference.
                - minlenratio (float): Minimum length ratio in inference.
                - maxlenratio (float): Maximum length ratio in inference.
            spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim).
        Returns:
            Tensor: Output sequence of features (L, odim).
            Tensor: Output sequence of stop probabilities (L,).
            Tensor: Encoder-decoder (source) attention weights (#layers, #heads, L, T).
        """
        # get options
        threshold = inference_args.threshold
        minlenratio = inference_args.minlenratio
        maxlenratio = inference_args.maxlenratio
        use_att_constraint = getattr(
            inference_args, "use_att_constraint", False
        )  # keep compatibility
        if use_att_constraint:
            logging.warning(
                "Attention constraint is not yet supported in Transformer. Not enabled."
            )
        # forward encoder
        xs = x.unsqueeze(0)
        hs, _ = self.encoder(xs, None)
        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            spembs = spemb.unsqueeze(0)
            hs = self._integrate_with_spk_embed(hs, spembs)
        # set limits of length (in decoder steps; each step emits r frames)
        maxlen = int(hs.size(1) * maxlenratio / self.reduction_factor)
        minlen = int(hs.size(1) * minlenratio / self.reduction_factor)
        # initialize
        idx = 0
        ys = hs.new_zeros(1, 1, self.odim)
        outs, probs = [], []
        # forward decoder step-by-step
        z_cache = self.decoder.init_state(x)
        while True:
            # update index
            idx += 1
            # calculate output and stop prob at idx-th step
            y_masks = subsequent_mask(idx).unsqueeze(0).to(x.device)
            z, z_cache = self.decoder.forward_one_step(
                ys, y_masks, hs, cache=z_cache
            )  # (B, adim)
            outs += [
                self.feat_out(z).view(self.reduction_factor, self.odim)
            ]  # [(r, odim), ...]
            probs += [torch.sigmoid(self.prob_out(z))[0]]  # [(r), ...]
            # update next inputs: feed back the last generated frame
            ys = torch.cat(
                (ys, outs[-1][-1].view(1, 1, self.odim)), dim=1
            )  # (1, idx + 1, odim)
            # get attention weights of the current step from all source
            # attention modules
            att_ws_ = []
            for name, m in self.named_modules():
                if isinstance(m, MultiHeadedAttention) and "src" in name:
                    att_ws_ += [m.attn[0, :, -1].unsqueeze(1)]  # [(#heads, 1, T),...]
            if idx == 1:
                att_ws = att_ws_
            else:
                # [(#heads, l, T), ...]
                att_ws = [
                    torch.cat([att_w, att_w_], dim=1)
                    for att_w, att_w_ in zip(att_ws, att_ws_)
                ]
            # check whether to finish generation
            if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
                # check mininum length
                if idx < minlen:
                    continue
                outs = (
                    torch.cat(outs, dim=0).unsqueeze(0).transpose(1, 2)
                )  # (L, odim) -> (1, L, odim) -> (1, odim, L)
                if self.postnet is not None:
                    outs = outs + self.postnet(outs)  # (1, odim, L)
                outs = outs.transpose(2, 1).squeeze(0)  # (L, odim)
                probs = torch.cat(probs, dim=0)
                break
        # concatenate attention weights -> (#layers, #heads, L, T)
        att_ws = torch.stack(att_ws, dim=0)
        return outs, probs, att_ws
    def calculate_all_attentions(
        self,
        xs,
        ilens,
        ys,
        olens,
        spembs=None,
        skip_output=False,
        keep_tensor=False,
        *args,
        **kwargs
    ):
        """Calculate all of the attention weights.
        Args:
            xs (Tensor): Batch of padded character ids (B, Tmax).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            olens (LongTensor): Batch of the lengths of each target (B,).
            spembs (Tensor, optional):
                Batch of speaker embedding vectors (B, spk_embed_dim).
            skip_output (bool, optional): Whether to skip calculate the final output.
            keep_tensor (bool, optional): Whether to keep original tensor.
        Returns:
            dict: Dict of attention weights and outputs.
        """
        # switch to eval mode; restored to train mode at the end
        self.eval()
        with torch.no_grad():
            # forward encoder
            x_masks = self._source_mask(ilens).to(xs.device)
            hs, h_masks = self.encoder(xs, x_masks)
            # integrate speaker embedding
            if self.spk_embed_dim is not None:
                hs = self._integrate_with_spk_embed(hs, spembs)
            # thin out frames for reduction factor
            # (B, Lmax, odim) -> (B, Lmax//r, odim)
            if self.reduction_factor > 1:
                ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
                olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            else:
                ys_in, olens_in = ys, olens
            # add first zero frame and remove last frame for auto-regressive
            ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
            # forward decoder
            y_masks = self._target_mask(olens_in).to(xs.device)
            zs, _ = self.decoder(ys_in, y_masks, hs, h_masks)
            # calculate final outputs
            if not skip_output:
                before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
                if self.postnet is None:
                    after_outs = before_outs
                else:
                    after_outs = before_outs + self.postnet(
                        before_outs.transpose(1, 2)
                    ).transpose(1, 2)
        # modifiy mod part of output lengths due to reduction factor > 1
        if self.reduction_factor > 1:
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
        # store into dict
        att_ws_dict = dict()
        if keep_tensor:
            # keep raw (padded) tensors
            for name, m in self.named_modules():
                if isinstance(m, MultiHeadedAttention):
                    att_ws_dict[name] = m.attn
            if not skip_output:
                att_ws_dict["before_postnet_fbank"] = before_outs
                att_ws_dict["after_postnet_fbank"] = after_outs
        else:
            # convert to numpy and strip the padded part per utterance
            for name, m in self.named_modules():
                if isinstance(m, MultiHeadedAttention):
                    attn = m.attn.cpu().numpy()
                    if "encoder" in name:
                        attn = [a[:, :l, :l] for a, l in zip(attn, ilens.tolist())]
                    elif "decoder" in name:
                        if "src" in name:
                            attn = [
                                a[:, :ol, :il]
                                for a, il, ol in zip(
                                    attn, ilens.tolist(), olens_in.tolist()
                                )
                            ]
                        elif "self" in name:
                            attn = [
                                a[:, :l, :l] for a, l in zip(attn, olens_in.tolist())
                            ]
                        else:
                            logging.warning("unknown attention module: " + name)
                    else:
                        logging.warning("unknown attention module: " + name)
                    att_ws_dict[name] = attn
            if not skip_output:
                before_outs = before_outs.cpu().numpy()
                after_outs = after_outs.cpu().numpy()
                att_ws_dict["before_postnet_fbank"] = [
                    m[:l].T for m, l in zip(before_outs, olens.tolist())
                ]
                att_ws_dict["after_postnet_fbank"] = [
                    m[:l].T for m, l in zip(after_outs, olens.tolist())
                ]
        self.train()
        return att_ws_dict
def _integrate_with_spk_embed(self, hs, spembs):
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, Tmax, adim)
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds and then apply projection
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
    def _source_mask(self, ilens):
        """Make masks for self-attention.
        Args:
            ilens (LongTensor or List): Batch of lengths (B,).
        Returns:
            Tensor: Mask tensor for self-attention (B, 1, Tmax).
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)
        Examples:
            >>> ilens = [5, 3]
            >>> self._source_mask(ilens)
            tensor([[[1, 1, 1, 1, 1]],
                    [[1, 1, 1, 0, 0]]], dtype=torch.uint8)
        """
        x_masks = make_non_pad_mask(ilens)
        return x_masks.unsqueeze(-2)
    def _target_mask(self, olens):
        """Make masks for masked self-attention.
        Args:
            olens (LongTensor or List): Batch of lengths (B,).
        Returns:
            Tensor: Mask tensor for masked self-attention.
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)
        Examples:
            >>> olens = [5, 3]
            >>> self._target_mask(olens)
            tensor([[[1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0],
                     [1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 0],
                     [1, 1, 1, 1, 1]],
                    [[1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0],
                     [1, 1, 1, 0, 0],
                     [1, 1, 1, 0, 0],
                     [1, 1, 1, 0, 0]]], dtype=torch.uint8)
        """
        # combine the padding mask with the causal (subsequent) mask
        y_masks = make_non_pad_mask(olens)
        s_masks = subsequent_mask(y_masks.size(-1), device=y_masks.device).unsqueeze(0)
        return y_masks.unsqueeze(-2) & s_masks
@property
def base_plot_keys(self):
"""Return base key names to plot during training.
keys should match what `chainer.reporter` reports.
If you add the key `loss`, the reporter will report `main/loss`
and `validation/main/loss` values.
also `loss.png` will be created as a figure visulizing `main/loss`
and `validation/main/loss` values.
Returns:
list: List of strings which are base keys to plot during training.
"""
plot_keys = ["loss", "l1_loss", "l2_loss", "bce_loss"]
if self.use_scaled_pos_enc:
plot_keys += ["encoder_alpha", "decoder_alpha"]
if self.use_guided_attn_loss:
if "encoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_attn_loss"]
if "decoder" in self.modules_applied_guided_attn:
plot_keys += ["dec_attn_loss"]
if "encoder-decoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_dec_attn_loss"]
return plot_keys
| 45,641 | 38.27883 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/gtn_ctc.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""GTN CTC implementation."""
import gtn
import torch
class GTNCTCLossFunction(torch.autograd.Function):
    """GTN CTC module."""
    # Copied from FB's GTN example implementation:
    # https://github.com/facebookresearch/gtn_applications/blob/master/utils.py#L251
    @staticmethod
    def create_ctc_graph(target, blank_idx):
        """Build gtn graph.
        :param list target: single target sequence
        :param int blank_idx: index of blank token
        :return: gtn graph of target sequence
        :rtype: gtn.Graph
        """
        g_criterion = gtn.Graph(False)
        L = len(target)
        # standard CTC topology: a blank state interleaved between labels,
        # plus leading/trailing blank states -> 2L + 1 states in total
        S = 2 * L + 1
        for s in range(S):
            idx = (s - 1) // 2
            # start state is s == 0; both of the last two states are accepting
            g_criterion.add_node(s == 0, s == S - 1 or s == S - 2)
            label = target[idx] if s % 2 else blank_idx
            g_criterion.add_arc(s, s, label)
            if s > 0:
                g_criterion.add_arc(s - 1, s, label)
            # blank may be skipped between two different consecutive labels
            if s % 2 and s > 1 and label != target[idx - 1]:
                g_criterion.add_arc(s - 2, s, label)
        g_criterion.arc_sort(False)
        return g_criterion
    @staticmethod
    def forward(ctx, log_probs, targets, ilens, blank_idx=0, reduction="none"):
        """Forward computation.
        :param torch.tensor log_probs: batched log softmax probabilities (B, Tmax, oDim)
        :param list targets: batched target sequences, list of lists
        :param torch.tensor ilens: batched lengths of each input sequence (B,)
        :param int blank_idx: index of blank token
        :param str reduction: "none" or "mean"; "mean" additionally scales each
            utterance loss by the inverse of its target length
        :return: ctc loss value (averaged over the batch)
        :rtype: torch.Tensor
        """
        B, _, C = log_probs.shape
        losses = [None] * B
        scales = [None] * B
        emissions_graphs = [None] * B
        def process(b):
            # create emission graph
            T = ilens[b]
            g_emissions = gtn.linear_graph(T, C, log_probs.requires_grad)
            # gtn reads the weights directly from the CPU tensor's memory
            cpu_data = log_probs[b][:T].cpu().contiguous()
            g_emissions.set_weights(cpu_data.data_ptr())
            # create criterion graph
            g_criterion = GTNCTCLossFunction.create_ctc_graph(targets[b], blank_idx)
            # compose the graphs: loss = -log P(target | emissions)
            g_loss = gtn.negate(
                gtn.forward_score(gtn.intersect(g_emissions, g_criterion))
            )
            scale = 1.0
            if reduction == "mean":
                L = len(targets[b])
                scale = 1.0 / L if L > 0 else scale
            elif reduction != "none":
                raise ValueError("invalid value for reduction '" + str(reduction) + "'")
            # Save for backward:
            losses[b] = g_loss
            scales[b] = scale
            emissions_graphs[b] = g_emissions
        gtn.parallel_for(process, range(B))
        ctx.auxiliary_data = (losses, scales, emissions_graphs, log_probs.shape, ilens)
        # the batch mean is always taken, independent of `reduction`
        loss = torch.tensor([losses[b].item() * scales[b] for b in range(B)])
        return torch.mean(loss.cuda() if log_probs.is_cuda else loss)
    @staticmethod
    def backward(ctx, grad_output):
        """Backward computation.
        :param torch.tensor grad_output: backward passed gradient value
        :return: cumulative gradient output
        :rtype: (torch.Tensor, None, None, None)
        """
        losses, scales, emissions_graphs, in_shape, ilens = ctx.auxiliary_data
        B, T, C = in_shape
        input_grad = torch.zeros((B, T, C))
        def process(b):
            T = ilens[b]
            # differentiate the gtn loss graph w.r.t. the emission weights
            gtn.backward(losses[b], False)
            emissions = emissions_graphs[b]
            grad = emissions.grad().weights_to_numpy()
            input_grad[b][:T] = torch.from_numpy(grad).view(1, T, C) * scales[b]
        gtn.parallel_for(process, range(B))
        if grad_output.is_cuda:
            input_grad = input_grad.cuda()
        # chain rule with the incoming gradient; 1/B matches the batch mean
        input_grad *= grad_output / B
        return (
            input_grad,
            None,  # targets
            None,  # ilens
            None,  # blank_idx
            None,  # reduction
        )
| 3,974 | 32.403361 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_tts_tacotron2.py | # Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Tacotron 2 related modules."""
import logging
import numpy as np
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.rnn.attentions import AttForward, AttForwardTA, AttLoc
from espnet.nets.pytorch_backend.tacotron2.cbhg import CBHG, CBHGLoss
from espnet.nets.pytorch_backend.tacotron2.decoder import Decoder
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.cli_utils import strtobool
from espnet.utils.fill_missing_args import fill_missing_args
class GuidedAttentionLoss(torch.nn.Module):
    """Guided attention loss function module.
    This module calculates the guided attention loss described
    in `Efficiently Trainable Text-to-Speech System Based
    on Deep Convolutional Networks with Guided Attention`_,
    which forces the attention to be diagonal.
    .. _`Efficiently Trainable Text-to-Speech System
        Based on Deep Convolutional Networks with Guided Attention`:
        https://arxiv.org/abs/1710.08969
    """
    def __init__(self, sigma=0.4, alpha=1.0, reset_always=True):
        """Initialize guided attention loss module.
        Args:
            sigma (float, optional): Standard deviation to control
                how close attention to a diagonal.
            alpha (float, optional): Scaling coefficient (lambda).
            reset_always (bool, optional): Whether to always reset masks.
        """
        super(GuidedAttentionLoss, self).__init__()
        self.sigma = sigma
        self.alpha = alpha
        self.reset_always = reset_always
        # Cached masks; built lazily in forward() and kept until _reset_masks().
        self.guided_attn_masks = None
        self.masks = None
    def _reset_masks(self):
        # Drop the cached masks so the next forward() rebuilds them.
        self.guided_attn_masks = None
        self.masks = None
    def forward(self, att_ws, ilens, olens):
        """Calculate forward propagation.
        Args:
            att_ws (Tensor): Batch of attention weights (B, T_max_out, T_max_in).
            ilens (LongTensor): Batch of input lengths (B,).
            olens (LongTensor): Batch of output lengths (B,).
        Returns:
            Tensor: Guided attention loss value.
        """
        # NOTE: with reset_always=False the cached masks are reused, which is
        # only valid if every batch has the same ilens/olens.
        if self.guided_attn_masks is None:
            self.guided_attn_masks = self._make_guided_attention_masks(ilens, olens).to(
                att_ws.device
            )
        if self.masks is None:
            self.masks = self._make_masks(ilens, olens).to(att_ws.device)
        losses = self.guided_attn_masks * att_ws
        # Average only over non-padded positions.
        loss = torch.mean(losses.masked_select(self.masks))
        if self.reset_always:
            self._reset_masks()
        return self.alpha * loss
    def _make_guided_attention_masks(self, ilens, olens):
        # Build one (max_olen, max_ilen) penalty mask per utterance; padded
        # regions are left at zero.
        n_batches = len(ilens)
        max_ilen = max(ilens)
        max_olen = max(olens)
        guided_attn_masks = torch.zeros((n_batches, max_olen, max_ilen))
        for idx, (ilen, olen) in enumerate(zip(ilens, olens)):
            guided_attn_masks[idx, :olen, :ilen] = self._make_guided_attention_mask(
                ilen, olen, self.sigma
            )
        return guided_attn_masks
    @staticmethod
    def _make_guided_attention_mask(ilen, olen, sigma):
        """Make guided attention mask.
        Examples:
            >>> guided_attn_mask =_make_guided_attention(5, 5, 0.4)
            >>> guided_attn_mask.shape
            torch.Size([5, 5])
            >>> guided_attn_mask
            tensor([[0.0000, 0.1175, 0.3935, 0.6753, 0.8647],
                    [0.1175, 0.0000, 0.1175, 0.3935, 0.6753],
                    [0.3935, 0.1175, 0.0000, 0.1175, 0.3935],
                    [0.6753, 0.3935, 0.1175, 0.0000, 0.1175],
                    [0.8647, 0.6753, 0.3935, 0.1175, 0.0000]])
            >>> guided_attn_mask =_make_guided_attention(3, 6, 0.4)
            >>> guided_attn_mask.shape
            torch.Size([6, 3])
            >>> guided_attn_mask
            tensor([[0.0000, 0.2934, 0.7506],
                    [0.0831, 0.0831, 0.5422],
                    [0.2934, 0.0000, 0.2934],
                    [0.5422, 0.0831, 0.0831],
                    [0.7506, 0.2934, 0.0000],
                    [0.8858, 0.5422, 0.0831]])
        """
        # NOTE(review): `.to(olen.device)` implies ilen/olen arrive as scalar
        # tensors here (plain ints have no .device) — confirm with callers.
        # Relies on the historical "ij" indexing default of torch.meshgrid.
        grid_x, grid_y = torch.meshgrid(torch.arange(olen), torch.arange(ilen))
        grid_x, grid_y = grid_x.float().to(olen.device), grid_y.float().to(ilen.device)
        # Penalty grows with distance from the diagonal, scaled by sigma.
        return 1.0 - torch.exp(
            -((grid_y / ilen - grid_x / olen) ** 2) / (2 * (sigma**2))
        )
    @staticmethod
    def _make_masks(ilens, olens):
        """Make masks indicating non-padded part.
        Args:
            ilens (LongTensor or List): Batch of lengths (B,).
            olens (LongTensor or List): Batch of lengths (B,).
        Returns:
            Tensor: Mask tensor indicating non-padded part.
                    dtype=torch.uint8 in PyTorch 1.2-
                    dtype=torch.bool in PyTorch 1.2+ (including 1.2)
        Examples:
            >>> ilens, olens = [5, 2], [8, 5]
            >>> _make_mask(ilens, olens)
            tensor([[[1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1]],
                    [[1, 1, 0, 0, 0],
                     [1, 1, 0, 0, 0],
                     [1, 1, 0, 0, 0],
                     [1, 1, 0, 0, 0],
                     [1, 1, 0, 0, 0],
                     [0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0]]], dtype=torch.uint8)
        """
        in_masks = make_non_pad_mask(ilens)  # (B, T_in)
        out_masks = make_non_pad_mask(olens)  # (B, T_out)
        return out_masks.unsqueeze(-1) & in_masks.unsqueeze(-2)  # (B, T_out, T_in)
class Tacotron2Loss(torch.nn.Module):
    """Loss function module for Tacotron2 (L1 + MSE on features, BCE on stop token)."""
    def __init__(
        self, use_masking=True, use_weighted_masking=False, bce_pos_weight=20.0
    ):
        """Initialize Tactoron2 loss module.
        Args:
            use_masking (bool): Whether to apply masking
                for padded part in loss calculation.
            use_weighted_masking (bool):
                Whether to apply weighted masking in loss calculation.
            bce_pos_weight (float): Weight of positive sample of stop token.
        """
        super(Tacotron2Loss, self).__init__()
        # Plain masking and weighted masking are mutually exclusive options.
        assert (use_masking != use_weighted_masking) or not use_masking
        self.use_masking = use_masking
        self.use_weighted_masking = use_weighted_masking
        # define criterions
        # Weighted masking needs per-element losses, hence reduction="none".
        reduction = "none" if self.use_weighted_masking else "mean"
        self.l1_criterion = torch.nn.L1Loss(reduction=reduction)
        self.mse_criterion = torch.nn.MSELoss(reduction=reduction)
        self.bce_criterion = torch.nn.BCEWithLogitsLoss(
            reduction=reduction, pos_weight=torch.tensor(bce_pos_weight)
        )
        # NOTE(kan-bayashi): register pre hook function for the compatibility
        self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook)
    def forward(self, after_outs, before_outs, logits, ys, labels, olens):
        """Calculate forward propagation.
        Args:
            after_outs (Tensor): Batch of outputs after postnets (B, Lmax, odim).
            before_outs (Tensor): Batch of outputs before postnets (B, Lmax, odim).
            logits (Tensor): Batch of stop logits (B, Lmax).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            labels (LongTensor): Batch of the sequences of stop token labels (B, Lmax).
            olens (LongTensor): Batch of the lengths of each target (B,).
        Returns:
            Tensor: L1 loss value.
            Tensor: Mean square error loss value.
            Tensor: Binary cross entropy loss value.
        """
        # make mask and apply it
        if self.use_masking:
            masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            ys = ys.masked_select(masks)
            after_outs = after_outs.masked_select(masks)
            before_outs = before_outs.masked_select(masks)
            labels = labels.masked_select(masks[:, :, 0])
            logits = logits.masked_select(masks[:, :, 0])
        # calculate loss
        l1_loss = self.l1_criterion(after_outs, ys) + self.l1_criterion(before_outs, ys)
        mse_loss = self.mse_criterion(after_outs, ys) + self.mse_criterion(
            before_outs, ys
        )
        bce_loss = self.bce_criterion(logits, labels)
        # make weighted mask and apply it
        if self.use_weighted_masking:
            masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
            # Each utterance contributes equally regardless of its length.
            weights = masks.float() / masks.sum(dim=1, keepdim=True).float()
            out_weights = weights.div(ys.size(0) * ys.size(2))
            logit_weights = weights.div(ys.size(0))
            # apply weight
            l1_loss = l1_loss.mul(out_weights).masked_select(masks).sum()
            mse_loss = mse_loss.mul(out_weights).masked_select(masks).sum()
            bce_loss = (
                bce_loss.mul(logit_weights.squeeze(-1))
                .masked_select(masks.squeeze(-1))
                .sum()
            )
        return l1_loss, mse_loss, bce_loss
    def _load_state_dict_pre_hook(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """Apply pre hook function before loading state dict.
        From v.0.6.1 `bce_criterion.pos_weight` param is registered as a parameter but
        old models do not include it and as a result, it causes missing key error when
        loading old model parameter. This function solve the issue by adding param in
        state dict before loading as a pre hook function
        of the `load_state_dict` method.
        """
        key = prefix + "bce_criterion.pos_weight"
        if key not in state_dict:
            state_dict[key] = self.bce_criterion.pos_weight
class Tacotron2(TTSInterface, torch.nn.Module):
"""Tacotron2 module for end-to-end text-to-speech (E2E-TTS).
This is a module of Spectrogram prediction network in Tacotron2 described
in `Natural TTS Synthesis
by Conditioning WaveNet on Mel Spectrogram Predictions`_,
which converts the sequence of characters
into the sequence of Mel-filterbanks.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
    @staticmethod
    def add_arguments(parser):
        """Add model-specific arguments to the parser.
        Args:
            parser (argparse.ArgumentParser): Argument parser to be extended.
        Returns:
            argparse.ArgumentParser: The parser with Tacotron 2 options added.
        """
        group = parser.add_argument_group("tacotron 2 model setting")
        # encoder
        group.add_argument(
            "--embed-dim",
            default=512,
            type=int,
            help="Number of dimension of embedding",
        )
        group.add_argument(
            "--elayers", default=1, type=int, help="Number of encoder layers"
        )
        group.add_argument(
            "--eunits",
            "-u",
            default=512,
            type=int,
            help="Number of encoder hidden units",
        )
        group.add_argument(
            "--econv-layers",
            default=3,
            type=int,
            help="Number of encoder convolution layers",
        )
        group.add_argument(
            "--econv-chans",
            default=512,
            type=int,
            help="Number of encoder convolution channels",
        )
        group.add_argument(
            "--econv-filts",
            default=5,
            type=int,
            help="Filter size of encoder convolution",
        )
        # attention
        group.add_argument(
            "--atype",
            default="location",
            type=str,
            choices=["forward_ta", "forward", "location"],
            help="Type of attention mechanism",
        )
        group.add_argument(
            "--adim",
            default=512,
            type=int,
            help="Number of attention transformation dimensions",
        )
        group.add_argument(
            "--aconv-chans",
            default=32,
            type=int,
            help="Number of attention convolution channels",
        )
        group.add_argument(
            "--aconv-filts",
            default=15,
            type=int,
            help="Filter size of attention convolution",
        )
        group.add_argument(
            "--cumulate-att-w",
            default=True,
            type=strtobool,
            help="Whether or not to cumulate attention weights",
        )
        # decoder
        group.add_argument(
            "--dlayers", default=2, type=int, help="Number of decoder layers"
        )
        group.add_argument(
            "--dunits", default=1024, type=int, help="Number of decoder hidden units"
        )
        group.add_argument(
            "--prenet-layers", default=2, type=int, help="Number of prenet layers"
        )
        group.add_argument(
            "--prenet-units",
            default=256,
            type=int,
            help="Number of prenet hidden units",
        )
        group.add_argument(
            "--postnet-layers", default=5, type=int, help="Number of postnet layers"
        )
        group.add_argument(
            "--postnet-chans", default=512, type=int, help="Number of postnet channels"
        )
        group.add_argument(
            "--postnet-filts", default=5, type=int, help="Filter size of postnet"
        )
        group.add_argument(
            "--output-activation",
            default=None,
            type=str,
            nargs="?",
            help="Output activation function",
        )
        # cbhg
        group.add_argument(
            "--use-cbhg",
            default=False,
            type=strtobool,
            help="Whether to use CBHG module",
        )
        group.add_argument(
            "--cbhg-conv-bank-layers",
            default=8,
            type=int,
            help="Number of convoluional bank layers in CBHG",
        )
        group.add_argument(
            "--cbhg-conv-bank-chans",
            default=128,
            type=int,
            help="Number of convoluional bank channles in CBHG",
        )
        group.add_argument(
            "--cbhg-conv-proj-filts",
            default=3,
            type=int,
            help="Filter size of convoluional projection layer in CBHG",
        )
        group.add_argument(
            "--cbhg-conv-proj-chans",
            default=256,
            type=int,
            help="Number of convoluional projection channels in CBHG",
        )
        group.add_argument(
            "--cbhg-highway-layers",
            default=4,
            type=int,
            help="Number of highway layers in CBHG",
        )
        group.add_argument(
            "--cbhg-highway-units",
            default=128,
            type=int,
            help="Number of highway units in CBHG",
        )
        group.add_argument(
            "--cbhg-gru-units",
            default=256,
            type=int,
            help="Number of GRU units in CBHG",
        )
        # model (parameter) related
        group.add_argument(
            "--use-batch-norm",
            default=True,
            type=strtobool,
            help="Whether to use batch normalization",
        )
        group.add_argument(
            "--use-concate",
            default=True,
            type=strtobool,
            help="Whether to concatenate encoder embedding with decoder outputs",
        )
        group.add_argument(
            "--use-residual",
            default=True,
            type=strtobool,
            help="Whether to use residual connection in conv layer",
        )
        group.add_argument(
            "--dropout-rate", default=0.5, type=float, help="Dropout rate"
        )
        group.add_argument(
            "--zoneout-rate", default=0.1, type=float, help="Zoneout rate"
        )
        group.add_argument(
            "--reduction-factor", default=1, type=int, help="Reduction factor"
        )
        group.add_argument(
            "--spk-embed-dim",
            default=None,
            type=int,
            help="Number of speaker embedding dimensions",
        )
        group.add_argument(
            "--spc-dim", default=None, type=int, help="Number of spectrogram dimensions"
        )
        group.add_argument(
            "--pretrained-model", default=None, type=str, help="Pretrained model path"
        )
        # loss related
        group.add_argument(
            "--use-masking",
            default=False,
            type=strtobool,
            help="Whether to use masking in calculation of loss",
        )
        group.add_argument(
            "--use-weighted-masking",
            default=False,
            type=strtobool,
            help="Whether to use weighted masking in calculation of loss",
        )
        group.add_argument(
            "--bce-pos-weight",
            default=20.0,
            type=float,
            help="Positive sample weight in BCE calculation "
            "(only for use-masking=True)",
        )
        group.add_argument(
            "--use-guided-attn-loss",
            default=False,
            type=strtobool,
            help="Whether to use guided attention loss",
        )
        group.add_argument(
            "--guided-attn-loss-sigma",
            default=0.4,
            type=float,
            help="Sigma in guided attention loss",
        )
        group.add_argument(
            "--guided-attn-loss-lambda",
            default=1.0,
            type=float,
            help="Lambda in guided attention loss",
        )
        return parser
    def __init__(self, idim, odim, args=None):
        """Initialize Tacotron2 module.
        Args:
            idim (int): Dimension of the inputs.
            odim (int): Dimension of the outputs.
            args (Namespace, optional):
                - spk_embed_dim (int): Dimension of the speaker embedding.
                - embed_dim (int): Dimension of character embedding.
                - elayers (int): The number of encoder blstm layers.
                - eunits (int): The number of encoder blstm units.
                - econv_layers (int): The number of encoder conv layers.
                - econv_filts (int): The number of encoder conv filter size.
                - econv_chans (int): The number of encoder conv filter channels.
                - dlayers (int): The number of decoder lstm layers.
                - dunits (int): The number of decoder lstm units.
                - prenet_layers (int): The number of prenet layers.
                - prenet_units (int): The number of prenet units.
                - postnet_layers (int): The number of postnet layers.
                - postnet_filts (int): The number of postnet filter size.
                - postnet_chans (int): The number of postnet filter channels.
                - output_activation (int): The name of activation function for outputs.
                - adim (int): The number of dimension of mlp in attention.
                - aconv_chans (int): The number of attention conv filter channels.
                - aconv_filts (int): The number of attention conv filter size.
                - cumulate_att_w (bool): Whether to cumulate previous attention weight.
                - use_batch_norm (bool): Whether to use batch normalization.
                - use_concate (int): Whether to concatenate encoder embedding
                    with decoder lstm outputs.
                - dropout_rate (float): Dropout rate.
                - zoneout_rate (float): Zoneout rate.
                - reduction_factor (int): Reduction factor.
                - spk_embed_dim (int): Number of speaker embedding dimensions.
                - spc_dim (int): Number of spectrogram embedding dimensions
                    (only for use_cbhg=True).
                - use_cbhg (bool): Whether to use CBHG module.
                - cbhg_conv_bank_layers (int): The number of convoluional banks in CBHG.
                - cbhg_conv_bank_chans (int): The number of channels of
                    convolutional bank in CBHG.
                - cbhg_proj_filts (int):
                    The number of filter size of projection layeri in CBHG.
                - cbhg_proj_chans (int):
                    The number of channels of projection layer in CBHG.
                - cbhg_highway_layers (int):
                    The number of layers of highway network in CBHG.
                - cbhg_highway_units (int):
                    The number of units of highway network in CBHG.
                - cbhg_gru_units (int): The number of units of GRU in CBHG.
                - use_masking (bool):
                    Whether to apply masking for padded part in loss calculation.
                - use_weighted_masking (bool):
                    Whether to apply weighted masking in loss calculation.
                - bce_pos_weight (float):
                    Weight of positive sample of stop token (only for use_masking=True).
                - use-guided-attn-loss (bool): Whether to use guided attention loss.
                - guided-attn-loss-sigma (float) Sigma in guided attention loss.
                - guided-attn-loss-lamdba (float): Lambda in guided attention loss.
        """
        # initialize base classes
        TTSInterface.__init__(self)
        torch.nn.Module.__init__(self)
        # fill missing arguments
        args = fill_missing_args(args, self.add_arguments)
        # store hyperparameters
        self.idim = idim
        self.odim = odim
        self.spk_embed_dim = args.spk_embed_dim
        self.cumulate_att_w = args.cumulate_att_w
        self.reduction_factor = args.reduction_factor
        self.use_cbhg = args.use_cbhg
        self.use_guided_attn_loss = args.use_guided_attn_loss
        # define activation function for the final output
        if args.output_activation is None:
            self.output_activation_fn = None
        elif hasattr(F, args.output_activation):
            # resolve by name from torch.nn.functional, e.g. "relu", "tanh"
            self.output_activation_fn = getattr(F, args.output_activation)
        else:
            raise ValueError(
                "there is no such an activation function. (%s)" % args.output_activation
            )
        # set padding idx
        padding_idx = 0
        # define network modules
        self.enc = Encoder(
            idim=idim,
            embed_dim=args.embed_dim,
            elayers=args.elayers,
            eunits=args.eunits,
            econv_layers=args.econv_layers,
            econv_chans=args.econv_chans,
            econv_filts=args.econv_filts,
            use_batch_norm=args.use_batch_norm,
            use_residual=args.use_residual,
            dropout_rate=args.dropout_rate,
            padding_idx=padding_idx,
        )
        # decoder input dim grows when speaker embeddings are concatenated
        dec_idim = (
            args.eunits
            if args.spk_embed_dim is None
            else args.eunits + args.spk_embed_dim
        )
        if args.atype == "location":
            att = AttLoc(
                dec_idim, args.dunits, args.adim, args.aconv_chans, args.aconv_filts
            )
        elif args.atype == "forward":
            att = AttForward(
                dec_idim, args.dunits, args.adim, args.aconv_chans, args.aconv_filts
            )
            if self.cumulate_att_w:
                logging.warning(
                    "cumulation of attention weights is disabled in forward attention."
                )
                self.cumulate_att_w = False
        elif args.atype == "forward_ta":
            att = AttForwardTA(
                dec_idim,
                args.dunits,
                args.adim,
                args.aconv_chans,
                args.aconv_filts,
                odim,
            )
            if self.cumulate_att_w:
                logging.warning(
                    "cumulation of attention weights is disabled in forward attention."
                )
                self.cumulate_att_w = False
        else:
            raise NotImplementedError("Support only location or forward")
        self.dec = Decoder(
            idim=dec_idim,
            odim=odim,
            att=att,
            dlayers=args.dlayers,
            dunits=args.dunits,
            prenet_layers=args.prenet_layers,
            prenet_units=args.prenet_units,
            postnet_layers=args.postnet_layers,
            postnet_chans=args.postnet_chans,
            postnet_filts=args.postnet_filts,
            output_activation_fn=self.output_activation_fn,
            cumulate_att_w=self.cumulate_att_w,
            use_batch_norm=args.use_batch_norm,
            use_concate=args.use_concate,
            dropout_rate=args.dropout_rate,
            zoneout_rate=args.zoneout_rate,
            reduction_factor=args.reduction_factor,
        )
        self.taco2_loss = Tacotron2Loss(
            use_masking=args.use_masking,
            use_weighted_masking=args.use_weighted_masking,
            bce_pos_weight=args.bce_pos_weight,
        )
        if self.use_guided_attn_loss:
            self.attn_loss = GuidedAttentionLoss(
                sigma=args.guided_attn_loss_sigma,
                alpha=args.guided_attn_loss_lambda,
            )
        if self.use_cbhg:
            self.cbhg = CBHG(
                idim=odim,
                odim=args.spc_dim,
                conv_bank_layers=args.cbhg_conv_bank_layers,
                conv_bank_chans=args.cbhg_conv_bank_chans,
                conv_proj_filts=args.cbhg_conv_proj_filts,
                conv_proj_chans=args.cbhg_conv_proj_chans,
                highway_layers=args.cbhg_highway_layers,
                highway_units=args.cbhg_highway_units,
                gru_units=args.cbhg_gru_units,
            )
            self.cbhg_loss = CBHGLoss(use_masking=args.use_masking)
        # load pretrained model
        if args.pretrained_model is not None:
            self.load_pretrained_model(args.pretrained_model)
    def forward(
        self, xs, ilens, ys, labels, olens, spembs=None, extras=None, *args, **kwargs
    ):
        """Calculate forward propagation.
        Args:
            xs (Tensor): Batch of padded character ids (B, Tmax).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            labels (LongTensor): Batch of the sequences of stop token labels (B, Lmax).
            olens (LongTensor): Batch of the lengths of each target (B,).
            spembs (Tensor, optional):
                Batch of speaker embedding vectors (B, spk_embed_dim).
            extras (Tensor, optional):
                Batch of groundtruth spectrograms (B, Lmax, spc_dim).
        Returns:
            Tensor: Loss value.
        """
        # remove unnecessary padded part (for multi-gpus)
        max_in = max(ilens)
        max_out = max(olens)
        if max_in != xs.shape[1]:
            xs = xs[:, :max_in]
        if max_out != ys.shape[1]:
            ys = ys[:, :max_out]
            labels = labels[:, :max_out]
        # calculate tacotron2 outputs
        hs, hlens = self.enc(xs, ilens)
        if self.spk_embed_dim is not None:
            # broadcast the speaker embedding over time and concat to encoder out
            spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
            hs = torch.cat([hs, spembs], dim=-1)
        after_outs, before_outs, logits, att_ws = self.dec(hs, hlens, ys)
        # modifiy mod part of groundtruth
        if self.reduction_factor > 1:
            assert olens.ge(
                self.reduction_factor
            ).all(), "Output length must be greater than or equal to reduction factor."
            # truncate targets to a multiple of the reduction factor
            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
            max_out = max(olens)
            ys = ys[:, :max_out]
            labels = labels[:, :max_out]
            # make sure the last valid frame keeps a stop label after truncation
            labels = torch.scatter(
                labels, 1, (olens - 1).unsqueeze(1), 1.0
            )  # see #3388
        # calculate taco2 loss
        l1_loss, mse_loss, bce_loss = self.taco2_loss(
            after_outs, before_outs, logits, ys, labels, olens
        )
        loss = l1_loss + mse_loss + bce_loss
        report_keys = [
            {"l1_loss": l1_loss.item()},
            {"mse_loss": mse_loss.item()},
            {"bce_loss": bce_loss.item()},
        ]
        # calculate attention loss
        if self.use_guided_attn_loss:
            # NOTE(kan-bayashi):
            # length of output for auto-regressive input will be changed when r > 1
            if self.reduction_factor > 1:
                olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            else:
                olens_in = olens
            attn_loss = self.attn_loss(att_ws, ilens, olens_in)
            loss = loss + attn_loss
            report_keys += [
                {"attn_loss": attn_loss.item()},
            ]
        # calculate cbhg loss
        if self.use_cbhg:
            # remove unnecessary padded part (for multi-gpus)
            if max_out != extras.shape[1]:
                extras = extras[:, :max_out]
            # calculate cbhg outputs & loss and report them
            cbhg_outs, _ = self.cbhg(after_outs, olens)
            cbhg_l1_loss, cbhg_mse_loss = self.cbhg_loss(cbhg_outs, extras, olens)
            loss = loss + cbhg_l1_loss + cbhg_mse_loss
            report_keys += [
                {"cbhg_l1_loss": cbhg_l1_loss.item()},
                {"cbhg_mse_loss": cbhg_mse_loss.item()},
            ]
        report_keys += [{"loss": loss.item()}]
        self.reporter.report(report_keys)
        return loss
    def inference(self, x, inference_args, spemb=None, *args, **kwargs):
        """Generate the sequence of features given the sequences of characters.
        Args:
            x (Tensor): Input sequence of characters (T,).
            inference_args (Namespace):
                - threshold (float): Threshold in inference.
                - minlenratio (float): Minimum length ratio in inference.
                - maxlenratio (float): Maximum length ratio in inference.
            spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim).
        Returns:
            Tensor: Output sequence of features (L, odim).
            Tensor: Output sequence of stop probabilities (L,).
            Tensor: Attention weights (L, T).
        """
        # get options
        threshold = inference_args.threshold
        minlenratio = inference_args.minlenratio
        maxlenratio = inference_args.maxlenratio
        # attention constraint is optional; default off for old config files
        use_att_constraint = getattr(
            inference_args, "use_att_constraint", False
        )  # keep compatibility
        backward_window = inference_args.backward_window if use_att_constraint else 0
        forward_window = inference_args.forward_window if use_att_constraint else 0
        # inference
        h = self.enc.inference(x)
        if self.spk_embed_dim is not None:
            spemb = F.normalize(spemb, dim=0).unsqueeze(0).expand(h.size(0), -1)
            h = torch.cat([h, spemb], dim=-1)
        outs, probs, att_ws = self.dec.inference(
            h,
            threshold,
            minlenratio,
            maxlenratio,
            use_att_constraint=use_att_constraint,
            backward_window=backward_window,
            forward_window=forward_window,
        )
        if self.use_cbhg:
            # convert mel outputs to linear spectrogram via CBHG
            cbhg_outs = self.cbhg.inference(outs)
            return cbhg_outs, probs, att_ws
        else:
            return outs, probs, att_ws
    def calculate_all_attentions(
        self, xs, ilens, ys, spembs=None, keep_tensor=False, *args, **kwargs
    ):
        """Calculate all of the attention weights.
        Args:
            xs (Tensor): Batch of padded character ids (B, Tmax).
            ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            spembs (Tensor, optional):
                Batch of speaker embedding vectors (B, spk_embed_dim).
            keep_tensor (bool, optional): Whether to keep original tensor.
        Returns:
            Union[ndarray, Tensor]: Batch of attention weights (B, Lmax, Tmax).
        """
        # check ilens type (should be list of int)
        if isinstance(ilens, torch.Tensor) or isinstance(ilens, np.ndarray):
            ilens = list(map(int, ilens))
        # switch to eval mode so dropout etc. do not affect the visualization
        self.eval()
        with torch.no_grad():
            hs, hlens = self.enc(xs, ilens)
            if self.spk_embed_dim is not None:
                spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
                hs = torch.cat([hs, spembs], dim=-1)
            att_ws = self.dec.calculate_all_attentions(hs, hlens, ys)
        # restore training mode
        self.train()
        if keep_tensor:
            return att_ws
        else:
            return att_ws.cpu().numpy()
@property
def base_plot_keys(self):
"""Return base key names to plot during training.
keys should match what `chainer.reporter` reports.
If you add the key `loss`, the reporter will report `main/loss`
and `validation/main/loss` values.
also `loss.png` will be created as a figure visulizing `main/loss`
and `validation/main/loss` values.
Returns:
list: List of strings which are base keys to plot during training.
"""
plot_keys = ["loss", "l1_loss", "mse_loss", "bce_loss"]
if self.use_guided_attn_loss:
plot_keys += ["attn_loss"]
if self.use_cbhg:
plot_keys += ["cbhg_l1_loss", "cbhg_mse_loss"]
return plot_keys
| 34,041 | 36.993304 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/ctc.py | import logging
import numpy as np
import torch
import torch.nn.functional as F
from packaging.version import parse as V
from espnet.nets.pytorch_backend.nets_utils import to_device
class CTC(torch.nn.Module):
    """CTC module
    :param int odim: dimension of outputs
    :param int eprojs: number of encoder projection units
    :param float dropout_rate: dropout rate (0.0 ~ 1.0)
    :param str ctc_type: "builtin", "cudnnctc", or "gtnctc"
    :param bool reduce: reduce the CTC loss into a scalar
    """
    def __init__(self, odim, eprojs, dropout_rate, ctc_type="builtin", reduce=True):
        super().__init__()
        self.dropout_rate = dropout_rate
        self.loss = None
        self.ctc_lo = torch.nn.Linear(eprojs, odim)
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.probs = None  # for visualization
        # In case of Pytorch >= 1.7.0, CTC will be always builtin
        self.ctc_type = ctc_type if V(torch.__version__) < V("1.7.0") else "builtin"
        if ctc_type != self.ctc_type:
            logging.warning(f"CTC was set to {self.ctc_type} due to PyTorch version.")
        if self.ctc_type == "builtin":
            reduction_type = "sum" if reduce else "none"
            # zero_infinity avoids inf loss when input is shorter than target
            self.ctc_loss = torch.nn.CTCLoss(
                reduction=reduction_type, zero_infinity=True
            )
        elif self.ctc_type == "cudnnctc":
            reduction_type = "sum" if reduce else "none"
            self.ctc_loss = torch.nn.CTCLoss(reduction=reduction_type)
        elif self.ctc_type == "gtnctc":
            # imported lazily so gtn is only required when actually selected
            from espnet.nets.pytorch_backend.gtn_ctc import GTNCTCLossFunction
            self.ctc_loss = GTNCTCLossFunction.apply
        else:
            raise ValueError(
                'ctc_type must be "builtin" or "gtnctc": {}'.format(self.ctc_type)
            )
        self.ignore_id = -1
        self.reduce = reduce
    def loss_fn(self, th_pred, th_target, th_ilen, th_olen):
        # Dispatch to the selected CTC backend; th_pred is (Tmax, B, odim)
        # for the builtin/cudnn backends and (B, Tmax, odim) for gtnctc.
        if self.ctc_type in ["builtin", "cudnnctc"]:
            th_pred = th_pred.log_softmax(2)
            # Use the deterministic CuDNN implementation of CTC loss to avoid
            # [issue#17798](https://github.com/pytorch/pytorch/issues/17798)
            with torch.backends.cudnn.flags(deterministic=True):
                loss = self.ctc_loss(th_pred, th_target, th_ilen, th_olen)
            # Batch-size average
            loss = loss / th_pred.size(1)
            return loss
        elif self.ctc_type == "gtnctc":
            targets = [t.tolist() for t in th_target]
            log_probs = torch.nn.functional.log_softmax(th_pred, dim=2)
            return self.ctc_loss(log_probs, targets, th_ilen, 0, "none")
        else:
            raise NotImplementedError
    def forward(self, hs_pad, hlens, ys_pad):
        """CTC forward
        :param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)
        :param torch.Tensor hlens: batch of lengths of hidden state sequences (B)
        :param torch.Tensor ys_pad:
            batch of padded character id sequence tensor (B, Lmax)
        :return: ctc loss value
        :rtype: torch.Tensor
        """
        # TODO(kan-bayashi): need to make more smart way
        ys = [y[y != self.ignore_id] for y in ys_pad]  # parse padded ys
        # zero padding for hs
        ys_hat = self.ctc_lo(self.dropout(hs_pad))
        if self.ctc_type != "gtnctc":
            # builtin/cudnn CTC expects time-major input (Tmax, B, odim)
            ys_hat = ys_hat.transpose(0, 1)
        if self.ctc_type == "builtin":
            olens = to_device(ys_hat, torch.LongTensor([len(s) for s in ys]))
            hlens = hlens.long()
            ys_pad = torch.cat(ys)  # without this the code breaks for asr_mix
            self.loss = self.loss_fn(ys_hat, ys_pad, hlens, olens)
        else:
            self.loss = None
            hlens = torch.from_numpy(np.fromiter(hlens, dtype=np.int32))
            olens = torch.from_numpy(
                np.fromiter((x.size(0) for x in ys), dtype=np.int32)
            )
            # zero padding for ys
            ys_true = torch.cat(ys).cpu().int()  # batch x olen
            # get ctc loss
            # expected shape of seqLength x batchSize x alphabet_size
            dtype = ys_hat.dtype
            if self.ctc_type == "cudnnctc":
                # use GPU when using the cuDNN implementation
                ys_true = to_device(hs_pad, ys_true)
            if self.ctc_type == "gtnctc":
                # keep as list for gtn
                ys_true = ys
            self.loss = to_device(
                hs_pad, self.loss_fn(ys_hat, ys_true, hlens, olens)
            ).to(dtype=dtype)
        # get length info
        logging.info(
            self.__class__.__name__
            + " input lengths:  "
            + "".join(str(hlens).split("\n"))
        )
        logging.info(
            self.__class__.__name__
            + " output lengths: "
            + "".join(str(olens).split("\n"))
        )
        if self.reduce:
            self.loss = self.loss.sum()
            logging.info("ctc loss:" + str(float(self.loss)))
        return self.loss
    def softmax(self, hs_pad):
        """softmax of frame activations
        :param torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)
        :return: log softmax applied 3d tensor (B, Tmax, odim)
        :rtype: torch.Tensor
        """
        # cached in self.probs for later visualization
        self.probs = F.softmax(self.ctc_lo(hs_pad), dim=2)
        return self.probs
    def log_softmax(self, hs_pad):
        """log_softmax of frame activations
        :param torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)
        :return: log softmax applied 3d tensor (B, Tmax, odim)
        :rtype: torch.Tensor
        """
        return F.log_softmax(self.ctc_lo(hs_pad), dim=2)
    def argmax(self, hs_pad):
        """argmax of frame activations
        :param torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)
        :return: argmax applied 2d tensor (B, Tmax)
        :rtype: torch.Tensor
        """
        return torch.argmax(self.ctc_lo(hs_pad), dim=2)
    def forced_align(self, h, y, blank_id=0):
        """forced alignment.
        :param torch.Tensor h: hidden state sequence, 2d tensor (T, D)
        :param torch.Tensor y: id sequence tensor 1d tensor (L)
        :param int blank_id: blank symbol index
        :return: best alignment results
        :rtype: list
        """
        def interpolate_blank(label, blank_id=0):
            """Insert blank token between every two label token."""
            label = np.expand_dims(label, 1)
            blanks = np.zeros((label.shape[0], 1), dtype=np.int64) + blank_id
            label = np.concatenate([blanks, label], axis=1)
            label = label.reshape(-1)
            label = np.append(label, label[0])
            return label
        lpz = self.log_softmax(h)
        lpz = lpz.squeeze(0)
        y_int = interpolate_blank(y, blank_id)
        # Viterbi over the standard CTC state graph (blank-interleaved labels).
        logdelta = np.zeros((lpz.size(0), len(y_int))) - 100000000000.0  # log of zero
        state_path = (
            np.zeros((lpz.size(0), len(y_int)), dtype=np.int16) - 1
        )  # state path
        logdelta[0, 0] = lpz[0][y_int[0]]
        logdelta[0, 1] = lpz[0][y_int[1]]
        for t in range(1, lpz.size(0)):
            for s in range(len(y_int)):
                # blanks / repeated labels may only come from s or s-1;
                # distinct labels may additionally skip the blank (s-2)
                if y_int[s] == blank_id or s < 2 or y_int[s] == y_int[s - 2]:
                    candidates = np.array([logdelta[t - 1, s], logdelta[t - 1, s - 1]])
                    prev_state = [s, s - 1]
                else:
                    candidates = np.array(
                        [
                            logdelta[t - 1, s],
                            logdelta[t - 1, s - 1],
                            logdelta[t - 1, s - 2],
                        ]
                    )
                    prev_state = [s, s - 1, s - 2]
                logdelta[t, s] = np.max(candidates) + lpz[t][y_int[s]]
                state_path[t, s] = prev_state[np.argmax(candidates)]
        # Backtrack from the best of the two valid final states.
        state_seq = -1 * np.ones((lpz.size(0), 1), dtype=np.int16)
        candidates = np.array(
            [logdelta[-1, len(y_int) - 1], logdelta[-1, len(y_int) - 2]]
        )
        prev_state = [len(y_int) - 1, len(y_int) - 2]
        state_seq[-1] = prev_state[np.argmax(candidates)]
        for t in range(lpz.size(0) - 2, -1, -1):
            state_seq[t] = state_path[t + 1, state_seq[t + 1, 0]]
        output_state_seq = []
        for t in range(0, lpz.size(0)):
            output_state_seq.append(y_int[state_seq[t, 0]])
        return output_state_seq
def ctc_for(args, odim, reduce=True):
    """Return the CTC module(s) for the given args and output dimension.
    :param Namespace args: the program args
    :param int odim: the output dimension
    :param bool reduce: return the CTC loss in a scalar
    :return: a single CTC module when ``num_encs == 1``; a ModuleList of CTC
        modules (one per encoder, or a single shared one when
        ``args.share_ctc`` is true) when ``num_encs >= 2``
    """
    num_encs = getattr(args, "num_encs", 1)  # use getattr to keep compatibility
    if num_encs == 1:
        # compatible with single encoder asr mode
        return CTC(
            odim, args.eprojs, args.dropout_rate, ctc_type=args.ctc_type, reduce=reduce
        )
    elif num_encs > 1:
        # multi-encoder mode: one CTC per encoder unless sharing is requested
        ctcs_list = torch.nn.ModuleList()
        if args.share_ctc:
            # use dropout_rate of the first encoder
            ctc = CTC(
                odim,
                args.eprojs,
                args.dropout_rate[0],
                ctc_type=args.ctc_type,
                reduce=reduce,
            )
            ctcs_list.append(ctc)
        else:
            for idx in range(num_encs):
                ctc = CTC(
                    odim,
                    args.eprojs,
                    args.dropout_rate[idx],
                    ctc_type=args.ctc_type,
                    reduce=reduce,
                )
                ctcs_list.append(ctc)
        return ctcs_list
    else:
        # num_encs < 1 is invalid
        raise ValueError(
            "Number of encoders needs to be one or more. {}".format(num_encs)
        )
| 9,877 | 35.72119 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_st.py | # Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""RNN sequence-to-sequence speech translation model (pytorch)."""
import argparse
import copy
import logging
import math
import os
from itertools import groupby
import chainer
import nltk
import numpy as np
import torch
from chainer import reporter
from espnet.nets.e2e_asr_common import label_smoothing_dist
from espnet.nets.pytorch_backend.ctc import CTC
from espnet.nets.pytorch_backend.initialization import (
lecun_normal_init_parameters,
set_forget_bias_to_one,
)
from espnet.nets.pytorch_backend.nets_utils import (
get_subsample,
pad_list,
to_device,
to_torch_tensor,
)
from espnet.nets.pytorch_backend.rnn.argument import ( # noqa: H301
add_arguments_rnn_attention_common,
add_arguments_rnn_decoder_common,
add_arguments_rnn_encoder_common,
)
from espnet.nets.pytorch_backend.rnn.attentions import att_for
from espnet.nets.pytorch_backend.rnn.decoders import decoder_for
from espnet.nets.pytorch_backend.rnn.encoders import encoder_for
from espnet.nets.st_interface import STInterface
from espnet.utils.fill_missing_args import fill_missing_args
CTC_LOSS_THRESHOLD = 10000
class Reporter(chainer.Chain):
    """A chainer reporter wrapper."""
    def report(
        self,
        loss_asr,
        loss_mt,
        loss_st,
        acc_asr,
        acc_mt,
        acc,
        cer_ctc,
        cer,
        wer,
        bleu,
        mtl_loss,
    ):
        """Report every per-task metric for the current training step."""
        metrics = (
            ("loss_asr", loss_asr),
            ("loss_mt", loss_mt),
            ("loss_st", loss_st),
            ("acc_asr", acc_asr),
            ("acc_mt", acc_mt),
            ("acc", acc),
            ("cer_ctc", cer_ctc),
            ("cer", cer),
            ("wer", wer),
            ("bleu", bleu),
        )
        for name, value in metrics:
            reporter.report({name: value}, self)
        logging.info("mtl loss:" + str(mtl_loss))
        reporter.report({"loss": mtl_loss}, self)
class E2E(STInterface, torch.nn.Module):
"""E2E module.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
"""Add arguments."""
E2E.encoder_add_arguments(parser)
E2E.attention_add_arguments(parser)
E2E.decoder_add_arguments(parser)
return parser
@staticmethod
def encoder_add_arguments(parser):
"""Add arguments for the encoder."""
group = parser.add_argument_group("E2E encoder setting")
group = add_arguments_rnn_encoder_common(group)
return parser
@staticmethod
def attention_add_arguments(parser):
"""Add arguments for the attention."""
group = parser.add_argument_group("E2E attention setting")
group = add_arguments_rnn_attention_common(group)
return parser
@staticmethod
def decoder_add_arguments(parser):
"""Add arguments for the decoder."""
group = parser.add_argument_group("E2E decoder setting")
group = add_arguments_rnn_decoder_common(group)
return parser
def get_total_subsampling_factor(self):
"""Get total subsampling factor."""
return self.enc.conv_subsampling_factor * int(np.prod(self.subsample))
    def __init__(self, idim, odim, args):
        """Construct an E2E object.
        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        """
        super(E2E, self).__init__()
        torch.nn.Module.__init__(self)
        # fill missing arguments for compatibility
        args = fill_missing_args(args, self.add_arguments)
        # multi-task weights: loss = (1 - asr_weight - mt_weight) * ST
        #                            + asr_weight * ASR + mt_weight * MT
        self.asr_weight = args.asr_weight
        self.mt_weight = args.mt_weight
        self.mtlalpha = args.mtlalpha
        assert 0.0 <= self.asr_weight < 1.0, "asr_weight should be [0.0, 1.0)"
        assert 0.0 <= self.mt_weight < 1.0, "mt_weight should be [0.0, 1.0)"
        assert 0.0 <= self.mtlalpha <= 1.0, "mtlalpha should be [0.0, 1.0]"
        self.etype = args.etype
        self.verbose = args.verbose
        # NOTE: for self.build method
        args.char_list = getattr(args, "char_list", None)
        self.char_list = args.char_list
        self.outdir = args.outdir
        self.space = args.sym_space
        self.blank = args.sym_blank
        self.reporter = Reporter()
        # below means the last number becomes eos/sos ID
        # note that sos/eos IDs are identical
        self.sos = odim - 1
        self.eos = odim - 1
        self.pad = 0
        # NOTE: we reserve index:0 for <pad> although this is reserved for a blank class
        # in ASR. However, blank labels are not used in MT.
        # To keep the vocabulary size,
        # we use index:0 for padding instead of adding one more class.
        # subsample info
        self.subsample = get_subsample(args, mode="st", arch="rnn")
        # label smoothing info
        if args.lsm_type and os.path.isfile(args.train_json):
            logging.info("Use label smoothing with " + args.lsm_type)
            labeldist = label_smoothing_dist(
                odim, args.lsm_type, transcript=args.train_json
            )
        else:
            labeldist = None
        # multilingual related
        self.multilingual = getattr(args, "multilingual", False)
        self.replace_sos = getattr(args, "replace_sos", False)
        # encoder
        self.enc = encoder_for(args, idim, self.subsample)
        # attention (ST)
        self.att = att_for(args)
        # decoder (ST)
        self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)
        # submodule for ASR task (only built when the auxiliary task is enabled)
        self.ctc = None
        self.att_asr = None
        self.dec_asr = None
        if self.asr_weight > 0:
            if self.mtlalpha > 0.0:
                self.ctc = CTC(
                    odim,
                    args.eprojs,
                    args.dropout_rate,
                    ctc_type=args.ctc_type,
                    reduce=True,
                )
            if self.mtlalpha < 1.0:
                # attention (asr)
                self.att_asr = att_for(args)
                # decoder (asr)
                args_asr = copy.deepcopy(args)
                args_asr.atype = "location"  # TODO(hirofumi0810): make this option
                self.dec_asr = decoder_for(
                    args_asr, odim, self.sos, self.eos, self.att_asr, labeldist
                )
        # submodule for MT task
        if self.mt_weight > 0:
            self.embed_mt = torch.nn.Embedding(odim, args.eunits, padding_idx=self.pad)
            self.dropout_mt = torch.nn.Dropout(p=args.dropout_rate)
            self.enc_mt = encoder_for(
                args, args.eunits, subsample=np.ones(args.elayers + 1, dtype=np.int64)
            )
        # weight initialization
        self.init_like_chainer()
        # options for beam search
        # NOTE(review): precedence parses as (asr_weight > 0 and report_cer)
        # or report_wer; likely intended asr_weight > 0 and
        # (report_cer or report_wer) -- confirm before changing
        if self.asr_weight > 0 and args.report_cer or args.report_wer:
            recog_args = {
                "beam_size": args.beam_size,
                "penalty": args.penalty,
                "ctc_weight": args.ctc_weight,
                "maxlenratio": args.maxlenratio,
                "minlenratio": args.minlenratio,
                "lm_weight": args.lm_weight,
                "rnnlm": args.rnnlm,
                "nbest": args.nbest,
                "space": args.sym_space,
                "blank": args.sym_blank,
                "tgt_lang": False,
            }
            self.recog_args = argparse.Namespace(**recog_args)
            self.report_cer = args.report_cer
            self.report_wer = args.report_wer
        else:
            self.report_cer = False
            self.report_wer = False
        if args.report_bleu:
            trans_args = {
                "beam_size": args.beam_size,
                "penalty": args.penalty,
                "ctc_weight": 0,
                "maxlenratio": args.maxlenratio,
                "minlenratio": args.minlenratio,
                "lm_weight": args.lm_weight,
                "rnnlm": args.rnnlm,
                "nbest": args.nbest,
                "space": args.sym_space,
                "blank": args.sym_blank,
                "tgt_lang": False,
            }
            self.trans_args = argparse.Namespace(**trans_args)
            self.report_bleu = args.report_bleu
        else:
            self.report_bleu = False
        self.rnnlm = None
        self.logzero = -10000000000.0
        self.loss = None
        self.acc = None
def init_like_chainer(self):
"""Initialize weight like chainer.
chainer basically uses LeCun way: W ~ Normal(0, fan_in ** -0.5), b = 0
pytorch basically uses W, b ~ Uniform(-fan_in**-0.5, fan_in**-0.5)
however, there are two exceptions as far as I know.
- EmbedID.W ~ Normal(0, 1)
- LSTM.upward.b[forget_gate_range] = 1 (but not used in NStepLSTM)
"""
lecun_normal_init_parameters(self)
# exceptions
# embed weight ~ Normal(0, 1)
self.dec.embed.weight.data.normal_(0, 1)
# forget-bias = 1.0
# https://discuss.pytorch.org/t/set-forget-gate-bias-of-lstm/1745
for i in range(len(self.dec.decoder)):
set_forget_bias_to_one(self.dec.decoder[i].bias_ih)
    def forward(self, xs_pad, ilens, ys_pad, ys_pad_src):
        """E2E forward.
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
        :param torch.Tensor ys_pad_src: batch of padded source-language token id
            sequences used by the auxiliary ASR/MT tasks (B, Lmax)
        :return: loss value
        :rtype: torch.Tensor
        """
        # 0. Extract target language ID
        if self.multilingual:
            tgt_lang_ids = ys_pad[:, 0:1]
            ys_pad = ys_pad[:, 1:]  # remove target language ID in the beginning
        else:
            tgt_lang_ids = None
        # 1. Encoder
        hs_pad, hlens, _ = self.enc(xs_pad, ilens)
        # 2. ST attention loss
        self.loss_st, self.acc, _ = self.dec(
            hs_pad, hlens, ys_pad, lang_ids=tgt_lang_ids
        )
        # 3. ASR loss
        (
            self.loss_asr_att,
            acc_asr,
            self.loss_asr_ctc,
            cer_ctc,
            cer,
            wer,
        ) = self.forward_asr(hs_pad, hlens, ys_pad_src)
        # 4. MT attention loss
        self.loss_mt, acc_mt = self.forward_mt(ys_pad, ys_pad_src)
        # 5. Compute BLEU (evaluation mode only; beam search is expensive)
        if self.training or not self.report_bleu:
            self.bleu = 0.0
        else:
            lpz = None
            nbest_hyps = self.dec.recognize_beam_batch(
                hs_pad,
                torch.tensor(hlens),
                lpz,
                self.trans_args,
                self.char_list,
                self.rnnlm,
                lang_ids=tgt_lang_ids.squeeze(1).tolist()
                if self.multilingual
                else None,
            )
            # remove <sos> and <eos>
            list_of_refs = []
            hyps = []
            y_hats = [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps]
            for i, y_hat in enumerate(y_hats):
                y_true = ys_pad[i]
                seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
                seq_true = [
                    self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                ]
                seq_hat_text = "".join(seq_hat).replace(self.trans_args.space, " ")
                seq_hat_text = seq_hat_text.replace(self.trans_args.blank, "")
                seq_true_text = "".join(seq_true).replace(self.trans_args.space, " ")
                hyps += [seq_hat_text.split(" ")]
                list_of_refs += [[seq_true_text.split(" ")]]
            self.bleu = nltk.bleu_score.corpus_bleu(list_of_refs, hyps) * 100
        # interpolate the auxiliary ASR CTC and attention losses
        asr_ctc_weight = self.mtlalpha
        self.loss_asr = (
            asr_ctc_weight * self.loss_asr_ctc
            + (1 - asr_ctc_weight) * self.loss_asr_att
        )
        # final multi-task objective: ST + weighted ASR + weighted MT
        self.loss = (
            (1 - self.asr_weight - self.mt_weight) * self.loss_st
            + self.asr_weight * self.loss_asr
            + self.mt_weight * self.loss_mt
        )
        loss_st_data = float(self.loss_st)
        loss_asr_data = float(self.loss_asr)
        loss_mt_data = float(self.loss_mt)
        loss_data = float(self.loss)
        # only report when the loss looks sane
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_asr_data,
                loss_mt_data,
                loss_st_data,
                acc_asr,
                acc_mt,
                self.acc,
                cer_ctc,
                cer,
                wer,
                self.bleu,
                loss_data,
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
def forward_asr(self, hs_pad, hlens, ys_pad):
"""Forward pass in the auxiliary ASR task.
:param torch.Tensor hs_pad: batch of padded source sequences (B, Tmax, idim)
:param torch.Tensor hlens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
:return: ASR attention loss value
:rtype: torch.Tensor
:return: accuracy in ASR attention decoder
:rtype: float
:return: ASR CTC loss value
:rtype: torch.Tensor
:return: character error rate from CTC prediction
:rtype: float
:return: character error rate from attetion decoder prediction
:rtype: float
:return: word error rate from attetion decoder prediction
:rtype: float
"""
import editdistance
loss_att, loss_ctc = 0.0, 0.0
acc = None
cer, wer = None, None
cer_ctc = None
if self.asr_weight == 0:
return loss_att, acc, loss_ctc, cer_ctc, cer, wer
# attention
if self.mtlalpha < 1:
loss_asr, acc_asr, _ = self.dec_asr(hs_pad, hlens, ys_pad)
# Compute wer and cer
if not self.training and (self.report_cer or self.report_wer):
if self.mtlalpha > 0 and self.recog_args.ctc_weight > 0.0:
lpz = self.ctc.log_softmax(hs_pad).data
else:
lpz = None
word_eds, word_ref_lens, char_eds, char_ref_lens = [], [], [], []
nbest_hyps_asr = self.dec_asr.recognize_beam_batch(
hs_pad,
torch.tensor(hlens),
lpz,
self.recog_args,
self.char_list,
self.rnnlm,
)
# remove <sos> and <eos>
y_hats = [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps_asr]
for i, y_hat in enumerate(y_hats):
y_true = ys_pad[i]
seq_hat = [
self.char_list[int(idx)] for idx in y_hat if int(idx) != -1
]
seq_true = [
self.char_list[int(idx)] for idx in y_true if int(idx) != -1
]
seq_hat_text = "".join(seq_hat).replace(self.recog_args.space, " ")
seq_hat_text = seq_hat_text.replace(self.recog_args.blank, "")
seq_true_text = "".join(seq_true).replace(
self.recog_args.space, " "
)
hyp_words = seq_hat_text.split()
ref_words = seq_true_text.split()
word_eds.append(editdistance.eval(hyp_words, ref_words))
word_ref_lens.append(len(ref_words))
hyp_chars = seq_hat_text.replace(" ", "")
ref_chars = seq_true_text.replace(" ", "")
char_eds.append(editdistance.eval(hyp_chars, ref_chars))
char_ref_lens.append(len(ref_chars))
wer = (
0.0
if not self.report_wer
else float(sum(word_eds)) / sum(word_ref_lens)
)
cer = (
0.0
if not self.report_cer
else float(sum(char_eds)) / sum(char_ref_lens)
)
# CTC
if self.mtlalpha > 0:
loss_ctc = self.ctc(hs_pad, hlens, ys_pad)
# Compute cer with CTC prediction
if self.char_list is not None:
cers = []
y_hats = self.ctc.argmax(hs_pad).data
for i, y in enumerate(y_hats):
y_hat = [x[0] for x in groupby(y)]
y_true = ys_pad[i]
seq_hat = [
self.char_list[int(idx)] for idx in y_hat if int(idx) != -1
]
seq_true = [
self.char_list[int(idx)] for idx in y_true if int(idx) != -1
]
seq_hat_text = "".join(seq_hat).replace(self.space, " ")
seq_hat_text = seq_hat_text.replace(self.blank, "")
seq_true_text = "".join(seq_true).replace(self.space, " ")
hyp_chars = seq_hat_text.replace(" ", "")
ref_chars = seq_true_text.replace(" ", "")
if len(ref_chars) > 0:
cers.append(
editdistance.eval(hyp_chars, ref_chars) / len(ref_chars)
)
cer_ctc = sum(cers) / len(cers) if cers else None
return loss_att, acc, loss_ctc, cer_ctc, cer, wer
    def forward_mt(self, xs_pad, ys_pad):
        """Forward pass in the auxiliary MT task.
        :param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
        :param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
        :return: MT loss value
        :rtype: torch.Tensor
        :return: accuracy in MT decoder
        :rtype: float
        """
        loss = 0.0
        acc = 0.0
        if self.mt_weight == 0:
            return loss, acc
        # unpadded source lengths, counted over non-(-1) entries
        ilens = torch.sum(xs_pad != -1, dim=1).cpu().numpy()
        # NOTE: xs_pad is padded with -1
        ys_src = [y[y != -1] for y in xs_pad]  # parse padded ys_src
        xs_zero_pad = pad_list(ys_src, self.pad)  # re-pad with zero
        # embed, apply dropout, encode, then reuse the shared ST decoder
        hs_pad, hlens, _ = self.enc_mt(
            self.dropout_mt(self.embed_mt(xs_zero_pad)), ilens
        )
        loss, acc, _ = self.dec(hs_pad, hlens, ys_pad)
        return loss, acc
def scorers(self):
"""Scorers."""
return dict(decoder=self.dec)
def encode(self, x):
"""Encode acoustic features.
:param ndarray x: input acoustic feature (T, D)
:return: encoder outputs
:rtype: torch.Tensor
"""
self.eval()
ilens = [x.shape[0]]
# subsample frame
x = x[:: self.subsample[0], :]
p = next(self.parameters())
h = torch.as_tensor(x, device=p.device, dtype=p.dtype)
# make a utt list (1) to use the same interface for encoder
hs = h.contiguous().unsqueeze(0)
# 1. encoder
hs, _, _ = self.enc(hs, ilens)
return hs.squeeze(0)
def translate(self, x, trans_args, char_list, rnnlm=None):
"""E2E beam search.
:param ndarray x: input acoustic feature (T, D)
:param Namespace trans_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
logging.info("input lengths: " + str(x.shape[0]))
hs = self.encode(x).unsqueeze(0)
logging.info("encoder output lengths: " + str(hs.size(1)))
# 2. Decoder
# decode the first utterance
y = self.dec.recognize_beam(hs[0], None, trans_args, char_list, rnnlm)
return y
    def translate_batch(self, xs, trans_args, char_list, rnnlm=None):
        """E2E batch beam search.
        :param list xs: list of input acoustic feature arrays [(T_1, D), (T_2, D), ...]
        :param Namespace trans_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        prev = self.training
        self.eval()
        # NOTE(review): lengths are taken before frame subsampling -- confirm
        # this matches what self.enc expects
        ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)
        # subsample frame
        xs = [xx[:: self.subsample[0], :] for xx in xs]
        xs = [to_device(self, to_torch_tensor(xx).float()) for xx in xs]
        xs_pad = pad_list(xs, 0.0)
        # 1. Encoder
        hs_pad, hlens, _ = self.enc(xs_pad, ilens)
        # 2. Decoder
        hlens = torch.tensor(list(map(int, hlens)))  # make sure hlens is tensor
        y = self.dec.recognize_beam_batch(
            hs_pad, hlens, None, trans_args, char_list, rnnlm
        )
        # restore the caller's training mode
        if prev:
            self.train()
        return y
    def calculate_all_attentions(self, xs_pad, ilens, ys_pad, ys_pad_src):
        """E2E attention calculation (for analysis/visualization).
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
        :param torch.Tensor ys_pad_src:
            batch of padded token id sequence tensor (B, Lmax)
        :return: attention weights with the following shape,
            1) multi-head case => attention weights (B, H, Lmax, Tmax),
            2) other case => attention weights (B, Lmax, Tmax).
        :rtype: float ndarray
        """
        self.eval()
        with torch.no_grad():
            # 1. Encoder
            if self.multilingual:
                tgt_lang_ids = ys_pad[:, 0:1]
                ys_pad = ys_pad[:, 1:]  # remove target language ID in the beginning
            else:
                tgt_lang_ids = None
            hpad, hlens, _ = self.enc(xs_pad, ilens)
            # 2. Decoder
            att_ws = self.dec.calculate_all_attentions(
                hpad, hlens, ys_pad, lang_ids=tgt_lang_ids
            )
        # put the model back into training mode
        self.train()
        return att_ws
    def calculate_all_ctc_probs(self, xs_pad, ilens, ys_pad, ys_pad_src):
        """E2E CTC probability calculation.
        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
        :param torch.Tensor
            ys_pad_src: batch of padded token id sequence tensor (B, Lmax)
        :return: CTC probability (B, Tmax, vocab), or None when CTC is disabled
        :rtype: float ndarray
        """
        probs = None
        # the CTC branch only exists when the auxiliary ASR task uses it
        if self.asr_weight == 0 or self.mtlalpha == 0:
            return probs
        self.eval()
        with torch.no_grad():
            # 1. Encoder
            hpad, hlens, _ = self.enc(xs_pad, ilens)
            # 2. CTC probs
            probs = self.ctc.softmax(hpad).cpu().numpy()
        self.train()
        return probs
def subsample_frames(self, x):
"""Subsample speeh frames in the encoder."""
# subsample frame
x = x[:: self.subsample[0], :]
ilen = [x.shape[0]]
h = to_device(self, torch.from_numpy(np.array(x, dtype=np.float32)))
h.contiguous()
return h, ilen
| 24,036 | 35.037481 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/wavenet.py | # -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""This code is based on https://github.com/kan-bayashi/PytorchWaveNetVocoder."""
import logging
import sys
import time
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
def encode_mu_law(x, mu=256):
    """Compress an audio signal with mu-law and quantize it.
    Args:
        x (ndarray): Audio signal with the range from -1 to 1.
        mu (int): Quantized level.
    Returns:
        ndarray: Quantized audio signal with the range from 0 to mu - 1.
    """
    levels = mu - 1
    # mu-law companding: log-scale magnitude, sign preserved
    companded = np.sign(x) * np.log(1 + levels * np.abs(x)) / np.log(1 + levels)
    # map [-1, 1] onto integer bins [0, levels]
    return np.floor((companded + 1) / 2 * levels + 0.5).astype(np.int64)
def decode_mu_law(y, mu=256):
"""Perform mu-law decoding.
Args:
x (ndarray): Quantized audio signal with the range from 0 to mu - 1.
mu (int): Quantized level.
Returns:
ndarray: Audio signal with the range from -1 to 1.
"""
mu = mu - 1
fx = (y - 0.5) / mu * 2 - 1
x = np.sign(fx) / mu * ((1 + mu) ** np.abs(fx) - 1)
return x
def initialize(m):
    """Initialize convolution layers with Xavier/constant schemes.
    Args:
        m (torch.nn.Module): Torch module.
    """
    if isinstance(m, nn.Conv1d):
        # Xavier-uniform weights, zero bias for 1D convolutions
        nn.init.xavier_uniform_(m.weight)
        nn.init.constant_(m.bias, 0.0)
    elif isinstance(m, nn.ConvTranspose2d):
        # constant ones for the upsampling deconvolution, zero bias
        nn.init.constant_(m.weight, 1.0)
        nn.init.constant_(m.bias, 0.0)
class OneHot(nn.Module):
    """Convert index tensors to one-hot vectors.
    Args:
        depth (int): Dimension of one-hot vector.
    """
    def __init__(self, depth):
        super(OneHot, self).__init__()
        self.depth = depth
    def forward(self, x):
        """Calculate forward propagation.
        Args:
            x (LongTensor): long tensor variable with the shape (B, T)
        Returns:
            Tensor: float tensor variable with the shape (B, T, depth)
        """
        # wrap indices into the valid range before expansion
        idx = (x % self.depth).unsqueeze(2)
        onehot = idx.new_zeros(idx.size(0), idx.size(1), self.depth).float()
        return onehot.scatter_(2, idx, 1)
class CausalConv1d(nn.Module):
    """1D dilated causal convolution.
    The input is padded by ``(kernel_size - 1) * dilation`` on both sides and
    the right-hand overhang is stripped afterwards, so every output frame
    depends only on current and past input frames.
    """
    def __init__(self, in_channels, out_channels, kernel_size, dilation=1, bias=True):
        super(CausalConv1d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.dilation = dilation
        pad = (kernel_size - 1) * dilation
        self.padding = pad
        self.conv = nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size,
            padding=pad,
            dilation=dilation,
            bias=bias,
        )
    def forward(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor with the shape (B, in_channels, T).
        Returns:
            Tensor: Tensor with the shape (B, out_channels, T)
        """
        out = self.conv(x)
        # drop the right-hand overhang introduced by symmetric padding
        return out if self.padding == 0 else out[:, :, : -self.padding]
class UpSampling(nn.Module):
    """Upsampling layer with deconvolution.
    Args:
        upsampling_factor (int): Upsampling factor.
    """
    def __init__(self, upsampling_factor, bias=True):
        super(UpSampling, self).__init__()
        self.upsampling_factor = upsampling_factor
        self.bias = bias
        # a (1 x factor) transposed conv stretches only the time axis
        self.conv = nn.ConvTranspose2d(
            1,
            1,
            kernel_size=(1, self.upsampling_factor),
            stride=(1, self.upsampling_factor),
            bias=self.bias,
        )
    def forward(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor with the shape (B, C, T)
        Returns:
            Tensor: Tensor with the shape (B, C, T') where T' = T * upsampling_factor.
        """
        # treat (C, T) as a 2D image with one channel for the deconvolution
        expanded = self.conv(x.unsqueeze(1))  # B x 1 x C x T'
        return expanded.squeeze(1)
class WaveNet(nn.Module):
"""Conditional wavenet.
Args:
n_quantize (int): Number of quantization.
n_aux (int): Number of aux feature dimension.
n_resch (int): Number of filter channels for residual block.
n_skipch (int): Number of filter channels for skip connection.
dilation_depth (int): Number of dilation depth
(e.g. if set 10, max dilation = 2^(10-1)).
dilation_repeat (int): Number of dilation repeat.
kernel_size (int): Filter size of dilated causal convolution.
upsampling_factor (int): Upsampling factor.
"""
    def __init__(
        self,
        n_quantize=256,
        n_aux=28,
        n_resch=512,
        n_skipch=256,
        dilation_depth=10,
        dilation_repeat=3,
        kernel_size=2,
        upsampling_factor=0,
    ):
        """Initialize the WaveNet modules (see class docstring for arguments)."""
        super(WaveNet, self).__init__()
        self.n_aux = n_aux
        self.n_quantize = n_quantize
        self.n_resch = n_resch
        self.n_skipch = n_skipch
        self.kernel_size = kernel_size
        self.dilation_depth = dilation_depth
        self.dilation_repeat = dilation_repeat
        self.upsampling_factor = upsampling_factor
        # dilation cycle [1, 2, ..., 2^(depth-1)] repeated dilation_repeat times
        self.dilations = [
            2**i for i in range(self.dilation_depth)
        ] * self.dilation_repeat
        self.receptive_field = (self.kernel_size - 1) * sum(self.dilations) + 1
        # for preprocessing
        self.onehot = OneHot(self.n_quantize)
        self.causal = CausalConv1d(self.n_quantize, self.n_resch, self.kernel_size)
        if self.upsampling_factor > 0:
            self.upsampling = UpSampling(self.upsampling_factor)
        # for residual blocks (one entry of each list per dilation)
        self.dil_sigmoid = nn.ModuleList()
        self.dil_tanh = nn.ModuleList()
        self.aux_1x1_sigmoid = nn.ModuleList()
        self.aux_1x1_tanh = nn.ModuleList()
        self.skip_1x1 = nn.ModuleList()
        self.res_1x1 = nn.ModuleList()
        for d in self.dilations:
            self.dil_sigmoid += [
                CausalConv1d(self.n_resch, self.n_resch, self.kernel_size, d)
            ]
            self.dil_tanh += [
                CausalConv1d(self.n_resch, self.n_resch, self.kernel_size, d)
            ]
            self.aux_1x1_sigmoid += [nn.Conv1d(self.n_aux, self.n_resch, 1)]
            self.aux_1x1_tanh += [nn.Conv1d(self.n_aux, self.n_resch, 1)]
            self.skip_1x1 += [nn.Conv1d(self.n_resch, self.n_skipch, 1)]
            self.res_1x1 += [nn.Conv1d(self.n_resch, self.n_resch, 1)]
        # for postprocessing
        self.conv_post_1 = nn.Conv1d(self.n_skipch, self.n_skipch, 1)
        self.conv_post_2 = nn.Conv1d(self.n_skipch, self.n_quantize, 1)
def forward(self, x, h):
"""Calculate forward propagation.
Args:
x (LongTensor): Quantized input waveform tensor with the shape (B, T).
h (Tensor): Auxiliary feature tensor with the shape (B, n_aux, T).
Returns:
Tensor: Logits with the shape (B, T, n_quantize).
"""
# preprocess
output = self._preprocess(x)
if self.upsampling_factor > 0:
h = self.upsampling(h)
# residual block
skip_connections = []
for i in range(len(self.dilations)):
output, skip = self._residual_forward(
output,
h,
self.dil_sigmoid[i],
self.dil_tanh[i],
self.aux_1x1_sigmoid[i],
self.aux_1x1_tanh[i],
self.skip_1x1[i],
self.res_1x1[i],
)
skip_connections.append(skip)
# skip-connection part
output = sum(skip_connections)
output = self._postprocess(output)
return output
    def generate(self, x, h, n_samples, interval=None, mode="sampling"):
        """Generate a waveform with fast generation algorithm.
        This generation based on `Fast WaveNet Generation Algorithm`_.
        Args:
            x (LongTensor): Initial waveform tensor with the shape (T,).
            h (Tensor): Auxiliary feature tensor with the shape (n_samples + T, n_aux).
            n_samples (int): Number of samples to be generated.
            interval (int, optional): Log interval.
            mode (str, optional): "sampling" or "argmax".
        Return:
            ndarray: Generated quantized waveform (n_samples).
        .. _`Fast WaveNet Generation Algorithm`: https://arxiv.org/abs/1611.09482
        """
        # reshape inputs
        assert len(x.shape) == 1
        assert len(h.shape) == 2 and h.shape[1] == self.n_aux
        x = x.unsqueeze(0)
        h = h.transpose(0, 1).unsqueeze(0)
        # perform upsampling
        if self.upsampling_factor > 0:
            h = self.upsampling(h)
        # padding for shortage: extend aux features to cover n_samples
        if n_samples > h.shape[2]:
            h = F.pad(h, (0, n_samples - h.shape[2]), "replicate")
        # left-pad if the seed is shorter than the receptive field
        n_pad = self.receptive_field - x.size(1)
        if n_pad > 0:
            x = F.pad(x, (n_pad, 0), "constant", self.n_quantize // 2)
            h = F.pad(h, (n_pad, 0), "replicate")
        # prepare per-layer output buffers so each step only computes one frame
        output = self._preprocess(x)
        h_ = h[:, :, : x.size(1)]
        output_buffer = []
        buffer_size = []
        for i, d in enumerate(self.dilations):
            output, _ = self._residual_forward(
                output,
                h_,
                self.dil_sigmoid[i],
                self.dil_tanh[i],
                self.aux_1x1_sigmoid[i],
                self.aux_1x1_tanh[i],
                self.skip_1x1[i],
                self.res_1x1[i],
            )
            if d == 2 ** (self.dilation_depth - 1):
                buffer_size.append(self.kernel_size - 1)
            else:
                buffer_size.append(d * 2 * (self.kernel_size - 1))
            output_buffer.append(output[:, :, -buffer_size[i] - 1 : -1])
        # generate one sample per iteration
        samples = x[0]
        start_time = time.time()
        for i in range(n_samples):
            output = samples[-self.kernel_size * 2 + 1 :].unsqueeze(0)
            output = self._preprocess(output)
            h_ = h[:, :, samples.size(0) - 1].contiguous().view(1, self.n_aux, 1)
            output_buffer_next = []
            skip_connections = []
            for j, d in enumerate(self.dilations):
                output, skip = self._generate_residual_forward(
                    output,
                    h_,
                    self.dil_sigmoid[j],
                    self.dil_tanh[j],
                    self.aux_1x1_sigmoid[j],
                    self.aux_1x1_tanh[j],
                    self.skip_1x1[j],
                    self.res_1x1[j],
                )
                output = torch.cat([output_buffer[j], output], dim=2)
                output_buffer_next.append(output[:, :, -buffer_size[j] :])
                skip_connections.append(skip)
            # update buffer
            output_buffer = output_buffer_next
            # get predicted sample
            output = sum(skip_connections)
            output = self._postprocess(output)[0]
            if mode == "sampling":
                posterior = F.softmax(output[-1], dim=0)
                dist = torch.distributions.Categorical(posterior)
                sample = dist.sample().unsqueeze(0)
            elif mode == "argmax":
                # NOTE(review): this takes argmax over every frame of `output`,
                # appending more than one value per step, unlike the sampling
                # branch which uses output[-1] only -- confirm intended
                sample = output.argmax(-1)
            else:
                logging.error("mode should be sampling or argmax")
                sys.exit(1)
            samples = torch.cat([samples, sample], dim=0)
            # show progress
            if interval is not None and (i + 1) % interval == 0:
                elapsed_time_per_sample = (time.time() - start_time) / interval
                logging.info(
                    "%d/%d estimated time = %.3f sec (%.3f sec / sample)"
                    % (
                        i + 1,
                        n_samples,
                        (n_samples - i - 1) * elapsed_time_per_sample,
                        elapsed_time_per_sample,
                    )
                )
                start_time = time.time()
        return samples[-n_samples:].cpu().numpy()
def _preprocess(self, x):
x = self.onehot(x).transpose(1, 2)
output = self.causal(x)
return output
def _postprocess(self, x):
output = F.relu(x)
output = self.conv_post_1(output)
output = F.relu(output) # B x C x T
output = self.conv_post_2(output).transpose(1, 2) # B x T x C
return output
def _residual_forward(
self,
x,
h,
dil_sigmoid,
dil_tanh,
aux_1x1_sigmoid,
aux_1x1_tanh,
skip_1x1,
res_1x1,
):
output_sigmoid = dil_sigmoid(x)
output_tanh = dil_tanh(x)
aux_output_sigmoid = aux_1x1_sigmoid(h)
aux_output_tanh = aux_1x1_tanh(h)
output = torch.sigmoid(output_sigmoid + aux_output_sigmoid) * torch.tanh(
output_tanh + aux_output_tanh
)
skip = skip_1x1(output)
output = res_1x1(output)
output = output + x
return output, skip
def _generate_residual_forward(
self,
x,
h,
dil_sigmoid,
dil_tanh,
aux_1x1_sigmoid,
aux_1x1_tanh,
skip_1x1,
res_1x1,
):
output_sigmoid = dil_sigmoid(x)[:, :, -1:]
output_tanh = dil_tanh(x)[:, :, -1:]
aux_output_sigmoid = aux_1x1_sigmoid(h)
aux_output_tanh = aux_1x1_tanh(h)
output = torch.sigmoid(output_sigmoid + aux_output_sigmoid) * torch.tanh(
output_tanh + aux_output_tanh
)
skip = skip_1x1(output)
output = res_1x1(output)
output = output + x[:, :, -1:] # B x C x 1
return output, skip
| 13,938 | 30.113839 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_asr_mix_transformer.py | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2020 Johns Hopkins University (Xuankai Chang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
Transformer speech recognition model for single-channel multi-speaker mixture speech.
It is a fusion of `e2e_asr_mix.py` and `e2e_asr_transformer.py`. Refer to:
https://arxiv.org/pdf/2002.03921.pdf
1. The Transformer-based Encoder now consists of three stages:
(a): Enc_mix: encoding input mixture speech;
(b): Enc_SD: separating mixed speech representations;
(c): Enc_rec: transforming each separated speech representation.
2. PIT is used in CTC to determine the permutation with minimum loss.
"""
import logging
import math
from argparse import Namespace
import numpy
import torch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.ctc_prefix_score import CTCPrefixScore
from espnet.nets.e2e_asr_common import end_detect
from espnet.nets.pytorch_backend.ctc import CTC
from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD
from espnet.nets.pytorch_backend.e2e_asr_mix import E2E as E2EASRMIX
from espnet.nets.pytorch_backend.e2e_asr_mix import PIT
from espnet.nets.pytorch_backend.e2e_asr_transformer import E2E as E2EASR
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask, th_accuracy
from espnet.nets.pytorch_backend.rnn.decoders import CTC_SCORING_RATIO
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.encoder_mix import EncoderMix
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask, target_mask
class E2E(E2EASR, ASRInterface, torch.nn.Module):
    """E2E module for single-channel multi-speaker (mixture) speech recognition.

    The encoder (``EncoderMix``) produces one hidden-state stream per speaker;
    CTC losses are computed for every speaker/label pairing and PIT
    (permutation invariant training) selects the permutation with minimum
    loss before the attention decoder is run.

    :param int idim: dimension of inputs
    :param int odim: dimension of outputs
    :param Namespace args: argument Namespace containing options
    """
    @staticmethod
    def add_arguments(parser):
        """Add arguments (Transformer options plus mixture-encoder options)."""
        E2EASR.add_arguments(parser)
        E2EASRMIX.encoder_mix_add_arguments(parser)
        return parser
    def __init__(self, idim, odim, args, ignore_id=-1):
        """Construct an E2E object.

        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        :param int ignore_id: label id to ignore in loss computation
        """
        # NOTE(review): the literal -1 is passed instead of the ignore_id
        # argument -- confirm this is intended.
        super(E2E, self).__init__(idim, odim, args, ignore_id=-1)
        if args.transformer_attn_dropout_rate is None:
            args.transformer_attn_dropout_rate = args.dropout_rate
        # Replace the single-speaker encoder with the three-stage mixture
        # encoder (Enc_mix -> Enc_SD -> Enc_rec); it returns one
        # (hs_pad, hs_mask) pair per speaker.
        self.encoder = EncoderMix(
            idim=idim,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            linear_units=args.eunits,
            num_blocks_sd=args.elayers_sd,
            num_blocks_rec=args.elayers,
            input_layer=args.transformer_input_layer,
            dropout_rate=args.dropout_rate,
            positional_dropout_rate=args.dropout_rate,
            attention_dropout_rate=args.transformer_attn_dropout_rate,
            num_spkrs=args.num_spkrs,
        )
        if args.mtlalpha > 0.0:
            # reduce=False keeps per-utterance CTC losses so PIT can compare
            # all speaker/label permutations below.
            self.ctc = CTC(
                odim, args.adim, args.dropout_rate, ctc_type=args.ctc_type, reduce=False
            )
        else:
            self.ctc = None
        self.num_spkrs = args.num_spkrs
        self.pit = PIT(self.num_spkrs)
    def forward(self, xs_pad, ilens, ys_pad):
        """E2E forward.

        :param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of source sequences (B)
        :param torch.Tensor ys_pad: batch of padded target sequences
            (B, num_spkrs, Lmax)
        :return: CTC loss value
        :rtype: torch.Tensor
        :return: attention loss value
        :rtype: torch.Tensor
        :return: accuracy in attention decoder
        :rtype: float
        """
        # 1. forward encoder
        xs_pad = xs_pad[:, : max(ilens)]  # for data parallel
        src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2)
        hs_pad, hs_mask = self.encoder(xs_pad, src_mask)  # list: one entry per speaker
        self.hs_pad = hs_pad
        # 2. ctc
        # TODO(karita) show predicted text
        # TODO(karita) calculate these stats
        cer_ctc = None
        assert self.mtlalpha > 0.0
        batch_size = xs_pad.size(0)
        ys_pad = ys_pad.transpose(0, 1) # (num_spkrs, B, Lmax)
        hs_len = [hs_mask[i].view(batch_size, -1).sum(1) for i in range(self.num_spkrs)]
        # CTC loss for every (encoder stream, label stream) pairing; PIT then
        # picks the permutation of minimum total loss per utterance.
        loss_ctc_perm = torch.stack(
            [
                self.ctc(
                    hs_pad[i // self.num_spkrs].view(batch_size, -1, self.adim),
                    hs_len[i // self.num_spkrs],
                    ys_pad[i % self.num_spkrs],
                )
                for i in range(self.num_spkrs**2)
            ],
            dim=1,
        )  # (B, num_spkrs^2)
        loss_ctc, min_perm = self.pit.pit_process(loss_ctc_perm)
        logging.info("ctc loss:" + str(float(loss_ctc)))
        # Permute the labels according to loss
        for b in range(batch_size):  # B
            ys_pad[:, b] = ys_pad[min_perm[b], b]  # (num_spkrs, B, Lmax)
        # Per-speaker token counts, used as weights for averaging stats below.
        ys_out_len = [
            float(torch.sum(ys_pad[i] != self.ignore_id)) for i in range(self.num_spkrs)
        ]
        # TODO(karita) show predicted text
        # TODO(karita) calculate these stats
        if self.error_calculator is not None:
            cer_ctc = []
            for i in range(self.num_spkrs):
                ys_hat = self.ctc.argmax(hs_pad[i].view(batch_size, -1, self.adim)).data
                cer_ctc.append(
                    self.error_calculator(ys_hat.cpu(), ys_pad[i].cpu(), is_ctc=True)
                )
            # token-count-weighted average of per-speaker CERs
            cer_ctc = sum(map(lambda x: x[0] * x[1], zip(cer_ctc, ys_out_len))) / sum(
                ys_out_len
            )
        else:
            cer_ctc = None
        # 3. forward decoder
        if self.mtlalpha == 1.0:
            loss_att, self.acc, cer, wer = None, None, None, None
        else:
            pred_pad, pred_mask = [None] * self.num_spkrs, [None] * self.num_spkrs
            loss_att, acc = [None] * self.num_spkrs, [None] * self.num_spkrs
            for i in range(self.num_spkrs):
                (
                    pred_pad[i],
                    pred_mask[i],
                    loss_att[i],
                    acc[i],
                ) = self.decoder_and_attention(
                    hs_pad[i], hs_mask[i], ys_pad[i], batch_size
                )
            # 4. compute attention loss
            # The following is just an approximation
            loss_att = sum(map(lambda x: x[0] * x[1], zip(loss_att, ys_out_len))) / sum(
                ys_out_len
            )
            self.acc = sum(map(lambda x: x[0] * x[1], zip(acc, ys_out_len))) / sum(
                ys_out_len
            )
        # 5. compute cer/wer
        if self.training or self.error_calculator is None:
            cer, wer = None, None
        else:
            # NOTE(review): pred_pad is a Python list of per-speaker tensors
            # here, so .argmax(dim=-1) would raise AttributeError on this
            # path -- confirm whether this branch is reachable / needs a fix.
            ys_hat = pred_pad.argmax(dim=-1)
            cer, wer = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
        # copied from e2e_asr
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = None
        elif alpha == 1:
            self.loss = loss_ctc
            loss_att_data = None
            loss_ctc_data = float(loss_ctc)
        else:
            self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = float(loss_ctc)
        loss_data = float(self.loss)
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_ctc_data, loss_att_data, self.acc, cer_ctc, cer, wer, loss_data
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
    def decoder_and_attention(self, hs_pad, hs_mask, ys_pad, batch_size):
        """Forward the decoder for one speaker stream and compute its attention loss.

        :param torch.Tensor hs_pad: encoder outputs for this speaker
        :param torch.Tensor hs_mask: encoder output mask for this speaker
        :param torch.Tensor ys_pad: labels for this speaker (B, Lmax)
        :param int batch_size: batch size (unused here; kept for interface)
        :return: decoder output, decoder mask, attention loss, accuracy
        """
        # forward decoder
        ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
        ys_mask = target_mask(ys_in_pad, self.ignore_id)
        pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
        # compute attention loss
        loss_att = self.criterion(pred_pad, ys_out_pad)
        acc = th_accuracy(
            pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id
        )
        return pred_pad, pred_mask, loss_att, acc
    def encode(self, x):
        """Encode acoustic features.

        :param ndarray x: source acoustic feature (T, D)
        :return: encoder outputs (one stream per speaker)
        :rtype: torch.Tensor
        """
        self.eval()
        x = torch.as_tensor(x).unsqueeze(0)
        enc_output, _ = self.encoder(x, None)
        return enc_output
    def recog(self, enc_output, recog_args, char_list=None, rnnlm=None, use_jit=False):
        """Run beam search on one separated speaker stream.

        :param ndarray enc_output: encoder outputs (B, T, D) or (T, D)
        :param Namespace recog_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :param bool use_jit: trace the decoder with torch.jit for scoring
        :return: N-best decoding results
        :rtype: list
        """
        if recog_args.ctc_weight > 0.0:
            lpz = self.ctc.log_softmax(enc_output)
            lpz = lpz.squeeze(0)
        else:
            lpz = None
        h = enc_output.squeeze(0)
        logging.info("input lengths: " + str(h.size(0)))
        # search params
        beam = recog_args.beam_size
        penalty = recog_args.penalty
        ctc_weight = recog_args.ctc_weight
        # prepare sos
        y = self.sos
        vy = h.new_zeros(1).long()
        if recog_args.maxlenratio == 0:
            maxlen = h.shape[0]
        else:
            # maxlen >= 1
            maxlen = max(1, int(recog_args.maxlenratio * h.size(0)))
        minlen = int(recog_args.minlenratio * h.size(0))
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))
        # initialize hypothesis
        if rnnlm:
            hyp = {"score": 0.0, "yseq": [y], "rnnlm_prev": None}
        else:
            hyp = {"score": 0.0, "yseq": [y]}
        if lpz is not None:
            ctc_prefix_score = CTCPrefixScore(lpz.detach().numpy(), 0, self.eos, numpy)
            hyp["ctc_state_prev"] = ctc_prefix_score.initial_state()
            hyp["ctc_score_prev"] = 0.0
            if ctc_weight != 1.0:
                # pre-pruning based on attention scores
                ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
            else:
                ctc_beam = lpz.shape[-1]
        hyps = [hyp]
        ended_hyps = []
        traced_decoder = None
        for i in range(maxlen):
            logging.debug("position " + str(i))
            hyps_best_kept = []
            for hyp in hyps:
                vy[0] = hyp["yseq"][i]
                # get nbest local scores and their ids
                ys_mask = subsequent_mask(i + 1).unsqueeze(0)
                ys = torch.tensor(hyp["yseq"]).unsqueeze(0)
                # FIXME: jit does not match non-jit result
                if use_jit:
                    if traced_decoder is None:
                        traced_decoder = torch.jit.trace(
                            self.decoder.forward_one_step, (ys, ys_mask, enc_output)
                        )
                    local_att_scores = traced_decoder(ys, ys_mask, enc_output)[0]
                else:
                    local_att_scores = self.decoder.forward_one_step(
                        ys, ys_mask, enc_output
                    )[0]
                if rnnlm:
                    rnnlm_state, local_lm_scores = rnnlm.predict(hyp["rnnlm_prev"], vy)
                    local_scores = (
                        local_att_scores + recog_args.lm_weight * local_lm_scores
                    )
                else:
                    local_scores = local_att_scores
                if lpz is not None:
                    # joint attention/CTC scoring over the pre-pruned candidates
                    local_best_scores, local_best_ids = torch.topk(
                        local_att_scores, ctc_beam, dim=1
                    )
                    ctc_scores, ctc_states = ctc_prefix_score(
                        hyp["yseq"], local_best_ids[0], hyp["ctc_state_prev"]
                    )
                    local_scores = (1.0 - ctc_weight) * local_att_scores[
                        :, local_best_ids[0]
                    ] + ctc_weight * torch.from_numpy(
                        ctc_scores - hyp["ctc_score_prev"]
                    )
                    if rnnlm:
                        local_scores += (
                            recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]]
                        )
                    local_best_scores, joint_best_ids = torch.topk(
                        local_scores, beam, dim=1
                    )
                    local_best_ids = local_best_ids[:, joint_best_ids[0]]
                else:
                    local_best_scores, local_best_ids = torch.topk(
                        local_scores, beam, dim=1
                    )
                for j in range(beam):
                    new_hyp = {}
                    new_hyp["score"] = hyp["score"] + float(local_best_scores[0, j])
                    new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"]))
                    new_hyp["yseq"][: len(hyp["yseq"])] = hyp["yseq"]
                    new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[0, j])
                    if rnnlm:
                        new_hyp["rnnlm_prev"] = rnnlm_state
                    if lpz is not None:
                        new_hyp["ctc_state_prev"] = ctc_states[joint_best_ids[0, j]]
                        new_hyp["ctc_score_prev"] = ctc_scores[joint_best_ids[0, j]]
                    # will be (2 x beam) hyps at most
                    hyps_best_kept.append(new_hyp)
                hyps_best_kept = sorted(
                    hyps_best_kept, key=lambda x: x["score"], reverse=True
                )[:beam]
            # sort and get nbest
            hyps = hyps_best_kept
            logging.debug("number of pruned hypothes: " + str(len(hyps)))
            if char_list is not None:
                logging.debug(
                    "best hypo: "
                    + "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]])
                )
            # add eos in the final loop to avoid that there are no ended hyps
            if i == maxlen - 1:
                logging.info("adding <eos> in the last position in the loop")
                for hyp in hyps:
                    hyp["yseq"].append(self.eos)
            # Move ended hypotheses to the final list and drop them from the
            # active set (afterwards the active set may hold fewer than beam).
            remained_hyps = []
            for hyp in hyps:
                if hyp["yseq"][-1] == self.eos:
                    # only store the sequence that has more than minlen outputs
                    # also add penalty
                    if len(hyp["yseq"]) > minlen:
                        hyp["score"] += (i + 1) * penalty
                        if rnnlm:  # Word LM needs to add final <eos> score
                            hyp["score"] += recog_args.lm_weight * rnnlm.final(
                                hyp["rnnlm_prev"]
                            )
                        ended_hyps.append(hyp)
                else:
                    remained_hyps.append(hyp)
            # end detection
            if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
                logging.info("end detected at %d", i)
                break
            hyps = remained_hyps
            if len(hyps) > 0:
                logging.debug("remeined hypothes: " + str(len(hyps)))
            else:
                logging.info("no hypothesis. Finish decoding.")
                break
            if char_list is not None:
                for hyp in hyps:
                    logging.debug(
                        "hypo: " + "".join([char_list[int(x)] for x in hyp["yseq"][1:]])
                    )
            logging.debug("number of ended hypothes: " + str(len(ended_hyps)))
        nbest_hyps = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[
            : min(len(ended_hyps), recog_args.nbest)
        ]
        # check number of hypotheses; retry with a relaxed minlenratio if empty
        if len(nbest_hyps) == 0:
            logging.warning(
                "there is no N-best results, perform recognition "
                "again with smaller minlenratio."
            )
            # should copy because Namespace will be overwritten globally
            recog_args = Namespace(**vars(recog_args))
            recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
            return self.recog(enc_output, recog_args, char_list, rnnlm)
        logging.info("total log probability: " + str(nbest_hyps[0]["score"]))
        logging.info(
            "normalized log probability: "
            + str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"]))
        )
        return nbest_hyps
    def recognize(self, x, recog_args, char_list=None, rnnlm=None, use_jit=False):
        """Recognize input speech of each speaker.

        :param ndarray x: input acoustic feature (B, T, D) or (T, D)
        :param Namespace recog_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :param bool use_jit: trace the decoder with torch.jit for scoring
        :return: N-best decoding results, one list per separated speaker
        :rtype: list
        """
        # Encoder
        enc_output = self.encode(x)
        # Decoder: run beam search independently on each separated stream.
        nbest_hyps = []
        for enc_out in enc_output:
            nbest_hyps.append(
                self.recog(enc_out, recog_args, char_list, rnnlm, use_jit)
            )
        return nbest_hyps
| 18,143 | 38.529412 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_st_conformer.py | # Copyright 2020 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
Conformer speech translation model (pytorch).
It is a fusion of `e2e_st_transformer.py`
Refer to: https://arxiv.org/abs/2005.08100
"""
from espnet.nets.pytorch_backend.conformer.argument import ( # noqa: H301
add_arguments_conformer_common,
verify_rel_pos_type,
)
from espnet.nets.pytorch_backend.conformer.encoder import Encoder
from espnet.nets.pytorch_backend.e2e_st_transformer import E2E as E2ETransformer
class E2E(E2ETransformer):
    """Conformer-based speech translation model.

    Behaves like the Transformer ST model except that the encoder is
    replaced by a Conformer encoder.

    :param int idim: dimension of inputs
    :param int odim: dimension of outputs
    :param Namespace args: argument Namespace containing options
    """
    @staticmethod
    def add_arguments(parser):
        """Extend ``parser`` with Transformer and Conformer options."""
        E2ETransformer.add_arguments(parser)
        E2E.add_conformer_arguments(parser)
        return parser
    @staticmethod
    def add_conformer_arguments(parser):
        """Add the Conformer-specific command line options."""
        conformer_group = parser.add_argument_group(
            "conformer model specific setting"
        )
        add_arguments_conformer_common(conformer_group)
        return parser
    def __init__(self, idim, odim, args, ignore_id=-1):
        """Build the base ST model, then swap in a Conformer encoder.

        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        :param int ignore_id: label id to ignore in loss computation
        """
        super().__init__(idim, odim, args, ignore_id)
        if args.transformer_attn_dropout_rate is None:
            args.transformer_attn_dropout_rate = args.dropout_rate
        # Check the relative positional encoding type
        args = verify_rel_pos_type(args)
        encoder_kwargs = dict(
            idim=idim,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            linear_units=args.eunits,
            num_blocks=args.elayers,
            input_layer=args.transformer_input_layer,
            dropout_rate=args.dropout_rate,
            positional_dropout_rate=args.dropout_rate,
            attention_dropout_rate=args.transformer_attn_dropout_rate,
            pos_enc_layer_type=args.transformer_encoder_pos_enc_layer_type,
            selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
            activation_type=args.transformer_encoder_activation_type,
            macaron_style=args.macaron_style,
            use_cnn_module=args.use_cnn_module,
            cnn_module_kernel=args.cnn_module_kernel,
        )
        self.encoder = Encoder(**encoder_kwargs)
        # re-run initialization so the new encoder gets the configured init
        self.reset_parameters(args)
| 2,594 | 33.6 | 82 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/initialization.py | #!/usr/bin/env python
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Initialization functions for RNN sequence-to-sequence models."""
import math
def lecun_normal_init_parameters(module):
    """Initialize all parameters of ``module`` in LeCun's manner.

    1-D parameters (biases) are zeroed; 2-D (linear weights) and 3-D/4-D
    (conv weights) parameters are drawn from N(0, 1/sqrt(fan_in)), where
    fan_in is the input feature count times any kernel dimensions.

    Raises:
        NotImplementedError: for parameters of any other dimensionality.
    """
    for param in module.parameters():
        tensor = param.data
        ndim = tensor.dim()
        if ndim == 1:
            # bias
            tensor.zero_()
            continue
        if ndim == 2:
            # linear weight: fan-in is the second dimension
            fan_in = tensor.size(1)
        elif ndim in (3, 4):
            # conv weight: fan-in is in_channels * prod(kernel dims)
            fan_in = tensor.size(1) * math.prod(tensor.size()[2:])
        else:
            raise NotImplementedError
        tensor.normal_(0, 1.0 / math.sqrt(fan_in))
def uniform_init_parameters(module):
    """Initialize ``module``'s parameters from U(-0.1, 0.1).

    Biases (1-D) and linear weights (2-D) are re-drawn uniformly; conv
    weights (3-D/4-D) keep the PyTorch default initialization.

    Raises:
        NotImplementedError: for parameters of any other dimensionality.
    """
    for param in module.parameters():
        ndim = param.data.dim()
        if ndim in (1, 2):
            # bias or linear weight
            param.data.uniform_(-0.1, 0.1)
        elif ndim in (3, 4):
            # conv weight: use the pytorch default
            continue
        else:
            raise NotImplementedError
def set_forget_bias_to_one(bias):
    """Fill the forget-gate slice of a concatenated gate bias with ones.

    The bias is treated as four equal gate sections; the second quarter
    (indices ``n // 4`` to ``n // 2``) is set to 1.0 and the rest is left
    untouched.

    :param torch.Tensor bias: concatenated gate bias vector of size n
    """
    total = bias.size(0)
    forget_gate = slice(total // 4, total // 2)
    bias.data[forget_gate].fill_(1.0)
| 1,561 | 26.892857 | 67 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_mt_transformer.py | # Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer text translation model (pytorch)."""
import logging
import math
from argparse import Namespace
import numpy as np
import torch
from espnet.nets.e2e_asr_common import end_detect
from espnet.nets.e2e_mt_common import ErrorCalculator
from espnet.nets.mt_interface import MTInterface
from espnet.nets.pytorch_backend.e2e_mt import Reporter
from espnet.nets.pytorch_backend.nets_utils import (
get_subsample,
make_pad_mask,
th_accuracy,
to_device,
)
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.argument import ( # noqa: H301
add_arguments_transformer_common,
)
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask, target_mask
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
from espnet.utils.fill_missing_args import fill_missing_args
class E2E(MTInterface, torch.nn.Module):
    """Transformer-based end-to-end text translation model.

    :param int idim: dimension of inputs
    :param int odim: dimension of outputs
    :param Namespace args: argument Namespace containing options
    """
    @staticmethod
    def add_arguments(parser):
        """Add Transformer-specific command line arguments."""
        group = parser.add_argument_group("transformer model setting")
        group = add_arguments_transformer_common(group)
        return parser
    @property
    def attention_plot_class(self):
        """Return PlotAttentionReport."""
        return PlotAttentionReport
    def __init__(self, idim, odim, args, ignore_id=-1):
        """Construct an E2E object.

        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        :param int ignore_id: label id to ignore in loss computation
        """
        torch.nn.Module.__init__(self)
        # fill missing arguments for compatibility
        args = fill_missing_args(args, self.add_arguments)
        if args.transformer_attn_dropout_rate is None:
            args.transformer_attn_dropout_rate = args.dropout_rate
        self.encoder = Encoder(
            idim=idim,
            selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            conv_wshare=args.wshare,
            conv_kernel_length=args.ldconv_encoder_kernel_length,
            conv_usebias=args.ldconv_usebias,
            linear_units=args.eunits,
            num_blocks=args.elayers,
            input_layer="embed",
            dropout_rate=args.dropout_rate,
            positional_dropout_rate=args.dropout_rate,
            attention_dropout_rate=args.transformer_attn_dropout_rate,
        )
        self.decoder = Decoder(
            odim=odim,
            selfattention_layer_type=args.transformer_decoder_selfattn_layer_type,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            conv_wshare=args.wshare,
            conv_kernel_length=args.ldconv_decoder_kernel_length,
            conv_usebias=args.ldconv_usebias,
            linear_units=args.dunits,
            num_blocks=args.dlayers,
            dropout_rate=args.dropout_rate,
            positional_dropout_rate=args.dropout_rate,
            self_attention_dropout_rate=args.transformer_attn_dropout_rate,
            src_attention_dropout_rate=args.transformer_attn_dropout_rate,
        )
        self.pad = 0  # use <blank> for padding
        self.sos = odim - 1
        self.eos = odim - 1
        self.odim = odim
        self.ignore_id = ignore_id
        self.subsample = get_subsample(args, mode="mt", arch="transformer")
        self.reporter = Reporter()
        # tie source and target embeddings
        if args.tie_src_tgt_embedding:
            if idim != odim:
                raise ValueError(
                    "When using tie_src_tgt_embedding, idim and odim must be equal."
                )
            self.encoder.embed[0].weight = self.decoder.embed[0].weight
        # tie embeddings and the classifier
        if args.tie_classifier:
            self.decoder.output_layer.weight = self.decoder.embed[0].weight
        self.criterion = LabelSmoothingLoss(
            self.odim,
            self.ignore_id,
            args.lsm_weight,
            args.transformer_length_normalized_loss,
        )
        self.normalize_length = args.transformer_length_normalized_loss  # for PPL
        self.reset_parameters(args)
        self.adim = args.adim
        self.error_calculator = ErrorCalculator(
            args.char_list, args.sym_space, args.sym_blank, args.report_bleu
        )
        self.rnnlm = None
        # multilingual MT related
        self.multilingual = args.multilingual
    def reset_parameters(self, args):
        """Initialize parameters (embedding weights are scaled by adim**-0.5)."""
        initialize(self, args.transformer_init)
        torch.nn.init.normal_(
            self.encoder.embed[0].weight, mean=0, std=args.adim**-0.5
        )
        # zero out the padding embedding row
        torch.nn.init.constant_(self.encoder.embed[0].weight[self.pad], 0)
        torch.nn.init.normal_(
            self.decoder.embed[0].weight, mean=0, std=args.adim**-0.5
        )
        torch.nn.init.constant_(self.decoder.embed[0].weight[self.pad], 0)
    def forward(self, xs_pad, ilens, ys_pad):
        """E2E forward.

        :param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax)
        :param torch.Tensor ilens: batch of lengths of source sequences (B)
        :param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
        :return: attention loss value
        :rtype: torch.Tensor
        :return: accuracy in attention decoder
        :rtype: float
        """
        # 1. forward encoder
        xs_pad = xs_pad[:, : max(ilens)]  # for data parallel
        src_mask = (~make_pad_mask(ilens.tolist())).to(xs_pad.device).unsqueeze(-2)
        xs_pad, ys_pad = self.target_forcing(xs_pad, ys_pad)
        hs_pad, hs_mask = self.encoder(xs_pad, src_mask)
        # 2. forward decoder
        ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
        ys_mask = target_mask(ys_in_pad, self.ignore_id)
        pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
        # 3. compute attention loss
        self.loss = self.criterion(pred_pad, ys_out_pad)
        self.acc = th_accuracy(
            pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id
        )
        # 4. compute corpus-level bleu in a mini-batch
        if self.training:
            self.bleu = None
        else:
            ys_hat = pred_pad.argmax(dim=-1)
            self.bleu = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
        loss_data = float(self.loss)
        if self.normalize_length:
            # loss is already per-token: perplexity is exp(loss)
            self.ppl = np.exp(loss_data)
        else:
            # convert the per-sentence loss back to per-token before exp
            batch_size = ys_out_pad.size(0)
            ys_out_pad = ys_out_pad.view(-1)
            ignore = ys_out_pad == self.ignore_id  # (B*T,)
            total_n_tokens = len(ys_out_pad) - ignore.sum().item()
            self.ppl = np.exp(loss_data * batch_size / total_n_tokens)
        if not math.isnan(loss_data):
            self.reporter.report(loss_data, self.acc, self.ppl, self.bleu)
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
    def scorers(self):
        """Scorers."""
        return dict(decoder=self.decoder)
    def encode(self, xs):
        """Encode source sentences.

        :param xs: source token id sequence (T,)
        :return: encoder output sequence (T, adim)
        """
        self.eval()
        xs = torch.as_tensor(xs).unsqueeze(0)
        enc_output, _ = self.encoder(xs, None)
        return enc_output.squeeze(0)
    def target_forcing(self, xs_pad, ys_pad=None, tgt_lang=None):
        """Prepend target language IDs to source sentences for multilingual MT.

        These tags are prepended in source/target sentences as pre-processing.
        No-op when ``self.multilingual`` is False.

        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
        :param torch.Tensor ys_pad: batch of padded target sequences, whose
            first token is the language ID (training path)
        :param int tgt_lang: explicit target language ID (inference path)
        :return: source text with the target language ID prepended
        :rtype: torch.Tensor
        :return: target text without language IDs
        :rtype: torch.Tensor
        :raises ValueError: if multilingual and neither ys_pad nor tgt_lang given
        """
        if self.multilingual:
            xs_pad = xs_pad[:, 1:]  # remove source language IDs here
            if ys_pad is not None:
                # remove language ID in the beginning
                lang_ids = ys_pad[:, 0].unsqueeze(1)
                ys_pad = ys_pad[:, 1:]
            elif tgt_lang is not None:
                lang_ids = xs_pad.new_zeros(xs_pad.size(0), 1).fill_(tgt_lang)
            else:
                raise ValueError("Set ys_pad or tgt_lang.")
            # prepend target language ID to source sentences
            xs_pad = torch.cat([lang_ids, xs_pad], dim=1)
        return xs_pad, ys_pad
    def translate(self, x, trans_args, char_list=None):
        """Translate source text with batched beam search.

        :param list x: input source text feature (T,)
        :param Namespace trans_args: argument Namespace containing options
        :param list char_list: list of characters
        :return: N-best decoding results
        :rtype: list
        """
        self.eval()  # NOTE: this is important because self.encode() is not used
        assert isinstance(x, list)
        # make a utt list (1) to use the same interface for encoder
        if self.multilingual:
            x = to_device(
                self, torch.from_numpy(np.fromiter(map(int, x[0][1:]), dtype=np.int64))
            )
        else:
            x = to_device(
                self, torch.from_numpy(np.fromiter(map(int, x[0]), dtype=np.int64))
            )
        logging.info("input lengths: " + str(x.size(0)))
        xs_pad = x.unsqueeze(0)
        tgt_lang = None
        if trans_args.tgt_lang:
            tgt_lang = char_list.index(trans_args.tgt_lang)
        xs_pad, _ = self.target_forcing(xs_pad, tgt_lang=tgt_lang)
        h, _ = self.encoder(xs_pad, None)
        logging.info("encoder output lengths: " + str(h.size(1)))
        # search params
        beam = trans_args.beam_size
        penalty = trans_args.penalty
        if trans_args.maxlenratio == 0:
            maxlen = h.size(1)
        else:
            # maxlen >= 1
            maxlen = max(1, int(trans_args.maxlenratio * h.size(1)))
        minlen = int(trans_args.minlenratio * h.size(1))
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))
        # initialize hypothesis
        hyp = {"score": 0.0, "yseq": [self.sos]}
        hyps = [hyp]
        ended_hyps = []
        for i in range(maxlen):
            logging.debug("position " + str(i))
            # batchfy: score all active hypotheses in one decoder call
            ys = h.new_zeros((len(hyps), i + 1), dtype=torch.int64)
            for j, hyp in enumerate(hyps):
                ys[j, :] = torch.tensor(hyp["yseq"])
            ys_mask = subsequent_mask(i + 1).unsqueeze(0).to(h.device)
            local_scores = self.decoder.forward_one_step(
                ys, ys_mask, h.repeat([len(hyps), 1, 1])
            )[0]
            hyps_best_kept = []
            # NOTE(review): the inner `for j in range(beam)` shadows the
            # enumerate index `j`; harmless since the slice below is taken
            # first, but worth renaming for clarity.
            for j, hyp in enumerate(hyps):
                local_best_scores, local_best_ids = torch.topk(
                    local_scores[j : j + 1], beam, dim=1
                )
                for j in range(beam):
                    new_hyp = {}
                    new_hyp["score"] = hyp["score"] + float(local_best_scores[0, j])
                    new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"]))
                    new_hyp["yseq"][: len(hyp["yseq"])] = hyp["yseq"]
                    new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[0, j])
                    # will be (2 x beam) hyps at most
                    hyps_best_kept.append(new_hyp)
                hyps_best_kept = sorted(
                    hyps_best_kept, key=lambda x: x["score"], reverse=True
                )[:beam]
            # sort and get nbest
            hyps = hyps_best_kept
            logging.debug("number of pruned hypothes: " + str(len(hyps)))
            if char_list is not None:
                logging.debug(
                    "best hypo: "
                    + "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]])
                )
            # add eos in the final loop to avoid that there are no ended hyps
            if i == maxlen - 1:
                logging.info("adding <eos> in the last position in the loop")
                for hyp in hyps:
                    hyp["yseq"].append(self.eos)
            # Move ended hypotheses to the final list and drop them from the
            # active set (afterwards the active set may hold fewer than beam).
            remained_hyps = []
            for hyp in hyps:
                if hyp["yseq"][-1] == self.eos:
                    # only store the sequence that has more than minlen outputs
                    # also add penalty
                    if len(hyp["yseq"]) > minlen:
                        hyp["score"] += (i + 1) * penalty
                        ended_hyps.append(hyp)
                else:
                    remained_hyps.append(hyp)
            # end detection
            if end_detect(ended_hyps, i) and trans_args.maxlenratio == 0.0:
                logging.info("end detected at %d", i)
                break
            hyps = remained_hyps
            if len(hyps) > 0:
                logging.debug("remeined hypothes: " + str(len(hyps)))
            else:
                logging.info("no hypothesis. Finish decoding.")
                break
            if char_list is not None:
                for hyp in hyps:
                    logging.debug(
                        "hypo: " + "".join([char_list[int(x)] for x in hyp["yseq"][1:]])
                    )
            logging.debug("number of ended hypothes: " + str(len(ended_hyps)))
        nbest_hyps = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[
            : min(len(ended_hyps), trans_args.nbest)
        ]
        # check number of hypotheses; retry with a relaxed minlenratio if empty
        if len(nbest_hyps) == 0:
            logging.warning(
                "there is no N-best results, perform translation "
                "again with smaller minlenratio."
            )
            # should copy because Namespace will be overwritten globally
            trans_args = Namespace(**vars(trans_args))
            trans_args.minlenratio = max(0.0, trans_args.minlenratio - 0.1)
            return self.translate(x, trans_args, char_list)
        logging.info("total log probability: " + str(nbest_hyps[0]["score"]))
        logging.info(
            "normalized log probability: "
            + str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"]))
        )
        return nbest_hyps
    def calculate_all_attentions(self, xs_pad, ilens, ys_pad):
        """E2E attention calculation.

        Runs a full forward pass in eval mode and collects the attention
        weights cached on every MultiHeadedAttention module.

        :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
        :param torch.Tensor ilens: batch of lengths of input sequences (B)
        :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
        :return: attention weights (B, H, Lmax, Tmax)
        :rtype: float ndarray
        """
        self.eval()
        with torch.no_grad():
            self.forward(xs_pad, ilens, ys_pad)
        ret = dict()
        for name, m in self.named_modules():
            if isinstance(m, MultiHeadedAttention) and m.attn is not None:
                ret[name] = m.attn.cpu().numpy()
        self.train()
        return ret
| 16,007 | 37.854369 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/nets_utils.py | # -*- coding: utf-8 -*-
"""Network related utility tools."""
import logging
from typing import Dict
import numpy as np
import torch
def to_device(m, x):
    """Send tensor into the device of the module.

    Args:
        m (torch.nn.Module or torch.Tensor): Torch module (its first
            parameter's device is used) or a tensor.
        x (Tensor): Torch tensor.

    Returns:
        Tensor: Torch tensor located in the same place as torch module.

    Raises:
        TypeError: If ``m`` is neither a ``torch.nn.Module`` nor a
            ``torch.Tensor``.
    """
    if isinstance(m, torch.nn.Module):
        device = next(m.parameters()).device
    elif isinstance(m, torch.Tensor):
        device = m.device
    else:
        # fix: error message previously read "bot got"
        raise TypeError(
            "Expected torch.nn.Module or torch.tensor, " f"but got: {type(m)}"
        )
    return x.to(device)
def pad_list(xs, pad_value):
    """Right-pad a list of tensors to a common length along dim 0.

    Args:
        xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value for padding.

    Returns:
        Tensor: Padded tensor (B, Tmax, `*`).

    Examples:
        >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
        >>> pad_list(x, 0)
        tensor([[1., 1., 1., 1.],
                [1., 1., 0., 0.],
                [1., 0., 0., 0.]])
    """
    longest = max(x.size(0) for x in xs)
    trailing_dims = xs[0].size()[1:]
    # allocate the full (B, Tmax, *) buffer pre-filled with pad_value
    padded = xs[0].new(len(xs), longest, *trailing_dims).fill_(pad_value)
    for row, x in zip(padded, xs):
        # each row is a view into `padded`, so this writes in place
        row[: x.size(0)] = x
    return padded
def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):
    """Make mask tensor containing indices of padded part.

    The mask is ``True`` (1) at padded positions and ``False`` (0) at
    valid positions.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor.
            If set, masks will be the same shape as this tensor.
        length_dim (int, optional): Dimension of ``xs`` corresponding to
            the time axis (must not be 0).
        maxlen (int, optional): Explicit mask length; only valid when
            ``xs`` is None and must cover the longest sequence.

    Returns:
        Tensor: Mask tensor containing indices of padded part.
            dtype=torch.uint8 in PyTorch 1.2-
            dtype=torch.bool in PyTorch 1.2+ (including 1.2)

    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_pad_mask(lengths)
        masks = [[0, 0, 0, 0 ,0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]
    """
    if length_dim == 0:
        raise ValueError("length_dim cannot be 0: {}".format(length_dim))

    if not isinstance(lengths, list):
        lengths = lengths.long().tolist()
    batch = int(len(lengths))

    # Decide the time length of the mask.
    if maxlen is None:
        maxlen = xs.size(length_dim) if xs is not None else int(max(lengths))
    else:
        assert xs is None
        assert maxlen >= int(max(lengths))

    # A position is padded exactly when position index >= sequence length.
    positions = torch.arange(0, maxlen, dtype=torch.int64)
    positions = positions.unsqueeze(0).expand(batch, maxlen)
    limits = positions.new(lengths).unsqueeze(-1)
    mask = positions >= limits

    if xs is not None:
        assert xs.size(0) == batch, (xs.size(0), batch)
        if length_dim < 0:
            length_dim = xs.dim() + length_dim
        # Insert singleton axes everywhere except the batch and time axes so
        # the (B, T) mask can be expanded to xs's full shape,
        # e.g. index = (:, None, ..., None, :, None, ..., None).
        broadcast_index = tuple(
            slice(None) if axis in (0, length_dim) else None
            for axis in range(xs.dim())
        )
        mask = mask[broadcast_index].expand_as(xs).to(xs.device)
    return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
    """Make mask tensor containing indices of non-padded part.

    This is the logical negation of :func:`make_pad_mask`: positions inside
    each sequence length are truthy and padded positions are falsy.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor.
            If set, masks will be the same shape as this tensor.
        length_dim (int, optional): Dimension indicator of the above tensor.

    Returns:
        ByteTensor: mask tensor containing indices of non-padded part.
            dtype=torch.uint8 in PyTorch 1.2-
            dtype=torch.bool in PyTorch 1.2+ (including 1.2)

    Examples:
        With only lengths.

        >>> lengths = [5, 3, 2]
        >>> make_non_pad_mask(lengths)
        masks = [[1, 1, 1, 1 ,1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]

        With the reference tensor.

        >>> xs = torch.zeros((3, 2, 4))
        >>> make_non_pad_mask(lengths, xs)
        tensor([[[1, 1, 1, 1],
                 [1, 1, 1, 1]],
                [[1, 1, 1, 0],
                 [1, 1, 1, 0]],
                [[1, 1, 0, 0],
                 [1, 1, 0, 0]]], dtype=torch.uint8)

        With the reference tensor and dimension indicator.

        >>> xs = torch.zeros((3, 6, 6))
        >>> make_non_pad_mask(lengths, xs, 1)
        tensor([[[1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [0, 0, 0, 0, 0, 0]],
                [[1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0]],
                [[1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
    """
    # Build the padding mask and invert it element-wise.
    pad_mask = make_pad_mask(lengths, xs, length_dim)
    return ~pad_mask
def mask_by_length(xs, lengths, fill=0):
    """Replace the padded tail of each sequence in ``xs`` with ``fill``.

    Args:
        xs (Tensor): Batch of input tensor (B, `*`).
        lengths (LongTensor or List): Batch of lengths (B,).
        fill (int or float): Value to fill masked part.

    Returns:
        Tensor: Batch of masked input tensor (B, `*`).

    Examples:
        >>> x = torch.arange(5).repeat(3, 1) + 1
        >>> x
        tensor([[1, 2, 3, 4, 5],
                [1, 2, 3, 4, 5],
                [1, 2, 3, 4, 5]])
        >>> lengths = [5, 3, 2]
        >>> mask_by_length(x, lengths)
        tensor([[1, 2, 3, 4, 5],
                [1, 2, 3, 0, 0],
                [1, 2, 0, 0, 0]])
    """
    assert xs.size(0) == len(lengths)
    # Start from an all-`fill` tensor (same shape/dtype/device as `xs`)
    # and copy the valid prefix of every sequence back in.
    masked = torch.full_like(xs, fill)
    for batch_idx, seq_len in enumerate(lengths):
        masked[batch_idx, :seq_len] = xs[batch_idx, :seq_len]
    return masked
def th_accuracy(pad_outputs, pad_targets, ignore_label):
    """Calculate token-level accuracy, skipping padded positions.

    Args:
        pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
        pad_targets (LongTensor): Target label tensors (B, Lmax, D).
        ignore_label (int): Ignore label id.

    Returns:
        float: Accuracy value (0.0 - 1.0).
    """
    batch, length = pad_targets.size(0), pad_targets.size(1)
    # Recover the (B, Lmax) prediction ids from the flattened logits.
    pred_ids = pad_outputs.view(batch, length, pad_outputs.size(1)).argmax(2)
    valid = pad_targets != ignore_label
    # Count matches only where the target is not the ignore label.
    n_correct = (pred_ids.masked_select(valid) == pad_targets.masked_select(valid)).sum()
    n_total = valid.sum()
    return float(n_correct) / float(n_total)
def to_torch_tensor(x):
    """Change to torch.Tensor or ComplexTensor from numpy.ndarray.

    Args:
        x: Inputs. It should be one of numpy.ndarray, Tensor, ComplexTensor, and dict.

    Returns:
        Tensor or ComplexTensor: Type converted inputs.

    Examples:
        >>> xs = np.ones(3, dtype=np.float32)
        >>> xs = to_torch_tensor(xs)
        tensor([1., 1., 1.])
        >>> xs = torch.ones(3, 4, 5)
        >>> assert to_torch_tensor(xs) is xs
        >>> xs = {'real': xs, 'imag': xs}
        >>> to_torch_tensor(xs)
        ComplexTensor(
        Real:
        tensor([1., 1., 1.])
        Imag;
        tensor([1., 1., 1.])
        )
    """
    # torch.Tensor passes through untouched.
    if isinstance(x, torch.Tensor):
        return x
    # numpy arrays: complex dtypes become ComplexTensor, the rest are wrapped.
    if isinstance(x, np.ndarray):
        if x.dtype.kind != "c":
            return torch.from_numpy(x)
        # Dynamically importing because torch_complex requires python3
        from torch_complex.tensor import ComplexTensor

        return ComplexTensor(x)
    # A {'real': ..., 'imag': ...} dict becomes a ComplexTensor as well.
    if isinstance(x, dict):
        # Dynamically importing because torch_complex requires python3
        from torch_complex.tensor import ComplexTensor

        if "real" not in x or "imag" not in x:
            raise ValueError("has 'real' and 'imag' keys: {}".format(list(x)))
        # Relative importing because of using python3 syntax
        return ComplexTensor(x["real"], x["imag"])
    # Anything else is rejected, unless it already is a ComplexTensor.
    error = (
        "x must be numpy.ndarray, torch.Tensor or a dict like "
        "{{'real': torch.Tensor, 'imag': torch.Tensor}}, "
        "but got {}".format(type(x))
    )
    try:
        from torch_complex.tensor import ComplexTensor
    except Exception:
        # If PY2
        raise ValueError(error)
    else:
        # If PY3
        if isinstance(x, ComplexTensor):
            return x
        raise ValueError(error)
def get_subsample(train_args, mode, arch):
    """Parse the subsampling factors from the args for the specified `mode` and `arch`.

    Args:
        train_args: argument Namespace containing options.
        mode: one of ('asr', 'mt', 'st')
        arch: one of ('rnn', 'rnn-t', 'rnn_mix', 'rnn_mulenc', 'transformer')

    Returns:
        np.ndarray / List[np.ndarray]: subsampling factors
            (a list of arrays only for the 'rnn_mulenc' architecture).

    Raises:
        ValueError: If the (mode, arch) combination is not supported.
    """
    if arch == "transformer":
        # Transformer architectures do not use per-layer RNN subsampling.
        return np.array([1])
    elif mode == "mt" and arch == "rnn":
        # +1 means input (+1) and layers outputs (train_args.elayer)
        subsample = np.ones(train_args.elayers + 1, dtype=np.int64)
        logging.warning("Subsampling is not performed for machine translation.")
        logging.info("subsample: " + " ".join([str(x) for x in subsample]))
        return subsample
    elif (mode == "asr" and arch in ("rnn", "rnn-t")) or (
        mode == "st" and arch == "rnn"
    ):
        # NOTE: the original condition also tested (mode == "mt" and
        # arch == "rnn"), but that combination is unreachable here because it
        # is already handled by the branch above; the dead clause was removed.
        subsample = np.ones(train_args.elayers + 1, dtype=np.int64)
        if train_args.etype.endswith("p") and not train_args.etype.startswith("vgg"):
            # Projection-style RNN encoders ("*p") honor the user-specified
            # "--subsample a_b_c..." factors, one per layer output.
            ss = train_args.subsample.split("_")
            for j in range(min(train_args.elayers + 1, len(ss))):
                subsample[j] = int(ss[j])
        else:
            logging.warning(
                "Subsampling is not performed for vgg*. "
                "It is performed in max pooling layers at CNN."
            )
        logging.info("subsample: " + " ".join([str(x) for x in subsample]))
        return subsample
    elif mode == "asr" and arch == "rnn_mix":
        # Mixture model: speaker-dependent layers (elayers_sd) precede the
        # shared recognition layers (elayers).
        subsample = np.ones(
            train_args.elayers_sd + train_args.elayers + 1, dtype=np.int64
        )
        if train_args.etype.endswith("p") and not train_args.etype.startswith("vgg"):
            ss = train_args.subsample.split("_")
            for j in range(
                min(train_args.elayers_sd + train_args.elayers + 1, len(ss))
            ):
                subsample[j] = int(ss[j])
        else:
            logging.warning(
                "Subsampling is not performed for vgg*. "
                "It is performed in max pooling layers at CNN."
            )
        logging.info("subsample: " + " ".join([str(x) for x in subsample]))
        return subsample
    elif mode == "asr" and arch == "rnn_mulenc":
        # Multi-encoder setup: elayers/etype/subsample are per-encoder lists,
        # so one factor array is produced per encoder.
        subsample_list = []
        for idx in range(train_args.num_encs):
            subsample = np.ones(train_args.elayers[idx] + 1, dtype=np.int64)
            if train_args.etype[idx].endswith("p") and not train_args.etype[
                idx
            ].startswith("vgg"):
                ss = train_args.subsample[idx].split("_")
                for j in range(min(train_args.elayers[idx] + 1, len(ss))):
                    subsample[j] = int(ss[j])
            else:
                logging.warning(
                    "Encoder %d: Subsampling is not performed for vgg*. "
                    "It is performed in max pooling layers at CNN.",
                    idx + 1,
                )
            logging.info("subsample: " + " ".join([str(x) for x in subsample]))
            subsample_list.append(subsample)
        return subsample_list
    else:
        raise ValueError("Invalid options: mode={}, arch={}".format(mode, arch))
def rename_state_dict(
    old_prefix: str, new_prefix: str, state_dict: Dict[str, torch.Tensor]
):
    """Replace keys of old prefix with new prefix in state dict."""
    # Collect the matching keys first so the dict is never mutated while
    # being iterated.
    matching = [key for key in state_dict if key.startswith(old_prefix)]
    if matching:
        logging.warning(f"Rename: {old_prefix} -> {new_prefix}")
    for key in matching:
        # NOTE: str.replace substitutes every occurrence of old_prefix in the
        # key, not only the leading one (same as the historical behavior).
        state_dict[key.replace(old_prefix, new_prefix)] = state_dict.pop(key)
def get_activation(act):
    """Return an instance of the activation module named ``act``.

    Args:
        act (str): One of "hardtanh", "tanh", "relu", "selu" or "swish".

    Returns:
        torch.nn.Module: A freshly constructed activation module.

    Raises:
        KeyError: If ``act`` does not name a known activation.
    """
    if act == "swish":
        # Import lazily and only on this branch so that requesting a plain
        # torch activation does not pull in the conformer package (the
        # original imported Swish unconditionally on every call).
        from espnet.nets.pytorch_backend.conformer.swish import Swish

        return Swish()
    activation_funcs = {
        "hardtanh": torch.nn.Hardtanh,
        "tanh": torch.nn.Tanh,
        "relu": torch.nn.ReLU,
        "selu": torch.nn.SELU,
    }
    return activation_funcs[act]()
| 16,551 | 31.84127 | 87 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_asr_maskctc.py | # Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Waseda University (Yosuke Higuchi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
Mask CTC based non-autoregressive speech recognition model (pytorch).
See https://arxiv.org/abs/2005.08700 for the detail.
"""
import logging
import math
from distutils.util import strtobool
from itertools import groupby
import numpy
import torch
from espnet.nets.pytorch_backend.conformer.argument import ( # noqa: H301
add_arguments_conformer_common,
)
from espnet.nets.pytorch_backend.conformer.encoder import Encoder
from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD
from espnet.nets.pytorch_backend.e2e_asr_transformer import E2E as E2ETransformer
from espnet.nets.pytorch_backend.maskctc.add_mask_token import mask_uniform
from espnet.nets.pytorch_backend.maskctc.mask import square_mask
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask, th_accuracy
class E2E(E2ETransformer):
    """E2E module for Mask CTC.

    Non-autoregressive model that refines a greedy CTC hypothesis by
    iteratively re-predicting low-confidence tokens with an MLM-style decoder.

    :param int idim: dimension of inputs
    :param int odim: dimension of outputs
    :param Namespace args: argument Namespace containing options
    """
    @staticmethod
    def add_arguments(parser):
        """Add arguments (Transformer arguments plus Mask CTC ones)."""
        E2ETransformer.add_arguments(parser)
        E2E.add_maskctc_arguments(parser)
        return parser
    @staticmethod
    def add_maskctc_arguments(parser):
        """Add arguments for maskctc model."""
        group = parser.add_argument_group("maskctc specific setting")
        group.add_argument(
            "--maskctc-use-conformer-encoder",
            default=False,
            type=strtobool,
        )
        # Conformer options are reused when the conformer encoder is enabled.
        group = add_arguments_conformer_common(group)
        return parser
    def __init__(self, idim, odim, args, ignore_id=-1):
        """Construct an E2E object.

        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        """
        odim += 1  # for the mask token
        super().__init__(idim, odim, args, ignore_id)
        # alpha == 1.0 (pure CTC) is excluded because the MLM decoder branch
        # is always used in forward().
        assert 0.0 <= self.mtlalpha < 1.0, "mtlalpha should be [0.0, 1.0)"
        # <mask> takes the last id; <sos>/<eos> share the one just before it.
        self.mask_token = odim - 1
        self.sos = odim - 2
        self.eos = odim - 2
        self.odim = odim
        self.intermediate_ctc_weight = args.intermediate_ctc_weight
        self.intermediate_ctc_layers = None
        if args.intermediate_ctc_layer != "":
            # Comma-separated list of encoder layer indices with extra CTC.
            self.intermediate_ctc_layers = [
                int(i) for i in args.intermediate_ctc_layer.split(",")
            ]
        if args.maskctc_use_conformer_encoder:
            if args.transformer_attn_dropout_rate is None:
                args.transformer_attn_dropout_rate = args.conformer_dropout_rate
            # Replace the Transformer encoder built by super().__init__ with
            # a Conformer encoder.
            self.encoder = Encoder(
                idim=idim,
                attention_dim=args.adim,
                attention_heads=args.aheads,
                linear_units=args.eunits,
                num_blocks=args.elayers,
                input_layer=args.transformer_input_layer,
                dropout_rate=args.dropout_rate,
                positional_dropout_rate=args.dropout_rate,
                attention_dropout_rate=args.transformer_attn_dropout_rate,
                pos_enc_layer_type=args.transformer_encoder_pos_enc_layer_type,
                selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
                activation_type=args.transformer_encoder_activation_type,
                macaron_style=args.macaron_style,
                use_cnn_module=args.use_cnn_module,
                cnn_module_kernel=args.cnn_module_kernel,
                stochastic_depth_rate=args.stochastic_depth_rate,
                intermediate_layers=self.intermediate_ctc_layers,
            )
        self.reset_parameters(args)
    def forward(self, xs_pad, ilens, ys_pad):
        """E2E forward.

        :param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of source sequences (B)
        :param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
        :return: ctc loss value
        :rtype: torch.Tensor
        :return: attention loss value
        :rtype: torch.Tensor
        :return: accuracy in attention decoder
        :rtype: float
        """
        # 1. forward encoder
        xs_pad = xs_pad[:, : max(ilens)]  # for data parallel
        src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2)
        if self.intermediate_ctc_layers:
            hs_pad, hs_mask, hs_intermediates = self.encoder(xs_pad, src_mask)
        else:
            hs_pad, hs_mask = self.encoder(xs_pad, src_mask)
        self.hs_pad = hs_pad
        # 2. forward decoder
        # Randomly replace target tokens with <mask>; the decoder is trained
        # to recover the masked positions (MLM objective).
        ys_in_pad, ys_out_pad = mask_uniform(
            ys_pad, self.mask_token, self.eos, self.ignore_id
        )
        ys_mask = square_mask(ys_in_pad, self.eos)
        pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
        self.pred_pad = pred_pad
        # 3. compute attention loss
        loss_att = self.criterion(pred_pad, ys_out_pad)
        self.acc = th_accuracy(
            pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id
        )
        # 4. compute ctc loss
        loss_ctc, cer_ctc = None, None
        loss_intermediate_ctc = 0.0
        if self.mtlalpha > 0:
            batch_size = xs_pad.size(0)
            hs_len = hs_mask.view(batch_size, -1).sum(1)
            loss_ctc = self.ctc(hs_pad.view(batch_size, -1, self.adim), hs_len, ys_pad)
            if self.error_calculator is not None:
                ys_hat = self.ctc.argmax(hs_pad.view(batch_size, -1, self.adim)).data
                cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
            # for visualization
            if not self.training:
                self.ctc.softmax(hs_pad)
            if self.intermediate_ctc_weight > 0 and self.intermediate_ctc_layers:
                for hs_intermediate in hs_intermediates:
                    # assuming hs_intermediates and hs_pad has same length / padding
                    loss_inter = self.ctc(
                        hs_intermediate.view(batch_size, -1, self.adim), hs_len, ys_pad
                    )
                    loss_intermediate_ctc += loss_inter
                # Average across the intermediate layers.
                loss_intermediate_ctc /= len(self.intermediate_ctc_layers)
        # 5. compute cer/wer
        if self.training or self.error_calculator is None or self.decoder is None:
            cer, wer = None, None
        else:
            ys_hat = pred_pad.argmax(dim=-1)
            cer, wer = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = None
        else:
            # Weighted sum: CTC + intermediate CTC + attention (MLM) losses.
            self.loss = (
                alpha * loss_ctc
                + self.intermediate_ctc_weight * loss_intermediate_ctc
                + (1 - alpha - self.intermediate_ctc_weight) * loss_att
            )
            loss_att_data = float(loss_att)
            loss_ctc_data = float(loss_ctc)
        loss_data = float(self.loss)
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_ctc_data, loss_att_data, self.acc, cer_ctc, cer, wer, loss_data
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
    def recognize(self, x, recog_args, char_list=None, rnnlm=None):
        """Recognize input speech.

        :param ndnarray x: input acoustic feature (B, T, D) or (T, D)
        :param Namespace recog_args: argment Namespace contraining options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: decoding result
        :rtype: list
        """
        def num2str(char_list, mask_token, mask_char="_"):
            # Render token ids as a string, showing masked slots as `_`.
            def f(yl):
                cl = [char_list[y] if y != mask_token else mask_char for y in yl]
                return "".join(cl).replace("<space>", " ")
            return f
        n2s = num2str(char_list, self.mask_token)
        self.eval()
        h = self.encode(x).unsqueeze(0)
        input_len = h.squeeze(0)
        logging.info("input lengths: " + str(input_len.size(0)))
        # greedy ctc outputs
        ctc_probs, ctc_ids = torch.exp(self.ctc.log_softmax(h)).max(dim=-1)
        # Collapse CTC frame repeats, then drop blanks (id 0).
        y_hat = torch.stack([x[0] for x in groupby(ctc_ids[0])])
        y_idx = torch.nonzero(y_hat != 0).squeeze(-1)
        # calculate token-level ctc probabilities by taking
        # the maximum probability of consecutive frames with
        # the same ctc symbols
        probs_hat = []
        cnt = 0
        for i, y in enumerate(y_hat.tolist()):
            probs_hat.append(-1)
            while cnt < ctc_ids.shape[1] and y == ctc_ids[0][cnt]:
                if probs_hat[i] < ctc_probs[0][cnt]:
                    probs_hat[i] = ctc_probs[0][cnt].item()
                cnt += 1
        probs_hat = torch.from_numpy(numpy.array(probs_hat))
        # mask ctc outputs based on ctc probabilities
        p_thres = recog_args.maskctc_probability_threshold
        mask_idx = torch.nonzero(probs_hat[y_idx] < p_thres).squeeze(-1)
        confident_idx = torch.nonzero(probs_hat[y_idx] >= p_thres).squeeze(-1)
        mask_num = len(mask_idx)
        # Start from an all-<mask> sequence and keep only confident tokens.
        y_in = torch.zeros(1, len(y_idx), dtype=torch.long) + self.mask_token
        y_in[0][confident_idx] = y_hat[y_idx][confident_idx]
        logging.info("ctc:{}".format(n2s(y_in[0].tolist())))
        # iterative decoding
        if not mask_num == 0:
            K = recog_args.maskctc_n_iterations
            # K <= 0 means "one token per iteration" (mask_num iterations).
            num_iter = K if mask_num >= K and K > 0 else mask_num
            for t in range(num_iter - 1):
                pred, _ = self.decoder(y_in, None, h, None)
                pred_score, pred_id = pred[0][mask_idx].max(dim=-1)
                # Commit only the top mask_num // num_iter most confident
                # predictions this round.
                cand = torch.topk(pred_score, mask_num // num_iter, -1)[1]
                y_in[0][mask_idx[cand]] = pred_id[cand]
                mask_idx = torch.nonzero(y_in[0] == self.mask_token).squeeze(-1)
                logging.info("msk:{}".format(n2s(y_in[0].tolist())))
            # predict leftover masks (|masks| < mask_num // num_iter)
            pred, pred_mask = self.decoder(y_in, None, h, None)
            y_in[0][mask_idx] = pred[0][mask_idx].argmax(dim=-1)
            logging.info("msk:{}".format(n2s(y_in[0].tolist())))
        ret = y_in.tolist()[0]
        hyp = {"score": 0.0, "yseq": [self.sos] + ret + [self.eos]}
        return [hyp]
| 10,739 | 37.633094 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_asr_transducer.py | """Transducer speech recognition model (pytorch)."""
import logging
import math
from argparse import ArgumentParser, Namespace
from dataclasses import asdict
from typing import List
import chainer
import numpy
import torch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.nets_utils import get_subsample, make_non_pad_mask
from espnet.nets.pytorch_backend.transducer.arguments import ( # noqa: H301
add_auxiliary_task_arguments,
add_custom_decoder_arguments,
add_custom_encoder_arguments,
add_custom_training_arguments,
add_decoder_general_arguments,
add_encoder_general_arguments,
add_rnn_decoder_arguments,
add_rnn_encoder_arguments,
add_transducer_arguments,
)
from espnet.nets.pytorch_backend.transducer.custom_decoder import CustomDecoder
from espnet.nets.pytorch_backend.transducer.custom_encoder import CustomEncoder
from espnet.nets.pytorch_backend.transducer.error_calculator import ErrorCalculator
from espnet.nets.pytorch_backend.transducer.initializer import initializer
from espnet.nets.pytorch_backend.transducer.rnn_decoder import RNNDecoder
from espnet.nets.pytorch_backend.transducer.rnn_encoder import encoder_for
from espnet.nets.pytorch_backend.transducer.transducer_tasks import TransducerTasks
from espnet.nets.pytorch_backend.transducer.utils import (
get_decoder_input,
valid_aux_encoder_output_layers,
)
from espnet.nets.pytorch_backend.transformer.attention import ( # noqa: H301
MultiHeadedAttention,
RelPositionMultiHeadedAttention,
)
from espnet.nets.pytorch_backend.transformer.mask import target_mask
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
from espnet.utils.fill_missing_args import fill_missing_args
class Reporter(chainer.Chain):
    """A chainer reporter wrapper for Transducer models."""
    def report(
        self,
        loss: float,
        loss_trans: float,
        loss_ctc: float,
        loss_aux_trans: float,
        loss_symm_kl_div: float,
        loss_lm: float,
        cer: float,
        wer: float,
    ):
        """Report every training observation to chainer.

        Args:
            loss: Model loss.
            loss_trans: Main Transducer loss.
            loss_ctc: CTC loss.
            loss_aux_trans: Auxiliary Transducer loss.
            loss_symm_kl_div: Symmetric KL-divergence loss.
            loss_lm: Label smoothing loss.
            cer: Character Error Rate.
            wer: Word Error Rate.
        """
        # Report each observation under its key, in the historical order.
        observations = (
            ("loss", loss),
            ("loss_trans", loss_trans),
            ("loss_ctc", loss_ctc),
            ("loss_lm", loss_lm),
            ("loss_aux_trans", loss_aux_trans),
            ("loss_symm_kl_div", loss_symm_kl_div),
            ("cer", cer),
            ("wer", wer),
        )
        for key, value in observations:
            chainer.reporter.report({key: value}, self)
        logging.info("loss:" + str(loss))
class E2E(ASRInterface, torch.nn.Module):
    """E2E module for Transducer models.

    Args:
        idim: Dimension of inputs.
        odim: Dimension of outputs.
        args: Namespace containing model options.
        ignore_id: Padding symbol ID.
        blank_id: Blank symbol ID.
        training: Whether the model is initialized in training or inference mode.
    """
    @staticmethod
    def add_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add arguments for Transducer model."""
        E2E.encoder_add_general_arguments(parser)
        E2E.encoder_add_rnn_arguments(parser)
        E2E.encoder_add_custom_arguments(parser)
        E2E.decoder_add_general_arguments(parser)
        E2E.decoder_add_rnn_arguments(parser)
        E2E.decoder_add_custom_arguments(parser)
        E2E.training_add_custom_arguments(parser)
        E2E.transducer_add_arguments(parser)
        E2E.auxiliary_task_add_arguments(parser)
        return parser
    @staticmethod
    def encoder_add_general_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add general arguments for encoder."""
        group = parser.add_argument_group("Encoder general arguments")
        group = add_encoder_general_arguments(group)
        return parser
    @staticmethod
    def encoder_add_rnn_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add arguments for RNN encoder."""
        group = parser.add_argument_group("RNN encoder arguments")
        group = add_rnn_encoder_arguments(group)
        return parser
    @staticmethod
    def encoder_add_custom_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add arguments for Custom encoder."""
        group = parser.add_argument_group("Custom encoder arguments")
        group = add_custom_encoder_arguments(group)
        return parser
    @staticmethod
    def decoder_add_general_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add general arguments for decoder."""
        group = parser.add_argument_group("Decoder general arguments")
        group = add_decoder_general_arguments(group)
        return parser
    @staticmethod
    def decoder_add_rnn_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add arguments for RNN decoder."""
        group = parser.add_argument_group("RNN decoder arguments")
        group = add_rnn_decoder_arguments(group)
        return parser
    @staticmethod
    def decoder_add_custom_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add arguments for Custom decoder."""
        group = parser.add_argument_group("Custom decoder arguments")
        group = add_custom_decoder_arguments(group)
        return parser
    @staticmethod
    def training_add_custom_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add arguments for Custom architecture training."""
        # NOTE(review): "archictecture" typo below is in the original
        # runtime string and is kept as-is.
        group = parser.add_argument_group("Training arguments for custom archictecture")
        group = add_custom_training_arguments(group)
        return parser
    @staticmethod
    def transducer_add_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add arguments for Transducer model."""
        group = parser.add_argument_group("Transducer model arguments")
        group = add_transducer_arguments(group)
        return parser
    @staticmethod
    def auxiliary_task_add_arguments(parser: ArgumentParser) -> ArgumentParser:
        """Add arguments for auxiliary task."""
        group = parser.add_argument_group("Auxiliary task arguments")
        group = add_auxiliary_task_arguments(group)
        return parser
    @property
    def attention_plot_class(self):
        """Get attention plot class."""
        return PlotAttentionReport
    def get_total_subsampling_factor(self) -> float:
        """Get total subsampling factor (conv front-end x RNN subsampling)."""
        if self.etype == "custom":
            return self.encoder.conv_subsampling_factor * int(
                numpy.prod(self.subsample)
            )
        else:
            return self.enc.conv_subsampling_factor * int(numpy.prod(self.subsample))
    def __init__(
        self,
        idim: int,
        odim: int,
        args: Namespace,
        ignore_id: int = -1,
        blank_id: int = 0,
        training: bool = True,
    ):
        """Construct an E2E object for Transducer model."""
        torch.nn.Module.__init__(self)
        args = fill_missing_args(args, self.add_arguments)
        self.is_transducer = True
        # Auxiliary encoder outputs are only needed while training with the
        # auxiliary Transducer loss enabled.
        self.use_auxiliary_enc_outputs = (
            True if (training and args.use_aux_transducer_loss) else False
        )
        self.subsample = get_subsample(
            args, mode="asr", arch="transformer" if args.etype == "custom" else "rnn-t"
        )
        if self.use_auxiliary_enc_outputs:
            # Number of candidate layers below the last encoder layer.
            n_layers = (
                ((len(args.enc_block_arch) * args.enc_block_repeat) - 1)
                if args.enc_block_arch is not None
                else (args.elayers - 1)
            )
            aux_enc_output_layers = valid_aux_encoder_output_layers(
                args.aux_transducer_loss_enc_output_layers,
                n_layers,
                args.use_symm_kl_div_loss,
                self.subsample,
            )
        else:
            aux_enc_output_layers = []
        if args.etype == "custom":
            if args.enc_block_arch is None:
                # NOTE(review): the two adjacent literals concatenate without
                # a space ("--enc-block-archshould"); kept byte-identical.
                raise ValueError(
                    "When specifying custom encoder type, --enc-block-arch"
                    "should be set in training config."
                )
            self.encoder = CustomEncoder(
                idim,
                args.enc_block_arch,
                args.custom_enc_input_layer,
                repeat_block=args.enc_block_repeat,
                self_attn_type=args.custom_enc_self_attn_type,
                positional_encoding_type=args.custom_enc_positional_encoding_type,
                positionwise_activation_type=args.custom_enc_pw_activation_type,
                conv_mod_activation_type=args.custom_enc_conv_mod_activation_type,
                aux_enc_output_layers=aux_enc_output_layers,
                input_layer_dropout_rate=args.custom_enc_input_dropout_rate,
                input_layer_pos_enc_dropout_rate=(
                    args.custom_enc_input_pos_enc_dropout_rate
                ),
            )
            encoder_out = self.encoder.enc_out
        else:
            self.enc = encoder_for(
                args,
                idim,
                self.subsample,
                aux_enc_output_layers=aux_enc_output_layers,
            )
            encoder_out = args.eprojs
        if args.dtype == "custom":
            if args.dec_block_arch is None:
                # NOTE(review): same missing space as the encoder message.
                raise ValueError(
                    "When specifying custom decoder type, --dec-block-arch"
                    "should be set in training config."
                )
            self.decoder = CustomDecoder(
                odim,
                args.dec_block_arch,
                args.custom_dec_input_layer,
                repeat_block=args.dec_block_repeat,
                positionwise_activation_type=args.custom_dec_pw_activation_type,
                input_layer_dropout_rate=args.dropout_rate_embed_decoder,
                blank_id=blank_id,
            )
            decoder_out = self.decoder.dunits
        else:
            self.dec = RNNDecoder(
                odim,
                args.dtype,
                args.dlayers,
                args.dunits,
                args.dec_embed_dim,
                dropout_rate=args.dropout_rate_decoder,
                dropout_rate_embed=args.dropout_rate_embed_decoder,
                blank_id=blank_id,
            )
            decoder_out = args.dunits
        # Joint network plus all loss computation (main + auxiliary tasks).
        self.transducer_tasks = TransducerTasks(
            encoder_out,
            decoder_out,
            args.joint_dim,
            odim,
            joint_activation_type=args.joint_activation_type,
            transducer_loss_weight=args.transducer_weight,
            ctc_loss=args.use_ctc_loss,
            ctc_loss_weight=args.ctc_loss_weight,
            ctc_loss_dropout_rate=args.ctc_loss_dropout_rate,
            lm_loss=args.use_lm_loss,
            lm_loss_weight=args.lm_loss_weight,
            lm_loss_smoothing_rate=args.lm_loss_smoothing_rate,
            aux_transducer_loss=args.use_aux_transducer_loss,
            aux_transducer_loss_weight=args.aux_transducer_loss_weight,
            aux_transducer_loss_mlp_dim=args.aux_transducer_loss_mlp_dim,
            aux_trans_loss_mlp_dropout_rate=args.aux_transducer_loss_mlp_dropout_rate,
            symm_kl_div_loss=args.use_symm_kl_div_loss,
            symm_kl_div_loss_weight=args.symm_kl_div_loss_weight,
            fastemit_lambda=args.fastemit_lambda,
            blank_id=blank_id,
            ignore_id=ignore_id,
            training=training,
        )
        if training and (args.report_cer or args.report_wer):
            self.error_calculator = ErrorCalculator(
                self.decoder if args.dtype == "custom" else self.dec,
                self.transducer_tasks.joint_network,
                args.char_list,
                args.sym_space,
                args.sym_blank,
                args.report_cer,
                args.report_wer,
            )
        else:
            self.error_calculator = None
        self.etype = args.etype
        self.dtype = args.dtype
        self.sos = odim - 1
        self.eos = odim - 1
        self.blank_id = blank_id
        self.ignore_id = ignore_id
        self.space = args.sym_space
        self.blank = args.sym_blank
        self.odim = odim
        self.reporter = Reporter()
        self.default_parameters(args)
        self.loss = None
        self.rnnlm = None
    def default_parameters(self, args: Namespace):
        """Initialize/reset parameters for Transducer.

        Args:
            args: Namespace containing model options.
        """
        initializer(self, args)
    def forward(
        self, feats: torch.Tensor, feats_len: torch.Tensor, labels: torch.Tensor
    ) -> torch.Tensor:
        """E2E forward.

        Args:
            feats: Feature sequences. (B, F, D_feats)
            feats_len: Feature sequences lengths. (B,)
            labels: Label ID sequences. (B, L)

        Returns:
            loss: Transducer loss value
        """
        # 1. encoder
        feats = feats[:, : max(feats_len)]
        if self.etype == "custom":
            feats_mask = (
                make_non_pad_mask(feats_len.tolist()).to(feats.device).unsqueeze(-2)
            )
            _enc_out, _enc_out_len = self.encoder(feats, feats_mask)
        else:
            _enc_out, _enc_out_len, _ = self.enc(feats, feats_len)
        # When auxiliary outputs are enabled, the encoder returns a pair of
        # (final output, intermediate outputs); split them here.
        if self.use_auxiliary_enc_outputs:
            enc_out, aux_enc_out = _enc_out[0], _enc_out[1]
            enc_out_len, aux_enc_out_len = _enc_out_len[0], _enc_out_len[1]
        else:
            enc_out, aux_enc_out = _enc_out, None
            enc_out_len, aux_enc_out_len = _enc_out_len, None
        # 2. decoder
        dec_in = get_decoder_input(labels, self.blank_id, self.ignore_id)
        if self.dtype == "custom":
            self.decoder.set_device(enc_out.device)
            dec_in_mask = target_mask(dec_in, self.blank_id)
            dec_out, _ = self.decoder(dec_in, dec_in_mask)
        else:
            self.dec.set_device(enc_out.device)
            dec_out = self.dec(dec_in)
        # 3. Transducer task and auxiliary tasks computation
        losses = self.transducer_tasks(
            enc_out,
            aux_enc_out,
            dec_out,
            labels,
            enc_out_len,
            aux_enc_out_len,
        )
        if self.training or self.error_calculator is None:
            cer, wer = None, None
        else:
            cer, wer = self.error_calculator(
                enc_out, self.transducer_tasks.get_target()
            )
        self.loss = sum(losses)
        loss_data = float(self.loss)
        if not math.isnan(loss_data):
            # Reporter expects the individual losses in the order produced by
            # TransducerTasks.
            self.reporter.report(
                loss_data,
                *[float(loss) for loss in losses],
                cer,
                wer,
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
    def encode_custom(self, feats: numpy.ndarray) -> torch.Tensor:
        """Encode acoustic features.

        Args:
            feats: Feature sequence. (F, D_feats)

        Returns:
            enc_out: Encoded feature sequence. (T, D_enc)
        """
        feats = torch.as_tensor(feats).unsqueeze(0)
        enc_out, _ = self.encoder(feats, None)
        return enc_out.squeeze(0)
    def encode_rnn(self, feats: numpy.ndarray) -> torch.Tensor:
        """Encode acoustic features.

        Args:
            feats: Feature sequence. (F, D_feats)

        Returns:
            enc_out: Encoded feature sequence. (T, D_enc)
        """
        p = next(self.parameters())
        feats_len = [feats.shape[0]]
        # Apply input-frame subsampling before feeding the RNN encoder.
        feats = feats[:: self.subsample[0], :]
        feats = torch.as_tensor(feats, device=p.device, dtype=p.dtype)
        feats = feats.contiguous().unsqueeze(0)
        enc_out, _, _ = self.enc(feats, feats_len)
        return enc_out.squeeze(0)
    def recognize(
        self, feats: numpy.ndarray, beam_search: BeamSearchTransducer
    ) -> List:
        """Recognize input features.

        Args:
            feats: Feature sequence. (F, D_feats)
            beam_search: Beam search class.

        Returns:
            nbest_hyps: N-best decoding results.
        """
        self.eval()
        if self.etype == "custom":
            enc_out = self.encode_custom(feats)
        else:
            enc_out = self.encode_rnn(feats)
        nbest_hyps = beam_search(enc_out)
        # Hypotheses are dataclasses; expose them as plain dicts.
        return [asdict(n) for n in nbest_hyps]
    def calculate_all_attentions(
        self, feats: torch.Tensor, feats_len: torch.Tensor, labels: torch.Tensor
    ) -> numpy.ndarray:
        """E2E attention calculation.

        Args:
            feats: Feature sequences. (B, F, D_feats)
            feats_len: Feature sequences lengths. (B,)
            labels: Label ID sequences. (B, L)

        Returns:
            ret: Attention weights with the following shape,
                1) multi-head case => attention weights. (B, D_att, U, T),
                2) other case => attention weights. (B, U, T)
        """
        self.eval()
        # Attention weights only exist for custom (self-attention) modules.
        if self.etype != "custom" and self.dtype != "custom":
            return []
        else:
            with torch.no_grad():
                # Run a forward pass so each attention module caches `attn`.
                self.forward(feats, feats_len, labels)
            ret = dict()
            for name, m in self.named_modules():
                if isinstance(m, MultiHeadedAttention) or isinstance(
                    m, RelPositionMultiHeadedAttention
                ):
                    ret[name] = m.attn.cpu().numpy()
        self.train()
        return ret
| 18,123 | 32.316176 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_asr_mulenc.py | # Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Copyright 2017 Johns Hopkins University (Ruizhi Li)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Define e2e module for multi-encoder network. https://arxiv.org/pdf/1811.04903.pdf."""
import argparse
import logging
import math
import os
from itertools import groupby
import chainer
import numpy as np
import torch
from chainer import reporter
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.e2e_asr_common import label_smoothing_dist
from espnet.nets.pytorch_backend.ctc import ctc_for
from espnet.nets.pytorch_backend.nets_utils import (
get_subsample,
pad_list,
to_device,
to_torch_tensor,
)
from espnet.nets.pytorch_backend.rnn.attentions import att_for
from espnet.nets.pytorch_backend.rnn.decoders import decoder_for
from espnet.nets.pytorch_backend.rnn.encoders import Encoder, encoder_for
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.utils.cli_utils import strtobool
CTC_LOSS_THRESHOLD = 10000
class Reporter(chainer.Chain):
    """Define a chainer reporter wrapper.

    Forwards per-iteration training statistics (per-encoder CTC losses,
    attention loss/accuracy, CER/WER) to the chainer ``reporter``.
    """

    def report(self, loss_ctc_list, loss_att, acc, cer_ctc_list, cer, wer, mtl_loss):
        """Report one iteration's metrics.

        :param list loss_ctc_list: [weighted CTC, CTC1, CTC2, ..., CTCN]
        :param loss_att: attention loss
        :param acc: attention accuracy
        :param list cer_ctc_list: [weighted cer_ctc, cer_ctc_1, ..., cer_ctc_N]
        :param cer: character error rate
        :param wer: word error rate
        :param mtl_loss: combined multi-task loss
        """
        # Element 0 of each *_list is the encoder-weighted aggregate;
        # elements 1..N are the per-encoder values.
        num_encs = len(loss_ctc_list) - 1
        reporter.report({"loss_ctc": loss_ctc_list[0]}, self)
        for i in range(num_encs):
            reporter.report({"loss_ctc{}".format(i + 1): loss_ctc_list[i + 1]}, self)
        reporter.report({"loss_att": loss_att}, self)
        reporter.report({"acc": acc}, self)
        reporter.report({"cer_ctc": cer_ctc_list[0]}, self)
        for i in range(num_encs):
            reporter.report({"cer_ctc{}".format(i + 1): cer_ctc_list[i + 1]}, self)
        reporter.report({"cer": cer}, self)
        reporter.report({"wer": wer}, self)
        # Fix: lazy %-style args instead of eager string concatenation, so the
        # message is only built when INFO logging is enabled (text unchanged).
        logging.info("mtl loss:%s", mtl_loss)
        reporter.report({"loss": mtl_loss}, self)
class E2E(ASRInterface, torch.nn.Module):
"""E2E module.
:param List idims: List of dimensions of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
"""Add arguments for multi-encoder setting."""
E2E.encoder_add_arguments(parser)
E2E.attention_add_arguments(parser)
E2E.decoder_add_arguments(parser)
E2E.ctc_add_arguments(parser)
return parser
@staticmethod
def encoder_add_arguments(parser):
"""Add arguments for encoders in multi-encoder setting."""
group = parser.add_argument_group("E2E encoder setting")
group.add_argument(
"--etype",
action="append",
type=str,
choices=[
"lstm",
"blstm",
"lstmp",
"blstmp",
"vgglstmp",
"vggblstmp",
"vgglstm",
"vggblstm",
"gru",
"bgru",
"grup",
"bgrup",
"vgggrup",
"vggbgrup",
"vgggru",
"vggbgru",
],
help="Type of encoder network architecture",
)
group.add_argument(
"--elayers",
type=int,
action="append",
help="Number of encoder layers "
"(for shared recognition part in multi-speaker asr mode)",
)
group.add_argument(
"--eunits",
"-u",
type=int,
action="append",
help="Number of encoder hidden units",
)
group.add_argument(
"--eprojs", default=320, type=int, help="Number of encoder projection units"
)
group.add_argument(
"--subsample",
type=str,
action="append",
help="Subsample input frames x_y_z means "
"subsample every x frame at 1st layer, "
"every y frame at 2nd layer etc.",
)
return parser
    @staticmethod
    def attention_add_arguments(parser):
        """Add arguments for attentions in multi-encoder setting."""
        group = parser.add_argument_group("E2E attention setting")
        # attention
        # NOTE: options below use action="append" so that one value can be
        # supplied per encoder stream on the command line.
        group.add_argument(
            "--atype",
            type=str,
            action="append",
            choices=[
                "noatt",
                "dot",
                "add",
                "location",
                "coverage",
                "coverage_location",
                "location2d",
                "location_recurrent",
                "multi_head_dot",
                "multi_head_add",
                "multi_head_loc",
                "multi_head_multi_res_loc",
            ],
            help="Type of attention architecture",
        )
        group.add_argument(
            "--adim",
            type=int,
            action="append",
            help="Number of attention transformation dimensions",
        )
        group.add_argument(
            "--awin",
            type=int,
            action="append",
            help="Window size for location2d attention",
        )
        group.add_argument(
            "--aheads",
            type=int,
            action="append",
            help="Number of heads for multi head attention",
        )
        group.add_argument(
            "--aconv-chans",
            type=int,
            action="append",
            help="Number of attention convolution channels \
        (negative value indicates no location-aware attention)",
        )
        group.add_argument(
            "--aconv-filts",
            type=int,
            action="append",
            help="Number of attention convolution filters \
        (negative value indicates no location-aware attention)",
        )
        group.add_argument(
            "--dropout-rate",
            type=float,
            action="append",
            help="Dropout rate for the encoder",
        )
        # hierarchical attention network (HAN)
        # NOTE: unlike the per-encoder options above, the HAN options are
        # single-valued (plain defaults, no "append").
        group.add_argument(
            "--han-type",
            default="dot",
            type=str,
            choices=[
                "noatt",
                "dot",
                "add",
                "location",
                "coverage",
                "coverage_location",
                "location2d",
                "location_recurrent",
                "multi_head_dot",
                "multi_head_add",
                "multi_head_loc",
                "multi_head_multi_res_loc",
            ],
            help="Type of attention architecture (multi-encoder asr mode only)",
        )
        group.add_argument(
            "--han-dim",
            default=320,
            type=int,
            help="Number of attention transformation dimensions in HAN",
        )
        group.add_argument(
            "--han-win",
            default=5,
            type=int,
            help="Window size for location2d attention in HAN",
        )
        group.add_argument(
            "--han-heads",
            default=4,
            type=int,
            help="Number of heads for multi head attention in HAN",
        )
        group.add_argument(
            "--han-conv-chans",
            default=-1,
            type=int,
            help="Number of attention convolution channels in HAN \
        (negative value indicates no location-aware attention)",
        )
        group.add_argument(
            "--han-conv-filts",
            default=100,
            type=int,
            help="Number of attention convolution filters in HAN \
        (negative value indicates no location-aware attention)",
        )
        return parser
@staticmethod
def decoder_add_arguments(parser):
"""Add arguments for decoder in multi-encoder setting."""
group = parser.add_argument_group("E2E decoder setting")
group.add_argument(
"--dtype",
default="lstm",
type=str,
choices=["lstm", "gru"],
help="Type of decoder network architecture",
)
group.add_argument(
"--dlayers", default=1, type=int, help="Number of decoder layers"
)
group.add_argument(
"--dunits", default=320, type=int, help="Number of decoder hidden units"
)
group.add_argument(
"--dropout-rate-decoder",
default=0.0,
type=float,
help="Dropout rate for the decoder",
)
group.add_argument(
"--sampling-probability",
default=0.0,
type=float,
help="Ratio of predicted labels fed back to decoder",
)
group.add_argument(
"--lsm-type",
const="",
default="",
type=str,
nargs="?",
choices=["", "unigram"],
help="Apply label smoothing with a specified distribution type",
)
return parser
@staticmethod
def ctc_add_arguments(parser):
"""Add arguments for ctc in multi-encoder setting."""
group = parser.add_argument_group("E2E multi-ctc setting")
group.add_argument(
"--share-ctc",
type=strtobool,
default=False,
help="The flag to switch to share ctc across multiple encoders "
"(multi-encoder asr mode only).",
)
group.add_argument(
"--weights-ctc-train",
type=float,
action="append",
help="ctc weight assigned to each encoder during training.",
)
group.add_argument(
"--weights-ctc-dec",
type=float,
action="append",
help="ctc weight assigned to each encoder during decoding.",
)
return parser
def get_total_subsampling_factor(self):
"""Get total subsampling factor."""
if isinstance(self.enc, Encoder):
return self.enc.conv_subsampling_factor * int(
np.prod(self.subsample_list[0])
)
else:
return self.enc[0].conv_subsampling_factor * int(
np.prod(self.subsample_list[0])
)
    def __init__(self, idims, odim, args):
        """Initialize this class with python-level args.

        Args:
            idims (list): list of the number of an input feature dim.
            odim (int): The number of output vocab.
            args (Namespace): arguments
        """
        super(E2E, self).__init__()
        torch.nn.Module.__init__(self)
        self.mtlalpha = args.mtlalpha
        assert 0.0 <= self.mtlalpha <= 1.0, "mtlalpha should be [0.0, 1.0]"
        self.verbose = args.verbose
        # NOTE: for self.build method
        args.char_list = getattr(args, "char_list", None)
        self.char_list = args.char_list
        self.outdir = args.outdir
        self.space = args.sym_space
        self.blank = args.sym_blank
        self.reporter = Reporter()
        self.num_encs = args.num_encs
        self.share_ctc = args.share_ctc
        # below means the last number becomes eos/sos ID
        # note that sos/eos IDs are identical
        self.sos = odim - 1
        self.eos = odim - 1
        # subsample info
        self.subsample_list = get_subsample(args, mode="asr", arch="rnn_mulenc")
        # label smoothing info
        if args.lsm_type and os.path.isfile(args.train_json):
            logging.info("Use label smoothing with " + args.lsm_type)
            labeldist = label_smoothing_dist(
                odim, args.lsm_type, transcript=args.train_json
            )
        else:
            labeldist = None
        # speech translation related
        self.replace_sos = getattr(
            args, "replace_sos", False
        )  # use getattr to keep compatibility
        # no feature front-end module is used by this model
        self.frontend = None
        # encoder
        self.enc = encoder_for(args, idims, self.subsample_list)
        # ctc
        self.ctc = ctc_for(args, odim)
        # attention
        self.att = att_for(args)
        # hierarchical attention network
        han = att_for(args, han_mode=True)
        self.att.append(han)
        # decoder
        self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)
        if args.mtlalpha > 0 and self.num_encs > 1:
            # weights-ctc,
            # e.g. ctc_loss = w_1*ctc_1_loss + w_2 * ctc_2_loss + w_N * ctc_N_loss
            # Per-encoder CTC weights are normalized to sum to 1.
            self.weights_ctc_train = args.weights_ctc_train / np.sum(
                args.weights_ctc_train
            )  # normalize
            self.weights_ctc_dec = args.weights_ctc_dec / np.sum(
                args.weights_ctc_dec
            )  # normalize
            logging.info(
                "ctc weights (training during training): "
                + " ".join([str(x) for x in self.weights_ctc_train])
            )
            logging.info(
                "ctc weights (decoding during training): "
                + " ".join([str(x) for x in self.weights_ctc_dec])
            )
        else:
            self.weights_ctc_dec = [1.0]
            self.weights_ctc_train = [1.0]
        # weight initialization
        self.init_like_chainer()
        # options for beam search
        # recog_args mirrors decoding options so validation-time CER/WER can
        # run a beam search inside forward().
        if args.report_cer or args.report_wer:
            recog_args = {
                "beam_size": args.beam_size,
                "penalty": args.penalty,
                "ctc_weight": args.ctc_weight,
                "maxlenratio": args.maxlenratio,
                "minlenratio": args.minlenratio,
                "lm_weight": args.lm_weight,
                "rnnlm": args.rnnlm,
                "nbest": args.nbest,
                "space": args.sym_space,
                "blank": args.sym_blank,
                "tgt_lang": False,
                "ctc_weights_dec": self.weights_ctc_dec,
            }
            self.recog_args = argparse.Namespace(**recog_args)
            self.report_cer = args.report_cer
            self.report_wer = args.report_wer
        else:
            self.report_cer = False
            self.report_wer = False
        self.rnnlm = None
        self.logzero = -10000000000.0
        self.loss = None
        self.acc = None
    def init_like_chainer(self):
        """Initialize weight like chainer.

        chainer basically uses LeCun way: W ~ Normal(0, fan_in ** -0.5), b = 0
        pytorch basically uses W, b ~ Uniform(-fan_in**-0.5, fan_in**-0.5)
        however, there are two exceptions as far as I know.
        - EmbedID.W ~ Normal(0, 1)
        - LSTM.upward.b[forget_gate_range] = 1 (but not used in NStepLSTM)
        """
        def lecun_normal_init_parameters(module):
            # LeCun normal: weights ~ Normal(0, 1/sqrt(fan_in)); biases zeroed.
            for p in module.parameters():
                data = p.data
                if data.dim() == 1:
                    # bias
                    data.zero_()
                elif data.dim() == 2:
                    # linear weight
                    n = data.size(1)
                    stdv = 1.0 / math.sqrt(n)
                    data.normal_(0, stdv)
                elif data.dim() in (3, 4):
                    # conv weight: fan_in = in_channels * prod(kernel dims)
                    n = data.size(1)
                    for k in data.size()[2:]:
                        n *= k
                    stdv = 1.0 / math.sqrt(n)
                    data.normal_(0, stdv)
                else:
                    raise NotImplementedError
        def set_forget_bias_to_one(bias):
            # The forget-gate slice of an LSTM bias vector is [n/4, n/2).
            n = bias.size(0)
            start, end = n // 4, n // 2
            bias.data[start:end].fill_(1.0)
        lecun_normal_init_parameters(self)
        # exceptions
        # embed weight ~ Normal(0, 1)
        self.dec.embed.weight.data.normal_(0, 1)
        # forget-bias = 1.0
        # https://discuss.pytorch.org/t/set-forget-gate-bias-of-lstm/1745
        for i in range(len(self.dec.decoder)):
            set_forget_bias_to_one(self.dec.decoder[i].bias_ih)
    def forward(self, xs_pad_list, ilens_list, ys_pad):
        """E2E forward.

        :param List xs_pad_list: list of batch (torch.Tensor) of padded input sequences
            [(B, Tmax_1, idim), (B, Tmax_2, idim),..]
        :param List ilens_list:
            list of batch (torch.Tensor) of lengths of input sequences [(B), (B), ..]
        :param torch.Tensor ys_pad:
            batch of padded character id sequence tensor (B, Lmax)
        :return: loss value
        :rtype: torch.Tensor
        """
        # Imported lazily so the dependency is only needed when training runs.
        import editdistance
        if self.replace_sos:
            tgt_lang_ids = ys_pad[:, 0:1]
            ys_pad = ys_pad[:, 1:]  # remove target language ID in the beginning
        else:
            tgt_lang_ids = None
        hs_pad_list, hlens_list, self.loss_ctc_list = [], [], []
        for idx in range(self.num_encs):
            # 1. Encoder
            hs_pad, hlens, _ = self.enc[idx](xs_pad_list[idx], ilens_list[idx])
            # 2. CTC loss (per encoder; shared module when share_ctc is set)
            if self.mtlalpha == 0:
                self.loss_ctc_list.append(None)
            else:
                ctc_idx = 0 if self.share_ctc else idx
                loss_ctc = self.ctc[ctc_idx](hs_pad, hlens, ys_pad)
                self.loss_ctc_list.append(loss_ctc)
            hs_pad_list.append(hs_pad)
            hlens_list.append(hlens)
        # 3. attention loss
        if self.mtlalpha == 1:
            self.loss_att, acc = None, None
        else:
            self.loss_att, acc, _ = self.dec(
                hs_pad_list, hlens_list, ys_pad, lang_ids=tgt_lang_ids
            )
        self.acc = acc
        # 4. compute cer without beam search (greedy CTC decoding per encoder)
        if self.mtlalpha == 0 or self.char_list is None:
            cer_ctc_list = [None] * (self.num_encs + 1)
        else:
            cer_ctc_list = []
            for ind in range(self.num_encs):
                cers = []
                ctc_idx = 0 if self.share_ctc else ind
                y_hats = self.ctc[ctc_idx].argmax(hs_pad_list[ind]).data
                for i, y in enumerate(y_hats):
                    # collapse repeated frames (CTC greedy decoding)
                    y_hat = [x[0] for x in groupby(y)]
                    y_true = ys_pad[i]
                    seq_hat = [
                        self.char_list[int(idx)] for idx in y_hat if int(idx) != -1
                    ]
                    seq_true = [
                        self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                    ]
                    seq_hat_text = "".join(seq_hat).replace(self.space, " ")
                    seq_hat_text = seq_hat_text.replace(self.blank, "")
                    seq_true_text = "".join(seq_true).replace(self.space, " ")
                    hyp_chars = seq_hat_text.replace(" ", "")
                    ref_chars = seq_true_text.replace(" ", "")
                    if len(ref_chars) > 0:
                        cers.append(
                            editdistance.eval(hyp_chars, ref_chars) / len(ref_chars)
                        )
                cer_ctc = sum(cers) / len(cers) if cers else None
                cer_ctc_list.append(cer_ctc)
            # NOTE(review): if every reference in a batch is empty, cer_ctc is
            # None and float(item) below raises TypeError — confirm references
            # are never all-empty in practice.
            cer_ctc_weighted = np.sum(
                [
                    item * self.weights_ctc_train[i]
                    for i, item in enumerate(cer_ctc_list)
                ]
            )
            cer_ctc_list = [float(cer_ctc_weighted)] + [
                float(item) for item in cer_ctc_list
            ]
        # 5. compute cer/wer (beam-search based; evaluation mode only)
        if self.training or not (self.report_cer or self.report_wer):
            cer, wer = 0.0, 0.0
            # oracle_cer, oracle_wer = 0.0, 0.0
        else:
            if self.recog_args.ctc_weight > 0.0:
                lpz_list = []
                for idx in range(self.num_encs):
                    ctc_idx = 0 if self.share_ctc else idx
                    lpz = self.ctc[ctc_idx].log_softmax(hs_pad_list[idx]).data
                    lpz_list.append(lpz)
            else:
                lpz_list = None
            word_eds, word_ref_lens, char_eds, char_ref_lens = [], [], [], []
            nbest_hyps = self.dec.recognize_beam_batch(
                hs_pad_list,
                hlens_list,
                lpz_list,
                self.recog_args,
                self.char_list,
                self.rnnlm,
                lang_ids=tgt_lang_ids.squeeze(1).tolist() if self.replace_sos else None,
            )
            # remove <sos> and <eos>
            y_hats = [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps]
            for i, y_hat in enumerate(y_hats):
                y_true = ys_pad[i]
                seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
                seq_true = [
                    self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                ]
                seq_hat_text = "".join(seq_hat).replace(self.recog_args.space, " ")
                seq_hat_text = seq_hat_text.replace(self.recog_args.blank, "")
                seq_true_text = "".join(seq_true).replace(self.recog_args.space, " ")
                hyp_words = seq_hat_text.split()
                ref_words = seq_true_text.split()
                word_eds.append(editdistance.eval(hyp_words, ref_words))
                word_ref_lens.append(len(ref_words))
                hyp_chars = seq_hat_text.replace(" ", "")
                ref_chars = seq_true_text.replace(" ", "")
                char_eds.append(editdistance.eval(hyp_chars, ref_chars))
                char_ref_lens.append(len(ref_chars))
            wer = (
                0.0
                if not self.report_wer
                else float(sum(word_eds)) / sum(word_ref_lens)
            )
            cer = (
                0.0
                if not self.report_cer
                else float(sum(char_eds)) / sum(char_ref_lens)
            )
        # Combine CTC and attention losses according to mtlalpha.
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = self.loss_att
            loss_att_data = float(self.loss_att)
            loss_ctc_data_list = [None] * (self.num_encs + 1)
        elif alpha == 1:
            self.loss = torch.sum(
                torch.cat(
                    [
                        (item * self.weights_ctc_train[i]).unsqueeze(0)
                        for i, item in enumerate(self.loss_ctc_list)
                    ]
                )
            )
            loss_att_data = None
            loss_ctc_data_list = [float(self.loss)] + [
                float(item) for item in self.loss_ctc_list
            ]
        else:
            self.loss_ctc = torch.sum(
                torch.cat(
                    [
                        (item * self.weights_ctc_train[i]).unsqueeze(0)
                        for i, item in enumerate(self.loss_ctc_list)
                    ]
                )
            )
            self.loss = alpha * self.loss_ctc + (1 - alpha) * self.loss_att
            loss_att_data = float(self.loss_att)
            loss_ctc_data_list = [float(self.loss_ctc)] + [
                float(item) for item in self.loss_ctc_list
            ]
        loss_data = float(self.loss)
        # Skip reporting when the loss exploded or became NaN.
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_ctc_data_list,
                loss_att_data,
                acc,
                cer_ctc_list,
                cer,
                wer,
                loss_data,
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
def scorers(self):
"""Get scorers for `beam_search` (optional).
Returns:
dict[str, ScorerInterface]: dict of `ScorerInterface` objects
"""
return dict(decoder=self.dec, ctc=CTCPrefixScorer(self.ctc, self.eos))
def encode(self, x_list):
"""Encode feature.
Args:
x_list (list): input feature [(T1, D), (T2, D), ... ]
Returns:
list
encoded feature [(T1, D), (T2, D), ... ]
"""
self.eval()
ilens_list = [[x_list[idx].shape[0]] for idx in range(self.num_encs)]
# subsample frame
x_list = [
x_list[idx][:: self.subsample_list[idx][0], :]
for idx in range(self.num_encs)
]
p = next(self.parameters())
x_list = [
torch.as_tensor(x_list[idx], device=p.device, dtype=p.dtype)
for idx in range(self.num_encs)
]
# make a utt list (1) to use the same interface for encoder
xs_list = [
x_list[idx].contiguous().unsqueeze(0) for idx in range(self.num_encs)
]
# 1. encoder
hs_list = []
for idx in range(self.num_encs):
hs, _, _ = self.enc[idx](xs_list[idx], ilens_list[idx])
hs_list.append(hs[0])
return hs_list
def recognize(self, x_list, recog_args, char_list, rnnlm=None):
"""E2E beam search.
:param list of ndarray x: list of input acoustic feature [(T1, D), (T2,D),...]
:param Namespace recog_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
hs_list = self.encode(x_list)
# calculate log P(z_t|X) for CTC scores
if recog_args.ctc_weight > 0.0:
if self.share_ctc:
lpz_list = [
self.ctc[0].log_softmax(hs_list[idx].unsqueeze(0))[0]
for idx in range(self.num_encs)
]
else:
lpz_list = [
self.ctc[idx].log_softmax(hs_list[idx].unsqueeze(0))[0]
for idx in range(self.num_encs)
]
else:
lpz_list = None
# 2. Decoder
# decode the first utterance
y = self.dec.recognize_beam(hs_list, lpz_list, recog_args, char_list, rnnlm)
return y
    def recognize_batch(self, xs_list, recog_args, char_list, rnnlm=None):
        """E2E beam search.

        :param list xs_list: list of list of input acoustic feature arrays
            [[(T1_1, D), (T1_2, D), ...],[(T2_1, D), (T2_2, D), ...], ...]
        :param Namespace recog_args: argument Namespace containing options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        # Remember the current mode so it can be restored after decoding.
        prev = self.training
        self.eval()
        # Lengths are taken BEFORE frame subsampling.
        ilens_list = [
            np.fromiter((xx.shape[0] for xx in xs_list[idx]), dtype=np.int64)
            for idx in range(self.num_encs)
        ]
        # subsample frame
        xs_list = [
            [xx[:: self.subsample_list[idx][0], :] for xx in xs_list[idx]]
            for idx in range(self.num_encs)
        ]
        xs_list = [
            [to_device(self, to_torch_tensor(xx).float()) for xx in xs_list[idx]]
            for idx in range(self.num_encs)
        ]
        xs_pad_list = [pad_list(xs_list[idx], 0.0) for idx in range(self.num_encs)]
        # 1. Encoder
        hs_pad_list, hlens_list = [], []
        for idx in range(self.num_encs):
            hs_pad, hlens, _ = self.enc[idx](xs_pad_list[idx], ilens_list[idx])
            hs_pad_list.append(hs_pad)
            hlens_list.append(hlens)
        # calculate log P(z_t|X) for CTC scores
        if recog_args.ctc_weight > 0.0:
            if self.share_ctc:
                lpz_list = [
                    self.ctc[0].log_softmax(hs_pad_list[idx])
                    for idx in range(self.num_encs)
                ]
            else:
                lpz_list = [
                    self.ctc[idx].log_softmax(hs_pad_list[idx])
                    for idx in range(self.num_encs)
                ]
            normalize_score = False
        else:
            lpz_list = None
            normalize_score = True
        # 2. Decoder
        hlens_list = [
            torch.tensor(list(map(int, hlens_list[idx])))
            for idx in range(self.num_encs)
        ]  # make sure hlens is tensor
        y = self.dec.recognize_beam_batch(
            hs_pad_list,
            hlens_list,
            lpz_list,
            recog_args,
            char_list,
            rnnlm,
            normalize_score=normalize_score,
        )
        # Restore the previous train/eval mode.
        if prev:
            self.train()
        return y
def calculate_all_attentions(self, xs_pad_list, ilens_list, ys_pad):
"""E2E attention calculation.
:param List xs_pad_list: list of batch (torch.Tensor) of padded input sequences
[(B, Tmax_1, idim), (B, Tmax_2, idim),..]
:param List ilens_list:
list of batch (torch.Tensor) of lengths of input sequences [(B), (B), ..]
:param torch.Tensor ys_pad:
batch of padded character id sequence tensor (B, Lmax)
:return: attention weights with the following shape,
1) multi-head case => attention weights (B, H, Lmax, Tmax),
2) multi-encoder case
=> [(B, Lmax, Tmax1), (B, Lmax, Tmax2), ..., (B, Lmax, NumEncs)]
3) other case => attention weights (B, Lmax, Tmax).
:rtype: float ndarray or list
"""
self.eval()
with torch.no_grad():
# 1. Encoder
if self.replace_sos:
tgt_lang_ids = ys_pad[:, 0:1]
ys_pad = ys_pad[:, 1:] # remove target language ID in the beginning
else:
tgt_lang_ids = None
hs_pad_list, hlens_list = [], []
for idx in range(self.num_encs):
hs_pad, hlens, _ = self.enc[idx](xs_pad_list[idx], ilens_list[idx])
hs_pad_list.append(hs_pad)
hlens_list.append(hlens)
# 2. Decoder
att_ws = self.dec.calculate_all_attentions(
hs_pad_list, hlens_list, ys_pad, lang_ids=tgt_lang_ids
)
self.train()
return att_ws
def calculate_all_ctc_probs(self, xs_pad_list, ilens_list, ys_pad):
"""E2E CTC probability calculation.
:param List xs_pad_list: list of batch (torch.Tensor) of padded input sequences
[(B, Tmax_1, idim), (B, Tmax_2, idim),..]
:param List ilens_list:
list of batch (torch.Tensor) of lengths of input sequences [(B), (B), ..]
:param torch.Tensor ys_pad:
batch of padded character id sequence tensor (B, Lmax)
:return: CTC probability (B, Tmax, vocab)
:rtype: float ndarray or list
"""
probs_list = [None]
if self.mtlalpha == 0:
return probs_list
self.eval()
probs_list = []
with torch.no_grad():
# 1. Encoder
for idx in range(self.num_encs):
hs_pad, hlens, _ = self.enc[idx](xs_pad_list[idx], ilens_list[idx])
# 2. CTC loss
ctc_idx = 0 if self.share_ctc else idx
probs = self.ctc[ctc_idx].softmax(hs_pad).cpu().numpy()
probs_list.append(probs)
self.train()
return probs_list
| 31,487 | 34.379775 | 88 | py |
espnet | espnet-master/espnet/nets/pytorch_backend/e2e_asr_transformer.py | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer speech recognition model (pytorch)."""
import logging
import math
from argparse import Namespace
import numpy
import torch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.ctc_prefix_score import CTCPrefixScore
from espnet.nets.e2e_asr_common import ErrorCalculator, end_detect
from espnet.nets.pytorch_backend.ctc import CTC
from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD, Reporter
from espnet.nets.pytorch_backend.nets_utils import (
get_subsample,
make_non_pad_mask,
th_accuracy,
)
from espnet.nets.pytorch_backend.rnn.decoders import CTC_SCORING_RATIO
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.argument import ( # noqa: H301
add_arguments_transformer_common,
)
from espnet.nets.pytorch_backend.transformer.attention import ( # noqa: H301
MultiHeadedAttention,
RelPositionMultiHeadedAttention,
)
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.dynamic_conv import DynamicConvolution
from espnet.nets.pytorch_backend.transformer.dynamic_conv2d import DynamicConvolution2D
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import ( # noqa: H301
LabelSmoothingLoss,
)
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask, target_mask
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.utils.fill_missing_args import fill_missing_args
class E2E(ASRInterface, torch.nn.Module):
"""E2E module.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
"""Add arguments."""
group = parser.add_argument_group("transformer model setting")
group = add_arguments_transformer_common(group)
return parser
    @property
    def attention_plot_class(self):
        """Return PlotAttentionReport."""
        # Plotting helper class used to visualize attention maps.
        return PlotAttentionReport
def get_total_subsampling_factor(self):
"""Get total subsampling factor."""
return self.encoder.conv_subsampling_factor * int(numpy.prod(self.subsample))
def __init__(self, idim, odim, args, ignore_id=-1):
"""Construct an E2E object.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
torch.nn.Module.__init__(self)
# fill missing arguments for compatibility
args = fill_missing_args(args, self.add_arguments)
if args.transformer_attn_dropout_rate is None:
args.transformer_attn_dropout_rate = args.dropout_rate
self.adim = args.adim # used for CTC (equal to d_model)
self.mtlalpha = args.mtlalpha
if args.mtlalpha > 0.0:
self.ctc = CTC(
odim, args.adim, args.dropout_rate, ctc_type=args.ctc_type, reduce=True
)
else:
self.ctc = None
self.intermediate_ctc_weight = args.intermediate_ctc_weight
self.intermediate_ctc_layers = None
if args.intermediate_ctc_layer != "":
self.intermediate_ctc_layers = [
int(i) for i in args.intermediate_ctc_layer.split(",")
]
self.encoder = Encoder(
idim=idim,
selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
attention_dim=args.adim,
attention_heads=args.aheads,
conv_wshare=args.wshare,
conv_kernel_length=args.ldconv_encoder_kernel_length,
conv_usebias=args.ldconv_usebias,
linear_units=args.eunits,
num_blocks=args.elayers,
input_layer=args.transformer_input_layer,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
attention_dropout_rate=args.transformer_attn_dropout_rate,
stochastic_depth_rate=args.stochastic_depth_rate,
intermediate_layers=self.intermediate_ctc_layers,
ctc_softmax=self.ctc.softmax if args.self_conditioning else None,
conditioning_layer_dim=odim,
)
if args.mtlalpha < 1:
self.decoder = Decoder(
odim=odim,
selfattention_layer_type=args.transformer_decoder_selfattn_layer_type,
attention_dim=args.adim,
attention_heads=args.aheads,
conv_wshare=args.wshare,
conv_kernel_length=args.ldconv_decoder_kernel_length,
conv_usebias=args.ldconv_usebias,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
self_attention_dropout_rate=args.transformer_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_attn_dropout_rate,
)
self.criterion = LabelSmoothingLoss(
odim,
ignore_id,
args.lsm_weight,
args.transformer_length_normalized_loss,
)
else:
self.decoder = None
self.criterion = None
self.blank = 0
self.sos = odim - 1
self.eos = odim - 1
self.odim = odim
self.ignore_id = ignore_id
self.subsample = get_subsample(args, mode="asr", arch="transformer")
self.reporter = Reporter()
self.reset_parameters(args)
if args.report_cer or args.report_wer:
self.error_calculator = ErrorCalculator(
args.char_list,
args.sym_space,
args.sym_blank,
args.report_cer,
args.report_wer,
)
else:
self.error_calculator = None
self.rnnlm = None
def reset_parameters(self, args):
"""Initialize parameters."""
# initialize parameters
initialize(self, args.transformer_init)
    def forward(self, xs_pad, ilens, ys_pad):
        """E2E forward.

        :param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of source sequences (B)
        :param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
        :return: ctc loss value
        :rtype: torch.Tensor
        :return: attention loss value
        :rtype: torch.Tensor
        :return: accuracy in attention decoder
        :rtype: float
        """
        # 1. forward encoder
        xs_pad = xs_pad[:, : max(ilens)]  # for data parallel
        src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2)
        # Intermediate-CTC encoders also return per-layer hidden states.
        if self.intermediate_ctc_layers:
            hs_pad, hs_mask, hs_intermediates = self.encoder(xs_pad, src_mask)
        else:
            hs_pad, hs_mask = self.encoder(xs_pad, src_mask)
        self.hs_pad = hs_pad
        # 2. forward decoder (absent when mtlalpha == 1, i.e. pure CTC)
        if self.decoder is not None:
            ys_in_pad, ys_out_pad = add_sos_eos(
                ys_pad, self.sos, self.eos, self.ignore_id
            )
            ys_mask = target_mask(ys_in_pad, self.ignore_id)
            pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
            self.pred_pad = pred_pad
            # 3. compute attention loss
            loss_att = self.criterion(pred_pad, ys_out_pad)
            self.acc = th_accuracy(
                pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id
            )
        else:
            loss_att = None
            self.acc = None
        # TODO(karita) show predicted text
        # TODO(karita) calculate these stats
        cer_ctc = None
        loss_intermediate_ctc = 0.0
        if self.mtlalpha == 0.0:
            loss_ctc = None
        else:
            batch_size = xs_pad.size(0)
            hs_len = hs_mask.view(batch_size, -1).sum(1)
            loss_ctc = self.ctc(hs_pad.view(batch_size, -1, self.adim), hs_len, ys_pad)
            if not self.training and self.error_calculator is not None:
                ys_hat = self.ctc.argmax(hs_pad.view(batch_size, -1, self.adim)).data
                cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
            # for visualization
            if not self.training:
                self.ctc.softmax(hs_pad)
            # Average the CTC loss over the configured intermediate layers.
            if self.intermediate_ctc_weight > 0 and self.intermediate_ctc_layers:
                for hs_intermediate in hs_intermediates:
                    # assuming hs_intermediates and hs_pad has same length / padding
                    loss_inter = self.ctc(
                        hs_intermediate.view(batch_size, -1, self.adim), hs_len, ys_pad
                    )
                    loss_intermediate_ctc += loss_inter
                loss_intermediate_ctc /= len(self.intermediate_ctc_layers)
        # 5. compute cer/wer
        if self.training or self.error_calculator is None or self.decoder is None:
            cer, wer = None, None
        else:
            ys_hat = pred_pad.argmax(dim=-1)
            cer, wer = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
        # copied from e2e_asr
        # Combine CTC, attention and intermediate-CTC losses per mtlalpha.
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = None
        elif alpha == 1:
            self.loss = loss_ctc
            if self.intermediate_ctc_weight > 0:
                self.loss = (
                    1 - self.intermediate_ctc_weight
                ) * loss_ctc + self.intermediate_ctc_weight * loss_intermediate_ctc
            loss_att_data = None
            loss_ctc_data = float(loss_ctc)
        else:
            self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
            if self.intermediate_ctc_weight > 0:
                self.loss = (
                    (1 - alpha - self.intermediate_ctc_weight) * loss_att
                    + alpha * loss_ctc
                    + self.intermediate_ctc_weight * loss_intermediate_ctc
                )
            loss_att_data = float(loss_att)
            loss_ctc_data = float(loss_ctc)
        loss_data = float(self.loss)
        # Report only sane loss values (guards against explosion / NaN).
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_ctc_data, loss_att_data, self.acc, cer_ctc, cer, wer, loss_data
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
def scorers(self):
"""Scorers."""
return dict(decoder=self.decoder, ctc=CTCPrefixScorer(self.ctc, self.eos))
def encode(self, x):
"""Encode acoustic features.
:param ndarray x: source acoustic feature (T, D)
:return: encoder outputs
:rtype: torch.Tensor
"""
self.eval()
x = torch.as_tensor(x).unsqueeze(0)
enc_output, *_ = self.encoder(x, None)
return enc_output.squeeze(0)
    def recognize(self, x, recog_args, char_list=None, rnnlm=None, use_jit=False):
        """Recognize input speech with joint CTC/attention one-pass beam search.

        :param ndarray x: input acoustic feature (B, T, D) or (T, D)
        :param Namespace recog_args: argument Namespace containing the decoding
            options (beam_size, penalty, ctc_weight, lm_weight, maxlenratio,
            minlenratio, nbest)
        :param list char_list: list of characters, used only for debug logging
        :param torch.nn.Module rnnlm: language model module (optional)
        :param bool use_jit: trace the decoder step with torch.jit
            (see FIXME below: traced scores may differ from the eager path)
        :return: N-best decoding results; each entry is a dict with keys
            "score" (float) and "yseq" (token id list starting with <sos>)
        :rtype: list
        """
        enc_output = self.encode(x).unsqueeze(0)
        # mtlalpha == 1.0 means the model was trained with CTC only, so
        # attention decoding is meaningless; force pure CTC decoding.
        if self.mtlalpha == 1.0:
            recog_args.ctc_weight = 1.0
            logging.info("Set to pure CTC decoding mode.")
        if self.mtlalpha > 0 and recog_args.ctc_weight == 1.0:
            from itertools import groupby
            # Greedy CTC decoding: framewise argmax, collapse repeats,
            # then drop blank symbols.
            lpz = self.ctc.argmax(enc_output)
            collapsed_indices = [x[0] for x in groupby(lpz[0])]
            hyp = [x for x in filter(lambda x: x != self.blank, collapsed_indices)]
            nbest_hyps = [{"score": 0.0, "yseq": [self.sos] + hyp}]
            if recog_args.beam_size > 1:
                raise NotImplementedError("Pure CTC beam search is not implemented.")
            # TODO(hirofumi0810): Implement beam search
            return nbest_hyps
        elif self.mtlalpha > 0 and recog_args.ctc_weight > 0.0:
            # Joint decoding: keep CTC log-probabilities for prefix scoring.
            lpz = self.ctc.log_softmax(enc_output)
            lpz = lpz.squeeze(0)
        else:
            lpz = None
        h = enc_output.squeeze(0)
        logging.info("input lengths: " + str(h.size(0)))
        # search params
        beam = recog_args.beam_size
        penalty = recog_args.penalty
        ctc_weight = recog_args.ctc_weight
        # prepare <sos>
        y = self.sos
        # vy is a reusable 1-element tensor holding the last emitted token,
        # consumed by the RNNLM predict() call below.
        vy = h.new_zeros(1).long()
        if recog_args.maxlenratio == 0:
            maxlen = h.shape[0]
        else:
            # maxlen >= 1
            maxlen = max(1, int(recog_args.maxlenratio * h.size(0)))
        minlen = int(recog_args.minlenratio * h.size(0))
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))
        # initialize hypothesis with <sos> only
        if rnnlm:
            hyp = {"score": 0.0, "yseq": [y], "rnnlm_prev": None}
        else:
            hyp = {"score": 0.0, "yseq": [y]}
        if lpz is not None:
            ctc_prefix_score = CTCPrefixScore(lpz.detach().numpy(), 0, self.eos, numpy)
            hyp["ctc_state_prev"] = ctc_prefix_score.initial_state()
            hyp["ctc_score_prev"] = 0.0
            if ctc_weight != 1.0:
                # pre-pruning based on attention scores
                ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
            else:
                ctc_beam = lpz.shape[-1]
        hyps = [hyp]
        ended_hyps = []
        traced_decoder = None
        for i in range(maxlen):
            logging.debug("position " + str(i))
            hyps_best_kept = []
            for hyp in hyps:
                vy[0] = hyp["yseq"][i]
                # get nbest local scores and their ids
                ys_mask = subsequent_mask(i + 1).unsqueeze(0)
                ys = torch.tensor(hyp["yseq"]).unsqueeze(0)
                # FIXME: jit does not match non-jit result
                if use_jit:
                    if traced_decoder is None:
                        traced_decoder = torch.jit.trace(
                            self.decoder.forward_one_step, (ys, ys_mask, enc_output)
                        )
                    local_att_scores = traced_decoder(ys, ys_mask, enc_output)[0]
                else:
                    local_att_scores = self.decoder.forward_one_step(
                        ys, ys_mask, enc_output
                    )[0]
                if rnnlm:
                    # Shallow fusion: add weighted LM scores to attention scores.
                    rnnlm_state, local_lm_scores = rnnlm.predict(hyp["rnnlm_prev"], vy)
                    local_scores = (
                        local_att_scores + recog_args.lm_weight * local_lm_scores
                    )
                else:
                    local_scores = local_att_scores
                if lpz is not None:
                    # Keep the ctc_beam best candidates by attention score,
                    # then rescore that short list with the CTC prefix scorer.
                    local_best_scores, local_best_ids = torch.topk(
                        local_att_scores, ctc_beam, dim=1
                    )
                    ctc_scores, ctc_states = ctc_prefix_score(
                        hyp["yseq"], local_best_ids[0], hyp["ctc_state_prev"]
                    )
                    # Interpolate attention and (incremental) CTC prefix scores.
                    local_scores = (1.0 - ctc_weight) * local_att_scores[
                        :, local_best_ids[0]
                    ] + ctc_weight * torch.from_numpy(
                        ctc_scores - hyp["ctc_score_prev"]
                    )
                    if rnnlm:
                        local_scores += (
                            recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]]
                        )
                    local_best_scores, joint_best_ids = torch.topk(
                        local_scores, beam, dim=1
                    )
                    # Map indices in the pruned list back to vocabulary ids.
                    local_best_ids = local_best_ids[:, joint_best_ids[0]]
                else:
                    local_best_scores, local_best_ids = torch.topk(
                        local_scores, beam, dim=1
                    )
                for j in range(beam):
                    # Extend the current hypothesis with the j-th best token.
                    new_hyp = {}
                    new_hyp["score"] = hyp["score"] + float(local_best_scores[0, j])
                    new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"]))
                    new_hyp["yseq"][: len(hyp["yseq"])] = hyp["yseq"]
                    new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[0, j])
                    if rnnlm:
                        new_hyp["rnnlm_prev"] = rnnlm_state
                    if lpz is not None:
                        new_hyp["ctc_state_prev"] = ctc_states[joint_best_ids[0, j]]
                        new_hyp["ctc_score_prev"] = ctc_scores[joint_best_ids[0, j]]
                    # will be (2 x beam) hyps at most
                    hyps_best_kept.append(new_hyp)
                hyps_best_kept = sorted(
                    hyps_best_kept, key=lambda x: x["score"], reverse=True
                )[:beam]
            # sort and get nbest
            hyps = hyps_best_kept
            logging.debug("number of pruned hypotheses: " + str(len(hyps)))
            if char_list is not None:
                logging.debug(
                    "best hypo: "
                    + "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]])
                )
            # add eos in the final loop to avoid that there are no ended hyps
            if i == maxlen - 1:
                logging.info("adding <eos> in the last position in the loop")
                for hyp in hyps:
                    hyp["yseq"].append(self.eos)
            # add ended hypotheses to a final list, and remove them from the
            # current hypotheses (this can be a problem: number of hyps < beam)
            remained_hyps = []
            for hyp in hyps:
                if hyp["yseq"][-1] == self.eos:
                    # only store the sequence that has more than minlen outputs
                    # also add penalty
                    if len(hyp["yseq"]) > minlen:
                        hyp["score"] += (i + 1) * penalty
                        if rnnlm:  # Word LM needs to add final <eos> score
                            hyp["score"] += recog_args.lm_weight * rnnlm.final(
                                hyp["rnnlm_prev"]
                            )
                        ended_hyps.append(hyp)
                else:
                    remained_hyps.append(hyp)
            # end detection
            if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
                logging.info("end detected at %d", i)
                break
            hyps = remained_hyps
            if len(hyps) > 0:
                logging.debug("remeined hypothes: " + str(len(hyps)))
            else:
                logging.info("no hypothesis. Finish decoding.")
                break
            if char_list is not None:
                for hyp in hyps:
                    logging.debug(
                        "hypo: " + "".join([char_list[int(x)] for x in hyp["yseq"][1:]])
                    )
            logging.debug("number of ended hypotheses: " + str(len(ended_hyps)))
        nbest_hyps = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[
            : min(len(ended_hyps), recog_args.nbest)
        ]
        # check number of hypotheses
        if len(nbest_hyps) == 0:
            logging.warning(
                "there is no N-best results, perform recognition "
                "again with smaller minlenratio."
            )
            # should copy because Namespace will be overwritten globally
            recog_args = Namespace(**vars(recog_args))
            recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
            return self.recognize(x, recog_args, char_list, rnnlm)
        logging.info("total log probability: " + str(nbest_hyps[0]["score"]))
        logging.info(
            "normalized log probability: "
            + str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"]))
        )
        return nbest_hyps
def calculate_all_attentions(self, xs_pad, ilens, ys_pad):
"""E2E attention calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:return: attention weights (B, H, Lmax, Tmax)
:rtype: float ndarray
"""
self.eval()
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad)
ret = dict()
for name, m in self.named_modules():
if (
isinstance(m, MultiHeadedAttention)
or isinstance(m, DynamicConvolution)
or isinstance(m, RelPositionMultiHeadedAttention)
):
ret[name] = m.attn.cpu().numpy()
if isinstance(m, DynamicConvolution2D):
ret[name + "_time"] = m.attn_t.cpu().numpy()
ret[name + "_freq"] = m.attn_f.cpu().numpy()
self.train()
return ret
def calculate_all_ctc_probs(self, xs_pad, ilens, ys_pad):
"""E2E CTC probability calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:return: CTC probability (B, Tmax, vocab)
:rtype: float ndarray
"""
ret = None
if self.mtlalpha == 0:
return ret
self.eval()
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad)
for name, m in self.named_modules():
if isinstance(m, CTC) and m.probs is not None:
ret = m.probs.cpu().numpy()
self.train()
return ret
| 22,522 | 38.863717 | 88 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.