repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
espnet | espnet-master/test/test_e2e_st.py | # coding: utf-8
# Copyright 2019 Hirofumi Inaguma
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
import importlib
import os
import tempfile
from test.utils_test import make_dummy_json_st
import chainer
import numpy as np
import pytest
import torch
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.utils.training.batchfy import make_batchset
def make_arg(**kwargs):
    """Build the default E2E-ST train/decode options as an argparse.Namespace.

    Keyword arguments override individual defaults.
    """
    base = dict(
        elayers=1,
        subsample="1_2_2_1_1",
        etype="vggblstm",
        eunits=16,
        eprojs=8,
        dtype="lstm",
        dlayers=1,
        dunits=16,
        atype="add",
        aheads=2,
        awin=5,
        aconv_chans=4,
        aconv_filts=10,
        mtlalpha=0.0,
        lsm_type="",
        lsm_weight=0.0,
        sampling_probability=0.0,
        adim=16,
        dropout_rate=0.0,
        dropout_rate_decoder=0.0,
        nbest=5,
        beam_size=2,
        penalty=0.5,
        maxlenratio=1.0,
        minlenratio=0.0,
        ctc_weight=0.0,
        ctc_window_margin=0,  # dummy
        lm_weight=0.0,
        rnnlm=None,
        streaming_min_blank_dur=10,
        streaming_onset_margin=2,
        streaming_offset_margin=2,
        verbose=2,
        char_list=["あ", "い", "う", "え", "お"],
        outdir=None,
        ctc_type="builtin",
        report_bleu=False,
        report_cer=False,
        report_wer=False,
        sym_space="<space>",
        sym_blank="<blank>",
        sortagrad=0,
        grad_noise=False,
        context_residual=False,
        multilingual=False,
        replace_sos=False,
        tgt_lang=False,
        asr_weight=0.0,
        mt_weight=0.0,
    )
    # kwargs win over defaults
    return argparse.Namespace(**{**base, **kwargs})
def prepare_inputs(
    mode, ilens=[20, 15], olens_tgt=[4, 3], olens_src=[3, 2], is_cuda=False
):
    """Create a random padded (xs, ilens, ys_tgt, ys_src) batch for *mode*.

    Only the "pytorch" backend is implemented; "chainer" raises
    NotImplementedError and anything else raises ValueError.
    """
    np.random.seed(1)
    assert len(ilens) == len(olens_tgt)
    feats = [np.random.randn(n, 40).astype(np.float32) for n in ilens]
    tgt_seqs = [np.random.randint(1, 5, n).astype(np.int32) for n in olens_tgt]
    src_seqs = [np.random.randint(1, 5, n).astype(np.int32) for n in olens_src]
    feat_lens = np.array([f.shape[0] for f in feats], dtype=np.int32)
    if mode == "chainer":
        raise NotImplementedError
    if mode != "pytorch":
        raise ValueError("Invalid mode")
    feat_lens = torch.from_numpy(feat_lens).long()
    xs_pad = pad_list([torch.from_numpy(f).float() for f in feats], 0)
    # -1 is the ignore index for padded label positions
    ys_pad_tgt = pad_list([torch.from_numpy(t).long() for t in tgt_seqs], -1)
    ys_pad_src = pad_list([torch.from_numpy(s).long() for s in src_seqs], -1)
    if is_cuda:
        xs_pad = xs_pad.cuda()
        feat_lens = feat_lens.cuda()
        ys_pad_tgt = ys_pad_tgt.cuda()
        ys_pad_src = ys_pad_src.cuda()
    return xs_pad, feat_lens, ys_pad_tgt, ys_pad_src
def convert_batch(batch, backend="pytorch", is_cuda=False, idim=40, odim=5):
    """Turn a make_batchset batch description into random padded tensors.

    Sequence lengths are read from the batch's shape metadata; the actual
    feature/label values are random.
    """
    ilens = np.array([item[1]["input"][0]["shape"][0] for item in batch])
    olens_tgt = np.array([item[1]["output"][0]["shape"][0] for item in batch])
    olens_src = np.array([item[1]["output"][1]["shape"][0] for item in batch])
    xs = [np.random.randn(n, idim).astype(np.float32) for n in ilens]
    ys_tgt = [np.random.randint(1, odim, n).astype(np.int32) for n in olens_tgt]
    ys_src = [np.random.randint(1, odim, n).astype(np.int32) for n in olens_src]
    if backend != "pytorch":
        raise NotImplementedError
    xs = pad_list([torch.from_numpy(x).float() for x in xs], 0)
    ilens = torch.from_numpy(ilens).long()
    # -1 is the ignore index for padded label positions
    ys_tgt = pad_list([torch.from_numpy(y).long() for y in ys_tgt], -1)
    ys_src = pad_list([torch.from_numpy(y).long() for y in ys_src], -1)
    if is_cuda:
        xs, ilens = xs.cuda(), ilens.cuda()
        ys_tgt, ys_src = ys_tgt.cuda(), ys_src.cuda()
    return xs, ilens, ys_tgt, ys_src
@pytest.mark.parametrize(
    "module, model_dict",
    [
        # default config and deeper encoder/decoder
        ("espnet.nets.pytorch_backend.e2e_st", {}),
        ("espnet.nets.pytorch_backend.e2e_st", {"elayers": 2, "dlayers": 2}),
        # RNN encoder architecture variants
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "grup"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "lstmp"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "bgrup"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "blstmp"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "bgru"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "blstm"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vgggru"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vgggrup"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vgglstm"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vgglstmp"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggbgru"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggbgrup"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "dtype": "gru"}),
        # attention mechanism variants
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "noatt"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "add"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "dot"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "coverage"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "coverage_location"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "location2d"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "location_recurrent"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "multi_head_dot"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "multi_head_add"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "multi_head_loc"}),
        ("espnet.nets.pytorch_backend.e2e_st", {"etype": "vggblstmp", "atype": "multi_head_multi_res_loc"}),
        # auxiliary ASR/MT multi-task weights
        ("espnet.nets.pytorch_backend.e2e_st", {"asr_weight": 0.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"asr_weight": 0.2}),
        ("espnet.nets.pytorch_backend.e2e_st", {"mt_weight": 0.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"mt_weight": 0.2}),
        ("espnet.nets.pytorch_backend.e2e_st", {"asr_weight": 0.2, "mtlalpha": 0.0, "mt_weight": 0.2}),
        ("espnet.nets.pytorch_backend.e2e_st", {"asr_weight": 0.2, "mtlalpha": 0.5, "mt_weight": 0.2}),
        ("espnet.nets.pytorch_backend.e2e_st", {"asr_weight": 0.2, "mtlalpha": 1.0, "mt_weight": 0.2}),
        # miscellaneous training options
        ("espnet.nets.pytorch_backend.e2e_st", {"sampling_probability": 0.5}),
        ("espnet.nets.pytorch_backend.e2e_st", {"context_residual": True}),
        ("espnet.nets.pytorch_backend.e2e_st", {"grad_noise": True}),
        # CER/WER reporting combinations across asr_weight/mtlalpha values
        ("espnet.nets.pytorch_backend.e2e_st", {"report_cer": True, "asr_weight": 0.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_cer": True, "asr_weight": 0.5, "mtlalpha": 0.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_cer": True, "asr_weight": 0.5, "mtlalpha": 0.5}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_cer": True, "asr_weight": 0.5, "mtlalpha": 1.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_wer": True, "asr_weight": 0.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_wer": True, "asr_weight": 0.5, "mtlalpha": 0.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_wer": True, "asr_weight": 0.5, "mtlalpha": 0.5}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_wer": True, "asr_weight": 0.5, "mtlalpha": 1.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_cer": True, "report_wer": True}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_cer": True, "report_wer": True, "asr_weight": 0.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_cer": True, "report_wer": True, "asr_weight": 0.5, "mtlalpha": 0.0}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_cer": True, "report_wer": True, "asr_weight": 0.5, "mtlalpha": 0.5}),
        ("espnet.nets.pytorch_backend.e2e_st", {"report_cer": True, "report_wer": True, "asr_weight": 0.5, "mtlalpha": 1.0}),
    ],
)
def test_model_trainable_and_decodable(module, model_dict):
    """Each configuration must compute a loss, backprop, and decode."""
    args = make_arg(**model_dict)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch")
    else:
        raise NotImplementedError
    m = importlib.import_module(module)
    # 40-dim features, 5 output tokens (matches prepare_inputs)
    model = m.E2E(40, 5, args)
    loss = model(*batch)
    if isinstance(loss, tuple):
        # chainer return several values as tuple
        loss[0].backward()  # trainable
    else:
        loss.backward()  # trainable
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randn(10, 40)
        model.translate(in_data, args, args.char_list)  # decodable
        if "pytorch" in module:
            batch_in_data = [np.random.randn(10, 40), np.random.randn(5, 40)]
            model.translate_batch(
                batch_in_data, args, args.char_list
            )  # batch decodable
@pytest.mark.parametrize("module", ["pytorch"])
def test_gradient_noise_injection(module):
    """Gradients with grad_noise enabled should differ from a noise-free run."""
    args = make_arg(grad_noise=True)
    args_org = make_arg()
    dummy_json = make_dummy_json_st(2, [10, 20], [10, 20], [10, 20], idim=20, odim=5)
    if module == "pytorch":
        import espnet.nets.pytorch_backend.e2e_st as m
    else:
        raise NotImplementedError
    batchset = make_batchset(dummy_json, 2, 2**10, 2**10, shortest_first=True)
    model = m.E2E(20, 5, args)
    model_org = m.E2E(20, 5, args_org)
    for batch in batchset:
        loss = model(*convert_batch(batch, module, idim=20, odim=5))
        loss_org = model_org(*convert_batch(batch, module, idim=20, odim=5))
        loss.backward()
        # probe an arbitrary parameter (index 10) from each model
        grad = [param.grad for param in model.parameters()][10]
        loss_org.backward()
        grad_org = [param.grad for param in model_org.parameters()][10]
        # NOTE(review): compares the first slice of the two gradients;
        # assumes the comparison reduces to a single boolean — confirm
        assert grad[0] != grad_org[0]
@pytest.mark.parametrize("module", ["pytorch"])
def test_sortagrad_trainable(module):
    """Sortagrad (shortest-first) batching should train and decode."""
    args = make_arg(sortagrad=1)
    dummy_json = make_dummy_json_st(4, [10, 20], [10, 20], [10, 20], idim=20, odim=5)
    if module == "pytorch":
        import espnet.nets.pytorch_backend.e2e_st as m
    else:
        raise NotImplementedError
    batchset = make_batchset(dummy_json, 2, 2**10, 2**10, shortest_first=True)
    model = m.E2E(20, 5, args)
    for batch in batchset:
        loss = model(*convert_batch(batch, module, idim=20, odim=5))
        if isinstance(loss, tuple):
            # chainer return several values as tuple
            loss[0].backward()  # trainable
        else:
            loss.backward()  # trainable
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randn(50, 20)
        model.translate(in_data, args, args.char_list)
@pytest.mark.parametrize("module", ["pytorch"])
def test_sortagrad_trainable_with_batch_bins(module):
    """Batches built by batch_bins should respect limits and still train."""
    args = make_arg(sortagrad=1)
    idim = 20
    odim = 5
    dummy_json = make_dummy_json_st(
        4, [10, 20], [10, 20], [10, 20], idim=idim, odim=odim
    )
    if module == "pytorch":
        import espnet.nets.pytorch_backend.e2e_st as m
    else:
        raise NotImplementedError
    batch_elems = 2000
    batchset = make_batchset(dummy_json, batch_bins=batch_elems, shortest_first=True)
    for batch in batchset:
        n = 0
        for uttid, info in batch:
            ilen = int(info["input"][0]["shape"][0])
            olen = int(info["output"][0]["shape"][0])
            # accumulate element count (features + labels) per batch
            # NOTE(review): n is accumulated but never asserted; the
            # assertion below checks olen only — possibly intended to be n
            n += ilen * idim + olen * odim
            assert olen < batch_elems
    model = m.E2E(20, 5, args)
    for batch in batchset:
        loss = model(*convert_batch(batch, module, idim=20, odim=5))
        if isinstance(loss, tuple):
            # chainer return several values as tuple
            loss[0].backward()  # trainable
        else:
            loss.backward()  # trainable
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randn(100, 20)
        model.translate(in_data, args, args.char_list)
@pytest.mark.parametrize("module", ["pytorch"])
def test_sortagrad_trainable_with_batch_frames(module):
    """Batches built by frame limits should respect them and still train."""
    args = make_arg(sortagrad=1)
    idim = 20
    odim = 5
    dummy_json = make_dummy_json_st(
        4, [10, 20], [10, 20], [10, 20], idim=idim, odim=odim
    )
    if module == "pytorch":
        import espnet.nets.pytorch_backend.e2e_st as m
    else:
        raise NotImplementedError
    batch_frames_in = 50
    batch_frames_out = 50
    batchset = make_batchset(
        dummy_json,
        batch_frames_in=batch_frames_in,
        batch_frames_out=batch_frames_out,
        shortest_first=True,
    )
    for batch in batchset:
        i = 0
        o = 0
        for uttid, info in batch:
            # total input/output frames in this batch must stay within limits
            i += int(info["input"][0]["shape"][0])
            o += int(info["output"][0]["shape"][0])
        assert i <= batch_frames_in
        assert o <= batch_frames_out
    model = m.E2E(20, 5, args)
    for batch in batchset:
        loss = model(*convert_batch(batch, module, idim=20, odim=5))
        if isinstance(loss, tuple):
            # chainer return several values as tuple
            loss[0].backward()  # trainable
        else:
            loss.backward()  # trainable
    with torch.no_grad(), chainer.no_backprop_mode():
        in_data = np.random.randn(100, 20)
        model.translate(in_data, args, args.char_list)
def init_torch_weight_const(m, val):
    """Fill every multi-dimensional parameter of torch module *m* with *val*.

    One-dimensional parameters (e.g. biases) are left untouched.
    """
    for param in m.parameters():
        if param.dim() >= 2:
            param.data.fill_(val)
def init_chainer_weight_const(m, val):
    """Set every multi-dimensional chainer parameter of *m* to *val*.

    One-dimensional parameters (e.g. biases) are left untouched.
    """
    for param in m.params():
        if param.data.ndim >= 2:
            param.data[:] = val
@pytest.mark.parametrize("etype", ["blstmp", "vggblstmp"])
def test_mtl_loss(etype):
    """A 50/50 ASR/ST multi-task loss should be backpropable."""
    th = importlib.import_module("espnet.nets.pytorch_backend.e2e_st")
    args = make_arg(etype=etype)
    th_model = th.E2E(40, 5, args)
    # constant weights make the run deterministic
    const = 1e-4
    init_torch_weight_const(th_model, const)
    th_batch = prepare_inputs("pytorch")
    th_model(*th_batch)
    # forward() stores the per-task losses on the model
    th_asr, th_st = th_model.loss_asr, th_model.loss_st
    # test grads in mtl mode
    th_loss = th_asr * 0.5 + th_st * 0.5
    th_model.zero_grad()
    th_loss.backward()
@pytest.mark.parametrize("etype", ["blstmp", "vggblstmp"])
def test_zero_length_target(etype):
    """Forward should not crash when one target in the batch has length 0."""
    th = importlib.import_module("espnet.nets.pytorch_backend.e2e_st")
    args = make_arg(etype=etype)
    th_model = th.E2E(40, 5, args)
    th_batch = prepare_inputs("pytorch", olens_tgt=[4, 0], olens_src=[3, 0])
    th_model(*th_batch)
    # NOTE: We ignore all zero length case because chainer also fails.
    # Have a nice data-prep!
    # out_data = ""
    # data = [
    #     ("aaa", dict(feat=np.random.randn(200, 40).astype(np.float32), tokenid="")),
    #     ("bbb", dict(feat=np.random.randn(100, 40).astype(np.float32), tokenid="")),
    #     ("cc", dict(feat=np.random.randn(100, 40).astype(np.float32), tokenid=""))
    # ]
    # th_asr, th_st, th_acc = th_model(data)
@pytest.mark.parametrize(
    "module, atype",
    [
        ("espnet.nets.pytorch_backend.e2e_st", "noatt"),
        ("espnet.nets.pytorch_backend.e2e_st", "dot"),
        ("espnet.nets.pytorch_backend.e2e_st", "add"),
        ("espnet.nets.pytorch_backend.e2e_st", "location"),
        ("espnet.nets.pytorch_backend.e2e_st", "coverage"),
        ("espnet.nets.pytorch_backend.e2e_st", "coverage_location"),
        ("espnet.nets.pytorch_backend.e2e_st", "location2d"),
        ("espnet.nets.pytorch_backend.e2e_st", "location_recurrent"),
        ("espnet.nets.pytorch_backend.e2e_st", "multi_head_dot"),
        ("espnet.nets.pytorch_backend.e2e_st", "multi_head_add"),
        ("espnet.nets.pytorch_backend.e2e_st", "multi_head_loc"),
        ("espnet.nets.pytorch_backend.e2e_st", "multi_head_multi_res_loc"),
    ],
)
def test_calculate_all_attentions(module, atype):
    """calculate_all_attentions should yield weights for each attention type."""
    m = importlib.import_module(module)
    args = make_arg(atype=atype)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch")
    else:
        raise NotImplementedError
    model = m.E2E(40, 5, args)
    with chainer.no_backprop_mode():
        if "pytorch" in module:
            att_ws = model.calculate_all_attentions(*batch)[0]
        else:
            raise NotImplementedError
        print(att_ws.shape)
@pytest.mark.parametrize(
    "module, mtlalpha",
    [
        ("espnet.nets.pytorch_backend.e2e_st", 0.0),
        ("espnet.nets.pytorch_backend.e2e_st", 0.5),
        ("espnet.nets.pytorch_backend.e2e_st", 1.0),
    ],
)
def test_calculate_all_ctc_probs(module, mtlalpha):
    """CTC posteriors are produced iff the CTC weight (mtlalpha) is positive."""
    m = importlib.import_module(module)
    args = make_arg(mtlalpha=mtlalpha, asr_weight=0.3)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch")
    else:
        batch = prepare_inputs("chainer")
    model = m.E2E(40, 5, args)
    with chainer.no_backprop_mode():
        if "pytorch" in module:
            ctc_probs = model.calculate_all_ctc_probs(*batch)
            if mtlalpha > 0:
                print(ctc_probs.shape)
            else:
                # no CTC branch when mtlalpha == 0
                assert ctc_probs is None
        else:
            raise NotImplementedError
def test_torch_save_and_load():
    """Round-trip model parameters through torch_save/torch_load.

    Parameters are randomized, saved, zeroed, reloaded, and compared.
    """
    m = importlib.import_module("espnet.nets.pytorch_backend.e2e_st")
    utils = importlib.import_module("espnet.asr.asr_utils")
    args = make_arg()
    model = m.E2E(40, 5, args)
    # initialize randomly
    for p in model.parameters():
        p.data.uniform_()
    if not os.path.exists(".pytest_cache"):
        os.makedirs(".pytest_cache")
    # mkstemp instead of the insecure, deprecated tempfile.mktemp
    fd, tmppath = tempfile.mkstemp()
    os.close(fd)
    try:
        utils.torch_save(tmppath, model)
        p_saved = [p.data.numpy() for p in model.parameters()]
        # overwrite with zeros, then reload and compare
        for p in model.parameters():
            p.data.zero_()
        utils.torch_load(tmppath, model)
        for p1, p2 in zip(p_saved, model.parameters()):
            np.testing.assert_array_equal(p1, p2.data.numpy())
    finally:
        # always clean up the checkpoint, even if an assertion fails
        if os.path.exists(tmppath):
            os.remove(tmppath)
@pytest.mark.skipif(
    not torch.cuda.is_available() and not chainer.cuda.available, reason="gpu required"
)
@pytest.mark.parametrize("module", ["espnet.nets.pytorch_backend.e2e_st"])
def test_gpu_trainable(module):
    """Forward/backward on a single GPU should succeed."""
    m = importlib.import_module(module)
    args = make_arg()
    model = m.E2E(40, 5, args)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch", is_cuda=True)
        model.cuda()
    else:
        raise NotImplementedError
    loss = model(*batch)
    if isinstance(loss, tuple):
        # chainer return several values as tuple
        loss[0].backward()  # trainable
    else:
        loss.backward()  # trainable
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize("module", ["espnet.nets.pytorch_backend.e2e_st"])
def test_multi_gpu_trainable(module):
    """DataParallel training across two GPUs should succeed."""
    m = importlib.import_module(module)
    ngpu = 2
    device_ids = list(range(ngpu))
    args = make_arg()
    model = m.E2E(40, 5, args)
    if "pytorch" in module:
        model = torch.nn.DataParallel(model, device_ids)
        batch = prepare_inputs("pytorch", is_cuda=True)
        model.cuda()
        # scale by 1/ngpu and backprop with one gradient per replica
        loss = 1.0 / ngpu * model(*batch)
        loss.backward(loss.new_ones(ngpu))  # trainable
    else:
        raise NotImplementedError
| 20,465 | 33.629442 | 88 | py |
espnet | espnet-master/test/test_e2e_asr_maskctc.py | import argparse
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_asr_maskctc import E2E
from espnet.nets.pytorch_backend.maskctc.add_mask_token import mask_uniform
from espnet.nets.pytorch_backend.transformer import plot
def make_arg(**kwargs):
    """Build the default Mask-CTC E2E options as an argparse.Namespace.

    Keyword arguments override individual defaults.
    """
    base = dict(
        adim=2,
        aheads=2,
        dropout_rate=0.0,
        transformer_attn_dropout_rate=None,
        elayers=1,
        eunits=2,
        dlayers=1,
        dunits=2,
        sym_space="<space>",
        sym_blank="<blank>",
        transformer_decoder_selfattn_layer_type="selfattn",
        transformer_encoder_selfattn_layer_type="selfattn",
        transformer_init="pytorch",
        transformer_input_layer="conv2d",
        transformer_length_normalized_loss=False,
        report_cer=False,
        report_wer=False,
        mtlalpha=0.3,
        lsm_weight=0.001,
        wshare=4,
        char_list=["<blank>", "a", "e", "<eos>"],
        ctc_type="builtin",
    )
    # kwargs win over defaults
    return argparse.Namespace(**{**base, **kwargs})
def prepare(args):
    """Build a small Mask-CTC E2E model and a random padded batch.

    Returns (model, xs, ilens, ys, data_json, uttid_list).
    """
    idim = 10
    odim = len(args.char_list)
    model = E2E(idim, odim, args)
    batchsize = 2
    xs = torch.randn(batchsize, 15, idim)
    ilens = [15, 10]
    n_token = model.odim - 2  # w/o <eos>/<sos>, <mask>
    ys = (torch.rand(batchsize, 10) * n_token % n_token).long()
    olens = [7, 6]
    # mask out positions beyond each sequence's true length
    for b in range(batchsize):
        xs[b, ilens[b] :] = -1
        ys[b, olens[b] :] = model.ignore_id
    data = {}
    uttid_list = []
    for b in range(batchsize):
        uttid = "utt%d" % b
        data[uttid] = {
            "input": [{"shape": [ilens[b], idim]}],
            "output": [{"shape": [olens[b]]}],
        }
        uttid_list.append(uttid)
    return model, xs, torch.tensor(ilens), ys, data, uttid_list
def test_mask():
    """Verify special-token ids and the uniform-masking invariant."""
    args = make_arg()
    model, x, ilens, y, data, uttid_list = prepare(args)
    # check <sos>/<eos>, <mask> position
    n_char = len(args.char_list) + 1
    assert model.sos == n_char - 2
    assert model.eos == n_char - 2
    assert model.mask_token == n_char - 1
    yi, yo = mask_uniform(y, model.mask_token, model.eos, model.ignore_id)
    # positions masked in the input must be exactly those kept in the target
    assert (
        (yi == model.mask_token).detach().numpy()
        == (yo != model.ignore_id).detach().numpy()
    ).all()
def _savefn(*args, **kwargs):
return
# Config combining Mask-CTC decoding with an intermediate CTC loss branch.
maskctc_interctc = {
    "maskctc_n_iterations": 0,
    "maskctc_probability_threshold": 0.5,
    "elayers": 2,
    "intermediate_ctc_weight": 0.3,
    "intermediate_ctc_layer": "1",
}
@pytest.mark.parametrize(
    "model_dict",
    [
        ({"maskctc_n_iterations": 1, "maskctc_probability_threshold": 0.0}),
        ({"maskctc_n_iterations": 1, "maskctc_probability_threshold": 0.5}),
        ({"maskctc_n_iterations": 2, "maskctc_probability_threshold": 0.5}),
        ({"maskctc_n_iterations": 0, "maskctc_probability_threshold": 0.5}),
        maskctc_interctc,
    ],
)
def test_transformer_trainable_and_decodable(model_dict):
    """Mask-CTC model should train one step, plot attentions, and decode."""
    args = make_arg(**model_dict)
    model, x, ilens, y, data, uttid_list = prepare(args)
    # decoding params
    recog_args = argparse.Namespace(
        maskctc_n_iterations=args.maskctc_n_iterations,
        maskctc_probability_threshold=args.maskctc_probability_threshold,
    )
    # test training
    optim = torch.optim.Adam(model.parameters(), 0.01)
    loss = model(x, ilens, y)
    optim.zero_grad()
    loss.backward()
    optim.step()
    # test attention plot
    attn_dict = model.calculate_all_attentions(x[0:1], ilens[0:1], y[0:1])
    plot.plot_multi_head_attention(data, uttid_list, attn_dict, "", savefn=_savefn)
    # test decoding
    with torch.no_grad():
        model.recognize(x[0, : ilens[0]].numpy(), recog_args, args.char_list)
| 3,708 | 27.530769 | 83 | py |
espnet | espnet-master/test/test_e2e_asr_transducer.py | # coding: utf-8
import argparse
import json
import tempfile
import numpy as np
import pytest
import torch
from packaging.version import parse as V
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.asr.pytorch_backend.asr_init import load_trained_model
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.e2e_asr_transducer import E2E
from espnet.nets.pytorch_backend.nets_utils import pad_list
# Feature gates for torch APIs whose behavior changed across versions
# (used by the dynamic-quantization tests below).
is_torch_1_4_plus = V(torch.__version__) >= V("1.4.0")
is_torch_1_5_plus = V(torch.__version__) >= V("1.5.0")
def get_default_train_args(**kwargs):
    """Default transducer training options, overridable via keyword args."""
    args = argparse.Namespace(
        etype="vggblstmp",
        elayers=1,
        subsample="1_2_2_1_1",
        eunits=4,
        eprojs=4,
        dtype="lstm",
        dlayers=1,
        dunits=4,
        dec_embed_dim=4,
        dropout_rate=0.0,
        dropout_rate_decoder=0.0,
        dropout_rate_embed_decoder=0.0,
        joint_dim=2,
        joint_activation_type="tanh",
        transducer_loss_weight=1.0,
        use_ctc_loss=False,
        ctc_loss_weight=0.0,
        ctc_loss_dropout_rate=0.0,
        use_lm_loss=False,
        lm_loss_weight=0.0,
        use_aux_transducer_loss=False,
        aux_transducer_loss_weight=0.0,
        aux_transducer_loss_enc_output_layers=[],
        use_symm_kl_div_loss=False,
        symm_kl_div_loss_weight=0.0,
        char_list=["a", "b", "c", "d"],
        sym_space="<space>",
        sym_blank="<blank>",
        report_cer=False,
        report_wer=False,
        verbose=0,
        outdir=None,
        rnnlm=None,
        model_module="espnet.nets.pytorch_backend.e2e_asr_transducer:E2E",
    )
    # apply per-test overrides on top of the defaults
    for key, value in kwargs.items():
        setattr(args, key, value)
    return args
def get_default_recog_args(**kwargs):
    """Default transducer decoding options, overridable via keyword args."""
    base = dict(
        batchsize=0,
        beam_size=1,
        nbest=1,
        verbose=0,
        search_type="default",
        nstep=1,
        max_sym_exp=2,
        prefix_alpha=2,
        u_max=5,
        expansion_gamma=2,
        expansion_beta=0.2,
        score_norm_transducer=True,
        rnnlm=None,
        lm_weight=0.1,
    )
    # kwargs win over defaults
    return argparse.Namespace(**{**base, **kwargs})
def get_default_scope_inputs():
    """Return (idim, odim, ilens, olens) shared by the transducer tests."""
    return 15, 4, [12, 8], [8, 4]
def get_lm():
    """Build a tiny character-level LSTM RNNLM for decoding tests."""
    char_list = ["<blank>", "<space>", "a", "b", "c", "d", "<eos>"]
    # 1 layer, 4 units — intentionally tiny to keep tests fast
    return lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(len(char_list), 1, 4, typ="lstm")
    )
def get_wordlm():
    """Build a tiny look-ahead word-level RNNLM for decoding tests."""
    char_list = ["<blank>", "<space>", "a", "b", "c", "d", "<eos>"]
    word_list = ["<blank>", "<unk>", "ab", "id", "ac", "bd", "<eos>"]
    char_dict = {c: i for i, c in enumerate(char_list)}
    word_dict = {w: i for i, w in enumerate(word_list)}
    # inner word-level RNNLM (1 layer, 8 units)
    inner = lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(len(word_list), 1, 8)
    )
    # wrap with the char-to-word look-ahead LM
    return lm_pytorch.ClassifierWithState(
        extlm_pytorch.LookAheadWordLM(inner.predictor, word_dict, char_dict)
    )
def prepare_inputs(idim, odim, ilens, olens, is_cuda=False):
    """Random padded (feats, feats_len, labels) batch for the transducer model."""
    np.random.seed(1)
    raw_feats = [np.random.randn(n, idim).astype(np.float32) for n in ilens]
    raw_labels = [np.random.randint(1, odim, n).astype(np.int32) for n in olens]
    feats_len = torch.from_numpy(
        np.array([f.shape[0] for f in raw_feats], dtype=np.int32)
    ).long()
    feats = pad_list([torch.from_numpy(f).float() for f in raw_feats], 0)
    # -1 is the ignore index for padded label positions
    labels = pad_list([torch.from_numpy(y).long() for y in raw_labels], -1)
    if is_cuda:
        feats, labels, feats_len = feats.cuda(), labels.cuda(), feats_len.cuda()
    return feats, feats_len, labels
@pytest.mark.parametrize(
    "train_dic, recog_dic",
    [
        # encoder/decoder architecture variants
        ({}, {}),
        ({"eprojs": 4}, {}),
        ({"dlayers": 2}, {}),
        ({"etype": "gru"}, {}),
        ({"etype": "blstm"}, {}),
        ({"etype": "blstmp", "elayers": 2, "eprojs": 4}, {}),
        ({"etype": "vgggru"}, {}),
        ({"etype": "vggbru"}, {}),
        ({"etype": "vgggrup", "elayers": 2, "eprojs": 4}, {}),
        ({"dtype": "gru"}, {}),
        ({"dtype": "bgrup"}, {}),
        ({"dtype": "gru", "dlayers": 2}, {}),
        ({"joint-activation-type": "relu"}, {}),
        ({"joint-activation-type": "swish"}, {}),
        ({}, {"score_norm_transducer": False}),
        ({"report_cer": True, "report_wer": True}, {}),
        # beam-search algorithm variants
        ({}, {"nbest": 2}),
        ({}, {"beam_size": 1}),
        ({}, {"beam_size": 2}),
        ({}, {"beam_size": 2, "search_type": "nsc"}),
        ({}, {"beam_size": 2, "search_type": "nsc", "nstep": 2, "prefix_alpha": 1}),
        ({}, {"beam_size": 2, "search_type": "tsd"}),
        ({}, {"beam_size": 2, "search_type": "tsd", "max-sym-exp": 3}),
        ({}, {"beam_size": 2, "search_type": "alsd"}),
        ({}, {"beam_size": 2, "search_type": "alsd", "u_max": 10}),
        ({}, {"beam_size": 2, "search_type": "maes", "nstep": 2}),
        # LM-fused decoding (character-level and look-ahead word-level LMs)
        ({}, {"beam_size": 2, "search_type": "default", "rnnlm": get_wordlm(), "lm_weight": 1.0}),
        ({}, {"beam_size": 2, "search_type": "nsc", "rnnlm": get_lm()}),
        ({}, {"beam_size": 2, "search_type": "nsc", "rnnlm": get_wordlm()}),
        ({}, {"beam_size": 2, "search_type": "nsc", "nstep": 2, "rnnlm": get_lm()}),
        ({}, {"beam_size": 2, "search_type": "nsc", "nstep": 2, "rnnlm": get_wordlm()}),
        ({}, {"beam_size": 2, "search_type": "alsd", "rnnlm": get_lm(), "lm_weight": 0.2}),
        ({}, {"beam_size": 2, "search_type": "alsd", "rnnlm": get_wordlm(), "lm_weight": 0.6}),
        ({}, {"beam_size": 2, "search_type": "tsd", "rnnlm": get_lm()}),
        ({}, {"beam_size": 2, "search_type": "tsd", "rnnlm": get_wordlm()}),
        ({}, {"beam_size": 2, "search_type": "maes", "nstep": 2, "rnnlm": get_wordlm()}),
    ],
)
def test_pytorch_transducer_trainable_and_decodable(train_dic, recog_dic):
    """Each train/decode configuration should train one step and decode."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    train_args = get_default_train_args(**train_dic)
    recog_args = get_default_recog_args(**recog_dic)
    model = E2E(idim, odim, train_args)
    batch = prepare_inputs(idim, odim, ilens, olens)
    # to avoid huge training time, cer/wer report
    # is only enabled at validation steps
    if train_args.report_cer or train_args.report_wer:
        model.training = False
    loss = model(*batch)
    loss.backward()
    beam_search = BeamSearchTransducer(
        decoder=model.dec,
        joint_network=model.transducer_tasks.joint_network,
        beam_size=recog_args.beam_size,
        lm=recog_args.rnnlm,
        lm_weight=recog_args.lm_weight,
        search_type=recog_args.search_type,
        max_sym_exp=recog_args.max_sym_exp,
        u_max=recog_args.u_max,
        nstep=recog_args.nstep,
        prefix_alpha=recog_args.prefix_alpha,
        score_norm=recog_args.score_norm_transducer,
    )
    with torch.no_grad():
        in_data = np.random.randn(20, idim)
        model.recognize(in_data, beam_search)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
    "train_dic",
    [
        {"report_cer": True, "report_wer": True},
    ],
)
@pytest.mark.execution_timeout(3.2)
def test_pytorch_multi_gpu_trainable(train_dic):
    """DataParallel forward/backward across two GPUs should succeed."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    train_args = get_default_train_args(**train_dic)
    ngpu = 2
    device_ids = list(range(ngpu))
    model = E2E(idim, odim, train_args)
    model = torch.nn.DataParallel(model, device_ids)
    model.cuda()
    batch = prepare_inputs(idim, odim, ilens, olens, is_cuda=True)
    # scale by 1/ngpu and backprop with one gradient per replica
    loss = 1.0 / ngpu * model(*batch)
    loss.backward(loss.new_ones(ngpu))
def test_calculate_plot_attention():
    """The transducer model has no attention, so the attention list is empty."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    train_args = get_default_train_args()
    model = E2E(idim, odim, train_args)
    batch = prepare_inputs(idim, odim, ilens, olens, is_cuda=False)
    assert model.calculate_all_attentions(*batch) == []
@pytest.mark.parametrize(
    "train_dic",
    [
        # auxiliary transducer loss on an intermediate encoder layer
        {
            "elayers": 3,
            "use_aux_transducer_loss": True,
            "aux_transducer_loss_enc_output_layers": [1],
        },
        # auxiliary CTC loss
        {
            "elayers": 2,
            "use_ctc_loss": True,
            "ctc_loss_weight": 0.5,
            "ctc_loss_dropout_rate": 0.1,
        },
        # auxiliary transducer loss + symmetric KL divergence loss
        {
            "etype": "vggblstm",
            "elayers": 3,
            "use_aux_transducer_loss": True,
            "aux_transducer_loss": True,
            "use_symm_kl_div_loss": True,
            "symm_kl_div_loss_weight": 0.5,
            "aux_transducer_loss_enc_output_layers": [0, 1],
        },
        # auxiliary LM loss on the decoder
        {"dlayers": 2, "use_lm_loss": True, "lm_loss_weight": 0.5},
    ],
)
def test_auxiliary_task(train_dic):
    """Auxiliary-loss configs should train, save, reload, and decode."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    train_args = get_default_train_args(**train_dic)
    recog_args = get_default_recog_args()
    model = E2E(idim, odim, train_args)
    batch = prepare_inputs(idim, odim, ilens, olens)
    loss = model(*batch)
    loss.backward()
    beam_search = BeamSearchTransducer(
        decoder=model.dec,
        joint_network=model.transducer_tasks.joint_network,
        beam_size=recog_args.beam_size,
        lm=recog_args.rnnlm,
        lm_weight=recog_args.lm_weight,
        search_type=recog_args.search_type,
        max_sym_exp=recog_args.max_sym_exp,
        u_max=recog_args.u_max,
        nstep=recog_args.nstep,
        prefix_alpha=recog_args.prefix_alpha,
        score_norm=recog_args.score_norm_transducer,
    )
    # NOTE(review): this temp dir is never removed; consider pytest's
    # tmp_path fixture instead
    tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
    torch.save(model.state_dict(), tmpdir + "/model.dummy.best")
    with open(tmpdir + "/model.json", "wb") as f:
        f.write(
            json.dumps(
                (idim, odim, vars(train_args)),
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            ).encode("utf_8")
        )
    with torch.no_grad():
        in_data = np.random.randn(20, idim)
        # reload from the dumped checkpoint and decode with it
        model, _ = load_trained_model(tmpdir + "/model.dummy.best", training=False)
        model.recognize(in_data, beam_search)
def test_invalid_aux_transducer_loss_enc_layers():
    """Bad auxiliary-loss layer specifications must raise ValueError."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    # no layer list given
    train_args = get_default_train_args(use_aux_transducer_loss=True)
    with pytest.raises(ValueError):
        E2E(idim, odim, train_args)
    # wrong type for the layer list
    train_args = get_default_train_args(
        use_aux_transducer_loss=True, aux_transducer_loss_enc_output_layers="foo"
    )
    with pytest.raises(ValueError):
        E2E(idim, odim, train_args)
    # out-of-range layer index
    train_args = get_default_train_args(
        use_aux_transducer_loss=True, aux_transducer_loss_enc_output_layers=[0, 4]
    )
    with pytest.raises(ValueError):
        E2E(idim, odim, train_args)
    # NOTE(review): presumably rejected because symm. KL-div requires
    # matching subsampling across the selected layers — confirm
    train_args = get_default_train_args(
        use_aux_transducer_loss=True,
        use_symm_kl_div_loss=True,
        aux_transducer_loss_enc_output_layers=[0],
        elayers=3,
        etype="blstmp",
        subsample="1_2_1",
    )
    with pytest.raises(ValueError):
        E2E(idim, odim, train_args)
@pytest.mark.parametrize(
    "train_dic",
    [
        {},
        {"etype": "vggblstm"},
    ],
)
@pytest.mark.parametrize(
    "recog_dic",
    [
        {},
        {"beam_size": 2, "search_type": "default"},
        {"beam_size": 2, "search_type": "alsd"},
        {"beam_size": 2, "search_type": "tsd"},
        {"beam_size": 2, "search_type": "nsc"},
        {"beam_size": 2, "search_type": "maes"},
    ],
)
@pytest.mark.parametrize(
    "quantize_dic",
    [
        {"mod": {torch.nn.Linear}, "dtype": torch.qint8},
        {"mod": {torch.nn.Linear}, "dtype": torch.float16},
        {"mod": {torch.nn.LSTM}, "dtype": torch.qint8},
        {"mod": {torch.nn.LSTM}, "dtype": torch.float16},
        {"mod": {torch.nn.Linear, torch.nn.LSTM}, "dtype": torch.qint8},
        {"mod": {torch.nn.Linear, torch.nn.LSTM}, "dtype": torch.float16},
    ],
)
def test_dynamic_quantization(train_dic, recog_dic, quantize_dic):
    """Dynamically-quantized models should decode, or fail as expected on old torch."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    train_args = get_default_train_args(**train_dic)
    recog_args = get_default_recog_args(**recog_dic)
    model = E2E(idim, odim, train_args)
    # fp16 Linear quantization is unsupported before torch 1.5
    if not is_torch_1_5_plus and (
        torch.nn.Linear in quantize_dic["mod"]
        and quantize_dic["dtype"] == torch.float16
    ):
        # In recognize(...) from asr.py we raise ValueError however
        # AssertionError is originaly raised by torch.
        with pytest.raises(AssertionError):
            model = torch.quantization.quantize_dynamic(
                model,
                quantize_dic["mod"],
                dtype=quantize_dic["dtype"],
            )
        pytest.skip("Skip rest of the test after checking AssertionError")
    else:
        model = torch.quantization.quantize_dynamic(
            model,
            quantize_dic["mod"],
            quantize_dic["dtype"],
        )
    beam_search = BeamSearchTransducer(
        decoder=model.dec,
        joint_network=model.transducer_tasks.joint_network,
        beam_size=recog_args.beam_size,
        lm=recog_args.rnnlm,
        lm_weight=recog_args.lm_weight,
        search_type=recog_args.search_type,
        max_sym_exp=recog_args.max_sym_exp,
        u_max=recog_args.u_max,
        nstep=recog_args.nstep,
        prefix_alpha=recog_args.prefix_alpha,
        score_norm=recog_args.score_norm_transducer,
        quantization=True,
    )
    with torch.no_grad():
        in_data = np.random.randn(20, idim)
        # quantized LSTM decoding is unsupported before torch 1.4
        if not is_torch_1_4_plus and torch.nn.LSTM in quantize_dic["mod"]:
            # Cf. previous comment
            with pytest.raises(AssertionError):
                model.recognize(in_data, beam_search)
        else:
            model.recognize(in_data, beam_search)
@pytest.mark.parametrize(
"train_dic, subsample",
[
({}, 4),
({"etype": "blstm"}, 1),
({"etype": "blstmp"}, 2),
],
)
def test_subsampling(train_dic, subsample):
    """The model must report the expected total subsampling factor."""
    idim, odim, ilens, olens = get_default_scope_inputs()
    model = E2E(idim, odim, get_default_train_args(**train_dic))

    assert model.get_total_subsampling_factor() == subsample
| 14,894 | 28.849699 | 88 | py |
espnet | espnet-master/test/test_e2e_vc_transformer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Wen-Chin Huang
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from argparse import Namespace
from math import floor
import numpy as np
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_vc_transformer import Transformer, subsequent_mask
from espnet.nets.pytorch_backend.nets_utils import pad_list
def make_transformer_args(**kwargs):
    """Return default Transformer-VC model arguments as a dict.

    Any keyword argument overrides (or extends) the corresponding default.
    """
    defaults = {
        "embed_dim": 32,
        "spk_embed_dim": None,
        "eprenet_conv_layers": 0,
        "eprenet_conv_filts": 0,
        "eprenet_conv_chans": 0,
        "dprenet_layers": 2,
        "dprenet_units": 32,
        "adim": 32,
        "aheads": 4,
        "elayers": 2,
        "eunits": 32,
        "dlayers": 2,
        "dunits": 32,
        "postnet_layers": 2,
        "postnet_filts": 5,
        "postnet_chans": 32,
        "eprenet_dropout_rate": 0.1,
        "dprenet_dropout_rate": 0.5,
        "postnet_dropout_rate": 0.1,
        "transformer_input_layer": "conv2d-scaled-pos-enc",
        "transformer_enc_dropout_rate": 0.1,
        "transformer_enc_positional_dropout_rate": 0.1,
        "transformer_enc_attn_dropout_rate": 0.0,
        "transformer_dec_dropout_rate": 0.1,
        "transformer_dec_positional_dropout_rate": 0.1,
        "transformer_dec_attn_dropout_rate": 0.3,
        "transformer_enc_dec_attn_dropout_rate": 0.0,
        "spk_embed_integration_type": "add",
        "use_masking": True,
        "use_weighted_masking": False,
        "bce_pos_weight": 1.0,
        "use_batch_norm": True,
        "use_scaled_pos_enc": True,
        "encoder_normalize_before": False,
        "decoder_normalize_before": False,
        "encoder_concat_after": False,
        "decoder_concat_after": False,
        "transformer_init": "pytorch",
        "initial_encoder_alpha": 1.0,
        "initial_decoder_alpha": 1.0,
        "reduction_factor": 1,
        "loss_type": "L1",
        "use_guided_attn_loss": False,
        "num_heads_applied_guided_attn": 2,
        "num_layers_applied_guided_attn": 2,
        "guided_attn_loss_sigma": 0.4,
        "guided_attn_loss_lambda": 1.0,
        "modules_applied_guided_attn": ["encoder", "decoder", "encoder-decoder"],
    }
    # kwargs win over the defaults.
    return {**defaults, **kwargs}
def make_inference_args(**kwargs):
    """Return default inference options, overridden by any keyword argument."""
    return {"threshold": 0.5, "maxlenratio": 5.0, "minlenratio": 0.0, **kwargs}
def prepare_inputs(
    idim, odim, ilens, olens, spk_embed_dim=None, device=torch.device("cpu")
):
    """Create a random padded batch for Transformer-VC training.

    Returns a dict with padded inputs/outputs, their lengths, stop-token
    labels, and (optionally) random speaker embeddings.
    """
    src_feats = [np.random.randn(length, idim) for length in ilens]
    tgt_feats = [np.random.randn(length, odim) for length in olens]
    ilens = torch.LongTensor(ilens).to(device)
    olens = torch.LongTensor(olens).to(device)
    xs = pad_list([torch.from_numpy(f).float() for f in src_feats], 0).to(device)
    ys = pad_list([torch.from_numpy(f).float() for f in tgt_feats], 0).to(device)

    # stop-token labels: 1 from the last valid frame onwards.
    labels = ys.new_zeros(ys.size(0), ys.size(1))
    for i, olen in enumerate(olens):
        labels[i, olen - 1 :] = 1

    batch = {
        "xs": xs,
        "ilens": ilens,
        "ys": ys,
        "labels": labels,
        "olens": olens,
    }

    if spk_embed_dim is not None:
        spembs = np.random.randn(len(ilens), spk_embed_dim)
        batch["spembs"] = torch.FloatTensor(spembs).to(device)

    return batch
@pytest.mark.parametrize(
"model_dict",
[
({}),
({"use_masking": False}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "concat"}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "add"}),
({"use_scaled_pos_enc": False}),
({"use_scaled_pos_enc": True}),
({"bce_pos_weight": 10.0}),
({"reduction_factor": 2}),
({"reduction_factor": 3}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"encoder_concat_after": True}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
({"loss_type": "L1"}),
({"loss_type": "L2"}),
({"loss_type": "L1+L2"}),
({"use_masking": False}),
({"use_masking": False, "use_weighted_masking": True}),
({"use_guided_attn_loss": True}),
({"use_guided_attn_loss": True, "reduction_factor": 3}),
(
{
"use_guided_attn_loss": True,
"modules_applied_guided_attn": ["encoder-decoder"],
}
),
(
{
"use_guided_attn_loss": True,
"modules_applied_guided_attn": ["encoder", "decoder"],
}
),
({"use_guided_attn_loss": True, "num_heads_applied_guided_attn": -1}),
({"use_guided_attn_loss": True, "num_layers_applied_guided_attn": -1}),
(
{
"use_guided_attn_loss": True,
"modules_applied_guided_attn": ["encoder"],
"elayers": 2,
"dlayers": 3,
}
),
],
)
def test_transformer_trainable_and_decodable(model_dict):
    """Run one optimizer step and inference on a tiny Transformer-VC model."""
    model_args = make_transformer_args(**model_dict)
    inference_args = make_inference_args()

    # small random batch
    idim, odim = 40, 40
    batch = prepare_inputs(idim, odim, [10, 5], [20, 15], model_args["spk_embed_dim"])

    model = Transformer(idim, odim, Namespace(**model_args))
    optimizer = torch.optim.Adam(model.parameters())

    # a single training step must succeed
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # ScaledPositionalEncoding's alpha must have received a gradient
    if model.use_scaled_pos_enc:
        for part in (model.encoder, model.decoder):
            assert part.embed[-1].alpha.grad is not None

    # inference must run without error
    model.eval()
    with torch.no_grad():
        spemb = None if model_args["spk_embed_dim"] is None else batch["spembs"][0]
        x = batch["xs"][0][: batch["ilens"][0]]
        model.inference(x, Namespace(**inference_args), spemb=spemb)
        model.calculate_all_attentions(**batch)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="gpu required")
@pytest.mark.parametrize(
"model_dict",
[
({}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "concat"}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "add"}),
({"use_masking": False}),
({"use_scaled_pos_enc": False}),
({"bce_pos_weight": 10.0}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
({"use_masking": False}),
({"use_masking": False, "use_weighted_masking": True}),
],
)
def test_transformer_gpu_trainable_and_decodable(model_dict):
    """Single-GPU training step and inference for the Transformer-VC model."""
    model_args = make_transformer_args(**model_dict)
    inference_args = make_inference_args()

    idim, odim = 40, 40
    device = torch.device("cuda")
    batch = prepare_inputs(
        idim,
        odim,
        [10, 5, 10, 5],
        [20, 15, 20, 15],
        model_args["spk_embed_dim"],
        device=device,
    )

    model = Transformer(idim, odim, Namespace(**model_args))
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters())

    # a single training step must succeed on GPU
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # ScaledPositionalEncoding's alpha must have received a gradient
    if model.use_scaled_pos_enc:
        for part in (model.encoder, model.decoder):
            assert part.embed[-1].alpha.grad is not None

    # inference must run without error
    model.eval()
    with torch.no_grad():
        spemb = None if model_args["spk_embed_dim"] is None else batch["spembs"][0]
        x = batch["xs"][0][: batch["ilens"][0]]
        model.inference(x, Namespace(**inference_args), spemb=spemb)
        model.calculate_all_attentions(**batch)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
"model_dict",
[
({}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "concat"}),
({"spk_embed_dim": 16, "spk_embed_integration_type": "add"}),
({"use_masking": False}),
({"use_scaled_pos_enc": False}),
({"bce_pos_weight": 10.0}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
({"use_masking": False}),
({"use_masking": False, "use_weighted_masking": True}),
],
)
def test_transformer_multi_gpu_trainable(model_dict):
    """One optimizer step with the model wrapped in DataParallel over 2 GPUs."""
    model_args = make_transformer_args(**model_dict)

    idim, odim = 40, 40
    device = torch.device("cuda")
    batch = prepare_inputs(
        idim,
        odim,
        [10, 5, 10, 5],
        [20, 15, 20, 15],
        model_args["spk_embed_dim"],
        device=device,
    )

    ngpu = 2
    model = torch.nn.DataParallel(
        Transformer(idim, odim, Namespace(**model_args)), list(range(ngpu))
    )
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters())

    # a single training step must succeed across replicas
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # ScaledPositionalEncoding's alpha must have received a gradient
    if model.module.use_scaled_pos_enc:
        assert model.module.encoder.embed[-1].alpha.grad is not None
        assert model.module.decoder.embed[-1].alpha.grad is not None
@pytest.mark.parametrize("model_dict", [({})])
def test_attention_masking(model_dict):
    """Check that attention weights respect the source/target padding masks.

    NaNs are planted in the padded region of the inputs; if masking works,
    the attention weights over valid positions stay NaN-free and sum to 1
    per row, while weights over padded positions are exactly zero.
    """
    # make args
    model_args = make_transformer_args(**model_dict)

    # setup batch (both utterances the same length; padding comes from embed)
    idim = 40
    odim = 40
    ilens = [40, 40]
    olens = [40, 40]
    batch = prepare_inputs(idim, odim, ilens, olens)

    # define model
    model = Transformer(idim, odim, Namespace(**model_args))

    # test encoder self-attention
    x_masks = model._source_mask(batch["ilens"])
    xs, x_masks = model.encoder.embed(batch["xs"], x_masks)
    # poison the frames beyond the valid length of the second utterance
    xs[1, ilens[1] :] = float("nan")
    a = model.encoder.encoders[0].self_attn
    a(xs, xs, xs, x_masks)
    aws = a.attn.detach().numpy()
    for aw, ilen in zip(aws, batch["ilens"]):
        ilen = floor(floor(((ilen - 1) // 2) - 1) / 2)  # due to 4x down sampling
        # valid region must be NaN-free and each row must sum to 1 per head
        assert not np.isnan(aw[:, :ilen, :ilen]).any()
        np.testing.assert_almost_equal(
            aw[:, :ilen, :ilen].sum(),
            float(aw.shape[0] * ilen),
            decimal=4,
            err_msg=f"ilen={ilen}, awshape={str(aw)}",
        )
        # padded region must be completely masked out
        assert aw[:, ilen:, ilen:].sum() == 0.0

    # test encoder-decoder attention
    ys = model.decoder.embed(batch["ys"])
    ys[1, olens[1] :] = float("nan")
    xy_masks = x_masks
    a = model.decoder.decoders[0].src_attn
    a(ys, xs, xs, xy_masks)
    aws = a.attn.detach().numpy()
    for aw, ilen, olen in zip(aws, batch["ilens"], batch["olens"]):
        ilen = floor(floor(((ilen - 1) // 2) - 1) / 2)  # due to 4x down sampling
        assert not np.isnan(aw[:, :olen, :ilen]).any()
        np.testing.assert_almost_equal(
            aw[:, :olen, :ilen].sum(), float(aw.shape[0] * olen), decimal=4
        )
        assert aw[:, olen:, ilen:].sum() == 0.0

    # test decoder self-attention
    y_masks = model._target_mask(batch["olens"])
    a = model.decoder.decoders[0].self_attn
    a(ys, ys, ys, y_masks)
    aws = a.attn.detach().numpy()
    for aw, olen in zip(aws, batch["olens"]):
        assert not np.isnan(aw[:, :olen, :olen]).any()
        np.testing.assert_almost_equal(
            aw[:, :olen, :olen].sum(), float(aw.shape[0] * olen), decimal=4
        )
        assert aw[:, olen:, olen:].sum() == 0.0
@pytest.mark.parametrize(
"model_dict",
[
({}),
({"reduction_factor": 3}),
({"reduction_factor": 4}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
],
)
def test_forward_and_inference_are_equal(model_dict):
    """Teacher-forced forward() and step-by-step inference must agree.

    Runs the batched forward pass, then re-decodes the same utterance one
    frame at a time (feeding the ground-truth frames back in), and asserts
    the encoder states, output features and stop probabilities match.
    Prenet dropout is disabled so both paths are deterministic.
    """
    # make args
    model_args = make_transformer_args(dprenet_dropout_rate=0.0, **model_dict)

    # setup batch (a single utterance)
    idim = 40
    odim = 40
    ilens = [60]
    olens = [60]
    batch = prepare_inputs(idim, odim, ilens, olens)
    xs = batch["xs"]
    ilens = batch["ilens"]
    ys = batch["ys"]
    olens = batch["olens"]

    # define model
    model = Transformer(idim, odim, Namespace(**model_args))
    model.eval()

    # TODO(kan-bayashi): update following ugly part
    with torch.no_grad():
        # --------- forward calculation ---------
        x_masks = model._source_mask(ilens)
        hs_fp, h_masks = model.encoder(xs, x_masks)
        if model.reduction_factor > 1:
            # keep every reduction_factor-th frame as decoder input
            ys_in = ys[:, model.reduction_factor - 1 :: model.reduction_factor]
            olens_in = olens.new([olen // model.reduction_factor for olen in olens])
        else:
            ys_in, olens_in = ys, olens
        ys_in = model._add_first_frame_and_remove_last_frame(ys_in)
        y_masks = model._target_mask(olens_in)
        zs, _ = model.decoder(ys_in, y_masks, hs_fp, h_masks)
        before_outs = model.feat_out(zs).view(zs.size(0), -1, model.odim)
        logits = model.prob_out(zs).view(zs.size(0), -1)
        after_outs = before_outs + model.postnet(before_outs.transpose(1, 2)).transpose(
            1, 2
        )
        # --------- forward calculation ---------

        # --------- inference calculation ---------
        hs_ir, _ = model.encoder(xs, None)
        maxlen = ys_in.shape[1]
        minlen = ys_in.shape[1]
        idx = 0
        # this is the inference calculation but we use groundtruth to check behavior
        ys_in_ = ys_in[0, idx].view(1, 1, model.odim)
        # the very first decoder input frame must be the all-zero start frame
        np.testing.assert_array_equal(
            ys_in_.new_zeros(1, 1, model.odim).detach().cpu().numpy(),
            ys_in_.detach().cpu().numpy(),
        )
        outs, probs = [], []
        while True:
            idx += 1
            y_masks = subsequent_mask(idx).unsqueeze(0)
            z = model.decoder.forward_one_step(ys_in_, y_masks, hs_ir)[
                0
            ]  # (B, idx, adim)
            outs += [model.feat_out(z).view(1, -1, model.odim)]  # [(1, r, odim), ...]
            probs += [torch.sigmoid(model.prob_out(z))[0]]  # [(r), ...]
            if idx >= maxlen:
                if idx < minlen:
                    continue
                outs = torch.cat(outs, dim=1).transpose(
                    1, 2
                )  # (1, L, odim) -> (1, odim, L)
                if model.postnet is not None:
                    outs = outs + model.postnet(outs)  # (1, odim, L)
                outs = outs.transpose(2, 1).squeeze(0)  # (L, odim)
                probs = torch.cat(probs, dim=0)
                break
            # feed the ground-truth frame back in as the next decoder input
            ys_in_ = torch.cat(
                (ys_in_, ys_in[0, idx].view(1, 1, model.odim)), dim=1
            )  # (1, idx + 1, odim)
        # --------- inference calculation ---------

        # check both are equal
        np.testing.assert_array_almost_equal(
            hs_fp.detach().cpu().numpy(),
            hs_ir.detach().cpu().numpy(),
        )
        np.testing.assert_array_almost_equal(
            after_outs.squeeze(0).detach().cpu().numpy(),
            outs.detach().cpu().numpy(),
        )
        np.testing.assert_array_almost_equal(
            torch.sigmoid(logits.squeeze(0)).detach().cpu().numpy(),
            probs.detach().cpu().numpy(),
        )
| 16,029 | 32.676471 | 88 | py |
espnet | espnet-master/test/test_e2e_asr_mulenc.py | # coding: utf-8
# Copyright 2019 Ruizhi Li
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
import importlib
import os
import tempfile
from test.utils_test import make_dummy_json
import numpy as np
import pytest
import torch
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.utils.training.batchfy import make_batchset
def make_arg(num_encs, **kwargs):
    """Return a default argument Namespace for a multi-encoder E2E model.

    Per-encoder options are replicated once per encoder; any keyword
    argument overrides its default.
    """

    def per_enc(value):
        # Replicate a per-encoder option `num_encs` times (fresh list each call).
        return [value for _ in range(num_encs)]

    defaults = dict(
        num_encs=num_encs,
        elayers=per_enc(1),
        subsample=per_enc("1_2_2_1_1"),
        etype=per_enc("vggblstm"),
        eunits=per_enc(1),
        eprojs=1,
        dtype="lstm",
        dlayers=1,
        dunits=1,
        atype=per_enc("add"),
        aheads=per_enc(1),
        awin=per_enc(1),
        aconv_chans=per_enc(1),
        aconv_filts=per_enc(1),
        han_type="add",
        han_heads=1,
        han_win=1,
        han_conv_chans=1,
        han_conv_filts=1,
        han_dim=1,
        mtlalpha=0.5,
        lsm_type="",
        lsm_weight=0.0,
        sampling_probability=0.0,
        adim=per_enc(1),
        dropout_rate=per_enc(0.0),
        dropout_rate_decoder=0.0,
        nbest=1,
        beam_size=2,
        penalty=0.5,
        maxlenratio=1.0,
        minlenratio=0.0,
        ctc_weight=0.2,
        ctc_window_margin=0,
        lm_weight=0.0,
        rnnlm=None,
        streaming_min_blank_dur=10,
        streaming_onset_margin=2,
        streaming_offset_margin=2,
        verbose=2,
        char_list=["あ", "い"],
        outdir=None,
        ctc_type="builtin",
        report_cer=False,
        report_wer=False,
        sym_space="<space>",
        sym_blank="<blank>",
        sortagrad=0,
        grad_noise=False,
        context_residual=False,
        use_frontend=False,
        share_ctc=False,
        weights_ctc_train=per_enc(0.5),
        weights_ctc_dec=per_enc(0.5),
    )
    defaults.update(kwargs)
    return argparse.Namespace(**defaults)
def prepare_inputs(mode, num_encs=2, is_cuda=False):
    """Create a small, seeded random batch for a multi-encoder model.

    Only the "pytorch" mode is supported; anything else raises ValueError.
    """
    ilens_list = [[3, 2] for _ in range(num_encs)]
    olens = [2, 1]
    np.random.seed(1)
    assert len(ilens_list[0]) == len(ilens_list[1]) == len(olens)

    xs_list = [
        [np.random.randn(ilen, 2).astype(np.float32) for ilen in ilens]
        for ilens in ilens_list
    ]
    ys = [np.random.randint(1, 2, olen).astype(np.int32) for olen in olens]
    ilens_list = [np.array([x.shape[0] for x in xs], dtype=np.int32) for xs in xs_list]

    if mode != "pytorch":
        raise ValueError("Invalid mode")

    ilens_list = [torch.from_numpy(lengths).long() for lengths in ilens_list]
    xs_pad_list = [
        pad_list([torch.from_numpy(x).float() for x in xs], 0) for xs in xs_list
    ]
    ys_pad = pad_list([torch.from_numpy(y).long() for y in ys], -1)
    if is_cuda:
        xs_pad_list = [xs_pad.cuda() for xs_pad in xs_pad_list]
        ilens_list = [lengths.cuda() for lengths in ilens_list]
        ys_pad = ys_pad.cuda()
    return xs_pad_list, ilens_list, ys_pad
def convert_batch(
    batch, backend="pytorch", is_cuda=False, idim=2, odim=2, num_inputs=2
):
    """Build a random minibatch whose shapes follow the batchset metadata."""
    ilens_list = []
    for idx in range(num_inputs):
        ilens_list.append(
            np.array([entry[1]["input"][idx]["shape"][0] for entry in batch])
        )
    olens = np.array([entry[1]["output"][0]["shape"][0] for entry in batch])

    xs_list = []
    for idx in range(num_inputs):
        xs_list.append(
            [np.random.randn(ilen, idim).astype(np.float32) for ilen in ilens_list[idx]]
        )
    ys = [np.random.randint(1, odim, olen).astype(np.int32) for olen in olens]

    if backend == "pytorch":
        # pad and convert to tensors
        xs_list = [
            pad_list([torch.from_numpy(x).float() for x in xs], 0) for xs in xs_list
        ]
        ilens_list = [torch.from_numpy(lengths).long() for lengths in ilens_list]
        ys = pad_list([torch.from_numpy(y).long() for y in ys], -1)

        if is_cuda:
            xs_list = [xs.cuda() for xs in xs_list]
            ilens_list = [lengths.cuda() for lengths in ilens_list]
            ys = ys.cuda()

    return xs_list, ilens_list, ys
@pytest.mark.parametrize(
"module, num_encs, model_dict",
[
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {}),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"elayers": [2, 1], "dlayers": 2},
),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"etype": ["grup", "grup"]}),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["lstmp", "lstmp"]},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["bgrup", "bgrup"]},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["blstmp", "blstmp"]},
),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"etype": ["bgru", "bgru"]}),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["blstm", "blstm"]},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["vgggru", "vgggru"]},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["vgggrup", "vgggrup"]},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["vgglstm", "vgglstm"]},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["vgglstmp", "vgglstmp"]},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["vggbgru", "vggbgru"]},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["vggbgrup", "vggbgrup"]},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"etype": ["blstmp", "vggblstmp"]},
),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"dtype": "gru"}),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"atype": ["noatt", "noatt"], "han_type": "noatt"},
),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"atype": ["add", "add"]}),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"atype": ["add", "add"]}),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"atype": ["coverage", "coverage"], "han_type": "coverage"},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{
"atype": ["coverage_location", "coverage_location"],
"han_type": "coverage_location",
},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"atype": ["location2d", "location2d"], "han_type": "location2d"},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{
"atype": ["location_recurrent", "location_recurrent"],
"han_type": "location_recurrent",
},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{
"atype": ["multi_head_dot", "multi_head_dot"],
"han_type": "multi_head_dot",
},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{
"atype": ["multi_head_add", "multi_head_add"],
"han_type": "multi_head_add",
},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{
"atype": ["multi_head_loc", "multi_head_loc"],
"han_type": "multi_head_loc",
},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{
"atype": ["multi_head_multi_res_loc", "multi_head_multi_res_loc"],
"han_type": "multi_head_multi_res_loc",
},
),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"mtlalpha": 0.0}),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"mtlalpha": 1.0}),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"sampling_probability": 0.5},
),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"ctc_type": "builtin"}),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"ctc_weight": 0.0}),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"ctc_weight": 1.0}),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"context_residual": True}),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"grad_noise": True}),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"report_cer": True}),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"report_wer": True}),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"report_cer": True, "report_wer": True},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"report_cer": True, "report_wer": True, "mtlalpha": 0.0},
),
(
"espnet.nets.pytorch_backend.e2e_asr_mulenc",
2,
{"report_cer": True, "report_wer": True, "mtlalpha": 1.0},
),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {"share_ctc": True}),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, {}),
],
)
def test_model_trainable_and_decodable(module, num_encs, model_dict):
    """Check backward() and (batch) decoding for a multi-encoder E2E model."""
    args = make_arg(num_encs=num_encs, **model_dict)
    batch = prepare_inputs("pytorch", num_encs)

    # the loss must be differentiable
    model = importlib.import_module(module).E2E([2 for _ in range(num_encs)], 2, args)
    model(*batch).backward()

    with torch.no_grad():
        # single-utterance decoding
        in_data = [np.random.randn(2, 2) for _ in range(num_encs)]
        model.recognize(in_data, args, args.char_list)
        if "pytorch" in module:
            # batch decoding
            batch_in_data = [
                [np.random.randn(5, 2), np.random.randn(2, 2)]
                for _ in range(num_encs)
            ]
            model.recognize_batch(batch_in_data, args, args.char_list)
@pytest.mark.parametrize("module, num_encs", [("pytorch", 2), ("pytorch", 3)])
def test_gradient_noise_injection(module, num_encs):
    """Gradient noise must make gradients differ from a noise-free run."""
    noisy_args = make_arg(num_encs=num_encs, grad_noise=True)
    clean_args = make_arg(num_encs=num_encs)
    dummy_json = make_dummy_json(
        num_encs, [2, 3], [2, 3], idim=2, odim=2, num_inputs=num_encs
    )
    import espnet.nets.pytorch_backend.e2e_asr_mulenc as m

    batchset = make_batchset(dummy_json, 2, 2**10, 2**10, shortest_first=True)
    noisy_model = m.E2E([2 for _ in range(num_encs)], 2, noisy_args)
    clean_model = m.E2E([2 for _ in range(num_encs)], 2, clean_args)
    for batch in batchset:
        noisy_loss = noisy_model(
            *convert_batch(batch, module, idim=2, odim=2, num_inputs=num_encs)
        )
        clean_loss = clean_model(
            *convert_batch(batch, module, idim=2, odim=2, num_inputs=num_encs)
        )
        noisy_loss.backward()
        grad = [param.grad for param in noisy_model.parameters()][10]
        clean_loss.backward()
        grad_ref = [param.grad for param in clean_model.parameters()][10]
        # the injected noise must perturb the gradient
        assert grad[0] != grad_ref[0]
@pytest.mark.parametrize("module, num_encs", [("pytorch", 2), ("pytorch", 3)])
def test_sortagrad_trainable(module, num_encs):
    """Sortagrad batching must cover all utterances and keep the model usable."""
    args = make_arg(num_encs=num_encs, sortagrad=1)
    dummy_json = make_dummy_json(6, [2, 3], [2, 3], idim=2, odim=2, num_inputs=num_encs)
    import espnet.nets.pytorch_backend.e2e_asr_mulenc as m

    batchset = make_batchset(dummy_json, 2, 2**10, 2**10, shortest_first=True)
    model = m.E2E([2 for _ in range(num_encs)], 2, args)
    seen_utts = 0
    for batch in batchset:
        seen_utts += len(batch)
        loss = model(*convert_batch(batch, module, idim=2, odim=2, num_inputs=num_encs))
        loss.backward()  # trainable
    # every utterance from the dummy json must have been batched exactly once
    assert seen_utts == 6
    with torch.no_grad():
        in_data = [np.random.randn(50, 2) for _ in range(num_encs)]
        model.recognize(in_data, args, args.char_list)
@pytest.mark.parametrize("module, num_encs", [("pytorch", 2), ("pytorch", 3)])
def test_sortagrad_trainable_with_batch_bins(module, num_encs):
    """Batch-bins batching must respect the bin budget and stay trainable."""
    args = make_arg(num_encs=num_encs, sortagrad=1)
    idim, odim = 2, 2
    dummy_json = make_dummy_json(
        4, [2, 3], [2, 3], idim=idim, odim=odim, num_inputs=num_encs
    )
    import espnet.nets.pytorch_backend.e2e_asr_mulenc as m

    batch_elems = 2000
    batchset = make_batchset(dummy_json, batch_bins=batch_elems, shortest_first=True)
    for batch in batchset:
        used_bins = 0
        for uttid, info in batch:
            in_len = int(info["input"][0]["shape"][0])  # based on the first input
            out_len = int(info["output"][0]["shape"][0])
            used_bins += in_len * idim + out_len * odim
            assert out_len < batch_elems
    model = m.E2E([2 for _ in range(num_encs)], 2, args)
    for batch in batchset:
        loss = model(*convert_batch(batch, module, idim=2, odim=2, num_inputs=num_encs))
        loss.backward()  # trainable
    with torch.no_grad():
        in_data = [np.random.randn(100, 2) for _ in range(num_encs)]
        model.recognize(in_data, args, args.char_list)
@pytest.mark.parametrize("module, num_encs", [("pytorch", 2), ("pytorch", 3)])
def test_sortagrad_trainable_with_batch_frames(module, num_encs):
    """Frame-count batching must respect both frame budgets and stay trainable."""
    args = make_arg(num_encs=num_encs, sortagrad=1)
    idim, odim = 2, 2
    dummy_json = make_dummy_json(
        4, [2, 3], [2, 3], idim=idim, odim=odim, num_inputs=num_encs
    )
    import espnet.nets.pytorch_backend.e2e_asr_mulenc as m

    batch_frames_in = 50
    batch_frames_out = 50
    batchset = make_batchset(
        dummy_json,
        batch_frames_in=batch_frames_in,
        batch_frames_out=batch_frames_out,
        shortest_first=True,
    )
    for batch in batchset:
        in_frames = 0
        out_frames = 0
        for uttid, info in batch:
            in_frames += int(info["input"][0]["shape"][0])  # based on the first input
            out_frames += int(info["output"][0]["shape"][0])
        # each batch must fit inside both frame budgets
        assert in_frames <= batch_frames_in
        assert out_frames <= batch_frames_out
    model = m.E2E([2 for _ in range(num_encs)], 2, args)
    for batch in batchset:
        loss = model(*convert_batch(batch, module, idim=2, odim=2, num_inputs=num_encs))
        loss.backward()  # trainable
    with torch.no_grad():
        in_data = [np.random.randn(100, 2) for _ in range(num_encs)]
        model.recognize(in_data, args, args.char_list)
def init_torch_weight_const(m, val):
    """Fill every multi-dimensional parameter of module `m` with `val`.

    One-dimensional parameters (e.g. biases) are left untouched.
    """
    weights = (p for p in m.parameters() if p.dim() > 1)
    for weight in weights:
        weight.data.fill_(val)
@pytest.mark.parametrize(
"module, num_encs, atype",
[
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "noatt"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "dot"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "add"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "location"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "coverage"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "coverage_location"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "location2d"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "location_recurrent"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "multi_head_dot"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "multi_head_add"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "multi_head_loc"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, "multi_head_multi_res_loc"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "noatt"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "dot"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "add"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "location"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "coverage"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "coverage_location"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "location2d"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "location_recurrent"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "multi_head_dot"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "multi_head_add"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "multi_head_loc"),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3, "multi_head_multi_res_loc"),
],
)
def test_calculate_all_attentions(module, num_encs, atype):
    """Attention weights (per-encoder and HAN) must be computable for every atype."""
    m = importlib.import_module(module)
    args = make_arg(
        num_encs=num_encs, atype=[atype for _ in range(num_encs)], han_type=atype
    )
    batch = prepare_inputs("pytorch", num_encs)
    model = m.E2E([2 for _ in range(num_encs)], 2, args)
    att_ws = model.calculate_all_attentions(*batch)
    for enc_idx in range(num_encs):
        print(att_ws[enc_idx][0].shape)  # per-encoder attention
    print(att_ws[num_encs][0].shape)  # hierarchical attention (HAN)
@pytest.mark.parametrize("num_encs", [2, 3])
def test_torch_save_and_load(num_encs):
    """Ensure torch_save/torch_load round-trips all model parameters.

    Saves a randomly initialized multi-encoder E2E model, zeroes its
    parameters, reloads the checkpoint, and checks every parameter is
    restored exactly.
    """
    m = importlib.import_module("espnet.nets.pytorch_backend.e2e_asr_mulenc")
    utils = importlib.import_module("espnet.asr.asr_utils")
    args = make_arg(num_encs=num_encs)
    model = m.E2E([2 for _ in range(num_encs)], 2, args)
    # initialize randomly
    for p in model.parameters():
        p.data.uniform_()
    os.makedirs(".pytest_cache", exist_ok=True)
    # mkstemp instead of the deprecated, race-prone tempfile.mktemp()
    fd, tmppath = tempfile.mkstemp()
    os.close(fd)
    try:
        utils.torch_save(tmppath, model)
        p_saved = [p.data.numpy() for p in model.parameters()]
        # zero everything so a failed load is detectable
        for p in model.parameters():
            p.data.zero_()
        utils.torch_load(tmppath, model)
        for p1, p2 in zip(p_saved, model.parameters()):
            np.testing.assert_array_equal(p1, p2.data.numpy())
    finally:
        # always clean up, even when the equality check fails
        if os.path.exists(tmppath):
            os.remove(tmppath)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="gpu required")
@pytest.mark.parametrize(
"module, num_encs",
[
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3),
],
)
def test_gpu_trainable(module, num_encs):
    """Single-GPU forward/backward must run for the multi-encoder model."""
    args = make_arg(num_encs=num_encs)
    model = importlib.import_module(module).E2E([2 for _ in range(num_encs)], 2, args)
    if "pytorch" in module:
        batch = prepare_inputs("pytorch", num_encs, is_cuda=True)
        model.cuda()
        # one forward/backward pass on GPU
        model(*batch).backward()
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
"module, num_encs",
[
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2),
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 3),
],
)
def test_multi_gpu_trainable(module, num_encs):
    """DataParallel forward/backward must run across two GPUs."""
    ngpu = 2
    args = make_arg(num_encs=num_encs)
    model = importlib.import_module(module).E2E([2 for _ in range(num_encs)], 2, args)
    if "pytorch" in module:
        model = torch.nn.DataParallel(model, list(range(ngpu)))
        batch = prepare_inputs("pytorch", num_encs, is_cuda=True)
        model.cuda()
        # average over replicas, then backprop one gradient per replica
        loss = 1.0 / ngpu * model(*batch)
        loss.backward(loss.new_ones(ngpu))
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize(
"module, num_encs, model_dict",
[
("espnet.nets.pytorch_backend.e2e_asr_mulenc", 2, {}),
],
)
def test_calculate_plot_attention_ctc(module, num_encs, model_dict):
    """Attention and CTC visualizations must be computable and saveable."""
    args = make_arg(num_encs=num_encs, **model_dict)
    model = importlib.import_module(module).E2E([2 for _ in range(num_encs)], 2, args)

    dummy_json = make_dummy_json(
        num_encs, [2, 3], [2, 3], idim=2, odim=2, num_inputs=num_encs
    )
    batchset = make_batchset(dummy_json, 2, 2**10, 2**10, shortest_first=True)

    # --- attention plots ---
    att_ws = model.calculate_all_attentions(
        *convert_batch(batchset[0], "pytorch", idim=2, odim=2, num_inputs=num_encs)
    )
    from espnet.asr.asr_utils import PlotAttentionReport

    att_dir = tempfile.mkdtemp()
    att_plot = PlotAttentionReport(
        model.calculate_all_attentions, batchset[0], att_dir, None, None, None
    )
    for enc_idx in range(num_encs):
        # per-encoder attention
        weight = att_plot.trim_attention_weight("utt_0", att_ws[enc_idx][0])
        att_plot._plot_and_save_attention(weight, "{}/att{}.png".format(att_dir, enc_idx))
    # hierarchical attention (HAN)
    weight = att_plot.trim_attention_weight("utt_0", att_ws[num_encs][0])
    att_plot._plot_and_save_attention(weight, "{}/han.png".format(att_dir), han_mode=True)

    # --- CTC plots ---
    ctc_probs = model.calculate_all_ctc_probs(
        *convert_batch(batchset[0], "pytorch", idim=2, odim=2, num_inputs=num_encs)
    )
    from espnet.asr.asr_utils import PlotCTCReport

    ctc_dir = tempfile.mkdtemp()
    ctc_plot = PlotCTCReport(
        model.calculate_all_ctc_probs, batchset[0], ctc_dir, None, None, None
    )
    if args.mtlalpha > 0:
        for enc_idx in range(num_encs):
            # per-encoder CTC posteriors
            ctc_plot._plot_and_save_ctc(
                ctc_probs[enc_idx][0], "{}/ctc{}.png".format(ctc_dir, enc_idx)
            )
| 21,844 | 35.408333 | 88 | py |
espnet | espnet-master/test/test_e2e_vc_tacotron2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Wen-Chin Huang
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division, print_function
from argparse import Namespace
import numpy as np
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_vc_tacotron2 import Tacotron2
from espnet.nets.pytorch_backend.nets_utils import pad_list
def make_taco2_args(**kwargs):
    """Return the default Tacotron2-VC model configuration as a dict.

    Any keyword argument overrides the corresponding default entry.
    """
    base = {
        "use_speaker_embedding": False,
        "spk_embed_dim": None,
        "input_layer": "linear",
        "elayers": 1,
        "eunits": 32,
        "econv_layers": 2,
        "econv_filts": 5,
        "econv_chans": 32,
        "dlayers": 2,
        "dunits": 32,
        "prenet_layers": 2,
        "prenet_units": 32,
        "postnet_layers": 2,
        "postnet_filts": 5,
        "postnet_chans": 32,
        "output_activation": None,
        "atype": "location",
        "adim": 32,
        "aconv_chans": 16,
        "aconv_filts": 5,
        "cumulate_att_w": True,
        "use_batch_norm": True,
        "use_concate": True,
        "use_residual": False,
        "dropout_rate": 0.5,
        "zoneout_rate": 0.1,
        "reduction_factor": 1,
        "threshold": 0.5,
        "maxlenratio": 5.0,
        "minlenratio": 0.0,
        "use_cbhg": False,
        "spc_dim": None,
        "cbhg_conv_bank_layers": 4,
        "cbhg_conv_bank_chans": 32,
        "cbhg_conv_proj_filts": 3,
        "cbhg_conv_proj_chans": 32,
        "cbhg_highway_layers": 4,
        "cbhg_highway_units": 32,
        "cbhg_gru_units": 32,
        "use_masking": True,
        "use_weighted_masking": False,
        "bce_pos_weight": 1.0,
        "use_guided_attn_loss": False,
        "guided_attn_loss_sigma": 0.4,
        "guided_attn_loss_lambda": 1.0,
    }
    base.update(kwargs)
    return base
def make_inference_args(**kwargs):
    """Return default Tacotron2 inference options, overridden by kwargs."""
    return {
        "threshold": 0.5,
        "maxlenratio": 5.0,
        "minlenratio": 0.0,
        "use_att_constraint": False,
        "backward_window": 1,
        "forward_window": 3,
        **kwargs,
    }
def prepare_inputs(
    bs,
    idim,
    odim,
    maxin_len,
    maxout_len,
    spk_embed_dim=None,
    spc_dim=None,
    device=torch.device("cpu"),
):
    """Create a random, length-sorted, zero-padded batch for Tacotron2-VC tests.

    Returns a dict with ``xs``/``ilens``/``ys``/``labels``/``olens``; adds
    ``spembs`` when ``spk_embed_dim`` is given and ``extras`` when ``spc_dim`` is.
    """
    in_lens = sorted(np.random.randint(1, maxin_len, bs).tolist(), reverse=True)
    out_lens = sorted(np.random.randint(3, maxout_len, bs).tolist(), reverse=True)
    src_feats = [np.random.randn(n, idim) for n in in_lens]
    tgt_feats = [np.random.randn(n, odim) for n in out_lens]
    ilens = torch.LongTensor(in_lens).to(device)
    olens = torch.LongTensor(out_lens).to(device)
    xs = pad_list([torch.from_numpy(f).float() for f in src_feats], 0).to(device)
    ys = pad_list([torch.from_numpy(f).float() for f in tgt_feats], 0).to(device)
    # stop-token targets: 1 from the final valid frame onward
    labels = ys.new_zeros(ys.size(0), ys.size(1))
    for row, n in enumerate(out_lens):
        labels[row, n - 1 :] = 1
    batch = {"xs": xs, "ilens": ilens, "ys": ys, "labels": labels, "olens": olens}
    if spk_embed_dim is not None:
        batch["spembs"] = (
            torch.from_numpy(np.random.randn(bs, spk_embed_dim)).float().to(device)
        )
    if spc_dim is not None:
        spcs = [np.random.randn(n, spc_dim) for n in out_lens]
        batch["extras"] = pad_list(
            [torch.from_numpy(s).float() for s in spcs], 0
        ).to(device)
    return batch
@pytest.mark.parametrize(
    "model_dict, inference_dict",
    [
        ({}, {}),
        ({"use_masking": False}, {}),
        ({"bce_pos_weight": 10.0}, {}),
        ({"atype": "forward"}, {}),
        ({"atype": "forward_ta"}, {}),
        ({"prenet_layers": 0}, {}),
        ({"postnet_layers": 0}, {}),
        ({"prenet_layers": 0, "postnet_layers": 0}, {}),
        ({"output_activation": "relu"}, {}),
        ({"cumulate_att_w": False}, {}),
        ({"use_batch_norm": False}, {}),
        ({"use_concate": False}, {}),
        ({"use_residual": True}, {}),
        ({"dropout_rate": 0.0}, {}),
        ({"zoneout_rate": 0.0}, {}),
        ({"reduction_factor": 2}, {}),
        ({"reduction_factor": 3}, {}),
        ({"use_speaker_embedding": True}, {}),
        # NOTE: a byte-identical duplicate of ({"use_masking": False}, {}) was
        # removed here — it only re-ran an already-covered case.
        ({"use_masking": False, "use_weighted_masking": True}, {}),
        ({"use_guided_attn_loss": True}, {}),
        ({"reduction_factor": 3, "use_guided_attn_loss": True}, {}),
        ({}, {"use_att_constraint": True}),
        ({"atype": "forward"}, {"use_att_constraint": True}),
        ({"atype": "forward_ta"}, {"use_att_constraint": True}),
    ],
)
def test_tacotron2_trainable_and_decodable(model_dict, inference_dict):
    """Run one CPU training step and one inference pass for each VC config."""
    # make args
    model_args = make_taco2_args(**model_dict)
    inference_args = make_inference_args(**inference_dict)
    # setup batch
    bs = 2
    maxin_len = 10
    maxout_len = 10
    idim = 5
    odim = 10
    if model_args["use_cbhg"]:
        model_args["spc_dim"] = 129
    if model_args["use_speaker_embedding"]:
        model_args["spk_embed_dim"] = 128
    batch = prepare_inputs(
        bs,
        idim,
        odim,
        maxin_len,
        maxout_len,
        model_args["spk_embed_dim"],
        model_args["spc_dim"],
    )
    # define model
    model = Tacotron2(idim, odim, Namespace(**model_args))
    optimizer = torch.optim.Adam(model.parameters())
    # trainable: a single forward/backward/update step must succeed
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # decodable
    model.eval()
    with torch.no_grad():
        spemb = None if model_args["spk_embed_dim"] is None else batch["spembs"][0]
        model.inference(
            batch["xs"][0][: batch["ilens"][0]], Namespace(**inference_args), spemb
        )
        model.calculate_all_attentions(**batch)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="gpu required")
@pytest.mark.parametrize(
    "model_dict, inference_dict",
    [
        ({}, {}),
        ({"atype": "forward"}, {}),
        ({"atype": "forward_ta"}, {}),
        ({"use_speaker_embedding": True, "spk_embed_dim": 128}, {}),
        ({"reduction_factor": 3}, {}),
        ({"use_guided_attn_loss": True}, {}),
        ({"use_masking": False}, {}),
        ({"use_masking": False, "use_weighted_masking": True}, {}),
        ({}, {"use_att_constraint": True}),
        ({"atype": "forward"}, {"use_att_constraint": True}),
        ({"atype": "forward_ta"}, {"use_att_constraint": True}),
    ],
)
def test_tacotron2_gpu_trainable_and_decodable(model_dict, inference_dict):
    """Single-GPU variant: one training step plus inference for each config."""
    bs = 2
    maxin_len = 10
    maxout_len = 10
    idim = 5
    odim = 10
    device = torch.device("cuda")
    model_args = make_taco2_args(**model_dict)
    inference_args = make_inference_args(**inference_dict)
    batch = prepare_inputs(
        bs,
        idim,
        odim,
        maxin_len,
        maxout_len,
        model_args["spk_embed_dim"],
        model_args["spc_dim"],
        device=device,
    )
    # define model
    model = Tacotron2(idim, odim, Namespace(**model_args))
    optimizer = torch.optim.Adam(model.parameters())
    # Module.to() moves parameters in place, so the optimizer created above
    # still references the (now CUDA) parameters.
    model.to(device)
    # trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # decodable
    model.eval()
    with torch.no_grad():
        spemb = None if model_args["spk_embed_dim"] is None else batch["spembs"][0]
        model.inference(
            batch["xs"][0][: batch["ilens"][0]], Namespace(**inference_args), spemb
        )
        model.calculate_all_attentions(**batch)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
    "model_dict",
    [
        ({}),
        ({"atype": "forward"}),
        ({"atype": "forward_ta"}),
        ({"use_speaker_embedding": True, "spk_embed_dim": 128}),
        ({"reduction_factor": 3}),
        ({"use_guided_attn_loss": True}),
        ({"use_masking": False}),
        ({"use_masking": False, "use_weighted_masking": True}),
    ],
)
def test_tacotron2_multi_gpu_trainable(model_dict):
    """DataParallel variant: one training step on two GPUs (no inference)."""
    ngpu = 2
    device_ids = list(range(ngpu))
    device = torch.device("cuda")
    # larger batch so DataParallel has work to split across devices
    bs = 10
    maxin_len = 10
    maxout_len = 10
    idim = 5
    odim = 10
    model_args = make_taco2_args(**model_dict)
    batch = prepare_inputs(
        bs,
        idim,
        odim,
        maxin_len,
        maxout_len,
        model_args["spk_embed_dim"],
        model_args["spc_dim"],
        device=device,
    )
    # define model
    model = Tacotron2(idim, odim, Namespace(**model_args))
    model = torch.nn.DataParallel(model, device_ids)
    optimizer = torch.optim.Adam(model.parameters())
    model.to(device)
    # trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
| 8,638 | 27.417763 | 88 | py |
espnet | espnet-master/test/test_e2e_tts_tacotron2.py | #!/usr/bin/env python3
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division, print_function
from argparse import Namespace
import numpy as np
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import Tacotron2
from espnet.nets.pytorch_backend.nets_utils import pad_list
def make_taco2_args(**kwargs):
    """Return the default TTS Tacotron2 model configuration as a dict.

    Any keyword argument overrides the corresponding default entry.
    """
    base = {
        "use_speaker_embedding": False,
        "spk_embed_dim": None,
        "embed_dim": 32,
        "elayers": 1,
        "eunits": 32,
        "econv_layers": 2,
        "econv_filts": 5,
        "econv_chans": 32,
        "dlayers": 2,
        "dunits": 32,
        "prenet_layers": 2,
        "prenet_units": 32,
        "postnet_layers": 2,
        "postnet_filts": 5,
        "postnet_chans": 32,
        "output_activation": None,
        "atype": "location",
        "adim": 32,
        "aconv_chans": 16,
        "aconv_filts": 5,
        "cumulate_att_w": True,
        "use_batch_norm": True,
        "use_concate": True,
        "use_residual": False,
        "dropout_rate": 0.5,
        "zoneout_rate": 0.1,
        "reduction_factor": 1,
        "threshold": 0.5,
        "maxlenratio": 5.0,
        "minlenratio": 0.0,
        "use_cbhg": False,
        "spc_dim": None,
        "cbhg_conv_bank_layers": 4,
        "cbhg_conv_bank_chans": 32,
        "cbhg_conv_proj_filts": 3,
        "cbhg_conv_proj_chans": 32,
        "cbhg_highway_layers": 4,
        "cbhg_highway_units": 32,
        "cbhg_gru_units": 32,
        "use_masking": True,
        "use_weighted_masking": False,
        "bce_pos_weight": 1.0,
        "use_guided_attn_loss": False,
        "guided_attn_loss_sigma": 0.4,
        "guided_attn_loss_lambda": 1.0,
    }
    base.update(kwargs)
    return base
def make_inference_args(**kwargs):
    """Return default Tacotron2 inference options, overridden by kwargs."""
    return {
        "threshold": 0.5,
        "maxlenratio": 5.0,
        "minlenratio": 0.0,
        "use_att_constraint": False,
        "backward_window": 1,
        "forward_window": 3,
        **kwargs,
    }
def prepare_inputs(
    bs,
    idim,
    odim,
    maxin_len,
    maxout_len,
    spk_embed_dim=None,
    spc_dim=None,
    device=torch.device("cpu"),
):
    """Create a random, length-sorted, padded TTS batch (token ids -> feats).

    Returns a dict with ``xs``/``ilens``/``ys``/``labels``/``olens``; adds
    ``spembs`` when ``spk_embed_dim`` is given and ``extras`` when ``spc_dim`` is.
    """
    in_lens = sorted(np.random.randint(1, maxin_len, bs).tolist(), reverse=True)
    out_lens = sorted(np.random.randint(3, maxout_len, bs).tolist(), reverse=True)
    token_seqs = [np.random.randint(0, idim, n) for n in in_lens]
    tgt_feats = [np.random.randn(n, odim) for n in out_lens]
    ilens = torch.LongTensor(in_lens).to(device)
    olens = torch.LongTensor(out_lens).to(device)
    xs = pad_list([torch.from_numpy(t).long() for t in token_seqs], 0).to(device)
    ys = pad_list([torch.from_numpy(f).float() for f in tgt_feats], 0).to(device)
    # stop-token targets: 1 from the final valid frame onward
    labels = ys.new_zeros(ys.size(0), ys.size(1))
    for row, n in enumerate(out_lens):
        labels[row, n - 1 :] = 1
    batch = {"xs": xs, "ilens": ilens, "ys": ys, "labels": labels, "olens": olens}
    if spk_embed_dim is not None:
        batch["spembs"] = (
            torch.from_numpy(np.random.randn(bs, spk_embed_dim)).float().to(device)
        )
    if spc_dim is not None:
        spcs = [np.random.randn(n, spc_dim) for n in out_lens]
        batch["extras"] = pad_list(
            [torch.from_numpy(s).float() for s in spcs], 0
        ).to(device)
    return batch
@pytest.mark.parametrize(
    "model_dict, inference_dict",
    [
        ({}, {}),
        ({"use_masking": False}, {}),
        ({"bce_pos_weight": 10.0}, {}),
        ({"atype": "forward"}, {}),
        ({"atype": "forward_ta"}, {}),
        ({"prenet_layers": 0}, {}),
        ({"postnet_layers": 0}, {}),
        ({"prenet_layers": 0, "postnet_layers": 0}, {}),
        ({"output_activation": "relu"}, {}),
        ({"cumulate_att_w": False}, {}),
        ({"use_batch_norm": False}, {}),
        ({"use_concate": False}, {}),
        ({"use_residual": True}, {}),
        ({"dropout_rate": 0.0}, {}),
        ({"zoneout_rate": 0.0}, {}),
        ({"reduction_factor": 2}, {}),
        ({"reduction_factor": 3}, {}),
        ({"use_speaker_embedding": True}, {}),
        # NOTE: a byte-identical duplicate of ({"use_masking": False}, {}) was
        # removed here — it only re-ran an already-covered case.
        ({"use_masking": False, "use_weighted_masking": True}, {}),
        ({"use_cbhg": True}, {}),
        ({"reduction_factor": 3, "use_cbhg": True}, {}),
        ({"use_guided_attn_loss": True}, {}),
        ({"reduction_factor": 3, "use_guided_attn_loss": True}, {}),
        ({}, {"use_att_constraint": True}),
        ({"atype": "forward"}, {"use_att_constraint": True}),
        ({"atype": "forward_ta"}, {"use_att_constraint": True}),
    ],
)
def test_tacotron2_trainable_and_decodable(model_dict, inference_dict):
    """Run one CPU training step and one inference pass for each TTS config."""
    # make args
    model_args = make_taco2_args(**model_dict)
    inference_args = make_inference_args(**inference_dict)
    # setup batch
    bs = 2
    maxin_len = 10
    maxout_len = 10
    idim = 5
    odim = 10
    if model_args["use_cbhg"]:
        model_args["spc_dim"] = 129
    if model_args["use_speaker_embedding"]:
        model_args["spk_embed_dim"] = 128
    batch = prepare_inputs(
        bs,
        idim,
        odim,
        maxin_len,
        maxout_len,
        model_args["spk_embed_dim"],
        model_args["spc_dim"],
    )
    # define model
    model = Tacotron2(idim, odim, Namespace(**model_args))
    optimizer = torch.optim.Adam(model.parameters())
    # trainable: a single forward/backward/update step must succeed
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # decodable
    model.eval()
    with torch.no_grad():
        spemb = None if model_args["spk_embed_dim"] is None else batch["spembs"][0]
        model.inference(
            batch["xs"][0][: batch["ilens"][0]], Namespace(**inference_args), spemb
        )
        model.calculate_all_attentions(**batch)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="gpu required")
@pytest.mark.parametrize(
    "model_dict, inference_dict",
    [
        ({}, {}),
        ({"atype": "forward"}, {}),
        ({"atype": "forward_ta"}, {}),
        ({"use_speaker_embedding": True, "spk_embed_dim": 128}, {}),
        ({"use_cbhg": True, "spc_dim": 128}, {}),
        ({"reduction_factor": 3}, {}),
        ({"use_guided_attn_loss": True}, {}),
        ({"use_masking": False}, {}),
        ({"use_masking": False, "use_weighted_masking": True}, {}),
        ({}, {"use_att_constraint": True}),
        ({"atype": "forward"}, {"use_att_constraint": True}),
        ({"atype": "forward_ta"}, {"use_att_constraint": True}),
    ],
)
def test_tacotron2_gpu_trainable_and_decodable(model_dict, inference_dict):
    """Single-GPU variant: one training step plus inference for each config."""
    bs = 2
    maxin_len = 10
    maxout_len = 10
    idim = 5
    odim = 10
    device = torch.device("cuda")
    model_args = make_taco2_args(**model_dict)
    inference_args = make_inference_args(**inference_dict)
    batch = prepare_inputs(
        bs,
        idim,
        odim,
        maxin_len,
        maxout_len,
        model_args["spk_embed_dim"],
        model_args["spc_dim"],
        device=device,
    )
    # define model
    model = Tacotron2(idim, odim, Namespace(**model_args))
    optimizer = torch.optim.Adam(model.parameters())
    # Module.to() moves parameters in place, so the optimizer created above
    # still references the (now CUDA) parameters.
    model.to(device)
    # trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # decodable
    model.eval()
    with torch.no_grad():
        spemb = None if model_args["spk_embed_dim"] is None else batch["spembs"][0]
        model.inference(
            batch["xs"][0][: batch["ilens"][0]], Namespace(**inference_args), spemb
        )
        model.calculate_all_attentions(**batch)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
    "model_dict",
    [
        ({}),
        ({"atype": "forward"}),
        ({"atype": "forward_ta"}),
        ({"use_speaker_embedding": True, "spk_embed_dim": 128}),
        ({"use_cbhg": True, "spc_dim": 128}),
        ({"reduction_factor": 3}),
        ({"use_guided_attn_loss": True}),
        ({"use_masking": False}),
        ({"use_masking": False, "use_weighted_masking": True}),
    ],
)
def test_tacotron2_multi_gpu_trainable(model_dict):
    """DataParallel variant: one training step on two GPUs (no inference)."""
    ngpu = 2
    device_ids = list(range(ngpu))
    device = torch.device("cuda")
    # larger batch so DataParallel has work to split across devices
    bs = 10
    maxin_len = 10
    maxout_len = 10
    idim = 5
    odim = 10
    model_args = make_taco2_args(**model_dict)
    batch = prepare_inputs(
        bs,
        idim,
        odim,
        maxin_len,
        maxout_len,
        model_args["spk_embed_dim"],
        model_args["spc_dim"],
        device=device,
    )
    # define model
    model = Tacotron2(idim, odim, Namespace(**model_args))
    model = torch.nn.DataParallel(model, device_ids)
    optimizer = torch.optim.Adam(model.parameters())
    model.to(device)
    # trainable
    loss = model(**batch).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
| 8,798 | 27.661238 | 88 | py |
espnet | espnet-master/test/test_train_dtype.py | import pytest
import torch
from espnet.nets.asr_interface import dynamic_import_asr
@pytest.mark.parametrize(
    "dtype, device, model, conf",
    [
        (dtype, device, nn, conf)
        for nn, conf in [
            (
                "transformer",
                dict(adim=4, eunits=3, dunits=3, elayers=2, dlayers=2, mtlalpha=0.0),
            ),
            # NOTE: this list previously contained byte-identical duplicates of
            # the builtin-CTC configs below (leftovers from a removed warpctc
            # variant); the duplicates were dropped to avoid re-running
            # identical cases.
            (
                "transformer",
                dict(
                    adim=4,
                    eunits=3,
                    dunits=3,
                    elayers=2,
                    dlayers=2,
                    mtlalpha=0.5,
                    ctc_type="builtin",
                ),
            ),
            (
                "rnn",
                dict(adim=4, eunits=3, dunits=3, elayers=2, dlayers=2, mtlalpha=0.0),
            ),
            (
                "rnn",
                dict(
                    adim=4,
                    eunits=3,
                    dunits=3,
                    elayers=2,
                    dlayers=2,
                    mtlalpha=0.5,
                    ctc_type="builtin",
                ),
            ),
        ]
        for dtype in ("float16", "float32", "float64")
        for device in ("cpu", "cuda")
    ],
)
def test_train_pytorch_dtype(dtype, device, model, conf):
    """Train one step per dtype/device and check the loss dtype matches.

    Skips CUDA cases on CPU-only machines and the unsupported cpu/float16
    combination.
    """
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("no cuda device is available")
    if device == "cpu" and dtype == "float16":
        pytest.skip("cpu float16 implementation is not available in pytorch yet")
    idim = 10
    odim = 10
    model = dynamic_import_asr(model, "pytorch").build(idim, odim, **conf)
    dtype = getattr(torch, dtype)
    device = torch.device(device)
    model.to(dtype=dtype, device=device)
    x = torch.rand(2, 10, idim, dtype=dtype, device=device)
    ilens = torch.tensor([10, 7], device=device)
    y = torch.randint(1, odim, (2, 3), device=device)
    opt = torch.optim.Adam(model.parameters())
    loss = model(x, ilens, y)
    # the loss must stay in the requested precision
    assert loss.dtype == dtype
    model.zero_grad()
    loss.backward()
    # gradients must actually flow back into the model
    assert any(p.grad is not None for p in model.parameters())
    opt.step()
| 2,795 | 28.125 | 85 | py |
espnet | espnet-master/test/test_transformer_decode.py | import numpy
import pytest
import torch
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
RTOL = 1e-4
@pytest.mark.parametrize("normalize_before", [True, False])
def test_decoder_cache(normalize_before):
    """Cached incremental transformer decoding must match full decoding."""
    adim = 4
    odim = 5
    decoder = Decoder(
        odim=odim,
        attention_dim=adim,
        linear_units=3,
        num_blocks=2,
        normalize_before=normalize_before,
        dropout_rate=0.0,
    )
    dlayer = decoder.decoders[0]
    memory = torch.randn(2, 5, adim)
    # scale inputs up so numerical differences would be visible
    x = torch.randn(2, 5, adim) * 100
    mask = subsequent_mask(x.shape[1]).unsqueeze(0)
    prev_mask = mask[:, :-1, :-1]
    decoder.eval()
    with torch.no_grad():
        # layer-level test
        y = dlayer(x, mask, memory, None)[0]
        cache = dlayer(x[:, :-1], prev_mask, memory, None)[0]
        y_fast = dlayer(x, mask, memory, None, cache=cache)[0]
        numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=RTOL)
        # decoder-level test
        x = torch.randint(0, odim, x.shape[:2])
        y, _ = decoder.forward_one_step(x, mask, memory)
        y_, cache = decoder.forward_one_step(
            x[:, :-1], prev_mask, memory, cache=decoder.init_state(None)
        )
        y_fast, _ = decoder.forward_one_step(x, mask, memory, cache=cache)
        numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=RTOL)
@pytest.mark.parametrize("normalize_before", [True, False])
def test_encoder_cache(normalize_before):
    """Cached incremental transformer encoding must match full encoding."""
    adim = 4
    idim = 5
    encoder = Encoder(
        idim=idim,
        attention_dim=adim,
        linear_units=3,
        num_blocks=2,
        normalize_before=normalize_before,
        dropout_rate=0.0,
        input_layer="embed",
    )
    elayer = encoder.encoders[0]
    x = torch.randn(2, 5, adim)
    mask = subsequent_mask(x.shape[1]).unsqueeze(0)
    prev_mask = mask[:, :-1, :-1]
    encoder.eval()
    with torch.no_grad():
        # layer-level test
        y = elayer(x, mask, None)[0]
        cache = elayer(x[:, :-1], prev_mask, None)[0]
        y_fast = elayer(x, mask, cache=cache)[0]
        numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=RTOL)
        # encoder-level test
        x = torch.randint(0, idim, x.shape[:2])
        y = encoder.forward_one_step(x, mask)[0]
        y_, _, cache = encoder.forward_one_step(x[:, :-1], prev_mask)
        y_fast, _, _ = encoder.forward_one_step(x, mask, cache=cache)
        numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=RTOL)
if __name__ == "__main__":
    # Manual benchmark (not run by pytest): compares cached vs. uncached
    # incremental decoding time as the hypothesis grows, and saves a plot.
    # benchmark with synth dataset
    from time import time
    import matplotlib.pyplot as plt
    adim = 4
    odim = 5
    # switch to "encoder" to benchmark the encoder path instead
    model = "decoder"
    if model == "decoder":
        decoder = Decoder(
            odim=odim,
            attention_dim=adim,
            linear_units=3,
            num_blocks=2,
            dropout_rate=0.0,
        )
        decoder.eval()
    else:
        encoder = Encoder(
            idim=odim,
            attention_dim=adim,
            linear_units=3,
            num_blocks=2,
            dropout_rate=0.0,
            input_layer="embed",
        )
        encoder.eval()
    xlen = 100
    xs = torch.randint(0, odim, (1, xlen))
    memory = torch.randn(2, 500, adim)
    mask = subsequent_mask(xlen).unsqueeze(0)
    result = {"cached": [], "baseline": []}
    # each timing point is averaged over n_avg runs
    n_avg = 10
    for key, value in result.items():
        cache = None
        print(key)
        for i in range(xlen):
            x = xs[:, : i + 1]
            m = mask[:, : i + 1, : i + 1]
            start = time()
            for _ in range(n_avg):
                with torch.no_grad():
                    if key == "baseline":
                        cache = None
                    if model == "decoder":
                        y, new_cache = decoder.forward_one_step(
                            x, m, memory, cache=cache
                        )
                    else:
                        y, _, new_cache = encoder.forward_one_step(x, m, cache=cache)
                    if key == "cached":
                        cache = new_cache
            dur = (time() - start) / n_avg
            value.append(dur)
        plt.plot(range(xlen), value, label=key)
    plt.xlabel("hypothesis length")
    plt.ylabel("average time [sec]")
    plt.grid()
    plt.legend()
    plt.savefig(f"benchmark_{model}.png")
| 4,486 | 30.159722 | 85 | py |
espnet | espnet-master/test/test_recog.py | # coding: utf-8
# Copyright 2018 Hiroshi Seki
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import numpy
import pytest
import torch
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.pytorch_backend import e2e_asr
def make_arg(**kwargs):
    """Build the default argument Namespace for the RNN ASR decoding tests.

    Keyword arguments override the corresponding defaults.
    """
    base = {
        "elayers": 4,
        "subsample": "1_2_2_1_1",
        "etype": "blstmp",
        "eunits": 100,
        "eprojs": 100,
        "dtype": "lstm",
        "dlayers": 1,
        "dunits": 300,
        "atype": "location",
        "aconv_chans": 10,
        "aconv_filts": 100,
        "mtlalpha": 0.5,
        "lsm_type": "",
        "lsm_weight": 0.0,
        "sampling_probability": 0.0,
        "adim": 320,
        "dropout_rate": 0.0,
        "dropout_rate_decoder": 0.0,
        "nbest": 5,
        "beam_size": 3,
        "penalty": 0.5,
        "maxlenratio": 1.0,
        "minlenratio": 0.0,
        "ctc_weight": 0.2,
        "ctc_window_margin": 0,
        "verbose": 2,
        "char_list": ["a", "i", "u", "e", "o"],
        "word_list": ["<blank>", "<unk>", "ai", "iu", "ue", "eo", "oa", "<eos>"],
        "outdir": None,
        "ctc_type": "builtin",
        "report_cer": False,
        "report_wer": False,
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "context_residual": False,
        "use_frontend": False,
        "replace_sos": False,
        "tgt_lang": False,
    }
    base.update(kwargs)
    return argparse.Namespace(**base)
def init_torch_weight_const(m, val):
    """Fill every parameter of torch module ``m`` with the constant ``val``."""
    with torch.no_grad():
        for param in m.parameters():
            param.fill_(val)
def init_torch_weight_random(m, rand_range):
    """Uniformly initialize all parameters of ``m`` within ``rand_range``.

    The word-LM / decoder output biases get a strongly negative first entry
    so that the <blank> symbol starts out with a small score.
    """
    lo, hi = rand_range
    for name, param in m.named_parameters():
        param.data.uniform_(lo, hi)
        # set small bias for <blank> output
        if any(key in name for key in ("wordlm.lo.bias", "dec.output.bias")):
            param.data[0] = -10.0
def init_chainer_weight_const(m, val):
    """Set every parameter array of chainer link ``m`` to the constant ``val``."""
    for param in m.params():
        param.data[...] = val
def make_small_arg(**kwargs):
    """Build a minimal-size variant of :func:`make_arg` for fast decoding tests.

    Note: passing any of the keys fixed below raises ``TypeError`` (duplicate
    keyword), which callers rely on not happening.
    """
    return make_arg(
        elayers=1,
        subsample="1_1",
        etype="lstm",
        eunits=2,
        eprojs=2,
        dtype="lstm",
        dlayers=1,
        dunits=2,
        atype="dot",
        adim=2,
        rnnlm="dummy",
        lm_weight=0.3,
        **kwargs
    )
# ctc_weight: 0.0 (attention), 0.5 (hybrid CTC/attention), 1.0 (CTC)
@pytest.mark.parametrize("ctc_weight", [0.0, 0.5, 1.0])
def test_batch_beam_search(ctc_weight):
    """Batch beam search must return the same best hypothesis as single-utterance
    decoding, with and without an RNNLM / look-ahead word LM."""
    numpy.random.seed(1)
    idim = 10
    args = make_small_arg(ctc_weight=ctc_weight)
    model = e2e_asr.E2E(idim, 5, args)
    torch.manual_seed(1)
    rnnlm = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(len(args.char_list), 2, 2))
    init_torch_weight_random(model, (-0.1, 0.1))
    init_torch_weight_random(rnnlm, (-0.1, 0.1))
    model.eval()
    rnnlm.eval()
    data = [("aaa", dict(feat=numpy.random.randn(10, idim).astype(numpy.float32)))]
    in_data = data[0][1]["feat"]
    # no LM
    s_nbest_hyps = model.recognize(in_data, args, args.char_list)
    b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list)
    assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
    # with character RNNLM
    s_nbest_hyps = model.recognize(in_data, args, args.char_list, rnnlm)
    b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list, rnnlm)
    assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
    if ctc_weight > 0.0:
        # with CTC windowing enabled
        args.ctc_window_margin = 10
        s_nbest_hyps = model.recognize(in_data, args, args.char_list, rnnlm)
        b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list, rnnlm)
        assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
    # Test word LM in batch decoding
    rand_range = (-0.01, 0.01)
    torch.manual_seed(1)
    char_list = ["<blank>", "<space>"] + args.char_list + ["<eos>"]
    args = make_small_arg(
        ctc_weight=ctc_weight,
        ctc_window_margin=10,
        beam_size=5,
    )
    model = e2e_asr.E2E(idim, len(char_list), args)
    char_dict = {x: i for i, x in enumerate(char_list)}
    word_dict = {x: i for i, x in enumerate(args.word_list)}
    word_rnnlm = lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(len(args.word_list), 2, 2)
    )
    rnnlm = lm_pytorch.ClassifierWithState(
        extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor, word_dict, char_dict)
    )
    init_torch_weight_random(model, rand_range)
    init_torch_weight_random(rnnlm, rand_range)
    model.eval()
    rnnlm.eval()
    s_nbest_hyps = model.recognize(in_data, args, char_list, rnnlm)
    b_nbest_hyps = model.recognize_batch([in_data], args, char_list, rnnlm)
    assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
| 4,644 | 28.967742 | 87 | py |
espnet | espnet-master/test/test_scheduler.py | import chainer
import numpy
import pytest
import torch
from espnet.scheduler import scheduler
from espnet.scheduler.chainer import ChainerScheduler
from espnet.scheduler.pytorch import PyTorchScheduler
@pytest.mark.parametrize("name", scheduler.SCHEDULER_DICT.keys())
def test_scheduler(name):
    """Every registered scheduler must build and produce float scale factors."""
    sched = scheduler.dynamic_import_scheduler(name).build("lr")
    assert sched.key == "lr"
    for step in (0, 1000):
        assert isinstance(sched.scale(step), float)
def test_pytorch_scheduler():
    """NoamScheduler must drive a torch optimizer's LR up to ~1.0 at warmup."""
    warmup = 30000
    noam = scheduler.NoamScheduler.build("lr", warmup=warmup)
    opt = torch.optim.SGD(torch.nn.Linear(2, 1).parameters(), lr=1.0)
    wrapper = PyTorchScheduler([noam], opt)
    wrapper.step(0)
    assert all(group["lr"] == noam.scale(0) for group in opt.param_groups)
    wrapper.step(warmup)
    for group in opt.param_groups:
        numpy.testing.assert_allclose(group["lr"], 1.0, rtol=1e-4)
def test_chainer_scheduler():
    """NoamScheduler must drive a chainer optimizer's LR up to ~1.0 at warmup."""
    warmup = 30000
    noam = scheduler.NoamScheduler.build("lr", warmup=warmup)
    opt = chainer.optimizers.SGD(lr=1.0)
    opt.setup(chainer.links.Linear(2, 1))
    wrapper = ChainerScheduler([noam], opt)
    wrapper.step(0)
    assert opt.lr == noam.scale(0)
    wrapper.step(warmup)
    numpy.testing.assert_allclose(opt.lr, 1.0, rtol=1e-4)
| 1,247 | 26.130435 | 65 | py |
espnet | espnet-master/test/test_e2e_st_conformer.py | # coding: utf-8
# Copyright 2019 Hirofumi Inaguma
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_st_conformer import E2E
from espnet.nets.pytorch_backend.transformer import plot
def make_arg(**kwargs):
    """Build the default argument Namespace for the conformer ST tests.

    Keyword arguments override the corresponding defaults.
    """
    base = {
        "adim": 2,
        "aheads": 1,
        "dropout_rate": 0.0,
        "transformer_attn_dropout_rate": None,
        "elayers": 1,
        "eunits": 2,
        "dlayers": 1,
        "dunits": 2,
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "transformer_decoder_selfattn_layer_type": "selfattn",
        "transformer_encoder_pos_enc_layer_type": "rel_pos",
        "transformer_encoder_selfattn_layer_type": "rel_selfattn",
        "macaron_style": True,
        "use_cnn_module": True,
        "cnn_module_kernel": 3,
        "transformer_init": "pytorch",
        "transformer_input_layer": "conv2d",
        "transformer_length_normalized_loss": True,
        "report_bleu": False,
        "report_cer": False,
        "report_wer": False,
        "mtlalpha": 0.0,  # for CTC-ASR
        "lsm_weight": 0.001,
        "char_list": ["<blank>", "a", "e", "i", "o", "u"],
        "ctc_type": "builtin",
        "asr_weight": 0.0,
        "mt_weight": 0.0,
    }
    base.update(kwargs)
    return argparse.Namespace(**base)
def prepare(args):
    """Build a tiny conformer ST model plus a padded random batch.

    Returns ``(model, x, ilens, y_tgt, y_src, data_json, uttid_list)`` where
    padded frames of ``x`` are -1 and padded labels are ``model.ignore_id``.
    """
    idim = 10
    odim = 5
    model = E2E(idim, odim, args)
    batchsize = 2
    ilens = [10, 9]
    olens = [3, 4]
    n_token = odim - 1
    x = torch.randn(batchsize, max(ilens), idim)
    # keep RNG consumption order: source labels first, then targets
    y_src = (torch.rand(batchsize, max(olens)) * n_token % n_token).long()
    y_tgt = (torch.rand(batchsize, max(olens)) * n_token % n_token).long()
    for b, (ilen, olen) in enumerate(zip(ilens, olens)):
        x[b, ilen:] = -1
        y_tgt[b, olen:] = model.ignore_id
        y_src[b, olen:] = model.ignore_id
    uttid_list = ["utt%d" % b for b in range(batchsize)]
    data = {
        uid: {
            "input": [{"shape": [ilens[b], idim]}],
            "output": [{"shape": [olens[b]]}],
        }
        for b, uid in enumerate(uttid_list)
    }
    return model, x, torch.tensor(ilens), y_tgt, y_src, data, uttid_list
# Encoder-variant presets exercised by the parametrized test below:
# conformer without the convolution module
conformer_mcnn_args = dict(
    transformer_encoder_pos_enc_layer_type="rel_pos",
    transformer_encoder_selfattn_layer_type="rel_selfattn",
    macaron_style=True,
    use_cnn_module=False,
)
# ... additionally without macaron-style feed-forward layers
conformer_mcnn_mmacaron_args = dict(
    transformer_encoder_pos_enc_layer_type="rel_pos",
    transformer_encoder_selfattn_layer_type="rel_selfattn",
    macaron_style=False,
    use_cnn_module=False,
)
# ... additionally with absolute positional encoding and plain self-attention
conformer_mcnn_mmacaron_mrelattn_args = dict(
    transformer_encoder_pos_enc_layer_type="abs_pos",
    transformer_encoder_selfattn_layer_type="selfattn",
    macaron_style=False,
    use_cnn_module=False,
)
def _savefn(*args, **kwargs):
return
@pytest.mark.parametrize(
    "model_dict",
    [
        {},
        conformer_mcnn_args,
        conformer_mcnn_mmacaron_args,
        conformer_mcnn_mmacaron_mrelattn_args,
    ],
)
def test_transformer_trainable_and_decodable(model_dict):
    """Train one step, plot attention/CTC, and beam-search-translate one input."""
    args = make_arg(**model_dict)
    model, x, ilens, y_tgt, y_src, data, uttid_list = prepare(args)
    # test beam search
    trans_args = argparse.Namespace(
        beam_size=1,
        penalty=0.0,
        ctc_weight=0.0,
        maxlenratio=1.0,
        lm_weight=0,
        minlenratio=0,
        nbest=1,
        tgt_lang=False,
    )
    # test trainable
    optim = torch.optim.Adam(model.parameters(), 0.01)
    loss = model(x, ilens, y_tgt, y_src)
    optim.zero_grad()
    loss.backward()
    optim.step()
    # test attention plot
    attn_dict = model.calculate_all_attentions(
        x[0:1], ilens[0:1], y_tgt[0:1], y_src[0:1]
    )
    plot.plot_multi_head_attention(data, uttid_list, attn_dict, "", savefn=_savefn)
    # test CTC plot
    ctc_probs = model.calculate_all_ctc_probs(
        x[0:1], ilens[0:1], y_tgt[0:1], y_src[0:1]
    )
    # CTC probabilities are produced only when the auxiliary ASR/CTC task is on
    if args.asr_weight > 0 and args.mtlalpha > 0:
        print(ctc_probs.shape)
    else:
        assert ctc_probs is None
    # test decodable
    with torch.no_grad():
        nbest = model.translate(x[0, : ilens[0]].numpy(), trans_args, args.char_list)
        print(y_tgt[0])
        print(nbest[0]["yseq"][1:-1])
| 4,216 | 26.383117 | 85 | py |
espnet | espnet-master/test/espnet2/svs/test_naive_rnn_dp.py | import pytest
import torch
from espnet2.svs.naive_rnn.naive_rnn_dp import NaiveRNNDP
@pytest.mark.parametrize("eprenet_conv_layers", [0, 1])
@pytest.mark.parametrize("midi_embed_integration_type", ["add", "cat"])
@pytest.mark.parametrize("postnet_layers", [0, 1])
@pytest.mark.parametrize("reduction_factor", [1, 3])
@pytest.mark.parametrize(
    "spk_embed_dim, spk_embed_integration_type",
    [(None, "add"), (2, "add"), (2, "concat")],
)
@pytest.mark.parametrize(
    "spks, langs",
    [(-1, -1), (5, 2)],
)
def test_NaiveRNNDP(
    eprenet_conv_layers,
    midi_embed_integration_type,
    postnet_layers,
    reduction_factor,
    spk_embed_dim,
    spk_embed_integration_type,
    spks,
    langs,
):
    """Forward/backward one batch through NaiveRNNDP and run inference.

    Exercises the full grid of prenet/postnet/reduction/speaker/language
    options. Batch inputs carry parallel "lab" (label) and "score" streams
    for labels, melody, and durations.
    """
    idim = 10
    odim = 5
    model = NaiveRNNDP(
        idim=idim,
        odim=odim,
        midi_dim=129,
        embed_dim=5,
        duration_dim=10,
        eprenet_conv_layers=eprenet_conv_layers,
        eprenet_conv_chans=4,
        eprenet_conv_filts=5,
        elayers=2,
        eunits=6,
        ebidirectional=True,
        midi_embed_integration_type=midi_embed_integration_type,
        dlayers=2,
        dunits=6,
        postnet_layers=postnet_layers,
        postnet_chans=4,
        postnet_filts=5,
        use_batch_norm=True,
        duration_predictor_layers=2,
        duration_predictor_chans=4,
        duration_predictor_kernel_size=3,
        duration_predictor_dropout_rate=0.1,
        reduction_factor=reduction_factor,
        spks=spks,
        langs=langs,
        spk_embed_dim=spk_embed_dim,
        spk_embed_integration_type=spk_embed_integration_type,
        eprenet_dropout_rate=0.2,
        edropout_rate=0.1,
        ddropout_rate=0.1,
        postnet_dropout_rate=0.5,
        init_type="pytorch",
        use_masking=True,
        use_weighted_masking=False,
    )
    # training batch: 2 utterances of label length 8/5 and 16/13 feature frames
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        label={
            "lab": torch.randint(0, idim, (2, 8)),
            "score": torch.randint(0, idim, (2, 8)),
        },
        label_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score": torch.tensor([8, 5], dtype=torch.long),
        },
        melody={
            "lab": torch.randint(0, 127, (2, 8)),
            "score": torch.randint(0, 127, (2, 8)),
        },
        melody_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score": torch.tensor([8, 5], dtype=torch.long),
        },
        duration={
            "lab": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 2], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_phn": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 1], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_syb": torch.tensor(
                [[3, 3, 5, 5, 4, 4, 3, 3], [4, 4, 5, 5, 3, 3, 4, 4]], dtype=torch.int64
            ),
        },
        duration_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score_phn": torch.tensor([8, 5], dtype=torch.long),
            "score_syb": torch.tensor([8, 5], dtype=torch.long),
        },
        slur=torch.randint(0, 2, (2, 8)),
        slur_lengths=torch.tensor([8, 5], dtype=torch.long),
        pitch=torch.randn(2, 16, 1),
        pitch_lengths=torch.tensor([16, 13], dtype=torch.long),
    )
    # optional conditioning inputs, mirroring the model configuration above
    if spk_embed_dim is not None:
        inputs.update(spembs=torch.randn(2, spk_embed_dim))
    if spks > 0:
        inputs.update(sids=torch.randint(0, spks, (2, 1)))
    if langs > 0:
        inputs.update(lids=torch.randint(0, langs, (2, 1)))
    loss, *_ = model(**inputs)
    loss.backward()
    # inference on a single utterance of length 5
    with torch.no_grad():
        model.eval()
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (
                    1,
                    5,
                ),
            ),
            label={
                "lab": torch.randint(
                    0,
                    idim,
                    (
                        1,
                        5,
                    ),
                ),
                "score": torch.randint(
                    0,
                    idim,
                    (
                        1,
                        5,
                    ),
                ),
            },
            melody={
                "lab": torch.randint(
                    0,
                    127,
                    (
                        1,
                        5,
                    ),
                ),
                "score": torch.randint(
                    0,
                    127,
                    (
                        1,
                        5,
                    ),
                ),
            },
            duration={
                "lab": torch.tensor([[1, 2, 2, 3, 3]], dtype=torch.int64),
                "score_phn": torch.tensor([[1, 2, 2, 3, 4]], dtype=torch.int64),
                "score_syb": torch.tensor([[3, 3, 5, 5, 4]], dtype=torch.int64),
            },
            slur=torch.randint(0, 2, (1, 5)),
            pitch=torch.randn(16, 1),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim is not None:
            inputs.update(spembs=torch.randn(spk_embed_dim))
        model.inference(**inputs)
if __name__ == "__main__":
    # Smoke-test one configuration when run as a script. Previously this call
    # ran unconditionally at import time, executing a full forward/backward
    # pass during pytest collection; guarding it keeps collection side-effect
    # free while preserving the direct-run behavior.
    test_NaiveRNNDP(0, "cat", 1, 3, None, "add", 5, 2)
| 5,665 | 29.627027 | 87 | py |
espnet | espnet-master/test/espnet2/svs/test_singing_tacotron.py | import pytest
import torch
from espnet2.svs.singing_tacotron.singing_tacotron import singing_tacotron
@pytest.mark.parametrize("prenet_layers", [0, 1])
@pytest.mark.parametrize("postnet_layers", [0, 1])
@pytest.mark.parametrize("reduction_factor", [1, 3])
@pytest.mark.parametrize("atype", ["location", "forward", "forward_ta", "GDCA"])
@pytest.mark.parametrize(
"spk_embed_dim, spk_embed_integration_type",
[(None, "add"), (2, "add"), (2, "concat")],
)
@pytest.mark.parametrize(
"spks, langs, use_gst",
[(-1, -1, False), (5, 2, True)],
)
@pytest.mark.parametrize("use_guided_attn_loss", [True, False])
def test_singing_tacotron(
    prenet_layers,
    postnet_layers,
    reduction_factor,
    atype,
    spks,
    langs,
    spk_embed_dim,
    spk_embed_integration_type,
    use_gst,
    use_guided_attn_loss,
):
    """Check singing_tacotron trains (loss backward) and decodes.

    Runs one forward/backward pass on a tiny batched model, then exercises
    ``inference`` both free-running and with teacher forcing.  All arguments
    come from the ``pytest.mark.parametrize`` decorators above.
    """
    # Tiny vocabulary / feature sizes keep the test fast.
    idim = 10
    odim = 5
    model = singing_tacotron(
        idim=idim,
        odim=odim,
        midi_dim=129,
        embed_dim=5,
        duration_dim=10,
        elayers=1,
        eunits=4,
        econv_layers=1,
        econv_filts=5,
        econv_chans=4,
        atype=atype,
        adim=4,
        dlayers=1,
        dunits=4,
        prenet_layers=prenet_layers,
        prenet_units=4,
        postnet_layers=postnet_layers,
        postnet_chans=4,
        postnet_filts=5,
        reduction_factor=reduction_factor,
        spks=spks,
        langs=langs,
        spk_embed_dim=spk_embed_dim,
        spk_embed_integration_type=spk_embed_integration_type,
        use_gst=use_gst,
        gst_tokens=2,
        gst_heads=4,
        gst_conv_layers=2,
        gst_conv_chans_list=[2, 4],
        gst_conv_kernel_size=3,
        gst_conv_stride=2,
        gst_gru_layers=1,
        gst_gru_units=4,
        loss_type="L1",
        use_guided_attn_loss=use_guided_attn_loss,
        guided_attn_loss_sigma=0.4,
        guided_attn_loss_lambda=1.0,
    )
    # Batched (B=2) training inputs; label/melody/duration come as dicts with
    # both label-file ("lab") and score-derived ("score*") variants.
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        label={
            "lab": torch.randint(0, idim, (2, 8)),
            "score": torch.randint(0, idim, (2, 8)),
        },
        label_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score": torch.tensor([8, 5], dtype=torch.long),
        },
        melody={
            "lab": torch.randint(0, 127, (2, 8)),
            "score": torch.randint(0, 127, (2, 8)),
        },
        melody_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score": torch.tensor([8, 5], dtype=torch.long),
        },
        duration={
            "lab": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 2], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_phn": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 1], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_syb": torch.tensor(
                [[3, 3, 5, 5, 4, 4, 3, 3], [4, 4, 5, 5, 3, 3, 4, 4]], dtype=torch.int64
            ),
        },
        duration_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score_phn": torch.tensor([8, 5], dtype=torch.long),
            "score_syb": torch.tensor([8, 5], dtype=torch.long),
        },
        slur=torch.randint(0, 2, (2, 8)),
        slur_lengths=torch.tensor([8, 5], dtype=torch.long),
        pitch=torch.randn(2, 16, 1),
        pitch_lengths=torch.tensor([16, 13], dtype=torch.long),
    )
    # Optional multi-speaker / multi-lingual conditioning inputs.
    if spk_embed_dim is not None:
        inputs.update(spembs=torch.randn(2, spk_embed_dim))
    if spks > 0:
        inputs.update(sids=torch.randint(0, spks, (2, 1)))
    if langs > 0:
        inputs.update(lids=torch.randint(0, langs, (2, 1)))
    loss, *_ = model(**inputs)
    loss.backward()
    with torch.no_grad():
        model.eval()
        # Unbatched (shape (1, 5)) inputs for free-running inference.
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (
                    1,
                    5,
                ),
            ),
            label={
                "lab": torch.randint(
                    0,
                    idim,
                    (
                        1,
                        5,
                    ),
                ),
                "score": torch.randint(
                    0,
                    idim,
                    (
                        1,
                        5,
                    ),
                ),
            },
            melody={
                "lab": torch.randint(
                    0,
                    127,
                    (
                        1,
                        5,
                    ),
                ),
                "score": torch.randint(
                    0,
                    127,
                    (
                        1,
                        5,
                    ),
                ),
            },
            duration={
                "lab": torch.tensor([[1, 2, 2, 3, 3]], dtype=torch.int64),
                "score_phn": torch.tensor([[1, 2, 2, 3, 4]], dtype=torch.int64),
                "score_syb": torch.tensor([[3, 3, 5, 5, 4]], dtype=torch.int64),
            },
            slur=torch.randint(0, 2, (1, 5)),
            pitch=torch.randn(16, 1),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim is not None:
            inputs.update(spembs=torch.randn(spk_embed_dim))
        if use_gst:
            inputs.update(feats=torch.randn(5, 5))
        # GDCA attention uses the dynamic filter; otherwise use the plain
        # attention constraint.  The two flags are mutually exclusive here.
        if atype == "GDCA":
            use_dynamic_filter = True
            use_att_constraint = False
        else:
            use_dynamic_filter = False
            use_att_constraint = True
        model.inference(
            **inputs,
            maxlenratio=1.0,
            use_att_constraint=use_att_constraint,
            use_dynamic_filter=use_dynamic_filter
        )
        # teacher forcing
        # check inference with teacher forcing
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (
                    1,
                    5,
                ),
            ),
            label={
                "lab": torch.randint(
                    0,
                    idim,
                    (
                        1,
                        5,
                    ),
                ),
                "score": torch.randint(
                    0,
                    idim,
                    (
                        1,
                        5,
                    ),
                ),
            },
            melody={
                "lab": torch.randint(
                    0,
                    127,
                    (
                        1,
                        5,
                    ),
                ),
                "score": torch.randint(
                    0,
                    127,
                    (
                        1,
                        5,
                    ),
                ),
            },
            duration={
                "lab": torch.tensor([[1, 2, 2, 3, 3]], dtype=torch.int64),
                "score_phn": torch.tensor([[1, 2, 2, 3, 4]], dtype=torch.int64),
                "score_syb": torch.tensor([[3, 3, 5, 5, 4]], dtype=torch.int64),
            },
            slur=torch.randint(0, 2, (1, 5)),
            pitch=torch.randn(16, 1),
            # Ground-truth feats are required for teacher forcing.
            feats=torch.randn(16, odim),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim is not None:
            inputs.update(spembs=torch.randn(spk_embed_dim))
        if use_gst:
            # GST reference feats override the teacher-forcing feats above.
            inputs.update(feats=torch.randn(5, 5))
        use_dynamic_filter = False
        use_att_constraint = False
        model.inference(
            **inputs,
            use_teacher_forcing=True,
            use_att_constraint=use_att_constraint,
            use_dynamic_filter=use_dynamic_filter
        )
| 8,282 | 29.12 | 87 | py |
espnet | espnet-master/test/espnet2/gan_tts/jets/test_jets.py | # Copyright 2022 Dan Lim
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test JETS related modules."""
import pytest
import torch
from espnet2.gan_tts.jets import JETS
def make_jets_generator_args(**kwargs):
    """Return default JETS generator arguments with ``kwargs`` overrides.

    Top-level keys passed via ``kwargs`` replace the defaults; the nested
    ``generator_params`` dict is replaced wholesale if overridden.
    """
    args = {
        "generator_type": "jets_generator",
        "generator_params": {
            "idim": 10,
            "odim": 5,
            "adim": 4,
            "aheads": 2,
            "elayers": 1,
            "eunits": 4,
            "dlayers": 1,
            "dunits": 4,
            "positionwise_layer_type": "conv1d",
            "positionwise_conv_kernel_size": 1,
            "use_scaled_pos_enc": True,
            "use_batch_norm": True,
            "encoder_normalize_before": True,
            "decoder_normalize_before": True,
            "encoder_concat_after": False,
            "decoder_concat_after": False,
            "reduction_factor": 1,
            "encoder_type": "transformer",
            "decoder_type": "transformer",
            "transformer_enc_dropout_rate": 0.1,
            "transformer_enc_positional_dropout_rate": 0.1,
            "transformer_enc_attn_dropout_rate": 0.1,
            "transformer_dec_dropout_rate": 0.1,
            "transformer_dec_positional_dropout_rate": 0.1,
            "transformer_dec_attn_dropout_rate": 0.1,
            "conformer_rel_pos_type": "legacy",
            "conformer_pos_enc_layer_type": "rel_pos",
            "conformer_self_attn_layer_type": "rel_selfattn",
            "conformer_activation_type": "swish",
            "use_macaron_style_in_conformer": True,
            "use_cnn_in_conformer": True,
            "zero_triu": False,
            "conformer_enc_kernel_size": 3,
            "conformer_dec_kernel_size": 3,
            "duration_predictor_layers": 2,
            "duration_predictor_chans": 4,
            "duration_predictor_kernel_size": 3,
            "duration_predictor_dropout_rate": 0.1,
            "energy_predictor_layers": 2,
            "energy_predictor_chans": 4,
            "energy_predictor_kernel_size": 3,
            "energy_predictor_dropout": 0.5,
            "energy_embed_kernel_size": 3,
            "energy_embed_dropout": 0.5,
            "stop_gradient_from_energy_predictor": False,
            "pitch_predictor_layers": 2,
            "pitch_predictor_chans": 4,
            "pitch_predictor_kernel_size": 3,
            "pitch_predictor_dropout": 0.5,
            "pitch_embed_kernel_size": 3,
            "pitch_embed_dropout": 0.5,
            "stop_gradient_from_pitch_predictor": False,
            "spks": None,
            "langs": None,
            "spk_embed_dim": None,
            "spk_embed_integration_type": "add",
            "use_gst": False,
            "gst_tokens": 10,
            "gst_heads": 4,
            "gst_conv_layers": 2,
            "gst_conv_chans_list": (3, 3, 6, 6, 12, 12),
            "gst_conv_kernel_size": 3,
            "gst_conv_stride": 2,
            "gst_gru_layers": 1,
            "gst_gru_units": 8,
            "init_type": "xavier_uniform",
            "init_enc_alpha": 1.0,
            "init_dec_alpha": 1.0,
            "use_masking": False,
            "use_weighted_masking": False,
            "segment_size": 4,
            "generator_out_channels": 1,
            "generator_channels": 16,
            "generator_global_channels": -1,
            "generator_kernel_size": 7,
            "generator_upsample_scales": [16, 16],
            "generator_upsample_kernel_sizes": [32, 32],
            "generator_resblock_kernel_sizes": [3, 3],
            "generator_resblock_dilations": [
                [1, 3],
                [1, 3],
            ],
            "generator_use_additional_convs": True,
            "generator_bias": True,
            "generator_nonlinear_activation": "LeakyReLU",
            "generator_nonlinear_activation_params": {"negative_slope": 0.1},
            "generator_use_weight_norm": True,
        },
    }
    args.update(kwargs)
    return args
def make_jets_discriminator_args(**kwargs):
    """Return default JETS discriminator arguments with ``kwargs`` overrides."""
    args = {
        "discriminator_type": "hifigan_multi_scale_multi_period_discriminator",
        "discriminator_params": {
            "scales": 1,
            "scale_downsample_pooling": "AvgPool1d",
            "scale_downsample_pooling_params": {
                "kernel_size": 4,
                "stride": 2,
                "padding": 2,
            },
            "scale_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [15, 41, 5, 3],
                "channels": 16,
                "max_downsample_channels": 32,
                "max_groups": 16,
                "bias": True,
                "downsample_scales": [2, 1],
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
            },
            "follow_official_norm": False,
            "periods": [2, 3],
            "period_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [5, 3],
                "channels": 4,
                "downsample_scales": [3, 1],
                "max_downsample_channels": 16,
                "bias": True,
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
                "use_weight_norm": True,
                "use_spectral_norm": False,
            },
        },
    }
    args.update(kwargs)
    return args
def make_jets_loss_args(**kwargs):
    """Return default JETS loss weights/parameters with ``kwargs`` overrides."""
    args = {
        "lambda_adv": 1.0,
        "lambda_mel": 45.0,
        "lambda_feat_match": 2.0,
        "lambda_var": 1.0,
        "lambda_align": 2.0,
        "generator_adv_loss_params": {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        "discriminator_adv_loss_params": {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        "feat_match_loss_params": {
            "average_by_discriminators": False,
            "average_by_layers": False,
            "include_final_outputs": True,
        },
        "mel_loss_params": {
            "fs": 22050,
            "n_fft": 1024,
            "hop_length": 256,
            "win_length": None,
            "window": "hann",
            "n_mels": 80,
            "fmin": 0,
            "fmax": None,
            "log_base": None,
        },
    }
    args.update(kwargs)
    return args
# NOTE(kan-bayashi): first forward requires jit compile
# so a little bit more time is needed to run. Therefore,
# here we extend execution timeout from 2 sec to 8 sec.
@pytest.mark.execution_timeout(8)
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
"gen_dict, dis_dict, loss_dict",
[
({}, {}, {}),
({}, {}, {"cache_generator_outputs": True}),
(
{},
{
"discriminator_type": "hifigan_multi_scale_discriminator",
"discriminator_params": {
"scales": 2,
"downsample_pooling": "AvgPool1d",
"downsample_pooling_params": {
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 16,
"max_downsample_channels": 32,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
},
},
{},
),
(
{},
{
"discriminator_type": "hifigan_multi_period_discriminator",
"discriminator_params": {
"periods": [2, 3],
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 16,
"downsample_scales": [3, 3, 1],
"max_downsample_channels": 32,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
},
},
{},
),
(
{},
{},
{
"generator_adv_loss_params": {
"average_by_discriminators": True,
"loss_type": "mse",
},
"discriminator_adv_loss_params": {
"average_by_discriminators": True,
"loss_type": "mse",
},
},
),
(
{},
{},
{
"generator_adv_loss_params": {
"average_by_discriminators": False,
"loss_type": "hinge",
},
"discriminator_adv_loss_params": {
"average_by_discriminators": False,
"loss_type": "hinge",
},
},
),
],
)
def test_jets_is_trainable_and_decodable(gen_dict, dis_dict, loss_dict):
    """Check JETS runs a generator and a discriminator train step and decodes."""
    idim = 10
    odim = 5
    gen_args = make_jets_generator_args(**gen_dict)
    dis_args = make_jets_discriminator_args(**dis_dict)
    loss_args = make_jets_loss_args(**loss_dict)
    model = JETS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        speech=torch.randn(2, 16 * upsample_factor),
        # NOTE(review): `[16, 13] * upsample_factor` replicates the Python
        # list (length 2 * upsample_factor) rather than scaling each length;
        # `[16 * upsample_factor, 13 * upsample_factor]` looks intended —
        # confirm against JETS.forward before changing.
        speech_lengths=torch.tensor([16, 13] * upsample_factor, dtype=torch.long),
        pitch=torch.randn(2, 16, 1),
        pitch_lengths=torch.tensor([16, 13], dtype=torch.long),
        energy=torch.randn(2, 16, 1),
        energy_lengths=torch.tensor([16, 13], dtype=torch.long),
    )
    # One optimization step per side of the GAN.
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()
    with torch.no_grad():
        model.eval()
        # check inference
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            )
        )
        model.inference(**inputs)
        # check inference with teacher forcing
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            feats=torch.randn(16, odim),
            pitch=torch.randn(16, 1),
            energy=torch.randn(16, 1),
        )
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        # With teacher forcing, waveform length must match feats * upsampling.
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="Group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
"gen_dict, dis_dict, loss_dict,",
[
({}, {}, {}),
({}, {}, {"cache_generator_outputs": True}),
(
{},
{
"discriminator_type": "hifigan_multi_scale_discriminator",
"discriminator_params": {
"scales": 2,
"downsample_pooling": "AvgPool1d",
"downsample_pooling_params": {
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 16,
"max_downsample_channels": 32,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
},
},
{},
),
(
{},
{
"discriminator_type": "hifigan_multi_period_discriminator",
"discriminator_params": {
"periods": [2, 3],
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 16,
"downsample_scales": [3, 3, 1],
"max_downsample_channels": 32,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
},
},
{},
),
(
{},
{},
{
"generator_adv_loss_params": {
"average_by_discriminators": True,
"loss_type": "mse",
},
"discriminator_adv_loss_params": {
"average_by_discriminators": True,
"loss_type": "mse",
},
},
),
(
{},
{},
{
"generator_adv_loss_params": {
"average_by_discriminators": False,
"loss_type": "hinge",
},
"discriminator_adv_loss_params": {
"average_by_discriminators": False,
"loss_type": "hinge",
},
},
),
],
)
@pytest.mark.parametrize(
"spks, spk_embed_dim, langs", [(10, -1, -1), (-1, 5, -1), (-1, -1, 3), (4, 5, 3)]
)
def test_multi_speaker_jets_is_trainable_and_decodable(
    gen_dict, dis_dict, loss_dict, spks, spk_embed_dim, langs
):
    """Check multi-speaker / multi-lingual JETS trains and decodes.

    Negative values of ``spks`` / ``langs`` / ``spk_embed_dim`` disable the
    corresponding conditioning input.
    """
    idim = 10
    odim = 5
    gen_args = make_jets_generator_args(**gen_dict)
    # Inject speaker/language conditioning into the generator config.
    gen_args["generator_params"]["spks"] = spks
    gen_args["generator_params"]["langs"] = langs
    gen_args["generator_params"]["spk_embed_dim"] = spk_embed_dim
    dis_args = make_jets_discriminator_args(**dis_dict)
    loss_args = make_jets_loss_args(**loss_dict)
    model = JETS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        speech=torch.randn(2, 16 * upsample_factor),
        # NOTE(review): `[16, 13] * upsample_factor` replicates the list
        # rather than scaling each length — likely intended
        # `[16 * upsample_factor, 13 * upsample_factor]`; verify first.
        speech_lengths=torch.tensor([16, 13] * upsample_factor, dtype=torch.long),
        pitch=torch.randn(2, 16, 1),
        pitch_lengths=torch.tensor([16, 13], dtype=torch.long),
        energy=torch.randn(2, 16, 1),
        energy_lengths=torch.tensor([16, 13], dtype=torch.long),
    )
    if spks > 0:
        inputs["sids"] = torch.randint(0, spks, (2, 1))
    if langs > 0:
        inputs["lids"] = torch.randint(0, langs, (2, 1))
    if spk_embed_dim > 0:
        inputs["spembs"] = torch.randn(2, spk_embed_dim)
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()
    with torch.no_grad():
        model.eval()
        # check inference
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        model.inference(**inputs)
        # check inference with teacher forcing
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            feats=torch.randn(16, odim),
            pitch=torch.randn(16, 1),
            energy=torch.randn(16, 1),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        # With teacher forcing, waveform length must match feats * upsampling.
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
@pytest.mark.skipif(
not torch.cuda.is_available(),
reason="GPU is needed.",
)
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
"gen_dict, dis_dict, loss_dict",
[
({}, {}, {}),
({}, {}, {"cache_generator_outputs": True}),
(
{},
{
"discriminator_type": "hifigan_multi_scale_discriminator",
"discriminator_params": {
"scales": 2,
"downsample_pooling": "AvgPool1d",
"downsample_pooling_params": {
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 16,
"max_downsample_channels": 32,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
},
},
{},
),
(
{},
{
"discriminator_type": "hifigan_multi_period_discriminator",
"discriminator_params": {
"periods": [2, 3],
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 16,
"downsample_scales": [3, 3, 1],
"max_downsample_channels": 32,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
},
},
{},
),
(
{},
{
"discriminator_type": "hifigan_period_discriminator",
"discriminator_params": {
"period": 2,
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 16,
"downsample_scales": [3, 3, 1],
"max_downsample_channels": 32,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
},
{},
),
(
{},
{
"discriminator_type": "hifigan_scale_discriminator",
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 16,
"max_downsample_channels": 32,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
},
{},
),
(
{},
{},
{
"generator_adv_loss_params": {
"average_by_discriminators": True,
"loss_type": "mse",
},
"discriminator_adv_loss_params": {
"average_by_discriminators": True,
"loss_type": "mse",
},
},
),
(
{},
{},
{
"generator_adv_loss_params": {
"average_by_discriminators": False,
"loss_type": "hinge",
},
"discriminator_adv_loss_params": {
"average_by_discriminators": False,
"loss_type": "hinge",
},
},
),
],
)
def test_jets_is_trainable_and_decodable_on_gpu(gen_dict, dis_dict, loss_dict):
    """GPU variant of the JETS train/decode check (skipped without CUDA)."""
    idim = 10
    odim = 5
    gen_args = make_jets_generator_args(**gen_dict)
    dis_args = make_jets_discriminator_args(**dis_dict)
    loss_args = make_jets_loss_args(**loss_dict)
    model = JETS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        speech=torch.randn(2, 16 * upsample_factor),
        # NOTE(review): `[16, 13] * upsample_factor` replicates the list
        # rather than scaling each length — likely intended
        # `[16 * upsample_factor, 13 * upsample_factor]`; verify first.
        speech_lengths=torch.tensor([16, 13] * upsample_factor, dtype=torch.long),
        pitch=torch.randn(2, 16, 1),
        pitch_lengths=torch.tensor([16, 13], dtype=torch.long),
        energy=torch.randn(2, 16, 1),
        energy_lengths=torch.tensor([16, 13], dtype=torch.long),
    )
    # Move model and every tensor input to the GPU before the forward pass.
    device = torch.device("cuda")
    model.to(device)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()
    with torch.no_grad():
        model.eval()
        # check inference
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            )
        )
        inputs = {k: v.to(device) for k, v in inputs.items()}
        model.inference(**inputs)
        # check inference with teacher forcing
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            feats=torch.randn(16, odim),
            pitch=torch.randn(16, 1),
            energy=torch.randn(16, 1),
        )
        inputs = {k: v.to(device) for k, v in inputs.items()}
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        # With teacher forcing, waveform length must match feats * upsampling.
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
@pytest.mark.skipif(
not torch.cuda.is_available(),
reason="GPU is needed.",
)
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="Group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
"gen_dict, dis_dict, loss_dict",
[
({}, {}, {}),
({}, {}, {"cache_generator_outputs": True}),
(
{},
{
"discriminator_type": "hifigan_multi_scale_discriminator",
"discriminator_params": {
"scales": 2,
"downsample_pooling": "AvgPool1d",
"downsample_pooling_params": {
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 16,
"max_downsample_channels": 32,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
},
},
{},
),
(
{},
{
"discriminator_type": "hifigan_multi_period_discriminator",
"discriminator_params": {
"periods": [2, 3],
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 16,
"downsample_scales": [3, 3, 1],
"max_downsample_channels": 32,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
},
},
{},
),
(
{},
{
"discriminator_type": "hifigan_period_discriminator",
"discriminator_params": {
"period": 2,
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 16,
"downsample_scales": [3, 3, 1],
"max_downsample_channels": 32,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
},
{},
),
(
{},
{
"discriminator_type": "hifigan_scale_discriminator",
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 16,
"max_downsample_channels": 32,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
},
{},
),
(
{},
{},
{
"generator_adv_loss_params": {
"average_by_discriminators": True,
"loss_type": "mse",
},
"discriminator_adv_loss_params": {
"average_by_discriminators": True,
"loss_type": "mse",
},
},
),
(
{},
{},
{
"generator_adv_loss_params": {
"average_by_discriminators": False,
"loss_type": "hinge",
},
"discriminator_adv_loss_params": {
"average_by_discriminators": False,
"loss_type": "hinge",
},
},
),
],
)
@pytest.mark.parametrize(
"spks, spk_embed_dim, langs", [(10, -1, -1), (-1, 5, -1), (-1, -1, 3), (4, 5, 3)]
)
def test_multi_speaker_jets_is_trainable_and_decodable_on_gpu(
    gen_dict, dis_dict, loss_dict, spks, spk_embed_dim, langs
):
    """GPU variant of the multi-speaker JETS check (skipped without CUDA)."""
    idim = 10
    odim = 5
    gen_args = make_jets_generator_args(**gen_dict)
    # Inject speaker/language conditioning into the generator config.
    gen_args["generator_params"]["spks"] = spks
    gen_args["generator_params"]["langs"] = langs
    gen_args["generator_params"]["spk_embed_dim"] = spk_embed_dim
    dis_args = make_jets_discriminator_args(**dis_dict)
    loss_args = make_jets_loss_args(**loss_dict)
    model = JETS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        speech=torch.randn(2, 16 * upsample_factor),
        # NOTE(review): `[16, 13] * upsample_factor` replicates the list
        # rather than scaling each length — likely intended
        # `[16 * upsample_factor, 13 * upsample_factor]`; verify first.
        speech_lengths=torch.tensor([16, 13] * upsample_factor, dtype=torch.long),
        pitch=torch.randn(2, 16, 1),
        pitch_lengths=torch.tensor([16, 13], dtype=torch.long),
        energy=torch.randn(2, 16, 1),
        energy_lengths=torch.tensor([16, 13], dtype=torch.long),
    )
    if spks > 0:
        inputs["sids"] = torch.randint(0, spks, (2, 1))
    if langs > 0:
        inputs["lids"] = torch.randint(0, langs, (2, 1))
    if spk_embed_dim > 0:
        inputs["spembs"] = torch.randn(2, spk_embed_dim)
    # Move model and every tensor input to the GPU before the forward pass.
    device = torch.device("cuda")
    model.to(device)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()
    with torch.no_grad():
        model.eval()
        # check inference
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        inputs = {k: v.to(device) for k, v in inputs.items()}
        model.inference(**inputs)
        # check inference with teacher forcing
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            feats=torch.randn(16, odim),
            pitch=torch.randn(16, 1),
            energy=torch.randn(16, 1),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        inputs = {k: v.to(device) for k, v in inputs.items()}
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        # With teacher forcing, waveform length must match feats * upsampling.
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
| 32,573 | 33.469841 | 86 | py |
espnet | espnet-master/test/espnet2/gan_tts/melgan/test_melgan.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test code for MelGAN modules."""
import numpy as np
import pytest
import torch
from espnet2.gan_tts.hifigan.loss import (
DiscriminatorAdversarialLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
)
from espnet2.gan_tts.melgan import MelGANGenerator, MelGANMultiScaleDiscriminator
def make_melgan_generator_args(**kwargs):
    """Return default MelGAN generator arguments with ``kwargs`` overrides."""
    args = {
        "in_channels": 80,
        "out_channels": 1,
        "kernel_size": 7,
        "channels": 32,
        "bias": True,
        "upsample_scales": [4, 4],
        "stack_kernel_size": 3,
        "stacks": 2,
        "nonlinear_activation": "LeakyReLU",
        "nonlinear_activation_params": {"negative_slope": 0.2},
        "pad": "ReflectionPad1d",
        "pad_params": {},
        "use_final_nonlinear_activation": True,
        "use_weight_norm": True,
    }
    args.update(kwargs)
    return args
def make_melgan_discriminator_args(**kwargs):
    """Return default MelGAN discriminator arguments with ``kwargs`` overrides."""
    args = {
        "in_channels": 1,
        "out_channels": 1,
        "scales": 2,
        "downsample_pooling": "AvgPool1d",
        # Pooling settings follow the official MelGAN implementation.
        "downsample_pooling_params": {
            "kernel_size": 4,
            "stride": 2,
            "padding": 1,
            "count_include_pad": False,
        },
        "kernel_sizes": [5, 3],
        "channels": 16,
        "max_downsample_channels": 32,
        "bias": True,
        "downsample_scales": [2, 2],
        "nonlinear_activation": "LeakyReLU",
        "nonlinear_activation_params": {"negative_slope": 0.2},
        "pad": "ReflectionPad1d",
        "pad_params": {},
        "use_weight_norm": True,
    }
    args.update(kwargs)
    return args
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
"dict_g, dict_d",
[
({}, {}),
({}, {"scales": 4}),
({}, {"kernel_sizes": [7, 5]}),
({}, {"max_downsample_channels": 128}),
({}, {"downsample_scales": [4, 4]}),
({}, {"pad": "ConstantPad1d", "pad_params": {"value": 0.0}}),
({}, {"nonlinear_activation": "ReLU", "nonlinear_activation_params": {}}),
],
)
def test_melgan_generator_and_discriminator(dict_g, dict_d):
    """Check one adversarial train step for MelGAN generator and discriminator."""
    # setup
    batch_size = 2
    batch_length = 512
    args_g = make_melgan_generator_args(**dict_g)
    args_d = make_melgan_discriminator_args(**dict_d)
    # y: real waveform; c: conditioning mel features (frame count must match
    # the waveform length divided by the total upsampling product).
    y = torch.randn(batch_size, 1, batch_length)
    c = torch.randn(
        batch_size,
        args_g["in_channels"],
        batch_length // np.prod(args_g["upsample_scales"]),
    )
    model_g = MelGANGenerator(**args_g)
    model_d = MelGANMultiScaleDiscriminator(**args_d)
    gen_adv_criterion = GeneratorAdversarialLoss()
    dis_adv_criterion = DiscriminatorAdversarialLoss()
    feat_match_criterion = FeatureMatchLoss()
    optimizer_g = torch.optim.Adam(model_g.parameters())
    optimizer_d = torch.optim.Adam(model_d.parameters())
    # check generator trainable
    y_hat = model_g(c)
    p_hat = model_d(y_hat)
    adv_loss = gen_adv_criterion(p_hat)
    # Real-sample features are targets only, so no grad is needed.
    with torch.no_grad():
        p = model_d(y)
    fm_loss = feat_match_criterion(p_hat, p)
    loss_g = adv_loss + fm_loss
    optimizer_g.zero_grad()
    loss_g.backward()
    optimizer_g.step()
    # check discriminator trainable
    p = model_d(y)
    # detach() stops the discriminator loss from updating the generator.
    p_hat = model_d(y_hat.detach())
    real_loss, fake_loss = dis_adv_criterion(p_hat, p)
    loss_d = real_loss + fake_loss
    optimizer_d.zero_grad()
    loss_d.backward()
    optimizer_d.step()
# Probe for the optional parallel_wavegan package; the compatibility test
# below is skipped when it is not installed.
is_parallel_wavegan_available = True
try:
    import parallel_wavegan  # NOQA
except ImportError:
    is_parallel_wavegan_available = False
@pytest.mark.skipif(
not is_parallel_wavegan_available, reason="parallel_wavegan is not installed."
)
def test_parallel_wavegan_compatibility():
    """Check ESPnet2 MelGANGenerator loads parallel_wavegan weights unchanged.

    Loads a parallel_wavegan-trained state dict into the ESPnet2 model and
    asserts both produce bit-identical inference output.
    """
    from parallel_wavegan.models import MelGANGenerator as PWGMelGANGenerator

    model_pwg = PWGMelGANGenerator(**make_melgan_generator_args())
    model_espnet2 = MelGANGenerator(**make_melgan_generator_args())
    # The state dicts must be key- and shape-compatible for this to succeed.
    model_espnet2.load_state_dict(model_pwg.state_dict())
    model_pwg.eval()
    model_espnet2.eval()
    with torch.no_grad():
        c = torch.randn(5, 80)
        out_pwg = model_pwg.inference(c)
        out_espnet2 = model_espnet2.inference(c)
    # Exact (not approximate) equality: identical weights, identical graph.
    np.testing.assert_array_equal(
        out_pwg.cpu().numpy(),
        out_espnet2.cpu().numpy(),
    )
| 4,503 | 28.246753 | 82 | py |
espnet | espnet-master/test/espnet2/gan_tts/wavenet/test_wavenet.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test code for WaveNet modules."""
import pytest
import torch
from espnet2.gan_tts.wavenet import WaveNet
def make_wavenet_args(**kwargs):
    """Return default WaveNet arguments with ``kwargs`` overrides."""
    args = {
        "in_channels": 1,
        "out_channels": 1,
        "kernel_size": 3,
        "layers": 4,
        "stacks": 1,
        "base_dilation": 2,
        "residual_channels": 4,
        "aux_channels": -1,
        "gate_channels": 8,
        "skip_channels": 8,
        "global_channels": -1,
        "dropout_rate": 0.0,
        "bias": True,
        "use_weight_norm": True,
        "use_first_conv": True,
        "use_last_conv": False,
        "scale_residual": False,
        "scale_skip_connect": False,
    }
    args.update(kwargs)
    return args
@pytest.mark.parametrize(
"model_dict",
[
({}),
({"use_first_conv": False}),
({"use_last_conv": True}),
({"global_channels": 3}),
({"aux_channels": 3}),
({"scale_residual": True}),
({"scale_skip_connect": True}),
],
)
def test_wavenet_forward(model_dict):
    """WaveNet forward should run and emit the configured channel count.

    Args:
        model_dict: Overrides merged into the default WaveNet arguments.
    """
    batch_size = 2
    batch_length = 128
    args = make_wavenet_args(**model_dict)
    # Without the first conv, the model consumes residual-channel features
    # directly instead of a raw single-channel waveform.
    if args["use_first_conv"]:
        y = torch.randn(batch_size, 1, batch_length)
    else:
        y = torch.randn(batch_size, args["residual_channels"], batch_length)
    c, g = None, None
    if args["aux_channels"] > 0:
        c = torch.randn(batch_size, args["aux_channels"], batch_length)
    if args["global_channels"] > 0:
        g = torch.randn(batch_size, args["global_channels"], 1)
    model = WaveNet(**args)
    out = model(y, c=c, g=g)
    # Bug fix: the original comparisons discarded their results (no `assert`),
    # so the channel-count check never actually ran.
    if args["use_last_conv"]:
        assert out.size(1) == args["out_channels"]
    else:
        assert out.size(1) == args["skip_channels"]
| 1,768 | 25.014706 | 76 | py |
espnet | espnet-master/test/espnet2/gan_tts/joint/test_joint_text2wav.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test VITS related modules."""
import pytest
import torch
from packaging.version import parse as V
from espnet2.gan_tts.joint import JointText2Wav
def make_text2mel_args(**kwargs):
    """Return text2mel (FastSpeech2 by default) arguments with overrides applied."""
    base = {
        "text2mel_type": "fastspeech2",
        "text2mel_params": {
            "adim": 4,
            "aheads": 2,
            "elayers": 2,
            "eunits": 4,
            "dlayers": 2,
            "dunits": 3,
            "postnet_layers": 2,
            "postnet_chans": 4,
            "postnet_filts": 5,
            "postnet_dropout_rate": 0.5,
            "positionwise_layer_type": "conv1d",
            "positionwise_conv_kernel_size": 1,
            "use_scaled_pos_enc": True,
            "use_batch_norm": True,
            "encoder_normalize_before": True,
            "decoder_normalize_before": True,
            "encoder_concat_after": False,
            "decoder_concat_after": False,
            "reduction_factor": 1,
            "encoder_type": "conformer",
            "decoder_type": "conformer",
            "transformer_enc_dropout_rate": 0.1,
            "transformer_enc_positional_dropout_rate": 0.1,
            "transformer_enc_attn_dropout_rate": 0.1,
            "transformer_dec_dropout_rate": 0.1,
            "transformer_dec_positional_dropout_rate": 0.1,
            "transformer_dec_attn_dropout_rate": 0.1,
            "conformer_rel_pos_type": "latest",
            "conformer_pos_enc_layer_type": "rel_pos",
            "conformer_self_attn_layer_type": "rel_selfattn",
            "conformer_activation_type": "swish",
            "use_macaron_style_in_conformer": True,
            "use_cnn_in_conformer": True,
            "zero_triu": False,
            "conformer_enc_kernel_size": 7,
            "conformer_dec_kernel_size": 31,
            "duration_predictor_layers": 2,
            "duration_predictor_chans": 4,
            "duration_predictor_kernel_size": 3,
            "duration_predictor_dropout_rate": 0.1,
            "energy_predictor_layers": 2,
            "energy_predictor_chans": 4,
            "energy_predictor_kernel_size": 3,
            "energy_predictor_dropout": 0.5,
            "energy_embed_kernel_size": 1,
            "energy_embed_dropout": 0.5,
            "stop_gradient_from_energy_predictor": False,
            "pitch_predictor_layers": 2,
            "pitch_predictor_chans": 4,
            "pitch_predictor_kernel_size": 5,
            "pitch_predictor_dropout": 0.5,
            "pitch_embed_kernel_size": 1,
            "pitch_embed_dropout": 0.5,
            "stop_gradient_from_pitch_predictor": True,
            "spks": -1,
            "langs": -1,
            "spk_embed_dim": None,
            "spk_embed_integration_type": "add",
            "use_gst": False,
            "init_type": "xavier_uniform",
            "init_enc_alpha": 1.0,
            "init_dec_alpha": 1.0,
            "use_masking": False,
            "use_weighted_masking": False,
        },
    }
    return {**base, **kwargs}
def make_vocoder_args(**kwargs):
    """Return vocoder (HiFi-GAN generator by default) arguments with overrides."""
    base = {
        "vocoder_type": "hifigan_generator",
        "vocoder_params": {
            "out_channels": 1,
            "channels": 32,
            "global_channels": -1,
            "kernel_size": 7,
            "upsample_scales": [2, 2],
            "upsample_kernel_sizes": [4, 4],
            "resblock_kernel_sizes": [3, 7],
            "resblock_dilations": [[1, 3], [1, 3]],
            "use_additional_convs": True,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
        },
    }
    return {**base, **kwargs}
def make_discriminator_args(**kwargs):
    """Return discriminator arguments with ``kwargs`` overriding the defaults."""
    base = {
        "discriminator_type": "hifigan_multi_scale_multi_period_discriminator",
        "discriminator_params": {
            "scales": 1,
            "scale_downsample_pooling": "AvgPool1d",
            "scale_downsample_pooling_params": {
                "kernel_size": 4,
                "stride": 2,
                "padding": 2,
            },
            "scale_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [15, 41, 5, 3],
                "channels": 16,
                "max_downsample_channels": 32,
                "max_groups": 16,
                "bias": True,
                "downsample_scales": [2, 1],
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
            },
            "follow_official_norm": True,
            "periods": [2, 3],
            "period_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [5, 3],
                "channels": 4,
                "downsample_scales": [3, 1],
                "max_downsample_channels": 16,
                "bias": True,
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
                "use_weight_norm": True,
                "use_spectral_norm": False,
            },
        },
    }
    return {**base, **kwargs}
def make_loss_args(**kwargs):
    """Return GAN-TTS loss arguments with ``kwargs`` overriding the defaults."""
    base = {
        "lambda_text2mel": 1.0,
        "lambda_adv": 1.0,
        "lambda_feat_match": 2.0,
        "lambda_mel": 1.0,
        "generator_adv_loss_params": {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        "discriminator_adv_loss_params": {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        "use_feat_match_loss": True,
        "feat_match_loss_params": {
            "average_by_discriminators": False,
            "average_by_layers": False,
            "include_final_outputs": True,
        },
        "use_mel_loss": True,
        "mel_loss_params": {
            "fs": 22050,
            "n_fft": 16,
            "hop_length": 4,
            "win_length": None,
            "window": "hann",
            "n_mels": 4,
            "fmin": 0,
            "fmax": None,
            "log_base": None,
        },
    }
    return {**base, **kwargs}
@pytest.mark.skipif(
V(torch.__version__) < V("1.4"),
reason="Pytorch >= 1.4 is required.",
)
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
"t2m_dict, voc_dict, dis_dict, loss_dict",
[
({}, {}, {}, {}),
(
{
"text2mel_type": "tacotron2",
"text2mel_params": {
"embed_dim": 8,
"elayers": 1,
"eunits": 8,
"econv_layers": 2,
"econv_chans": 8,
"econv_filts": 5,
"atype": "location",
"adim": 4,
"aconv_chans": 4,
"aconv_filts": 3,
"cumulate_att_w": True,
"dlayers": 2,
"dunits": 8,
"prenet_layers": 2,
"prenet_units": 4,
"postnet_layers": 2,
"postnet_chans": 4,
"postnet_filts": 3,
"output_activation": None,
"use_batch_norm": True,
"use_concate": True,
"use_residual": False,
"reduction_factor": 1,
"spk_embed_dim": None,
"spk_embed_integration_type": "concat",
"use_gst": False,
"dropout_rate": 0.5,
"zoneout_rate": 0.1,
"use_masking": True,
"use_weighted_masking": False,
"bce_pos_weight": 5.0,
"loss_type": "L1+L2",
"use_guided_attn_loss": True,
"guided_attn_loss_sigma": 0.4,
"guided_attn_loss_lambda": 1.0,
},
},
{},
{},
{},
),
(
{
"text2mel_type": "transformer",
"text2mel_params": {
"embed_dim": 4,
"eprenet_conv_layers": 2,
"eprenet_conv_chans": 4,
"eprenet_conv_filts": 3,
"dprenet_layers": 2,
"dprenet_units": 7,
"elayers": 2,
"eunits": 4,
"adim": 4,
"aheads": 2,
"dlayers": 2,
"dunits": 3,
"postnet_layers": 1,
"postnet_chans": 2,
"postnet_filts": 3,
"positionwise_layer_type": "conv1d",
"positionwise_conv_kernel_size": 1,
"reduction_factor": 1,
"spk_embed_dim": None,
"use_gst": False,
},
},
{},
{},
{},
),
(
{
"text2mel_type": "fastspeech",
"text2mel_params": {
# network structure related
"adim": 4,
"aheads": 2,
"elayers": 2,
"eunits": 3,
"dlayers": 2,
"dunits": 3,
"postnet_layers": 2,
"postnet_chans": 3,
"postnet_filts": 5,
"positionwise_layer_type": "conv1d",
"positionwise_conv_kernel_size": 1,
"use_scaled_pos_enc": True,
"use_batch_norm": True,
"encoder_normalize_before": True,
"decoder_normalize_before": True,
"encoder_concat_after": False,
"decoder_concat_after": False,
"duration_predictor_layers": 2,
"duration_predictor_chans": 3,
"duration_predictor_kernel_size": 3,
"reduction_factor": 1,
"encoder_type": "transformer",
"decoder_type": "transformer",
"spk_embed_dim": None,
"use_gst": False,
},
},
{},
{},
{},
),
(
{},
{
"vocoder_type": "parallel_wavegan_generator",
"vocoder_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_size": 3,
"layers": 6,
"stacks": 3,
"residual_channels": 8,
"gate_channels": 16,
"skip_channels": 8,
"aux_channels": 5,
"aux_context_window": 0,
"upsample_net": "ConvInUpsampleNetwork",
"upsample_params": {"upsample_scales": [4, 4]},
},
},
{},
{},
),
(
{},
{},
{
"discriminator_type": "parallel_wavegan_discriminator",
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"kernel_size": 3,
"layers": 5,
"conv_channels": 16,
},
},
{},
),
(
{},
{
"vocoder_type": "melgan_generator",
"vocoder_params": {
"in_channels": 5,
"out_channels": 1,
"kernel_size": 7,
"channels": 32,
"bias": True,
"upsample_scales": [4, 2],
"stack_kernel_size": 3,
"stacks": 1,
"pad": "ReplicationPad1d",
},
},
{},
{},
),
(
{},
{},
{
"discriminator_type": "melgan_multi_scale_discriminator",
"discriminator_params": {
"in_channels": 1,
"out_channels": 1,
"scales": 2,
"kernel_sizes": [5, 3],
"channels": 16,
"max_downsample_channels": 32,
"bias": True,
"downsample_scales": [2, 2],
},
},
{},
),
(
{},
{
"vocoder_type": "style_melgan_generator",
"vocoder_params": {
"in_channels": 32,
"aux_channels": 5,
"channels": 16,
"out_channels": 1,
"kernel_size": 9,
"dilation": 2,
"bias": True,
"noise_upsample_scales": [2, 2],
"noise_upsample_activation": "LeakyReLU",
"noise_upsample_activation_params": {"negative_slope": 0.2},
"upsample_scales": [4, 4],
},
},
{},
{},
),
(
{},
{},
{
"discriminator_type": "style_melgan_discriminator",
"discriminator_params": {
"repeats": 2,
"window_sizes": [4, 8],
"pqmf_params": [
[1, None, None, None],
[2, 62, 0.26700, 9.0],
],
"discriminator_params": {
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 16,
"max_downsample_channels": 32,
"bias": True,
"downsample_scales": [2, 2],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.2},
"pad": "ReplicationPad1d",
"pad_params": {},
},
"use_weight_norm": True,
},
},
{},
),
(
{},
{
"vocoder_params": {
"out_channels": 4,
"channels": 32,
"global_channels": -1,
"kernel_size": 7,
"upsample_scales": [4, 2],
"upsample_kernel_sizes": [8, 4],
"resblock_kernel_sizes": [3, 7],
"resblock_dilations": [[1, 3], [1, 3]],
},
"use_pqmf": True,
},
{},
{},
),
(
{},
{
"vocoder_type": "melgan_generator",
"vocoder_params": {
"in_channels": 5,
"out_channels": 4,
"kernel_size": 7,
"channels": 32,
"bias": True,
"upsample_scales": [4, 2],
"stack_kernel_size": 3,
"stacks": 1,
"pad": "ReplicationPad1d",
},
"use_pqmf": True,
},
{},
{},
),
],
)
def test_joint_model_is_trainable_and_decodable(
    t2m_dict, voc_dict, dis_dict, loss_dict
):
    """Check that JointText2Wav is trainable and decodable.

    Runs one generator forward/backward, one discriminator forward/backward,
    then a single inference call, for each parametrized configuration.
    """
    idim = 10
    odim = 5
    t2m_args = make_text2mel_args(**t2m_dict)
    voc_args = make_vocoder_args(**voc_dict)
    dis_args = make_discriminator_args(**dis_dict)
    loss_args = make_loss_args(**loss_dict)
    model = JointText2Wav(
        idim=idim,
        odim=odim,
        segment_size=4,
        **t2m_args,
        **voc_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator["vocoder"].upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        speech=torch.randn(2, 16 * upsample_factor),
        # BUG FIX: "[16, 13] * upsample_factor" repeated the *list* (giving a
        # length-2*upsample_factor tensor); the per-utterance lengths must be
        # scaled element-wise instead.
        speech_lengths=torch.tensor(
            [16 * upsample_factor, 13 * upsample_factor], dtype=torch.long
        ),
    )
    if t2m_args["text2mel_type"] in ["fastspeech", "fastspeech2"]:
        inputs.update(
            durations=torch.tensor(
                [
                    # +1 element for <eos>
                    [2, 2, 2, 2, 2, 2, 2, 2, 0],
                    [3, 3, 3, 3, 1, 0, 0, 0, 0],
                ],
                dtype=torch.long,
            ),
            # +1 element for <eos>
            durations_lengths=torch.tensor([8 + 1, 5 + 1], dtype=torch.long),
        )
    if t2m_args["text2mel_type"] in ["fastspeech2"]:
        inputs.update(
            pitch=torch.randn(2, 9, 1),
            pitch_lengths=torch.tensor([9, 7], dtype=torch.long),
            energy=torch.randn(2, 9, 1),
            energy_lengths=torch.tensor([9, 7], dtype=torch.long),
        )
    # one generator step and one discriminator step
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()
    with torch.no_grad():
        model.eval()
        # check inference
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (10,),
            )
        )
        output_dict = model.inference(**inputs)
        assert len(output_dict["wav"]) == len(output_dict["feat_gen"]) * upsample_factor
| 18,259 | 32.504587 | 88 | py |
espnet | espnet-master/test/espnet2/gan_tts/style_melgan/test_style_melgan.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test code for StyleMelGAN modules."""
import numpy as np
import pytest
import torch
from espnet2.gan_tts.hifigan.loss import (
DiscriminatorAdversarialLoss,
GeneratorAdversarialLoss,
)
from espnet2.gan_tts.style_melgan import StyleMelGANDiscriminator, StyleMelGANGenerator
def make_style_melgan_generator_args(**kwargs):
    """Return StyleMelGAN generator arguments with overrides applied."""
    base = {
        "in_channels": 32,
        "aux_channels": 5,
        "channels": 16,
        "out_channels": 1,
        "kernel_size": 9,
        "dilation": 2,
        "bias": True,
        "noise_upsample_scales": [11, 2, 2, 2],
        "noise_upsample_activation": "LeakyReLU",
        "noise_upsample_activation_params": {"negative_slope": 0.2},
        "upsample_scales": [2, 2],
        "upsample_mode": "nearest",
        "gated_function": "softmax",
        "use_weight_norm": True,
    }
    return {**base, **kwargs}
def make_style_melgan_discriminator_args(**kwargs):
    """Return StyleMelGAN discriminator arguments with overrides applied."""
    base = {
        "repeats": 2,
        "window_sizes": [128, 256],
        "pqmf_params": [
            [1, None, None, None],
            [2, 62, 0.26700, 9.0],
        ],
        "discriminator_params": {
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 16,
            "max_downsample_channels": 32,
            "bias": True,
            "downsample_scales": [4, 4],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.2},
            "pad": "ReflectionPad1d",
            "pad_params": {},
        },
        "use_weight_norm": True,
    }
    return {**base, **kwargs}
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
"dict_g, dict_d",
[
({}, {}),
({"gated_function": "sigmoid"}, {}),
],
)
def test_style_melgan_trainable(dict_g, dict_d):
    """Run one generator and one discriminator update for StyleMelGAN."""
    # build configs and random training data
    gen_cfg = make_style_melgan_generator_args(**dict_g)
    dis_cfg = make_style_melgan_discriminator_args(**dict_d)
    n_batch = 2
    n_samples = np.prod(gen_cfg["noise_upsample_scales"]) * np.prod(
        gen_cfg["upsample_scales"]
    )
    wav_real = torch.randn(n_batch, 1, n_samples)
    aux = torch.randn(
        n_batch,
        gen_cfg["aux_channels"],
        n_samples // np.prod(gen_cfg["upsample_scales"]),
    )
    generator = StyleMelGANGenerator(**gen_cfg)
    discriminator = StyleMelGANDiscriminator(**dis_cfg)
    adv_criterion_g = GeneratorAdversarialLoss()
    adv_criterion_d = DiscriminatorAdversarialLoss()
    opt_g = torch.optim.Adam(generator.parameters())
    opt_d = torch.optim.Adam(discriminator.parameters())
    # one generator update
    wav_fake = generator(aux)
    loss_g = adv_criterion_g(discriminator(wav_fake))
    opt_g.zero_grad()
    loss_g.backward()
    opt_g.step()
    # one discriminator update (generator output detached)
    score_real = discriminator(wav_real)
    score_fake = discriminator(wav_fake.detach())
    real_loss, fake_loss = adv_criterion_d(score_fake, score_real)
    loss_d = real_loss + fake_loss
    opt_d.zero_grad()
    loss_d.backward()
    opt_d.step()
# Optional dependency guard: the parallel_wavegan compatibility test below is
# skipped when the reference package is not installed.
try:
    import parallel_wavegan  # NOQA

    is_parallel_wavegan_available = True
except ImportError:
    is_parallel_wavegan_available = False
@pytest.mark.skipif(
not is_parallel_wavegan_available, reason="parallel_wavegan is not installed."
)
def test_parallel_wavegan_compatibility():
    """Inference output must match the official parallel_wavegan model."""
    from parallel_wavegan.models import StyleMelGANGenerator as PWGStyleMelGANGenerator

    reference = PWGStyleMelGANGenerator(**make_style_melgan_generator_args())
    target = StyleMelGANGenerator(**make_style_melgan_generator_args())
    # share weights so both models must produce identical outputs
    target.load_state_dict(reference.state_dict())
    reference.eval()
    target.eval()
    with torch.no_grad():
        c = torch.randn(3, 5)
        # reseed before each call: StyleMelGAN inference draws random noise
        torch.manual_seed(1)
        out_reference = reference.inference(c)
        torch.manual_seed(1)
        out_target = target.inference(c)
        np.testing.assert_array_equal(
            out_reference.cpu().numpy(),
            out_target.cpu().numpy(),
        )
| 4,247 | 28.09589 | 87 | py |
espnet | espnet-master/test/espnet2/gan_tts/hifigan/test_hifigan.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test code for HiFi-GAN modules."""
import numpy as np
import pytest
import torch
from espnet2.gan_tts.hifigan import (
HiFiGANGenerator,
HiFiGANMultiScaleMultiPeriodDiscriminator,
)
from espnet2.gan_tts.hifigan.loss import (
DiscriminatorAdversarialLoss,
FeatureMatchLoss,
GeneratorAdversarialLoss,
MelSpectrogramLoss,
)
def make_hifigan_generator_args(**kwargs):
    """Return HiFi-GAN generator arguments with ``kwargs`` overriding defaults."""
    base = {
        "in_channels": 5,
        "out_channels": 1,
        "channels": 32,
        "kernel_size": 7,
        "upsample_scales": (2, 2),
        "upsample_kernel_sizes": (4, 4),
        "resblock_kernel_sizes": (3, 7),
        "resblock_dilations": [(1, 3), (1, 3)],
        "use_additional_convs": True,
        "bias": True,
        "nonlinear_activation": "LeakyReLU",
        "nonlinear_activation_params": {"negative_slope": 0.1},
        "use_weight_norm": True,
    }
    return {**base, **kwargs}
def make_hifigan_multi_scale_multi_period_discriminator_args(**kwargs):
    """Return HiFi-GAN MS/MP discriminator arguments with overrides applied."""
    base = {
        "scales": 2,
        "scale_downsample_pooling": "AvgPool1d",
        "scale_downsample_pooling_params": {
            "kernel_size": 4,
            "stride": 2,
            "padding": 2,
        },
        "scale_discriminator_params": {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [15, 41, 5, 3],
            "channels": 16,
            "max_downsample_channels": 16,
            "max_groups": 16,
            "bias": True,
            "downsample_scales": [2, 2],
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
        },
        "follow_official_norm": False,
        "periods": [2, 3],
        "period_discriminator_params": {
            "in_channels": 1,
            "out_channels": 1,
            "kernel_sizes": [5, 3],
            "channels": 8,
            "downsample_scales": [3, 3],
            "max_downsample_channels": 32,
            "bias": True,
            "nonlinear_activation": "LeakyReLU",
            "nonlinear_activation_params": {"negative_slope": 0.1},
            "use_weight_norm": True,
            "use_spectral_norm": False,
        },
    }
    return {**base, **kwargs}
def make_mel_loss_args(**kwargs):
    """Return mel-spectrogram loss arguments with overrides applied."""
    base = {
        "fs": 120,
        "n_fft": 16,
        "hop_length": 4,
        "win_length": None,
        "window": "hann",
        "n_mels": 2,
        "fmin": None,
        "fmax": None,
        "center": True,
        "normalized": False,
        "onesided": True,
        "log_base": 10.0,
    }
    return {**base, **kwargs}
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
"dict_g, dict_d, dict_loss, average, include",
[
({}, {}, {}, True, True),
({}, {}, {}, False, False),
({}, {"scales": 1}, {}, False, True),
({}, {"periods": [2]}, {}, False, True),
({}, {"scales": 1, "periods": [2]}, {}, False, True),
({}, {"follow_official_norm": True}, {}, False, True),
({"use_additional_convs": False}, {}, {}, False, True),
({"global_channels": 4}, {}, {}, True, True),
],
)
def test_hifigan_generator_and_discriminator_and_loss(
    dict_g, dict_d, dict_loss, average, include
):
    """Run one generator and one discriminator update with all HiFi-GAN losses."""
    n_batch = 2
    n_samples = 128
    gen_cfg = make_hifigan_generator_args(**dict_g)
    dis_cfg = make_hifigan_multi_scale_multi_period_discriminator_args(**dict_d)
    mel_cfg = make_mel_loss_args(**dict_loss)
    wav_real = torch.randn(n_batch, 1, n_samples)
    cond = torch.randn(
        n_batch,
        gen_cfg["in_channels"],
        n_samples // np.prod(gen_cfg["upsample_scales"]),
    )
    global_cond = None
    if gen_cfg.get("global_channels") is not None:
        global_cond = torch.randn(n_batch, gen_cfg["global_channels"], 1)
    generator = HiFiGANGenerator(**gen_cfg)
    discriminator = HiFiGANMultiScaleMultiPeriodDiscriminator(**dis_cfg)
    mel_criterion = MelSpectrogramLoss(**mel_cfg)
    fm_criterion = FeatureMatchLoss(
        average_by_layers=average,
        average_by_discriminators=average,
        include_final_outputs=include,
    )
    adv_criterion_g = GeneratorAdversarialLoss(
        average_by_discriminators=average,
    )
    adv_criterion_d = DiscriminatorAdversarialLoss(
        average_by_discriminators=average,
    )
    opt_g = torch.optim.AdamW(generator.parameters())
    opt_d = torch.optim.AdamW(discriminator.parameters())
    # one generator update (adversarial + mel + feature-matching losses)
    wav_fake = generator(cond, g=global_cond)
    score_fake = discriminator(wav_fake)
    mel_loss = mel_criterion(wav_fake, wav_real)
    adv_loss = adv_criterion_g(score_fake)
    with torch.no_grad():
        score_real = discriminator(wav_real)
    fm_loss = fm_criterion(score_fake, score_real)
    loss_g = adv_loss + mel_loss + fm_loss
    opt_g.zero_grad()
    loss_g.backward()
    opt_g.step()
    # one discriminator update (generator output detached)
    score_real = discriminator(wav_real)
    score_fake = discriminator(wav_fake.detach())
    real_loss, fake_loss = adv_criterion_d(score_fake, score_real)
    loss_d = real_loss + fake_loss
    opt_d.zero_grad()
    loss_d.backward()
    opt_d.step()
# Optional dependency guard: the parallel_wavegan compatibility test below is
# skipped when the reference package is not installed.
try:
    import parallel_wavegan  # NOQA

    is_parallel_wavegan_available = True
except ImportError:
    is_parallel_wavegan_available = False
@pytest.mark.skipif(
not is_parallel_wavegan_available, reason="parallel_wavegan is not installed."
)
def test_parallel_wavegan_compatibility():
    """Inference output must match the official parallel_wavegan model."""
    from parallel_wavegan.models import HiFiGANGenerator as PWGHiFiGANGenerator

    reference = PWGHiFiGANGenerator(**make_hifigan_generator_args())
    target = HiFiGANGenerator(**make_hifigan_generator_args())
    # share weights so both models must produce identical outputs
    target.load_state_dict(reference.state_dict())
    reference.eval()
    target.eval()
    with torch.no_grad():
        c = torch.randn(3, 5)
        out_reference = reference.inference(c)
        out_target = target.inference(c)
        np.testing.assert_array_equal(
            out_reference.cpu().numpy(),
            out_target.cpu().numpy(),
        )
| 6,163 | 29.068293 | 82 | py |
espnet | espnet-master/test/espnet2/gan_tts/vits/test_generator.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test VITS generator modules."""
import pytest
import torch
from espnet2.gan_tts.vits.generator import VITSGenerator
def make_generator_args(**kwargs):
    """Return VITSGenerator constructor arguments with overrides applied."""
    base = {
        "vocabs": 10,
        "aux_channels": 5,
        "hidden_channels": 4,
        "spks": -1,
        "langs": -1,
        "spk_embed_dim": -1,
        "global_channels": -1,
        "segment_size": 4,
        "text_encoder_attention_heads": 2,
        "text_encoder_ffn_expand": 4,
        "text_encoder_blocks": 2,
        "text_encoder_positionwise_layer_type": "conv1d",
        "text_encoder_positionwise_conv_kernel_size": 1,
        "text_encoder_normalize_before": True,
        "text_encoder_dropout_rate": 0.1,
        "text_encoder_positional_dropout_rate": 0.0,
        "text_encoder_attention_dropout_rate": 0.0,
        "text_encoder_conformer_kernel_size": 7,
        "use_macaron_style_in_text_encoder": True,
        "use_conformer_conv_in_text_encoder": True,
        "decoder_kernel_size": 7,
        "decoder_channels": 16,
        "decoder_upsample_scales": [16, 16],
        "decoder_upsample_kernel_sizes": [32, 32],
        "decoder_resblock_kernel_sizes": [3, 5],
        "decoder_resblock_dilations": [[1, 3], [1, 3]],
        "use_weight_norm_in_decoder": True,
        "posterior_encoder_kernel_size": 5,
        "posterior_encoder_layers": 2,
        "posterior_encoder_stacks": 1,
        "posterior_encoder_base_dilation": 1,
        "posterior_encoder_dropout_rate": 0.0,
        "use_weight_norm_in_posterior_encoder": True,
        "flow_flows": 2,
        "flow_kernel_size": 5,
        "flow_base_dilation": 1,
        "flow_layers": 2,
        "flow_dropout_rate": 0.0,
        "use_weight_norm_in_flow": True,
        "use_only_mean_in_flow": True,
        "stochastic_duration_predictor_kernel_size": 3,
        "stochastic_duration_predictor_dropout_rate": 0.5,
        "stochastic_duration_predictor_flows": 2,
        "stochastic_duration_predictor_dds_conv_layers": 3,
    }
    return {**base, **kwargs}
# NOTE(kan-bayashi): first forward requires jit compile
# so a little bit more time is needed to run. Therefore,
# here we extend execution timeout from 2 sec to 5 sec.
@pytest.mark.execution_timeout(5)
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@torch.no_grad()
@pytest.mark.parametrize(
"model_dict",
[
({}),
({"text_encoder_positionwise_layer_type": "linear"}),
({"text_encoder_positionwise_layer_type": "conv1d-linear"}),
({"text_encoder_normalize_before": False}),
({"use_macaron_style_in_text_encoder": False}),
({"use_conformer_conv_in_text_encoder": False}),
(
{
"text_encoder_positional_encoding_layer_type": "scaled_abs_pos",
"text_encoder_self_attention_layer_type": "selfattn",
}
),
({"spk_embed_dim": 16, "global_channels": 4}),
({"langs": 16, "global_channels": 4}),
],
)
def test_vits_generator_forward(model_dict):
    """Check VITSGenerator forward and all inference modes run end-to-end.

    Args:
        model_dict: Overrides merged into the default generator arguments
            (supplied by ``pytest.mark.parametrize``).
    """
    idim = 10
    odim = 5
    args = make_generator_args(vocabs=idim, aux_channels=odim, **model_dict)
    model = VITSGenerator(**args)

    def _dump_shapes(outputs):
        # Shared debug printer for (possibly nested) output tensors.
        # The original duplicated this loop four times.
        for i, output in enumerate(outputs):
            if not isinstance(output, tuple):
                print(f"{i+1}: {output.shape}")
            else:
                for j, output_ in enumerate(output):
                    print(f"{i+j+1}: {output_.shape}")

    # check forward
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, odim, 16),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(2, args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (2, 1))
    _dump_shapes(model(**inputs))

    # check inference
    inputs = dict(
        text=torch.randint(0, idim, (2, 5)),
        text_lengths=torch.tensor([5, 3], dtype=torch.long),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    _dump_shapes(model.inference(**inputs))

    # check inference with predefined duration
    inputs = dict(
        text=torch.randint(0, idim, (1, 5)),
        text_lengths=torch.tensor([5], dtype=torch.long),
        dur=torch.tensor([[[1, 2, 3, 4, 5]]], dtype=torch.long),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    outputs = model.inference(**inputs)
    assert outputs[0].size(1) == inputs["dur"].sum() * model.upsample_factor
    _dump_shapes(outputs)

    # check inference with teacher forcing
    inputs = dict(
        text=torch.randint(0, idim, (1, 5)),
        text_lengths=torch.tensor([5], dtype=torch.long),
        feats=torch.randn(1, odim, 16),
        feats_lengths=torch.tensor([16], dtype=torch.long),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    outputs = model.inference(**inputs, use_teacher_forcing=True)
    assert outputs[0].size(1) == inputs["feats"].size(2) * model.upsample_factor
    _dump_shapes(outputs)
@pytest.mark.skipif(
"1.6" in torch.__version__,
reason="group conv in pytorch 1.6 has an issue. "
"See https://github.com/pytorch/pytorch/issues/42446.",
)
@torch.no_grad()
@pytest.mark.parametrize(
"model_dict",
[
({}),
({"text_encoder_positionwise_layer_type": "linear"}),
({"text_encoder_positionwise_layer_type": "conv1d-linear"}),
({"text_encoder_normalize_before": False}),
({"use_macaron_style_in_text_encoder": False}),
({"use_conformer_conv_in_text_encoder": False}),
(
{
"text_encoder_positional_encoding_layer_type": "scaled_abs_pos",
"text_encoder_self_attention_layer_type": "selfattn",
}
),
({"spk_embed_dim": 16}),
({"langs": 16}),
],
)
def test_multi_speaker_vits_generator_forward(model_dict):
    """Check multi-speaker VITSGenerator forward and inference modes.

    Args:
        model_dict: Overrides merged into the default generator arguments
            (supplied by ``pytest.mark.parametrize``).
    """
    idim = 10
    odim = 5
    spks = 10
    global_channels = 8
    args = make_generator_args(
        vocabs=idim,
        aux_channels=odim,
        spks=spks,
        global_channels=global_channels,
        **model_dict,
    )
    model = VITSGenerator(**args)

    def _dump_shapes(outputs):
        # Shared debug printer for (possibly nested) output tensors.
        # The original duplicated this loop four times.
        for i, output in enumerate(outputs):
            if not isinstance(output, tuple):
                print(f"{i+1}: {output.shape}")
            else:
                for j, output_ in enumerate(output):
                    print(f"{i+j+1}: {output_.shape}")

    # check forward
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, odim, 16),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        sids=torch.randint(0, spks, (2,)),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(2, args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (2, 1))
    _dump_shapes(model(**inputs))

    # check inference
    inputs = dict(
        text=torch.randint(0, idim, (2, 5)),
        text_lengths=torch.tensor([5, 3], dtype=torch.long),
        sids=torch.randint(0, spks, (1,)),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    _dump_shapes(model.inference(**inputs))

    # check inference with predefined duration
    inputs = dict(
        text=torch.randint(0, idim, (1, 5)),
        text_lengths=torch.tensor([5], dtype=torch.long),
        sids=torch.randint(0, spks, (1,)),
        dur=torch.tensor([[[1, 2, 3, 4, 5]]], dtype=torch.long),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    outputs = model.inference(**inputs)
    assert outputs[0].size(1) == inputs["dur"].sum() * model.upsample_factor
    _dump_shapes(outputs)

    # check inference with teacher forcing
    inputs = dict(
        text=torch.randint(0, idim, (1, 5)),
        text_lengths=torch.tensor([5], dtype=torch.long),
        feats=torch.randn(1, odim, 16),
        feats_lengths=torch.tensor([16], dtype=torch.long),
        sids=torch.randint(0, spks, (1,)),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    outputs = model.inference(**inputs, use_teacher_forcing=True)
    assert outputs[0].size(1) == inputs["feats"].size(2) * model.upsample_factor
    _dump_shapes(outputs)
| 11,068 | 32.24024 | 80 | py |
espnet | espnet-master/test/espnet2/gan_tts/vits/test_vits.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test VITS related modules."""
import pytest
import torch
from espnet2.gan_tts.vits import VITS
def get_test_data():
    """Return VITS test cases as (generator, discriminator, loss) dict triples."""
    return [
        # default configuration
        ({}, {}, {}),
        # generator-output caching enabled
        ({}, {}, {"cache_generator_outputs": True}),
        # alternative discriminator types
        (
            {},
            {
                "discriminator_type": "hifigan_multi_scale_discriminator",
                "discriminator_params": {
                    "scales": 2,
                    "downsample_pooling": "AvgPool1d",
                    "downsample_pooling_params": {
                        "kernel_size": 4,
                        "stride": 2,
                        "padding": 2,
                    },
                    "discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [15, 41, 5, 3],
                        "channels": 16,
                        "max_downsample_channels": 32,
                        "max_groups": 16,
                        "bias": True,
                        "downsample_scales": [2, 2, 1],
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                    },
                },
            },
            {},
        ),
        (
            {},
            {
                "discriminator_type": "hifigan_multi_period_discriminator",
                "discriminator_params": {
                    "periods": [2, 3],
                    "discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [5, 3],
                        "channels": 16,
                        "downsample_scales": [3, 3, 1],
                        "max_downsample_channels": 32,
                        "bias": True,
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                        "use_weight_norm": True,
                        "use_spectral_norm": False,
                    },
                },
            },
            {},
        ),
        (
            {},
            {
                "discriminator_type": "hifigan_period_discriminator",
                "discriminator_params": {
                    "period": 2,
                    "in_channels": 1,
                    "out_channels": 1,
                    "kernel_sizes": [5, 3],
                    "channels": 16,
                    "downsample_scales": [3, 3, 1],
                    "max_downsample_channels": 32,
                    "bias": True,
                    "nonlinear_activation": "LeakyReLU",
                    "nonlinear_activation_params": {"negative_slope": 0.1},
                    "use_weight_norm": True,
                    "use_spectral_norm": False,
                },
            },
            {},
        ),
        (
            {},
            {
                "discriminator_type": "hifigan_scale_discriminator",
                "discriminator_params": {
                    "in_channels": 1,
                    "out_channels": 1,
                    "kernel_sizes": [15, 41, 5, 3],
                    "channels": 16,
                    "max_downsample_channels": 32,
                    "max_groups": 16,
                    "bias": True,
                    "downsample_scales": [2, 2, 1],
                    "nonlinear_activation": "LeakyReLU",
                    "nonlinear_activation_params": {"negative_slope": 0.1},
                },
            },
            {},
        ),
        # alternative adversarial-loss settings
        (
            {},
            {},
            {
                "generator_adv_loss_params": {
                    "average_by_discriminators": True,
                    "loss_type": "mse",
                },
                "discriminator_adv_loss_params": {
                    "average_by_discriminators": True,
                    "loss_type": "mse",
                },
            },
        ),
        (
            {},
            {},
            {
                "generator_adv_loss_params": {
                    "average_by_discriminators": False,
                    "loss_type": "hinge",
                },
                "discriminator_adv_loss_params": {
                    "average_by_discriminators": False,
                    "loss_type": "hinge",
                },
            },
        ),
    ]
def make_vits_generator_args(**kwargs):
    """Build the default VITS generator config, overridden by ``kwargs``."""
    args = {
        "generator_type": "vits_generator",
        "generator_params": {
            "vocabs": 10,
            "aux_channels": 5,
            "hidden_channels": 4,
            "spks": -1,
            "langs": -1,
            "spk_embed_dim": -1,
            "global_channels": -1,
            "segment_size": 4,
            "text_encoder_attention_heads": 2,
            "text_encoder_ffn_expand": 2,
            "text_encoder_blocks": 2,
            "text_encoder_positionwise_layer_type": "conv1d",
            "text_encoder_positionwise_conv_kernel_size": 1,
            "text_encoder_positional_encoding_layer_type": "rel_pos",
            "text_encoder_self_attention_layer_type": "rel_selfattn",
            "text_encoder_activation_type": "swish",
            "text_encoder_normalize_before": True,
            "text_encoder_dropout_rate": 0.1,
            "text_encoder_positional_dropout_rate": 0.0,
            "text_encoder_attention_dropout_rate": 0.0,
            "text_encoder_conformer_kernel_size": 7,
            "use_macaron_style_in_text_encoder": True,
            "use_conformer_conv_in_text_encoder": True,
            "decoder_kernel_size": 7,
            "decoder_channels": 16,
            "decoder_upsample_scales": (16, 16),
            "decoder_upsample_kernel_sizes": (32, 32),
            "decoder_resblock_kernel_sizes": (3, 5),
            "decoder_resblock_dilations": [(1, 3), (1, 3)],
            "use_weight_norm_in_decoder": True,
            "posterior_encoder_kernel_size": 5,
            "posterior_encoder_layers": 2,
            "posterior_encoder_stacks": 1,
            "posterior_encoder_base_dilation": 1,
            "posterior_encoder_dropout_rate": 0.0,
            "use_weight_norm_in_posterior_encoder": True,
            "flow_flows": 2,
            "flow_kernel_size": 5,
            "flow_base_dilation": 1,
            "flow_layers": 2,
            "flow_dropout_rate": 0.0,
            "use_weight_norm_in_flow": True,
            "use_only_mean_in_flow": True,
            "stochastic_duration_predictor_kernel_size": 3,
            "stochastic_duration_predictor_dropout_rate": 0.5,
            "stochastic_duration_predictor_flows": 2,
            "stochastic_duration_predictor_dds_conv_layers": 3,
        },
    }
    args.update(kwargs)
    return args
def make_vits_discriminator_args(**kwargs):
    """Build the default VITS discriminator config, overridden by ``kwargs``."""
    args = {
        "discriminator_type": "hifigan_multi_scale_multi_period_discriminator",
        "discriminator_params": {
            "scales": 2,
            "scale_downsample_pooling": "AvgPool1d",
            "scale_downsample_pooling_params": {
                "kernel_size": 4,
                "stride": 2,
                "padding": 2,
            },
            "scale_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [15, 41, 5, 3],
                "channels": 16,
                "max_downsample_channels": 32,
                "max_groups": 16,
                "bias": True,
                "downsample_scales": [2, 1],
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
            },
            "follow_official_norm": True,
            "periods": [2, 3],
            "period_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [5, 3],
                "channels": 4,
                "downsample_scales": [3, 1],
                "max_downsample_channels": 16,
                "bias": True,
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
                "use_weight_norm": True,
                "use_spectral_norm": False,
            },
        },
    }
    args.update(kwargs)
    return args
def make_vits_loss_args(**kwargs):
    """Build the default VITS loss config, overridden by ``kwargs``."""
    args = {
        "lambda_adv": 1.0,
        "lambda_mel": 45.0,
        "lambda_feat_match": 2.0,
        "lambda_dur": 1.0,
        "lambda_kl": 1.0,
        "generator_adv_loss_params": {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        "discriminator_adv_loss_params": {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        "feat_match_loss_params": {
            "average_by_discriminators": False,
            "average_by_layers": False,
            "include_final_outputs": True,
        },
        "mel_loss_params": {
            "fs": 22050,
            "n_fft": 1024,
            "hop_length": 256,
            "win_length": None,
            "window": "hann",
            "n_mels": 80,
            "fmin": 0,
            "fmax": None,
            "log_base": None,
        },
    }
    args.update(kwargs)
    return args
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
    "gen_dict, dis_dict, loss_dict",
    get_test_data(),
)
def test_vits_is_trainable_and_decodable(gen_dict, dis_dict, loss_dict):
    """Check that VITS runs one G step, one D step, and all inference modes.

    Args:
        gen_dict: Overrides merged into the default generator config.
        dis_dict: Overrides merged into the default discriminator config.
        loss_dict: Overrides merged into the default loss config.
    """
    idim = 10
    odim = 5
    gen_args = make_vits_generator_args(**gen_dict)
    dis_args = make_vits_discriminator_args(**dis_dict)
    loss_args = make_vits_loss_args(**loss_dict)
    model = VITS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        speech=torch.randn(2, 16 * upsample_factor),
        # Fix: scale each length by the upsample factor.  The previous
        # ``[16, 13] * upsample_factor`` was Python list repetition, which
        # produced a (2 * upsample_factor,)-shaped tensor instead of the two
        # per-utterance waveform lengths.
        speech_lengths=torch.tensor(
            [16 * upsample_factor, 13 * upsample_factor], dtype=torch.long
        ),
    )
    # both the generator and the discriminator step must backpropagate
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()
    with torch.no_grad():
        model.eval()
        # check inference
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            )
        )
        model.inference(**inputs)
        # check inference with predefined durations
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            durations=torch.tensor([1, 2, 3, 4, 5], dtype=torch.long),
        )
        output_dict = model.inference(**inputs)
        assert output_dict["wav"].size(0) == inputs["durations"].sum() * upsample_factor
        # check inference with teacher forcing
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            feats=torch.randn(16, odim),
        )
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="Group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
    "gen_dict, dis_dict, loss_dict,",
    get_test_data(),
)
@pytest.mark.parametrize(
    "spks, spk_embed_dim, langs", [(10, -1, -1), (-1, 5, -1), (-1, -1, 3), (4, 5, 3)]
)
def test_multi_speaker_vits_is_trainable_and_decodable(
    gen_dict, dis_dict, loss_dict, spks, spk_embed_dim, langs
):
    """Check multi-speaker / multi-lingual VITS training and inference.

    Args:
        gen_dict: Overrides merged into the default generator config.
        dis_dict: Overrides merged into the default discriminator config.
        loss_dict: Overrides merged into the default loss config.
        spks: Number of speakers (<= 0 disables speaker IDs).
        spk_embed_dim: Speaker embedding size (<= 0 disables embeddings).
        langs: Number of languages (<= 0 disables language IDs).
    """
    idim = 10
    odim = 5
    global_channels = 8
    gen_args = make_vits_generator_args(**gen_dict)
    gen_args["generator_params"]["spks"] = spks
    gen_args["generator_params"]["langs"] = langs
    gen_args["generator_params"]["spk_embed_dim"] = spk_embed_dim
    gen_args["generator_params"]["global_channels"] = global_channels
    dis_args = make_vits_discriminator_args(**dis_dict)
    loss_args = make_vits_loss_args(**loss_dict)
    model = VITS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        speech=torch.randn(2, 16 * upsample_factor),
        # Fix: scale each length by the upsample factor.  The previous
        # ``[16, 13] * upsample_factor`` was Python list repetition, which
        # produced a (2 * upsample_factor,)-shaped tensor instead of the two
        # per-utterance waveform lengths.
        speech_lengths=torch.tensor(
            [16 * upsample_factor, 13 * upsample_factor], dtype=torch.long
        ),
    )
    # add the optional conditioning inputs only when they are enabled
    if spks > 0:
        inputs["sids"] = torch.randint(0, spks, (2, 1))
    if langs > 0:
        inputs["lids"] = torch.randint(0, langs, (2, 1))
    if spk_embed_dim > 0:
        inputs["spembs"] = torch.randn(2, spk_embed_dim)
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()
    with torch.no_grad():
        model.eval()
        # check inference
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        model.inference(**inputs)
        # check inference with predefined duration
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            durations=torch.tensor([1, 2, 3, 4, 5], dtype=torch.long),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        output_dict = model.inference(**inputs)
        assert output_dict["wav"].size(0) == inputs["durations"].sum() * upsample_factor
        # check inference with teacher forcing
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            feats=torch.randn(16, odim),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
@pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="GPU is needed.",
)
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
    "gen_dict, dis_dict, loss_dict",
    get_test_data(),
)
def test_vits_is_trainable_and_decodable_on_gpu(gen_dict, dis_dict, loss_dict):
    """Check that VITS trains and decodes on a CUDA device.

    Args:
        gen_dict: Overrides merged into the default generator config.
        dis_dict: Overrides merged into the default discriminator config.
        loss_dict: Overrides merged into the default loss config.
    """
    idim = 10
    odim = 5
    gen_args = make_vits_generator_args(**gen_dict)
    dis_args = make_vits_discriminator_args(**dis_dict)
    loss_args = make_vits_loss_args(**loss_dict)
    model = VITS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        speech=torch.randn(2, 16 * upsample_factor),
        # Fix: scale each length by the upsample factor.  The previous
        # ``[16, 13] * upsample_factor`` was Python list repetition, which
        # produced a (2 * upsample_factor,)-shaped tensor instead of the two
        # per-utterance waveform lengths.
        speech_lengths=torch.tensor(
            [16 * upsample_factor, 13 * upsample_factor], dtype=torch.long
        ),
    )
    device = torch.device("cuda")
    model.to(device)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()
    with torch.no_grad():
        model.eval()
        # check inference
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            )
        )
        inputs = {k: v.to(device) for k, v in inputs.items()}
        model.inference(**inputs)
        # check inference with predefined duration
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            durations=torch.tensor([1, 2, 3, 4, 5], dtype=torch.long),
        )
        inputs = {k: v.to(device) for k, v in inputs.items()}
        output_dict = model.inference(**inputs)
        assert output_dict["wav"].size(0) == inputs["durations"].sum() * upsample_factor
        # check inference with teacher forcing
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            feats=torch.randn(16, odim),
        )
        inputs = {k: v.to(device) for k, v in inputs.items()}
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
@pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="GPU is needed.",
)
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="Group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
    "gen_dict, dis_dict, loss_dict",
    get_test_data(),
)
@pytest.mark.parametrize(
    "spks, spk_embed_dim, langs", [(10, -1, -1), (-1, 5, -1), (-1, -1, 3), (4, 5, 3)]
)
def test_multi_speaker_vits_is_trainable_and_decodable_on_gpu(
    gen_dict, dis_dict, loss_dict, spks, spk_embed_dim, langs
):
    """Check multi-speaker / multi-lingual VITS on a CUDA device.

    Args:
        gen_dict: Overrides merged into the default generator config.
        dis_dict: Overrides merged into the default discriminator config.
        loss_dict: Overrides merged into the default loss config.
        spks: Number of speakers (<= 0 disables speaker IDs).
        spk_embed_dim: Speaker embedding size (<= 0 disables embeddings).
        langs: Number of languages (<= 0 disables language IDs).
    """
    idim = 10
    odim = 5
    global_channels = 8
    gen_args = make_vits_generator_args(**gen_dict)
    gen_args["generator_params"]["spks"] = spks
    gen_args["generator_params"]["langs"] = langs
    gen_args["generator_params"]["spk_embed_dim"] = spk_embed_dim
    gen_args["generator_params"]["global_channels"] = global_channels
    dis_args = make_vits_discriminator_args(**dis_dict)
    loss_args = make_vits_loss_args(**loss_dict)
    model = VITS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        speech=torch.randn(2, 16 * upsample_factor),
        # Fix: scale each length by the upsample factor.  The previous
        # ``[16, 13] * upsample_factor`` was Python list repetition, which
        # produced a (2 * upsample_factor,)-shaped tensor instead of the two
        # per-utterance waveform lengths.
        speech_lengths=torch.tensor(
            [16 * upsample_factor, 13 * upsample_factor], dtype=torch.long
        ),
    )
    # add the optional conditioning inputs only when they are enabled
    if spks > 0:
        inputs["sids"] = torch.randint(0, spks, (2, 1))
    if langs > 0:
        inputs["lids"] = torch.randint(0, langs, (2, 1))
    if spk_embed_dim > 0:
        inputs["spembs"] = torch.randn(2, spk_embed_dim)
    device = torch.device("cuda")
    model.to(device)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()
    with torch.no_grad():
        model.eval()
        # check inference
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        inputs = {k: v.to(device) for k, v in inputs.items()}
        model.inference(**inputs)
        # check inference with predefined duration
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            durations=torch.tensor([1, 2, 3, 4, 5], dtype=torch.long),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        inputs = {k: v.to(device) for k, v in inputs.items()}
        output_dict = model.inference(**inputs)
        assert output_dict["wav"].size(0) == inputs["durations"].sum() * upsample_factor
        # check inference with teacher forcing
        inputs = dict(
            text=torch.randint(
                0,
                idim,
                (5,),
            ),
            feats=torch.randn(16, odim),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        inputs = {k: v.to(device) for k, v in inputs.items()}
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
| 22,280 | 32.6571 | 88 | py |
espnet | espnet-master/test/espnet2/gan_tts/parallel_wavegan/test_parallel_wavegan.py | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test code for ParallelWaveGAN modules."""
import numpy as np
import pytest
import torch
from espnet2.gan_tts.hifigan.loss import (
DiscriminatorAdversarialLoss,
GeneratorAdversarialLoss,
)
from espnet2.gan_tts.parallel_wavegan import (
ParallelWaveGANDiscriminator,
ParallelWaveGANGenerator,
)
def make_generator_args(**kwargs):
    """Return default ParallelWaveGAN generator kwargs, overridden by ``kwargs``."""
    args = {
        "in_channels": 1,
        "out_channels": 1,
        "kernel_size": 3,
        "layers": 6,
        "stacks": 3,
        "residual_channels": 8,
        "gate_channels": 16,
        "skip_channels": 8,
        "aux_channels": 10,
        "aux_context_window": 0,
        "use_weight_norm": True,
        "upsample_conditional_features": True,
        "upsample_net": "ConvInUpsampleNetwork",
        "upsample_params": {"upsample_scales": [4, 4]},
    }
    args.update(kwargs)
    return args
def make_discriminator_args(**kwargs):
    """Return default ParallelWaveGAN discriminator kwargs, overridden by ``kwargs``."""
    args = {
        "in_channels": 1,
        "out_channels": 1,
        "kernel_size": 3,
        "layers": 5,
        "conv_channels": 16,
        "nonlinear_activation": "LeakyReLU",
        "nonlinear_activation_params": {"negative_slope": 0.2},
        "bias": True,
        "use_weight_norm": True,
    }
    args.update(kwargs)
    return args
@pytest.mark.parametrize(
    "dict_g, dict_d",
    [
        ({}, {}),
        ({"layers": 1, "stacks": 1}, {}),
        ({}, {"layers": 1}),
        ({"kernel_size": 5}, {}),
        ({}, {"kernel_size": 5}),
        ({"gate_channels": 8}, {}),
        ({"stacks": 1}, {}),
        ({"use_weight_norm": False}, {"use_weight_norm": False}),
        ({"aux_context_window": 2}, {}),
        ({"upsample_net": "UpsampleNetwork"}, {}),
        (
            {"upsample_params": {"upsample_scales": [4], "freq_axis_kernel_size": 3}},
            {},
        ),
        (
            {
                "upsample_params": {
                    "upsample_scales": [4],
                    "nonlinear_activation": "ReLU",
                }
            },
            {},
        ),
        (
            {
                "upsample_conditional_features": False,
                "upsample_params": {"upsample_scales": [1]},
            },
            {},
        ),
    ],
)
def test_parallel_wavegan_generator_and_discriminator(dict_g, dict_d):
    """Make sure a G/D pair can each take one optimizer step without error."""
    # build configs and random inputs
    n_batch = 4
    n_samples = 4096
    cfg_g = make_generator_args(**dict_g)
    cfg_d = make_discriminator_args(**dict_d)
    wav = torch.randn(n_batch, 1, n_samples)
    n_frames = n_samples // np.prod(cfg_g["upsample_params"]["upsample_scales"])
    aux = torch.randn(n_batch, cfg_g["aux_channels"], n_frames)
    generator = ParallelWaveGANGenerator(**cfg_g)
    discriminator = ParallelWaveGANDiscriminator(**cfg_d)
    adv_loss_g = GeneratorAdversarialLoss()
    adv_loss_d = DiscriminatorAdversarialLoss()
    opt_g = torch.optim.Adam(generator.parameters())
    opt_d = torch.optim.Adam(discriminator.parameters())
    # one generator update step
    wav_fake = generator(aux)
    score_fake = discriminator(wav_fake)
    loss_g = adv_loss_g(score_fake)
    opt_g.zero_grad()
    loss_g.backward()
    opt_g.step()
    # one discriminator update step
    score_real = discriminator(wav)
    score_fake = discriminator(wav_fake.detach())
    real_loss, fake_loss = adv_loss_d(score_fake, score_real)
    loss_d = real_loss + fake_loss
    opt_d.zero_grad()
    loss_d.backward()
    opt_d.step()
# Flag the optional official ``parallel_wavegan`` package; the compatibility
# test below is skipped when it is not installed.
is_parallel_wavegan_available = True
try:
    import parallel_wavegan  # NOQA
except ImportError:
    is_parallel_wavegan_available = False
@pytest.mark.execution_timeout(10)
@pytest.mark.skipif(
    not is_parallel_wavegan_available, reason="parallel_wavegan is not installed."
)
def test_parallel_wavegan_compatibility():
    """Verify the ESPnet2 generator matches the official implementation."""
    from parallel_wavegan.models import (
        ParallelWaveGANGenerator as PWGParallelWaveGANGenerator,
    )

    reference = PWGParallelWaveGANGenerator(**make_generator_args())
    reimpl = ParallelWaveGANGenerator(**make_generator_args())
    # copy weights so both models compute the same function
    reimpl.load_state_dict(reference.state_dict())
    reference.eval()
    reimpl.eval()
    with torch.no_grad():
        noise = torch.randn(3 * 16, 1)
        cond = torch.randn(3, 10)
        out_reference = reference.inference(cond, noise)
        out_reimpl = reimpl.inference(cond, noise)
        np.testing.assert_allclose(
            out_reference.cpu().numpy(),
            out_reimpl.cpu().numpy(),
            rtol=1e-5,
        )
| 4,501 | 26.790123 | 86 | py |
espnet | espnet-master/test/espnet2/gan_svs/visinger/test_visinger.py | # Copyright 2021 Tomoki Hayashi
# Copyright 2023 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test VISinger related modules."""
import pytest
import torch
from espnet2.gan_svs.vits import VITS
def get_test_data():
    """Return parameter sets for the VISinger trainability tests.

    Each element is a ``(gen_dict, dis_dict, loss_dict)`` tuple of keyword
    overrides that the tests merge over the defaults built by
    ``make_vits_generator_args``, ``make_vits_discriminator_args``, and
    ``make_vits_loss_args``; an empty dict keeps the corresponding defaults.
    """
    test_data = [
        # all defaults
        ({}, {}, {}),
        # override: cache_generator_outputs
        ({}, {}, {"cache_generator_outputs": True}),
        # HiFi-GAN multi-scale discriminator
        (
            {},
            {
                "discriminator_type": "hifigan_multi_scale_discriminator",
                "discriminator_params": {
                    "scales": 2,
                    "downsample_pooling": "AvgPool1d",
                    "downsample_pooling_params": {
                        "kernel_size": 4,
                        "stride": 2,
                        "padding": 2,
                    },
                    "discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [15, 41, 5, 3],
                        "channels": 16,
                        "max_downsample_channels": 32,
                        "max_groups": 16,
                        "bias": True,
                        "downsample_scales": [2, 2, 1],
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                    },
                },
            },
            {},
        ),
        # HiFi-GAN multi-period discriminator
        (
            {},
            {
                "discriminator_type": "hifigan_multi_period_discriminator",
                "discriminator_params": {
                    "periods": [2, 3],
                    "discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [5, 3],
                        "channels": 16,
                        "downsample_scales": [3, 3, 1],
                        "max_downsample_channels": 32,
                        "bias": True,
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                        "use_weight_norm": True,
                        "use_spectral_norm": False,
                    },
                },
            },
            {},
        ),
        # single HiFi-GAN period discriminator
        (
            {},
            {
                "discriminator_type": "hifigan_period_discriminator",
                "discriminator_params": {
                    "period": 2,
                    "in_channels": 1,
                    "out_channels": 1,
                    "kernel_sizes": [5, 3],
                    "channels": 16,
                    "downsample_scales": [3, 3, 1],
                    "max_downsample_channels": 32,
                    "bias": True,
                    "nonlinear_activation": "LeakyReLU",
                    "nonlinear_activation_params": {"negative_slope": 0.1},
                    "use_weight_norm": True,
                    "use_spectral_norm": False,
                },
            },
            {},
        ),
        # single HiFi-GAN scale discriminator
        (
            {},
            {
                "discriminator_type": "hifigan_scale_discriminator",
                "discriminator_params": {
                    "in_channels": 1,
                    "out_channels": 1,
                    "kernel_sizes": [15, 41, 5, 3],
                    "channels": 16,
                    "max_downsample_channels": 32,
                    "max_groups": 16,
                    "bias": True,
                    "downsample_scales": [2, 2, 1],
                    "nonlinear_activation": "LeakyReLU",
                    "nonlinear_activation_params": {"negative_slope": 0.1},
                },
            },
            {},
        ),
        # MSE adversarial losses averaged over discriminators
        (
            {},
            {},
            {
                "generator_adv_loss_params": {
                    "average_by_discriminators": True,
                    "loss_type": "mse",
                },
                "discriminator_adv_loss_params": {
                    "average_by_discriminators": True,
                    "loss_type": "mse",
                },
            },
        ),
        # hinge adversarial losses
        (
            {},
            {},
            {
                "generator_adv_loss_params": {
                    "average_by_discriminators": False,
                    "loss_type": "hinge",
                },
                "discriminator_adv_loss_params": {
                    "average_by_discriminators": False,
                    "loss_type": "hinge",
                },
            },
        ),
        # visinger 2 discriminator (mpd+msd+mfd)
        (
            {},
            {
                "discriminator_type": "visinger2",
                "discriminator_params": {
                    "scales": 2,
                    "scale_downsample_pooling": "AvgPool1d",
                    "scale_downsample_pooling_params": {
                        "kernel_size": 4,
                        "stride": 2,
                        "padding": 2,
                    },
                    "scale_discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [15, 41, 5, 3],
                        "channels": 16,
                        "max_downsample_channels": 32,
                        "max_groups": 16,
                        "bias": True,
                        "downsample_scales": [2, 1],
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                    },
                    "follow_official_norm": True,
                    "periods": [2, 3],
                    "period_discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [5, 3],
                        "channels": 4,
                        "downsample_scales": [3, 1],
                        "max_downsample_channels": 16,
                        "bias": True,
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                        "use_weight_norm": True,
                        "use_spectral_norm": False,
                    },
                    "multi_freq_disc_params": {
                        "hop_length_factors": [1, 2, 4],
                        "hidden_channels": [2, 2, 4],
                        "domain": "double",
                        "mel_scale": True,
                        "divisors": [1, 1, 1, 1, 1, 1, 1],
                        "strides": [1, 2, 1, 2, 1, 2, 1],
                    },
                },
            },
            {},
        ),
        # avocodo discriminator
        (
            {
                "generator_type": "visinger",
                "vocoder_generator_type": "hifigan",
                "generator_params": {
                    "vocabs": 10,
                    "aux_channels": 5,
                    "hidden_channels": 4,
                    "spks": -1,
                    "langs": -1,
                    "spk_embed_dim": -1,
                    "global_channels": -1,
                    "segment_size": 4,
                    "text_encoder_attention_heads": 2,
                    "text_encoder_ffn_expand": 2,
                    "text_encoder_blocks": 2,
                    "text_encoder_positionwise_layer_type": "conv1d",
                    "text_encoder_positionwise_conv_kernel_size": 1,
                    "text_encoder_positional_encoding_layer_type": "rel_pos",
                    "text_encoder_self_attention_layer_type": "rel_selfattn",
                    "text_encoder_activation_type": "swish",
                    "text_encoder_normalize_before": True,
                    "text_encoder_dropout_rate": 0.1,
                    "text_encoder_positional_dropout_rate": 0.0,
                    "text_encoder_attention_dropout_rate": 0.0,
                    "text_encoder_conformer_kernel_size": 7,
                    "use_macaron_style_in_text_encoder": True,
                    "use_conformer_conv_in_text_encoder": True,
                    "decoder_kernel_size": 7,
                    "decoder_channels": 16,
                    "decoder_upsample_scales": (2, 2, 4, 16),
                    "decoder_upsample_kernel_sizes": (4, 4, 8, 32),
                    "decoder_resblock_kernel_sizes": (3, 5),
                    "decoder_resblock_dilations": [(1, 3), (1, 3)],
                    "use_weight_norm_in_decoder": True,
                    "posterior_encoder_kernel_size": 5,
                    "posterior_encoder_layers": 2,
                    "posterior_encoder_stacks": 1,
                    "posterior_encoder_base_dilation": 1,
                    "posterior_encoder_dropout_rate": 0.0,
                    "use_weight_norm_in_posterior_encoder": True,
                    "flow_flows": 2,
                    "flow_kernel_size": 5,
                    "flow_base_dilation": 1,
                    "flow_layers": 2,
                    "flow_dropout_rate": 0.0,
                    "use_weight_norm_in_flow": True,
                    "use_only_mean_in_flow": True,
                    "fs": 22050,
                    "hop_length": 256,
                    "win_length": 1024,
                    "n_fft": 1024,
                    "use_phoneme_predictor": False,
                    # avocodo
                    "projection_filters": [0, 1, 1, 1],
                    "projection_kernels": [0, 5, 7, 11],
                },
            },
            {
                "discriminator_type": "avocodo",
                "discriminator_params": {
                    "combd": {
                        "combd_h_u": [
                            [16, 64],
                            [16, 64],
                        ],
                        "combd_d_k": [
                            [3, 3],
                            [3, 3],
                        ],
                        "combd_d_s": [
                            [1, 1],
                            [1, 1],
                        ],
                        "combd_d_d": [
                            [1, 1],
                            [1, 1],
                        ],
                        "combd_d_g": [
                            [1, 4],
                            [1, 4],
                        ],
                        "combd_d_p": [
                            [3, 5],
                            [5, 10],
                        ],
                        "combd_op_f": [1, 1],
                        "combd_op_k": [3, 3],
                        # combd_op_k: [1, 3] # change for uhifigan 1 avocodo
                        "combd_op_g": [1, 1],
                        "use_spectral_norm": False,
                    },
                    "sbd": {
                        "sbd_filters": [
                            [64, 128],
                            [32, 64],
                        ],
                        "sbd_strides": [
                            [1, 1],
                            [1, 1],
                        ],
                        "sbd_kernel_sizes": [
                            [[5, 5, 5], [5, 5, 5]],
                            [[3, 3, 3], [3, 3, 3]],
                        ],
                        "sbd_dilations": [
                            [[5, 7, 11], [5, 7, 11]],
                            [[1, 2, 3], [1, 2, 3]],
                        ],
                        "sbd_band_ranges": [[0, 6], [0, 64]],
                        "sbd_transpose": [False, True],
                        "pqmf_config": {
                            "sbd": [16, 256, 0.03, 10.0],
                            "fsbd": [64, 256, 0.1, 9.0],
                        },
                        "segment_size": 1024,  # 4 * hop_size
                        "use_spectral_norm": False,
                    },
                    "pqmf_config": {
                        "lv1": [16, 256, 0.25, 10.0],
                        "lv2": [64, 192, 0.13, 10.0],
                    },
                },
            },
            {},
        ),
        # visinger 2 vocoder (ddsp)
        (
            {
                "generator_type": "visinger",
                "vocoder_generator_type": "visinger2",
                "generator_params": {
                    "vocabs": 10,
                    "aux_channels": 5,
                    "hidden_channels": 4,
                    "spks": -1,
                    "langs": -1,
                    "spk_embed_dim": -1,
                    "global_channels": -1,
                    "segment_size": 4,
                    "text_encoder_attention_heads": 2,
                    "text_encoder_ffn_expand": 2,
                    "text_encoder_blocks": 2,
                    "text_encoder_positionwise_layer_type": "conv1d",
                    "text_encoder_positionwise_conv_kernel_size": 1,
                    "text_encoder_positional_encoding_layer_type": "rel_pos",
                    "text_encoder_self_attention_layer_type": "rel_selfattn",
                    "text_encoder_activation_type": "swish",
                    "text_encoder_normalize_before": True,
                    "text_encoder_dropout_rate": 0.1,
                    "text_encoder_positional_dropout_rate": 0.0,
                    "text_encoder_attention_dropout_rate": 0.0,
                    "text_encoder_conformer_kernel_size": 7,
                    "use_macaron_style_in_text_encoder": True,
                    "use_conformer_conv_in_text_encoder": True,
                    "decoder_kernel_size": 7,
                    "decoder_channels": 16,
                    "decoder_upsample_scales": (16, 16),
                    "decoder_upsample_kernel_sizes": (32, 32),
                    "decoder_resblock_kernel_sizes": (3, 5),
                    "decoder_resblock_dilations": [(1, 3), (1, 3)],
                    "use_weight_norm_in_decoder": True,
                    "posterior_encoder_kernel_size": 5,
                    "posterior_encoder_layers": 2,
                    "posterior_encoder_stacks": 1,
                    "posterior_encoder_base_dilation": 1,
                    "posterior_encoder_dropout_rate": 0.0,
                    "use_weight_norm_in_posterior_encoder": True,
                    "flow_flows": 2,
                    "flow_kernel_size": 5,
                    "flow_base_dilation": 1,
                    "flow_layers": 2,
                    "flow_dropout_rate": 0.0,
                    "use_weight_norm_in_flow": True,
                    "use_only_mean_in_flow": True,
                    "fs": 22050,
                    "hop_length": 256,
                    "win_length": 1024,
                    "n_fft": 1024,
                    "use_phoneme_predictor": False,
                },
            },
            {
                "discriminator_type": "hifigan_multi_scale_multi_period_discriminator",
                "discriminator_params": {
                    "scales": 2,
                    "scale_downsample_pooling": "AvgPool1d",
                    "scale_downsample_pooling_params": {
                        "kernel_size": 4,
                        "stride": 2,
                        "padding": 2,
                    },
                    "scale_discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [15, 41, 5, 3],
                        "channels": 16,
                        "max_downsample_channels": 32,
                        "max_groups": 16,
                        "bias": True,
                        "downsample_scales": [2, 1],
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                    },
                    "follow_official_norm": True,
                    "periods": [2, 3],
                    "period_discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [5, 3],
                        "channels": 4,
                        "downsample_scales": [3, 1],
                        "max_downsample_channels": 16,
                        "bias": True,
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                        "use_weight_norm": True,
                        "use_spectral_norm": False,
                    },
                },
            },
            {},
        ),
        # uhifigan vocoder
        (
            {
                "generator_type": "visinger",
                "vocoder_generator_type": "uhifigan",
                "generator_params": {
                    "vocabs": 10,
                    "aux_channels": 5,
                    "hidden_channels": 4,
                    "spks": -1,
                    "langs": -1,
                    "spk_embed_dim": -1,
                    "global_channels": -1,
                    "segment_size": 4,
                    "text_encoder_attention_heads": 2,
                    "text_encoder_ffn_expand": 2,
                    "text_encoder_blocks": 2,
                    "text_encoder_positionwise_layer_type": "conv1d",
                    "text_encoder_positionwise_conv_kernel_size": 1,
                    "text_encoder_positional_encoding_layer_type": "rel_pos",
                    "text_encoder_self_attention_layer_type": "rel_selfattn",
                    "text_encoder_activation_type": "swish",
                    "text_encoder_normalize_before": True,
                    "text_encoder_dropout_rate": 0.1,
                    "text_encoder_positional_dropout_rate": 0.0,
                    "text_encoder_attention_dropout_rate": 0.0,
                    "text_encoder_conformer_kernel_size": 7,
                    "use_macaron_style_in_text_encoder": True,
                    "use_conformer_conv_in_text_encoder": True,
                    "decoder_kernel_size": 7,
                    "decoder_channels": 16,
                    "decoder_downsample_scales": (16, 16),
                    "decoder_downsample_kernel_sizes": (32, 32),
                    "decoder_upsample_scales": (16, 16),
                    "decoder_upsample_kernel_sizes": (32, 32),
                    "decoder_resblock_kernel_sizes": (3, 5),
                    "decoder_resblock_dilations": [(1, 3), (1, 3)],
                    "use_weight_norm_in_decoder": True,
                    "posterior_encoder_kernel_size": 5,
                    "posterior_encoder_layers": 2,
                    "posterior_encoder_stacks": 1,
                    "posterior_encoder_base_dilation": 1,
                    "posterior_encoder_dropout_rate": 0.0,
                    "use_weight_norm_in_posterior_encoder": True,
                    "flow_flows": 2,
                    "flow_kernel_size": 5,
                    "flow_base_dilation": 1,
                    "flow_layers": 2,
                    "flow_dropout_rate": 0.0,
                    "use_weight_norm_in_flow": True,
                    "use_only_mean_in_flow": True,
                    "fs": 22050,
                    "hop_length": 256,
                    "win_length": 1024,
                    "n_fft": 1024,
                    "use_phoneme_predictor": False,
                },
            },
            {
                "discriminator_type": "hifigan_multi_scale_multi_period_discriminator",
                "discriminator_params": {
                    "scales": 2,
                    "scale_downsample_pooling": "AvgPool1d",
                    "scale_downsample_pooling_params": {
                        "kernel_size": 4,
                        "stride": 2,
                        "padding": 2,
                    },
                    "scale_discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [15, 41, 5, 3],
                        "channels": 16,
                        "max_downsample_channels": 32,
                        "max_groups": 16,
                        "bias": True,
                        "downsample_scales": [2, 1],
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                    },
                    "follow_official_norm": True,
                    "periods": [2, 3],
                    "period_discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [5, 3],
                        "channels": 4,
                        "downsample_scales": [3, 1],
                        "max_downsample_channels": 16,
                        "bias": True,
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                        "use_weight_norm": True,
                        "use_spectral_norm": False,
                    },
                },
            },
            {},
        ),
        # visinger 2 generator
        (
            {
                "generator_type": "visinger2",
                "vocoder_generator_type": "hifigan",
                "generator_params": {
                    "vocabs": 10,
                    "aux_channels": 5,
                    "hidden_channels": 4,
                    "spks": -1,
                    "langs": -1,
                    "spk_embed_dim": -1,
                    "global_channels": -1,
                    "segment_size": 4,
                    "text_encoder_attention_heads": 2,
                    "text_encoder_ffn_expand": 2,
                    "text_encoder_blocks": 2,
                    "text_encoder_positionwise_layer_type": "conv1d",
                    "text_encoder_positionwise_conv_kernel_size": 1,
                    "text_encoder_positional_encoding_layer_type": "rel_pos",
                    "text_encoder_self_attention_layer_type": "rel_selfattn",
                    "text_encoder_activation_type": "swish",
                    "text_encoder_normalize_before": True,
                    "text_encoder_dropout_rate": 0.1,
                    "text_encoder_positional_dropout_rate": 0.0,
                    "text_encoder_attention_dropout_rate": 0.0,
                    "text_encoder_conformer_kernel_size": 7,
                    "use_macaron_style_in_text_encoder": True,
                    "use_conformer_conv_in_text_encoder": True,
                    "decoder_kernel_size": 7,
                    "decoder_channels": 16,
                    "decoder_downsample_scales": (16, 16),
                    "decoder_downsample_kernel_sizes": (32, 32),
                    "decoder_upsample_scales": (16, 16),
                    "decoder_upsample_kernel_sizes": (32, 32),
                    "decoder_resblock_kernel_sizes": (3, 5),
                    "decoder_resblock_dilations": [(1, 3), (1, 3)],
                    "use_weight_norm_in_decoder": True,
                    "posterior_encoder_kernel_size": 5,
                    "posterior_encoder_layers": 2,
                    "posterior_encoder_stacks": 1,
                    "posterior_encoder_base_dilation": 1,
                    "posterior_encoder_dropout_rate": 0.0,
                    "use_weight_norm_in_posterior_encoder": True,
                    "flow_flows": 2,
                    "flow_kernel_size": 5,
                    "flow_base_dilation": 1,
                    "flow_layers": 2,
                    "flow_dropout_rate": 0.0,
                    "use_weight_norm_in_flow": True,
                    "use_only_mean_in_flow": True,
                    "fs": 22050,
                    "hop_length": 256,
                    "win_length": 1024,
                    "n_fft": 1024,
                    "use_phoneme_predictor": False,
                },
            },
            {
                "discriminator_type": "hifigan_multi_scale_multi_period_discriminator",
                "discriminator_params": {
                    "scales": 2,
                    "scale_downsample_pooling": "AvgPool1d",
                    "scale_downsample_pooling_params": {
                        "kernel_size": 4,
                        "stride": 2,
                        "padding": 2,
                    },
                    "scale_discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [15, 41, 5, 3],
                        "channels": 16,
                        "max_downsample_channels": 32,
                        "max_groups": 16,
                        "bias": True,
                        "downsample_scales": [2, 1],
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                    },
                    "follow_official_norm": True,
                    "periods": [2, 3],
                    "period_discriminator_params": {
                        "in_channels": 1,
                        "out_channels": 1,
                        "kernel_sizes": [5, 3],
                        "channels": 4,
                        "downsample_scales": [3, 1],
                        "max_downsample_channels": 16,
                        "bias": True,
                        "nonlinear_activation": "LeakyReLU",
                        "nonlinear_activation_params": {"negative_slope": 0.1},
                        "use_weight_norm": True,
                        "use_spectral_norm": False,
                    },
                },
            },
            {},
        ),
        # without flow
        (
            {
                "generator_type": "visinger",
                "vocoder_generator_type": "hifigan",
                "generator_params": {
                    "vocabs": 10,
                    "aux_channels": 5,
                    "hidden_channels": 4,
                    "spks": -1,
                    "langs": -1,
                    "spk_embed_dim": -1,
                    "global_channels": -1,
                    "segment_size": 4,
                    "text_encoder_attention_heads": 2,
                    "text_encoder_ffn_expand": 2,
                    "text_encoder_blocks": 2,
                    "text_encoder_positionwise_layer_type": "conv1d",
                    "text_encoder_positionwise_conv_kernel_size": 1,
                    "text_encoder_positional_encoding_layer_type": "rel_pos",
                    "text_encoder_self_attention_layer_type": "rel_selfattn",
                    "text_encoder_activation_type": "swish",
                    "text_encoder_normalize_before": True,
                    "text_encoder_dropout_rate": 0.1,
                    "text_encoder_positional_dropout_rate": 0.0,
                    "text_encoder_attention_dropout_rate": 0.0,
                    "text_encoder_conformer_kernel_size": 7,
                    "use_macaron_style_in_text_encoder": True,
                    "use_conformer_conv_in_text_encoder": True,
                    "decoder_kernel_size": 7,
                    "decoder_channels": 16,
                    "decoder_downsample_scales": (16, 16),
                    "decoder_downsample_kernel_sizes": (32, 32),
                    "decoder_upsample_scales": (16, 16),
                    "decoder_upsample_kernel_sizes": (32, 32),
                    "decoder_resblock_kernel_sizes": (3, 5),
                    "decoder_resblock_dilations": [(1, 3), (1, 3)],
                    "use_weight_norm_in_decoder": True,
                    "posterior_encoder_kernel_size": 5,
                    "posterior_encoder_layers": 2,
                    "posterior_encoder_stacks": 1,
                    "posterior_encoder_base_dilation": 1,
                    "posterior_encoder_dropout_rate": 0.0,
                    "use_weight_norm_in_posterior_encoder": True,
                    "flow_flows": -1,
                    "flow_kernel_size": 5,
                    "flow_base_dilation": 1,
                    "flow_layers": 2,
                    "flow_dropout_rate": 0.0,
                    "use_weight_norm_in_flow": True,
                    "use_only_mean_in_flow": True,
                    "fs": 22050,
                    "hop_length": 256,
                    "win_length": 1024,
                    "n_fft": 1024,
                    "use_phoneme_predictor": False,
                },
            },
            {},
            {},
        ),
        # use phoneme predictor
        (
            {
                "generator_type": "visinger",
                "vocoder_generator_type": "hifigan",
                "generator_params": {
                    "vocabs": 10,
                    "aux_channels": 5,
                    "hidden_channels": 4,
                    "spks": -1,
                    "langs": -1,
                    "spk_embed_dim": -1,
                    "global_channels": -1,
                    "segment_size": 4,
                    "text_encoder_attention_heads": 2,
                    "text_encoder_ffn_expand": 2,
                    "text_encoder_blocks": 2,
                    "text_encoder_positionwise_layer_type": "conv1d",
                    "text_encoder_positionwise_conv_kernel_size": 1,
                    "text_encoder_positional_encoding_layer_type": "rel_pos",
                    "text_encoder_self_attention_layer_type": "rel_selfattn",
                    "text_encoder_activation_type": "swish",
                    "text_encoder_normalize_before": True,
                    "text_encoder_dropout_rate": 0.1,
                    "text_encoder_positional_dropout_rate": 0.0,
                    "text_encoder_attention_dropout_rate": 0.0,
                    "text_encoder_conformer_kernel_size": 7,
                    "use_macaron_style_in_text_encoder": True,
                    "use_conformer_conv_in_text_encoder": True,
                    "decoder_kernel_size": 7,
                    "decoder_channels": 16,
                    "decoder_downsample_scales": (16, 16),
                    "decoder_downsample_kernel_sizes": (32, 32),
                    "decoder_upsample_scales": (16, 16),
                    "decoder_upsample_kernel_sizes": (32, 32),
                    "decoder_resblock_kernel_sizes": (3, 5),
                    "decoder_resblock_dilations": [(1, 3), (1, 3)],
                    "use_weight_norm_in_decoder": True,
                    "posterior_encoder_kernel_size": 5,
                    "posterior_encoder_layers": 2,
                    "posterior_encoder_stacks": 1,
                    "posterior_encoder_base_dilation": 1,
                    "posterior_encoder_dropout_rate": 0.0,
                    "use_weight_norm_in_posterior_encoder": True,
                    "flow_flows": 2,
                    "flow_kernel_size": 5,
                    "flow_base_dilation": 1,
                    "flow_layers": 2,
                    "flow_dropout_rate": 0.0,
                    "use_weight_norm_in_flow": True,
                    "use_only_mean_in_flow": True,
                    "fs": 22050,
                    "hop_length": 256,
                    "win_length": 1024,
                    "n_fft": 1024,
                    "use_phoneme_predictor": True,
                },
            },
            {},
            {},
        ),
    ]
    return test_data
def make_vits_generator_args(**kwargs):
    """Build the default VITS/VISinger generator configuration for the tests.

    Any keyword argument overrides the corresponding *top-level* key
    (e.g. ``generator_type`` or the whole ``generator_params`` dict).
    """
    config = {
        "generator_type": "visinger",
        "vocoder_generator_type": "hifigan",
        "generator_params": {
            "vocabs": 10,
            "aux_channels": 5,
            "hidden_channels": 4,
            "spks": -1,
            "langs": -1,
            "spk_embed_dim": -1,
            "global_channels": -1,
            "segment_size": 4,
            "text_encoder_attention_heads": 2,
            "text_encoder_ffn_expand": 2,
            "text_encoder_blocks": 2,
            "text_encoder_positionwise_layer_type": "conv1d",
            "text_encoder_positionwise_conv_kernel_size": 1,
            "text_encoder_positional_encoding_layer_type": "rel_pos",
            "text_encoder_self_attention_layer_type": "rel_selfattn",
            "text_encoder_activation_type": "swish",
            "text_encoder_normalize_before": True,
            "text_encoder_dropout_rate": 0.1,
            "text_encoder_positional_dropout_rate": 0.0,
            "text_encoder_attention_dropout_rate": 0.0,
            "text_encoder_conformer_kernel_size": 7,
            "use_macaron_style_in_text_encoder": True,
            "use_conformer_conv_in_text_encoder": True,
            "decoder_kernel_size": 7,
            "decoder_channels": 16,
            "decoder_upsample_scales": (16, 16),
            "decoder_upsample_kernel_sizes": (32, 32),
            "decoder_resblock_kernel_sizes": (3, 5),
            "decoder_resblock_dilations": [(1, 3), (1, 3)],
            "use_weight_norm_in_decoder": True,
            "posterior_encoder_kernel_size": 5,
            "posterior_encoder_layers": 2,
            "posterior_encoder_stacks": 1,
            "posterior_encoder_base_dilation": 1,
            "posterior_encoder_dropout_rate": 0.0,
            "use_weight_norm_in_posterior_encoder": True,
            "flow_flows": 2,
            "flow_kernel_size": 5,
            "flow_base_dilation": 1,
            "flow_layers": 2,
            "flow_dropout_rate": 0.0,
            "use_weight_norm_in_flow": True,
            "use_only_mean_in_flow": True,
            # NOTE: duplicated inside generator_params on purpose — the
            # generator also receives these two selector keys directly.
            "generator_type": "visinger",
            "vocoder_generator_type": "hifigan",
            "fs": 22050,
            "hop_length": 256,
            "win_length": 1024,
            "n_fft": 1024,
            "use_phoneme_predictor": False,
            # avocodo
            "projection_filters": [0, 1, 1, 1],
            "projection_kernels": [0, 5, 7, 11],
        },
    }
    config.update(kwargs)
    return config
def make_vits_discriminator_args(**kwargs):
    """Build the default multi-scale + multi-period discriminator config.

    Keyword arguments override the corresponding top-level defaults.
    """
    config = {
        "discriminator_type": "hifigan_multi_scale_multi_period_discriminator",
        "discriminator_params": {
            "scales": 2,
            "scale_downsample_pooling": "AvgPool1d",
            "scale_downsample_pooling_params": {
                "kernel_size": 4,
                "stride": 2,
                "padding": 2,
            },
            "scale_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [15, 41, 5, 3],
                "channels": 16,
                "max_downsample_channels": 32,
                "max_groups": 16,
                "bias": True,
                "downsample_scales": [2, 1],
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
            },
            "follow_official_norm": True,
            "periods": [2, 3],
            "period_discriminator_params": {
                "in_channels": 1,
                "out_channels": 1,
                "kernel_sizes": [5, 3],
                "channels": 4,
                "downsample_scales": [3, 1],
                "max_downsample_channels": 16,
                "bias": True,
                "nonlinear_activation": "LeakyReLU",
                "nonlinear_activation_params": {"negative_slope": 0.1},
                "use_weight_norm": True,
                "use_spectral_norm": False,
            },
        },
    }
    config.update(kwargs)
    return config
def make_vits_loss_args(**kwargs):
    """Build the default GAN loss weights and per-loss parameters.

    Keyword arguments override the corresponding top-level defaults.
    """
    config = {
        "lambda_adv": 1.0,
        "lambda_mel": 45.0,
        "lambda_feat_match": 2.0,
        "lambda_dur": 1.0,
        "lambda_kl": 1.0,
        "generator_adv_loss_params": {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        "discriminator_adv_loss_params": {
            "average_by_discriminators": False,
            "loss_type": "mse",
        },
        "feat_match_loss_params": {
            "average_by_discriminators": False,
            "average_by_layers": False,
            "include_final_outputs": True,
        },
        "mel_loss_params": {
            "fs": 22050,
            "n_fft": 1024,
            "hop_length": 256,
            "win_length": None,
            "window": "hann",
            "n_mels": 80,
            "fmin": 0,
            "fmax": None,
            "log_base": None,
        },
    }
    config.update(kwargs)
    return config
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
    "gen_dict, dis_dict, loss_dict",
    get_test_data(),
)
def test_vits_is_trainable_and_decodable(gen_dict, dis_dict, loss_dict):
    """Smoke test: generator/discriminator losses backprop and inference runs."""
    idim = 10
    odim = 5
    gen_args = make_vits_generator_args(**gen_dict)
    dis_args = make_vits_discriminator_args(**dis_dict)
    loss_args = make_vits_loss_args(**loss_dict)
    model = VITS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        singing=torch.randn(2, 16 * upsample_factor),
        # BUGFIX: scale each length element-wise. The previous
        # "[16, 13] * upsample_factor" repeated the Python list, producing
        # a (2 * upsample_factor,)-shaped tensor instead of (2,).
        singing_lengths=torch.tensor([16, 13], dtype=torch.long) * upsample_factor,
        label={
            "lab": torch.randint(0, idim, (2, 8)),
            "score": torch.randint(0, idim, (2, 8)),
        },
        label_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score": torch.tensor([8, 5], dtype=torch.long),
        },
        melody={
            "lab": torch.randint(0, 127, (2, 8)),
            "score": torch.randint(0, 127, (2, 8)),
        },
        duration={
            "lab": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 2], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_phn": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 1], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_syb": torch.tensor(
                [[3, 3, 5, 5, 4, 4, 3, 3], [4, 4, 5, 5, 3, 3, 4, 4]], dtype=torch.int64
            ),
        },
        slur=torch.randint(0, 2, (2, 8)),
        pitch=torch.randn(2, 16, 1),
    )
    # Both adversarial directions must produce a backpropagatable loss.
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()

    with torch.no_grad():
        model.eval()

        # check inference
        inputs = dict(
            text=torch.randint(0, idim, (1, 5)),
            label={
                "lab": torch.randint(0, idim, (1, 5)),
                "score": torch.randint(0, idim, (1, 5)),
            },
            melody={
                "lab": torch.randint(0, 127, (1, 5)),
                "score": torch.randint(0, 127, (1, 5)),
            },
            duration={
                "lab": torch.tensor([[1, 2, 2, 3, 3]], dtype=torch.int64),
                "score_phn": torch.tensor([[1, 2, 2, 3, 4]], dtype=torch.int64),
                "score_syb": torch.tensor([[3, 3, 5, 5, 4]], dtype=torch.int64),
            },
            slur=torch.randint(0, 2, (1, 5)),
            pitch=torch.randn(16, 1),
        )
        model.inference(**inputs)

        # check inference with teacher forcing (reuse inputs, add target feats)
        inputs["feats"] = torch.randn(16, odim)
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="Group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
    # NOTE: removed the stray trailing comma in the argnames string.
    "gen_dict, dis_dict, loss_dict",
    get_test_data(),
)
@pytest.mark.parametrize(
    "spks, spk_embed_dim, langs", [(10, -1, -1), (-1, 5, -1), (-1, -1, 3), (4, 5, 3)]
)
def test_multi_speaker_vits_is_trainable_and_decodable(
    gen_dict, dis_dict, loss_dict, spks, spk_embed_dim, langs
):
    """Smoke test of training/inference with speaker/language conditioning."""
    idim = 10
    odim = 5
    global_channels = 8
    gen_args = make_vits_generator_args(**gen_dict)
    # NOTE: removed leftover debug print of gen_args.
    gen_args["generator_params"]["spks"] = spks
    gen_args["generator_params"]["langs"] = langs
    gen_args["generator_params"]["spk_embed_dim"] = spk_embed_dim
    gen_args["generator_params"]["global_channels"] = global_channels
    dis_args = make_vits_discriminator_args(**dis_dict)
    loss_args = make_vits_loss_args(**loss_dict)
    model = VITS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        singing=torch.randn(2, 16 * upsample_factor),
        # BUGFIX: scale each length element-wise. The previous
        # "[16, 13] * upsample_factor" repeated the Python list, producing
        # a (2 * upsample_factor,)-shaped tensor instead of (2,).
        singing_lengths=torch.tensor([16, 13], dtype=torch.long) * upsample_factor,
        label={
            "lab": torch.randint(0, idim, (2, 8)),
            "score": torch.randint(0, idim, (2, 8)),
        },
        label_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score": torch.tensor([8, 5], dtype=torch.long),
        },
        melody={
            "lab": torch.randint(0, 127, (2, 8)),
            "score": torch.randint(0, 127, (2, 8)),
        },
        duration={
            "lab": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 2], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_phn": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 1], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_syb": torch.tensor(
                [[3, 3, 5, 5, 4, 4, 3, 3], [4, 4, 5, 5, 3, 3, 4, 4]], dtype=torch.int64
            ),
        },
        slur=torch.randint(0, 2, (2, 8)),
        pitch=torch.randn(2, 16, 1),
    )
    # Conditioning inputs are only provided when the model was built with them.
    if spks > 0:
        inputs["sids"] = torch.randint(0, spks, (2, 1))
    if langs > 0:
        inputs["lids"] = torch.randint(0, langs, (2, 1))
    if spk_embed_dim > 0:
        inputs["spembs"] = torch.randn(2, spk_embed_dim)
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()

    with torch.no_grad():
        model.eval()

        # check inference
        inputs = dict(
            text=torch.randint(0, idim, (1, 5)),
            label={
                "lab": torch.randint(0, idim, (1, 5)),
                "score": torch.randint(0, idim, (1, 5)),
            },
            melody={
                "lab": torch.randint(0, 127, (1, 5)),
                "score": torch.randint(0, 127, (1, 5)),
            },
            duration={
                "lab": torch.tensor([[1, 2, 2, 3, 3]], dtype=torch.int64),
                "score_phn": torch.tensor([[1, 2, 2, 3, 4]], dtype=torch.int64),
                "score_syb": torch.tensor([[3, 3, 5, 5, 4]], dtype=torch.int64),
            },
            slur=torch.randint(0, 2, (1, 5)),
            pitch=torch.randn(16, 1),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        model.inference(**inputs)

        # check inference with teacher forcing (reuse inputs, add target feats)
        inputs["feats"] = torch.randn(16, odim)
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
@pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="GPU is needed.",
)
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
    "gen_dict, dis_dict, loss_dict",
    get_test_data(),
)
def test_vits_is_trainable_and_decodable_on_gpu(gen_dict, dis_dict, loss_dict):
    """Same smoke test as the CPU variant, with model and inputs on CUDA."""
    idim = 10
    odim = 5
    gen_args = make_vits_generator_args(**gen_dict)
    dis_args = make_vits_discriminator_args(**dis_dict)
    loss_args = make_vits_loss_args(**loss_dict)
    model = VITS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    device = torch.device("cuda")
    model.to(device)

    def _to_device(batch):
        # Move a (possibly one-level-nested) dict of tensors onto the GPU.
        return {
            k: {k2: v2.to(device) for k2, v2 in v.items()}
            if isinstance(v, dict)
            else v.to(device)
            for k, v in batch.items()
        }

    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        singing=torch.randn(2, 16 * upsample_factor),
        # BUGFIX: scale each length element-wise. The previous
        # "[16, 13] * upsample_factor" repeated the Python list, producing
        # a (2 * upsample_factor,)-shaped tensor instead of (2,).
        singing_lengths=torch.tensor([16, 13], dtype=torch.long) * upsample_factor,
        label={
            "lab": torch.randint(0, idim, (2, 8)),
            "score": torch.randint(0, idim, (2, 8)),
        },
        label_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score": torch.tensor([8, 5], dtype=torch.long),
        },
        melody={
            "lab": torch.randint(0, 127, (2, 8)),
            "score": torch.randint(0, 127, (2, 8)),
        },
        duration={
            "lab": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 2], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_phn": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 1], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_syb": torch.tensor(
                [[3, 3, 5, 5, 4, 4, 3, 3], [4, 4, 5, 5, 3, 3, 4, 4]], dtype=torch.int64
            ),
        },
        slur=torch.randint(0, 2, (2, 8)),
        pitch=torch.randn(2, 16, 1),
    )
    inputs = _to_device(inputs)
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()

    with torch.no_grad():
        model.eval()

        # check inference
        inputs = dict(
            text=torch.randint(0, idim, (1, 5)),
            label={
                "lab": torch.randint(0, idim, (1, 5)),
                "score": torch.randint(0, idim, (1, 5)),
            },
            melody={
                "lab": torch.randint(0, 127, (1, 5)),
                "score": torch.randint(0, 127, (1, 5)),
            },
            duration={
                "lab": torch.tensor([[1, 2, 2, 3, 3]], dtype=torch.int64),
                "score_phn": torch.tensor([[1, 2, 2, 3, 4]], dtype=torch.int64),
                "score_syb": torch.tensor([[3, 3, 5, 5, 4]], dtype=torch.int64),
            },
            slur=torch.randint(0, 2, (1, 5)),
            pitch=torch.randn(16, 1),
        )
        inputs = _to_device(inputs)
        model.inference(**inputs)

        # check inference with teacher forcing (reuse inputs, add target feats)
        inputs["feats"] = torch.randn(16, odim).to(device)
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
@pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="GPU is needed.",
)
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="Group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@pytest.mark.parametrize(
    "gen_dict, dis_dict, loss_dict",
    get_test_data(),
)
@pytest.mark.parametrize(
    "spks, spk_embed_dim, langs", [(10, -1, -1), (-1, 5, -1), (-1, -1, 3), (4, 5, 3)]
)
def test_multi_speaker_vits_is_trainable_and_decodable_on_gpu(
    gen_dict, dis_dict, loss_dict, spks, spk_embed_dim, langs
):
    """Conditioned (speaker/language) smoke test with model and inputs on CUDA."""
    idim = 10
    odim = 5
    global_channels = 8
    gen_args = make_vits_generator_args(**gen_dict)
    gen_args["generator_params"]["spks"] = spks
    gen_args["generator_params"]["langs"] = langs
    gen_args["generator_params"]["spk_embed_dim"] = spk_embed_dim
    gen_args["generator_params"]["global_channels"] = global_channels
    dis_args = make_vits_discriminator_args(**dis_dict)
    loss_args = make_vits_loss_args(**loss_dict)
    model = VITS(
        idim=idim,
        odim=odim,
        **gen_args,
        **dis_args,
        **loss_args,
    )
    model.train()
    upsample_factor = model.generator.upsample_factor
    device = torch.device("cuda")
    model.to(device)

    def _to_device(batch):
        # Move a (possibly one-level-nested) dict of tensors onto the GPU.
        return {
            k: {k2: v2.to(device) for k2, v2 in v.items()}
            if isinstance(v, dict)
            else v.to(device)
            for k, v in batch.items()
        }

    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, 16, odim),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        singing=torch.randn(2, 16 * upsample_factor),
        # BUGFIX: scale each length element-wise. The previous
        # "[16, 13] * upsample_factor" repeated the Python list, producing
        # a (2 * upsample_factor,)-shaped tensor instead of (2,).
        singing_lengths=torch.tensor([16, 13], dtype=torch.long) * upsample_factor,
        label={
            "lab": torch.randint(0, idim, (2, 8)),
            "score": torch.randint(0, idim, (2, 8)),
        },
        label_lengths={
            "lab": torch.tensor([8, 5], dtype=torch.long),
            "score": torch.tensor([8, 5], dtype=torch.long),
        },
        melody={
            "lab": torch.randint(0, 127, (2, 8)),
            "score": torch.randint(0, 127, (2, 8)),
        },
        duration={
            "lab": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 2], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_phn": torch.tensor(
                [[1, 2, 2, 3, 1, 3, 2, 1], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
            ),
            "score_syb": torch.tensor(
                [[3, 3, 5, 5, 4, 4, 3, 3], [4, 4, 5, 5, 3, 3, 4, 4]], dtype=torch.int64
            ),
        },
        slur=torch.randint(0, 2, (2, 8)),
        pitch=torch.randn(2, 16, 1),
    )
    # Conditioning inputs are only provided when the model was built with them.
    if spks > 0:
        inputs["sids"] = torch.randint(0, spks, (2, 1))
    if langs > 0:
        inputs["lids"] = torch.randint(0, langs, (2, 1))
    if spk_embed_dim > 0:
        inputs["spembs"] = torch.randn(2, spk_embed_dim)
    inputs = _to_device(inputs)
    gen_loss = model(forward_generator=True, **inputs)["loss"]
    gen_loss.backward()
    dis_loss = model(forward_generator=False, **inputs)["loss"]
    dis_loss.backward()

    with torch.no_grad():
        model.eval()

        # check inference
        inputs = dict(
            text=torch.randint(0, idim, (1, 5)),
            label={
                "lab": torch.randint(0, idim, (1, 5)),
                "score": torch.randint(0, idim, (1, 5)),
            },
            melody={
                "lab": torch.randint(0, 127, (1, 5)),
                "score": torch.randint(0, 127, (1, 5)),
            },
            duration={
                "lab": torch.tensor([[1, 2, 2, 3, 3]], dtype=torch.int64),
                "score_phn": torch.tensor([[1, 2, 2, 3, 4]], dtype=torch.int64),
                "score_syb": torch.tensor([[3, 3, 5, 5, 4]], dtype=torch.int64),
            },
            slur=torch.randint(0, 2, (1, 5)),
            pitch=torch.randn(16, 1),
        )
        if spks > 0:
            inputs["sids"] = torch.randint(0, spks, (1,))
        if langs > 0:
            inputs["lids"] = torch.randint(0, langs, (1,))
        if spk_embed_dim > 0:
            inputs["spembs"] = torch.randn(spk_embed_dim)
        inputs = _to_device(inputs)
        model.inference(**inputs)

        # check inference with teacher forcing (reuse inputs, add target feats)
        inputs["feats"] = torch.randn(16, odim).to(device)
        output_dict = model.inference(**inputs, use_teacher_forcing=True)
        assert output_dict["wav"].size(0) == inputs["feats"].size(0) * upsample_factor
# Copyright 2021 Tomoki Hayashi
# Copyright 2023 Yifeng Yu
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Test VISinger generator modules."""
import pytest
import torch
from espnet2.gan_svs.vits.generator import VISingerGenerator
def make_generator_args(**kwargs):
    """Build default keyword arguments for ``VISingerGenerator``.

    Keyword arguments override the corresponding defaults.
    """
    config = {
        "vocabs": 10,
        "aux_channels": 5,
        "hidden_channels": 4,
        "spks": -1,
        "langs": -1,
        "spk_embed_dim": -1,
        "global_channels": -1,
        "segment_size": 4,
        "text_encoder_attention_heads": 2,
        "text_encoder_ffn_expand": 4,
        "text_encoder_blocks": 2,
        "text_encoder_positionwise_layer_type": "conv1d",
        "text_encoder_positionwise_conv_kernel_size": 1,
        "text_encoder_normalize_before": True,
        "text_encoder_dropout_rate": 0.1,
        "text_encoder_positional_dropout_rate": 0.0,
        "text_encoder_attention_dropout_rate": 0.0,
        "text_encoder_conformer_kernel_size": 7,
        "use_macaron_style_in_text_encoder": True,
        "use_conformer_conv_in_text_encoder": True,
        "decoder_kernel_size": 7,
        "decoder_channels": 16,
        "decoder_upsample_scales": [16, 16],
        "decoder_upsample_kernel_sizes": [32, 32],
        "decoder_resblock_kernel_sizes": [3, 5],
        "decoder_resblock_dilations": [[1, 3], [1, 3]],
        "use_weight_norm_in_decoder": True,
        "posterior_encoder_kernel_size": 5,
        "posterior_encoder_layers": 2,
        "posterior_encoder_stacks": 1,
        "posterior_encoder_base_dilation": 1,
        "posterior_encoder_dropout_rate": 0.0,
        "use_weight_norm_in_posterior_encoder": True,
        "flow_flows": 2,
        "flow_kernel_size": 5,
        "flow_base_dilation": 1,
        "flow_layers": 2,
        "flow_dropout_rate": 0.0,
        "use_weight_norm_in_flow": True,
        "use_only_mean_in_flow": True,
        "generator_type": "visinger",
        "vocoder_generator_type": "hifigan",
        "fs": 22050,
        "hop_length": 256,
        "win_length": 1024,
        "n_fft": 1024,
        "use_phoneme_predictor": False,
    }
    config.update(kwargs)
    return config
# NOTE(kan-bayashi): first forward requires jit compile
# so a little bit more time is needed to run. Therefore,
# here we extend execution timeout from 2 sec to 5 sec.
@pytest.mark.execution_timeout(5)
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@torch.no_grad()
@pytest.mark.parametrize(
    "model_dict",
    [
        ({}),
        ({"text_encoder_positionwise_layer_type": "linear"}),
        ({"text_encoder_positionwise_layer_type": "conv1d-linear"}),
        ({"text_encoder_normalize_before": False}),
        ({"use_macaron_style_in_text_encoder": False}),
        ({"use_conformer_conv_in_text_encoder": False}),
        (
            {
                "text_encoder_positional_encoding_layer_type": "scaled_abs_pos",
                "text_encoder_self_attention_layer_type": "selfattn",
            }
        ),
        ({"spk_embed_dim": 16, "global_channels": 4}),
        ({"langs": 16, "global_channels": 4}),
    ],
)
def test_vits_generator_forward(model_dict):
    """Run VISingerGenerator forward, inference, and teacher-forced inference."""
    idim = 10
    odim = 5
    args = make_generator_args(vocabs=idim, aux_channels=odim, **model_dict)
    model = VISingerGenerator(**args)

    # Training-style forward on a batch of two utterances.
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, odim, 16),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        label=torch.randint(0, idim, (2, 8)),
        label_lengths=torch.tensor([8, 5], dtype=torch.long),
        melody=torch.randint(0, 127, (2, 8)),
        gt_dur=torch.tensor(
            [[1, 2, 2, 3, 1, 3, 2, 2], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
        ),
        score_dur=torch.randint(1, idim, (2, 8)),
        slur=torch.randint(0, 2, (2, 8)),
        pitch=torch.randn(2, 16, 1),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(2, args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (2, 1))
    outputs = model(**inputs)
    # Dump output shapes for debugging; some entries may be None or tuples.
    for i, output in enumerate(outputs):
        if not isinstance(output, tuple):
            if output is not None:
                print(f"{i+1}: {output.shape}")
        else:
            for j, output_ in enumerate(output):
                if output_ is not None:
                    print(f"{i+j+1}: {output_.shape}")

    # Free-running inference on a batch of two.
    inputs = dict(
        text=torch.randint(0, idim, (2, 5)),
        text_lengths=torch.tensor([5, 3], dtype=torch.long),
        label=torch.randint(0, idim, (2, 5)),
        label_lengths=torch.tensor([5, 3], dtype=torch.long),
        melody=torch.randint(0, 127, (2, 5)),
        score_dur=torch.randint(1, idim, (2, 5)),
        slur=torch.randint(0, 2, (2, 5)),
        pitch=torch.randn(2, 16, 1),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    outputs = model.inference(**inputs)
    for i, output in enumerate(outputs):
        if not isinstance(output, tuple):
            if output is not None:
                print(f"{i+1}: {output.shape}")
        else:
            for j, output_ in enumerate(output):
                if output_ is not None:
                    print(f"{i+j+1}: {output_.shape}")

    # Teacher-forced inference: waveform length must match feats * upsampling.
    inputs = dict(
        text=torch.randint(0, idim, (1, 5)),
        text_lengths=torch.tensor([5], dtype=torch.long),
        label=torch.randint(0, idim, (1, 5)),
        label_lengths=torch.tensor([5], dtype=torch.long),
        melody=torch.randint(0, 127, (1, 5)),
        score_dur=torch.randint(1, idim, (1, 5)),
        slur=torch.randint(0, 2, (1, 5)),
        pitch=torch.randn(1, 16, 1),
        feats=torch.randn(1, odim, 16),
        feats_lengths=torch.tensor([16], dtype=torch.long),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    output = model.inference(**inputs, use_teacher_forcing=True)
    assert output.size(1) == inputs["feats"].size(2) * model.upsample_factor
@pytest.mark.skipif(
    "1.6" in torch.__version__,
    reason="group conv in pytorch 1.6 has an issue. "
    "See https://github.com/pytorch/pytorch/issues/42446.",
)
@torch.no_grad()
@pytest.mark.parametrize(
    "model_dict",
    [
        ({}),
        ({"text_encoder_positionwise_layer_type": "linear"}),
        ({"text_encoder_positionwise_layer_type": "conv1d-linear"}),
        ({"text_encoder_normalize_before": False}),
        ({"use_macaron_style_in_text_encoder": False}),
        ({"use_conformer_conv_in_text_encoder": False}),
        (
            {
                "text_encoder_positional_encoding_layer_type": "scaled_abs_pos",
                "text_encoder_self_attention_layer_type": "selfattn",
            }
        ),
        ({"spk_embed_dim": 16}),
        ({"langs": 16}),
    ],
)
def test_multi_speaker_vits_generator_forward(model_dict):
    """Run VISingerGenerator with speaker-ID conditioning end to end."""
    idim = 10
    odim = 5
    spks = 10
    global_channels = 8
    args = make_generator_args(
        vocabs=idim,
        aux_channels=odim,
        spks=spks,
        global_channels=global_channels,
        **model_dict,
    )
    model = VISingerGenerator(**args)

    # Training-style forward on a batch of two utterances with speaker IDs.
    inputs = dict(
        text=torch.randint(0, idim, (2, 8)),
        text_lengths=torch.tensor([8, 5], dtype=torch.long),
        feats=torch.randn(2, odim, 16),
        feats_lengths=torch.tensor([16, 13], dtype=torch.long),
        label=torch.randint(0, idim, (2, 8)),
        label_lengths=torch.tensor([8, 5], dtype=torch.long),
        melody=torch.randint(0, 127, (2, 8)),
        gt_dur=torch.tensor(
            [[1, 2, 2, 3, 1, 3, 2, 2], [2, 2, 1, 4, 1, 2, 1, 3]], dtype=torch.int64
        ),
        score_dur=torch.randint(1, idim, (2, 8)),
        slur=torch.randint(0, 2, (2, 8)),
        pitch=torch.randn(2, 16, 1),
        sids=torch.randint(0, spks, (2,)),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(2, args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (2, 1))
    outputs = model(**inputs)
    # Dump output shapes for debugging; some entries may be None or tuples.
    for i, output in enumerate(outputs):
        if not isinstance(output, tuple):
            if output is not None:
                print(f"{i+1}: {output.shape}")
        else:
            for j, output_ in enumerate(output):
                if output_ is not None:
                    print(f"{i+j+1}: {output_.shape}")

    # Free-running inference on a batch of two.
    inputs = dict(
        text=torch.randint(0, idim, (2, 5)),
        text_lengths=torch.tensor([5, 3], dtype=torch.long),
        label=torch.randint(0, idim, (2, 5)),
        label_lengths=torch.tensor([5, 3], dtype=torch.long),
        melody=torch.randint(0, 127, (2, 5)),
        score_dur=torch.randint(1, idim, (2, 5)),
        slur=torch.randint(0, 2, (2, 5)),
        pitch=torch.randn(2, 16, 1),
        sids=torch.randint(0, spks, (1,)),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    outputs = model.inference(**inputs)
    for i, output in enumerate(outputs):
        if not isinstance(output, tuple):
            if output is not None:
                print(f"{i+1}: {output.shape}")
        else:
            for j, output_ in enumerate(output):
                if output_ is not None:
                    print(f"{i+j+1}: {output_.shape}")

    # Teacher-forced inference: waveform length must match feats * upsampling.
    inputs = dict(
        text=torch.randint(0, idim, (1, 5)),
        text_lengths=torch.tensor([5], dtype=torch.long),
        feats=torch.randn(1, odim, 16),
        feats_lengths=torch.tensor([16], dtype=torch.long),
        label=torch.randint(0, idim, (1, 5)),
        label_lengths=torch.tensor([5], dtype=torch.long),
        melody=torch.randint(0, 127, (1, 5)),
        score_dur=torch.randint(1, idim, (1, 5)),
        slur=torch.randint(0, 2, (1, 5)),
        pitch=torch.randn(1, 16, 1),
        sids=torch.randint(0, spks, (1,)),
    )
    if args["spk_embed_dim"] > 0:
        inputs["spembs"] = torch.randn(args["spk_embed_dim"])
    if args["langs"] > 0:
        inputs["lids"] = torch.randint(0, args["langs"], (1,))
    output = model.inference(**inputs, use_teacher_forcing=True)
    assert output.size(1) == inputs["feats"].size(2) * model.upsample_factor
import pytest
import torch
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.transformer_decoder import TransformerDecoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.espnet_model import ESPnetASRModel
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.diar.decoder.linear_decoder import LinearDecoder
from espnet2.diar.espnet_model import ESPnetDiarizationModel
from espnet2.diar.layers.multi_mask import MultiMask
from espnet2.diar.separator.tcn_separator_nomask import TCNSeparatorNomask
from espnet2.enh.decoder.conv_decoder import ConvDecoder
from espnet2.enh.decoder.stft_decoder import STFTDecoder
from espnet2.enh.encoder.conv_encoder import ConvEncoder
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.enh.espnet_enh_s2t_model import ESPnetEnhS2TModel
from espnet2.enh.espnet_model import ESPnetEnhancementModel
from espnet2.enh.loss.criterions.time_domain import SISNRLoss
from espnet2.enh.loss.wrappers.fixed_order import FixedOrderSolver
from espnet2.enh.loss.wrappers.pit_solver import PITSolver
from espnet2.enh.separator.neural_beamformer import NeuralBeamformer
from espnet2.enh.separator.rnn_separator import RNNSeparator
from espnet2.layers.label_aggregation import LabelAggregate
# Shared fixtures for the enhancement branch: STFT analysis/synthesis pair
# with a small FFT (17 frequency bins) so the tests stay fast.
enh_stft_encoder = STFTEncoder(
    n_fft=32,
    hop_length=16,
)
enh_stft_decoder = STFTDecoder(
    n_fft=32,
    hop_length=16,
)
# Single-speaker RNN separator; input_dim=17 matches the STFT bin count.
enh_rnn_separator = RNNSeparator(
    input_dim=17,
    layer=1,
    unit=10,
    num_spk=1,
)
# SI-SNR criterion wrapped by both permutation-handling solvers used below.
si_snr_loss = SISNRLoss()
fix_order_solver = FixedOrderSolver(criterion=si_snr_loss)
pit_solver = PITSolver(criterion=si_snr_loss)
# Tiny log-mel frontend feeding the ASR branch.
default_frontend = DefaultFrontend(
    fs=300,
    n_fft=32,
    win_length=32,
    hop_length=24,
    n_mels=32,
)
# Minimal vocabulary shared by all ASR fixtures below.
token_list = ["<blank>", "<space>", "a", "e", "i", "o", "u", "<sos/eos>"]
# Small transformer encoder/decoder and CTC head for the ASR branch.
asr_transformer_encoder = TransformerEncoder(
    32,
    output_size=16,
    linear_units=16,
    num_blocks=2,
)
asr_transformer_decoder = TransformerDecoder(
    len(token_list),
    16,
    linear_units=16,
    num_blocks=2,
)
asr_ctc = CTC(odim=len(token_list), encoder_output_size=16)
@pytest.mark.parametrize(
    "enh_encoder, enh_decoder",
    [(enh_stft_encoder, enh_stft_decoder)],
)
@pytest.mark.parametrize("enh_separator", [enh_rnn_separator])
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("loss_wrappers", [[fix_order_solver]])
@pytest.mark.parametrize("frontend", [default_frontend])
@pytest.mark.parametrize("s2t_encoder", [asr_transformer_encoder])
@pytest.mark.parametrize("s2t_decoder", [asr_transformer_decoder])
@pytest.mark.parametrize("s2t_ctc", [asr_ctc])
def test_enh_asr_model(
    enh_encoder,
    enh_decoder,
    enh_separator,
    training,
    loss_wrappers,
    frontend,
    s2t_encoder,
    s2t_decoder,
    s2t_ctc,
):
    """Joint enhancement + ASR model forward should yield a loss in both modes."""
    enh_model = ESPnetEnhancementModel(
        encoder=enh_encoder,
        separator=enh_separator,
        decoder=enh_decoder,
        mask_module=None,
        loss_wrappers=loss_wrappers,
    )
    s2t_model = ESPnetASRModel(
        vocab_size=len(token_list),
        token_list=token_list,
        frontend=frontend,
        encoder=s2t_encoder,
        decoder=s2t_decoder,
        ctc=s2t_ctc,
        specaug=None,
        normalize=None,
        preencoder=None,
        postencoder=None,
        joint_network=None,
    )
    model = ESPnetEnhS2TModel(
        enh_model=enh_model,
        s2t_model=s2t_model,
    )
    if training:
        model.train()
    else:
        model.eval()

    # Two-utterance batch: noisy speech, clean reference, and transcripts.
    batch = dict(
        speech=torch.randn(2, 300),
        speech_lengths=torch.LongTensor([300, 200]),
        speech_ref1=torch.randn(2, 300).float(),
        text_spk1=torch.LongTensor([[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]]),
        text_spk1_lengths=torch.LongTensor([5, 5]),
    )
    loss, stats, weight = model(**batch)
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("calc_enh_loss", [True, False])
def test_enh_asr_model_2spk(training, calc_enh_loss):
    """Two-speaker beamformer enhancement + ASR joint model smoke test."""
    separator = NeuralBeamformer(
        input_dim=17,
        loss_type="spectrum",
        num_spk=2,
        use_wpe=True,
        wlayers=2,
        wunits=2,
        wprojs=2,
        use_dnn_mask_for_wpe=True,
        multi_source_wpe=True,
        use_beamformer=True,
        blayers=2,
        bunits=2,
        bprojs=2,
        badim=2,
        ref_channel=0,
        use_noise_mask=False,
        beamformer_type="mvdr_souden",
    )
    enh_model = ESPnetEnhancementModel(
        encoder=enh_stft_encoder,
        separator=separator,
        decoder=enh_stft_decoder,
        mask_module=None,
        loss_wrappers=[pit_solver],
    )
    s2t_model = ESPnetASRModel(
        vocab_size=len(token_list),
        token_list=token_list,
        frontend=default_frontend,
        encoder=asr_transformer_encoder,
        decoder=asr_transformer_decoder,
        ctc=asr_ctc,
        specaug=None,
        normalize=None,
        preencoder=None,
        postencoder=None,
        joint_network=None,
    )
    model = ESPnetEnhS2TModel(
        enh_model=enh_model,
        s2t_model=s2t_model,
        calc_enh_loss=calc_enh_loss,
    )
    if training:
        model.train()
    else:
        model.eval()

    # Two-channel mixture with two reference sources and two transcripts.
    batch = dict(
        speech=torch.randn(2, 300, 2),
        speech_lengths=torch.LongTensor([300, 200]),
        speech_ref1=torch.randn(2, 300),
        speech_ref2=torch.randn(2, 300),
        text_spk1=torch.LongTensor([[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]]),
        text_spk2=torch.LongTensor([[3, 4, 2, 5], [2, 1, 3, 5]]),
        text_spk1_lengths=torch.LongTensor([5, 4]),
        text_spk2_lengths=torch.LongTensor([4, 4]),
    )
    loss, stats, weight = model(**batch)
# ---- Shared fixtures for the joint enhancement + diarization test below ----
# Aggregates frame-level speaker labels to the encoder's frame rate.
label_aggregator = LabelAggregate(
    win_length=32,
    hop_length=16,
)
enh_encoder = ConvEncoder(
    channel=17,
    kernel_size=32,
    stride=16,
)
enh_decoder = ConvDecoder(
    channel=17,
    kernel_size=32,
    stride=16,
)
# TCN separator variant without built-in masking; masks come from MultiMask.
tcn_separator = TCNSeparatorNomask(
    input_dim=enh_encoder.output_dim,
    layer=2,
    stack=1,
    bottleneck_dim=10,
    hidden_dim=10,
    kernel=3,
)
mask_module = MultiMask(
    bottleneck_dim=10,
    max_num_spk=3,
    input_dim=enh_encoder.output_dim,
)
diar_frontend = DefaultFrontend(
    n_fft=32,
    win_length=32,
    hop_length=16,
    n_mels=32,
)
# The diarization encoder consumes separator features concatenated with
# front-end features, hence the summed input_size.
diar_encoder = TransformerEncoder(
    input_layer="linear",
    num_blocks=1,
    linear_units=32,
    output_size=16,
    attention_heads=2,
    input_size=tcn_separator.output_dim + diar_frontend.output_size(),
)
diar_decoder = LinearDecoder(
    num_spk=2,
    encoder_output_size=diar_encoder.output_size(),
)
@pytest.mark.parametrize("label_aggregator", [label_aggregator])
@pytest.mark.parametrize("enh_encoder, enh_decoder", [(enh_encoder, enh_decoder)])
@pytest.mark.parametrize("enh_separator", [tcn_separator])
@pytest.mark.parametrize("mask_module", [mask_module])
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("loss_wrappers", [[fix_order_solver]])
@pytest.mark.parametrize("diar_frontend", [diar_frontend])
@pytest.mark.parametrize("diar_encoder, diar_decoder", [(diar_encoder, diar_decoder)])
def test_enh_diar_model(
    enh_encoder,
    enh_decoder,
    enh_separator,
    mask_module,
    training,
    loss_wrappers,
    diar_frontend,
    diar_encoder,
    diar_decoder,
    label_aggregator,
):
    """Forward a joint enhancement + diarization model on random data."""
    mix = torch.randn(2, 300)
    ref_wav = torch.randn(2, 300).float()
    # Binary frame-level activity labels for 2 speakers.
    spk_labels = torch.randint(high=2, size=(2, 300, 2))

    front = ESPnetEnhancementModel(
        encoder=enh_encoder,
        separator=enh_separator,
        decoder=enh_decoder,
        mask_module=mask_module,
        loss_wrappers=loss_wrappers,
    )
    back = ESPnetDiarizationModel(
        label_aggregator=label_aggregator,
        frontend=diar_frontend,
        encoder=diar_encoder,
        decoder=diar_decoder,
        specaug=None,
        normalize=None,
        attractor=None,
    )
    joint = ESPnetEnhS2TModel(
        enh_model=front,
        s2t_model=back,
    )
    # train(mode=False) is equivalent to eval().
    joint.train(mode=training)

    loss, stats, weight = joint(
        speech=mix,
        speech_ref1=ref_wav,
        speech_ref2=ref_wav,
        text=spk_labels,
    )
| 8,614 | 25.921875 | 86 | py |
espnet | espnet-master/test/espnet2/enh/test_espnet_model_tse.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.enh.decoder.conv_decoder import ConvDecoder
from espnet2.enh.decoder.stft_decoder import STFTDecoder
from espnet2.enh.encoder.conv_encoder import ConvEncoder
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.enh.espnet_model_tse import ESPnetExtractionModel
from espnet2.enh.extractor.td_speakerbeam_extractor import TDSpeakerBeamExtractor
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainMSE
from espnet2.enh.loss.criterions.time_domain import SISNRLoss
from espnet2.enh.loss.wrappers.fixed_order import FixedOrderSolver
from espnet2.enh.loss.wrappers.pit_solver import PITSolver
# Torch version gates (complex-tensor features require newer releases).
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
is_torch_1_12_1_plus = V(torch.__version__) >= V("1.12.1")
# Small STFT and learned-conv encoder/decoder pairs shared across tests.
stft_encoder = STFTEncoder(n_fft=32, hop_length=16)
stft_encoder_bultin_complex = STFTEncoder(
    n_fft=32, hop_length=16, use_builtin_complex=True
)
stft_decoder = STFTDecoder(n_fft=32, hop_length=16)
conv_encoder = ConvEncoder(channel=17, kernel_size=36, stride=18)
conv_decoder = ConvDecoder(channel=17, kernel_size=36, stride=18)
# Tiny TD-SpeakerBeam extractor for target-speaker extraction tests.
td_speakerbeam_extractor = TDSpeakerBeamExtractor(
    input_dim=17,
    layer=3,
    stack=2,
    bottleneck_dim=8,
    hidden_dim=16,
    skip_dim=8,
    i_adapt_layer=3,
    adapt_enroll_dim=8,
)
# Loss criteria and their permutation-handling wrappers.
si_snr_loss = SISNRLoss()
tf_mse_loss = FrequencyDomainMSE()
pit_wrapper = PITSolver(criterion=si_snr_loss)
fix_order_solver = FixedOrderSolver(criterion=tf_mse_loss)
@pytest.mark.parametrize(
    "encoder, decoder",
    [
        (stft_encoder, stft_decoder),
        (stft_encoder_bultin_complex, stft_decoder),
        (conv_encoder, conv_decoder),
    ],
)
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("loss_wrapper", [pit_wrapper, fix_order_solver])
def test_criterion_behavior_validation(
    encoder, decoder, training, num_spk, loss_wrapper
):
    """Forward ESPnetExtractionModel in both train and eval modes.

    Checks that the target-speaker extraction model runs with 1 or 2
    target speakers and with both loss wrappers.
    """
    inputs = torch.randn(2, 300)
    ilens = torch.LongTensor([300, 200])
    speech_refs = [torch.randn(2, 300).float(), torch.randn(2, 300).float()]
    enroll_refs = [torch.randn(2, 400).float(), torch.randn(2, 400).float()]
    aux_lens = [torch.LongTensor([400, 300]), torch.LongTensor([400, 350])]
    if num_spk == 1:
        # Keep only the first speaker's references and enrollments.
        speech_refs = speech_refs[:1]
        enroll_refs = enroll_refs[:1]
        aux_lens = aux_lens[:1]
    enh_model = ESPnetExtractionModel(
        encoder=encoder,
        extractor=td_speakerbeam_extractor,
        decoder=decoder,
        loss_wrappers=[loss_wrapper],
        num_spk=num_spk,
        share_encoder=True,
    )
    if training:
        enh_model.train()
    else:
        enh_model.eval()
    kwargs = {
        "speech_mix": inputs,
        "speech_mix_lengths": ilens,
        **{"speech_ref{}".format(i + 1): speech_refs[i] for i in range(num_spk)},
        **{"enroll_ref{}".format(i + 1): enroll_refs[i] for i in range(num_spk)},
        **{"enroll_ref{}_lengths".format(i + 1): aux_lens[i] for i in range(num_spk)},
    }
    # The original ended with an if/else whose branches were byte-identical;
    # the forward call is the same in both modes, so call it once.
    loss, stats, weight = enh_model(**kwargs)
@pytest.mark.parametrize("encoder, decoder", [(conv_encoder, conv_decoder)])
@pytest.mark.parametrize("extractor", [td_speakerbeam_extractor])
def test_criterion_behavior_noise_dereverb(encoder, decoder, extractor):
    """Noise-only and dereverb-only criteria are rejected at construction."""
    for bad_criterion in (
        SISNRLoss(is_noise_loss=True),
        SISNRLoss(is_dereverb_loss=True),
    ):
        with pytest.raises(ValueError):
            ESPnetExtractionModel(
                encoder=encoder,
                extractor=extractor,
                decoder=decoder,
                loss_wrappers=[PITSolver(criterion=bad_criterion)],
            )
| 3,910 | 33.307018 | 86 | py |
espnet | espnet-master/test/espnet2/enh/test_espnet_model.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.enh.decoder.conv_decoder import ConvDecoder
from espnet2.enh.decoder.null_decoder import NullDecoder
from espnet2.enh.decoder.stft_decoder import STFTDecoder
from espnet2.enh.encoder.conv_encoder import ConvEncoder
from espnet2.enh.encoder.null_encoder import NullEncoder
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.enh.espnet_model import ESPnetEnhancementModel
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainL1, FrequencyDomainMSE
from espnet2.enh.loss.criterions.time_domain import SISNRLoss
from espnet2.enh.loss.wrappers.fixed_order import FixedOrderSolver
from espnet2.enh.loss.wrappers.multilayer_pit_solver import MultiLayerPITSolver
from espnet2.enh.loss.wrappers.pit_solver import PITSolver
from espnet2.enh.separator.conformer_separator import ConformerSeparator
from espnet2.enh.separator.dc_crn_separator import DC_CRNSeparator
from espnet2.enh.separator.dccrn_separator import DCCRNSeparator
from espnet2.enh.separator.dprnn_separator import DPRNNSeparator
from espnet2.enh.separator.dptnet_separator import DPTNetSeparator
from espnet2.enh.separator.ineube_separator import iNeuBe
from espnet2.enh.separator.neural_beamformer import NeuralBeamformer
from espnet2.enh.separator.rnn_separator import RNNSeparator
from espnet2.enh.separator.svoice_separator import SVoiceSeparator
from espnet2.enh.separator.tcn_separator import TCNSeparator
from espnet2.enh.separator.tfgridnet_separator import TFGridNet
from espnet2.enh.separator.transformer_separator import TransformerSeparator
# Torch version gates (complex-tensor features require newer releases).
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
is_torch_1_12_1_plus = V(torch.__version__) >= V("1.12.1")
# Encoder/decoder pairs shared by the single-channel tests below.
stft_encoder = STFTEncoder(n_fft=32, hop_length=16)
stft_encoder_bultin_complex = STFTEncoder(
    n_fft=32, hop_length=16, use_builtin_complex=True
)
stft_decoder = STFTDecoder(n_fft=32, hop_length=16)
conv_encoder = ConvEncoder(channel=17, kernel_size=36, stride=18)
conv_decoder = ConvDecoder(channel=17, kernel_size=36, stride=18)
# Pass-through encoder/decoder for separators that work on raw waveforms.
null_encoder = NullEncoder()
null_decoder = NullDecoder()
# One tiny instance of each separator architecture under test.
conformer_separator = ConformerSeparator(
    input_dim=17, predict_noise=True, adim=8, aheads=2, layers=2, linear_units=10
)
dc_crn_separator = DC_CRNSeparator(
    input_dim=17, predict_noise=True, input_channels=[2, 2, 4]
)
dccrn_separator = DCCRNSeparator(
    input_dim=17, num_spk=1, kernel_num=[32, 64, 128], use_noise_mask=True
)
dprnn_separator = DPRNNSeparator(
    input_dim=17, predict_noise=True, layer=1, unit=10, segment_size=4
)
dptnet_separator = DPTNetSeparator(
    input_dim=16, predict_noise=True, layer=1, unit=10, segment_size=4
)
rnn_separator = RNNSeparator(input_dim=17, predict_noise=True, layer=1, unit=10)
svoice_separator = SVoiceSeparator(
    input_dim=17,
    enc_dim=4,
    kernel_size=4,
    hidden_size=4,
    num_spk=2,
    num_layers=2,
    segment_size=4,
    bidirectional=False,
    input_normalize=False,
)
tcn_separator = TCNSeparator(
    input_dim=17,
    predict_noise=True,
    layer=2,
    stack=1,
    bottleneck_dim=10,
    hidden_dim=10,
    kernel=3,
)
transformer_separator = TransformerSeparator(
    input_dim=17, predict_noise=True, adim=8, aheads=2, layers=2, linear_units=10
)
# Loss criteria and the wrappers that resolve speaker permutation.
si_snr_loss = SISNRLoss()
tf_mse_loss = FrequencyDomainMSE()
tf_l1_loss = FrequencyDomainL1()
pit_wrapper = PITSolver(criterion=si_snr_loss)
multilayer_pit_solver = MultiLayerPITSolver(criterion=si_snr_loss)
fix_order_solver = FixedOrderSolver(criterion=tf_mse_loss)
@pytest.mark.parametrize("training", [True, False])
def test_criterion_behavior_validation(training):
    """A criterion flagged only_for_test must fail in training, pass in eval."""
    mix = torch.randn(2, 300)
    mix_lens = torch.LongTensor([300, 200])
    refs = [torch.randn(2, 300).float(), torch.randn(2, 300).float()]

    model = ESPnetEnhancementModel(
        encoder=stft_encoder,
        separator=rnn_separator,
        decoder=stft_decoder,
        mask_module=None,
        loss_wrappers=[PITSolver(criterion=SISNRLoss(only_for_test=True))],
    )
    # train(mode=False) is equivalent to eval().
    model.train(mode=training)

    batch = {"speech_mix": mix, "speech_mix_lengths": mix_lens}
    for idx, ref in enumerate(refs, start=1):
        batch["speech_ref{}".format(idx)] = ref

    if training:
        # Test-only criteria are rejected during training.
        with pytest.raises(AttributeError):
            loss, stats, weight = model(**batch)
    else:
        loss, stats, weight = model(**batch)
@pytest.mark.parametrize(
    "encoder, decoder",
    [
        (stft_encoder, stft_decoder),
        (stft_encoder_bultin_complex, stft_decoder),
        (conv_encoder, conv_decoder),
    ],
)
@pytest.mark.parametrize(
    "separator",
    [
        conformer_separator,
        dc_crn_separator,
        dccrn_separator,
        dprnn_separator,
        rnn_separator,
        tcn_separator,
        transformer_separator,
    ],
)
def test_criterion_behavior_noise(encoder, decoder, separator):
    """Training forward pass with a noise-loss criterion and a noise reference."""
    if isinstance(separator, (DCCRNSeparator, DC_CRNSeparator)) and not isinstance(
        encoder, STFTEncoder
    ):
        # DCCRNSeparator and DC_CRNSeparator only work on complex spectrum
        # features, i.e. require the STFT encoder.
        return
    mix = torch.randn(2, 300)
    mix_lens = torch.LongTensor([300, 200])
    refs = [torch.randn(2, 300).float(), torch.randn(2, 300).float()]
    noise = torch.randn(2, 300)

    model = ESPnetEnhancementModel(
        encoder=encoder,
        separator=separator,
        decoder=decoder,
        mask_module=None,
        loss_wrappers=[PITSolver(criterion=SISNRLoss(is_noise_loss=True))],
    )
    model.train()

    batch = {
        "speech_mix": mix,
        "speech_mix_lengths": mix_lens,
        "noise_ref1": noise,
    }
    for idx, ref in enumerate(refs, start=1):
        batch["speech_ref{}".format(idx)] = ref
    loss, stats, weight = model(**batch)
@pytest.mark.parametrize("loss_type", ["mask_mse", "spectrum"])
@pytest.mark.parametrize("num_spk", [1, 2])
def test_criterion_behavior_dereverb(loss_type, num_spk):
    """Training forward pass with a dereverberation-loss criterion.

    Uses a NeuralBeamformer (WPE-only dereverberation path) with either a
    mask-based MSE criterion or a time-domain SI-SNR criterion.
    """
    inputs = torch.randn(2, 300)
    ilens = torch.LongTensor([300, 200])
    speech_refs = [torch.randn(2, 300).float() for _ in range(num_spk)]
    dereverb_ref = [torch.randn(2, 300).float() for _ in range(num_spk)]
    beamformer = NeuralBeamformer(
        input_dim=17,
        loss_type=loss_type,
        num_spk=num_spk,
        use_wpe=True,
        wlayers=2,
        wunits=2,
        wprojs=2,
        use_dnn_mask_for_wpe=True,
        multi_source_wpe=True,
        use_beamformer=True,
        blayers=2,
        bunits=2,
        bprojs=2,
        badim=2,
        ref_channel=0,
        use_noise_mask=False,
    )
    # Pick a criterion matching the beamformer's loss_type; both are marked
    # as dereverberation losses so they consume dereverb_ref1.
    if loss_type == "mask_mse":
        loss_wrapper = PITSolver(
            criterion=FrequencyDomainMSE(
                compute_on_mask=True, mask_type="PSM", is_dereverb_loss=True
            )
        )
    else:
        loss_wrapper = PITSolver(criterion=SISNRLoss(is_dereverb_loss=True))
    enh_model = ESPnetEnhancementModel(
        encoder=stft_encoder,
        separator=beamformer,
        decoder=stft_decoder,
        mask_module=None,
        loss_wrappers=[loss_wrapper],
    )
    enh_model.train()
    kwargs = {
        "speech_mix": inputs,
        "speech_mix_lengths": ilens,
        **{"speech_ref{}".format(i + 1): speech_refs[i] for i in range(num_spk)},
        "dereverb_ref1": dereverb_ref[0],
    }
    loss, stats, weight = enh_model(**kwargs)
@pytest.mark.parametrize(
    "encoder, decoder",
    [
        (stft_encoder, stft_decoder),
        (stft_encoder_bultin_complex, stft_decoder),
        (conv_encoder, conv_decoder),
    ],
)
@pytest.mark.parametrize(
    "separator",
    [
        conformer_separator,
        dc_crn_separator,
        dccrn_separator,
        dprnn_separator,
        rnn_separator,
        tcn_separator,
        transformer_separator,
    ],
)
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("loss_wrappers", [[pit_wrapper, fix_order_solver]])
def test_single_channel_model(encoder, decoder, separator, training, loss_wrappers):
    """Smoke-test each single-channel separator with each encoder/decoder pair."""
    if isinstance(separator, (DCCRNSeparator, DC_CRNSeparator)) and not isinstance(
        encoder, STFTEncoder
    ):
        # DCCRNSeparator and DC_CRNSeparator only work on complex spectrum
        # features, i.e. require the STFT encoder.
        return
    mix = torch.randn(2, 300)
    mix_lens = torch.LongTensor([300, 200])
    refs = [torch.randn(2, 300).float(), torch.randn(2, 300).float()]

    model = ESPnetEnhancementModel(
        encoder=encoder,
        separator=separator,
        decoder=decoder,
        mask_module=None,
        loss_wrappers=loss_wrappers,
    )
    # train(mode=False) is equivalent to eval().
    model.train(mode=training)

    batch = {"speech_mix": mix, "speech_mix_lengths": mix_lens}
    for idx, ref in enumerate(refs, start=1):
        batch["speech_ref{}".format(idx)] = ref
    loss, stats, weight = model(**batch)
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("loss_wrappers", [[pit_wrapper, fix_order_solver]])
def test_dptnet(training, loss_wrappers):
    """DPTNet expects 16-dim features, so build matching conv encoder/decoder."""
    enc = ConvEncoder(channel=16, kernel_size=36, stride=18)
    dec = ConvDecoder(channel=16, kernel_size=36, stride=18)

    mix = torch.randn(2, 300)
    mix_lens = torch.LongTensor([300, 200])
    refs = [torch.randn(2, 300).float(), torch.randn(2, 300).float()]

    model = ESPnetEnhancementModel(
        encoder=enc,
        separator=dptnet_separator,
        decoder=dec,
        mask_module=None,
        loss_wrappers=loss_wrappers,
    )
    # train(mode=False) is equivalent to eval().
    model.train(mode=training)

    batch = {"speech_mix": mix, "speech_mix_lengths": mix_lens}
    for idx, ref in enumerate(refs, start=1):
        batch["speech_ref{}".format(idx)] = ref
    loss, stats, weight = model(**batch)
@pytest.mark.parametrize("encoder, decoder", [(null_encoder, null_decoder)])
@pytest.mark.parametrize("separator", [svoice_separator])
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("loss_wrappers", [[multilayer_pit_solver]])
def test_svoice_model(encoder, decoder, separator, training, loss_wrappers):
    """SVoice works on raw waveforms (null codec) with a multi-layer PIT loss."""
    mix = torch.randn(2, 300)
    mix_lens = torch.LongTensor([300, 200])
    refs = [torch.randn(2, 300).float(), torch.randn(2, 300).float()]

    model = ESPnetEnhancementModel(
        encoder=encoder,
        separator=separator,
        decoder=decoder,
        mask_module=None,
        loss_wrappers=loss_wrappers,
    )
    # train(mode=False) is equivalent to eval().
    model.train(mode=training)

    batch = {"speech_mix": mix, "speech_mix_lengths": mix_lens}
    for idx, ref in enumerate(refs, start=1):
        batch["speech_ref{}".format(idx)] = ref
    loss, stats, weight = model(**batch)
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("n_mics", [1, 2])
@pytest.mark.parametrize("loss_wrappers", [[pit_wrapper]])
@pytest.mark.parametrize("output_from", ["dnn1", "dnn2", "mfmcwf"])
def test_ineube(n_mics, training, loss_wrappers, output_from):
    """Smoke-test the iNeuBe multi-channel separator.

    Runs a forward pass with 1 or 2 microphones and each of the three
    output taps. Skipped silently on torch < 1.9 (no complex support).
    """
    if not is_torch_1_9_plus:
        return
    inputs = torch.randn(1, 300, n_mics)
    ilens = torch.LongTensor([300])
    speech_refs = [torch.randn(1, 300).float(), torch.randn(1, 300).float()]
    # NullEncoder/NullDecoder are imported at module level; the original
    # re-imported them locally, which was redundant.
    encoder = NullEncoder()
    decoder = NullDecoder()
    separator = iNeuBe(
        2, mic_channels=n_mics, output_from=output_from, tcn_blocks=1, tcn_repeats=1
    )
    enh_model = ESPnetEnhancementModel(
        encoder=encoder,
        separator=separator,
        decoder=decoder,
        mask_module=None,
        loss_wrappers=loss_wrappers,
    )
    if training:
        enh_model.train()
    else:
        enh_model.eval()
    kwargs = {
        "speech_mix": inputs,
        "speech_mix_lengths": ilens,
        **{"speech_ref{}".format(i + 1): speech_refs[i] for i in range(2)},
    }
    loss, stats, weight = enh_model(**kwargs)
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("n_mics", [1, 2])
@pytest.mark.parametrize("loss_wrappers", [[pit_wrapper]])
def test_tfgridnet(n_mics, training, loss_wrappers):
    """Smoke-test the TF-GridNet separator with 1 or 2 microphones.

    Skipped silently on torch < 1.9 (no complex support).
    """
    if not is_torch_1_9_plus:
        return
    if n_mics == 1:
        inputs = torch.randn(1, 300)
    else:
        inputs = torch.randn(1, 300, n_mics)
    ilens = torch.LongTensor([300])
    speech_refs = [torch.randn(1, 300).float(), torch.randn(1, 300).float()]
    # NullEncoder/NullDecoder are imported at module level; the original
    # re-imported them locally, which was redundant.
    encoder = NullEncoder()
    decoder = NullDecoder()
    separator = TFGridNet(
        None,
        n_srcs=2,
        n_imics=n_mics,
        n_layers=1,
        lstm_hidden_units=64,
        emb_dim=16,
        attn_approx_qk_dim=256,
    )
    enh_model = ESPnetEnhancementModel(
        encoder=encoder,
        separator=separator,
        decoder=decoder,
        mask_module=None,
        loss_wrappers=loss_wrappers,
    )
    if training:
        enh_model.train()
    else:
        enh_model.eval()
    kwargs = {
        "speech_mix": inputs,
        "speech_mix_lengths": ilens,
        **{"speech_ref{}".format(i + 1): speech_refs[i] for i in range(2)},
    }
    loss, stats, weight = enh_model(**kwargs)
# Deterministic toy multi-channel speech (batch=2, frames=16, channels=8)
# used by the beamformer test below for reproducible results.
random_speech = torch.tensor(
    [
        [
            [0.026, 0.031, 0.023, 0.029, 0.026, 0.029, 0.028, 0.027],
            [0.027, 0.031, 0.023, 0.027, 0.026, 0.028, 0.027, 0.027],
            [0.026, 0.030, 0.023, 0.026, 0.025, 0.028, 0.028, 0.028],
            [0.024, 0.028, 0.024, 0.027, 0.024, 0.027, 0.030, 0.030],
            [0.025, 0.027, 0.025, 0.028, 0.023, 0.026, 0.031, 0.031],
            [0.027, 0.026, 0.025, 0.029, 0.022, 0.026, 0.032, 0.031],
            [0.028, 0.026, 0.024, 0.031, 0.023, 0.025, 0.031, 0.029],
            [0.029, 0.024, 0.023, 0.032, 0.023, 0.024, 0.030, 0.027],
            [0.028, 0.024, 0.023, 0.030, 0.023, 0.023, 0.028, 0.027],
            [0.029, 0.026, 0.023, 0.029, 0.025, 0.024, 0.027, 0.025],
            [0.029, 0.027, 0.024, 0.026, 0.025, 0.027, 0.025, 0.025],
            [0.029, 0.031, 0.026, 0.024, 0.028, 0.028, 0.024, 0.025],
            [0.030, 0.038, 0.029, 0.023, 0.035, 0.032, 0.024, 0.026],
            [0.029, 0.040, 0.030, 0.023, 0.039, 0.039, 0.025, 0.027],
            [0.028, 0.040, 0.032, 0.025, 0.041, 0.039, 0.026, 0.028],
            [0.028, 0.041, 0.039, 0.027, 0.044, 0.041, 0.029, 0.035],
        ],
        [
            [0.015, 0.021, 0.012, 0.006, 0.028, 0.021, 0.024, 0.018],
            [0.005, 0.034, 0.036, 0.017, 0.016, 0.037, 0.011, 0.029],
            [0.011, 0.029, 0.060, 0.029, 0.045, 0.035, 0.034, 0.018],
            [0.031, 0.036, 0.040, 0.037, 0.059, 0.032, 0.035, 0.029],
            [0.031, 0.031, 0.036, 0.029, 0.058, 0.035, 0.039, 0.045],
            [0.050, 0.038, 0.052, 0.052, 0.059, 0.044, 0.055, 0.045],
            [0.025, 0.054, 0.054, 0.047, 0.043, 0.059, 0.045, 0.060],
            [0.042, 0.056, 0.073, 0.029, 0.048, 0.063, 0.051, 0.049],
            [0.053, 0.048, 0.045, 0.052, 0.039, 0.045, 0.031, 0.053],
            [0.054, 0.044, 0.053, 0.031, 0.062, 0.050, 0.048, 0.046],
            [0.053, 0.036, 0.075, 0.046, 0.073, 0.052, 0.045, 0.030],
            [0.039, 0.025, 0.061, 0.046, 0.064, 0.032, 0.027, 0.033],
            [0.053, 0.032, 0.052, 0.033, 0.052, 0.029, 0.026, 0.017],
            [0.054, 0.034, 0.054, 0.033, 0.045, 0.043, 0.024, 0.018],
            [0.031, 0.025, 0.043, 0.016, 0.051, 0.040, 0.023, 0.030],
            [0.008, 0.023, 0.024, 0.019, 0.032, 0.024, 0.012, 0.027],
        ],
    ],
    dtype=torch.double,
)
# NOTE: rebinds the earlier module-level ``pit_wrapper`` with a mask-based
# MSE criterion for the beamformer test below.
pit_wrapper = PITSolver(criterion=FrequencyDomainMSE(compute_on_mask=True))
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("mask_type", ["IBM", "IRM", "IAM", "PSM", "PSM^2"])
@pytest.mark.parametrize(
    "loss_type", ["mask_mse", "magnitude", "spectrum", "spectrum_log"]
)
@pytest.mark.parametrize("num_spk", [1, 2, 3])
@pytest.mark.parametrize("use_builtin_complex", [True, False])
@pytest.mark.parametrize("loss_wrappers", [[pit_wrapper]])
def test_forward_with_beamformer_net(
    training, mask_type, loss_type, num_spk, use_builtin_complex, loss_wrappers
):
    """Forward ESPnetEnhancementModel with a NeuralBeamformer separator.

    Sweeps mask/loss types, speaker counts and complex-tensor backends on
    the deterministic ``random_speech`` fixture (3 channels used).
    """
    # Skip some testing cases
    if not loss_type.startswith("mask") and mask_type != "IBM":
        # `mask_type` has no effect when `loss_type` is not "mask..."
        return
    if not is_torch_1_9_plus and use_builtin_complex:
        # builtin complex support is only well supported in PyTorch 1.9+
        return
    if is_torch_1_12_1_plus and not use_builtin_complex:
        # non-builtin complex support is deprecated in PyTorch 1.12.1+
        return
    ch = 3
    inputs = random_speech[..., :ch].float()
    ilens = torch.LongTensor([16, 12])
    speech_refs = [torch.randn(2, 16, dtype=torch.float) for spk in range(num_spk)]
    noise_ref1 = torch.randn(2, 16, ch, dtype=torch.float)
    dereverb_ref1 = torch.randn(2, 16, ch, dtype=torch.float)
    encoder = STFTEncoder(
        n_fft=8, hop_length=2, use_builtin_complex=use_builtin_complex
    )
    decoder = STFTDecoder(n_fft=8, hop_length=2)
    # Tiny WPE + MVDR(Souden) beamformer so the test stays fast.
    beamformer = NeuralBeamformer(
        input_dim=5,
        loss_type=loss_type,
        num_spk=num_spk,
        use_wpe=True,
        wlayers=2,
        wunits=2,
        wprojs=2,
        use_dnn_mask_for_wpe=True,
        multi_source_wpe=True,
        use_beamformer=True,
        blayers=2,
        bunits=2,
        bprojs=2,
        badim=2,
        ref_channel=0,
        use_noise_mask=False,
        beamformer_type="mvdr_souden",
        use_torchaudio_api=is_torch_1_12_1_plus,
    )
    enh_model = ESPnetEnhancementModel(
        encoder=encoder,
        decoder=decoder,
        separator=beamformer,
        mask_module=None,
        loss_type=loss_type,
        mask_type=mask_type,
        loss_wrappers=loss_wrappers,
    )
    if training:
        enh_model.train()
    else:
        enh_model.eval()
    kwargs = {
        "speech_mix": inputs,
        "speech_mix_lengths": ilens,
        **{"speech_ref{}".format(i + 1): speech_refs[i] for i in range(num_spk)},
        "dereverb_ref1": dereverb_ref1,
    }
    loss, stats, weight = enh_model(**kwargs)
    # For binary/ratio masks also exercise the noise-reference path.
    if mask_type in ("IBM", "IRM"):
        loss, stats, weight = enh_model(**kwargs, noise_ref1=noise_ref1)
| 18,543 | 32.055258 | 87 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_beamformer.py | import numpy as np
import pytest
import torch
from packaging.version import parse as V
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.enh.layers.complex_utils import is_torch_complex_tensor
from espnet2.enh.layers.dnn_beamformer import BEAMFORMER_TYPES
from espnet2.enh.separator.neural_beamformer import NeuralBeamformer
# Torch version gates (complex-tensor features require newer releases).
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
is_torch_1_12_1_plus = V(torch.__version__) >= V("1.12.1")
# Deterministic toy multi-channel speech (batch=2, frames=16, channels=8)
# shared by the beamformer tests for reproducible results.
random_speech = torch.tensor(
    [
        [
            [0.026, 0.031, 0.023, 0.029, 0.026, 0.029, 0.028, 0.027],
            [0.027, 0.031, 0.023, 0.027, 0.026, 0.028, 0.027, 0.027],
            [0.026, 0.030, 0.023, 0.026, 0.025, 0.028, 0.028, 0.028],
            [0.024, 0.028, 0.024, 0.027, 0.024, 0.027, 0.030, 0.030],
            [0.025, 0.027, 0.025, 0.028, 0.023, 0.026, 0.031, 0.031],
            [0.027, 0.026, 0.025, 0.029, 0.022, 0.026, 0.032, 0.031],
            [0.028, 0.026, 0.024, 0.031, 0.023, 0.025, 0.031, 0.029],
            [0.029, 0.024, 0.023, 0.032, 0.023, 0.024, 0.030, 0.027],
            [0.028, 0.024, 0.023, 0.030, 0.023, 0.023, 0.028, 0.027],
            [0.029, 0.026, 0.023, 0.029, 0.025, 0.024, 0.027, 0.025],
            [0.029, 0.027, 0.024, 0.026, 0.025, 0.027, 0.025, 0.025],
            [0.029, 0.031, 0.026, 0.024, 0.028, 0.028, 0.024, 0.025],
            [0.030, 0.038, 0.029, 0.023, 0.035, 0.032, 0.024, 0.026],
            [0.029, 0.040, 0.030, 0.023, 0.039, 0.039, 0.025, 0.027],
            [0.028, 0.040, 0.032, 0.025, 0.041, 0.039, 0.026, 0.028],
            [0.028, 0.041, 0.039, 0.027, 0.044, 0.041, 0.029, 0.035],
        ],
        [
            [0.015, 0.021, 0.012, 0.006, 0.028, 0.021, 0.024, 0.018],
            [0.005, 0.034, 0.036, 0.017, 0.016, 0.037, 0.011, 0.029],
            [0.011, 0.029, 0.060, 0.029, 0.045, 0.035, 0.034, 0.018],
            [0.031, 0.036, 0.040, 0.037, 0.059, 0.032, 0.035, 0.029],
            [0.031, 0.031, 0.036, 0.029, 0.058, 0.035, 0.039, 0.045],
            [0.050, 0.038, 0.052, 0.052, 0.059, 0.044, 0.055, 0.045],
            [0.025, 0.054, 0.054, 0.047, 0.043, 0.059, 0.045, 0.060],
            [0.042, 0.056, 0.073, 0.029, 0.048, 0.063, 0.051, 0.049],
            [0.053, 0.048, 0.045, 0.052, 0.039, 0.045, 0.031, 0.053],
            [0.054, 0.044, 0.053, 0.031, 0.062, 0.050, 0.048, 0.046],
            [0.053, 0.036, 0.075, 0.046, 0.073, 0.052, 0.045, 0.030],
            [0.039, 0.025, 0.061, 0.046, 0.064, 0.032, 0.027, 0.033],
            [0.053, 0.032, 0.052, 0.033, 0.052, 0.029, 0.026, 0.017],
            [0.054, 0.034, 0.054, 0.033, 0.045, 0.043, 0.024, 0.018],
            [0.031, 0.025, 0.043, 0.016, 0.051, 0.040, 0.023, 0.030],
            [0.008, 0.023, 0.024, 0.019, 0.032, 0.024, 0.012, 0.027],
        ],
    ],
    dtype=torch.double,
)
@pytest.mark.parametrize(
    "n_fft, win_length, hop_length",
    [(8, None, 2)],
)
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("loss_type", ["mask_mse", "spectrum"])
@pytest.mark.parametrize("use_wpe", [True])
@pytest.mark.parametrize("wnet_type", ["lstm"])
@pytest.mark.parametrize("wlayers", [2])
@pytest.mark.parametrize("wunits", [2])
@pytest.mark.parametrize("wprojs", [2])
@pytest.mark.parametrize("taps", [2])
@pytest.mark.parametrize("delay", [3])
@pytest.mark.parametrize("use_dnn_mask_for_wpe", [False])
@pytest.mark.parametrize("multi_source_wpe", [True, False])
@pytest.mark.parametrize("use_beamformer", [True])
@pytest.mark.parametrize("bnet_type", ["lstm"])
@pytest.mark.parametrize("blayers", [2])
@pytest.mark.parametrize("bunits", [2])
@pytest.mark.parametrize("bprojs", [2])
@pytest.mark.parametrize("badim", [2])
@pytest.mark.parametrize("ref_channel", [-1, 0])
@pytest.mark.parametrize("use_noise_mask", [True])
@pytest.mark.parametrize("bnonlinear", ["sigmoid", "relu", "tanh", "crelu"])
@pytest.mark.parametrize("beamformer_type", BEAMFORMER_TYPES)
def test_neural_beamformer_forward_backward(
    n_fft,
    win_length,
    hop_length,
    num_spk,
    loss_type,
    use_wpe,
    wnet_type,
    wlayers,
    wunits,
    wprojs,
    taps,
    delay,
    use_dnn_mask_for_wpe,
    multi_source_wpe,
    use_beamformer,
    bnet_type,
    blayers,
    bunits,
    bprojs,
    badim,
    ref_channel,
    use_noise_mask,
    bnonlinear,
    beamformer_type,
):
    """Run forward + backward through NeuralBeamformer for many configs.

    For mask-based losses the model returns only masks (no separated
    spectra); the test then backpropagates through the masks instead.
    """
    # Skip some cases
    if num_spk > 1 and use_wpe and use_beamformer:
        if not multi_source_wpe:
            # Single-source WPE is not supported with beamformer in multi-speaker cases
            return
    elif num_spk == 1:
        if multi_source_wpe:
            # When num_spk == 1, `multi_source_wpe` has no effect
            return
        elif beamformer_type in (
            "lcmv",
            "lcmp",
            "wlcmp",
            "mvdr_tfs",
            "mvdr_tfs_souden",
        ):
            # only support multiple-source cases
            return
    if bnonlinear != "sigmoid" and (
        beamformer_type != "mvdr_souden" or multi_source_wpe
    ):
        # only test different nonlinear layers with MVDR_Souden
        return
    # ensures reproducibility and reversibility in the matrix inverse computation
    torch.random.manual_seed(0)
    stft = STFTEncoder(
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        use_builtin_complex=is_torch_1_12_1_plus,
    )
    model = NeuralBeamformer(
        stft.output_dim,
        num_spk=num_spk,
        loss_type=loss_type,
        use_wpe=use_wpe,
        wnet_type=wnet_type,
        wlayers=wlayers,
        wunits=wunits,
        wprojs=wprojs,
        taps=taps,
        delay=delay,
        use_dnn_mask_for_wpe=use_dnn_mask_for_wpe,
        use_beamformer=use_beamformer,
        bnet_type=bnet_type,
        blayers=blayers,
        bunits=bunits,
        bprojs=bprojs,
        badim=badim,
        ref_channel=ref_channel,
        use_noise_mask=use_noise_mask,
        beamformer_type=beamformer_type,
        rtf_iterations=2,
        shared_power=True,
        use_torchaudio_api=is_torch_1_12_1_plus,
    )
    model.train()
    # 2-channel slice of the deterministic fixture.
    inputs = random_speech[..., :2].float()
    ilens = torch.LongTensor([16, 12])
    input_spectrum, flens = stft(inputs, ilens)
    est_speech, flens, others = model(input_spectrum, flens)
    if loss_type.startswith("mask"):
        assert est_speech is None
        loss = sum([abs(m).mean() for m in others.values()])
    else:
        loss = sum([abs(est).mean() for est in est_speech])
    # Any differentiable scalar suffices to exercise the backward pass.
    loss.backward()
@pytest.mark.parametrize("ch", [1, 2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("multi_source_wpe", [True, False])
@pytest.mark.parametrize("use_dnn_mask_for_wpe", [True, False])
def test_neural_beamformer_wpe_output(
    ch, num_spk, multi_source_wpe, use_dnn_mask_for_wpe
):
    """Check shapes and dtypes of WPE-only NeuralBeamformer outputs."""
    torch.random.manual_seed(0)
    waveform = torch.randn(2, 16, ch) if ch > 1 else torch.randn(2, 16)
    waveform = waveform.float()
    lengths = torch.LongTensor([16, 12])
    stft = STFTEncoder(n_fft=8, hop_length=2, use_builtin_complex=is_torch_1_12_1_plus)
    model = NeuralBeamformer(
        stft.output_dim,
        num_spk=num_spk,
        use_wpe=True,
        use_dnn_mask_for_wpe=use_dnn_mask_for_wpe,
        multi_source_wpe=multi_source_wpe,
        wlayers=2,
        wunits=2,
        wprojs=2,
        taps=5,
        delay=3,
        use_beamformer=False,
        use_torchaudio_api=is_torch_1_12_1_plus,
    )
    model.eval()
    spectrum, frame_lens = stft(waveform, lengths)
    specs, _, others = model(spectrum, frame_lens)

    assert isinstance(specs, list)
    # Mask-free or single-mask WPE yields one dereverberated stream;
    # per-source DNN-mask WPE yields one stream per speaker.
    n_expected = 1 if (not use_dnn_mask_for_wpe or multi_source_wpe) else num_spk
    assert len(specs) == n_expected
    assert specs[0].shape == spectrum.shape
    expected_dtype = (
        torch.complex64 if is_torch_complex_tensor(specs[0]) else torch.float
    )
    assert specs[0].dtype == expected_dtype
    assert isinstance(others, dict)
    if use_dnn_mask_for_wpe:
        assert "mask_dereverb1" in others, others.keys()
        assert others["mask_dereverb1"].shape == specs[0].shape
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("use_noise_mask", [True, False])
@pytest.mark.parametrize("beamformer_type", BEAMFORMER_TYPES)
@pytest.mark.parametrize(
    "diagonal_loading, mask_flooring, use_torch_solver",
    [(True, True, True), (False, False, False)],
)
def test_neural_beamformer_bf_output(
    num_spk,
    use_noise_mask,
    beamformer_type,
    diagonal_loading,
    mask_flooring,
    use_torch_solver,
):
    """Check shapes/dtypes of beamformer-only (no WPE) outputs and masks."""
    if num_spk == 1 and beamformer_type in (
        "lcmv",
        "lcmp",
        "wlcmp",
        "mvdr_tfs",
        "mvdr_tfs_souden",
    ):
        # only support multiple-source cases
        return
    ch = 2
    inputs = random_speech[..., :ch].float()
    ilens = torch.LongTensor([16, 12])
    torch.random.manual_seed(0)
    stft = STFTEncoder(n_fft=8, hop_length=2, use_builtin_complex=is_torch_1_12_1_plus)
    model = NeuralBeamformer(
        stft.output_dim,
        num_spk=num_spk,
        use_wpe=False,
        taps=2,
        delay=3,
        use_beamformer=True,
        blayers=2,
        bunits=2,
        bprojs=2,
        badim=2,
        use_noise_mask=use_noise_mask,
        beamformer_type=beamformer_type,
        diagonal_loading=diagonal_loading,
        mask_flooring=mask_flooring,
        use_torch_solver=use_torch_solver,
        use_torchaudio_api=is_torch_1_12_1_plus,
    )
    model.eval()
    input_spectrum, flens = stft(inputs, ilens)
    specs, _, others = model(input_spectrum, flens)
    assert isinstance(others, dict)
    if use_noise_mask:
        assert "mask_noise1" in others
        assert others["mask_noise1"].shape == others["mask_spk1"].shape
    assert isinstance(specs, list)
    assert len(specs) == num_spk
    for n in range(1, num_spk + 1):
        # Per-speaker masks are per-channel; separated spectra are single-channel.
        assert "mask_spk{}".format(n) in others, others.keys()
        assert others["mask_spk{}".format(n)].shape[-2] == ch
        assert specs[n - 1].shape == others["mask_spk{}".format(n)][..., 0, :].shape
        assert specs[n - 1].shape == input_spectrum[..., 0, :].shape
        if is_torch_complex_tensor(specs[n - 1]):
            assert specs[n - 1].dtype == torch.complex64
        else:
            assert specs[n - 1].dtype == torch.float
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("use_noise_mask", [True, False])
@pytest.mark.parametrize("beamformer_type", BEAMFORMER_TYPES)
@pytest.mark.skipif(not is_torch_1_12_1_plus, reason="Only for torch>=1.12.1")
def test_beamformer_net_consistency(num_spk, use_noise_mask, beamformer_type):
if beamformer_type in (
"lcmv",
"lcmp",
"wlcmp",
"mvdr_tfs",
"mvdr_tfs_souden",
):
# skip these beamformers as they require real multi-speaker data to obtain
# consistent results
return
torch.random.manual_seed(0)
ch = 2
inputs = random_speech[..., :ch].float().repeat(1, 5, 1)
ilens = torch.LongTensor([80, 60])
stft1 = STFTEncoder(n_fft=8, hop_length=2, use_builtin_complex=True)
stft2 = STFTEncoder(n_fft=8, hop_length=2, use_builtin_complex=False)
args = dict(
num_spk=num_spk,
use_wpe=False,
taps=2,
delay=3,
use_beamformer=True,
blayers=2,
bunits=2,
bprojs=2,
badim=2,
use_noise_mask=use_noise_mask,
beamformer_type=beamformer_type,
diagonal_loading=True,
mask_flooring=True,
use_torch_solver=True,
)
model1 = NeuralBeamformer(stft1.output_dim, use_torchaudio_api=True, **args)
model2 = NeuralBeamformer(stft2.output_dim, use_torchaudio_api=False, **args)
model1.eval()
model2.eval()
models = [model1, model2]
input_spectrum1, flens = stft1(inputs, ilens)
input_spectrum2 = stft2(inputs, ilens)[0]
inputs = [input_spectrum1, input_spectrum2]
specs = [
model(input_spectrum, flens)[0] for model, input_spectrum in zip(models, inputs)
]
for spk in range(num_spk):
np.testing.assert_allclose(*[s[spk].detach().numpy() for s in specs], atol=1e-1)
def test_beamformer_net_invalid_bf_type():
    """An unknown ``beamformer_type`` must be rejected at construction time."""
    with pytest.raises(ValueError):
        NeuralBeamformer(10, use_beamformer=True, beamformer_type="fff")
def test_beamformer_net_invalid_loss_type():
    """An unknown ``loss_type`` must be rejected at construction time."""
    with pytest.raises(ValueError):
        NeuralBeamformer(10, loss_type="fff")
| 12,458 | 34.095775 | 88 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_dptnet_separator.py | import pytest
import torch
from torch import Tensor
from torch_complex import ComplexTensor
from espnet2.enh.separator.dptnet_separator import DPTNetSeparator
@pytest.mark.parametrize("input_dim", [8])
@pytest.mark.parametrize("post_enc_relu", [True, False])
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("att_heads", [4])
@pytest.mark.parametrize("dropout", [0.2])
@pytest.mark.parametrize("activation", ["relu"])
@pytest.mark.parametrize("norm_type", ["gLN"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("segment_size", [2, 4])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_dptnet_separator_forward_backward_complex(
input_dim,
post_enc_relu,
rnn_type,
bidirectional,
num_spk,
unit,
att_heads,
dropout,
activation,
norm_type,
layer,
segment_size,
nonlinear,
):
model = DPTNetSeparator(
input_dim=input_dim,
post_enc_relu=post_enc_relu,
rnn_type=rnn_type,
bidirectional=bidirectional,
num_spk=num_spk,
unit=unit,
att_heads=att_heads,
dropout=dropout,
activation=activation,
norm_type=norm_type,
layer=layer,
segment_size=segment_size,
nonlinear=nonlinear,
)
model.train()
real = torch.rand(2, 10, input_dim)
imag = torch.rand(2, 10, input_dim)
x = ComplexTensor(real, imag)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert isinstance(masked[0], ComplexTensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [8])
@pytest.mark.parametrize("post_enc_relu", [True, False])
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("att_heads", [4])
@pytest.mark.parametrize("dropout", [0.2])
@pytest.mark.parametrize("activation", ["relu"])
@pytest.mark.parametrize("norm_type", ["gLN"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("segment_size", [2, 4])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_dptnet_separator_forward_backward_real(
input_dim,
post_enc_relu,
rnn_type,
bidirectional,
num_spk,
unit,
att_heads,
dropout,
activation,
norm_type,
layer,
segment_size,
nonlinear,
):
model = DPTNetSeparator(
input_dim=input_dim,
post_enc_relu=post_enc_relu,
rnn_type=rnn_type,
bidirectional=bidirectional,
num_spk=num_spk,
unit=unit,
att_heads=att_heads,
dropout=dropout,
activation=activation,
norm_type=norm_type,
layer=layer,
segment_size=segment_size,
nonlinear=nonlinear,
)
model.train()
x = torch.rand(2, 10, input_dim)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert isinstance(masked[0], Tensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
def test_dptnet_separator_invalid_args():
    """Invalid constructor arguments should fail fast."""
    # unsupported nonlinearity name
    with pytest.raises(ValueError):
        DPTNetSeparator(
            input_dim=8,
            rnn_type="rnn",
            num_spk=2,
            unit=10,
            dropout=0.1,
            layer=2,
            segment_size=2,
            nonlinear="fff",
        )
    # NOTE(review): these args trip an internal assertion — presumably a
    # dimension/att_heads compatibility check; confirm in DPTNetSeparator
    with pytest.raises(AssertionError):
        DPTNetSeparator(
            input_dim=10,
            rnn_type="rnn",
            num_spk=2,
            unit=10,
            att_heads=4,
            dropout=0.1,
            layer=2,
            segment_size=2,
            nonlinear="relu",
        )
def test_dptnet_separator_output():
    """Check output/mask shapes for 1 and 2 speakers in eval mode."""
    x = torch.rand(2, 10, 8)
    x_lens = torch.tensor([10, 8], dtype=torch.long)
    for num_spk in range(1, 3):
        model = DPTNetSeparator(
            input_dim=8,
            rnn_type="rnn",
            # bug fix: pass the loop variable instead of a hard-coded 2,
            # so the single-speaker case is actually tested
            num_spk=num_spk,
            unit=10,
            dropout=0.1,
            layer=2,
            segment_size=2,
            nonlinear="relu",
        )
        model.eval()
        specs, _, others = model(x, x_lens)
        assert isinstance(specs, list)
        assert len(specs) == num_spk
        assert isinstance(others, dict)
        assert x.shape == specs[0].shape
        for n in range(num_spk):
            assert "mask_spk{}".format(n + 1) in others
            assert specs[n].shape == others["mask_spk{}".format(n + 1)].shape
| 4,741 | 26.569767 | 77 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_dpcl_e2e_separator.py | import pytest
import torch
from torch import Tensor
from torch_complex import ComplexTensor
from espnet2.enh.separator.dpcl_e2e_separator import DPCLE2ESeparator
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["blstm"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [2])
@pytest.mark.parametrize("predict_noise", [True, False])
@pytest.mark.parametrize("emb_D", [40])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("alpha", [1.0, 5.0])
@pytest.mark.parametrize("max_iteration", [100, 500])
def test_dpcl_e2e_separator_forward_backward_complex(
input_dim,
rnn_type,
layer,
unit,
dropout,
num_spk,
predict_noise,
emb_D,
nonlinear,
alpha,
max_iteration,
):
model = DPCLE2ESeparator(
input_dim=input_dim,
rnn_type=rnn_type,
layer=layer,
unit=unit,
dropout=dropout,
num_spk=num_spk,
predict_noise=predict_noise,
emb_D=emb_D,
nonlinear=nonlinear,
alpha=alpha,
max_iteration=max_iteration,
)
model.train()
real = torch.rand(2, 10, input_dim)
imag = torch.rand(2, 10, input_dim)
x = ComplexTensor(real, imag)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert isinstance(masked[0], ComplexTensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["blstm"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [2])
@pytest.mark.parametrize("predict_noise", [True, False])
@pytest.mark.parametrize("emb_D", [40])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("alpha", [1.0, 5.0])
@pytest.mark.parametrize("max_iteration", [100, 500])
def test_dpcl_e2e_separator_forward_backward_real(
input_dim,
rnn_type,
layer,
unit,
dropout,
num_spk,
predict_noise,
emb_D,
nonlinear,
alpha,
max_iteration,
):
model = DPCLE2ESeparator(
input_dim=input_dim,
rnn_type=rnn_type,
layer=layer,
unit=unit,
dropout=dropout,
num_spk=num_spk,
predict_noise=predict_noise,
emb_D=emb_D,
nonlinear=nonlinear,
alpha=alpha,
max_iteration=max_iteration,
)
model.train()
x = torch.rand(2, 10, input_dim)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert isinstance(masked[0], Tensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
def test_dpcl_e2e_separator_invalid_type():
    """An unsupported nonlinearity name must raise ValueError."""
    with pytest.raises(ValueError):
        DPCLE2ESeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=2,
            emb_D=40,
            nonlinear="fff",
            alpha=5.0,
            max_iteration=100,
        )
def test_dpcl_e2e_separator_output():
    """Check output/mask shapes for 1-3 speakers in eval mode."""
    feature = torch.rand(1, 10, 10)
    ilens = torch.tensor([10], dtype=torch.long)
    for n_src in range(1, 4):
        separator = DPCLE2ESeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=n_src,
            emb_D=40,
            nonlinear="relu",
            alpha=5.0,
            max_iteration=100,
        )
        separator.eval()
        separated, _, others = separator(feature, ilens)
        assert isinstance(separated, list)
        assert isinstance(others, dict)
        for spk in range(1, n_src + 1):
            key = "mask_spk{}".format(spk)
            assert key in others
            assert separated[spk - 1].shape == others[key].shape
| 4,049 | 25.821192 | 77 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_dc_crn_separator.py | import pytest
import torch
from packaging.version import parse as V
from torch_complex import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.dc_crn_separator import DC_CRNSeparator
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
@pytest.mark.parametrize("input_dim", [33, 65])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("input_channels", [[2, 4], [2, 4, 4]])
@pytest.mark.parametrize("enc_hid_channels", [2, 5])
@pytest.mark.parametrize("enc_layers", [2])
@pytest.mark.parametrize("glstm_groups", [2])
@pytest.mark.parametrize("glstm_layers", [1, 2])
@pytest.mark.parametrize("glstm_bidirectional", [True, False])
@pytest.mark.parametrize("glstm_rearrange", [True, False])
@pytest.mark.parametrize("mode", ["mapping", "masking"])
def test_dc_crn_separator_forward_backward_complex(
input_dim,
num_spk,
input_channels,
enc_hid_channels,
enc_layers,
glstm_groups,
glstm_layers,
glstm_bidirectional,
glstm_rearrange,
mode,
):
model = DC_CRNSeparator(
input_dim=input_dim,
num_spk=num_spk,
input_channels=input_channels,
enc_hid_channels=enc_hid_channels,
enc_kernel_size=(1, 3),
enc_padding=(0, 1),
enc_last_kernel_size=(1, 3),
enc_last_stride=(1, 2),
enc_last_padding=(0, 1),
enc_layers=enc_layers,
skip_last_kernel_size=(1, 3),
skip_last_stride=(1, 1),
skip_last_padding=(0, 1),
glstm_groups=glstm_groups,
glstm_layers=glstm_layers,
glstm_bidirectional=glstm_bidirectional,
glstm_rearrange=glstm_rearrange,
mode=mode,
)
model.train()
real = torch.rand(2, 10, input_dim)
imag = torch.rand(2, 10, input_dim)
x = torch.complex(real, imag) if is_torch_1_9_plus else ComplexTensor(real, imag)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert is_complex(masked[0])
assert len(masked) == num_spk
masked[0].abs().mean().backward()
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("input_channels", [[4, 4], [6, 4, 4]])
@pytest.mark.parametrize(
"enc_kernel_size, enc_padding", [((1, 3), (0, 1)), ((1, 5), (0, 2))]
)
@pytest.mark.parametrize("enc_last_stride", [(1, 2)])
@pytest.mark.parametrize(
"enc_last_kernel_size, enc_last_padding",
[((1, 4), (0, 1)), ((1, 5), (0, 2))],
)
@pytest.mark.parametrize("skip_last_stride", [(1, 1)])
@pytest.mark.parametrize(
"skip_last_kernel_size, skip_last_padding",
[((1, 3), (0, 1)), ((1, 5), (0, 2))],
)
def test_dc_crn_separator_multich_input(
num_spk,
input_channels,
enc_kernel_size,
enc_padding,
enc_last_kernel_size,
enc_last_stride,
enc_last_padding,
skip_last_kernel_size,
skip_last_stride,
skip_last_padding,
):
model = DC_CRNSeparator(
input_dim=33,
num_spk=num_spk,
input_channels=input_channels,
enc_hid_channels=2,
enc_kernel_size=enc_kernel_size,
enc_padding=enc_padding,
enc_last_kernel_size=enc_last_kernel_size,
enc_last_stride=enc_last_stride,
enc_last_padding=enc_last_padding,
enc_layers=3,
skip_last_kernel_size=skip_last_kernel_size,
skip_last_stride=skip_last_stride,
skip_last_padding=skip_last_padding,
)
model.train()
real = torch.rand(2, 10, input_channels[0] // 2, 33)
imag = torch.rand(2, 10, input_channels[0] // 2, 33)
x = torch.complex(real, imag) if is_torch_1_9_plus else ComplexTensor(real, imag)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert is_complex(masked[0])
assert len(masked) == num_spk
masked[0].abs().mean().backward()
def test_dc_crn_separator_invalid_enc_layer():
    """Too few encoder layers must trip an internal assertion."""
    with pytest.raises(AssertionError):
        DC_CRNSeparator(
            input_dim=17,
            input_channels=[2, 2, 4],
            enc_layers=1,
        )
def test_dc_crn_separator_invalid_type():
    """An unknown ``mode`` must raise ValueError."""
    with pytest.raises(ValueError):
        DC_CRNSeparator(
            input_dim=17,
            input_channels=[2, 2, 4],
            mode="xxx",
        )
def test_dc_crn_separator_output():
    """Check output/mask shapes for 1 and 2 speakers in eval mode."""
    re = torch.rand(2, 10, 17)
    im = torch.rand(2, 10, 17)
    spectrum = torch.complex(re, im) if is_torch_1_9_plus else ComplexTensor(re, im)
    ilens = torch.tensor([10, 8], dtype=torch.long)
    for n_src in range(1, 3):
        separator = DC_CRNSeparator(
            input_dim=17,
            num_spk=n_src,
            input_channels=[2, 2, 4],
        )
        separator.eval()
        separated, _, others = separator(spectrum, ilens)
        assert isinstance(separated, list)
        assert isinstance(others, dict)
        for spk in range(1, n_src + 1):
            key = "mask_spk{}".format(spk)
            assert key in others
            assert separated[spk - 1].shape == others[key].shape
| 4,997 | 29.662577 | 85 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_svoice_separator.py | import pytest
import torch
from torch import Tensor
from espnet2.enh.separator.svoice_separator import SVoiceSeparator
@pytest.mark.parametrize("input_dim", [1])
@pytest.mark.parametrize("enc_dim", [4])
@pytest.mark.parametrize("kernel_size", [4])
@pytest.mark.parametrize("hidden_size", [4])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("num_layers", [1, 2])
@pytest.mark.parametrize("segment_size", [2])
@pytest.mark.parametrize("bidirectional", [False])
@pytest.mark.parametrize("input_normalize", [False])
def test_svoice_separator_forward_backward(
input_dim,
enc_dim,
kernel_size,
hidden_size,
num_spk,
num_layers,
segment_size,
bidirectional,
input_normalize,
):
model = SVoiceSeparator(
input_dim=input_dim,
enc_dim=enc_dim,
kernel_size=kernel_size,
hidden_size=hidden_size,
num_spk=num_spk,
num_layers=num_layers,
segment_size=segment_size,
bidirectional=bidirectional,
input_normalize=input_normalize,
)
model.train()
x = torch.rand(2, 800)
x_lens = torch.tensor([400, 300], dtype=torch.long)
separated, _, _ = model(x, ilens=x_lens)
assert isinstance(separated[0][0], Tensor)
assert len(separated) == num_layers
separated[0][0].mean().backward()
def test_svoice_separator_output_train():
    """Train mode returns nested per-layer lists of separated waveforms."""
    wav = torch.rand(2, 800)
    wav_lens = torch.tensor([10, 8], dtype=torch.long)
    for n_src in range(1, 3):
        separator = SVoiceSeparator(
            input_dim=12,
            enc_dim=8,
            kernel_size=8,
            hidden_size=8,
            num_spk=n_src,
            num_layers=4,
            segment_size=2,
            bidirectional=False,
            input_normalize=False,
        )
        separator.train()
        waveforms, _, _ = separator(wav, wav_lens)
        assert isinstance(waveforms, list)
        assert isinstance(waveforms[0], list)
        assert waveforms[0][0][0].shape == wav[0].shape
def test_svoice_separator_output_eval():
    """Eval mode returns a flat list of separated waveforms."""
    wav = torch.rand(2, 800)
    wav_lens = torch.tensor([10, 8], dtype=torch.long)
    for n_src in range(1, 3):
        separator = SVoiceSeparator(
            input_dim=12,
            enc_dim=8,
            kernel_size=8,
            hidden_size=8,
            num_spk=n_src,
            num_layers=4,
            segment_size=2,
            bidirectional=False,
            input_normalize=False,
        )
        separator.eval()
        waveforms, _, _ = separator(wav, wav_lens)
        assert isinstance(waveforms, list)
        assert waveforms[0][0].shape == wav[0].shape
| 2,605 | 26.431579 | 66 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_tcn_separator.py | import pytest
import torch
from torch import Tensor
from torch_complex import ComplexTensor
from espnet2.enh.separator.tcn_separator import TCNSeparator
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("bottleneck_dim", [5])
@pytest.mark.parametrize("hidden_dim", [5])
@pytest.mark.parametrize("kernel", [3])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("stack", [1, 3])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("norm_type", ["BN", "gLN", "cLN"])
def test_tcn_separator_forward_backward_complex(
input_dim,
layer,
num_spk,
nonlinear,
stack,
bottleneck_dim,
hidden_dim,
kernel,
causal,
norm_type,
):
model = TCNSeparator(
input_dim=input_dim,
num_spk=num_spk,
layer=layer,
stack=stack,
bottleneck_dim=bottleneck_dim,
hidden_dim=hidden_dim,
kernel=kernel,
causal=causal,
norm_type=norm_type,
nonlinear=nonlinear,
)
model.train()
real = torch.rand(2, 10, input_dim)
imag = torch.rand(2, 10, input_dim)
x = ComplexTensor(real, imag)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert isinstance(masked[0], ComplexTensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("bottleneck_dim", [5])
@pytest.mark.parametrize("hidden_dim", [5])
@pytest.mark.parametrize("kernel", [3])
@pytest.mark.parametrize("layer", [1, 2])
@pytest.mark.parametrize("stack", [1, 2])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("norm_type", ["BN", "gLN", "cLN"])
def test_tcn_separator_forward_backward_real(
input_dim,
layer,
num_spk,
nonlinear,
stack,
bottleneck_dim,
hidden_dim,
kernel,
causal,
norm_type,
):
model = TCNSeparator(
input_dim=input_dim,
num_spk=num_spk,
layer=layer,
stack=stack,
bottleneck_dim=bottleneck_dim,
hidden_dim=hidden_dim,
kernel=kernel,
causal=causal,
norm_type=norm_type,
nonlinear=nonlinear,
)
x = torch.rand(2, 10, input_dim)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert isinstance(masked[0], Tensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
def test_tcn_separator_invalid_type():
    """Unknown nonlinearity or norm type must raise ValueError."""
    with pytest.raises(ValueError):
        TCNSeparator(
            input_dim=10,
            nonlinear="fff",
        )
    with pytest.raises(ValueError):
        TCNSeparator(
            input_dim=10,
            norm_type="xxx",
        )
def test_tcn_separator_output():
    """Check output/mask shapes for 1 and 2 speakers in eval mode."""
    x = torch.rand(2, 10, 10)
    x_lens = torch.tensor([10, 8], dtype=torch.long)
    for num_spk in range(1, 3):
        model = TCNSeparator(
            input_dim=10,
            # bug fix: the loop variable was passed as ``layer`` and
            # ``num_spk`` was left at its default, so the speaker count
            # under test never varied
            num_spk=num_spk,
            layer=2,
            stack=2,
            bottleneck_dim=3,
            hidden_dim=3,
            kernel=3,
            causal=False,
        )
        model.eval()
        specs, _, others = model(x, x_lens)
        assert isinstance(specs, list)
        assert len(specs) == num_spk
        assert isinstance(others, dict)
        for n in range(num_spk):
            assert "mask_spk{}".format(n + 1) in others
            assert specs[n].shape == others["mask_spk{}".format(n + 1)].shape
def test_tcn_streaming():
    """Frame-by-frame streaming inference must match offline inference."""
    seq_len = 100
    n_src = 2
    batch = 2
    separator = TCNSeparator(
        input_dim=128,
        num_spk=2,
        layer=2,
        stack=3,
        bottleneck_dim=32,
        hidden_dim=64,
        kernel=3,
        causal=True,
        norm_type="cLN",
    )
    separator.eval()
    feats = torch.randn((batch, seq_len, 128))
    ilens = torch.LongTensor([seq_len] * batch)
    with torch.no_grad():
        offline, _, _ = separator.forward(feats, ilens=ilens)
    # feed the same features one frame at a time, threading the state through
    state = None
    streamed = []
    for t in range(seq_len):
        frame = feats[:, t : t + 1, :]
        frame_out, state, _ = separator.forward_streaming(frame, state)
        streamed.append(frame_out)
    for t in range(seq_len):
        for spk in range(n_src):
            torch.testing.assert_allclose(
                streamed[t][spk], offline[spk][:, t : t + 1, :]
            )
| 4,667 | 26.139535 | 77 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_fasnet_separator.py | import pytest
import torch
from torch import Tensor
from espnet2.enh.separator.fasnet_separator import FaSNetSeparator
@pytest.mark.parametrize("input_dim", [1])
@pytest.mark.parametrize("enc_dim", [4])
@pytest.mark.parametrize("feature_dim", [4])
@pytest.mark.parametrize("hidden_dim", [4])
@pytest.mark.parametrize("segment_size", [2])
@pytest.mark.parametrize("layer", [1, 2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("win_len", [2, 4])
@pytest.mark.parametrize("context_len", [2, 4])
@pytest.mark.parametrize("fasnet_type", ["fasnet", "ifasnet"])
@pytest.mark.parametrize("sr", [100])
def test_fasnet_separator_forward_backward_real(
input_dim,
enc_dim,
feature_dim,
hidden_dim,
segment_size,
layer,
num_spk,
win_len,
context_len,
fasnet_type,
sr,
):
model = FaSNetSeparator(
input_dim=input_dim,
enc_dim=enc_dim,
feature_dim=feature_dim,
hidden_dim=hidden_dim,
segment_size=segment_size,
layer=layer,
num_spk=num_spk,
win_len=win_len,
context_len=context_len,
fasnet_type=fasnet_type,
sr=sr,
)
model.train()
x = torch.rand(2, 400, 4)
x_lens = torch.tensor([400, 300], dtype=torch.long)
separated, flens, others = model(x, ilens=x_lens)
assert isinstance(separated[0], Tensor)
assert len(separated) == num_spk
separated[0].abs().mean().backward()
@pytest.mark.parametrize("fasnet_type", ["fasnet", "ifasnet"])
def test_fasnet_separator_output(fasnet_type):
x = torch.rand(2, 800, 4)
x_lens = torch.tensor([10, 8], dtype=torch.long)
for num_spk in range(1, 3):
model = FaSNetSeparator(
input_dim=16,
enc_dim=16,
feature_dim=16,
hidden_dim=16,
segment_size=4,
layer=2,
num_spk=num_spk,
win_len=2,
context_len=2,
fasnet_type=fasnet_type,
sr=100,
)
model.eval()
specs, _, others = model(x, x_lens)
assert isinstance(specs, list)
assert isinstance(others, dict)
assert x[:, :, 0].shape == specs[0].shape
| 2,209 | 25.95122 | 66 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_dccrn_separator.py | import pytest
import torch
from packaging.version import parse as V
from torch_complex import ComplexTensor
from espnet2.enh.separator.dccrn_separator import DCCRNSeparator
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
@pytest.mark.parametrize("input_dim", [9])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("rnn_layer", [2, 3])
@pytest.mark.parametrize("rnn_units", [32])
@pytest.mark.parametrize("masking_mode", ["E", "C", "R"])
@pytest.mark.parametrize("use_clstm", [True, False])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("use_cbn", [True, False])
@pytest.mark.parametrize("kernel_size", [5])
@pytest.mark.parametrize("use_builtin_complex", [True, False])
@pytest.mark.parametrize("use_noise_mask", [True, False])
def test_dccrn_separator_forward_backward_complex(
input_dim,
num_spk,
rnn_layer,
rnn_units,
masking_mode,
use_clstm,
bidirectional,
use_cbn,
kernel_size,
use_builtin_complex,
use_noise_mask,
):
model = DCCRNSeparator(
input_dim=input_dim,
num_spk=num_spk,
rnn_layer=rnn_layer,
rnn_units=rnn_units,
masking_mode=masking_mode,
use_clstm=use_clstm,
bidirectional=bidirectional,
use_cbn=use_cbn,
kernel_size=kernel_size,
kernel_num=[4, 8, 16],
use_builtin_complex=use_builtin_complex,
use_noise_mask=use_noise_mask,
)
model.train()
real = torch.rand(2, 10, input_dim)
imag = torch.rand(2, 10, input_dim)
x = ComplexTensor(real, imag)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
if use_builtin_complex and is_torch_1_9_plus:
assert isinstance(masked[0], torch.Tensor)
else:
assert isinstance(masked[0], ComplexTensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
def test_dccrn_separator_invalid_type():
    """An unknown ``masking_mode`` must raise ValueError."""
    with pytest.raises(ValueError):
        DCCRNSeparator(
            input_dim=10,
            masking_mode="fff",
        )
def test_rnn_separator_output():
    """Check output/mask shapes for 1 and 2 speakers in eval mode.

    NOTE(review): despite the name, this exercises DCCRNSeparator.
    """
    spectrum = ComplexTensor(torch.rand(2, 10, 9), torch.rand(2, 10, 9))
    ilens = torch.tensor([10, 8], dtype=torch.long)
    for n_src in range(1, 3):
        separator = DCCRNSeparator(
            input_dim=9,
            num_spk=n_src,
            rnn_units=32,
            kernel_num=[4, 8, 16],
        )
        separator.eval()
        separated, _, others = separator(spectrum, ilens)
        assert isinstance(separated, list)
        assert isinstance(others, dict)
        for spk in range(1, n_src + 1):
            key = "mask_spk{}".format(spk)
            assert key in others
            assert separated[spk - 1].shape == others[key].shape
| 2,790 | 28.378947 | 77 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_dprnn_separator.py | import pytest
import torch
from torch import Tensor
from torch_complex import ComplexTensor
from espnet2.enh.separator.dprnn_separator import DPRNNSeparator
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("segment_size", [2, 4])
def test_dprnn_separator_forward_backward_complex(
input_dim,
rnn_type,
bidirectional,
layer,
unit,
dropout,
num_spk,
nonlinear,
segment_size,
):
model = DPRNNSeparator(
input_dim=input_dim,
rnn_type=rnn_type,
bidirectional=bidirectional,
num_spk=num_spk,
nonlinear=nonlinear,
layer=layer,
unit=unit,
segment_size=segment_size,
dropout=dropout,
)
model.train()
real = torch.rand(2, 10, input_dim)
imag = torch.rand(2, 10, input_dim)
x = ComplexTensor(real, imag)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert isinstance(masked[0], ComplexTensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("segment_size", [2, 4])
def test_dprnn_separator_forward_backward_real(
input_dim,
rnn_type,
bidirectional,
layer,
unit,
dropout,
num_spk,
nonlinear,
segment_size,
):
model = DPRNNSeparator(
input_dim=input_dim,
rnn_type=rnn_type,
bidirectional=bidirectional,
num_spk=num_spk,
nonlinear=nonlinear,
layer=layer,
unit=unit,
segment_size=segment_size,
dropout=dropout,
)
model.train()
x = torch.rand(2, 10, input_dim)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert isinstance(masked[0], Tensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
def test_dprnn_separator_invalid_type():
    """An unsupported nonlinearity name must raise ValueError."""
    with pytest.raises(ValueError):
        DPRNNSeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=2,
            nonlinear="fff",
            segment_size=2,
        )
def test_dprnn_separator_output():
    """Check output/mask shapes for 1 and 2 speakers in eval mode."""
    x = torch.rand(2, 10, 10)
    x_lens = torch.tensor([10, 8], dtype=torch.long)
    for num_spk in range(1, 3):
        model = DPRNNSeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            # bug fix: pass the loop variable instead of a hard-coded 2,
            # so the single-speaker case is actually tested
            num_spk=num_spk,
            nonlinear="relu",
            segment_size=2,
        )
        model.eval()
        specs, _, others = model(x, x_lens)
        assert isinstance(specs, list)
        assert len(specs) == num_spk
        assert isinstance(others, dict)
        assert x.shape == specs[0].shape
        for n in range(num_spk):
            assert "mask_spk{}".format(n + 1) in others
            assert specs[n].shape == others["mask_spk{}".format(n + 1)].shape
| 3,668 | 25.977941 | 77 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_dan_separator.py | import pytest
import torch
from torch import Tensor
from torch_complex import ComplexTensor
from espnet2.enh.separator.dan_separator import DANSeparator
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["blstm"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [2])
@pytest.mark.parametrize("emb_D", [40])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_dan_separator_forward_backward_complex(
input_dim, rnn_type, layer, unit, dropout, num_spk, emb_D, nonlinear
):
model = DANSeparator(
input_dim=input_dim,
rnn_type=rnn_type,
layer=layer,
unit=unit,
dropout=dropout,
num_spk=num_spk,
emb_D=emb_D,
nonlinear=nonlinear,
)
model.train()
real = torch.rand(2, 10, input_dim)
imag = torch.rand(2, 10, input_dim)
x = ComplexTensor(real, imag)
x_lens = torch.tensor([10, 8], dtype=torch.long)
o = []
for i in range(num_spk):
o.append(ComplexTensor(real, imag))
sep_others = {}
sep_others["feature_ref"] = o
masked, flens, others = model(x, ilens=x_lens, additional=sep_others)
assert isinstance(masked[0], ComplexTensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["blstm"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("emb_D", [40])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_dan_separator_forward_backward_real(
input_dim, rnn_type, layer, unit, dropout, num_spk, emb_D, nonlinear
):
model = DANSeparator(
input_dim=input_dim,
rnn_type=rnn_type,
layer=layer,
unit=unit,
dropout=dropout,
num_spk=num_spk,
emb_D=emb_D,
nonlinear=nonlinear,
)
model.train()
x = torch.rand(2, 10, input_dim)
x_lens = torch.tensor([10, 8], dtype=torch.long)
o = []
for i in range(num_spk):
o.append(ComplexTensor(x, x))
sep_others = {}
sep_others["feature_ref"] = o
masked, flens, others = model(x, ilens=x_lens, additional=sep_others)
assert isinstance(masked[0], Tensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
def test_dan_separator_invalid_type():
    """An unsupported nonlinearity name must raise ValueError."""
    with pytest.raises(ValueError):
        DANSeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=2,
            emb_D=40,
            nonlinear="fff",
        )
def test_dan_separator_output():
    """Check output/mask shapes for 1-3 speakers in eval mode."""
    feature = torch.rand(1, 10, 10)
    ilens = torch.tensor([10], dtype=torch.long)
    for n_src in range(1, 4):
        separator = DANSeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=n_src,
            emb_D=40,
            nonlinear="relu",
        )
        separator.eval()
        separated, _, others = separator(feature, ilens)
        assert isinstance(separated, list)
        assert isinstance(others, dict)
        for spk in range(1, n_src + 1):
            key = "mask_spk{}".format(spk)
            assert key in others
            assert separated[spk - 1].shape == others[key].shape
| 3,559 | 26.596899 | 77 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_skim_separator.py | import pytest
import torch
from torch import Tensor
from torch_complex import ComplexTensor
from espnet2.enh.separator.skim_separator import SkiMSeparator
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("mem_type", ["hc", "c", "h", None])
@pytest.mark.parametrize("segment_size", [2, 4])
@pytest.mark.parametrize("seg_overlap", [False, True])
def test_skim_separator_forward_backward_complex(
input_dim,
layer,
causal,
unit,
dropout,
num_spk,
nonlinear,
mem_type,
segment_size,
seg_overlap,
):
model = SkiMSeparator(
input_dim=input_dim,
causal=causal,
num_spk=num_spk,
nonlinear=nonlinear,
layer=layer,
unit=unit,
segment_size=segment_size,
dropout=dropout,
mem_type=mem_type,
seg_overlap=seg_overlap,
)
model.train()
real = torch.rand(2, 10, input_dim)
imag = torch.rand(2, 10, input_dim)
x = ComplexTensor(real, imag)
x_lens = torch.tensor([10, 8], dtype=torch.long)
masked, flens, others = model(x, ilens=x_lens)
assert isinstance(masked[0], ComplexTensor)
assert len(masked) == num_spk
masked[0].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("mem_type", ["hc", "c", "h", "id", None])
@pytest.mark.parametrize("segment_size", [2, 4])
@pytest.mark.parametrize("seg_overlap", [False, True])
def test_skim_separator_forward_backward_real(
    input_dim,
    layer,
    causal,
    unit,
    dropout,
    num_spk,
    nonlinear,
    mem_type,
    segment_size,
    seg_overlap,
):
    """SkiM forward/backward must work on real-valued feature input."""
    separator = SkiMSeparator(
        input_dim=input_dim,
        causal=causal,
        num_spk=num_spk,
        nonlinear=nonlinear,
        layer=layer,
        unit=unit,
        segment_size=segment_size,
        dropout=dropout,
        mem_type=mem_type,
        seg_overlap=seg_overlap,
    )
    separator.train()
    feats = torch.rand(2, 10, input_dim)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    separated, _, _ = separator(feats, ilens=lengths)
    assert isinstance(separated[0], Tensor)
    assert len(separated) == num_spk
    separated[0].abs().mean().backward()
def test_skim_separator_invalid_type():
    """Unknown nonlinear/mem_type names must raise ValueError."""
    with pytest.raises(ValueError):
        SkiMSeparator(
            input_dim=10,
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=2,
            nonlinear="fff",  # invalid activation name
            mem_type="aaa",  # invalid memory type
            segment_size=2,
        )
def test_skim_separator_output():
    """Check SkiMSeparator output structure for 1 and 2 speakers.

    Fix: the separator was previously constructed with a hard-coded
    ``num_spk=2`` instead of the loop variable, so the single-speaker
    configuration was never actually tested and the per-speaker loop
    below was inconsistent with the model.
    """
    x = torch.rand(2, 10, 10)
    x_lens = torch.tensor([10, 8], dtype=torch.long)
    for num_spk in range(1, 3):
        model = SkiMSeparator(
            input_dim=10,
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=num_spk,  # was hard-coded to 2, masking the loop variable
            nonlinear="relu",
            segment_size=2,
        )
        model.eval()
        specs, _, others = model(x, x_lens)
        assert isinstance(specs, list)
        assert isinstance(others, dict)
        assert len(specs) == num_spk
        for n in range(num_spk):
            assert "mask_spk{}".format(n + 1) in others
            # Separated spectra keep the input shape; masks match them.
            assert x.shape == specs[n].shape
            assert specs[n].shape == others["mask_spk{}".format(n + 1)].shape
def test_skim_streaming():
    """Frame-by-frame streaming must reproduce full-sequence outputs."""
    seq_len, num_spk, batch = 100, 2, 2
    separator = SkiMSeparator(
        input_dim=128,
        causal=True,
        num_spk=num_spk,
        layer=2,
        unit=32,
        segment_size=100,
    )
    separator.eval()
    feats = torch.randn((batch, seq_len, 128))
    ilens = torch.LongTensor([seq_len] * batch)
    with torch.no_grad():
        full_out, _, _ = separator.forward(feats, ilens=ilens)
    # Feed one frame at a time, carrying the recurrent state along.
    state = None
    frame_outs = []
    for t in range(seq_len):
        out, state, _ = separator.forward_streaming(feats[:, t : t + 1, :], state)
        frame_outs.append(out)
    for t in range(seq_len):
        for spk in range(num_spk):
            torch.testing.assert_allclose(
                frame_outs[t][spk], full_out[spk][:, t : t + 1, :]
            )
| 4,718 | 26.436047 | 77 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_transformer_separator.py | import pytest
import torch
from torch import Tensor
from torch_complex import ComplexTensor
from espnet2.enh.separator.transformer_separator import TransformerSeparator
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("adim", [8])
@pytest.mark.parametrize("layers", [1, 3])
@pytest.mark.parametrize("aheads", [2])
@pytest.mark.parametrize("linear_units", [10])
@pytest.mark.parametrize("positionwise_layer_type", ["linear"])
@pytest.mark.parametrize("normalize_before", [True])
@pytest.mark.parametrize("concat_after", [True])
@pytest.mark.parametrize("use_scaled_pos_enc", [True])
@pytest.mark.parametrize("dropout_rate", [0.1])
@pytest.mark.parametrize("positional_dropout_rate", [0.1])
@pytest.mark.parametrize("attention_dropout_rate", [0.1])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_transformer_separator_forward_backward_complex(
    input_dim,
    adim,
    layers,
    aheads,
    linear_units,
    num_spk,
    nonlinear,
    positionwise_layer_type,
    normalize_before,
    concat_after,
    dropout_rate,
    positional_dropout_rate,
    attention_dropout_rate,
    use_scaled_pos_enc,
):
    """Transformer separator forward/backward with complex input."""
    config = dict(
        input_dim=input_dim,
        num_spk=num_spk,
        adim=adim,
        aheads=aheads,
        layers=layers,
        linear_units=linear_units,
        positionwise_layer_type=positionwise_layer_type,
        normalize_before=normalize_before,
        concat_after=concat_after,
        dropout_rate=dropout_rate,
        positional_dropout_rate=positional_dropout_rate,
        attention_dropout_rate=attention_dropout_rate,
        use_scaled_pos_enc=use_scaled_pos_enc,
        nonlinear=nonlinear,
    )
    separator = TransformerSeparator(**config)
    separator.train()
    spec = ComplexTensor(torch.rand(2, 10, input_dim), torch.rand(2, 10, input_dim))
    lengths = torch.tensor([10, 8], dtype=torch.long)
    separated, _, _ = separator(spec, ilens=lengths)
    assert isinstance(separated[0], ComplexTensor)
    assert len(separated) == num_spk
    separated[0].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("adim", [8])
@pytest.mark.parametrize("layers", [1, 3])
@pytest.mark.parametrize("aheads", [2])
@pytest.mark.parametrize("linear_units", [10])
@pytest.mark.parametrize("positionwise_layer_type", ["linear"])
@pytest.mark.parametrize("normalize_before", [True])
@pytest.mark.parametrize("concat_after", [True])
@pytest.mark.parametrize("use_scaled_pos_enc", [True])
@pytest.mark.parametrize("dropout_rate", [0.1])
@pytest.mark.parametrize("positional_dropout_rate", [0.1])
@pytest.mark.parametrize("attention_dropout_rate", [0.1])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_transformer_separator_forward_backward_real(
    input_dim,
    adim,
    layers,
    aheads,
    linear_units,
    num_spk,
    nonlinear,
    positionwise_layer_type,
    normalize_before,
    concat_after,
    dropout_rate,
    positional_dropout_rate,
    attention_dropout_rate,
    use_scaled_pos_enc,
):
    """Transformer separator forward/backward with real input."""
    config = dict(
        input_dim=input_dim,
        num_spk=num_spk,
        adim=adim,
        aheads=aheads,
        layers=layers,
        linear_units=linear_units,
        positionwise_layer_type=positionwise_layer_type,
        normalize_before=normalize_before,
        concat_after=concat_after,
        dropout_rate=dropout_rate,
        positional_dropout_rate=positional_dropout_rate,
        attention_dropout_rate=attention_dropout_rate,
        use_scaled_pos_enc=use_scaled_pos_enc,
        nonlinear=nonlinear,
    )
    separator = TransformerSeparator(**config)
    separator.train()
    feats = torch.rand(2, 10, input_dim)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    separated, _, _ = separator(feats, ilens=lengths)
    assert isinstance(separated[0], Tensor)
    assert len(separated) == num_spk
    separated[0].abs().mean().backward()
def test_transformer_separator_invalid_type():
    """An unknown nonlinear name must raise ValueError."""
    with pytest.raises(ValueError):
        TransformerSeparator(input_dim=10, nonlinear="fff")
def test_transformer_separator_output():
    """Check output structure and per-speaker mask shapes."""
    feats = torch.rand(2, 10, 10)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    for n_src in range(1, 3):
        separator = TransformerSeparator(
            input_dim=10,
            layers=2,
            adim=4,
            aheads=2,
            num_spk=n_src,
            linear_units=10,
            nonlinear="relu",
        )
        separator.eval()
        separated, _, aux = separator(feats, lengths)
        assert isinstance(separated, list)
        assert isinstance(aux, dict)
        for spk in range(n_src):
            key = "mask_spk{}".format(spk + 1)
            assert key in aux
            assert separated[spk].shape == aux[key].shape
| 4,849 | 29.696203 | 77 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_dpcl_separator.py | import pytest
import torch
from torch_complex import ComplexTensor
from espnet2.enh.separator.dpcl_separator import DPCLSeparator
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["blstm"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [2])
@pytest.mark.parametrize("emb_D", [40])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_dpcl_separator_forward_backward_complex(
    input_dim, rnn_type, layer, unit, dropout, num_spk, emb_D, nonlinear
):
    """DPCL forward/backward with complex spectrum input."""
    separator = DPCLSeparator(
        input_dim=input_dim,
        rnn_type=rnn_type,
        layer=layer,
        unit=unit,
        dropout=dropout,
        num_spk=num_spk,
        emb_D=emb_D,
        nonlinear=nonlinear,
    )
    separator.train()
    spec = ComplexTensor(torch.rand(2, 10, input_dim), torch.rand(2, 10, input_dim))
    lengths = torch.tensor([10, 8], dtype=torch.long)
    _, _, aux = separator(spec, ilens=lengths)
    # DPCL emits a time-frequency embedding rather than per-speaker masks.
    assert "tf_embedding" in aux
    aux["tf_embedding"].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["blstm"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("emb_D", [40])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_dpcl_separator_forward_backward_real(
    input_dim, rnn_type, layer, unit, dropout, num_spk, emb_D, nonlinear
):
    """DPCL forward/backward with real-valued feature input."""
    separator = DPCLSeparator(
        input_dim=input_dim,
        rnn_type=rnn_type,
        layer=layer,
        unit=unit,
        dropout=dropout,
        num_spk=num_spk,
        emb_D=emb_D,
        nonlinear=nonlinear,
    )
    separator.train()
    feats = torch.rand(2, 10, input_dim)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    _, _, aux = separator(feats, ilens=lengths)
    assert "tf_embedding" in aux
    aux["tf_embedding"].abs().mean().backward()
def test_dpcl_separator_invalid_type():
    """An unknown nonlinear name must raise ValueError."""
    with pytest.raises(ValueError):
        DPCLSeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=2,
            emb_D=40,
            nonlinear="fff",  # invalid activation name
        )
def test_dpcl_separator_output():
    """Check DPCL output structure for 1-3 speakers."""
    feats = torch.rand(2, 10, 10)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    for n_src in range(1, 4):
        separator = DPCLSeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=n_src,
            emb_D=40,
            nonlinear="relu",
        )
        separator.eval()
        separated, _, aux = separator(feats, lengths)
        assert isinstance(separated, list)
        assert isinstance(aux, dict)
        assert len(separated) == n_src, len(separated)
        # The TF embedding is shared across all speakers.
        assert "tf_embedding" in aux
| 3,127 | 26.928571 | 72 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_conformer_separator.py | import pytest
import torch
from torch import Tensor
from torch_complex.tensor import ComplexTensor
from espnet2.enh.separator.conformer_separator import ConformerSeparator
@pytest.mark.parametrize("input_dim", [15])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("adim", [8])
@pytest.mark.parametrize("layers", [1, 3])
@pytest.mark.parametrize("aheads", [2])
@pytest.mark.parametrize("linear_units", [100])
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d"])
@pytest.mark.parametrize("positionwise_conv_kernel_size", [3])
@pytest.mark.parametrize("normalize_before", [True])
@pytest.mark.parametrize("concat_after", [True])
@pytest.mark.parametrize("dropout_rate", [0.1])
@pytest.mark.parametrize("input_layer", ["linear", "conv2d"])
@pytest.mark.parametrize("positional_dropout_rate", [0.1])
@pytest.mark.parametrize("attention_dropout_rate", [0.1])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("conformer_pos_enc_layer_type", ["rel_pos"])
@pytest.mark.parametrize("conformer_self_attn_layer_type", ["rel_selfattn"])
@pytest.mark.parametrize("conformer_activation_type", ["relu", "tanh"])
@pytest.mark.parametrize("use_macaron_style_in_conformer", [True])
@pytest.mark.parametrize("use_cnn_in_conformer", [True])
@pytest.mark.parametrize("conformer_enc_kernel_size", [3, 5, 7])
@pytest.mark.parametrize("padding_idx", [-1])
def test_conformer_separator_forward_backward_complex(
    input_dim,
    num_spk,
    adim,
    aheads,
    layers,
    linear_units,
    positionwise_layer_type,
    positionwise_conv_kernel_size,
    normalize_before,
    concat_after,
    dropout_rate,
    input_layer,
    positional_dropout_rate,
    attention_dropout_rate,
    nonlinear,
    conformer_pos_enc_layer_type,
    conformer_self_attn_layer_type,
    conformer_activation_type,
    use_macaron_style_in_conformer,
    use_cnn_in_conformer,
    conformer_enc_kernel_size,
    padding_idx,
):
    """Conformer separator forward/backward with complex input."""
    config = dict(
        input_dim=input_dim,
        num_spk=num_spk,
        adim=adim,
        aheads=aheads,
        layers=layers,
        linear_units=linear_units,
        dropout_rate=dropout_rate,
        positional_dropout_rate=positional_dropout_rate,
        attention_dropout_rate=attention_dropout_rate,
        input_layer=input_layer,
        normalize_before=normalize_before,
        concat_after=concat_after,
        positionwise_layer_type=positionwise_layer_type,
        positionwise_conv_kernel_size=positionwise_conv_kernel_size,
        use_macaron_style_in_conformer=use_macaron_style_in_conformer,
        nonlinear=nonlinear,
        conformer_pos_enc_layer_type=conformer_pos_enc_layer_type,
        conformer_self_attn_layer_type=conformer_self_attn_layer_type,
        conformer_activation_type=conformer_activation_type,
        use_cnn_in_conformer=use_cnn_in_conformer,
        conformer_enc_kernel_size=conformer_enc_kernel_size,
        padding_idx=padding_idx,
    )
    separator = ConformerSeparator(**config)
    separator.train()
    spec = ComplexTensor(torch.rand(2, 10, input_dim), torch.rand(2, 10, input_dim))
    lengths = torch.tensor([10, 8], dtype=torch.long)
    separated, _, _ = separator(spec, ilens=lengths)
    assert isinstance(separated[0], ComplexTensor)
    assert len(separated) == num_spk
    separated[0].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [15])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("adim", [8])
@pytest.mark.parametrize("layers", [1, 3])
@pytest.mark.parametrize("aheads", [2])
@pytest.mark.parametrize("linear_units", [100])
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d"])
@pytest.mark.parametrize("positionwise_conv_kernel_size", [3])
@pytest.mark.parametrize("normalize_before", [True])
@pytest.mark.parametrize("concat_after", [True])
@pytest.mark.parametrize("dropout_rate", [0.1])
@pytest.mark.parametrize("input_layer", ["linear", "conv2d"])
@pytest.mark.parametrize("positional_dropout_rate", [0.1])
@pytest.mark.parametrize("attention_dropout_rate", [0.1])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("conformer_pos_enc_layer_type", ["rel_pos"])
@pytest.mark.parametrize("conformer_self_attn_layer_type", ["rel_selfattn"])
@pytest.mark.parametrize("conformer_activation_type", ["relu", "tanh"])
@pytest.mark.parametrize("use_macaron_style_in_conformer", [True])
@pytest.mark.parametrize("use_cnn_in_conformer", [True])
@pytest.mark.parametrize("conformer_enc_kernel_size", [3, 5, 7])
@pytest.mark.parametrize("padding_idx", [-1])
def test_conformer_separator_forward_backward_real(
    input_dim,
    num_spk,
    adim,
    aheads,
    layers,
    linear_units,
    positionwise_layer_type,
    positionwise_conv_kernel_size,
    normalize_before,
    concat_after,
    dropout_rate,
    input_layer,
    positional_dropout_rate,
    attention_dropout_rate,
    nonlinear,
    conformer_pos_enc_layer_type,
    conformer_self_attn_layer_type,
    conformer_activation_type,
    use_macaron_style_in_conformer,
    use_cnn_in_conformer,
    conformer_enc_kernel_size,
    padding_idx,
):
    """Conformer separator forward/backward with real input."""
    config = dict(
        input_dim=input_dim,
        num_spk=num_spk,
        adim=adim,
        aheads=aheads,
        layers=layers,
        linear_units=linear_units,
        dropout_rate=dropout_rate,
        positional_dropout_rate=positional_dropout_rate,
        attention_dropout_rate=attention_dropout_rate,
        input_layer=input_layer,
        normalize_before=normalize_before,
        concat_after=concat_after,
        positionwise_layer_type=positionwise_layer_type,
        positionwise_conv_kernel_size=positionwise_conv_kernel_size,
        use_macaron_style_in_conformer=use_macaron_style_in_conformer,
        nonlinear=nonlinear,
        conformer_pos_enc_layer_type=conformer_pos_enc_layer_type,
        conformer_self_attn_layer_type=conformer_self_attn_layer_type,
        conformer_activation_type=conformer_activation_type,
        use_cnn_in_conformer=use_cnn_in_conformer,
        conformer_enc_kernel_size=conformer_enc_kernel_size,
        padding_idx=padding_idx,
    )
    separator = ConformerSeparator(**config)
    separator.train()
    feats = torch.rand(2, 10, input_dim)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    separated, _, _ = separator(feats, ilens=lengths)
    assert isinstance(separated[0], Tensor)
    assert len(separated) == num_spk
    separated[0].abs().mean().backward()
def test_Conformer_separator_invalid_type():
    """An unknown nonlinear name must raise ValueError."""
    with pytest.raises(ValueError):
        ConformerSeparator(input_dim=10, nonlinear="fff")
def test_Conformer_separator_output():
    """Check output structure and per-speaker mask shapes."""
    feats = torch.rand(2, 10, 10)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    for n_src in range(1, 3):
        separator = ConformerSeparator(
            input_dim=10,
            layers=2,
            adim=4,
            aheads=2,
            num_spk=n_src,
            linear_units=10,
            nonlinear="relu",
        )
        separator.eval()
        separated, _, aux = separator(feats, lengths)
        assert isinstance(separated, list)
        assert isinstance(aux, dict)
        for spk in range(n_src):
            key = "mask_spk{}".format(spk + 1)
            assert key in aux
            assert separated[spk].shape == aux[key].shape
| 7,306 | 34.470874 | 77 | py |
espnet | espnet-master/test/espnet2/enh/separator/test_rnn_separator.py | import pytest
import torch
from torch import Tensor
from torch_complex import ComplexTensor
from espnet2.enh.separator.rnn_separator import RNNSeparator
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["blstm"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_rnn_separator_forward_backward_complex(
    input_dim, rnn_type, layer, unit, dropout, num_spk, nonlinear
):
    """RNN separator forward/backward with complex spectrum input."""
    separator = RNNSeparator(
        input_dim=input_dim,
        rnn_type=rnn_type,
        layer=layer,
        unit=unit,
        dropout=dropout,
        num_spk=num_spk,
        nonlinear=nonlinear,
    )
    separator.train()
    spec = ComplexTensor(torch.rand(2, 10, input_dim), torch.rand(2, 10, input_dim))
    lengths = torch.tensor([10, 8], dtype=torch.long)
    separated, _, _ = separator(spec, ilens=lengths)
    assert isinstance(separated[0], ComplexTensor)
    assert len(separated) == num_spk
    separated[0].abs().mean().backward()
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("rnn_type", ["blstm"])
@pytest.mark.parametrize("layer", [1, 3])
@pytest.mark.parametrize("unit", [8])
@pytest.mark.parametrize("dropout", [0.0, 0.2])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
def test_rnn_separator_forward_backward_real(
    input_dim, rnn_type, layer, unit, dropout, num_spk, nonlinear
):
    """RNN separator forward/backward with real-valued input."""
    separator = RNNSeparator(
        input_dim=input_dim,
        rnn_type=rnn_type,
        layer=layer,
        unit=unit,
        dropout=dropout,
        num_spk=num_spk,
        nonlinear=nonlinear,
    )
    separator.train()
    feats = torch.rand(2, 10, input_dim)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    separated, _, _ = separator(feats, ilens=lengths)
    assert isinstance(separated[0], Tensor)
    assert len(separated) == num_spk
    separated[0].abs().mean().backward()
def test_rnn_separator_invalid_type():
    """An unknown nonlinear name must raise ValueError."""
    with pytest.raises(ValueError):
        RNNSeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=2,
            nonlinear="fff",  # invalid activation name
        )
def test_rnn_separator_output():
    """Check output structure and per-speaker mask shapes."""
    feats = torch.rand(2, 10, 10)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    for n_src in range(1, 3):
        separator = RNNSeparator(
            input_dim=10,
            rnn_type="rnn",
            layer=2,
            unit=10,
            dropout=0.1,
            num_spk=n_src,
            nonlinear="relu",
        )
        separator.eval()
        separated, _, aux = separator(feats, lengths)
        assert isinstance(separated, list)
        assert isinstance(aux, dict)
        for spk in range(n_src):
            key = "mask_spk{}".format(spk + 1)
            assert key in aux
            assert separated[spk].shape == aux[key].shape
def test_rnn_streaming():
    """Frame-by-frame streaming must reproduce full-sequence outputs."""
    seq_len, num_spk, batch = 100, 2, 2
    separator = RNNSeparator(input_dim=128, rnn_type="lstm", num_spk=num_spk)
    separator.eval()
    feats = torch.randn((batch, seq_len, 128))
    ilens = torch.LongTensor([seq_len] * batch)
    with torch.no_grad():
        full_out, _, _ = separator.forward(feats, ilens=ilens)
    # Feed one frame at a time, carrying the recurrent state along.
    state = None
    frame_outs = []
    for t in range(seq_len):
        out, state, _ = separator.forward_streaming(feats[:, t : t + 1, :], state)
        frame_outs.append(out)
    for t in range(seq_len):
        for spk in range(num_spk):
            torch.testing.assert_allclose(
                frame_outs[t][spk], full_out[spk][:, t : t + 1, :]
            )
| 3,896 | 28.300752 | 77 | py |
espnet | espnet-master/test/espnet2/enh/layers/test_complex_utils.py | import numpy as np
import pytest
import torch
import torch_complex.functional as FC
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.complex_utils import (
cat,
complex_norm,
einsum,
inverse,
matmul,
solve,
stack,
trace,
)
# Native complex dtypes (torch.complex) require torch >= 1.9.0; several
# tests below branch on this flag to also exercise the native path.
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
# invertible matrix
# Fixed complex test data of shape (2, 3, 3), used by the inverse()/solve()
# tests; the values were chosen so each 3x3 slice is invertible.
mat_np = np.array(
    [
        [
            [-0.211 + 1.8293j, -0.1138 + 0.0754j, -1.3574 - 0.6358j],
            [-1.1041 - 1.0455j, -0.8856 - 0.7828j, 1.6058 + 0.8616j],
            [0.3877 - 1.3823j, 1.2027 - 0.4265j, 0.4436 - 0.0173j],
        ],
        [
            [0.5322 - 0.2629j, 1.774 - 0.9664j, -0.1956 + 0.8791j],
            [-0.156 - 0.1044j, 0.2576 + 1.2311j, 0.0493 - 2.5577j],
            [0.4465 - 1.1056j, 0.4398 + 1.4871j, -0.34 + 1.095j],
        ],
    ],
    dtype=np.complex64,
)
@pytest.mark.parametrize("dim", [0, 1, 2])
def test_cat(dim):
    """cat() must match the backend concatenation for both complex types."""
    if is_torch_1_9_plus:
        backends = [(ComplexTensor, FC), (torch.complex, torch)]
    else:
        backends = [(ComplexTensor, FC)]
    for make_complex, backend in backends:
        a = make_complex(torch.rand(2, 3, 4), torch.rand(2, 3, 4))
        b = make_complex(torch.rand(2, 3, 4), torch.rand(2, 3, 4))
        assert backend.allclose(cat([a, b], dim=dim), backend.cat([a, b], dim=dim))
@pytest.mark.parametrize("dim", [None, 0, 1, 2])
@pytest.mark.skipif(not is_torch_1_9_plus, reason="Require torch 1.9.0+")
def test_complex_norm(dim):
    """complex_norm agrees across ComplexTensor and native complex inputs."""
    real, imag = torch.rand(2, 3, 4), torch.rand(2, 3, 4)
    mat = ComplexTensor(real, imag)
    norm = complex_norm(mat, dim=dim, keepdim=True)
    norm_native = complex_norm(torch.complex(real, imag), dim=dim, keepdim=True)
    assert torch.allclose(norm, norm_native)
    if dim is not None:
        # keepdim=True keeps the rank; the reduced axis collapses to size 1.
        assert norm.ndim == mat.ndim
        assert mat.numel() == norm.numel() * mat.size(dim)
@pytest.mark.parametrize("real_vec", [True, False])
def test_einsum(real_vec):
    """einsum() handles complex matrices with real or complex operands."""
    if is_torch_1_9_plus:
        backends = [(ComplexTensor, FC), (torch.complex, torch)]
    else:
        backends = [(ComplexTensor, FC)]
    for make_complex, backend in backends:
        mat = make_complex(torch.rand(2, 3, 3), torch.rand(2, 3, 3))
        if real_vec:
            # Reference path needs an explicitly complex operand.
            vec = torch.rand(2, 3, 1)
            vec_ref = make_complex(vec, torch.zeros_like(vec))
        else:
            vec = make_complex(torch.rand(2, 3, 1), torch.rand(2, 3, 1))
            vec_ref = vec
        out = einsum("bec,bcf->bef", mat, vec)
        ref = backend.einsum("bec,bcf->bef", mat, vec_ref)
        assert backend.allclose(out, ref)
def test_inverse():
    """mat @ inverse(mat) must be (numerically) the identity."""
    if is_torch_1_9_plus:
        backends = [(ComplexTensor, FC), (torch.complex, torch)]
    else:
        backends = [(ComplexTensor, FC)]
    identity = torch.eye(3).expand(2, 3, 3)
    for make_complex, backend in backends:
        mat = make_complex(
            torch.from_numpy(mat_np.real), torch.from_numpy(mat_np.imag)
        )
        identity_c = make_complex(identity, torch.zeros_like(identity))
        assert backend.allclose(mat @ inverse(mat), identity_c, atol=1e-6)
@pytest.mark.parametrize("real_vec", [True, False])
def test_matmul(real_vec):
    """matmul() handles complex matrices with real or complex operands."""
    if is_torch_1_9_plus:
        backends = [(ComplexTensor, FC), (torch.complex, torch)]
    else:
        backends = [(ComplexTensor, FC)]
    for make_complex, backend in backends:
        mat = make_complex(torch.rand(2, 3, 3), torch.rand(2, 3, 3))
        if real_vec:
            # Reference path needs an explicitly complex operand.
            vec = torch.rand(2, 3, 1)
            vec_ref = make_complex(vec, torch.zeros_like(vec))
        else:
            vec = make_complex(torch.rand(2, 3, 1), torch.rand(2, 3, 1))
            vec_ref = vec
        assert backend.allclose(matmul(mat, vec), backend.matmul(mat, vec_ref))
def test_trace():
    """trace() equals the sum of the diagonal entries."""
    if is_torch_1_9_plus:
        backends = [(ComplexTensor, FC), (torch.complex, torch)]
    else:
        backends = [(ComplexTensor, FC)]
    for make_complex, backend in backends:
        mat = make_complex(torch.rand(2, 3, 3), torch.rand(2, 3, 3))
        diag_sum = sum(mat[..., i, i] for i in range(mat.size(-1)))
        assert backend.allclose(trace(mat), diag_sum)
@pytest.mark.parametrize("real_vec", [True, False])
def test_solve(real_vec):
    """solve(b, A) must agree with the reference solver for A x = b.

    Fix: the native-tensor branch previously ``return``-ed the reference
    solution instead of assigning it to ``ret2``, so the final
    equivalence assertion was silently skipped for that case.
    """
    if is_torch_1_9_plus:
        wrappers = [ComplexTensor, torch.complex]
        modules = [FC, torch]
    else:
        wrappers = [ComplexTensor]
        modules = [FC]
    for complex_wrapper, complex_module in zip(wrappers, modules):
        mat = complex_wrapper(
            torch.from_numpy(mat_np.real), torch.from_numpy(mat_np.imag)
        )
        if not real_vec or complex_wrapper is ComplexTensor:
            vec = complex_wrapper(torch.rand(2, 3, 1), torch.rand(2, 3, 1))
            vec2 = vec
        else:
            vec = torch.rand(2, 3, 1)
            vec2 = complex_wrapper(vec, torch.zeros_like(vec))
        ret = solve(vec, mat)
        if isinstance(vec2, ComplexTensor):
            ret2 = FC.solve(vec2, mat, return_LU=False)
        else:
            # Was ``return torch.linalg.solve(...)``, which exited the test
            # before the assertion below could run.
            ret2 = torch.linalg.solve(mat, vec2)
        assert complex_module.allclose(ret, ret2)
@pytest.mark.parametrize("dim", [0, 1, 2])
def test_stack(dim):
    """stack() must match the backend stacking for both complex types.

    Fix: removed a leftover debug ``print`` that polluted test output.
    """
    if is_torch_1_9_plus:
        wrappers = [ComplexTensor, torch.complex]
        modules = [FC, torch]
    else:
        wrappers = [ComplexTensor]
        modules = [FC]
    for complex_wrapper, complex_module in zip(wrappers, modules):
        mat1 = complex_wrapper(torch.rand(2, 3, 4), torch.rand(2, 3, 4))
        mat2 = complex_wrapper(torch.rand(2, 3, 4), torch.rand(2, 3, 4))
        ret = stack([mat1, mat2], dim=dim)
        ret2 = complex_module.stack([mat1, mat2], dim=dim)
        assert complex_module.allclose(ret, ret2)
def test_complex_impl_consistency():
    """Native-complex and ComplexTensor code paths must agree numerically."""
    if not is_torch_1_9_plus:
        return
    mat_th = torch.complex(
        torch.from_numpy(mat_np.real), torch.from_numpy(mat_np.imag)
    )
    mat_ct = ComplexTensor(mat_th.real, mat_th.imag)
    bs, rank = mat_th.shape[0], mat_th.shape[-1]
    vec_th = torch.complex(torch.rand(bs, rank), torch.rand(bs, rank)).type_as(mat_th)
    vec_ct = ComplexTensor(vec_th.real, vec_th.imag)
    pairs = [
        (abs(mat_th), abs(mat_ct)),
        (inverse(mat_th), inverse(mat_ct)),
        (matmul(mat_th, vec_th.unsqueeze(-1)), matmul(mat_ct, vec_ct.unsqueeze(-1))),
        (solve(vec_th.unsqueeze(-1), mat_th), solve(vec_ct.unsqueeze(-1), mat_ct)),
        (
            einsum("bec,bc->be", mat_th, vec_th),
            einsum("bec,bc->be", mat_ct, vec_ct),
        ),
    ]
    for native_result, wrapped_result in pairs:
        np.testing.assert_allclose(
            native_result.numpy(), wrapped_result.numpy(), atol=1e-6
        )
| 7,177 | 33.344498 | 88 | py |
espnet | espnet-master/test/espnet2/enh/layers/test_conv_utils.py | import pytest
import torch
from espnet2.enh.layers.conv_utils import conv2d_output_shape, convtransp2d_output_shape
@pytest.mark.parametrize("input_dim", [(10, 17), (10, 33)])
@pytest.mark.parametrize("kernel_size", [(1, 3), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (1, 2)])
@pytest.mark.parametrize("padding", [(0, 0), (0, 1)])
@pytest.mark.parametrize("dilation", [(1, 1), (1, 2)])
def test_conv2d_output_shape(input_dim, kernel_size, stride, padding, dilation):
    """Predicted Conv2d output shape must match an actual torch Conv2d."""
    out_h, out_w = conv2d_output_shape(
        input_dim,
        kernel_size=kernel_size,
        stride=stride,
        pad=padding,
        dilation=dilation,
    )
    layer = torch.nn.Conv2d(
        1, 1, kernel_size, stride=stride, padding=padding, dilation=dilation
    )
    sample = torch.rand(1, 1, *input_dim)
    assert layer(sample).shape[2:] == (out_h, out_w)
@pytest.mark.parametrize("input_dim", [(10, 17), (10, 33)])
@pytest.mark.parametrize("kernel_size", [(1, 3), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (1, 2)])
@pytest.mark.parametrize("padding", [(0, 0), (0, 1)])
@pytest.mark.parametrize("output_padding", [(0, 0), (0, 1)])
@pytest.mark.parametrize("dilation", [(1, 1), (1, 2)])
def test_deconv2d_output_shape(
    input_dim, kernel_size, stride, padding, output_padding, dilation
):
    """Predicted ConvTranspose2d output shape must match a real layer."""
    # ConvTranspose2d requires output_padding < stride (or < dilation);
    # skip parameter combinations torch itself would reject.
    if any(
        op >= s or op >= d
        for op, s, d in zip(output_padding, stride, dilation)
    ):
        return
    out_h, out_w = convtransp2d_output_shape(
        input_dim,
        kernel_size=kernel_size,
        stride=stride,
        pad=padding,
        dilation=dilation,
        out_pad=output_padding,
    )
    layer = torch.nn.ConvTranspose2d(
        1,
        1,
        kernel_size,
        stride=stride,
        padding=padding,
        output_padding=output_padding,
        dilation=dilation,
    )
    sample = torch.rand(1, 1, *input_dim)
    assert layer(sample).shape[2:] == (out_h, out_w)
| 1,984 | 30.507937 | 88 | py |
espnet | espnet-master/test/espnet2/enh/layers/test_enh_layers.py | import numpy as np
import pytest
import torch
import torch_complex.functional as FC
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.layers.beamformer import (
generalized_eigenvalue_decomposition,
get_rtf,
gev_phase_correction,
signal_framing,
)
from espnet2.enh.layers.complex_utils import solve
from espnet2.layers.stft import Stft
# Version gates used below: `.values` on max() needs torch >= 1.1.0, and
# native complex dtypes (torch.complex) need torch >= 1.9.0.
is_torch_1_1_plus = V(torch.__version__) >= V("1.1.0")
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
# Fixed multi-channel waveform fixture of shape (2, 16, 8):
# 2 utterances x 16 samples x 8 channels; tests slice the last (channel)
# axis to get the desired channel count.
random_speech = torch.tensor(
    [
        [
            [0.026, 0.031, 0.023, 0.029, 0.026, 0.029, 0.028, 0.027],
            [0.027, 0.031, 0.023, 0.027, 0.026, 0.028, 0.027, 0.027],
            [0.026, 0.030, 0.023, 0.026, 0.025, 0.028, 0.028, 0.028],
            [0.024, 0.028, 0.024, 0.027, 0.024, 0.027, 0.030, 0.030],
            [0.025, 0.027, 0.025, 0.028, 0.023, 0.026, 0.031, 0.031],
            [0.027, 0.026, 0.025, 0.029, 0.022, 0.026, 0.032, 0.031],
            [0.028, 0.026, 0.024, 0.031, 0.023, 0.025, 0.031, 0.029],
            [0.029, 0.024, 0.023, 0.032, 0.023, 0.024, 0.030, 0.027],
            [0.028, 0.024, 0.023, 0.030, 0.023, 0.023, 0.028, 0.027],
            [0.029, 0.026, 0.023, 0.029, 0.025, 0.024, 0.027, 0.025],
            [0.029, 0.027, 0.024, 0.026, 0.025, 0.027, 0.025, 0.025],
            [0.029, 0.031, 0.026, 0.024, 0.028, 0.028, 0.024, 0.025],
            [0.030, 0.038, 0.029, 0.023, 0.035, 0.032, 0.024, 0.026],
            [0.029, 0.040, 0.030, 0.023, 0.039, 0.039, 0.025, 0.027],
            [0.028, 0.040, 0.032, 0.025, 0.041, 0.039, 0.026, 0.028],
            [0.028, 0.041, 0.039, 0.027, 0.044, 0.041, 0.029, 0.035],
        ],
        [
            [0.015, 0.021, 0.012, 0.006, 0.028, 0.021, 0.024, 0.018],
            [0.005, 0.034, 0.036, 0.017, 0.016, 0.037, 0.011, 0.029],
            [0.011, 0.029, 0.060, 0.029, 0.045, 0.035, 0.034, 0.018],
            [0.031, 0.036, 0.040, 0.037, 0.059, 0.032, 0.035, 0.029],
            [0.031, 0.031, 0.036, 0.029, 0.058, 0.035, 0.039, 0.045],
            [0.050, 0.038, 0.052, 0.052, 0.059, 0.044, 0.055, 0.045],
            [0.025, 0.054, 0.054, 0.047, 0.043, 0.059, 0.045, 0.060],
            [0.042, 0.056, 0.073, 0.029, 0.048, 0.063, 0.051, 0.049],
            [0.053, 0.048, 0.045, 0.052, 0.039, 0.045, 0.031, 0.053],
            [0.054, 0.044, 0.053, 0.031, 0.062, 0.050, 0.048, 0.046],
            [0.053, 0.036, 0.075, 0.046, 0.073, 0.052, 0.045, 0.030],
            [0.039, 0.025, 0.061, 0.046, 0.064, 0.032, 0.027, 0.033],
            [0.053, 0.032, 0.052, 0.033, 0.052, 0.029, 0.026, 0.017],
            [0.054, 0.034, 0.054, 0.033, 0.045, 0.043, 0.024, 0.018],
            [0.031, 0.025, 0.043, 0.016, 0.051, 0.040, 0.023, 0.030],
            [0.008, 0.023, 0.024, 0.019, 0.032, 0.024, 0.012, 0.027],
        ],
    ],
    dtype=torch.double,
)
@pytest.mark.parametrize("ch", [2, 4, 6, 8])
@pytest.mark.parametrize("mode", ["power", "evd"])
def test_get_rtf(ch, mode):
    """Check that get_rtf() returns a (scaled) relative transfer function.

    The returned RTF should be proportional to
    Phi_N @ MaxEigVec(Phi_N^-1 @ Phi_X); the final assertion verifies the
    proportionality via a rank-style commutation check rather than by
    comparing values directly (the scale of the RTF is arbitrary).
    """
    if not is_torch_1_9_plus and mode == "evd":
        # torch 1.9.0+ is required for "evd" mode
        return
    # "evd" mode runs on native complex tensors; "power" on ComplexTensor.
    if mode == "evd":
        complex_wrapper = torch.complex
        complex_module = torch
    else:
        complex_wrapper = ComplexTensor
        complex_module = FC
    stft = Stft(
        n_fft=8,
        win_length=None,
        hop_length=2,
        center=True,
        window="hann",
        normalized=False,
        onesided=True,
    )
    torch.random.manual_seed(0)
    x = random_speech[..., :ch]
    ilens = torch.LongTensor([16, 12])
    # (B, T, C, F) -> (B, F, C, T)
    X = complex_wrapper(*torch.unbind(stft(x, ilens)[0], dim=-1)).transpose(-1, -3)
    # Spatial covariance of the "speech": (B, F, C, C)
    Phi_X = complex_module.einsum("...ct,...et->...ce", [X, X.conj()])
    # Draw random "noise" until its covariance is full-rank, so that
    # Phi_N is invertible in the checks below.
    is_singular = True
    while is_singular:
        N = complex_wrapper(torch.randn_like(X.real), torch.randn_like(X.imag))
        Phi_N = complex_module.einsum("...ct,...et->...ce", [N, N.conj()])
        is_singular = not np.all(np.linalg.matrix_rank(Phi_N.numpy()) == ch)
    # (B, F, C, 1)
    rtf = get_rtf(Phi_X, Phi_N, mode=mode, reference_vector=0, iterations=20)
    # Normalize by the largest magnitude per frequency bin (scale is arbitrary).
    if is_torch_1_1_plus:
        rtf = rtf / (rtf.abs().max(dim=-2, keepdim=True).values + 1e-15)
    else:
        rtf = rtf / (rtf.abs().max(dim=-2, keepdim=True)[0] + 1e-15)
    # rtf \approx Phi_N MaxEigVec(Phi_N^-1 @ Phi_X)
    if is_torch_1_1_plus:
        # torch.solve is required, which is only available after pytorch 1.1.0+
        mat = solve(Phi_X, Phi_N)[0]
        max_eigenvec = solve(rtf, Phi_N)[0]
    else:
        mat = complex_module.matmul(Phi_N.inverse2(), Phi_X)
        max_eigenvec = complex_module.matmul(Phi_N.inverse2(), rtf)
    factor = complex_module.matmul(mat, max_eigenvec)
    # If max_eigenvec really is an eigenvector of `mat`, then `factor` is a
    # scalar multiple of it and the two outer products below commute.
    assert complex_module.allclose(
        complex_module.matmul(max_eigenvec, factor.transpose(-1, -2)),
        complex_module.matmul(factor, max_eigenvec.transpose(-1, -2)),
    )
def test_signal_framing():
    """signal_framing should frame the time axis with/without padding."""
    # Single-tap frames (frame length 1) are just the input itself.
    X = ComplexTensor(torch.rand(2, 10, 6, 20), torch.rand(2, 10, 6, 20))
    framed = signal_framing(X, 1, 1, 1, do_padding=False)
    assert FC.allclose(X, framed.squeeze(-1))
    # Multi-tap, no padding: trailing frames are dropped.
    taps, delay = 5, 3
    X = ComplexTensor(torch.rand(2, 10, 6, 20), torch.rand(2, 10, 6, 20))
    framed = signal_framing(X, taps + 1, 1, delay, do_padding=False)
    assert framed.shape == torch.Size([2, 10, 6, 20 - taps - delay + 1, taps + 1])
    assert FC.allclose(framed[..., 0], X[..., : 20 - taps - delay + 1])
    # Multi-tap with padding: the time dimension is preserved.
    X = ComplexTensor(torch.rand(2, 10, 6, 20), torch.rand(2, 10, 6, 20))
    framed = signal_framing(X, taps + 1, 1, delay, do_padding=True)
    assert framed.shape == torch.Size([2, 10, 6, 20, taps + 1])
    assert FC.allclose(framed[..., -1], X)
@pytest.mark.skipif(not is_torch_1_9_plus, reason="Require torch 1.9.0+")
@pytest.mark.parametrize("ch", [2, 4, 6, 8])
def test_gevd(ch):
    """GEVD solution must satisfy Phi_X @ V == Phi_N @ V @ diag(w)."""
    stft = Stft(
        n_fft=8,
        win_length=None,
        hop_length=2,
        center=True,
        window="hann",
        normalized=False,
        onesided=True,
    )
    torch.random.manual_seed(0)

    speech = random_speech[..., :ch]
    ilens = torch.LongTensor([16, 12])
    # (B, T, C, F) -> (B, F, C, T)
    spec = torch.complex(*torch.unbind(stft(speech, ilens)[0], dim=-1)).transpose(
        -1, -3
    )
    # Spatial covariance of the target: (B, F, C, C)
    Phi_X = torch.einsum("...ct,...et->...ce", [spec, spec.conj()])

    # Redraw noise until its covariance has full rank (GEVD needs invertible Phi_N).
    while True:
        noise = torch.randn_like(spec)
        Phi_N = torch.einsum("...ct,...et->...ce", [noise, noise.conj()])
        if torch.linalg.matrix_rank(Phi_N).eq(ch).all():
            break

    # e_val: (B, F, C), e_vec: (B, F, C, C)
    e_val, e_vec = generalized_eigenvalue_decomposition(Phi_X, Phi_N)
    e_val = e_val.to(dtype=e_vec.dtype)
    assert torch.allclose(
        torch.matmul(Phi_X, e_vec),
        torch.matmul(torch.matmul(Phi_N, e_vec), e_val.diag_embed()),
    )
@pytest.mark.skipif(not is_torch_1_9_plus, reason="Require torch 1.9.0+")
def test_gev_phase_correction():
    """ComplexTensor and native-complex inputs must give the same correction."""
    real, imag = torch.rand(2, 3, 4), torch.rand(2, 3, 4)
    correction_ct = gev_phase_correction(ComplexTensor(real, imag))
    correction_th = gev_phase_correction(torch.complex(real, imag))
    assert np.allclose(correction_ct.numpy(), correction_th.numpy())
| 7,408 | 38.833333 | 83 | py |
espnet | espnet-master/test/espnet2/enh/encoder/test_stft_encoder.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.enh.encoder.stft_encoder import STFTEncoder
is_torch_1_12_1_plus = V(torch.__version__) >= V("1.12.1")
@pytest.mark.parametrize("n_fft", [512])
@pytest.mark.parametrize("win_length", [512])
@pytest.mark.parametrize("hop_length", [128])
@pytest.mark.parametrize("window", ["hann"])
@pytest.mark.parametrize("center", [True])
@pytest.mark.parametrize("normalized", [True, False])
@pytest.mark.parametrize("onesided", [True, False])
@pytest.mark.parametrize("use_builtin_complex", [True, False])
def test_STFTEncoder_backward(
n_fft,
win_length,
hop_length,
window,
center,
normalized,
onesided,
use_builtin_complex,
):
encoder = STFTEncoder(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window=window,
center=center,
normalized=normalized,
onesided=onesided,
use_builtin_complex=use_builtin_complex,
)
x = torch.rand(2, 32000, requires_grad=True)
x_lens = torch.tensor([32000, 30000], dtype=torch.long)
y, flens = encoder(x, x_lens)
y.abs().sum().backward()
@pytest.mark.skipif(not is_torch_1_12_1_plus, reason="torch.complex32 is used")
@pytest.mark.parametrize("n_fft", [512])
@pytest.mark.parametrize("win_length", [512])
@pytest.mark.parametrize("hop_length", [128])
@pytest.mark.parametrize("window", ["hann"])
@pytest.mark.parametrize("center", [True])
@pytest.mark.parametrize("normalized", [True, False])
@pytest.mark.parametrize("onesided", [True, False])
@pytest.mark.parametrize("use_builtin_complex", [False])
def test_STFTEncoder_float16_dtype(
    n_fft,
    win_length,
    hop_length,
    window,
    center,
    normalized,
    onesided,
    use_builtin_complex,
):
    """Half-precision waveform input: forward pass and gradient flow."""
    encoder = STFTEncoder(
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        window=window,
        center=center,
        normalized=normalized,
        onesided=onesided,
        use_builtin_complex=use_builtin_complex,
    )

    wav = torch.rand(2, 32000, dtype=torch.float16, requires_grad=True)
    wav_lens = torch.tensor([32000, 30000], dtype=torch.long)
    spec, spec_lens = encoder(wav, wav_lens)
    # Backprop through |spec|^2 via the real/imag parts.
    (spec.real.pow(2) + spec.imag.pow(2)).sum().backward()
| 2,290 | 28 | 79 | py |
espnet | espnet-master/test/espnet2/enh/encoder/test_conv_encoder.py | import pytest
import torch
from espnet2.enh.encoder.conv_encoder import ConvEncoder
@pytest.mark.parametrize("channel", [64])
@pytest.mark.parametrize("kernel_size", [10, 20])
@pytest.mark.parametrize("stride", [5, 10])
def test_ConvEncoder_backward(channel, kernel_size, stride):
encoder = ConvEncoder(
channel=channel,
kernel_size=kernel_size,
stride=stride,
)
x = torch.rand(2, 32000)
x_lens = torch.tensor([32000, 30000], dtype=torch.long)
y, flens = encoder(x, x_lens)
y.sum().backward()
| 545 | 25 | 60 | py |
espnet | espnet-master/test/espnet2/enh/loss/criterions/test_time_domain.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.enh.loss.criterions.time_domain import (
CISDRLoss,
MultiResL1SpecLoss,
SDRLoss,
SISNRLoss,
SNRLoss,
TimeDomainL1,
TimeDomainMSE,
)
is_torch_1_12_1_plus = V(torch.__version__) >= V("1.12.1")
@pytest.mark.parametrize(
    "criterion_class", [CISDRLoss, SISNRLoss, SNRLoss, SDRLoss, MultiResL1SpecLoss]
)
def test_time_domain_criterion_forward(criterion_class):
    """Each time-domain criterion returns one loss value per batch element."""
    criterion = criterion_class()
    batch = 2
    inf = torch.rand(batch, 2000)
    ref = torch.rand(batch, 2000)
    loss = criterion(ref, inf)
    # Fixed typo in the assertion message ("Invlid" -> "Invalid").
    assert loss.shape == (batch,), "Invalid loss shape with " + criterion.name
@pytest.mark.parametrize("criterion_class", [TimeDomainL1, TimeDomainMSE])
@pytest.mark.parametrize("input_ch", [1, 2])
def test_time_domain_l1_l2_forward(criterion_class, input_ch):
criterion = criterion_class()
batch = 2
shape = (batch, 200) if input_ch == 1 else (batch, 200, input_ch)
inf = torch.rand(*shape)
ref = torch.rand(*shape)
loss = criterion(ref, inf)
assert loss.shape == (batch,), "Invlid loss shape with " + criterion.name
with pytest.raises(ValueError):
if input_ch == 1:
loss = criterion(ref[..., None, None], inf[..., None, None])
else:
loss = criterion(ref[..., None], inf[..., None])
@pytest.mark.parametrize("window_sz", [[512], [256, 512]])
@pytest.mark.parametrize("time_domain_weight", [0.5])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_multi_res_l1_spec_loss_forward_backward(window_sz, time_domain_weight, dtype):
if dtype == torch.float16 and not is_torch_1_12_1_plus:
pytest.skip("Skip tests for dtype=torch.float16 due to lack of torch.complex32")
criterion = MultiResL1SpecLoss(
window_sz=window_sz, time_domain_weight=time_domain_weight
)
batch = 2
inf = torch.rand(batch, 2000, dtype=dtype, requires_grad=True)
ref = torch.rand(batch, 2000, dtype=dtype)
loss = criterion(ref, inf)
loss.sum().backward()
assert loss.shape == (batch,), "Invlid loss shape with " + criterion.name
| 2,175 | 30.536232 | 88 | py |
espnet | espnet-master/test/espnet2/enh/loss/criterions/test_tf_domain.py | import pytest
import torch
from packaging.version import parse as V
from torch_complex import ComplexTensor
from espnet2.enh.loss.criterions.tf_domain import (
FrequencyDomainAbsCoherence,
FrequencyDomainCrossEntropy,
FrequencyDomainDPCL,
FrequencyDomainL1,
FrequencyDomainMSE,
)
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
@pytest.mark.parametrize("criterion_class", [FrequencyDomainL1, FrequencyDomainMSE])
@pytest.mark.parametrize(
"mask_type", ["IBM", "IRM", "IAM", "PSM", "NPSM", "PSM^2", "CIRM"]
)
@pytest.mark.parametrize("compute_on_mask", [True, False])
@pytest.mark.parametrize("input_ch", [1, 2])
def test_tf_domain_criterion_forward(
criterion_class, mask_type, compute_on_mask, input_ch
):
criterion = criterion_class(compute_on_mask=compute_on_mask, mask_type=mask_type)
complex_wrapper = torch.complex if is_torch_1_9_plus else ComplexTensor
batch = 2
shape = (batch, 10, 200) if input_ch == 1 else (batch, 10, input_ch, 200)
ref_spec = [complex_wrapper(torch.rand(*shape), torch.rand(*shape))]
mix_spec = complex_wrapper(torch.rand(*shape), torch.rand(*shape))
noise_spec = complex_wrapper(torch.rand(*shape), torch.rand(*shape))
if compute_on_mask:
inf = [torch.rand(*shape)]
ref = criterion.create_mask_label(mix_spec, ref_spec, noise_spec=noise_spec)
loss = criterion(ref[0], inf[0])
else:
inf_spec = [complex_wrapper(torch.rand(*shape), torch.rand(*shape))]
loss = criterion(ref_spec[0], inf_spec[0])
assert loss.shape == (batch,), "Invlid loss shape with " + criterion.name
@pytest.mark.parametrize("input_ch", [1, 2])
def test_tf_coh_criterion_forward(input_ch):
criterion = FrequencyDomainAbsCoherence()
complex_wrapper = torch.complex if is_torch_1_9_plus else ComplexTensor
batch = 2
shape = (batch, 10, 200) if input_ch == 1 else (batch, 10, input_ch, 200)
inf_spec = complex_wrapper(torch.rand(*shape), torch.rand(*shape))
ref_spec = complex_wrapper(torch.rand(*shape), torch.rand(*shape))
loss = criterion(ref_spec, inf_spec)
assert loss.shape == (batch,), "Invlid loss shape with " + criterion.name
@pytest.mark.parametrize("input_ch", [1, 2])
def test_tf_coh_criterion_invalid_forward(input_ch):
criterion = FrequencyDomainAbsCoherence()
complex_wrapper = torch.complex if is_torch_1_9_plus else ComplexTensor
batch = 2
shape = (batch, 10, 200) if input_ch == 1 else (batch, 10, input_ch, 200)
inf_spec = complex_wrapper(torch.rand(*shape), torch.rand(*shape))
ref_spec = complex_wrapper(torch.rand(*shape), torch.rand(*shape))
with pytest.raises(ValueError):
criterion(ref_spec.real, inf_spec)
with pytest.raises(ValueError):
if input_ch == 1:
criterion(ref_spec[0], inf_spec[0])
else:
criterion(ref_spec[0, 0], inf_spec[0, 0])
@pytest.mark.parametrize("input_ch", [1, 2])
def test_tf_ce_criterion_forward(input_ch):
criterion = FrequencyDomainCrossEntropy()
batch = 2
ncls = 200
shape = (batch, 10, ncls) if input_ch == 1 else (batch, 10, input_ch, ncls)
label_shape = (batch, 10) if input_ch == 1 else (batch, 10, input_ch)
inf_spec = torch.rand(*shape)
ref_spec = torch.randint(0, ncls, label_shape)
loss = criterion(ref_spec, inf_spec)
assert loss.shape == (batch,), "Invlid loss shape with " + criterion.name
@pytest.mark.parametrize("loss_type", ["dpcl", "mdc"])
def test_tf_dpcl_loss_criterion_forward(loss_type):
criterion = FrequencyDomainDPCL(loss_type=loss_type)
batch = 2
inf = torch.rand(batch, 10 * 200, 40)
ref_spec = [
ComplexTensor(torch.rand(batch, 10, 200), torch.rand(batch, 10, 200)),
ComplexTensor(torch.rand(batch, 10, 200), torch.rand(batch, 10, 200)),
ComplexTensor(torch.rand(batch, 10, 200), torch.rand(batch, 10, 200)),
]
ref = [abs(r) for r in ref_spec]
loss = criterion(ref, inf)
assert loss.shape == (batch,), "Invalid loss shape with " + criterion.name
| 4,064 | 35.621622 | 85 | py |
espnet | espnet-master/test/espnet2/enh/loss/wrappers/test_fixed_order_solver.py | import pytest
import torch
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainL1
from espnet2.enh.loss.wrappers.fixed_order import FixedOrderSolver
@pytest.mark.parametrize("num_spk", [1, 2, 3])
def test_PITSolver_forward(num_spk):
batch = 2
inf = [torch.rand(batch, 10, 100) for spk in range(num_spk)]
ref = [inf[num_spk - spk - 1] for spk in range(num_spk)] # reverse inf as ref
solver = FixedOrderSolver(FrequencyDomainL1())
loss, stats, others = solver(ref, inf)
| 506 | 30.6875 | 82 | py |
espnet | espnet-master/test/espnet2/enh/loss/wrappers/test_multilayer_pit_solver.py | import pytest
import torch
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainL1
from espnet2.enh.loss.wrappers.multilayer_pit_solver import MultiLayerPITSolver
@pytest.mark.parametrize("num_spk", [1, 2, 3])
@pytest.mark.parametrize("layer_weights", [[1, 1], [1, 2]])
def test_MultiLayerPITSolver_forward_multi_layer(num_spk, layer_weights):
batch = 2
num_layers = 2
# infs is a List of List (num_layer x num_speaker Tensors)
infs = [
[torch.rand(batch, 10, 100) for spk in range(num_spk)]
for _ in range(num_layers)
]
ref = [infs[-1][num_spk - spk - 1] for spk in range(num_spk)] # reverse inf as ref
solver = MultiLayerPITSolver(
FrequencyDomainL1(), independent_perm=True, layer_weights=layer_weights
)
loss, stats, others = solver(ref, infs)
perm = others["perm"]
correct_perm = list(range(num_spk))
correct_perm.reverse()
assert perm[0].equal(torch.tensor(correct_perm))
# test for independent_perm is False
solver = MultiLayerPITSolver(
FrequencyDomainL1(), independent_perm=False, layer_weights=layer_weights
)
loss, stats, others = solver(ref, infs, {"perm": perm})
@pytest.mark.parametrize("num_spk", [1, 2, 3])
@pytest.mark.parametrize("layer_weights", [[1], [2]])
def test_MultiLayerPITSolver_forward_single_layer(num_spk, layer_weights):
batch = 2
# inf is a List of Tensors
inf = [torch.rand(batch, 10, 100) for spk in range(num_spk)]
ref = [inf[num_spk - spk - 1] for spk in range(num_spk)] # reverse inf as ref
solver = MultiLayerPITSolver(
FrequencyDomainL1(), independent_perm=True, layer_weights=layer_weights
)
loss, stats, others = solver(ref, inf)
perm = others["perm"]
correct_perm = list(range(num_spk))
correct_perm.reverse()
assert perm[0].equal(torch.tensor(correct_perm))
# test for independent_perm is False
solver = MultiLayerPITSolver(
FrequencyDomainL1(), independent_perm=False, layer_weights=layer_weights
)
loss, stats, others = solver(ref, inf, {"perm": perm})
| 2,099 | 34 | 87 | py |
espnet | espnet-master/test/espnet2/enh/loss/wrappers/test_dpcl_solver.py | import pytest
import torch
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainDPCL
from espnet2.enh.loss.wrappers.dpcl_solver import DPCLSolver
@pytest.mark.parametrize("num_spk", [1, 2, 3])
def test_DPCLSolver_forward(num_spk):
batch = 2
o = {"tf_embedding": torch.rand(batch, 10 * 200, 40)}
inf = [torch.rand(batch, 10, 200) for spk in range(num_spk)]
ref = [inf[num_spk - spk - 1] for spk in range(num_spk)] # reverse inf as ref
solver = DPCLSolver(FrequencyDomainDPCL())
loss, stats, others = solver(ref, inf, o)
| 560 | 32 | 82 | py |
espnet | espnet-master/test/espnet2/enh/loss/wrappers/test_pit_solver.py | import pytest
import torch
import torch.nn.functional as F
from espnet2.enh.loss.criterions.tf_domain import (
FrequencyDomainCrossEntropy,
FrequencyDomainL1,
)
from espnet2.enh.loss.wrappers.pit_solver import PITSolver
@pytest.mark.parametrize("num_spk", [1, 2, 3])
@pytest.mark.parametrize("flexible_numspk", [True, False])
def test_PITSolver_forward(num_spk, flexible_numspk):
batch = 2
inf = [torch.rand(batch, 10, 100) for spk in range(num_spk)]
ref = [inf[num_spk - spk - 1] for spk in range(num_spk)] # reverse inf as ref
solver = PITSolver(
FrequencyDomainL1(), independent_perm=True, flexible_numspk=flexible_numspk
)
loss, stats, others = solver(ref, inf)
perm = others["perm"]
correct_perm = list(range(num_spk))
correct_perm.reverse()
assert perm[0].equal(torch.tensor(correct_perm))
# test for independent_perm is False
solver = PITSolver(
FrequencyDomainL1(), independent_perm=False, flexible_numspk=flexible_numspk
)
loss, stats, others = solver(ref, inf, {"perm": perm})
@pytest.mark.parametrize("num_spk", [1, 2, 3])
@pytest.mark.parametrize("flexible_numspk", [True, False])
def test_PITSolver_tf_ce_forward(num_spk, flexible_numspk):
batch = 2
ncls = 100
ref = [torch.randint(0, ncls, (batch, 10)) for spk in range(num_spk)]
bias = [F.one_hot(y) for y in ref]
bias = [F.pad(y, (0, ncls - y.size(-1))) for y in bias]
inf = [torch.rand(batch, 10, ncls) + bias[spk] for spk in range(num_spk)]
solver = PITSolver(
FrequencyDomainCrossEntropy(),
independent_perm=True,
flexible_numspk=flexible_numspk,
)
loss, stats, others = solver(ref, inf)
perm = others["perm"]
correct_perm = list(range(num_spk))
assert perm[0].equal(torch.tensor(correct_perm)), (perm, correct_perm)
# test for independent_perm is False
solver = PITSolver(
FrequencyDomainCrossEntropy(),
independent_perm=False,
flexible_numspk=flexible_numspk,
)
loss, stats, others = solver(ref, inf, {"perm": perm})
| 2,092 | 31.703125 | 84 | py |
espnet | espnet-master/test/espnet2/enh/loss/wrappers/test_mixit_solver.py | import pytest
import torch
import torch.nn.functional as F
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainL1
from espnet2.enh.loss.criterions.time_domain import TimeDomainL1
from espnet2.enh.loss.wrappers.mixit_solver import MixITSolver
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
@pytest.mark.parametrize("inf_num, time_domain", [(4, True), (4, False)])
def test_MixITSolver_forward(inf_num, time_domain):
batch = 2
if time_domain:
solver = MixITSolver(TimeDomainL1())
inf = [torch.rand(batch, 100) for _ in range(inf_num)]
# 2 speaker's reference
ref = [torch.zeros(batch, 100), torch.zeros(batch, 100)]
else:
solver = MixITSolver(FrequencyDomainL1())
inf = [torch.rand(batch, 100, 10, 10) for _ in range(inf_num)]
# 2 speaker's reference
ref = [torch.zeros(batch, 100, 10, 10), torch.zeros(batch, 100, 10, 10)]
ref[0][0] = inf[2][0] + inf[3][0] # sample1, speaker 1
ref[1][0] = inf[0][0] + inf[1][0] # sample1, speaker 2
ref[0][1] = inf[0][1] + inf[3][1] # sample2, speaker 1
ref[1][1] = inf[1][1] + inf[2][1] # sample2, speaker 2
loss, stats, others = solver(ref, inf)
perm = others["perm"]
correct_perm1 = (
F.one_hot(
torch.tensor([1, 1, 0, 0], dtype=torch.int64),
num_classes=inf_num // 2,
)
.transpose(1, 0)
.float()
)
assert perm[0].equal(torch.tensor(correct_perm1))
correct_perm2 = (
F.one_hot(
torch.tensor([0, 1, 1, 0], dtype=torch.int64),
num_classes=inf_num // 2,
)
.transpose(1, 0)
.float()
)
assert perm[1].equal(torch.tensor(correct_perm2))
@pytest.mark.parametrize(
    "inf_num, torch_complex",
    [(4, True), (4, False)],
)
def test_MixITSolver_complex_forward(inf_num, torch_complex):
    """MixIT permutation recovery on complex spectrograms.

    Runs either with native complex tensors (``torch_complex=True``, needs
    torch>=1.9 and silently returns otherwise) or with
    ``torch_complex.ComplexTensor`` wrappers.
    """
    batch = 2
    solver = MixITSolver(FrequencyDomainL1())
    if torch_complex:
        if is_torch_1_9_plus:
            inf = [
                torch.rand(batch, 100, 10, 10, dtype=torch.cfloat)
                for _ in range(inf_num)
            ]
            # 2 speaker's reference
            ref = [
                torch.zeros(batch, 100, 10, 10, dtype=torch.cfloat),
                torch.zeros(batch, 100, 10, 10, dtype=torch.cfloat),
            ]
        else:
            # Native complex dtype unavailable on this torch version.
            return
    else:
        inf = [
            ComplexTensor(
                torch.rand(batch, 100, 10, 10),
                torch.rand(batch, 100, 10, 10),
            )
            for _ in range(inf_num)
        ]
        # 2 speaker's reference
        ref = [
            ComplexTensor(
                torch.zeros(batch, 100, 10, 10),
                torch.zeros(batch, 100, 10, 10),
            )
            for _ in range(inf_num // 2)
        ]
    # Each reference "mixture" is the sum of two estimated sources.
    ref[0][0] = inf[2][0] + inf[3][0]  # sample1, speaker 1
    ref[1][0] = inf[0][0] + inf[1][0]  # sample1, speaker 2
    ref[0][1] = inf[0][1] + inf[3][1]  # sample2, speaker 1
    ref[1][1] = inf[1][1] + inf[2][1]  # sample2, speaker 2
    loss, stats, others = solver(ref, inf)
    perm = others["perm"]
    # Expected one-hot mixing matrices per batch sample.
    correct_perm1 = (
        F.one_hot(
            torch.tensor([1, 1, 0, 0], dtype=torch.int64),
            num_classes=inf_num // 2,
        )
        .transpose(1, 0)
        .float()
    )
    assert perm[0].equal(torch.tensor(correct_perm1))
    correct_perm2 = (
        F.one_hot(
            torch.tensor([0, 1, 1, 0], dtype=torch.int64),
            num_classes=inf_num // 2,
        )
        .transpose(1, 0)
        .float()
    )
    assert perm[1].equal(torch.tensor(correct_perm2))
| 3,730 | 29.581967 | 80 | py |
espnet | espnet-master/test/espnet2/enh/extractor/test_td_speakerbeam_extractor.py | import pytest
import torch
from espnet2.enh.extractor.td_speakerbeam_extractor import TDSpeakerBeamExtractor
@pytest.mark.parametrize("input_dim", [5])
@pytest.mark.parametrize("layer", [4])
@pytest.mark.parametrize("stack", [2])
@pytest.mark.parametrize("bottleneck_dim", [5])
@pytest.mark.parametrize("hidden_dim", [10])
@pytest.mark.parametrize("skip_dim", [5, None])
@pytest.mark.parametrize("kernel", [3])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("norm_type", ["BN", "gLN", "cLN"])
@pytest.mark.parametrize("pre_nonlinear", ["linear", "prelu"])
@pytest.mark.parametrize("nonlinear", ["relu", "sigmoid", "tanh"])
@pytest.mark.parametrize("i_adapt_layer", [3])
@pytest.mark.parametrize("adapt_layer_type", ["mul", "concat", "muladd"])
@pytest.mark.parametrize("adapt_enroll_dim", [5])
@pytest.mark.parametrize("use_spk_emb", [True, False])
def test_td_speakerbeam_forward_backward(
input_dim,
layer,
stack,
bottleneck_dim,
hidden_dim,
skip_dim,
kernel,
causal,
norm_type,
pre_nonlinear,
nonlinear,
i_adapt_layer,
adapt_layer_type,
adapt_enroll_dim,
use_spk_emb,
):
if adapt_layer_type == "muladd":
adapt_enroll_dim = adapt_enroll_dim * 2
spk_emb_dim = 10
model = TDSpeakerBeamExtractor(
input_dim=input_dim,
layer=layer,
stack=stack,
bottleneck_dim=bottleneck_dim,
hidden_dim=hidden_dim,
skip_dim=skip_dim,
kernel=kernel,
causal=causal,
norm_type=norm_type,
pre_nonlinear=pre_nonlinear,
nonlinear=nonlinear,
i_adapt_layer=i_adapt_layer,
adapt_layer_type=adapt_layer_type,
adapt_enroll_dim=adapt_enroll_dim,
use_spk_emb=use_spk_emb,
spk_emb_dim=spk_emb_dim,
)
model.train()
x = torch.rand(2, 10, input_dim)
x_lens = torch.tensor([10, 8], dtype=torch.long)
if use_spk_emb:
enroll = torch.rand(2, spk_emb_dim)
enroll_lens = torch.tensor([1, 1])
else:
enroll = torch.rand(2, 20, input_dim)
enroll_lens = torch.tensor([20, 18])
masked, flens, others = model(
x, ilens=x_lens, input_aux=enroll, ilens_aux=enroll_lens, suffix_tag="_spk1"
)
masked.abs().mean().backward()
def test_td_speakerbeam_invalid_type():
    """An unknown nonlinearity name is rejected at construction time."""
    with pytest.raises(ValueError):
        TDSpeakerBeamExtractor(input_dim=10, nonlinear="fff")
| 2,465 | 28.710843 | 84 | py |
espnet | espnet-master/test/espnet2/enh/decoder/test_conv_decoder.py | import pytest
import torch
from espnet2.enh.decoder.conv_decoder import ConvDecoder
from espnet2.enh.encoder.conv_encoder import ConvEncoder
@pytest.mark.parametrize("channel", [64])
@pytest.mark.parametrize("kernel_size", [10, 20])
@pytest.mark.parametrize("stride", [5, 10])
def test_ConvEncoder_backward(channel, kernel_size, stride):
decoder = ConvDecoder(
channel=channel,
kernel_size=kernel_size,
stride=stride,
)
x = torch.rand(2, 200, channel)
x_lens = torch.tensor(
[199 * stride + kernel_size, 199 * stride + kernel_size], dtype=torch.long
)
y, flens = decoder(x, x_lens)
y.sum().backward()
@pytest.mark.parametrize("channel", [64])
@pytest.mark.parametrize("kernel_size", [10, 20])
@pytest.mark.parametrize("stride", [5, 10])
def test_conv_dec_streaming(channel, kernel_size, stride):
input_audio = torch.randn((1, 100))
ilens = torch.LongTensor([100])
encoder = ConvEncoder(kernel_size=kernel_size, stride=stride, channel=channel)
decoder = ConvDecoder(kernel_size=kernel_size, stride=stride, channel=channel)
frames, flens = encoder(input_audio, ilens)
wav, ilens = decoder(frames, ilens)
splited = encoder.streaming_frame(input_audio)
sframes = [encoder.forward_streaming(s) for s in splited]
swavs = [decoder.forward_streaming(s) for s in sframes]
merged = decoder.streaming_merge(swavs, ilens)
sframes = torch.cat(sframes, dim=1)
torch.testing.assert_allclose(sframes, frames)
torch.testing.assert_allclose(wav, merged)
| 1,559 | 31.5 | 82 | py |
espnet | espnet-master/test/espnet2/enh/decoder/test_stft_decoder.py | import pytest
import torch
import torch_complex
from packaging.version import parse as V
from torch_complex import ComplexTensor
from espnet2.enh.decoder.stft_decoder import STFTDecoder
from espnet2.enh.encoder.stft_encoder import STFTEncoder
is_torch_1_12_1_plus = V(torch.__version__) >= V("1.12.1")
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
@pytest.mark.parametrize("n_fft", [512])
@pytest.mark.parametrize("win_length", [512])
@pytest.mark.parametrize("hop_length", [128])
@pytest.mark.parametrize("window", ["hann"])
@pytest.mark.parametrize("center", [True])
@pytest.mark.parametrize("normalized", [True, False])
@pytest.mark.parametrize("onesided", [True, False])
def test_STFTDecoder_backward(
n_fft, win_length, hop_length, window, center, normalized, onesided
):
decoder = STFTDecoder(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window=window,
center=center,
normalized=normalized,
onesided=onesided,
)
real = torch.rand(2, 300, n_fft // 2 + 1 if onesided else n_fft, requires_grad=True)
imag = torch.rand(2, 300, n_fft // 2 + 1 if onesided else n_fft, requires_grad=True)
x = ComplexTensor(real, imag)
x_lens = torch.tensor([300 * hop_length, 295 * hop_length], dtype=torch.long)
y, ilens = decoder(x, x_lens)
y.sum().backward()
@pytest.mark.parametrize("n_fft", [512])
@pytest.mark.parametrize("win_length", [512])
@pytest.mark.parametrize("hop_length", [128])
@pytest.mark.parametrize("window", ["hann"])
@pytest.mark.parametrize("center", [True])
@pytest.mark.parametrize("normalized", [True, False])
@pytest.mark.parametrize("onesided", [True, False])
def test_STFTDecoder_invalid_type(
n_fft, win_length, hop_length, window, center, normalized, onesided
):
decoder = STFTDecoder(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window=window,
center=center,
normalized=normalized,
onesided=onesided,
)
with pytest.raises(TypeError):
real = torch.rand(
2, 300, n_fft // 2 + 1 if onesided else n_fft, requires_grad=True
)
x_lens = torch.tensor([300 * hop_length, 295 * hop_length], dtype=torch.long)
y, ilens = decoder(real, x_lens)
@pytest.mark.parametrize("n_fft", [512])
@pytest.mark.parametrize("win_length", [512, 400])
@pytest.mark.parametrize("hop_length", [128, 256])
@pytest.mark.parametrize("onesided", [True, False])
def test_stft_enc_dec_streaming(n_fft, win_length, hop_length, onesided):
input_audio = torch.randn((1, 16000))
ilens = torch.LongTensor([16000])
encoder = STFTEncoder(
n_fft=n_fft, win_length=win_length, hop_length=hop_length, onesided=onesided
)
decoder = STFTDecoder(
n_fft=n_fft, win_length=win_length, hop_length=hop_length, onesided=onesided
)
frames, flens = encoder(input_audio, ilens)
wav, ilens = decoder(frames, ilens)
splited = encoder.streaming_frame(input_audio)
sframes = [encoder.forward_streaming(s) for s in splited]
swavs = [decoder.forward_streaming(s) for s in sframes]
merged = decoder.streaming_merge(swavs, ilens)
if not (is_torch_1_9_plus and encoder.use_builtin_complex):
sframes = torch_complex.cat(sframes, dim=1)
else:
sframes = torch.cat(sframes, dim=1)
torch.testing.assert_close(sframes.real, frames.real)
torch.testing.assert_close(sframes.imag, frames.imag)
torch.testing.assert_close(wav, input_audio)
torch.testing.assert_close(wav, merged)
@pytest.mark.skipif(not is_torch_1_12_1_plus, reason="torch.complex32 is used")
@pytest.mark.parametrize("n_fft", [512])
@pytest.mark.parametrize("win_length", [512])
@pytest.mark.parametrize("hop_length", [128])
@pytest.mark.parametrize("window", ["hann"])
@pytest.mark.parametrize("center", [True])
@pytest.mark.parametrize("normalized", [True, False])
@pytest.mark.parametrize("onesided", [True, False])
def test_STFTDecoder_complex32_dtype(
    n_fft, win_length, hop_length, window, center, normalized, onesided
):
    """Half-precision complex input: forward pass and gradient flow."""
    decoder = STFTDecoder(
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        window=window,
        center=center,
        normalized=normalized,
        onesided=onesided,
    )

    n_bins = n_fft // 2 + 1 if onesided else n_fft
    spec = torch.rand(2, 300, n_bins, dtype=torch.complex32, requires_grad=True)
    spec_lens = torch.tensor([300 * hop_length, 295 * hop_length], dtype=torch.long)
    out, out_lens = decoder(spec, spec_lens)
    # Backprop through |out|^2 via the real/imag parts.
    (out.real.pow(2) + out.imag.pow(2)).sum().backward()
| 4,652 | 33.984962 | 88 | py |
espnet | espnet-master/test/espnet2/torch_utils/test_pytorch_version.py | from espnet2.torch_utils.pytorch_version import pytorch_cudnn_version
def test_pytorch_cudnn_version():
    """Smoke test: the version info can be built and printed."""
    version_info = pytorch_cudnn_version()
    print(version_info)
| 141 | 22.666667 | 69 | py |
espnet | espnet-master/test/espnet2/torch_utils/test_set_all_random_seed.py | from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
def test_set_all_random_seed():
    """Smoke test: seeding all RNGs does not raise."""
    set_all_random_seed(0)
| 133 | 21.333333 | 71 | py |
espnet | espnet-master/test/espnet2/torch_utils/test_add_gradient_noise.py | import torch
from espnet2.torch_utils.add_gradient_noise import add_gradient_noise
def test_add_gradient_noise():
    """Smoke test: noise can be added to populated gradients."""
    layer = torch.nn.Linear(1, 1)
    out = layer(torch.rand(1, 1))
    out.sum().backward()
    add_gradient_noise(layer, 100)
| 234 | 22.5 | 69 | py |
espnet | espnet-master/test/espnet2/torch_utils/test_model_summary.py | import torch
from espnet2.torch_utils.model_summary import model_summary
class Model(torch.nn.Module):
    """Toy model with three large linear layers for summary output."""

    def __init__(self):
        super().__init__()
        self.l1 = torch.nn.Linear(1000, 1000)
        self.l2 = torch.nn.Linear(1000, 1000)
        self.l3 = torch.nn.Linear(1000, 1000)
def test_model_summary():
    """Smoke test: a summary string can be produced and printed."""
    summary = model_summary(Model())
    print(summary)
| 357 | 21.375 | 59 | py |
espnet | espnet-master/test/espnet2/torch_utils/test_initialize.py | import pytest
import torch
from espnet2.torch_utils.initialize import initialize
initialize_types = {}
class Model(torch.nn.Module):
    """Toy model covering the layer types handled by ``initialize``."""

    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(2, 2, 3)
        self.l1 = torch.nn.Linear(2, 2)
        self.rnn_cell = torch.nn.LSTMCell(2, 2)
        self.rnn = torch.nn.LSTM(2, 2)
        self.emb = torch.nn.Embedding(1, 1)
        self.norm = torch.nn.LayerNorm(1)
class Model2(torch.nn.Module):
    """Model with a Conv3d (5-dim weight); used by the NotImplementedError test."""

    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv3d(2, 2, 3)
@pytest.mark.parametrize(
    "init",
    [
        "chainer",
        "xavier_uniform",
        "xavier_normal",
        "kaiming_normal",
        "kaiming_uniform",
        "dummy",
    ],
)
def test_initialize(init):
    """Known init schemes succeed; an unknown name raises ValueError."""
    model = Model()
    if init != "dummy":
        initialize(model, init)
    else:
        with pytest.raises(ValueError):
            initialize(model, init)
def test_5dim():
    """"chainer" init raises NotImplementedError on 5-dim (Conv3d) weights."""
    with pytest.raises(NotImplementedError):
        initialize(Model2(), "chainer")
| 1,083 | 20.68 | 53 | py |
espnet | espnet-master/test/espnet2/torch_utils/test_load_pretrained_model.py | import numpy as np
import torch
from espnet2.torch_utils.load_pretrained_model import load_pretrained_model
class Model(torch.nn.Module):
    """Two-layer toy model used as both source and destination."""

    def __init__(self):
        super().__init__()
        self.layer1 = torch.nn.Linear(1, 1)
        self.layer2 = torch.nn.Linear(2, 2)
def test_load_pretrained_model_all(tmp_path):
    """Loading a plain "<file>" spec copies every parameter."""
    model_src = Model()
    torch.save(model_src.state_dict(), tmp_path / "model.pth")

    model_dst = Model()
    load_pretrained_model(f"{tmp_path}/model.pth", model_dst, "cpu")

    src_state = model_src.state_dict()
    for key, value in model_dst.state_dict().items():
        np.testing.assert_array_equal(value.numpy(), src_state[key].numpy())
def test_load_pretrained_model_layer1_layer1(tmp_path):
    """The "<file>:layer1:layer1" spec copies only the layer1 parameters."""
    model_src = Model()
    torch.save(model_src.state_dict(), tmp_path / "model.pth")

    model_dst = Model()
    load_pretrained_model(f"{tmp_path}/model.pth:layer1:layer1", model_dst, "cpu")

    src_state = model_src.state_dict()
    for key, value in model_dst.state_dict().items():
        if key.startswith("layer1"):
            np.testing.assert_array_equal(value.numpy(), src_state[key].numpy())
def test_load_pretrained_model_exclude(tmp_path):
    """The "<file>:::layer2" spec copies everything except layer2."""
    model_src = Model()
    torch.save(model_src.state_dict(), tmp_path / "model.pth")

    model_dst = Model()
    load_pretrained_model(f"{tmp_path}/model.pth:::layer2", model_dst, "cpu")

    src_state = model_src.state_dict()
    for key, value in model_dst.state_dict().items():
        if not key.startswith("layer2"):
            np.testing.assert_array_equal(value.numpy(), src_state[key].numpy())
def test_load_pretrained_model_layer1(tmp_path):
    """A bare layer1 state_dict loads into dst.layer1 via "<file>::layer1"."""
    model_src = Model()
    torch.save(model_src.layer1.state_dict(), tmp_path / "layer1.pth")

    model_dst = Model()
    load_pretrained_model(f"{tmp_path}/layer1.pth::layer1", model_dst, "cpu")

    src_state = model_src.state_dict()
    for key, value in model_dst.state_dict().items():
        if key.startswith("layer1"):
            np.testing.assert_array_equal(value.numpy(), src_state[key].numpy())
| 2,070 | 29.910448 | 84 | py |
espnet | espnet-master/test/espnet2/torch_utils/test_forward_adaptor.py | import pytest
import torch
from espnet2.torch_utils.forward_adaptor import ForwardAdaptor
class Model(torch.nn.Module):
    """Module whose computation lives in ``func`` instead of ``forward``."""

    def func(self, x):
        return x
def test_ForwardAdaptor():
    """The adaptor routes __call__ to the wrapped method."""
    wrapped = ForwardAdaptor(Model(), "func")
    inputs = torch.randn(2, 2)
    assert (wrapped(inputs) == inputs).all()
def test_ForwardAdaptor_no_func():
    """Wrapping a nonexistent method name raises ValueError."""
    with pytest.raises(ValueError):
        ForwardAdaptor(Model(), "aa")
| 424 | 18.318182 | 62 | py |
espnet | espnet-master/test/espnet2/torch_utils/test_device_funcs.py | import dataclasses
from typing import NamedTuple
import pytest
import torch
from espnet2.torch_utils.device_funcs import force_gatherable, to_device
x = torch.tensor(10)
@dataclasses.dataclass(frozen=True)
class Data:
    """Frozen dataclass wrapping a tensor (dataclass traversal case)."""

    x: torch.Tensor
class Named(NamedTuple):
    """NamedTuple wrapping a tensor (namedtuple traversal case)."""

    x: torch.Tensor
@pytest.mark.parametrize(
    "obj",
    [x, x.numpy(), (x,), [x], {"x": [x]}, {x}, Data(x), Named(x), 23, 3.0, None],
)
def test_to_device(obj):
    """to_device traverses arbitrary containers and scalars without raising."""
    to_device(obj, "cpu")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Require cuda")
def test_to_device_cuda():
    """Nested tensors are moved onto the CUDA device."""
    moved = to_device({"a": [torch.tensor([0, 1])]}, "cuda")
    assert moved["a"][0].device == torch.device("cuda:0")
@pytest.mark.parametrize(
    "obj",
    [x, x.numpy(), (x,), [x], {"x": x}, {x}, Data(x), Named(x), 23, 3.0, None],
)
def test_force_gatherable(obj):
    """force_gatherable accepts arbitrary containers and scalars without raising."""
    force_gatherable(obj, "cpu")
def test_force_gatherable_0dim_to_1dim():
    """Python scalars are promoted to 1-dim tensors for gathering."""
    gathered = force_gatherable({"a": [3]}, "cpu")
    assert gathered["a"][0].shape == (1,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Require cuda")
def test_force_gatherable_cuda():
    """force_gatherable() should move nested tensors onto the GPU."""
    obj = {"a": [torch.tensor([0, 1])]}
    obj2 = force_gatherable(obj, "cuda")
    assert obj2["a"][0].device == torch.device("cuda:0")
| 1,275 | 22.2 | 81 | py |
espnet | espnet-master/test/espnet2/main_funcs/test_average_nbest_models.py | import pytest
import torch
from espnet2.main_funcs.average_nbest_models import average_nbest_models
from espnet2.train.reporter import Reporter
@pytest.fixture
def reporter():
    """Reporter populated with three epochs of increasing validation accuracy."""
    rep = Reporter()
    for epoch, acc in ((1, 0.4), (2, 0.5), (3, 0.6)):
        rep.set_epoch(epoch)
        with rep.observe("valid") as sub:
            sub.register({"acc": acc})
            sub.next()
    return rep
@pytest.fixture
def output_dir(tmp_path):
    """Directory holding three identical epoch checkpoints of a toy model."""
    out = tmp_path / "out"
    out.mkdir()
    # Mix of layer types so averaging touches conv/batchnorm/linear/rnn/layernorm.
    model = torch.nn.Sequential(
        torch.nn.Conv2d(1, 1, 3),
        torch.nn.BatchNorm2d(1),
        torch.nn.Linear(1, 1),
        torch.nn.LSTM(1, 1),
        torch.nn.LayerNorm(1),
    )
    state = model.state_dict()
    for epoch in (1, 2, 3):
        torch.save(state, out / f"{epoch}epoch.pth")
    return out
@pytest.mark.parametrize("nbest", [0, 1, 2, 3, 4, [1, 2, 3, 5], []])
def test_average_nbest_models(reporter, output_dir, nbest):
    """Averaging should succeed for any nbest spec, incl. out-of-range epochs."""
    # Repeat twice to check the case of existing files.
    for _ in range(2):
        average_nbest_models(
            reporter=reporter,
            output_dir=output_dir,
            best_model_criterion=[("valid", "acc", "max")],
            nbest=nbest,
        )
@pytest.mark.parametrize("nbest", [0, 1, 2, 3, 4, [1, 2, 3, 5], []])
def test_average_nbest_models_0epoch_reporter(output_dir, nbest):
    """Averaging must also handle a Reporter with no recorded epochs."""
    # Repeat twice to check the case of existing files.
    for _ in range(2):
        average_nbest_models(
            reporter=Reporter(),
            output_dir=output_dir,
            best_model_criterion=[("valid", "acc", "max")],
            nbest=nbest,
        )
| 1,927 | 27.352941 | 72 | py |
espnet | espnet-master/test/espnet2/main_funcs/test_calculate_all_attentions.py | from collections import defaultdict
import numpy as np
import pytest
import torch
from espnet2.asr.decoder.rnn_decoder import RNNDecoder
from espnet2.main_funcs.calculate_all_attentions import calculate_all_attentions
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet.nets.pytorch_backend.rnn.attentions import AttAdd
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
class Dummy(AbsESPnetModel):
    """Model with two attention modules that records their attention weights.

    ``desired`` accumulates, per forward() call, the attention matrices that
    calculate_all_attentions() is expected to reproduce.
    """

    def __init__(self):
        super().__init__()
        self.att1 = MultiHeadedAttention(2, 10, 0.0)
        self.att2 = AttAdd(10, 20, 15)
        # name -> list of expected attention weights, one entry per forward()
        self.desired = defaultdict(list)

    def forward(self, x, x_lengths, y, y_lengths):
        self.att1(y, x, x, None)
        _, a2 = self.att2(x, x_lengths, y, None)
        # MultiHeadedAttention caches its weights on .attn after the call
        self.desired["att1"].append(self.att1.attn.squeeze(0))
        self.desired["att2"].append(a2)

    def collect_feats(self, **batch: torch.Tensor):
        return {}
class Dummy2(AbsESPnetModel):
    """Thin wrapper around RNNDecoder so its attention maps can be collected."""

    def __init__(self, atype):
        super().__init__()
        self.decoder = RNNDecoder(50, 128, att_conf=dict(atype=atype))

    def forward(self, x, x_lengths, y, y_lengths):
        self.decoder(x, x_lengths, y, y_lengths)

    def collect_feats(self, **batch: torch.Tensor):
        return {}
def test_calculate_all_attentions_MultiHeadedAttention():
    """calculate_all_attentions() must return exactly the weights the model saw.

    Compares the collected attention maps against the values the Dummy model
    recorded in ``desired`` during its own forward pass.
    """
    model = Dummy()
    bs = 2
    batch = {
        "x": torch.randn(bs, 3, 10),
        "x_lengths": torch.tensor([3, 2], dtype=torch.long),
        "y": torch.randn(bs, 2, 10),
        "y_lengths": torch.tensor([4, 4], dtype=torch.long),
    }
    t = calculate_all_attentions(model, batch)
    # (removed leftover debug print of the full attention dict)
    for k in model.desired:
        for i in range(bs):
            np.testing.assert_array_equal(t[k][i].numpy(), model.desired[k][i].numpy())
@pytest.mark.parametrize(
    "atype",
    [
        "noatt",
        "dot",
        "add",
        "location",
        "location2d",
        "location_recurrent",
        "coverage",
        "coverage_location",
        "multi_head_dot",
        "multi_head_add",
        "multi_head_loc",
        "multi_head_multi_res_loc",
    ],
)
def test_calculate_all_attentions(atype):
    """Collected maps must be (output-length, input-length) for every sample."""
    model = Dummy2(atype)
    bs = 2
    batch = {
        "x": torch.randn(bs, 20, 128),
        "x_lengths": torch.tensor([20, 17], dtype=torch.long),
        "y": torch.randint(0, 50, [bs, 7]),
        "y_lengths": torch.tensor([7, 5], dtype=torch.long),
    }
    t = calculate_all_attentions(model, batch)
    for k, o in t.items():
        for i, att in enumerate(o):
            print(att.shape)
            # Multi-head maps come with a leading head axis; add one to
            # single-head maps so both cases share the same shape check.
            if att.dim() == 2:
                att = att[None]
            for a in att:
                assert a.shape == (batch["y_lengths"][i], batch["x_lengths"][i])
| 2,743 | 28.505376 | 87 | py |
espnet | espnet-master/test/espnet2/slu/test_transcript_espnet_model.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.transformer_decoder import TransformerDecoder
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.slu.espnet_model import ESPnetSLUModel
from espnet2.slu.postdecoder.hugging_face_transformers_postdecoder import (
HuggingFaceTransformersPostDecoder,
)
from espnet2.slu.postencoder.conformer_postencoder import ConformerPostEncoder
from espnet2.slu.postencoder.transformer_postencoder import TransformerPostEncoder
is_torch_1_8_plus = V(torch.__version__) >= V("1.8.0")
@pytest.mark.parametrize("encoder_arch", [TransformerEncoder])
@pytest.mark.execution_timeout(50)
def test_slu_testing(encoder_arch):
    """Forward an ESPnetSLUModel in evaluation mode with WER reporting enabled."""
    if not is_torch_1_8_plus:
        return
    vocab_size = 5
    enc_out = 20
    encoder = encoder_arch(
        20,
        output_size=enc_out,
        linear_units=4,
        num_blocks=2,
    )
    decoder = TransformerDecoder(
        vocab_size,
        enc_out,
        linear_units=4,
        num_blocks=2,
    )
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out)
    model = ESPnetSLUModel(
        vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        transcript_token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
        deliberationencoder=None,
        postdecoder=None,
        joint_network=None,
        report_wer=True,
    )
    # Fix: eval() propagates training=False to all submodules (dropout etc.);
    # assigning model.training directly only affects the top-level module.
    model.eval()
    inputs = dict(
        speech=torch.randn(2, 10, 20, requires_grad=True),
        speech_lengths=torch.tensor([10, 8], dtype=torch.long),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long),
        text_lengths=torch.tensor([4, 3], dtype=torch.long),
        transcript=torch.randint(2, 4, [2, 4], dtype=torch.long),
        transcript_lengths=torch.tensor([4, 3], dtype=torch.long),
    )
    loss, *_ = model(**inputs)
@pytest.mark.parametrize("encoder_arch", [TransformerEncoder, ConformerEncoder])
@pytest.mark.parametrize(
    "encoder_del", [None, TransformerPostEncoder, ConformerPostEncoder]
)
@pytest.mark.parametrize("decoder_post", [None, HuggingFaceTransformersPostDecoder])
@pytest.mark.execution_timeout(50)
def test_slu_training(encoder_arch, encoder_del, decoder_post):
    """Train-mode forward/backward across encoder/deliberation/postdecoder combos."""
    if not is_torch_1_8_plus:
        return
    vocab_size = 5
    enc_out = 20
    encoder = encoder_arch(
        20,
        output_size=enc_out,
        linear_units=4,
        num_blocks=2,
    )
    decoder = TransformerDecoder(
        vocab_size,
        enc_out,
        linear_units=4,
        num_blocks=2,
    )
    if encoder_del is not None:
        del_encoder = encoder_del(
            20,
            output_size=enc_out,
            linear_units=4,
        )
    else:
        del_encoder = None
    if decoder_post is not None:
        post_decoder = decoder_post(
            "bert-base-uncased",
            output_size=enc_out,
        )
    else:
        post_decoder = None
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out)
    model = ESPnetSLUModel(
        vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        transcript_token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
        deliberationencoder=del_encoder,
        postdecoder=post_decoder,
        joint_network=None,
    )
    inputs = dict(
        speech=torch.randn(2, 10, 20, requires_grad=True),
        speech_lengths=torch.tensor([10, 8], dtype=torch.long),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long),
        text_lengths=torch.tensor([4, 3], dtype=torch.long),
        transcript=torch.randint(2, 4, [2, 4], dtype=torch.long),
        transcript_lengths=torch.tensor([4, 3], dtype=torch.long),
    )
    loss, *_ = model(**inputs)
    loss.backward()
@pytest.mark.parametrize("encoder_arch", [TransformerEncoder])
@pytest.mark.parametrize("encoder_post", [TransformerPostEncoder])
@pytest.mark.execution_timeout(100)
def test_slu_training_nlu_postencoder(encoder_arch, encoder_post):
    """Train-mode forward/backward with an NLU post-encoder attached."""
    if not is_torch_1_8_plus:
        return
    vocab_size = 5
    enc_out = 20
    encoder = encoder_arch(
        20,
        output_size=enc_out,
        linear_units=4,
        num_blocks=2,
    )
    decoder = TransformerDecoder(
        vocab_size,
        enc_out,
        linear_units=4,
        num_blocks=2,
    )
    if encoder_post is not None:
        post_encoder = encoder_post(
            20,
            output_size=enc_out,
            linear_units=4,
        )
    else:
        post_encoder = None
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out)
    model = ESPnetSLUModel(
        vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        transcript_token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
        postencoder=post_encoder,
        decoder=decoder,
        ctc=ctc,
        deliberationencoder=None,
        postdecoder=None,
        joint_network=None,
    )
    inputs = dict(
        speech=torch.randn(2, 10, 20, requires_grad=True),
        speech_lengths=torch.tensor([10, 8], dtype=torch.long),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long),
        text_lengths=torch.tensor([4, 3], dtype=torch.long),
        transcript=torch.randint(2, 4, [2, 4], dtype=torch.long),
        transcript_lengths=torch.tensor([4, 3], dtype=torch.long),
    )
    loss, *_ = model(**inputs)
    loss.backward()
@pytest.mark.parametrize("encoder_arch", [TransformerEncoder])
@pytest.mark.parametrize("ctc_weight", [0.0, 1.0])
@pytest.mark.execution_timeout(50)
def test_slu_no_ctc_training(encoder_arch, ctc_weight):
    """Forward/backward at the ctc_weight extremes (attention-only and CTC-only)."""
    if not is_torch_1_8_plus:
        return
    vocab_size = 5
    enc_out = 20
    encoder = encoder_arch(
        20,
        output_size=enc_out,
        linear_units=4,
        num_blocks=2,
    )
    decoder = TransformerDecoder(
        vocab_size,
        enc_out,
        linear_units=4,
        num_blocks=2,
    )
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out)
    model = ESPnetSLUModel(
        vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        transcript_token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
        ctc_weight=ctc_weight,
        deliberationencoder=None,
        postdecoder=None,
        joint_network=None,
    )
    inputs = dict(
        speech=torch.randn(2, 10, 20, requires_grad=True),
        speech_lengths=torch.tensor([10, 8], dtype=torch.long),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long),
        text_lengths=torch.tensor([4, 3], dtype=torch.long),
        transcript=torch.randint(2, 4, [2, 4], dtype=torch.long),
        transcript_lengths=torch.tensor([4, 3], dtype=torch.long),
    )
    loss, *_ = model(**inputs)
    loss.backward()
@pytest.mark.parametrize("extract_feats", [True, False])
def test_collect_feats(extract_feats):
    """collect_feats() should run whether or not feature extraction is enabled."""
    if not is_torch_1_8_plus:
        return
    vocab_size = 5
    enc_out = 20
    encoder = TransformerEncoder(
        20,
        output_size=enc_out,
        linear_units=4,
        num_blocks=2,
    )
    decoder = TransformerDecoder(
        vocab_size,
        enc_out,
        linear_units=4,
        num_blocks=2,
    )
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out)
    model = ESPnetSLUModel(
        vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        transcript_token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
        deliberationencoder=None,
        postdecoder=None,
        joint_network=None,
    )
    inputs = dict(
        speech=torch.randn(2, 10, 20, requires_grad=True),
        speech_lengths=torch.tensor([10, 8], dtype=torch.long),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long),
        text_lengths=torch.tensor([4, 3], dtype=torch.long),
        transcript=torch.randint(2, 4, [2, 4], dtype=torch.long),
        transcript_lengths=torch.tensor([4, 3], dtype=torch.long),
    )
    model.extract_feats_in_collect_stats = extract_feats
    model.collect_feats(**inputs)
| 8,945 | 28.919732 | 84 | py |
espnet | espnet-master/test/espnet2/slu/postencoder/test_conformer_encoder.py | import pytest
import torch
from espnet2.slu.postencoder.conformer_postencoder import ConformerPostEncoder
@pytest.mark.parametrize("input_layer", ["linear"])
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d", "conv1d-linear"])
@pytest.mark.parametrize(
    "rel_pos_type, pos_enc_layer_type, selfattention_layer_type",
    [
        ("legacy", "abs_pos", "selfattn"),
        ("latest", "rel_pos", "rel_selfattn"),
        ("legacy", "rel_pos", "rel_selfattn"),
        ("legacy", "legacy_rel_pos", "legacy_rel_selfattn"),
    ],
)
def test_encoder_forward_backward(
    input_layer,
    positionwise_layer_type,
    rel_pos_type,
    pos_enc_layer_type,
    selfattention_layer_type,
):
    """Forward/backward must run for each positional-encoding/attention combo."""
    encoder = ConformerPostEncoder(
        20,
        output_size=2,
        attention_heads=2,
        linear_units=4,
        num_blocks=2,
        input_layer=input_layer,
        macaron_style=False,
        rel_pos_type=rel_pos_type,
        pos_enc_layer_type=pos_enc_layer_type,
        selfattention_layer_type=selfattention_layer_type,
        activation_type="swish",
        use_cnn_module=True,
        cnn_module_kernel=3,
        positionwise_layer_type=positionwise_layer_type,
    )
    x = torch.randn(2, 32, 2, requires_grad=True)
    x_lens = torch.LongTensor([32, 28])
    # (removed leftover debug prints of x.shape / x_lens.shape)
    y, _ = encoder(x, x_lens)
    y.sum().backward()
def test_encoder_invalid_layer_type():
    """Unknown rel_pos/pos_enc/attention layer names must raise ValueError."""
    with pytest.raises(ValueError):
        ConformerPostEncoder(20, rel_pos_type="dummy")
    with pytest.raises(ValueError):
        ConformerPostEncoder(20, pos_enc_layer_type="dummy")
    with pytest.raises(ValueError):
        # NOTE(review): "abc_pos" looks like a typo for "abs_pos"; a ValueError
        # is expected either way -- confirm the intended trigger.
        ConformerPostEncoder(
            20, pos_enc_layer_type="abc_pos", selfattention_layer_type="dummy"
        )
def test_encoder_invalid_rel_pos_combination():
    """Mismatched rel_pos/pos_enc/attention combinations must fail assertions."""
    # NOTE(review): "legacy_rel_sselfattn"/"rel_sselfattn" appear to be typos of
    # "...selfattn"; the invalid-combination assertion fires regardless -- confirm.
    with pytest.raises(AssertionError):
        ConformerPostEncoder(
            20,
            rel_pos_type="latest",
            pos_enc_layer_type="legacy_rel_pos",
            selfattention_layer_type="legacy_rel_sselfattn",
        )
    with pytest.raises(AssertionError):
        ConformerPostEncoder(
            20,
            pos_enc_layer_type="rel_pos",
            selfattention_layer_type="legacy_rel_sselfattn",
        )
    with pytest.raises(AssertionError):
        ConformerPostEncoder(
            20,
            pos_enc_layer_type="legacy_rel_pos",
            selfattention_layer_type="rel_sselfattn",
        )
def test_encoder_output_size():
    """output_size() must echo the configured output dimension."""
    assert ConformerPostEncoder(20, output_size=256).output_size() == 256
def test_encoder_invalid_type():
    """An unknown input_layer name must raise ValueError."""
    bad_layer = "fff"
    with pytest.raises(ValueError):
        ConformerPostEncoder(20, input_layer=bad_layer)
| 2,716 | 29.188889 | 80 | py |
espnet | espnet-master/test/espnet2/slu/postencoder/test_transformer_encoder.py | import pytest
import torch
from espnet2.slu.postencoder.transformer_postencoder import TransformerPostEncoder
@pytest.mark.parametrize("input_layer", ["linear", "None"])
@pytest.mark.parametrize("positionwise_layer_type", ["conv1d", "conv1d-linear"])
def test_Encoder_forward_backward(
    input_layer,
    positionwise_layer_type,
):
    """Forward and backward should run for each input/positionwise layer combo."""
    enc = TransformerPostEncoder(
        20,
        output_size=40,
        input_layer=input_layer,
        positionwise_layer_type=positionwise_layer_type,
    )
    feats = torch.randn(2, 10, 20, requires_grad=True)
    feat_lens = torch.LongTensor([10, 8])
    out, _ = enc(feats, feat_lens)
    out.sum().backward()
def test_Encoder_output_size():
    """output_size() must echo the configured output dimension."""
    assert TransformerPostEncoder(20, output_size=256).output_size() == 256
def test_Encoder_invalid_type():
    """An unknown input_layer name must raise ValueError."""
    bad_layer = "fff"
    with pytest.raises(ValueError):
        TransformerPostEncoder(20, input_layer=bad_layer)
| 908 | 26.545455 | 82 | py |
espnet | espnet-master/test/espnet2/slu/postdecoder/test_hugging_face_transformers_postdecoder.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.slu.postdecoder.hugging_face_transformers_postdecoder import (
HuggingFaceTransformersPostDecoder,
)
is_torch_1_8_plus = V(torch.__version__) >= V("1.8.0")
@pytest.mark.execution_timeout(50)
def test_transformers_forward():
    """Forward a tokenized transcript through the BERT post-decoder."""
    if not is_torch_1_8_plus:
        return
    postdecoder = HuggingFaceTransformersPostDecoder("bert-base-cased", 400)
    max_length = 128
    transcript_data = ["increase the heating in the bathroom"]
    (
        transcript_input_id_features,
        transcript_input_mask_features,
        transcript_segment_ids_feature,
        transcript_position_ids_feature,
        input_id_length,
    ) = postdecoder.convert_examples_to_features(transcript_data, max_length)
    y = postdecoder(
        torch.LongTensor(transcript_input_id_features),
        torch.LongTensor(transcript_input_mask_features),
        torch.LongTensor(transcript_segment_ids_feature),
        torch.LongTensor(transcript_position_ids_feature),
    )
    odim = postdecoder.output_size()
    assert y.shape == torch.Size([1, max_length, odim])
@pytest.mark.execution_timeout(30)
def test_convert_examples_to_features():
    """All feature arrays from convert_examples_to_features are (1, max_length)."""
    if not is_torch_1_8_plus:
        return
    postdecoder = HuggingFaceTransformersPostDecoder("bert-base-cased", 400)
    max_length = 128
    transcript_data = ["increase the heating in the bathroom"]
    (
        transcript_input_id_features,
        transcript_input_mask_features,
        transcript_segment_ids_feature,
        transcript_position_ids_feature,
        input_id_length,
    ) = postdecoder.convert_examples_to_features(transcript_data, max_length)
    assert torch.LongTensor(transcript_input_id_features).shape == torch.Size(
        [1, max_length]
    )
    assert torch.LongTensor(transcript_input_mask_features).shape == torch.Size(
        [1, max_length]
    )
    assert torch.LongTensor(transcript_segment_ids_feature).shape == torch.Size(
        [1, max_length]
    )
    assert torch.LongTensor(transcript_position_ids_feature).shape == torch.Size(
        [1, max_length]
    )
    assert torch.LongTensor(input_id_length).shape == torch.Size([1])
| 2,203 | 33.984127 | 81 | py |
espnet | espnet-master/test/espnet2/bin/test_slu_inference.py | import string
from argparse import ArgumentParser
from distutils.version import LooseVersion
from pathlib import Path
import numpy as np
import pytest
import torch
from espnet2.bin.slu_inference import Speech2Understand, get_parser, main
from espnet2.tasks.lm import LMTask
from espnet2.tasks.slu import SLUTask
from espnet.nets.beam_search import Hypothesis
is_torch_1_5_plus = LooseVersion(torch.__version__) >= LooseVersion("1.5.0")
def test_get_parser():
    """get_parser() must build an argparse.ArgumentParser."""
    parser = get_parser()
    assert isinstance(parser, ArgumentParser)
def test_main():
    """main() with no CLI arguments should exit via SystemExit (argparse usage)."""
    with pytest.raises(SystemExit):
        main()
@pytest.fixture()
def token_list(tmp_path: Path):
    """Write a char token file (<blank>, letters, <unk>, <sos/eos>); return path."""
    path = tmp_path / "tokens.txt"
    tokens = ["<blank>", *string.ascii_letters, "<unk>", "<sos/eos>"]
    path.write_text("".join(f"{tok}\n" for tok in tokens))
    return path
@pytest.fixture()
def slu_config_file(tmp_path: Path, token_list):
    """Generate a default SLU training config (dry run) and return its path."""
    # Write default configuration file
    SLUTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path / "slu"),
            "--token_list",
            str(token_list),
            "--transcript_token_list",
            str(token_list),
            "--token_type",
            "char",
        ]
    )
    return tmp_path / "slu" / "config.yaml"
@pytest.mark.execution_timeout(50)
def test_Speech2Understand(slu_config_file):
    """Greedy (beam_size=1) decoding should yield well-typed n-best results."""
    speech2understand = Speech2Understand(slu_train_config=slu_config_file, beam_size=1)
    speech = np.random.randn(100000)
    results = speech2understand(speech)
    for text, token, token_int, hyp in results:
        assert isinstance(text, str)
        assert isinstance(token[0], str)
        assert isinstance(token_int[0], int)
        assert isinstance(hyp, Hypothesis)
@pytest.mark.execution_timeout(50)
def test_Speech2Understand_transcript(slu_config_file):
    """Decoding with an auxiliary transcript tensor should yield typed results."""
    speech2understand = Speech2Understand(slu_train_config=slu_config_file)
    speech = np.random.randn(100000)
    transcript = torch.randint(2, 4, [1, 4], dtype=torch.long)
    results = speech2understand(speech, transcript)
    for text, token, token_int, hyp in results:
        assert isinstance(text, str)
        assert isinstance(token[0], str)
        assert isinstance(token_int[0], int)
        assert isinstance(hyp, Hypothesis)
@pytest.fixture()
def lm_config_file(tmp_path: Path, token_list):
    """Generate a default LM training config (dry run) and return its path."""
    # Write default configuration file
    LMTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path / "lm"),
            "--token_list",
            str(token_list),
            "--token_type",
            "char",
        ]
    )
    return tmp_path / "lm" / "config.yaml"
@pytest.mark.execution_timeout(10)
@pytest.mark.parametrize(
    "use_lm, token_type",
    [
        (False, "char"),
        (True, "char"),
        (False, "bpe"),
        (False, None),
    ],
)
def test_Speech2Understand_lm(use_lm, token_type, slu_config_file, lm_config_file):
    """Decoding should work with/without LM fusion and for several token types."""
    speech2understand = Speech2Understand(
        slu_train_config=slu_config_file,
        lm_train_config=lm_config_file if use_lm else None,
        beam_size=1,
        token_type=token_type,
    )
    speech = np.random.randn(100000)
    results = speech2understand(speech)
    for text, token, token_int, hyp in results:
        # text may be None when detokenization is disabled (token_type=None)
        assert text is None or isinstance(text, str)
        assert isinstance(token[0], str)
        assert isinstance(token_int[0], int)
        assert isinstance(hyp, Hypothesis)
@pytest.mark.execution_timeout(5)
def test_Speech2Understand_quantized(slu_config_file, lm_config_file):
    """Dynamic quantization of the SLU model and LM must not break decoding."""
    speech2understand = Speech2Understand(
        slu_train_config=slu_config_file,
        lm_train_config=lm_config_file,
        beam_size=1,
        quantize_asr_model=True,
        quantize_lm=True,
    )
    speech = np.random.randn(100000)
    results = speech2understand(speech)
    for text, token, token_int, hyp in results:
        assert isinstance(text, str)
        assert isinstance(token[0], str)
        assert isinstance(token_int[0], int)
        assert isinstance(hyp, Hypothesis)
| 4,171 | 27.972222 | 88 | py |
espnet | espnet-master/test/espnet2/bin/test_enh_inference.py | import string
from argparse import ArgumentParser
from pathlib import Path
import pytest
import torch
import yaml
from espnet2.bin.enh_inference import SeparateSpeech, get_parser, main
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.tasks.enh import EnhancementTask
from espnet2.tasks.enh_s2t import EnhS2TTask
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
def test_get_parser():
    """get_parser() must build an argparse.ArgumentParser."""
    parser = get_parser()
    assert isinstance(parser, ArgumentParser)
def test_main():
    """main() with no CLI arguments should exit via SystemExit (argparse usage)."""
    with pytest.raises(SystemExit):
        main()
@pytest.fixture()
def config_file(tmp_path: Path):
    """Dump a default EnhancementTask config, backfilling STFT encoder defaults."""
    # Write default configuration file
    EnhancementTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path / "enh"),
        ]
    )
    with open(tmp_path / "enh" / "config.yaml", "r") as f:
        args = yaml.safe_load(f)
        # An empty encoder_conf would fail at load time; fill in STFT defaults.
        if args["encoder"] == "stft" and len(args["encoder_conf"]) == 0:
            args["encoder_conf"] = get_default_kwargs(STFTEncoder)
    with open(tmp_path / "enh" / "config.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh" / "config.yaml"
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize(
    "input_size, segment_size, hop_size, normalize_segment_scale",
    [(16000, None, None, False), (35000, 2.4, 0.8, False), (35000, 2.4, 0.8, True)],
)
def test_SeparateSpeech(
    config_file, batch_size, input_size, segment_size, hop_size, normalize_segment_scale
):
    """Smoke-test full-utterance and segment-wise speech separation."""
    separate_speech = SeparateSpeech(
        train_config=config_file,
        segment_size=segment_size,
        hop_size=hop_size,
        normalize_segment_scale=normalize_segment_scale,
    )
    wav = torch.rand(batch_size, input_size)
    separate_speech(wav, fs=8000)
@pytest.fixture()
def enh_inference_config(tmp_path: Path):
    """Write a minimal valid enhancement inference config and return its path."""
    # Write default configuration file
    args = {
        "encoder": "stft",
        "encoder_conf": {"n_fft": 64, "hop_length": 32},
        "decoder": "stft",
        "decoder_conf": {"n_fft": 64, "hop_length": 32},
    }
    (tmp_path / "enh").mkdir(parents=True, exist_ok=True)
    with open(tmp_path / "enh" / "inference.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh" / "inference.yaml"
@pytest.fixture()
def invalid_enh_inference_config(tmp_path: Path):
    """Write an inference config with an unknown key ("xxx"); used for failures."""
    # Write default configuration file
    args = {
        "encoder": "stft",
        "encoder_conf": {"n_fft": 64, "hop_length": 32},
        "xxx": "invalid",
    }
    (tmp_path / "enh").mkdir(parents=True, exist_ok=True)
    with open(tmp_path / "enh" / "invalid_inference.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh" / "invalid_inference.yaml"
@pytest.mark.execution_timeout(5)
def test_SeparateSpeech_with_inference_config(config_file, enh_inference_config):
    """Overriding the train config with an inference config must still separate."""
    separator = SeparateSpeech(
        train_config=config_file, inference_config=enh_inference_config
    )
    separator(torch.rand(2, 16000), fs=8000)
def test_SeparateSpeech_invalid_inference_config(
    enh_inference_config, invalid_enh_inference_config
):
    """Missing model file or unknown config keys must raise AssertionError."""
    with pytest.raises(AssertionError):
        SeparateSpeech(
            train_config=None, model_file=None, inference_config=enh_inference_config
        )
    with pytest.raises(AssertionError):
        SeparateSpeech(train_config=None, inference_config=invalid_enh_inference_config)
@pytest.fixture()
def token_list(tmp_path: Path):
    """Write a char token file (<blank>, letters, <unk>, <sos/eos>); return path."""
    path = tmp_path / "tokens.txt"
    tokens = ["<blank>", *string.ascii_letters, "<unk>", "<sos/eos>"]
    path.write_text("".join(f"{tok}\n" for tok in tokens))
    return path
@pytest.fixture()
def enh_s2t_config_file(tmp_path: Path, token_list):
    """Generate a default EnhS2T config (dry run) with STFT defaults filled in."""
    # Write default configuration file
    EnhS2TTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path / "enh_s2t"),
            "--token_list",
            str(token_list),
            "--token_type",
            "char",
            "--asr_decoder",
            "rnn",
        ]
    )
    with open(tmp_path / "enh_s2t" / "config.yaml", "r") as f:
        args = yaml.safe_load(f)
        # An empty enh_encoder_conf would fail at load time; fill STFT defaults.
        if args["enh_encoder"] == "stft" and len(args["enh_encoder_conf"]) == 0:
            args["enh_encoder_conf"] = get_default_kwargs(STFTEncoder)
    with open(tmp_path / "enh_s2t" / "config.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh_s2t" / "config.yaml"
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize(
    "input_size, segment_size, hop_size, normalize_segment_scale",
    [(16000, None, None, False), (35000, 2.4, 0.8, False), (35000, 2.4, 0.8, True)],
)
def test_enh_s2t_SeparateSpeech(
    enh_s2t_config_file,
    batch_size,
    input_size,
    segment_size,
    hop_size,
    normalize_segment_scale,
):
    """Smoke-test separation via the enhancement front-end of an EnhS2T model."""
    separate_speech = SeparateSpeech(
        train_config=enh_s2t_config_file,
        segment_size=segment_size,
        hop_size=hop_size,
        normalize_segment_scale=normalize_segment_scale,
        enh_s2t_task=True,
    )
    wav = torch.rand(batch_size, input_size)
    separate_speech(wav, fs=8000)
@pytest.fixture()
def enh_s2t_inference_config(tmp_path: Path):
    """Write a minimal valid EnhS2T inference config and return its path."""
    # Write default configuration file
    args = {
        "enh_encoder": "stft",
        "enh_encoder_conf": {"n_fft": 64, "hop_length": 32},
        "enh_decoder": "stft",
        "enh_decoder_conf": {"n_fft": 64, "hop_length": 32},
    }
    (tmp_path / "enh_s2t").mkdir(parents=True, exist_ok=True)
    with open(tmp_path / "enh_s2t" / "inference.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh_s2t" / "inference.yaml"
@pytest.fixture()
def invalid_enh_s2t_inference_config(tmp_path: Path):
    """Write an EnhS2T inference config containing an unknown key ("xxx")."""
    # Write default configuration file
    args = {
        "enh_encoder": "stft",
        "enh_encoder_conf": {"n_fft": 64, "hop_length": 32},
        "xxx": "invalid",
    }
    (tmp_path / "enh_s2t").mkdir(parents=True, exist_ok=True)
    with open(tmp_path / "enh_s2t" / "invalid_inference.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh_s2t" / "invalid_inference.yaml"
@pytest.mark.execution_timeout(5)
def test_enh_s2t_SeparateSpeech_with_inference_config(
    enh_s2t_config_file, enh_s2t_inference_config
):
    """Inference-config overrides should work for the EnhS2T separation path."""
    separate_speech = SeparateSpeech(
        train_config=enh_s2t_config_file,
        inference_config=enh_s2t_inference_config,
        enh_s2t_task=True,
    )
    wav = torch.rand(2, 16000)
    separate_speech(wav, fs=8000)
def test_enh_s2t_SeparateSpeech_invalid_inference_config(
    enh_s2t_inference_config, invalid_enh_s2t_inference_config
):
    """Missing model file or unknown config keys must raise AssertionError."""
    with pytest.raises(AssertionError):
        SeparateSpeech(
            train_config=None,
            model_file=None,
            inference_config=enh_s2t_inference_config,
            enh_s2t_task=True,
        )
    with pytest.raises(AssertionError):
        SeparateSpeech(
            train_config=None,
            inference_config=invalid_enh_s2t_inference_config,
            enh_s2t_task=True,
        )
| 7,502 | 29.75 | 88 | py |
espnet | espnet-master/test/espnet2/bin/test_enh_inference_streaming.py | from argparse import ArgumentParser
from pathlib import Path
import pytest
import torch
import yaml
from espnet2.bin.enh_inference_streaming import (
SeparateSpeechStreaming,
get_parser,
main,
)
from espnet2.tasks.enh import EnhancementTask
from espnet2.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
def test_get_parser():
    """get_parser() must build an argparse.ArgumentParser."""
    parser = get_parser()
    assert isinstance(parser, ArgumentParser)
def test_main():
    """main() with no CLI arguments should exit via SystemExit (argparse usage)."""
    with pytest.raises(SystemExit):
        main()
@pytest.fixture()
def config_file(tmp_path: Path):
    """Dump a default Enh config, then rewrite it as a causal SkiM streaming setup."""
    # Write default configuration file
    EnhancementTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path / "enh"),
        ]
    )
    with open(tmp_path / "enh" / "config.yaml", "r") as f:
        args = yaml.safe_load(f)
    # Streaming inference needs a causal separator without segment overlap.
    args.update(
        {
            "encoder": "stft",
            "encoder_conf": {"n_fft": 512, "hop_length": 256},
            "decoder": "stft",
            "decoder_conf": {"n_fft": 512, "hop_length": 256},
            "separator": "skim",
            "separator_conf": {
                "causal": True,
                "seg_overlap": False,
                "num_spk": 2,
                "unit": 128,
                "layer": 2,
            },
        }
    )
    with open(tmp_path / "enh" / "config.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh" / "config.yaml"
@pytest.mark.execution_timeout(10)
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize("input_size", [8000, 16000])
def test_SeparateSpeech(
    config_file,
    batch_size,
    input_size,
):
    """Chunk a waveform, run streaming separation per chunk, then merge."""
    separate_speech = SeparateSpeechStreaming(
        train_config=config_file,
    )
    wav = torch.rand(batch_size, input_size)
    # NOTE(review): ilens has a single entry regardless of batch_size -- confirm
    # merge() broadcasts it over the batch.
    ilens = torch.LongTensor([input_size])
    speech_sim_chunks = separate_speech.frame(wav)
    output_chunks = [[] for ii in range(separate_speech.num_spk)]
    for chunk in speech_sim_chunks:
        output = separate_speech(chunk)
        for channel in range(separate_speech.num_spk):
            output_chunks[channel].append(output[channel])
    separate_speech.reset()
    # Merge back to full-length waveforms per speaker; smoke check only.
    waves = [separate_speech.merge(chunks, ilens) for chunks in output_chunks]
| 2,274 | 24.852273 | 78 | py |
espnet | espnet-master/test/espnet2/bin/test_diar_inference.py | from argparse import ArgumentParser
from pathlib import Path
import pytest
import torch
from espnet2.bin.diar_inference import DiarizeSpeech, get_parser, main
from espnet2.tasks.diar import DiarizationTask
from espnet2.tasks.enh_s2t import EnhS2TTask
def test_get_parser():
    """get_parser() must build an argparse.ArgumentParser."""
    parser = get_parser()
    assert isinstance(parser, ArgumentParser)
def test_main():
    """main() with no CLI arguments should exit via SystemExit (argparse usage)."""
    with pytest.raises(SystemExit):
        main()
@pytest.fixture()
def diar_config_file(tmp_path: Path):
    """Generate a default 2-speaker diarization config (dry run)."""
    # Write default configuration file
    DiarizationTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path),
            "--num_spk",
            "2",
        ]
    )
    return tmp_path / "config.yaml"
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize("batch_size", [1])
@pytest.mark.parametrize(
    "input_size, segment_size, normalize_segment_scale, num_spk",
    [(16000, None, False, 2), (35000, 2.4, False, 2), (34000, 2.4, True, 2)],
)
def test_DiarizeSpeech(
    diar_config_file,
    batch_size,
    input_size,
    segment_size,
    normalize_segment_scale,
    num_spk,
):
    """Smoke-test DiarizeSpeech with and without segment-wise processing."""
    diarize_speech = DiarizeSpeech(
        train_config=diar_config_file,
        segment_size=segment_size,
        normalize_segment_scale=normalize_segment_scale,
        num_spk=num_spk,
    )
    wav = torch.rand(batch_size, input_size)
    diarize_speech(wav, fs=8000)
@pytest.fixture()
def diar_config_file2(tmp_path: Path):
    """Generate a 2-speaker diarization config with an RNN attractor (dry run)."""
    # Write default configuration file
    DiarizationTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path),
            "--attractor",
            "rnn",
            "--attractor_conf",
            "unit=256",
            "--num_spk",
            "2",
        ]
    )
    return tmp_path / "config.yaml"
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize("batch_size", [1])
@pytest.mark.parametrize(
    "input_size, segment_size, normalize_segment_scale",
    [
        (16000, None, False),
        (35000, 2.4, False),
        (34000, 2.4, True),
    ],
)
@pytest.mark.parametrize("num_spk", [None, 2])
def test_DiarizeSpeech2(
    diar_config_file2,
    batch_size,
    input_size,
    segment_size,
    normalize_segment_scale,
    num_spk,
):
    """Smoke-test attractor-based diarization; num_spk=None lets it be inferred."""
    diarize_speech = DiarizeSpeech(
        train_config=diar_config_file2,
        segment_size=segment_size,
        normalize_segment_scale=normalize_segment_scale,
        num_spk=num_spk,
    )
    wav = torch.rand(batch_size, input_size)
    diarize_speech(wav, fs=8000)
@pytest.fixture()
def diarsep_config_file(tmp_path: Path):
    """Generate a joint enh+diar (EnhS2T) config for separation-based diarization."""
    # Write default configuration file
    EnhS2TTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path),
            "--diar_num_spk",
            "2",
            "--diar_frontend",
            "default",
            "--subtask_series",
            "enh",
            "diar",
            "--diar_input_size",
            "128",
            "--enh_separator",
            "tcn_nomask",
            "--enh_separator_conf",
            "bottleneck_dim=128",
            "--enh_mask_module",
            "multi_mask",
            "--enh_mask_module_conf",
            "bottleneck_dim=128",
        ]
    )
    return tmp_path / "config.yaml"
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize("batch_size", [1])
@pytest.mark.parametrize(
    "input_size, segment_size, hop_size, normalize_segment_scale, num_spk",
    [
        (16000, None, None, False, 2),
        (35000, 2.4, 1.2, False, 2),
        (34000, 2.4, 1.2, True, 2),
    ],
)
@pytest.mark.parametrize("multiply_diar_result", [True, False])
def test_DiarSepSpeech(
    diarsep_config_file,
    batch_size,
    input_size,
    segment_size,
    hop_size,
    normalize_segment_scale,
    num_spk,
    multiply_diar_result,
):
    """Smoke-test joint separation+diarization inference (EnhS2T model)."""
    diarize_speech = DiarizeSpeech(
        train_config=diarsep_config_file,
        segment_size=segment_size,
        hop_size=hop_size,
        normalize_segment_scale=normalize_segment_scale,
        num_spk=num_spk,
        enh_s2t_task=True,
        normalize_output_wav=True,
        multiply_diar_result=multiply_diar_result,
    )
    wav = torch.rand(batch_size, input_size)
    diarize_speech(wav, fs=8000)
| 4,311 | 23.781609 | 77 | py |
espnet | espnet-master/test/espnet2/bin/test_asr_transducer_inference.py | import string
from argparse import ArgumentParser
from distutils.version import LooseVersion
from pathlib import Path
from typing import List
import numpy as np
import pytest
import torch
from espnet2.asr_transducer.beam_search_transducer import Hypothesis
from espnet2.bin.asr_transducer_inference import Speech2Text, get_parser, main
from espnet2.tasks.asr_transducer import ASRTransducerTask
from espnet2.tasks.lm import LMTask
is_torch_1_5_plus = LooseVersion(torch.__version__) >= LooseVersion("1.5.0")
def test_get_parser():
    """The transducer-inference CLI parser must be an argparse.ArgumentParser."""
    parser = get_parser()
    assert isinstance(parser, ArgumentParser)
def test_main():
    """main() without CLI arguments must exit through argparse (SystemExit)."""
    with pytest.raises(SystemExit):
        main()
@pytest.fixture()
def output_dir(tmp_path: Path):
    """Directory under tmp_path for ASR model artefacts."""
    return tmp_path.joinpath("asr")
@pytest.fixture()
def token_list(tmp_path: Path):
    """Write a character-level token list file and return its path."""
    path = tmp_path / "tokens.txt"
    # <blank> first, then all ASCII letters, then the unknown token.
    tokens = ["<blank>", *string.ascii_letters, "<unk>"]
    path.write_text("".join(tok + "\n" for tok in tokens))
    return path
@pytest.fixture()
def asr_config_file(tmp_path: Path, token_list):
    """Dump a minimal (tiny-dimension) offline Transducer ASR config.

    Builds a one-block Conformer encoder, RNN decoder and joint network,
    then runs ASRTransducerTask with --dry_run so only the config is written.
    """
    # Conformer body kept tiny so the smoke tests stay fast.
    enc_body_conf = (
        "{'body_conf': [{'block_type': 'conformer',"
        " 'hidden_size': 4, 'linear_size': 4,"
        " 'conv_mod_kernel_size': 3}]}"
    )
    decoder_conf = "{'hidden_size': 4}"
    joint_net_conf = "{'joint_space_size': 4}"
    ASRTransducerTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path / "asr"),
            "--token_list",
            str(token_list),
            "--token_type",
            "char",
            "--encoder_conf",
            enc_body_conf,
            "--decoder",
            "rnn",
            "--decoder_conf",
            decoder_conf,
            "--joint_network_conf",
            joint_net_conf,
        ]
    )
    return tmp_path / "asr" / "config.yaml"
@pytest.fixture(
    params=[
        "conv2d_branchformer",
        "vgg_branchformer",
        "conv2d_conformer",
        "vgg_conformer",
        "conv2d_ebranchformer",
        "vgg_ebranchformer",
    ]
)
def asr_stream_config_file(request, tmp_path: Path, token_list):
    """Dump a streaming (dynamic-chunk) Transducer config per encoder variant.

    The fixture param encodes "<input-module>_<main-block>"; ``vgg_*``
    variants additionally enable the VGG-like input module.

    Fix: the encoder config contained a stray quote after the boolean
    (``'dynamic_chunk_training': True',``); YAML then parsed the value as
    the string "True'" rather than the boolean True.
    """
    main_type = request.param.split("_")[1]
    enc_body_conf = (
        "{'body_conf': [{'block_type': '%s', 'hidden_size': 4, "
        "'linear_size': 4, 'conv_mod_kernel_size': 3},"
        "{'block_type': 'conv1d', 'kernel_size': 2, 'output_size': 2, "
        "'batch_norm': True, 'relu': True}], "
        "'main_conf': {'dynamic_chunk_training': True,"
        "'short_chunk_size': 1, 'num_left_chunks': 1}}"
    ) % (main_type)
    if request.param.startswith("vgg"):
        # Replace the closing brace with an extra input_conf entry.
        enc_body_conf = enc_body_conf[:-1] + (",'input_conf': {'vgg_like': True}}")
    decoder_conf = "{'hidden_size': 4}"
    joint_net_conf = "{'joint_space_size': 4}"
    ASRTransducerTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path / "asr_stream"),
            "--token_list",
            str(token_list),
            "--token_type",
            "char",
            "--encoder_conf",
            enc_body_conf,
            "--decoder",
            "rnn",
            "--decoder_conf",
            decoder_conf,
            "--joint_network_conf",
            joint_net_conf,
        ]
    )
    return tmp_path / "asr_stream" / "config.yaml"
@pytest.fixture()
def lm_config_file(tmp_path: Path, token_list):
    """Dump a tiny language-model training config (1 layer, 8 units)."""
    lm_conf = "{'nlayers': 1, 'unit': 8}"
    LMTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path / "lm"),
            "--token_list",
            str(token_list),
            "--token_type",
            "char",
            "--lm_conf",
            lm_conf,
        ]
    )
    return tmp_path / "lm" / "config.yaml"
@pytest.mark.execution_timeout(10)
@pytest.mark.parametrize(
    "use_lm, token_type",
    [
        (False, "char"),
        (True, "char"),
        (False, "bpe"),
        (False, None),
    ],
)
def test_Speech2Text(use_lm, token_type, asr_config_file, lm_config_file):
    """Offline Transducer decoding should yield well-typed N-best results."""
    s2t = Speech2Text(
        asr_train_config=asr_config_file,
        lm_train_config=lm_config_file if use_lm else None,
        beam_size=1,
        token_type=token_type,
    )
    nbest = s2t.hypotheses_to_results(s2t(np.random.randn(10000)))
    for text, token, token_int, hyp in nbest:
        assert text is None or isinstance(text, str)
        assert isinstance(token, List)
        assert isinstance(token_int, List)
        assert isinstance(hyp, Hypothesis)
@pytest.mark.execution_timeout(10)
@pytest.mark.parametrize(
    "use_lm, token_type, beam_search_config, decoding_window, left_context",
    [
        (False, "char", {"search_type": "default"}, 160, 0),
        (True, "char", {"search_type": "default"}, 160, 1),
        (False, "bpe", {"search_type": "default"}, 160, 0),
        (False, None, {"search_type": "default"}, 160, 1),
        (False, "char", {"search_type": "default"}, 320, 0),
        (False, "char", {"search_type": "tsd"}, 160, 1),
        (False, "char", {"search_type": "maes"}, 160, 1),
    ],
)
def test_streaming_Speech2Text(
    use_lm,
    token_type,
    beam_search_config,
    decoding_window,
    left_context,
    asr_stream_config_file,
    lm_config_file,
):
    """Chunk-by-chunk streaming decoding should yield well-typed results.

    Feeds random audio in windows of ``decoding_samples`` and flushes with
    ``is_final=True`` on the last (possibly partial) chunk.
    """
    speech2text = Speech2Text(
        asr_train_config=asr_stream_config_file,
        lm_train_config=lm_config_file if use_lm else None,
        beam_size=2,
        beam_search_config=beam_search_config,
        token_type=token_type,
        streaming=True,
        decoding_window=decoding_window,
        left_context=left_context,
    )
    speech = np.random.randn(10000)
    decoding_samples = speech2text.audio_processor.decoding_samples
    decoding_steps = len(speech) // decoding_samples
    # `hyps` is only assigned on the final (is_final=True) iteration below.
    for i in range(0, decoding_steps + 1, 1):
        _start = i * decoding_samples
        if i == decoding_steps:
            hyps = speech2text.streaming_decode(
                speech[i * decoding_samples : len(speech)], is_final=True
            )
        else:
            # NOTE(review): the `- 1` drops the last sample of every
            # non-final chunk — presumably harmless for a smoke test, but
            # confirm it is intentional.
            speech2text.streaming_decode(
                speech[(i * decoding_samples) : _start + decoding_samples - 1],
                is_final=False,
            )
    results = speech2text.hypotheses_to_results(hyps)
    for text, token, token_int, hyp in results:
        assert text is None or isinstance(text, str)
        assert isinstance(token, List)
        assert isinstance(token_int, List)
        assert isinstance(hyp, Hypothesis)
# TO DO: upload mini_an4 pre-trained model to huggingface for additional tests.
def test_pretrained_speech2Text(asr_config_file):
    """from_pretrained with no model tag should fall back to the local config."""
    s2t = Speech2Text.from_pretrained(
        model_tag=None,
        asr_train_config=asr_config_file,
        beam_size=1,
    )
    _ = s2t(np.random.randn(100000))
@pytest.mark.parametrize(
    "quantize_params",
    [
        {},
        {"quantize_modules": ["LSTM", "Linear"]},
        {"quantize_dtype": "float16"},
    ],
)
def test_Speech2Text_quantization(asr_config_file, lm_config_file, quantize_params):
    """Dynamic quantization: float16 needs torch>=1.5, otherwise ValueError."""
    kwargs = dict(
        asr_train_config=asr_config_file,
        lm_train_config=None,
        beam_size=1,
        token_type="char",
        quantize_asr_model=True,
        **quantize_params,
    )
    fp16_unsupported = (
        not is_torch_1_5_plus and quantize_params.get("quantize_dtype") == "float16"
    )
    if fp16_unsupported:
        with pytest.raises(ValueError):
            Speech2Text(**kwargs)
    else:
        speech2text = Speech2Text(**kwargs)
        _ = speech2text(np.random.randn(100000))
def test_Speech2Text_quantization_wrong_module(asr_config_file, lm_config_file):
    """An unknown module name in quantize_modules must raise ValueError."""
    with pytest.raises(ValueError):
        Speech2Text(
            asr_train_config=asr_config_file,
            lm_train_config=None,
            beam_size=1,
            token_type="char",
            quantize_asr_model=True,
            quantize_modules=["foo"],
        )
| 8,279 | 27.453608 | 84 | py |
espnet | espnet-master/test/espnet2/bin/test_enh_tse_inference.py | from argparse import ArgumentParser
from pathlib import Path
import pytest
import torch
import yaml
from espnet2.bin.enh_tse_inference import SeparateSpeech, get_parser, main
from espnet2.enh.encoder.stft_encoder import STFTEncoder
from espnet2.tasks.enh_tse import TargetSpeakerExtractionTask
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
def test_get_parser():
    """get_parser() must return an argparse.ArgumentParser instance."""
    assert isinstance(get_parser(), ArgumentParser)
def test_main():
    """main() without CLI arguments must exit through argparse."""
    with pytest.raises(SystemExit):
        main()
@pytest.fixture()
def config_file(tmp_path: Path):
    """Dump a default target-speaker-extraction config, patching STFT defaults.

    If the generated config uses an STFT encoder with an empty conf, the
    STFTEncoder defaults are written back into the file.
    """
    # Write default configuration file
    TargetSpeakerExtractionTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path / "enh"),
        ]
    )
    with open(tmp_path / "enh" / "config.yaml", "r") as f:
        args = yaml.safe_load(f)
    if args["encoder"] == "stft" and len(args["encoder_conf"]) == 0:
        args["encoder_conf"] = get_default_kwargs(STFTEncoder)
    with open(tmp_path / "enh" / "config.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh" / "config.yaml"
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize(
    "input_size, segment_size, hop_size, normalize_segment_scale",
    [(16000, None, None, False), (35000, 2.4, 0.8, False), (35000, 2.4, 0.8, True)],
)
def test_SeparateSpeech(
    config_file, batch_size, input_size, segment_size, hop_size, normalize_segment_scale
):
    """Target-speaker extraction should run with and without segmented processing."""
    extractor = SeparateSpeech(
        train_config=config_file,
        segment_size=segment_size,
        hop_size=hop_size,
        normalize_segment_scale=normalize_segment_scale,
    )
    mixture = torch.rand(batch_size, input_size)
    enrollment = torch.rand(batch_size, 16000)
    extractor(mixture, fs=8000, enroll_ref1=enrollment)
@pytest.fixture()
def enh_inference_config(tmp_path: Path):
    """Write a valid inference-time override config (STFT encoder/decoder)."""
    # Write default configuration file
    args = {
        "encoder": "stft",
        "encoder_conf": {"n_fft": 64, "hop_length": 32},
        "decoder": "stft",
        "decoder_conf": {"n_fft": 64, "hop_length": 32},
    }
    (tmp_path / "enh").mkdir(parents=True, exist_ok=True)
    with open(tmp_path / "enh" / "inference.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh" / "inference.yaml"
@pytest.fixture()
def invalid_enh_inference_config(tmp_path: Path):
    """Write an inference config containing an unknown key ("xxx")."""
    # Write default configuration file
    args = {
        "encoder": "stft",
        "encoder_conf": {"n_fft": 64, "hop_length": 32},
        "xxx": "invalid",
    }
    (tmp_path / "enh").mkdir(parents=True, exist_ok=True)
    with open(tmp_path / "enh" / "invalid_inference.yaml", "w") as f:
        yaml_no_alias_safe_dump(args, f, indent=4, sort_keys=False)
    return tmp_path / "enh" / "invalid_inference.yaml"
@pytest.mark.execution_timeout(5)
def test_SeparateSpeech_with_inference_config(config_file, enh_inference_config):
    """A valid inference config should override the train config cleanly."""
    separate_speech = SeparateSpeech(
        train_config=config_file, inference_config=enh_inference_config
    )
    wav = torch.rand(2, 16000)
    separate_speech(wav, fs=8000, enroll_ref1=torch.rand(2, 8000))
def test_SeparateSpeech_invalid_inference_config(
    enh_inference_config, invalid_enh_inference_config
):
    """Missing train/model config, or an unknown key, must fail the assertions."""
    # No train_config and no model_file: rejected even with a valid config.
    with pytest.raises(AssertionError):
        SeparateSpeech(
            train_config=None, model_file=None, inference_config=enh_inference_config
        )
    # Unknown key "xxx" in the inference config: rejected.
    with pytest.raises(AssertionError):
        SeparateSpeech(train_config=None, inference_config=invalid_enh_inference_config)
| 3,691 | 31.104348 | 88 | py |
espnet | espnet-master/test/espnet2/hubert/test_hubert_loss.py | import pytest
import torch
from espnet2.asr.encoder.hubert_encoder import ( # noqa: H301
FairseqHubertPretrainEncoder,
)
from espnet2.hubert.hubert_loss import HubertPretrainLoss # noqa: H301
pytest.importorskip("fairseq")
@pytest.fixture
def hubert_args():
    """Build a tiny HuBERT pretrain encoder plus dummy encoder outputs.

    Returns the fairseq encoder module and a dict shaped like its forward
    output (masked/unmasked logits, padding mask, feature penalty).
    """
    encoder = FairseqHubertPretrainEncoder(
        output_size=32,
        linear_units=32,
        attention_heads=4,
        num_blocks=2,
        hubert_dict="../../../test_utils/hubert_test.txt",
    )
    bs = 2
    n_cls = 10
    # +1 logit column for the mask/no-mask target convention.
    logit_m_list = [torch.randn(bs, n_cls + 1)]
    logit_u_list = [torch.randn(bs, n_cls + 1)]
    padding_mask = torch.tensor([[False for _ in range(20)]])
    features_pen = torch.tensor(0.0)
    enc_outputs = {
        "logit_m_list": logit_m_list,
        "logit_u_list": logit_u_list,
        "padding_mask": padding_mask,
        "features_pen": features_pen,
    }
    return encoder.encoder, enc_outputs
def test_hubert_loss_forward_backward(hubert_args):
    """HubertPretrainLoss should accept dummy encoder outputs without error."""
    encoder, enc_outputs = hubert_args
    loss_fn = HubertPretrainLoss()
    loss_fn(encoder, enc_outputs)
| 1,026 | 24.675 | 71 | py |
espnet | espnet-master/test/espnet2/hubert/test_hubert_espnet_model.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.asr.encoder.hubert_encoder import TorchAudioHuBERTPretrainEncoder
from espnet2.hubert.espnet_model import TorchAudioHubertPretrainModel
is_torch_1_12_1_plus = V(torch.__version__) >= V("1.12.1")
@pytest.mark.parametrize("finetuning", [False])
def test_forward_backward_finetuning_false(finetuning):
    """Forward/backward of the torchaudio HuBERT pretrain model (no finetuning).

    NOTE(review): on torch < 1.12.1 the test returns early and silently
    passes; pytest.skip would make the outcome visible — confirm intent.
    """
    if not is_torch_1_12_1_plus:
        return
    encoder = TorchAudioHuBERTPretrainEncoder(
        20,
        extractor_conv_layer_config=[(3, 3, 2)],
        encoder_pos_conv_kernel=16,
        encoder_pos_conv_groups=4,
        encoder_embed_dim=4,
        encoder_num_layers=1,
        encoder_num_heads=1,
        encoder_ff_interm_features=4,
        num_classes=5,
        final_dim=10,
        finetuning=finetuning,
    )
    model = TorchAudioHubertPretrainModel(
        5,
        token_list=["0", "1", "2", "3", "4"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
    )
    inputs = dict(
        speech=torch.randn(2, 32, requires_grad=True),
        speech_lengths=torch.tensor([32, 16], dtype=torch.long),
        text=torch.randint(0, 5, [2, 15], dtype=torch.long),
        text_lengths=torch.tensor([15, 7], dtype=torch.long),
    )
    loss, *_ = model(**inputs)
    loss.backward()
| 1,366 | 27.479167 | 78 | py |
espnet | espnet-master/test/espnet2/diar/test_espnet_model.py | import pytest
import torch
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.frontend.default import DefaultFrontend
from espnet2.diar.attractor.rnn_attractor import RnnAttractor
from espnet2.diar.decoder.linear_decoder import LinearDecoder
from espnet2.diar.espnet_model import ESPnetDiarizationModel
from espnet2.layers.label_aggregation import LabelAggregate
# Shared tiny components reused by the diarization model tests below.
# Frontend: 10 mel bins from a 32-point FFT.
frontend = DefaultFrontend(
    n_fft=32,
    win_length=32,
    hop_length=16,
    n_mels=10,
)
# One-block Transformer encoder over the 10-dim frontend features.
encoder = TransformerEncoder(
    input_size=10,
    input_layer="linear",
    num_blocks=1,
    linear_units=32,
    output_size=16,
    attention_heads=2,
)
# Linear decoder producing one output per speaker (2 speakers).
decoder = LinearDecoder(
    num_spk=2,
    encoder_output_size=encoder.output_size(),
)
# Optional RNN attractor matched to the encoder output size.
rnn_attractor = RnnAttractor(unit=16, encoder_output_size=encoder.output_size())
# Aggregates frame labels with the same window/hop as the frontend.
label_aggregator = LabelAggregate(
    win_length=32,
    hop_length=16,
)
@pytest.mark.parametrize(
    "frontend, encoder, decoder, label_aggregator",
    [(frontend, encoder, decoder, label_aggregator)],
)
@pytest.mark.parametrize("training", [True, False])
@pytest.mark.parametrize("attractor", [rnn_attractor, None])
def test_single_channel_model(
    label_aggregator,
    frontend,
    encoder,
    decoder,
    attractor,
    training,
):
    """Forward pass of the diarization model in both train and eval modes."""
    speech = torch.randn(2, 300)
    lengths = torch.LongTensor([300, 200])
    labels = torch.randint(high=2, size=(2, 300, 2))
    model = ESPnetDiarizationModel(
        label_aggregator=label_aggregator,
        attractor=attractor,
        encoder=encoder,
        decoder=decoder,
        frontend=frontend,
        specaug=None,
        normalize=None,
    )
    # train(True) == .train(), train(False) == .eval()
    model.train(mode=training)
    loss, stats, weight = model(
        speech=speech,
        speech_lengths=lengths,
        spk_labels=labels,
        spk_labels_lengths=lengths,
    )
| 1,934 | 23.493671 | 80 | py |
espnet | espnet-master/test/espnet2/diar/attractor/test_rnn_attractor.py | import pytest
import torch
from espnet2.diar.attractor.rnn_attractor import RnnAttractor
@pytest.mark.parametrize("encoder_output_size", [10])
@pytest.mark.parametrize("layer", [1])
@pytest.mark.parametrize("unit", [10])
@pytest.mark.parametrize("dropout", [0.1])
def test_rnn_attractor(encoder_output_size, layer, unit, dropout):
    """RnnAttractor must emit one attractor (and probability) per decoder step."""
    attractor_net = RnnAttractor(
        encoder_output_size=encoder_output_size,
        layer=layer,
        unit=unit,
        dropout=dropout,
    )
    batch, frames, steps = 5, 100, 3
    attractor, att_prob = attractor_net.forward(
        enc_input=torch.rand(batch, frames, encoder_output_size),
        ilens=torch.tensor([frames] * batch),
        dec_input=torch.zeros(batch, steps, encoder_output_size),
    )
    assert attractor.shape == (batch, steps, encoder_output_size)
    assert att_prob.shape == (batch, steps, 1)
| 862 | 29.821429 | 66 | py |
espnet | espnet-master/test/espnet2/diar/decoder/test_linear_decoder.py | import pytest
import torch
from espnet2.diar.decoder.linear_decoder import LinearDecoder
@pytest.mark.parametrize("encoder_output_size", [10])
@pytest.mark.parametrize("num_spk", [2])
def test_linear_decoder(encoder_output_size, num_spk):
    """LinearDecoder maps encoder frames to one logit per speaker."""
    decoder = LinearDecoder(
        encoder_output_size=encoder_output_size, num_spk=num_spk
    )
    batch, frames = 5, 100
    feats = torch.rand(batch, frames, encoder_output_size)
    feat_lens = torch.tensor([frames] * batch)
    out = decoder.forward(input=feats, ilens=feat_lens)
    assert out.shape == (batch, frames, num_spk)
| 560 | 32 | 64 | py |
espnet | espnet-master/test/espnet2/layers/test_global_mvn.py | from pathlib import Path
import numpy as np
import pytest
import torch
from espnet2.layers.global_mvn import GlobalMVN
@pytest.fixture()
def stats_file(tmp_path: Path):
    """Kaldi like style"""
    p = tmp_path / "stats.npy"
    count = 10
    np.random.seed(0)
    x = np.random.randn(count, 80)
    s = x.sum(0)
    # Kaldi convention: the extra trailing element of the sum row holds the
    # frame count; the squared-sum row is padded with 0.
    s = np.pad(s, [0, 1], mode="constant", constant_values=count)
    s2 = (x**2).sum(0)
    s2 = np.pad(s2, [0, 1], mode="constant", constant_values=0.0)
    stats = np.stack([s, s2])
    np.save(p, stats)
    return p
@pytest.fixture()
def stats_file2(tmp_path: Path):
    """New style"""
    # Same statistics as stats_file, stored as named npz arrays instead.
    p = tmp_path / "stats.npz"
    count = 10
    np.random.seed(0)
    x = np.random.randn(count, 80)
    s = x.sum(0)
    s2 = (x**2).sum(0)
    np.savez(p, sum=s, sum_square=s2, count=count)
    return p
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_repl(stats_file, norm_vars, norm_means):
    """GlobalMVN must have a printable repr for every normalization mode."""
    layer = GlobalMVN(stats_file, norm_means=norm_means, norm_vars=norm_vars)
    print(layer)
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_backward_leaf_in(stats_file, norm_vars, norm_means):
    """Gradients must flow through GlobalMVN from a leaf input tensor."""
    layer = GlobalMVN(stats_file, norm_means=norm_means, norm_vars=norm_vars)
    x = torch.randn(1, 2, 80, requires_grad=True)
    y, _ = layer(x)
    y.sum().backward()
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_backward_not_leaf_in(stats_file, norm_vars, norm_means):
    """Gradients must flow through GlobalMVN from a non-leaf input tensor."""
    layer = GlobalMVN(stats_file, norm_means=norm_means, norm_vars=norm_vars)
    x = torch.randn(2, 3, 80, requires_grad=True)
    x = x + 2
    y, _ = layer(x)
    y.sum().backward()
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_inverse_backwar_leaf_in(stats_file, norm_vars, norm_means):
    """Gradients must flow through GlobalMVN.inverse from a leaf input."""
    layer = GlobalMVN(stats_file, norm_means=norm_means, norm_vars=norm_vars)
    x = torch.randn(2, 3, 80, requires_grad=True)
    y, _ = layer.inverse(x)
    y.sum().backward()
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_inverse_backwar_not_leaf_in(stats_file, norm_vars, norm_means):
    """Gradients must flow through GlobalMVN.inverse from a non-leaf input.

    Fix: the original never called backward(), so the gradient path this
    test is named after was left unexercised (cf. test_inverse_backwar_leaf_in).
    """
    layer = GlobalMVN(stats_file, norm_means=norm_means, norm_vars=norm_vars)
    x = torch.randn(2, 3, 80, requires_grad=True)
    x = x + 2  # make x a non-leaf tensor
    y, _ = layer.inverse(x)
    y.sum().backward()
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_inverse_identity(stats_file, norm_vars, norm_means):
    """inverse(forward(x)) must reconstruct x exactly."""
    layer = GlobalMVN(stats_file, norm_means=norm_means, norm_vars=norm_vars)
    x = torch.randn(2, 3, 80)
    y, _ = layer(x)
    x2, _ = layer.inverse(y)
    np.testing.assert_allclose(x.numpy(), x2.numpy())
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_new_style_stats_file(stats_file, stats_file2, norm_vars, norm_means):
    """Kaldi-style (.npy) and new-style (.npz) stats must normalize identically."""
    layer = GlobalMVN(stats_file, norm_means=norm_means, norm_vars=norm_vars)
    layer2 = GlobalMVN(stats_file2, norm_means=norm_means, norm_vars=norm_vars)
    x = torch.randn(2, 3, 80)
    y, _ = layer(x)
    y2, _ = layer2(x)
    np.testing.assert_allclose(y.numpy(), y2.numpy())
| 3,455 | 27.8 | 79 | py |
espnet | espnet-master/test/espnet2/layers/test_stft.py | import torch
from espnet2.layers.stft import Stft
def test_repr():
    """Stft must have a printable repr."""
    print(Stft())
def test_forward():
    """Forward STFT: output shape and length computation."""
    layer = Stft(win_length=4, hop_length=2, n_fft=4)
    x = torch.randn(2, 30)
    y, _ = layer(x)
    # (batch, frames, freq bins, real/imag)
    assert y.shape == (2, 16, 3, 2)
    y, ylen = layer(x, torch.tensor([30, 15], dtype=torch.long))
    assert (ylen == torch.tensor((16, 8), dtype=torch.long)).all()
def test_backward_leaf_in():
    """Gradients must flow through Stft from a leaf input tensor."""
    layer = Stft()
    x = torch.randn(2, 400, requires_grad=True)
    y, _ = layer(x)
    y.sum().backward()
def test_backward_not_leaf_in():
    """Gradients must flow through Stft from a non-leaf input tensor."""
    layer = Stft()
    x = torch.randn(2, 400, requires_grad=True)
    x = x + 2
    y, _ = layer(x)
    y.sum().backward()
def test_inverse():
    """Inverse STFT must run both with and without explicit lengths."""
    layer = Stft()
    x = torch.randn(2, 400, requires_grad=True)
    y, _ = layer(x)
    x_lengths = torch.IntTensor([400, 300])
    raw, _ = layer.inverse(y, x_lengths)
    raw, _ = layer.inverse(y)
def test_librosa_stft():
    """torch STFT and the librosa fallback path must agree numerically.

    The fallback is triggered by faking an MKL-less build via the
    ``torch._C.has_mkl`` flag.

    Fix: restore the flag in a ``finally`` block so a failing assertion no
    longer leaks the patched global state into subsequent tests.
    """
    mkl_is_available = torch.backends.mkl.is_available()
    if not mkl_is_available:
        raise RuntimeError("MKL is not available.")
    layer = Stft()
    layer.eval()
    x = torch.randn(2, 16000, device="cpu")
    y_torch, _ = layer(x)
    torch._C.has_mkl = False
    try:
        y_librosa, _ = layer(x)
        assert torch.allclose(y_torch, y_librosa, atol=7e-6)
    finally:
        torch._C.has_mkl = True
| 1,317 | 22.535714 | 66 | py |
espnet | espnet-master/test/espnet2/layers/test_utterance_mvn.py | import pytest
import torch
from espnet2.layers.utterance_mvn import UtteranceMVN
def test_repr():
    """UtteranceMVN must have a printable repr."""
    print(UtteranceMVN())
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_forward(norm_vars, norm_means):
    """Forward pass preserves shape and passes lengths through unchanged."""
    layer = UtteranceMVN(norm_means=norm_means, norm_vars=norm_vars)
    x = torch.randn(2, 10, 80)
    y, _ = layer(x)
    assert y.shape == (2, 10, 80)
    y, ylen = layer(x, torch.tensor([10, 8], dtype=torch.long))
    assert (ylen == torch.tensor((10, 8), dtype=torch.long)).all()
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_backward_leaf_in(norm_vars, norm_means):
    """Gradients must flow through UtteranceMVN from a leaf input tensor."""
    layer = UtteranceMVN(norm_means=norm_means, norm_vars=norm_vars)
    x = torch.randn(2, 1000, requires_grad=True)
    y, _ = layer(x)
    y.sum().backward()
@pytest.mark.parametrize(
    "norm_vars, norm_means",
    [(True, True), (False, False), (True, False), (False, True)],
)
def test_backward_not_leaf_in(norm_vars, norm_means):
    """Gradients must flow through UtteranceMVN from a non-leaf input tensor."""
    layer = UtteranceMVN(norm_means=norm_means, norm_vars=norm_vars)
    x = torch.randn(2, 1000, requires_grad=True)
    x = x + 2
    y, _ = layer(x)
    y.sum().backward()
| 1,268 | 27.2 | 68 | py |
espnet | espnet-master/test/espnet2/layers/test_sinc_filters.py | import torch
from espnet2.layers.sinc_conv import BarkScale, LogCompression, MelScale, SincConv
def test_log_compression():
    """LogCompression activation must preserve the input shape."""
    activation = LogCompression()
    x = torch.randn([5, 20, 1, 40], requires_grad=True)
    y = activation(x)
    assert x.shape == y.shape
def test_sinc_filters():
    """SincConv output shape for single- and multi-channel input."""
    filters = SincConv(
        in_channels=1, out_channels=128, kernel_size=101, stride=1, fs=16000
    )
    x = torch.randn([50, 1, 400], requires_grad=True)
    y = filters(x)
    assert y.shape == torch.Size([50, 128, 300])
    # now test multichannel
    filters = SincConv(
        in_channels=2, out_channels=128, kernel_size=101, stride=1, fs=16000
    )
    x = torch.randn([50, 2, 400], requires_grad=True)
    y = filters(x)
    assert y.shape == torch.Size([50, 128, 300])
def test_sinc_filter_static_functions():
    """Window and sinc static helpers must run on plain tensors."""
    N = 400
    x = torch.linspace(1, N, N)
    print(f"no window function: {SincConv.none_window(x)}")
    print(f"hamming window function: {SincConv.hamming_window(x)}")
    SincConv.sinc(torch.tensor(50.0))
def test_sinc_filter_output_size():
    """get_odim must report the convolution output length."""
    sinc_conv = SincConv(in_channels=1, out_channels=128, kernel_size=101)
    assert sinc_conv.get_odim(400) == 300
def test_bark_scale():
    """Bark scale conversion must round-trip within 0.1 Hz."""
    f = 16000.0
    x = BarkScale.convert(f)
    f_back = BarkScale.invert(x)
    assert torch.abs(f_back - f) < 0.1
    BarkScale.bank(128, 16000.0)
def test_mel_scale():
    """Mel scale conversion must round-trip within 0.1 Hz."""
    f = 16000.0
    x = MelScale.convert(f)
    f_back = MelScale.invert(x)
    assert torch.abs(f_back - f) < 0.1
    MelScale.bank(128, 16000.0)
| 1,536 | 26.446429 | 82 | py |
espnet | espnet-master/test/espnet2/layers/test_log_mel.py | import torch
from espnet2.layers.log_mel import LogMel
def test_repr():
    """LogMel must have a printable repr."""
    print(LogMel())
def test_forward():
    """Forward pass maps frequency bins to n_mels and passes lengths through."""
    layer = LogMel(n_fft=16, n_mels=2)
    x = torch.randn(2, 4, 9)
    y, _ = layer(x)
    assert y.shape == (2, 4, 2)
    y, ylen = layer(x, torch.tensor([4, 2], dtype=torch.long))
    assert (ylen == torch.tensor((4, 2), dtype=torch.long)).all()
def test_backward_leaf_in():
    """Gradients must flow through LogMel from a leaf input tensor."""
    layer = LogMel(n_fft=16, n_mels=2)
    x = torch.randn(2, 4, 9, requires_grad=True)
    y, _ = layer(x)
    y.sum().backward()
def test_backward_not_leaf_in():
    """Gradients must flow through LogMel from a non-leaf input tensor."""
    layer = LogMel(n_fft=16, n_mels=2)
    x = torch.randn(2, 4, 9, requires_grad=True)
    x = x + 2
    y, _ = layer(x)
    y.sum().backward()
| 708 | 21.15625 | 65 | py |
espnet | espnet-master/test/espnet2/layers/test_label_aggregation.py | import pytest
import torch
from espnet2.layers.label_aggregation import LabelAggregate
@pytest.mark.parametrize(
    ("input_label", "expected_output"),
    [
        (torch.ones(10, 20000, 2), torch.ones(10, 157, 2)),
        (torch.zeros(10, 20000, 2), torch.zeros(10, 157, 2)),
    ],
)
def test_LabelAggregate(input_label, expected_output):
    """Constant frame labels must aggregate to the same constant per window."""
    label_aggregate = LabelAggregate(win_length=512, hop_length=128, center=True)
    aggregated_label, _ = label_aggregate.forward(input=input_label)
    assert torch.equal(aggregated_label, expected_output)
def test_LabelAggregate_repr():
    """LabelAggregate must have a printable repr."""
    label_aggregate = LabelAggregate(win_length=512, hop_length=128, center=True)
    print(label_aggregate)
| 700 | 29.478261 | 81 | py |
espnet | espnet-master/test/espnet2/layers/test_mask_along_axis.py | import pytest
import torch
from espnet2.layers.mask_along_axis import MaskAlongAxis
@pytest.mark.parametrize("requires_grad", [False, True])
@pytest.mark.parametrize("replace_with_zero", [False, True])
@pytest.mark.parametrize("dim", ["freq", "time"])
def test_MaskAlongAxis(dim, replace_with_zero, requires_grad):
freq_mask = MaskAlongAxis(
dim=dim,
mask_width_range=30,
num_mask=2,
replace_with_zero=replace_with_zero,
)
x = torch.randn(2, 100, 80, requires_grad=requires_grad)
x_lens = torch.tensor([80, 78])
y, y_lens = freq_mask(x, x_lens)
assert all(l1 == l2 for l1, l2 in zip(x_lens, y_lens))
if requires_grad:
y.sum().backward()
@pytest.mark.parametrize("replace_with_zero", [False, True])
@pytest.mark.parametrize("dim", ["freq", "time"])
def test_MaskAlongAxis_repr(dim, replace_with_zero):
freq_mask = MaskAlongAxis(
dim=dim,
mask_width_range=30,
num_mask=2,
replace_with_zero=replace_with_zero,
)
print(freq_mask)
| 1,043 | 28.828571 | 62 | py |
espnet | espnet-master/test/espnet2/layers/test_time_warp.py | import pytest
import torch
from espnet2.layers.time_warp import TimeWarp
@pytest.mark.parametrize("x_lens", [None, torch.tensor([80, 78])])
@pytest.mark.parametrize("requires_grad", [False, True])
def test_TimeWarp(x_lens, requires_grad):
time_warp = TimeWarp(window=10)
x = torch.randn(2, 100, 80, requires_grad=requires_grad)
y, y_lens = time_warp(x, x_lens)
if x_lens is not None:
assert all(l1 == l2 for l1, l2 in zip(x_lens, y_lens))
if requires_grad:
y.sum().backward()
def test_TimeWarp_repr():
time_warp = TimeWarp(window=10)
print(time_warp)
| 600 | 26.318182 | 66 | py |
espnet | espnet-master/test/espnet2/train/test_distributed_utils.py | import argparse
import unittest.mock
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
import pytest
from espnet2.tasks.abs_task import AbsTask
from espnet2.train.distributed_utils import (
DistributedOption,
free_port,
resolve_distributed_mode,
)
from espnet2.utils.build_dataclass import build_dataclass
@pytest.fixture()
def dist_init_method(tmp_path):
    """file:// rendezvous URL for torch.distributed init, under tmp_path."""
    return f"file://{tmp_path}/init"
def _init(option):
    # Helper run inside worker threads/processes: perform both init steps.
    option.init_options()
    option.init_torch_distributed()
def test_default_work():
    """Default (non-distributed) argument set must initialize cleanly."""
    parser = AbsTask.get_parser()
    args = parser.parse_args([])
    resolve_distributed_mode(args)
    option = build_dataclass(DistributedOption, args)
    option.init_options()
    option.init_torch_distributed()
def test_resolve_distributed_mode1(dist_init_method):
    """world_size given without dist_rank must be rejected."""
    args = argparse.Namespace(
        multiprocessing_distributed=False,
        dist_world_size=2,
        dist_rank=None,
        ngpu=2,
        local_rank=0,
        dist_launcher=None,
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    with pytest.raises(RuntimeError):
        resolve_distributed_mode(args)
def test_resolve_distributed_mode2(dist_init_method):
    """dist_rank given without local_rank must be rejected."""
    args = argparse.Namespace(
        multiprocessing_distributed=False,
        dist_world_size=2,
        dist_rank=0,
        ngpu=2,
        local_rank=None,
        dist_launcher=None,
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    with pytest.raises(RuntimeError):
        resolve_distributed_mode(args)
def test_resolve_distributed_mode3(dist_init_method):
    """No world_size/rank at all resolves without error."""
    args = argparse.Namespace(
        multiprocessing_distributed=False,
        dist_world_size=None,
        dist_rank=None,
        ngpu=2,
        local_rank=None,
        dist_launcher=None,
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    resolve_distributed_mode(args)
def test_resolve_distributed_mode4(dist_init_method):
    """Complete rank information resolves to distributed mode."""
    args = argparse.Namespace(
        multiprocessing_distributed=False,
        dist_world_size=2,
        dist_rank=0,
        ngpu=2,
        local_rank=1,
        dist_launcher=None,
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    resolve_distributed_mode(args)
    assert args.distributed
def test_resolve_distributed_mode5(dist_init_method):
    """Explicit ranks combined with the slurm launcher must be rejected."""
    args = argparse.Namespace(
        multiprocessing_distributed=False,
        dist_world_size=2,
        dist_rank=0,
        ngpu=2,
        local_rank=1,
        dist_launcher="slurm",
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    with pytest.raises(RuntimeError):
        resolve_distributed_mode(args)
def test_resolve_distributed_mode6(dist_init_method):
    """Multiprocessing mode with world_size but no dist_rank must be rejected."""
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=2,
        dist_rank=None,
        ngpu=1,
        local_rank=None,
        dist_launcher=None,
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    with pytest.raises(RuntimeError):
        resolve_distributed_mode(args)
def test_resolve_distributed_mode7(dist_init_method):
    """With dist_rank set, multiprocessing mode is downgraded to plain distributed."""
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=2,
        dist_rank=0,
        ngpu=1,
        local_rank=None,
        dist_launcher=None,
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    resolve_distributed_mode(args)
    assert args.distributed
    assert not args.multiprocessing_distributed
def test_resolve_distributed_mode9(dist_init_method):
    """world_size=1 with multiple GPUs keeps multiprocessing-distributed mode."""
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=1,
        dist_rank=None,
        ngpu=2,
        local_rank=None,
        dist_launcher=None,
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    resolve_distributed_mode(args)
    assert args.distributed
    assert args.multiprocessing_distributed
def test_resolve_distributed_mode10(dist_init_method):
    """Single GPU without world_size resolves to non-distributed mode."""
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=None,
        dist_rank=None,
        ngpu=1,
        local_rank=None,
        dist_launcher=None,
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    resolve_distributed_mode(args)
    assert not args.distributed
    assert not args.multiprocessing_distributed
@pytest.mark.skipif(True, reason="sometimes hangup?")
def test_init_cpu(dist_init_method):
args = argparse.Namespace(
multiprocessing_distributed=True,
dist_world_size=2,
dist_rank=None,
ngpu=0,
local_rank=None,
dist_launcher=None,
distributed=True,
dist_backend="gloo",
dist_init_method=dist_init_method,
dist_master_addr=None,
dist_master_port=None,
)
args.dist_rank = 0
option = build_dataclass(DistributedOption, args)
args.dist_rank = 1
option2 = build_dataclass(DistributedOption, args)
with ProcessPoolExecutor(max_workers=2) as e:
fn = e.submit(_init, option)
fn2 = e.submit(_init, option2)
fn.result()
fn2.result()
@pytest.mark.skipif(True, reason="sometimes hangup?")
def test_init_cpu2():
    """env:// init with a port but no master address must fail at init time."""
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=2,
        dist_rank=None,
        ngpu=0,
        local_rank=None,
        dist_launcher=None,
        distributed=True,
        dist_backend="gloo",
        dist_init_method="env://",
        dist_master_addr=None,
        dist_master_port=free_port(),
    )
    args.dist_rank = 0
    option = build_dataclass(DistributedOption, args)
    args.dist_rank = 1
    option2 = build_dataclass(DistributedOption, args)
    with ProcessPoolExecutor(max_workers=2) as e:
        fn = e.submit(_init, option)
        fn2 = e.submit(_init, option2)
        # Missing MASTER_ADDR: rank 0 is expected to raise.
        with pytest.raises(RuntimeError):
            fn.result()
            fn2.result()
@pytest.mark.skipif(True, reason="sometimes hangup?")
def test_init_cpu3():
    """env:// init with a master address but no port must fail at init time."""
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=2,
        dist_rank=None,
        ngpu=0,
        local_rank=None,
        dist_launcher=None,
        distributed=True,
        dist_backend="gloo",
        dist_init_method="env://",
        dist_master_addr="localhost",
        dist_master_port=None,
    )
    args.dist_rank = 0
    option = build_dataclass(DistributedOption, args)
    args.dist_rank = 1
    option2 = build_dataclass(DistributedOption, args)
    # NOTE: unlike the sibling tests, this one uses threads, not processes.
    with ThreadPoolExecutor(max_workers=2) as e:
        fn = e.submit(_init, option)
        fn2 = e.submit(_init, option2)
        # Missing MASTER_PORT: rank 0 is expected to raise.
        with pytest.raises(RuntimeError):
            fn.result()
            fn2.result()
@pytest.mark.skipif(True, reason="sometimes hangup?")
def test_init_cpu4():
    """env:// init with both master address and port succeeds for both ranks."""
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=2,
        dist_rank=None,
        ngpu=0,
        local_rank=None,
        dist_launcher=None,
        distributed=True,
        dist_backend="gloo",
        dist_init_method="env://",
        dist_master_addr="localhost",
        dist_master_port=free_port(),
    )
    args.dist_rank = 0
    option = build_dataclass(DistributedOption, args)
    args.dist_rank = 1
    option2 = build_dataclass(DistributedOption, args)
    with ProcessPoolExecutor(max_workers=2) as e:
        fn = e.submit(_init, option)
        fn2 = e.submit(_init, option2)
        fn.result()
        fn2.result()
@pytest.mark.skipif(True, reason="sometimes hangup?")
def test_init_cpu5():
    """Successful env:// init for both ranks.

    NOTE(review): this is byte-for-byte identical to ``test_init_cpu4``;
    it looks like an accidental duplicate — confirm and either delete it
    or change it to cover a distinct configuration.
    """
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=2,
        dist_rank=None,
        ngpu=0,
        local_rank=None,
        dist_launcher=None,
        distributed=True,
        dist_backend="gloo",
        dist_init_method="env://",
        dist_master_addr="localhost",
        dist_master_port=free_port(),
    )
    args.dist_rank = 0
    option = build_dataclass(DistributedOption, args)
    args.dist_rank = 1
    option2 = build_dataclass(DistributedOption, args)
    with ProcessPoolExecutor(max_workers=2) as e:
        fn = e.submit(_init, option)
        fn2 = e.submit(_init, option2)
        fn.result()
        fn2.result()
def test_resolve_distributed_mode_slurm1(dist_init_method):
    """Resolve distributed mode from mocked 2-node SLURM environment variables."""
    args = argparse.Namespace(
        multiprocessing_distributed=False,
        dist_world_size=None,
        dist_rank=None,
        ngpu=2,
        local_rank=None,
        dist_launcher="slurm",
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    # patch.dict restores os.environ when the with-block exits.
    with unittest.mock.patch.dict(
        "os.environ",
        dict(
            SLURM_PROCID="0",
            SLURM_NTASKS="2",
            SLURM_STEP_NUM_NODES="2",
            SLURM_STEP_NODELIST="host1",
            SLURM_NODEID="0",
            SLURM_LOCALID="0",
            CUDA_VISIBLE_DEVICES="0,1",
        ),
    ):
        resolve_distributed_mode(args)
def test_resolve_distributed_mode_slurm2(dist_init_method):
    """Inconsistent SLURM env (2 tasks on 1 node with ngpu=2) must raise."""
    args = argparse.Namespace(
        multiprocessing_distributed=False,
        dist_world_size=None,
        dist_rank=None,
        ngpu=2,
        local_rank=None,
        dist_launcher="slurm",
        dist_backend="nccl",
        dist_init_method=dist_init_method,
        dist_master_addr=None,
        dist_master_port=None,
    )
    with unittest.mock.patch.dict(
        "os.environ",
        dict(
            SLURM_PROCID="0",
            SLURM_NTASKS="2",
            SLURM_STEP_NUM_NODES="1",
            SLURM_STEP_NODELIST="host1",
            SLURM_NODEID="0",
            SLURM_LOCALID="0",
            CUDA_VISIBLE_DEVICES="0,1",
        ),
    ):
        # Only difference from slurm1: SLURM_STEP_NUM_NODES="1" -> rejected.
        with pytest.raises(RuntimeError):
            resolve_distributed_mode(args)
def test_resolve_distributed_mode_slurm3():
    """Resolve mode from SLURM env vars, then initialize both ranks.

    Fix: the executor is now used as a context manager so its worker
    processes are always shut down — the original created the
    ``ProcessPoolExecutor`` without ever calling ``shutdown()``, leaking
    worker processes if a submit/result call raised.
    """
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=None,
        dist_rank=None,
        ngpu=1,
        local_rank=None,
        dist_launcher="slurm",
        dist_backend="nccl",
        dist_init_method="env://",
        dist_master_addr=None,
        dist_master_port=10000,
    )
    env = dict(
        SLURM_PROCID="0",
        SLURM_NTASKS="1",
        SLURM_STEP_NUM_NODES="1",
        SLURM_STEP_NODELIST="localhost",
        SLURM_NODEID="0",
        CUDA_VISIBLE_DEVICES="0,1",
    )
    with ProcessPoolExecutor(max_workers=2) as e:
        with unittest.mock.patch.dict("os.environ", dict(env, SLURM_LOCALID="0")):
            resolve_distributed_mode(args)
            option = build_dataclass(DistributedOption, args)
            fn = e.submit(_init, option)
        with unittest.mock.patch.dict("os.environ", dict(env, SLURM_LOCALID="0")):
            option2 = build_dataclass(DistributedOption, args)
            fn2 = e.submit(_init, option2)
        fn.result()
        fn2.result()
| 11,600 | 27.021739 | 78 | py |
espnet | espnet-master/test/espnet2/train/test_reporter.py | import logging
import uuid
from pathlib import Path
import numpy as np
import pytest
import torch
from torch.utils.tensorboard import SummaryWriter
from espnet2.train.reporter import Average, ReportedValue, Reporter, aggregate
@pytest.mark.parametrize("weight1,weight2", [(None, None), (19, np.array(9))])
def test_register(weight1, weight2):
    """Registered stats aggregate to a (weighted) mean across iterations."""
    reporter = Reporter()
    reporter.set_epoch(1)
    with reporter.observe(uuid.uuid4().hex) as sub:
        stats1 = {
            "float": 0.6,
            "int": 6,
            "np": np.random.random(),
            "torch": torch.rand(1),
        }
        sub.register(stats1, weight1)
        sub.next()
        stats2 = {
            "float": 0.3,
            "int": 100,
            "np": np.random.random(),
            "torch": torch.rand(1),
        }
        sub.register(stats2, weight2)
        sub.next()
    assert sub.get_epoch() == 1
    # Registering after the observe-context has closed is an error.
    with pytest.raises(RuntimeError):
        sub.register({})
    # Compute the reference aggregate: plain mean if unweighted, otherwise
    # the weighted mean with both weights coerced to float.
    desired = {}
    for k in stats1:
        if stats1[k] is None:
            continue
        if weight1 is None:
            desired[k] = (stats1[k] + stats2[k]) / 2
        else:
            weight1 = float(weight1)
            weight2 = float(weight2)
            desired[k] = float(weight1 * stats1[k] + weight2 * stats2[k])
            desired[k] /= weight1 + weight2
    for k1, k2 in reporter.get_all_keys():
        # Skip the bookkeeping keys the reporter adds on its own.
        if k2 in ("time", "total_count", "gpu_max_cached_mem_GB", "gpu_cached_mem_GB"):
            continue
        np.testing.assert_allclose(reporter.get_value(k1, k2), desired[k2])
@pytest.mark.parametrize("mode", ["min", "max", "foo"])
def test_sort_epochs_and_values(mode):
    """sort_epochs_and_values orders (epoch, value) pairs; bad mode raises."""
    reporter = Reporter()
    key1 = uuid.uuid4().hex
    stats_list = [{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}]
    for e in range(len(stats_list)):
        reporter.set_epoch(e + 1)
        with reporter.observe(key1) as sub:
            sub.register(stats_list[e])
            sub.next()
    if mode not in ("min", "max"):
        with pytest.raises(ValueError):
            reporter.sort_epochs_and_values(key1, "aa", mode)
        return
    else:
        sort_values = reporter.sort_epochs_and_values(key1, "aa", mode)
    # sign flips the comparison so one sorted() covers both modes.
    if mode == "min":
        sign = 1
    else:
        sign = -1
    desired = sorted(
        [(e + 1, stats_list[e]["aa"]) for e in range(len(stats_list))],
        key=lambda x: sign * x[1],
    )
    for e in range(len(stats_list)):
        assert sort_values[e] == desired[e]
def test_sort_epochs_and_values_no_key():
    """Sorting by an unknown key must raise KeyError."""
    reporter = Reporter()
    observed_key = uuid.uuid4().hex
    # Fill three epochs with some values for an unrelated key.
    for epoch, stats in enumerate([{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}], start=1):
        reporter.set_epoch(epoch)
        with reporter.observe(observed_key) as sub:
            sub.register(stats)
            sub.next()
    with pytest.raises(KeyError):
        reporter.sort_epochs_and_values("foo", "bar", "min")
def test_get_value_not_found():
    """get_value on an empty reporter must raise KeyError."""
    with pytest.raises(KeyError):
        Reporter().get_value("a", "b")
def test_sort_values():
    """sort_values returns the stat values in ascending order for mode=min."""
    mode = "min"
    reporter = Reporter()
    key1 = uuid.uuid4().hex
    stats_list = [{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}]
    for e in range(len(stats_list)):
        reporter.set_epoch(e + 1)
        with reporter.observe(key1) as sub:
            sub.register(stats_list[e])
            sub.next()
    sort_values = reporter.sort_values(key1, "aa", mode)
    desired = sorted(
        [stats_list[e]["aa"] for e in range(len(stats_list))],
    )
    for e in range(len(stats_list)):
        assert sort_values[e] == desired[e]
def test_sort_epochs():
    """sort_epochs returns epoch numbers ordered by their stat value."""
    mode = "min"
    reporter = Reporter()
    key1 = uuid.uuid4().hex
    stats_list = [{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}]
    for e in range(len(stats_list)):
        reporter.set_epoch(e + 1)
        with reporter.observe(key1) as sub:
            sub.register(stats_list[e])
            sub.next()
    sort_values = reporter.sort_epochs(key1, "aa", mode)
    desired = sorted(
        [(e + 1, stats_list[e]["aa"]) for e in range(len(stats_list))],
        key=lambda x: x[1],
    )
    # Only the epoch numbers (first element of each pair) are compared.
    for e in range(len(stats_list)):
        assert sort_values[e] == desired[e][0]
def test_best_epoch():
    """get_best_epoch picks the epoch with the smallest value for mode=min."""
    reporter = Reporter()
    observed_key = uuid.uuid4().hex
    for epoch, stats in enumerate([{"aa": 0.3}, {"aa": 0.5}, {"aa": 0.2}], start=1):
        reporter.set_epoch(epoch)
        with reporter.observe(observed_key) as sub:
            sub.register(stats)
            sub.next()
    # 0.2 at epoch 3 is the minimum.
    assert reporter.get_best_epoch(observed_key, "aa", "min") == 3
def test_check_early_stopping():
    """Early stopping triggers once no improvement is seen for `patience` epochs."""
    mode = "min"
    reporter = Reporter()
    key1 = uuid.uuid4().hex
    # Best value (0.2) occurs at epoch 2; epochs 3 and 4 do not improve.
    stats_list = [{"aa": 0.3}, {"aa": 0.2}, {"aa": 0.4}, {"aa": 0.3}]
    patience = 1
    results = []
    for e in range(len(stats_list)):
        reporter.set_epoch(e + 1)
        with reporter.observe(key1) as sub:
            sub.register(stats_list[e])
            sub.next()
        truefalse = reporter.check_early_stopping(patience, key1, "aa", mode)
        results.append(truefalse)
    assert results == [False, False, False, True]
def test_logging(tmp_path):
    """Smoke-test log_message / tensorboard_add_scalar in various states."""
    reporter = Reporter()
    key1 = uuid.uuid4().hex
    key2 = uuid.uuid4().hex
    stats_list = [
        {"aa": 0.3, "bb": 3.0},
        {"aa": 0.5, "bb": 3.0},
        {"aa": 0.2, "bb": 3.0},
    ]
    writer = SummaryWriter(tmp_path)
    for e in range(len(stats_list)):
        reporter.set_epoch(e + 1)
        with reporter.observe(key1) as sub:
            sub.register(stats_list[e])
            sub.next()
        with reporter.observe(key2) as sub:
            sub.register(stats_list[e])
            sub.next()
            logging.info(sub.log_message())
            logging.info(sub.log_message(-1))
            logging.info(sub.log_message(0, 1))
            sub.tensorboard_add_scalar(writer, -1)
        # Outside the observe-context, log_message must raise.
        with pytest.raises(RuntimeError):
            logging.info(sub.log_message())
    logging.info(reporter.log_message())
    # Registering differing key sets across iterations is allowed.
    with reporter.observe(key1) as sub:
        sub.register({"aa": 0.1, "bb": 0.4})
        sub.next()
        sub.register({"aa": 0.1})
        sub.next()
def test_has_key():
    """has() reports a registered (observer-key, stat-name) pair."""
    reporter = Reporter()
    reporter.set_epoch(1)
    observed_key = uuid.uuid4().hex
    with reporter.observe(observed_key) as sub:
        sub.register({"aa": 0.6})
        sub.next()
    assert reporter.has(observed_key, "aa")
def test_get_Keys():
    """get_keys returns the tuple of observed keys."""
    reporter = Reporter()
    reporter.set_epoch(1)
    key1 = uuid.uuid4().hex
    with reporter.observe(key1) as sub:
        stats1 = {"aa": 0.6}
        sub.register(stats1)
        sub.next()
    assert reporter.get_keys() == (key1,)
def test_get_Keys2():
    """get_keys2 returns the stat names registered under a given key."""
    reporter = Reporter()
    reporter.set_epoch(1)
    key1 = uuid.uuid4().hex
    with reporter.observe(key1) as sub:
        stats1 = {"aa": 0.6}
        sub.register(stats1)
        sub.next()
    assert reporter.get_keys2(key1) == ("aa",)
def test_matplotlib_plot(tmp_path: Path):
    """matplotlib_plot writes one PNG per stat and tolerates a missing epoch.

    Epochs 1 and 3 register "aa"; epoch 2 registers nothing, so plotting
    must cope with a gap in the epoch axis.

    Fix: the second ``set_epoch`` now passes 2 — the original passed 1,
    which contradicted its own "Skip epoch=2" comment and merely re-opened
    epoch 1 instead of leaving epoch 2 empty.
    """
    reporter = Reporter()
    reporter.set_epoch(1)
    key1 = uuid.uuid4().hex
    with reporter.observe(key1) as sub:
        stats1 = {"aa": 0.6}
        sub.register(stats1)
        sub.next()
    reporter.set_epoch(2)
    with reporter.observe(key1) as sub:
        # Skip epoch=2: register no stats at all for this epoch.
        sub.register({})
        sub.next()
    reporter.set_epoch(3)
    with reporter.observe(key1) as sub:
        stats1 = {"aa": 0.6}
        sub.register(stats1)
        sub.next()
    reporter.matplotlib_plot(tmp_path)
    assert (tmp_path / "aa.png").exists()
def test_tensorboard_add_scalar(tmp_path: Path):
    """tensorboard_add_scalar works even when one epoch registered nothing.

    Fix: the second ``set_epoch`` now passes 2 — the original passed 1,
    which contradicted its own "Skip epoch=2" comment and merely re-opened
    epoch 1 instead of leaving epoch 2 empty.
    """
    reporter = Reporter()
    reporter.set_epoch(1)
    key1 = "train"
    with reporter.observe(key1) as sub:
        stats1 = {"aa": 0.6}
        sub.register(stats1)
        sub.next()
    reporter.set_epoch(2)
    with reporter.observe(key1) as sub:
        # Skip epoch=2: register no stats at all for this epoch.
        sub.register({})
        sub.next()
    reporter.set_epoch(3)
    with reporter.observe(key1) as sub:
        stats1 = {"aa": 0.6}
        sub.register(stats1)
        sub.next()
    writer = SummaryWriter(tmp_path)
    reporter.tensorboard_add_scalar(writer)
def test_state_dict():
    """A reporter restored from state_dict round-trips exactly."""
    reporter = Reporter()
    reporter.set_epoch(1)
    # Register one stat under each of two observer keys.
    for key, stats in (("train", {"aa": 0.6}), ("eval", {"bb": 0.6})):
        with reporter.observe(key) as sub:
            sub.register(stats)
            sub.next()
    state = reporter.state_dict()
    restored = Reporter()
    restored.load_state_dict(state)
    assert restored.state_dict() == state
def test_get_epoch():
    """The constructor argument sets the initial epoch."""
    assert Reporter(2).get_epoch() == 2
def test_total_count():
    """get_total_count accumulates register/next cycles across epochs."""
    reporter = Reporter(2)
    assert reporter.get_epoch() == 2
    with reporter.observe("train", 1) as sub:
        sub.register({})
        sub.next()
    with reporter.observe("train", 2) as sub:
        sub.register({})
        sub.next()
        sub.register({})
        sub.next()
    # One iteration in epoch 1 plus two in epoch 2.
    assert sub.get_total_count() == 3
def test_change_epoch():
    """Changing the epoch inside an open observe-context must raise."""
    reporter = Reporter()
    with pytest.raises(RuntimeError):
        with reporter.observe("train", 1):
            reporter.set_epoch(2)
def test_minus_epoch():
    """A negative initial epoch is rejected by the constructor."""
    with pytest.raises(ValueError):
        Reporter(-1)
def test_minus_epoch2():
    """set_epoch and start_epoch reject negative epoch numbers."""
    reporter = Reporter()
    with pytest.raises(ValueError):
        reporter.set_epoch(-1)
    reporter.start_epoch("aa", 1)
    with pytest.raises(ValueError):
        reporter.start_epoch("aa", -1)
def test_register_array():
    """Non-scalar stats or weights are rejected by register()."""
    reporter = Reporter()
    with reporter.observe("train", 1) as sub:
        with pytest.raises(ValueError):
            sub.register({"a": np.array([0, 1])})
            sub.next()
        with pytest.raises(ValueError):
            sub.register({"b": 1}, weight=np.array([1, 2]))
            sub.next()
def test_zero_weight():
    """Registering with weight=0 is accepted without error."""
    reporter = Reporter()
    with reporter.observe("train", 1) as sub:
        sub.register({"a": 1}, weight=0)
        sub.next()
def test_register_nan():
    """Registering a NaN stat value is accepted without error."""
    reporter = Reporter()
    with reporter.observe("train", 1) as sub:
        sub.register({"a": np.nan}, weight=1.0)
        sub.next()
def test_no_register():
    """Leaving an observe-context without registering anything is fine."""
    reporter = Reporter()
    with reporter.observe("train", 1):
        pass
def test_mismatch_key2():
    """Different stat names across epochs are accepted."""
    reporter = Reporter()
    with reporter.observe("train", 1) as sub:
        sub.register({"a": 2})
        sub.next()
    with reporter.observe("train", 2) as sub:
        sub.register({"b": 3})
        sub.next()
def test_reserved():
    """Stat names reserved by the reporter ("time", "total_count") are rejected."""
    reporter = Reporter()
    with reporter.observe("train", 1) as sub:
        with pytest.raises(RuntimeError):
            sub.register({"time": 2})
            sub.next()
        with pytest.raises(RuntimeError):
            sub.register({"total_count": 3})
            sub.next()
def test_different_type():
    """Mixing weighted and unweighted registrations of the same stat raises."""
    reporter = Reporter()
    with pytest.raises(ValueError):
        with reporter.observe("train", 1) as sub:
            sub.register({"a": 2}, weight=1)
            sub.next()
            sub.register({"a": 3})
            sub.next()
def test_start_middle_epoch():
    """Observing can start at an epoch greater than 1."""
    reporter = Reporter()
    with reporter.observe("train", 2) as sub:
        sub.register({"a": 3})
        sub.next()
def test__plot_stats_input_str():
    """_plot_stats rejects a string where a sequence of keys is expected."""
    reporter = Reporter()
    with pytest.raises(TypeError):
        reporter._plot_stats("aaa", "a")
class DummyReportedValue(ReportedValue):
    # Minimal ReportedValue subclass; aggregate() has no handler for it,
    # which test_aggregate uses to provoke NotImplementedError.
    pass
def test_aggregate():
    """aggregate averages Average values, is NaN for empty, and rejects unknowns."""
    vs = [Average(0.1), Average(0.3)]
    assert aggregate(vs) == 0.2
    vs = []
    assert aggregate(vs) is np.nan
    with pytest.raises(NotImplementedError):
        vs = [DummyReportedValue()]
        aggregate(vs)
def test_measure_time():
    """measure_time context manager runs without error."""
    reporter = Reporter()
    with reporter.observe("train", 2) as sub:
        with sub.measure_time("foo"):
            pass
        sub.next()
def test_measure_iter_time():
    """measure_iter_time yields the wrapped iterable's items without error."""
    reporter = Reporter()
    with reporter.observe("train", 2) as sub:
        for _ in sub.measure_iter_time(range(3), "foo"):
            sub.next()
| 12,124 | 25.416122 | 87 | py |
espnet | espnet-master/test/espnet2/asr/test_pit_espnet_model.py | import pytest
import torch
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.transformer_decoder import TransformerDecoder
from espnet2.asr.encoder.transformer_encoder_multispkr import TransformerEncoder
from espnet2.asr.pit_espnet_model import ESPnetASRModel
@pytest.mark.parametrize("encoder_arch", [TransformerEncoder])
@pytest.mark.parametrize("num_inf", [1, 2, 3])
def test_pit_espnet_model(encoder_arch, num_inf):
    """Forward/backward the PIT ASR model for 1-3 output streams."""
    vocab_size = 5
    enc_out = 4
    encoder = encoder_arch(
        20,
        output_size=enc_out,
        linear_units=4,
        num_blocks=1,
        num_blocks_sd=1,
        num_inf=num_inf,
    )
    decoder = TransformerDecoder(vocab_size, enc_out, linear_units=4, num_blocks=2)
    # reduce=False: the PIT loss needs per-utterance CTC values.
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out, reduce=False)
    model = ESPnetASRModel(
        vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
        joint_network=None,
        num_inf=num_inf,
        num_ref=num_inf,
    )
    inputs = dict(
        speech=torch.randn(2, 10, 20, requires_grad=True),
        speech_lengths=torch.tensor([10, 8], dtype=torch.long),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long),
        text_lengths=torch.tensor([4, 3], dtype=torch.long),
    )
    # Additional references for speakers 2..num_inf use the text_spk{i} keys.
    if num_inf > 1:
        for i in range(2, num_inf + 1):
            inputs[f"text_spk{i}"] = torch.randint(2, 4, [2, 4], dtype=torch.long)
            inputs[f"text_spk{i}_lengths"] = torch.tensor([4, 3], dtype=torch.long)
    loss, *_ = model(**inputs)
    loss.backward()
| 1,723 | 30.345455 | 83 | py |
espnet | espnet-master/test/espnet2/asr/test_ctc.py | import pytest
import torch
from espnet2.asr.ctc import CTC
@pytest.fixture
def ctc_args():
    """Fixture: (hidden states, their lengths, labels, label lengths) for CTC."""
    bs = 2
    h = torch.randn(bs, 10, 10)
    h_lens = torch.LongTensor([10, 8])
    y = torch.randint(0, 4, [2, 5])
    y_lens = torch.LongTensor([5, 2])
    return h, h_lens, y, y_lens
@pytest.mark.parametrize("ctc_type", ["builtin", "gtnctc"])
def test_ctc_forward_backward(ctc_type, ctc_args):
    """CTC loss forward and backward run for both CTC implementations."""
    ctc = CTC(encoder_output_size=10, odim=5, ctc_type=ctc_type)
    ctc(*ctc_args).sum().backward()
@pytest.mark.parametrize("ctc_type", ["builtin", "gtnctc"])
def test_ctc_softmax(ctc_type, ctc_args):
    """CTC.softmax runs on encoder hidden states."""
    ctc = CTC(encoder_output_size=10, odim=5, ctc_type=ctc_type)
    ctc.softmax(ctc_args[0])
@pytest.mark.parametrize("ctc_type", ["builtin", "gtnctc"])
def test_ctc_log_softmax(ctc_type, ctc_args):
    """CTC.log_softmax runs on encoder hidden states."""
    ctc = CTC(encoder_output_size=10, odim=5, ctc_type=ctc_type)
    ctc.log_softmax(ctc_args[0])
@pytest.mark.parametrize("ctc_type", ["builtin", "gtnctc"])
def test_ctc_argmax(ctc_type, ctc_args):
    """CTC.argmax runs on encoder hidden states."""
    ctc = CTC(encoder_output_size=10, odim=5, ctc_type=ctc_type)
    ctc.argmax(ctc_args[0])
| 1,096 | 27.128205 | 64 | py |
espnet | espnet-master/test/espnet2/asr/test_discrete_asr_espnet_model.py | import pytest
import torch
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.transformer_decoder import TransformerDecoder
from espnet2.asr.discrete_asr_espnet_model import ESPnetDiscreteASRModel
from espnet2.asr.encoder.e_branchformer_encoder import EBranchformerEncoder
from espnet2.mt.frontend.embedding import Embedding
@pytest.mark.parametrize("input_layer_type", ["conv1d2", "conv1d3"])
def test_discrete_asr_espnet_model(input_layer_type):
    """Forward/backward the discrete-token ASR model with conv1d input layers."""
    vocab_size = 5
    src_vocab_size = 4
    enc_out = 4
    # Source tokens are discrete, so the frontend is an embedding lookup.
    frontend = Embedding(
        input_size=src_vocab_size,
        embed_dim=6,
        positional_dropout_rate=0,
    )
    encoder = EBranchformerEncoder(
        6,
        output_size=enc_out,
        linear_units=4,
        num_blocks=2,
        input_layer=input_layer_type,
    )
    decoder = TransformerDecoder(vocab_size, enc_out, linear_units=4, num_blocks=2)
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out)
    model = ESPnetDiscreteASRModel(
        vocab_size,
        src_vocab_size=src_vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=frontend,
        specaug=None,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
    )
    inputs = dict(
        src_text=torch.randint(0, 3, [2, 10], dtype=torch.long),
        src_text_lengths=torch.tensor([10, 8], dtype=torch.long),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long),
        text_lengths=torch.tensor([4, 3], dtype=torch.long),
    )
    loss, *_ = model(**inputs)
    loss.backward()
| 1,606 | 29.903846 | 83 | py |
espnet | espnet-master/test/espnet2/asr/test_maskctc_model.py | import pytest
import torch
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.mlm_decoder import MLMDecoder
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.maskctc_model import MaskCTCInference, MaskCTCModel
@pytest.mark.parametrize("encoder_arch", [TransformerEncoder, ConformerEncoder])
@pytest.mark.parametrize(
    "interctc_layer_idx, interctc_use_conditioning, interctc_weight",
    [
        ([], False, 0.0),
        ([1], True, 0.5),
    ],
)
def test_maskctc(
    encoder_arch, interctc_layer_idx, interctc_use_conditioning, interctc_weight
):
    """Train-step and greedy inference smoke test for the Mask-CTC model."""
    vocab_size = 5
    enc_out = 4
    encoder = encoder_arch(
        20,
        output_size=enc_out,
        linear_units=4,
        num_blocks=2,
        interctc_layer_idx=interctc_layer_idx,
        interctc_use_conditioning=interctc_use_conditioning,
    )
    decoder = MLMDecoder(
        vocab_size,
        enc_out,
        linear_units=4,
        num_blocks=2,
    )
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out)
    model = MaskCTCModel(
        vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
        interctc_weight=interctc_weight,
    )
    inputs = dict(
        speech=torch.randn(2, 10, 20, requires_grad=True),
        speech_lengths=torch.tensor([10, 8], dtype=torch.long),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long),
        text_lengths=torch.tensor([4, 3], dtype=torch.long),
    )
    loss, *_ = model(**inputs)
    loss.backward()
    # Inference path: run iterative mask-predict decoding without gradients.
    with torch.no_grad():
        model.eval()
        s2t = MaskCTCInference(
            asr_model=model,
            n_iterations=2,
            threshold_probability=0.5,
        )
        # free running
        inputs = dict(
            enc_out=torch.randn(2, 4),
        )
        s2t(**inputs)
| 2,076 | 25.974026 | 80 | py |
espnet | espnet-master/test/espnet2/asr/test_espnet_model.py | import pytest
import torch
from espnet2.asr.ctc import CTC
from espnet2.asr.decoder.transducer_decoder import TransducerDecoder
from espnet2.asr.decoder.transformer_decoder import TransformerDecoder
from espnet2.asr.encoder.conformer_encoder import ConformerEncoder
from espnet2.asr.encoder.transformer_encoder import TransformerEncoder
from espnet2.asr.espnet_model import ESPnetASRModel
from espnet2.asr_transducer.joint_network import JointNetwork
@pytest.mark.parametrize("encoder_arch", [TransformerEncoder, ConformerEncoder])
@pytest.mark.parametrize("decoder_arch", [TransformerDecoder, None])
@pytest.mark.parametrize("aux_ctc", [None, {"0": "lid"}])
def test_espnet_model(encoder_arch, decoder_arch, aux_ctc):
    """Forward/backward the attention+CTC ASR model across configurations."""
    vocab_size = 5
    enc_out = 4
    encoder = encoder_arch(20, output_size=enc_out, linear_units=4, num_blocks=2)
    decoder = TransformerDecoder(vocab_size, enc_out, linear_units=4, num_blocks=2)
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out)
    model = ESPnetASRModel(
        vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
        joint_network=None,
        aux_ctc=aux_ctc,
    )
    inputs = dict(
        speech=torch.randn(2, 10, 20, requires_grad=True),
        speech_lengths=torch.tensor([10, 8], dtype=torch.long),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long),
        text_lengths=torch.tensor([4, 3], dtype=torch.long),
    )
    loss, *_ = model(**inputs)
    loss.backward()
@pytest.mark.parametrize("encoder_arch", [TransformerEncoder, ConformerEncoder])
@pytest.mark.parametrize("decoder_arch", [TransducerDecoder])
@pytest.mark.parametrize("multi_blank_durations", [[], [2]])
def test_espnet_model_transducer(encoder_arch, decoder_arch, multi_blank_durations):
    """Forward/backward the transducer ASR model (multi-blank needs CUDA)."""
    # Multi-Blank Transducer only supports GPU
    if len(multi_blank_durations) > 0 and not torch.cuda.is_available():
        return
    device = "cuda" if len(multi_blank_durations) > 0 else "cpu"
    device = torch.device(device)
    vocab_size = 5
    enc_out = 4
    encoder = encoder_arch(20, output_size=enc_out, linear_units=4, num_blocks=2)
    decoder = TransducerDecoder(vocab_size, hidden_size=4)
    joint_network = JointNetwork(vocab_size, encoder_size=enc_out, decoder_size=4)
    ctc = CTC(odim=vocab_size, encoder_output_size=enc_out)
    model = ESPnetASRModel(
        vocab_size,
        token_list=["<blank>", "<unk>", "a", "i", "<eos>"],
        frontend=None,
        specaug=None,
        normalize=None,
        preencoder=None,
        encoder=encoder,
        postencoder=None,
        decoder=decoder,
        ctc=ctc,
        joint_network=joint_network,
        aux_ctc=None,
        transducer_multi_blank_durations=multi_blank_durations,
    ).to(device)
    inputs = dict(
        speech=torch.randn(2, 10, 20, requires_grad=True).to(device),
        speech_lengths=torch.tensor([10, 8], dtype=torch.long).to(device),
        text=torch.randint(2, 4, [2, 4], dtype=torch.long).to(device),
        text_lengths=torch.tensor([4, 3], dtype=torch.long).to(device),
    )
    loss, *_ = model(**inputs)
    loss.backward()
| 3,305 | 36.146067 | 84 | py |
espnet | espnet-master/test/espnet2/asr/postencoder/test_hugging_face_transformers_postencoder.py | import pytest
import torch
from packaging.version import parse as V
from espnet2.asr.postencoder.hugging_face_transformers_postencoder import (
HuggingFaceTransformersPostEncoder,
)
is_torch_1_8_plus = V(torch.__version__) >= V("1.8.0")
@pytest.mark.parametrize(
    "model_name_or_path, length_adaptor_n_layers, lang_token_id",
    [
        ("akreal/tiny-random-bert", 0, 1),
        ("akreal/tiny-random-gpt2", 0, 1),
        ("akreal/tiny-random-xlnet", 0, 1),
        ("akreal/tiny-random-t5", 0, 1),
        ("akreal/tiny-random-mbart", 0, 1),
        ("akreal/tiny-random-mbart", 0, -1),
        ("akreal/tiny-random-mbart", 1, -1),
        ("akreal/tiny-random-mpnet", 0, 1),
    ],
)
@pytest.mark.execution_timeout(50)
def test_transformers_forward(
    model_name_or_path, length_adaptor_n_layers, lang_token_id
):
    """Forward/backward the HF postencoder and check output shapes/lengths."""
    if not is_torch_1_8_plus:
        return
    idim = 400
    postencoder = HuggingFaceTransformersPostEncoder(
        idim, model_name_or_path, length_adaptor_n_layers, lang_token_id
    )
    x = torch.randn([4, 50, idim], requires_grad=True)
    x_lengths = torch.LongTensor([20, 5, 50, 15])
    y, y_lengths = postencoder(x, x_lengths)
    y.sum().backward()
    odim = postencoder.output_size()
    # Each length-adaptor layer halves the time dimension.
    y_shape_1_expected = 50 // 2**length_adaptor_n_layers
    y_lengths_expected = (
        x_lengths.float().div(2**length_adaptor_n_layers).floor().long()
    )
    # A language token (lang_token_id != -1) prepends one extra frame.
    if lang_token_id != -1:
        y_shape_1_expected += 1
        y_lengths_expected += 1
    assert y.shape == torch.Size([4, y_shape_1_expected, odim])
    assert torch.equal(y_lengths, y_lengths_expected)
@pytest.mark.execution_timeout(30)
def test_transformers_too_short_utt():
    """An utterance shorter than the length adaptor's subsampling must fail."""
    if not is_torch_1_8_plus:
        return
    idim = 400
    postencoder = HuggingFaceTransformersPostEncoder(idim, "akreal/tiny-random-bert", 2)
    x = torch.randn([2, 3, idim], requires_grad=True)
    x_lengths = torch.LongTensor([3, 2])
    with pytest.raises(Exception):
        y, y_lengths = postencoder(x, x_lengths)
@pytest.mark.execution_timeout(30)
def test_reload_pretrained_parameters():
    """reload_pretrained_parameters restores the original weights after mutation."""
    if not is_torch_1_8_plus:
        return
    postencoder = HuggingFaceTransformersPostEncoder(400, "akreal/tiny-random-bert")
    saved_param = postencoder.parameters().__next__().detach().clone()
    # Zero the first parameter in place to verify it really changed...
    postencoder.parameters().__next__().data *= 0
    new_param = postencoder.parameters().__next__().detach().clone()
    assert not torch.equal(saved_param, new_param)
    # ...then reload and verify it matches the saved copy again.
    postencoder.reload_pretrained_parameters()
    new_param = postencoder.parameters().__next__().detach().clone()
    assert torch.equal(saved_param, new_param)
| 2,631 | 31.9 | 88 | py |
espnet | espnet-master/test/espnet2/asr/transducer/test_transducer_beam_search.py | import pytest
import torch
from espnet2.asr.decoder.transducer_decoder import TransducerDecoder
from espnet2.asr.transducer.beam_search_transducer import BeamSearchTransducer
from espnet2.asr_transducer.joint_network import JointNetwork
from espnet2.lm.seq_rnn_lm import SequentialRNNLM
from espnet2.lm.transformer_lm import TransformerLM
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize("rnn_type", ["lstm", "gru"])
@pytest.mark.parametrize(
    "search_params",
    [
        {"search_type": "greedy"},
        {"search_type": "default", "score_norm": False, "nbest": 4},
        {
            "search_type": "default",
            "score_norm": False,
            "nbest": 4,
            "lm": "TransformerLM",
        },
        {"search_type": "alsd", "u_max": 20},
        {"search_type": "tsd", "max_sym_exp": 3},
        {"search_type": "nsc", "nstep": 2, "lm": None},
        {"search_type": "nsc", "nstep": 2},
        {"search_type": "maes", "nstep": 2, "lm": None},
        {"search_type": "maes", "nstep": 2},
        {
            "search_type": "mbg",
            "multi_blank_durations": [2, 1],
            "multi_blank_indices": [0, 1],
        },
    ],
)
def test_transducer_beam_search(rnn_type, search_params):
    """Run every transducer search algorithm once on random encoder output."""
    token_list = ["<blank>", "a", "b", "c", "<sos>"]
    vocab_size = len(token_list)
    # greedy and mbg decode with beam width 1; the rest use a real beam.
    beam_size = 1 if search_params["search_type"] in ["greedy", "mbg"] else 2
    encoder_output_size = 4
    decoder_output_size = 4
    decoder = TransducerDecoder(
        vocab_size, hidden_size=decoder_output_size, rnn_type=rnn_type
    )
    joint_net = JointNetwork(
        vocab_size, encoder_output_size, decoder_output_size, joint_space_size=2
    )
    # An "lm" entry in search_params overrides the default RNN LM; the string
    # "TransformerLM" selects a transformer LM instead.
    lm = search_params.pop("lm", SequentialRNNLM(vocab_size, rnn_type="lstm"))
    if isinstance(lm, str) and lm == "TransformerLM":
        lm = TransformerLM(vocab_size, pos_enc=None, unit=10, layer=2)
    beam = BeamSearchTransducer(
        decoder,
        joint_net,
        beam_size=beam_size,
        lm=lm,
        token_list=token_list,
        **search_params,
    )
    enc_out = torch.randn(30, encoder_output_size)
    with torch.no_grad():
        _ = beam(enc_out)
| 2,181 | 30.623188 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.