# (extraction artifact: dataset header "code stringlengths 17 6.64M |" removed)
def get_loader_from_returnn_dataset(dataset: Dataset, mp_manager: torch.multiprocessing.Manager) -> DataLoader:
    """
    Wrap a RETURNN dataset in a torch DataLoader with a small fixed batching config.

    :param dataset: RETURNN dataset to iterate
    :param mp_manager: multiprocessing manager used for the shared epoch value
    :return: DataLoader yielding collated batches
    """
    # Shared epoch counter so worker processes can follow dataset resets.
    shared_epoch = mp_manager.Value('i', 0)
    shared_epoch.value = 1
    reset_cb = returnn_dataset_wrapper.ReturnnDatasetResetMpSharedEpochCallback(
        dataset=dataset, epoch_mp_shared=shared_epoch
    )
    wrapped = returnn_dataset_wrapper.ReturnnDatasetIterDataPipe(dataset, reset_callback=reset_cb)
    batches = data_pipeline.BatchingIterDataPipe(wrapped, batch_size=5, max_seqs=2)

    # Sanity checks: the pipeline must survive deepcopy and pickle round-trips
    # (as needed e.g. for DataLoader worker processes).
    from copy import deepcopy
    import pickle

    deepcopy(batches)
    pickle.loads(pickle.dumps(batches))

    return DataLoader(batches, batch_size=None, collate_fn=data_pipeline.collate_batch)
|
def test_pipeline_serialization():
    """The loader (incl. its internal serialization sanity checks) yields batches."""
    dataset = Task12AXDataset(num_seqs=1000)
    mp_manager = torch.multiprocessing.Manager()
    loader = get_loader_from_returnn_dataset(dataset, mp_manager)
    wanted = 3
    seen = 0
    for batch in loader:
        print(batch)
        seen += 1
        if seen >= wanted:
            break
    assert seen == wanted
|
def test_HDFDataset():
    """Run the torch data pipeline over an HDFDataset generated from Task12AX."""
    from test_HDFDataset import generate_hdf_from_other, HDFDataset

    hdf_fn = generate_hdf_from_other({'class': 'Task12AXDataset', 'num_seqs': 23})
    hdf_dataset = HDFDataset(files=[hdf_fn], cache_byte_size=0)
    mp_manager = torch.multiprocessing.Manager()
    loader = get_loader_from_returnn_dataset(hdf_dataset, mp_manager)
    wanted = 3
    seen = 0
    for batch in loader:
        print(batch)
        seen += 1
        if seen >= wanted:
            break
    assert seen == wanted
|
def test_MultiProcDataset_HDFDataset():
    """Run the torch data pipeline over a MultiProcDataset wrapping an HDFDataset."""
    from test_HDFDataset import generate_hdf_from_other
    from test_MultiProcDataset import timeout
    from returnn.datasets.multi_proc import MultiProcDataset
    hdf_fn = generate_hdf_from_other({'class': 'Task12AXDataset', 'num_seqs': 23})
    # Guard with a timeout: MultiProcDataset spawns worker processes and could hang on errors.
    with timeout(10):
        mp_dataset = MultiProcDataset(dataset={'class': 'HDFDataset', 'files': [hdf_fn], 'cache_byte_size': 0}, num_workers=1, buffer_size=1)
        mp_dataset.initialize()
        mp_manager = torch.multiprocessing.Manager()
        loader = get_loader_from_returnn_dataset(mp_dataset, mp_manager)
        # Consume a few batches to make sure the pipeline actually produces data.
        c = 0
        n = 3
        for batch in loader:
            print(batch)
            c += 1
            if (c >= n):
                break
        assert (c == n)
|
def test_torch_engine_train():
    """End-to-end training run of the torch Engine on the Task12AX toy dataset."""

    class _Model(torch.nn.Module):
        def __init__(self, **_kwargs):
            super(_Model, self).__init__()
            self.lin = torch.nn.Linear(9, 2)

        def __call__(self, x: torch.Tensor) -> torch.Tensor:
            """
            :param x: [B,T,D]
            :return: [B,T,D']
            """
            x = self.lin(x)
            return torch.nn.functional.log_softmax(x, dim=(- 1))

        @classmethod
        def train_step(cls, *, model: _Model, extern_data: TensorDict, **_kwargs):
            """Train step: CE loss and frame-error over packed (non-padded) frames."""
            data: Tensor = extern_data['data']
            logits = model(data.raw_tensor)
            # Pack so padded frames are excluded from loss and error computation.
            logits_packed = torch.nn.utils.rnn.pack_padded_sequence(logits, data.dims[1].dyn_size_ext.raw_tensor, batch_first=True, enforce_sorted=False)
            targets = extern_data['classes']
            targets_packed = torch.nn.utils.rnn.pack_padded_sequence(targets.raw_tensor, data.dims[1].dyn_size_ext.raw_tensor, batch_first=True, enforce_sorted=False)
            loss = torch.nn.CrossEntropyLoss(reduction='none')(logits_packed.data, targets_packed.data.long())
            rf.get_run_ctx().mark_as_loss(name='ce', loss=loss)
            frame_error = torch.argmax(logits_packed.data, dim=(- 1)).not_equal(targets_packed.data)
            rf.get_run_ctx().mark_as_loss(name='fer', loss=frame_error, as_error=True)

    config = Config(dict(task='train', device='cpu', extern_data={'data': {'dim': 9}, 'classes': {'dim': 2, 'sparse': True}}, get_model=_Model, train_step=_Model.train_step, batch_size=500, optimizer={'class': 'adam'}))
    dataset = init_dataset({'class': 'Task12AXDataset', 'num_seqs': 100, 'name': 'train'})
    dataset.init_seq_order(epoch=1)
    with global_config_ctx(config):
        engine = Engine(config=config)
        engine.init_train_from_config(train_data=dataset)
        engine.train()
|
def test_torch_engine_forward_simple():
    """Minimal forward run: the default output is simply the input data."""

    def _get_model(**_kwargs):
        return torch.nn.Module()

    def _forward_step(*, extern_data: TensorDict, **_kwargs):
        # Identity "model": forward the input through unchanged.
        rf.get_run_ctx().mark_as_default_output(extern_data['data'])

    config = Config(
        dict(
            task='forward',
            extern_data={'data': {'dim': 9}},
            batch_size=500,
            get_model=_get_model,
            forward_step=_forward_step,
        )
    )
    dataset = init_dataset({'class': 'Task12AXDataset', 'num_seqs': 100, 'name': 'dev', 'fixed_random_seed': 1})
    dataset.init_seq_order(epoch=1)
    callback = ForwardCallbackIface()
    with global_config_ctx(config):
        engine = Engine(config=config)
        engine.init_network_from_config()
        engine.forward_with_callback(callback=callback, dataset=dataset)
|
def test_torch_engine_forward():
    """Forward run checking the callback protocol: init, per-seq processing, finish."""

    def _get_model(**_kwargs):
        return torch.nn.Module()

    def _forward_step(*, extern_data: TensorDict, **_kwargs):
        # Identity "model": forward the input data as the default output.
        rf.get_run_ctx().mark_as_default_output(extern_data['data'])

    class _ForwardCallback(ForwardCallbackIface):
        def __init__(self):
            self.num_seqs = 0
            self.init_called = False
            self.finish_called = False

        def init(self, *, model):
            assert isinstance(model, torch.nn.Module)
            # init() must be called before any process_seq().
            assert (self.num_seqs == 0)
            self.init_called = True

        def process_seq(self, *, seq_tag: str, outputs: TensorDict):
            assert (isinstance(seq_tag, str) and seq_tag.startswith('seq-'))
            assert isinstance(outputs, TensorDict)
            out = outputs['output']
            assert isinstance(out, Tensor)
            # Per-seq output has 2 dims; the last is the feature dim of size 9.
            assert ((out.batch_ndim == 2) and (out.batch_shape[(- 1)] == 9))
            self.num_seqs += 1

        def finish(self):
            self.finish_called = True

    config = Config(dict(task='forward', extern_data={'data': {'dim': 9}}, batch_size=500, get_model=_get_model, forward_step=_forward_step))
    dataset = init_dataset({'class': 'Task12AXDataset', 'num_seqs': 100, 'name': 'dev', 'fixed_random_seed': 1})
    dataset.init_seq_order(epoch=1)
    callback = _ForwardCallback()
    with global_config_ctx(config):
        engine = Engine(config=config)
        engine.init_network_from_config()
        engine.forward_with_callback(callback=callback, dataset=dataset)
        # Every sequence must have been processed exactly once.
        assert (callback.num_seqs == 100)
        assert (callback.init_called and callback.finish_called)
|
def test_torch_engine_forward_pure_torch_no_model_out():
    """Forward run where the output is marked as a raw torch tensor, not a Tensor."""

    def _get_model(**_kwargs):
        return torch.nn.Module()

    def _forward_step(*, extern_data: TensorDict, **_kwargs):
        # Mark the raw torch tensor directly (no RETURNN Tensor wrapper).
        rf.get_run_ctx().mark_as_default_output(extern_data['data'].raw_tensor)

    config = Config(
        dict(
            task='forward',
            extern_data={'data': {'dim': 9}},
            batch_size=500,
            get_model=_get_model,
            forward_step=_forward_step,
        )
    )
    dataset = init_dataset({'class': 'Task12AXDataset', 'num_seqs': 100, 'name': 'dev', 'fixed_random_seed': 1})
    dataset.init_seq_order(epoch=1)
    callback = ForwardCallbackIface()
    with global_config_ctx(config):
        engine = Engine(config=config)
        engine.init_network_from_config()
        engine.forward_with_callback(callback=callback, dataset=dataset)
|
def test_torch_forward_raw_strings():
    """Forward run with raw-string, byte-encoded orth, and sparse class outputs."""
    from test_Dataset import create_ogg_zip_txt_only_dataset

    def _get_model(**_kwargs):
        return torch.nn.Module()

    def _forward_step(*, extern_data: TensorDict, **_kwargs):
        # Pass all extern data through as outputs.
        for (key, value) in extern_data.data.items():
            rf.get_run_ctx().mark_as_output(value, key)

    config = Config(dict(task='forward', extern_data={'classes': {'shape': (None,), 'dim': 29, 'sparse': True}, 'orth': {'shape': (None,), 'dim': 256, 'sparse': True}, 'raw': {'shape': (), 'dtype': 'string'}}, batch_size=500, get_model=_get_model, forward_step=_forward_step))
    _demo_txt = 'hello world'
    _demo_seq_tag = 'seq-000000'

    class _ForwardCallback(ForwardCallbackIface):
        def process_seq(self, *, seq_tag: str, outputs: TensorDict):
            assert (isinstance(seq_tag, str) and (seq_tag == _demo_seq_tag))
            raw = outputs['raw'].raw_tensor
            orth = outputs['orth'].raw_tensor
            classes = outputs['classes'].raw_tensor
            # 'raw' is a 0-dim numpy str array holding the original text.
            assert (isinstance(raw, numpy.ndarray) and raw.dtype.name.startswith('str') and (raw.shape == ()))
            raw_ = raw.item()
            assert (isinstance(raw_, str) and (raw_ == _demo_txt))
            # 'orth' is the utf8 byte sequence of the text.
            assert (isinstance(orth, numpy.ndarray) and (orth.dtype == numpy.uint8) and (orth.ndim == 1))
            # tobytes(): ndarray.tostring() was deprecated in NumPy 1.19 and removed in NumPy 2.0.
            orth_ = orth.tobytes()
            assert (orth_.decode('utf8') == _demo_txt)
            # 'classes' are label indices; mapping back must recover the text plus the final '.'.
            assert (isinstance(classes, numpy.ndarray) and (classes.dtype == numpy.int32) and (classes.ndim == 1))
            classes_ = ''.join([dataset.targets.id_to_label(c) for c in classes])
            assert (classes_ == (_demo_txt + '.'))

    with global_config_ctx(config), create_ogg_zip_txt_only_dataset(text=_demo_txt, seq_tag=_demo_seq_tag) as dataset:
        dataset.init_seq_order(epoch=1)
        engine = Engine(config=config)
        engine.init_network_from_config()
        engine.forward_with_callback(callback=_ForwardCallback(), dataset=dataset)
|
def test_forward_beam_seq_lens():
    """Forward with a beam dim whose seq lens differ per beam entry; checks per-seq dyn sizes."""
    from returnn.tensor import Dim, batch_dim

    def _get_model(**_kwargs):
        return torch.nn.Module()

    def _forward_step(*, extern_data: TensorDict, **_kwargs):
        data = extern_data['data']
        assert (data.dims[0] == batch_dim)
        time_dim = data.dims[1]
        feat_dim = data.dims[2]
        beam_dim = Dim(dimension=5, name='beam')
        with rf.set_default_device_ctx(time_dim.dyn_size_ext.device):
            # Per (batch, beam): seq_len - beam_index, clipped at 0.
            ext_seq_lens = rf.relu(rf.combine_bc(time_dim.dyn_size_ext, '-', rf.range_over_dim(beam_dim, dtype=time_dim.dyn_size_ext.dtype)))
        assert (set(ext_seq_lens.dims) == {batch_dim, beam_dim})
        ext_time_dim = Dim(ext_seq_lens, name='time_with_beam')
        # Broadcast the data over the beam and reattach with the beam-dependent time dim.
        ext_data = rf.expand_dim(data, beam_dim)
        (ext_data, _) = rf.replace_dim(ext_data, in_dim=time_dim, out_dim=ext_time_dim)
        assert (set(ext_data.dims) == {batch_dim, beam_dim, ext_time_dim, feat_dim})
        rf.get_run_ctx().mark_as_output(ext_data, 'ext_data', dims=(batch_dim, beam_dim, ext_time_dim, feat_dim))

    max_sizes = set()

    class _ForwardCallback(ForwardCallbackIface):
        def process_seq(self, *, seq_tag: str, outputs: TensorDict):
            out: Tensor = outputs['ext_data']
            (beam_dim, ext_time_dim, feat_dim) = out.dims
            assert isinstance(ext_time_dim.dyn_size_ext.raw_tensor, numpy.ndarray)
            assert (ext_time_dim.dyn_size_ext.dims == (beam_dim,))
            max_size = max(ext_time_dim.dyn_size_ext.raw_tensor)
            # Sizes per beam entry must be max_size, max_size-1, ..., clipped at 0.
            assert (set(ext_time_dim.dyn_size_ext.raw_tensor) == set(range(max(((max_size - beam_dim.dimension) + 1), 0), (max_size + 1))))
            max_sizes.add(max_size)

    config = Config(dict(task='forward', batch_size=500, extern_data={'data': {'dim': 9}}, get_model=_get_model, forward_step=_forward_step))
    dataset = init_dataset({'class': 'Task12AXDataset', 'num_seqs': 100, 'name': 'dev', 'fixed_random_seed': 1})
    callback = _ForwardCallback()
    with global_config_ctx(config):
        dataset.init_seq_order(epoch=1)
        engine = Engine(config=config)
        engine.init_network_from_config()
        engine.forward_with_callback(callback=callback, dataset=dataset)
    # Seq lens differ across sequences, so several distinct max sizes must be observed.
    assert (len(max_sizes) > 1)
|
def test_min_seq_len():
    """Sequences shorter than min_seq_length must be filtered out by the data loader."""
    from returnn.datasets.generating import DummyDataset

    # seq_len=1 < min_seq_length=2 -> everything is filtered, loader yields nothing.
    config = Config({'min_seq_length': 2, 'batch_size': 3})
    dataset = DummyDataset(input_dim=1, output_dim=4, num_seqs=1, seq_len=1)
    dataset.initialize()
    dataset.init_seq_order(epoch=1)
    engine = Engine(config=config)
    data_loader = engine._create_data_loader(dataset)
    assert next(iter(data_loader), None) is None, 'Should not contain sequences'

    # Without the filter, the equivalent dataset must yield at least one batch.
    config = Config(dict(batch_size=3))
    dataset = DummyDataset(input_dim=1, output_dim=4, num_seqs=1, seq_len=3)
    dataset.initialize()
    dataset.init_seq_order(epoch=1)
    engine = Engine(config=config)
    data_loader = engine._create_data_loader(dataset)
    assert next(iter(data_loader), None) is not None, 'Should have contained sequences'
|
def test_max_seq_len():
    """Sequences longer than max_seq_length must be filtered out by the data loader."""
    from returnn.datasets.generating import DummyDataset

    # seq_len=5 > max_seq_length=4 -> everything is filtered, loader yields nothing.
    config = Config({'max_seq_length': 4, 'batch_size': 3})
    dataset = DummyDataset(input_dim=1, output_dim=4, num_seqs=1, seq_len=5)
    dataset.initialize()
    dataset.init_seq_order(epoch=1)
    engine = Engine(config=config)
    data_loader = engine._create_data_loader(dataset)
    assert next(iter(data_loader), None) is None, 'Should not contain sequences'

    # Without the filter, a short-enough dataset must yield at least one batch.
    config = Config(dict(batch_size=3))
    dataset = DummyDataset(input_dim=1, output_dim=4, num_seqs=1, seq_len=3)
    dataset.initialize()
    dataset.init_seq_order(epoch=1)
    engine = Engine(config=config)
    data_loader = engine._create_data_loader(dataset)
    assert next(iter(data_loader), None) is not None, 'Should have contained sequences'
|
def test_data_loader_oggzip():
    """Check batching (max_seqs/batch_size) of the data loader on an OggZip text-only dataset."""
    from test_Dataset import create_ogg_zip_txt_only_dataset_mult_seqs

    # First scenario: verify batch counts and that all but the last batch are full.
    ds_num_seqs = 23
    ds_max_seq_len = 11
    max_seqs = 3
    # batch_size is chosen large enough that max_seqs is always the limiting factor.
    config = Config({'max_seqs': max_seqs, 'batch_size': (max_seqs * ds_max_seq_len)})
    with create_ogg_zip_txt_only_dataset_mult_seqs(num_seqs=ds_num_seqs, max_seq_len=ds_max_seq_len) as dataset:
        dataset.init_seq_order(epoch=1)
        engine = Engine(config=config)
        data_loader = engine._create_data_loader(dataset)
        num_batches = 0
        num_seqs = 0
        last_batch_num_seqs = None
        for batch in data_loader:
            assert isinstance(batch, dict)
            data: torch.Tensor = batch['classes']
            assert isinstance(data, torch.Tensor)
            num_batches += 1
            num_seqs += data.shape[0]
            if (last_batch_num_seqs is not None):
                # Every batch except possibly the final one must be full.
                assert (last_batch_num_seqs == max_seqs)
            last_batch_num_seqs = data.shape[0]
        assert (1 <= last_batch_num_seqs <= max_seqs)
        # ceil(num_seqs / max_seqs) batches, and every sequence seen exactly once.
        assert ((num_batches == (- ((- num_seqs) // max_seqs))) and (num_seqs == ds_num_seqs))

    # Second scenario: small dataset, compare the exact (padded) batch contents.
    ds_num_seqs = 5
    ds_max_seq_len = 5
    max_seqs = 2
    config = Config({'max_seqs': max_seqs, 'batch_size': (max_seqs * ds_max_seq_len)})
    batches = []
    with create_ogg_zip_txt_only_dataset_mult_seqs(num_seqs=ds_num_seqs, max_seq_len=ds_max_seq_len) as dataset:
        dataset.init_seq_order(epoch=1)
        engine = Engine(config=config)
        data_loader = engine._create_data_loader(dataset)
        for batch in data_loader:
            assert isinstance(batch, dict)
            data: torch.Tensor = batch['classes']
            batches.append(data.numpy().tolist())
    print(batches)
    # Exact expected padded label batches for this dataset/seed setup.
    assert (batches == [[[12, 8, 9, 11], [16, 0, 0, 0]], [[6, 25, 18, 20, 5], [28, 10, 28, 14, 0]], [[17, 23]]])
|
def test_load_optimizer_old_format():
    """Optimizer checkpoints saved as a raw state_dict (old format) must still load."""
    config = Config(dict(optimizer={'class': 'adamw', 'weight_decay': 0.001}))
    model = torch.nn.Linear(7, 5)
    updater = Updater(config=config, network=model, device=torch.device('cpu'))
    updater.create_optimizer()
    with tempfile.TemporaryDirectory(prefix='returnn_test_load_optimizer_old_format') as tmp_dir:
        old_path = tmp_dir + '/model.opt.old_format.pt'
        new_path = tmp_dir + '/model.opt.new_format.pt'
        # Old format: the raw optimizer state_dict saved directly via torch.save.
        torch.save(updater.optimizer.state_dict(), old_path)
        updater.load_optimizer(old_path)
        # Round-trip through the current save format as well.
        updater.save_optimizer(new_path)
        updater.load_optimizer(new_path)
|
def test_optimizer_convert_aux_param():
    """Optimizer state must load across models where aux params are buffers vs. parameters."""
    from returnn.torch.frontend.bridge import rf_module_to_pt_module
    config = Config(dict(optimizer={'class': 'adamw', 'weight_decay': 0.001}))
    rf.select_backend_torch()

    class _Model(rf.Module):
        def __init__(self):
            super().__init__()
            # BatchNorm carries auxiliary params (presumably running statistics — see rf.BatchNorm)
            # which can be converted either to PT buffers or PT parameters.
            self.batch_norm = rf.BatchNorm(in_dim=rf.Dim(3))
            self.linear = rf.Linear(in_dim=rf.Dim(2), out_dim=rf.Dim(3))

    rf_model = _Model()
    pt_model_buf = rf_module_to_pt_module(rf_model, aux_params_as_buffers=True)
    pt_model_param = rf_module_to_pt_module(rf_model, aux_params_as_buffers=False)
    pt_model_buf_param_names = set((name for (name, _) in pt_model_buf.named_parameters()))
    pt_model_param_param_names = set((name for (name, _) in pt_model_param.named_parameters()))
    print('buf params:', pt_model_buf_param_names)
    print('all params:', pt_model_param_param_names)
    # With aux params as buffers, strictly fewer named parameters remain.
    assert (len(pt_model_buf_param_names) < len(pt_model_param_param_names))
    assert pt_model_buf_param_names.issubset(pt_model_param_param_names)
    updater_buf = Updater(config=config, network=pt_model_buf, device=torch.device('cpu'))
    updater_buf.create_optimizer()
    updater_param = Updater(config=config, network=pt_model_param, device=torch.device('cpu'))
    updater_param.create_optimizer()
    with tempfile.TemporaryDirectory(prefix='returnn_test_optimizer_convert_aux_param') as tmp_dir:
        updater_buf.save_optimizer((tmp_dir + '/model_buf.opt.pt'))
        updater_param.save_optimizer((tmp_dir + '/model_param.opt.pt'))
        # Each updater must load both its own and the other variant's checkpoint.
        updater_buf.load_optimizer((tmp_dir + '/model_buf.opt.pt'))
        updater_param.load_optimizer((tmp_dir + '/model_param.opt.pt'))
        updater_buf.load_optimizer((tmp_dir + '/model_param.opt.pt'))
        updater_param.load_optimizer((tmp_dir + '/model_buf.opt.pt'))
|
class _DemoException(Exception):
    """Marker exception raised deliberately in tests to check Engine error reporting."""
    pass
|
class _TestTorchSubModelRaisingException(torch.nn.Module):
    """Submodule whose forward always raises _DemoException, to test module-stack reporting."""

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.lin = torch.nn.Linear(in_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        :param x: [B,T,D]
        :return: [B,T,D']
        """
        x = self.lin(x)
        # Condition is always true at runtime but opaque to static analysis,
        # keeping the trailing return formally reachable.
        if int('1') == 1:
            raise _DemoException('uh')
        return x
|
def test_torch_engine_train_exception():
    """An exception raised inside a submodule must be reported with the module call stack."""

    class _Model(torch.nn.Module):
        def __init__(self, **_kwargs):
            super(_Model, self).__init__()
            # Submodule that always raises _DemoException in forward.
            self.sub = _TestTorchSubModelRaisingException(9, 2)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            """
            :param x: [B,T,D]
            :return: [B,T,D']
            """
            x = self.sub(x)
            return torch.nn.functional.log_softmax(x, dim=(- 1))

        @classmethod
        def train_step(cls, *, model: _Model, extern_data: TensorDict, **_kwargs):
            """Train step: CE loss and frame-error over packed (non-padded) frames."""
            data: Tensor = extern_data['data']
            logits = model(data.raw_tensor)
            logits_packed = torch.nn.utils.rnn.pack_padded_sequence(logits, data.dims[1].dyn_size_ext.raw_tensor, batch_first=True, enforce_sorted=False)
            targets = extern_data['classes']
            targets_packed = torch.nn.utils.rnn.pack_padded_sequence(targets.raw_tensor, data.dims[1].dyn_size_ext.raw_tensor, batch_first=True, enforce_sorted=False)
            loss = torch.nn.CrossEntropyLoss(reduction='none')(logits_packed.data, targets_packed.data.long())
            rf.get_run_ctx().mark_as_loss(name='ce', loss=loss)
            frame_error = torch.argmax(logits_packed.data, dim=(- 1)).not_equal(targets_packed.data)
            rf.get_run_ctx().mark_as_loss(name='fer', loss=frame_error, as_error=True)

    config = Config(dict(task='train', device='cpu', extern_data={'data': {'dim': 9}, 'classes': {'dim': 2, 'sparse': True}}, get_model=_Model, train_step=_Model.train_step, batch_size=500, optimizer={'class': 'adam'}))
    dataset = init_dataset({'class': 'Task12AXDataset', 'num_seqs': 100, 'name': 'train'})
    dataset.init_seq_order(epoch=1)
    with global_config_ctx(config):
        engine = Engine(config=config)
        engine.init_train_from_config(train_data=dataset)
        try:
            engine.train()
        except _DemoException as exc:
            print('got demo exception:', exc)
            # The exception message must include the annotated module call stack.
            exc_lines = str(exc).splitlines()
            assert (('Module call stack:' in exc_lines) and ('(_TestTorchSubModelRaisingException.forward) sub' in exc_lines))
        else:
            raise Exception('did not get expected exception')
|
def test_dot_scalar_multiplication():
    """matmul of two scalars with nothing to reduce is plain multiplication."""
    lhs = Tensor(name='a', dims=[], dtype='float32', raw_tensor=torch.tensor(2.0))
    rhs = Tensor(name='b', dims=[], dtype='float32', raw_tensor=torch.tensor(3.0))
    out = rf.matmul(lhs, rhs, reduce=[])
    assert pytest.approx(out.raw_tensor) == 6.0
|
def test_dot_scalar_product():
    """Reducing over the shared feature dim yields the inner (dot) product."""
    feat = Dim(dimension=3)
    lhs = Tensor(name='a', dims=[feat], dtype='float32', raw_tensor=torch.tensor([1.0, 2.0, 3.0]))
    rhs = Tensor(name='b', dims=[feat], dtype='float32', raw_tensor=torch.tensor([4.0, 5.0, 6.0]))
    out = rf.matmul(lhs, rhs, reduce=[feat])
    # 1*4 + 2*5 + 3*6 = 32
    assert pytest.approx(out.raw_tensor) == 32.0
|
def test_dot_outer_product():
    """Distinct dims and no reduction: matmul forms the outer product."""
    dim_a = Dim(dimension=3)
    dim_b = Dim(dimension=3)
    lhs = Tensor(name='a', dims=[dim_a], dtype='float32', raw_tensor=torch.tensor([1.0, 2.0, 3.0]))
    rhs = Tensor(name='b', dims=[dim_b], dtype='float32', raw_tensor=torch.tensor([4.0, 5.0, 6.0]))
    out = rf.matmul(lhs, rhs, reduce=[])
    assert out.dims == (dim_a, dim_b)
    assert out.raw_tensor.shape == (3, 3)
|
def test_dot_matrix_vector_product():
    """Matrix-vector product by reducing the shared dim."""
    feat = Dim(dimension=3)
    red = Dim(dimension=2)
    mat = Tensor(
        name='a', dims=[red, feat], dtype='float32',
        raw_tensor=torch.tensor([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]),
    )
    vec = Tensor(name='b', dims=[red], dtype='float32', raw_tensor=torch.tensor([4.0, 5.0]))
    out = rf.matmul(mat, vec, reduce=[red])
    assert out.dims == (feat,)
    assert out.raw_tensor.tolist() == pytest.approx([-1.0, -2.0, -3.0])
|
def test_dot_matrix_matrix_product():
    """Matrix-matrix product by reducing the shared dim of size 2."""
    dim_a = Dim(dimension=3)
    dim_b = Dim(dimension=2)
    red = Dim(dimension=2)
    lhs = Tensor(
        name='a', dims=[dim_a, red], dtype='float32',
        raw_tensor=torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
    )
    rhs = Tensor(
        name='b', dims=[red, dim_b], dtype='float32',
        raw_tensor=torch.tensor([[1.0, -2.0], [2.0, -4.0]]),
    )
    out = rf.matmul(lhs, rhs, reduce=[red])
    assert out.dims == (dim_a, dim_b)
    assert torch.allclose(out.raw_tensor, torch.tensor([[5.0, -10.0], [11.0, -22.0], [17.0, -34.0]]))
|
def test_dot_scale_matrix():
    """Multiplying a matrix by a scalar scales every entry."""
    rows = Dim(dimension=2)
    cols = Dim(dimension=3)
    mat = Tensor(
        name='a', dims=[rows, cols], dtype='float32',
        raw_tensor=torch.tensor([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]),
    )
    scale = Tensor(name='b', dims=[], dtype='float32', raw_tensor=torch.tensor(2.0))
    out = rf.matmul(mat, scale, reduce=[])
    assert out.dims == (rows, cols)
    assert torch.allclose(out.raw_tensor, torch.tensor([[2.0, 4.0, 6.0], [-2.0, -4.0, -6.0]]))
|
def test_dot_batched_scalar_multiplication():
    """Sharing only a batch dim with no reduction gives elementwise multiplication."""
    batch = Dim(dimension=3)
    lhs = Tensor(name='a', dims=[batch], dtype='float32', raw_tensor=torch.tensor([1.0, 2.0, 3.0]))
    rhs = Tensor(name='b', dims=[batch], dtype='float32', raw_tensor=torch.tensor([4.0, 5.0, 6.0]))
    out = rf.matmul(lhs, rhs, reduce=[])
    assert out.dims == (batch,)
    assert out.raw_tensor.tolist() == pytest.approx([4.0, 10.0, 18.0])
|
def test_dot_batched_scalar_product():
    """Per-batch dot product by reducing the shared feature dim."""
    batch = Dim(dimension=2)
    feat = Dim(dimension=3)
    lhs = Tensor(
        name='a', dims=[batch, feat], dtype='float32',
        raw_tensor=torch.tensor([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]),
    )
    rhs = Tensor(
        name='b', dims=[batch, feat], dtype='float32',
        raw_tensor=torch.tensor([[4.0, 5.0, 6.0], [4.0, 5.0, 6.0]]),
    )
    out = rf.matmul(lhs, rhs, reduce=[feat])
    assert out.dims == (batch,)
    assert out.raw_tensor.tolist() == pytest.approx([32.0, -32.0])
|
def test_dot_batched_outer_product():
    """Per-batch outer product: batch dim shared, distinct feature dims, no reduction."""
    batch = Dim(dimension=2)
    feat_a = Dim(dimension=3)
    feat_b = Dim(dimension=3)
    lhs = Tensor(
        name='a', dims=[batch, feat_a], dtype='float32',
        raw_tensor=torch.tensor([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]),
    )
    rhs = Tensor(
        name='b', dims=[batch, feat_b], dtype='float32',
        raw_tensor=torch.tensor([[4.0, 5.0, 6.0], [4.0, 5.0, 6.0]]),
    )
    out = rf.matmul(lhs, rhs, reduce=[])
    assert out.dims == (batch, feat_a, feat_b)
    assert out.raw_tensor.shape == (2, 3, 3)
|
def test_dot_batched_matrix_vector_product():
    """Batched matrix-vector product with the batch dim in different positions."""
    batch = Dim(dimension=2)
    feat = Dim(dimension=3)
    red = Dim(dimension=2)
    mat = Tensor(
        name='a', dims=[feat, red, batch], dtype='float32',
        raw_tensor=torch.tensor([[[1.0, -1.0], [2.0, -2.0]], [[3.0, -3.0], [4.0, -4.0]], [[5.0, -5.0], [6.0, -6.0]]]),
    )
    vec = Tensor(
        name='b', dims=[batch, red], dtype='float32',
        raw_tensor=torch.tensor([[1.0, 2.0], [2.0, 4.0]]),
    )
    out = rf.matmul(mat, vec, reduce=[red])
    assert out.dims == (batch, feat)
    assert torch.allclose(out.raw_tensor, torch.tensor([[5.0, 11.0, 17.0], [-10.0, -22.0, -34.0]]))
|
def test_dot_batched_matrix_matrix_product():
    """Batched matrix-matrix product; chosen inputs make every entry cancel to zero."""
    batch = Dim(dimension=2)
    feat_a = Dim(dimension=3)
    feat_b = Dim(dimension=3)
    red = Dim(dimension=2)
    lhs = Tensor(
        name='a', dims=[feat_a, red, batch], dtype='float32',
        raw_tensor=torch.tensor([[[1.0, 2.0], [-1.0, -2.0]], [[3.0, 4.0], [-3.0, -4.0]], [[5.0, 6.0], [-5.0, -6.0]]]),
    )
    rhs = Tensor(
        name='b', dims=[feat_b, batch, red], dtype='float32',
        raw_tensor=torch.tensor([[[1.0, 1.0], [2.0, 2.0]], [[3.0, 3.0], [4.0, 4.0]], [[5.0, 5.0], [6.0, 6.0]]]),
    )
    out = rf.matmul(lhs, rhs, reduce=[red])
    assert out.dims == (batch, feat_a, feat_b)
    assert torch.allclose(out.raw_tensor, torch.zeros(size=(2, 3, 3)))
|
def test_dot_batched_scale_matrix():
    """Per-batch scalar scaling of a matrix (no reduction)."""
    batch = Dim(dimension=2)
    rows = Dim(dimension=2)
    cols = Dim(dimension=3)
    scale = Tensor(name='a', dims=[batch], dtype='float32', raw_tensor=torch.tensor([2.0, 3.0]))
    mat = Tensor(
        name='b', dims=[batch, rows, cols], dtype='float32',
        raw_tensor=torch.tensor([[[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], [[2.0, 3.0, 4.0], [-2.0, -3.0, -4.0]]]),
    )
    out = rf.matmul(scale, mat, reduce=[])
    assert out.dims == (batch, rows, cols)
    expected = torch.tensor([[[2.0, 4.0, 6.0], [-2.0, -4.0, -6.0]], [[6.0, 9.0, 12.0], [-6.0, -9.0, -12.0]]])
    assert torch.allclose(out.raw_tensor, expected)
|
def test_dot_multiple_dims():
    """matmul with several reduce dims, several shared (batch-like) dims, and unique dims
    on each side, in scrambled axis orders; checks the resulting dim order and shape."""
    a_raw = torch.rand(size=(2, 4, 6, 9, 5, 3, 8, 1))
    b_raw = torch.rand(size=(7, 2, 6, 8, 3, 1, 5, 4))
    # Dims reduced away by the matmul.
    reduce_dim_1 = Dim(dimension=3)
    reduce_dim_2 = Dim(dimension=6)
    reduce_dim_3 = Dim(dimension=1)
    # Dims present in both operands (batch-like, kept).
    common_dim_1 = Dim(dimension=2)
    common_dim_2 = Dim(dimension=8)
    common_dim_3 = Dim(dimension=5)
    # Dims unique to one operand (kept).
    a_unique_dim_1 = Dim(dimension=9)
    a_unique_dim_2 = Dim(dimension=4)
    b_unique_dim_1 = Dim(dimension=7)
    b_unique_dim_2 = Dim(dimension=4)
    a = Tensor(name='a', dims=[common_dim_1, a_unique_dim_2, reduce_dim_2, a_unique_dim_1, common_dim_3, reduce_dim_1, common_dim_2, reduce_dim_3], dtype='float32', raw_tensor=a_raw)
    b = Tensor(name='b', dims=[b_unique_dim_1, common_dim_1, reduce_dim_2, common_dim_2, reduce_dim_1, reduce_dim_3, common_dim_3, b_unique_dim_2], dtype='float32', raw_tensor=b_raw)
    result = rf.matmul(a, b, reduce=[reduce_dim_1, reduce_dim_2, reduce_dim_3])
    # Result order: common dims (in a's order of first appearance... per this assert),
    # then a-unique dims, then b-unique dims.
    assert (result.dims == (common_dim_1, common_dim_3, common_dim_2, a_unique_dim_2, a_unique_dim_1, b_unique_dim_1, b_unique_dim_2))
    assert (result.raw_tensor.shape == (2, 5, 8, 4, 9, 7, 4))
|
def test_cross_entropy_no_batch_dim():
    """Cross entropy of a scalar sparse target against 6-class logits (no batch dim)."""
    classes = Dim(dimension=6)
    logits = Tensor(
        name='logits', dims=[classes], dtype='float32',
        raw_tensor=torch.tensor([0.0, 0.0, math.log(10.0), 0.0, 0.0, 0.0]),
    )
    target = Tensor(
        name='target', dims=[], sparse_dim=classes, dtype='int64',
        raw_tensor=torch.tensor(2, dtype=torch.int64),
    )
    ce = rf.cross_entropy(estimated=logits, target=target, axis=classes, estimated_type='logits')
    assert not ce.dims
    # softmax prob of class 2 is 10 / (10 + 5*1) = 10/15
    assert ce.raw_tensor.tolist() == pytest.approx(-math.log(10 / 15))
|
def test_cross_entropy_no_batch_dim_dense_target():
    """Cross entropy against a dense (soft) target distribution, no batch dim."""
    classes = Dim(dimension=6)
    logits = Tensor(
        name='logits', dims=[classes], dtype='float32',
        raw_tensor=torch.tensor([0.0, 0.0, math.log(10.0), 0.0, 0.0, 0.0]),
    )
    target = Tensor(
        name='target', dims=[classes], dtype='float32',
        raw_tensor=torch.tensor([0.0, 0.0, 0.5, 0.0, 0.0, 0.5]),
    )
    ce = rf.cross_entropy(estimated=logits, target=target, axis=classes, estimated_type='logits')
    assert not ce.dims
    # Weighted sum of -log softmax probs: 0.5 on class 2 (10/15) and 0.5 on class 5 (1/15).
    expected = (-0.5) * math.log(10 / 15) - 0.5 * math.log(1 / 15)
    assert ce.raw_tensor.tolist() == pytest.approx(expected)
|
def test_cross_entropy():
    """Batched cross entropy with sparse targets."""
    batch = Dim(dimension=3)
    classes = Dim(dimension=3)
    logits = Tensor(
        name='logits', dims=[batch, classes], dtype='float32',
        raw_tensor=torch.tensor([[0.0, 0.0, math.log(3.0)], [math.log(5.0), 0.0, 0.0], [0.0, math.log(2.0), 0.0]]),
    )
    target = Tensor(
        name='target', dims=[batch], sparse_dim=classes, dtype='int64',
        raw_tensor=torch.tensor([2, 0, 1], dtype=torch.int64),
    )
    ce = rf.cross_entropy(estimated=logits, target=target, axis=classes, estimated_type='logits')
    assert ce.dims == (batch,)
    # Per-row softmax probs of the target classes: 3/5, 5/7, 2/4.
    expected = [-math.log(3 / 5), -math.log(5 / 7), -math.log(2 / 4)]
    assert ce.raw_tensor.tolist() == pytest.approx(expected)
|
def test_cross_entropy_dense_target():
    """Batched cross entropy with dense targets; logits have classes as the leading dim."""
    batch = Dim(dimension=2)
    classes = Dim(dimension=3)
    logits = Tensor(
        name='logits', dims=[classes, batch], dtype='float32',
        raw_tensor=torch.tensor([[0.0, math.log(5.0)], [0.0, 0.0], [math.log(3.0), 0.0]]),
    )
    target = Tensor(
        name='target', dims=[batch, classes], dtype='float32',
        raw_tensor=torch.tensor([[0.0, 0.4, 0.6], [0.3, 0.7, 0.0]]),
    )
    ce = rf.cross_entropy(estimated=logits, target=target, axis=classes, estimated_type='logits')
    assert ce.dims == (batch,)
    values = ce.raw_tensor.tolist()
    # Batch 0: softmax over column 0 -> probs (1/5, 1/5, 3/5), weights (0, 0.4, 0.6).
    assert values[0] == pytest.approx((-0.6) * math.log(3 / 5) - 0.4 * math.log(1 / 5))
    # Batch 1: softmax over column 1 -> probs (5/7, 1/7, 1/7), weights (0.3, 0.7, 0).
    assert values[1] == pytest.approx((-0.3) * math.log(5 / 7) - 0.7 * math.log(1 / 7))
|
def test_pack_padded():
    """rf.pack_padded-based CE loss must match both the pure-torch packed loss and the
    padded rf loss, in value and in gradient w.r.t. the conv bias.

    Note: the nested loss functions close over batch_dim/time_dim/classes_dim,
    which are (re)bound inside the loop below before each call.
    """

    def _loss_rf_packed(logits: Tensor, targets: Tensor) -> torch.Tensor:
        # RF loss over packed (padding removed) frames.
        (logits_packed, pack_dim) = rf.pack_padded(logits, dims=(batch_dim, time_dim), enforce_sorted=False)
        (targets_packed, _) = rf.pack_padded(targets, dims=(batch_dim, time_dim), enforce_sorted=False, out_dim=pack_dim)
        loss_rf_packed = rf.cross_entropy(estimated=logits_packed, estimated_type='logits', target=targets_packed, axis=classes_dim)
        loss_rf_packed_sum = rf.reduce_sum(loss_rf_packed, axis=loss_rf_packed.dims)
        return loss_rf_packed_sum.raw_tensor

    def _loss_pt_packed(logits: Tensor, targets: Tensor) -> torch.Tensor:
        # Reference: pure-torch loss via pack_padded_sequence.
        logits_pt_packed_raw = torch.nn.utils.rnn.pack_padded_sequence(logits.raw_tensor, time_dim.dyn_size, batch_first=True, enforce_sorted=False)
        targets_pt_packed_raw = torch.nn.utils.rnn.pack_padded_sequence(targets.raw_tensor, time_dim.dyn_size, batch_first=True, enforce_sorted=False)
        loss_pt_packed_raw = torch.nn.CrossEntropyLoss(reduction='none')(logits_pt_packed_raw.data, targets_pt_packed_raw.data.long())
        loss_pt_packed_sum_raw = torch.sum(loss_pt_packed_raw)
        return loss_pt_packed_sum_raw

    def _loss_rf_padded(logits: Tensor, targets: Tensor) -> torch.Tensor:
        # RF loss on the padded tensors directly (reduce_sum masks padding via the dims).
        loss_rf_padded = rf.cross_entropy(estimated=logits, estimated_type='logits', target=targets, axis=classes_dim)
        loss_rf_padded_sum = rf.reduce_sum(loss_rf_padded, axis=loss_rf_padded.dims)
        return loss_rf_padded_sum.raw_tensor

    prev_loss_value = None
    prev_bias_grad = None
    for loss_fn in [_loss_pt_packed, _loss_rf_padded, _loss_rf_packed]:
        # Re-seed so each variant sees identical weights and data.
        torch.manual_seed(42)
        batch_dim = Dim(dimension=3, name='batch')
        in_dim = Dim(dimension=5, name='in')
        classes_dim = Dim(dimension=5, name='classes')
        net = torch.nn.Conv1d(in_dim.dimension, classes_dim.dimension, 5, padding='same')
        time_dim = Dim(Tensor(name='time', dims=[batch_dim], dtype='int32', raw_tensor=torch.tensor([4, 3, 2], dtype=torch.int32)))
        inputs = Tensor(name='inputs', dims=[batch_dim, time_dim, classes_dim], dtype='float32', raw_tensor=torch.randn(3, 4, 5, requires_grad=True))
        targets = Tensor(name='target', dims=[batch_dim, time_dim], sparse_dim=classes_dim, dtype='int64', raw_tensor=torch.randint(0, 5, (3, 4)))
        # Conv1d wants [B,D,T]; transpose in and out.
        logits_raw_ = net(inputs.raw_tensor.transpose(1, 2))
        logits_raw = logits_raw_.transpose(1, 2)
        logits = Tensor(name='logits', dims=[batch_dim, time_dim, classes_dim], dtype='float32', raw_tensor=logits_raw)
        loss_raw = loss_fn(logits, targets)
        loss_value = loss_raw.detach().cpu().numpy()
        print('loss:', loss_raw)
        (bias_grad,) = torch.autograd.grad(loss_raw, net.bias, create_graph=True)
        print('bias grad:', bias_grad)
        bias_grad = bias_grad.detach().cpu().numpy()
        # All three variants must agree on loss and gradient.
        if (prev_loss_value is not None):
            numpy.testing.assert_almost_equal(loss_value, prev_loss_value, decimal=5, err_msg='loss')
            numpy.testing.assert_almost_equal(bias_grad, prev_bias_grad, decimal=5, err_msg='bias grad')
        prev_loss_value = loss_value
        prev_bias_grad = bias_grad
|
def test_Data_copy_compatible_to_match_priority():
    """copy_compatible_to must disambiguate equal dims via Dim.match_priority."""
    feat_dim = Dim(2, name='feature')
    in_dim = feat_dim.copy(match_priority=1)
    # Equal dims, distinct objects, higher match priority on the copy.
    assert in_dim == feat_dim and in_dim is not feat_dim
    assert in_dim.match_priority > feat_dim.match_priority
    raw_np = numpy.arange(0, 4, dtype=numpy.float32).reshape((2, 2))
    x = Tensor('x', [in_dim, feat_dim], 'float32', raw_tensor=torch.tensor(raw_np))

    def check(res, dims, expected_np):
        # Dim identity (not just equality) must match the requested order.
        assert len(res.dims) == 2 and res.dims[0] is dims[0] and res.dims[1] is dims[1]
        numpy.testing.assert_equal(res.raw_tensor.detach().numpy(), expected_np)

    x_ = x.copy_compatible_to(Tensor('y', [in_dim, feat_dim], 'float32'))
    check(x_, (in_dim, feat_dim), raw_np)
    x_ = x.copy_compatible_to(Tensor('y', [feat_dim, in_dim], 'float32'))
    check(x_, (feat_dim, in_dim), raw_np.transpose([1, 0]))
    # Repeating with the already-transposed tensor must be a no-op.
    x_ = x_.copy_compatible_to(Tensor('y', [feat_dim, in_dim], 'float32'))
    check(x_, (feat_dim, in_dim), raw_np.transpose([1, 0]))
    # And transposing back restores the original layout.
    x_ = x_.copy_compatible_to(Tensor('y', [in_dim, feat_dim], 'float32'))
    check(x_, (in_dim, feat_dim), raw_np)
|
def test_Data_copy_compatible_to_dims_match_priority():
    """copy_compatible_to_dims must disambiguate equal dims via Dim.match_priority."""
    feat_dim = Dim(2, name='feature')
    in_dim = feat_dim.copy(match_priority=1)
    # Equal dims, distinct objects, higher match priority on the copy.
    assert in_dim == feat_dim and in_dim is not feat_dim
    assert in_dim.match_priority > feat_dim.match_priority
    raw_np = numpy.arange(0, 4, dtype=numpy.float32).reshape((2, 2))
    x = Tensor('x', [in_dim, feat_dim], 'float32', raw_tensor=torch.tensor(raw_np))

    def check(res, dims, expected_np):
        # Dim identity (not just equality) must match the requested order.
        assert len(res.dims) == 2 and res.dims[0] is dims[0] and res.dims[1] is dims[1]
        numpy.testing.assert_equal(res.raw_tensor.detach().numpy(), expected_np)

    x_ = x.copy_compatible_to_dims([in_dim, feat_dim])
    check(x_, (in_dim, feat_dim), raw_np)
    x_ = x.copy_compatible_to_dims([feat_dim, in_dim])
    check(x_, (feat_dim, in_dim), raw_np.transpose([1, 0]))
    # Repeating with the already-transposed tensor must be a no-op.
    x_ = x_.copy_compatible_to_dims([feat_dim, in_dim])
    check(x_, (feat_dim, in_dim), raw_np.transpose([1, 0]))
    # And transposing back restores the original layout.
    x_ = x_.copy_compatible_to_dims([in_dim, feat_dim])
    check(x_, (in_dim, feat_dim), raw_np)
|
def test_Data_copy_tranpose_match_priority():
    """copy_transpose must disambiguate equal dims via Dim.match_priority."""
    feat_dim = Dim(2, name='feature')
    in_dim = feat_dim.copy(match_priority=1)
    # Equal dims, distinct objects, higher match priority on the copy.
    assert in_dim == feat_dim and in_dim is not feat_dim
    assert in_dim.match_priority > feat_dim.match_priority
    raw_np = numpy.arange(0, 4, dtype=numpy.float32).reshape((2, 2))
    x = Tensor('x', [in_dim, feat_dim], 'float32', raw_tensor=torch.tensor(raw_np))

    def check(res, dims, expected_np):
        # Dim identity (not just equality) must match the requested order.
        assert len(res.dims) == 2 and res.dims[0] is dims[0] and res.dims[1] is dims[1]
        numpy.testing.assert_equal(res.raw_tensor.detach().numpy(), expected_np)

    x_ = x.copy_transpose([in_dim, feat_dim])
    check(x_, (in_dim, feat_dim), raw_np)
    x_ = x.copy_transpose([feat_dim, in_dim])
    check(x_, (feat_dim, in_dim), raw_np.transpose([1, 0]))
    # Repeating with the already-transposed tensor must be a no-op.
    x_ = x_.copy_transpose([feat_dim, in_dim])
    check(x_, (feat_dim, in_dim), raw_np.transpose([1, 0]))
    # And transposing back restores the original layout.
    x_ = x_.copy_transpose([in_dim, feat_dim])
    check(x_, (in_dim, feat_dim), raw_np)
|
def test_compare_eq():
    """The == operator and rf.compare with '=='/'eq' agree elementwise."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [False, True, False]
    for res in (a == b, rf.compare(a, '==', b), rf.compare(a, 'eq', b)):
        assert res.raw_tensor.tolist() == expected
|
def test_compare_ne():
    """The != operator and rf.compare with '!='/'<>'/'not_equal' agree elementwise."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [True, False, True]
    for res in (a != b, rf.compare(a, '!=', b), rf.compare(a, '<>', b), rf.compare(a, 'not_equal', b)):
        assert res.raw_tensor.tolist() == expected
|
def test_compare_lt():
    """The < operator and rf.compare with '<'/'less' agree elementwise."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [False, False, True]
    for res in (a < b, rf.compare(a, '<', b), rf.compare(a, 'less', b)):
        assert res.raw_tensor.tolist() == expected
|
def test_compare_le():
    """The <= operator and rf.compare with '<='/'less_equal' agree elementwise."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [False, True, True]
    for res in (a <= b, rf.compare(a, '<=', b), rf.compare(a, 'less_equal', b)):
        assert res.raw_tensor.tolist() == expected
|
def test_compare_gt():
    """The > operator and rf.compare with '>'/'greater' agree elementwise."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [True, False, False]
    for res in (a > b, rf.compare(a, '>', b), rf.compare(a, 'greater', b)):
        assert res.raw_tensor.tolist() == expected
|
def test_compare_ge():
    """The >= operator and rf.compare with '>='/'greater_equal' agree elementwise."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [True, True, False]
    for res in (a >= b, rf.compare(a, '>=', b), rf.compare(a, 'greater_equal', b)):
        assert res.raw_tensor.tolist() == expected
|
def test_combine_add_int_tensors():
    """Elementwise int addition via operator and rf.combine; dtype stays int64."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2, 2, 2]), dims=[feature_dim], dtype='int64')
    b = Tensor(name='b', raw_tensor=torch.tensor([1, 2, 3]), dims=[feature_dim], dtype='int64')
    expected = [3, 4, 5]
    for res in (a + b, rf.combine(a, '+', b), rf.combine(a, 'add', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'int64'
|
def test_combine_add_float_tensors():
    """Elementwise float addition via operator and rf.combine; dtype stays float32."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [3.0, 4.0, 5.0]
    for res in (a + b, rf.combine(a, '+', b), rf.combine(a, 'add', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'float32'
|
def test_combine_add_number_to_tensor():
    """Adding a scalar (0-dim) tensor broadcasts over the vector; dtype stays float32."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor(3.0), dims=[], dtype='float32')
    expected = [5.0, 5.0, 5.0]
    for res in (a + b, rf.combine(a, '+', b), rf.combine(a, 'add', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'float32'
|
def test_combine_sub_int_tensors():
    """Elementwise int subtraction via operator and rf.combine; dtype stays int64."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2, 2, 2]), dims=[feature_dim], dtype='int64')
    b = Tensor(name='b', raw_tensor=torch.tensor([1, 2, 3]), dims=[feature_dim], dtype='int64')
    expected = [1, 0, -1]
    for res in (a - b, rf.combine(a, '-', b), rf.combine(a, 'sub', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'int64'
|
def test_combine_sub_float_tensors():
    """Elementwise float subtraction via operator and rf.combine; dtype stays float32."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [1.0, 0.0, -1.0]
    for res in (a - b, rf.combine(a, '-', b), rf.combine(a, 'sub', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'float32'
|
def test_combine_mul_int_tensors():
    """Elementwise int multiplication via operator and rf.combine; dtype stays int64."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2, 2, 2]), dims=[feature_dim], dtype='int64')
    b = Tensor(name='b', raw_tensor=torch.tensor([1, 2, 3]), dims=[feature_dim], dtype='int64')
    expected = [2, 4, 6]
    for res in (a * b, rf.combine(a, '*', b), rf.combine(a, 'mul', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'int64'
|
def test_combine_mul_float_tensors():
    """Elementwise float multiplication via operator and rf.combine; dtype stays float32."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [2.0, 4.0, 6.0]
    for res in (a * b, rf.combine(a, '*', b), rf.combine(a, 'mul', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'float32'
|
def test_combine_truediv_float_tensors():
    """Elementwise true division via operator and rf.combine; dtype stays float32."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [2.0, 1.0, 2.0 / 3.0]
    for res in (a / b, rf.combine(a, '/', b), rf.combine(a, 'truediv', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'float32'
|
def test_combine_floordiv_int_tensors():
    """Elementwise int floor division via operator and rf.combine; dtype stays int64."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2, 2, 2]), dims=[feature_dim], dtype='int64')
    b = Tensor(name='b', raw_tensor=torch.tensor([1, 2, 3]), dims=[feature_dim], dtype='int64')
    expected = [2, 1, 0]
    for res in (a // b, rf.combine(a, '//', b), rf.combine(a, 'floordiv', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'int64'
|
def test_combine_floordiv_float_tensors():
    """Elementwise float floor division via operator and rf.combine; dtype stays float32."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [2.0, 1.0, 0.0]
    for res in (a // b, rf.combine(a, '//', b), rf.combine(a, 'floordiv', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'float32'
|
def test_combine_mod_int_tensors():
    """Elementwise int modulo via operator and rf.combine; dtype stays int64."""
    feature_dim = Dim(4)
    a = Tensor(name='a', raw_tensor=torch.tensor([2, 2, 2, 17]), dims=[feature_dim], dtype='int64')
    b = Tensor(name='b', raw_tensor=torch.tensor([1, 2, 3, 4]), dims=[feature_dim], dtype='int64')
    expected = [0, 0, 2, 1]
    for res in (a % b, rf.combine(a, '%', b), rf.combine(a, 'mod', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'int64'
|
def test_combine_mod_float_tensors():
    """Elementwise float modulo via operator and rf.combine; dtype stays float32."""
    feature_dim = Dim(4)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0, 17.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0, 4.0]), dims=[feature_dim], dtype='float32')
    expected = [0.0, 0.0, 2.0, 1.0]
    for res in (a % b, rf.combine(a, '%', b), rf.combine(a, 'mod', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'float32'
|
def test_combine_pow_int_tensors():
    """Elementwise int power via operator and rf.combine; dtype stays int64."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2, 2, 2]), dims=[feature_dim], dtype='int64')
    b = Tensor(name='b', raw_tensor=torch.tensor([1, 2, 3]), dims=[feature_dim], dtype='int64')
    expected = [2, 4, 8]
    for res in (a ** b, rf.combine(a, '**', b), rf.combine(a, 'pow', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'int64'
|
def test_combine_pow_float_tensors():
    """Elementwise float power via operator and rf.combine; dtype stays float32."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [2.0, 4.0, 8.0]
    for res in (a ** b, rf.combine(a, '**', b), rf.combine(a, 'pow', b)):
        assert res.raw_tensor.tolist() == pytest.approx(expected)
        assert res.dtype == 'float32'
|
def test_combine_max():
    """rf.combine with 'max'/'maximum' takes the elementwise maximum."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [2.0, 2.0, 3.0]
    for res in (rf.combine(a, 'max', b), rf.combine(a, 'maximum', b)):
        assert res.raw_tensor.tolist() == expected
|
def test_combine_min():
    """rf.combine with 'min'/'minimum' takes the elementwise minimum."""
    feature_dim = Dim(3)
    a = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    b = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [1.0, 2.0, 2.0]
    for res in (rf.combine(a, 'min', b), rf.combine(a, 'minimum', b)):
        assert res.raw_tensor.tolist() == expected
|
class _CheckNoPythonCalls:
    """
    Context manager which asserts that no Python-level code is executed
    inside the ``with`` block, checked via :func:`sys.settrace`.

    Certain frames are whitelisted in :func:`_tracefunc` (the ``typing``
    module, ``Tensor.__init__``, and our own ``__exit__``).
    """

    def __init__(self):
        self.num_calls = 0  # counts non-whitelisted Python frames entered
        self.old_tracefunc = None  # previous trace func, restored on exit

    def _tracefunc(self, frame, event, arg):
        """Global trace function; counts any non-whitelisted Python call."""
        print('*** trace:', frame, event, arg)
        if frame.f_globals is vars(typing):
            print(' (ignore typing module)')
            return
        if frame.f_code is Tensor.__init__.__code__:
            print(' (ignoring Tensor.__init__ for now, remains to be implemented...)')
            return
        if frame.f_code is _CheckNoPythonCalls.__exit__.__code__:
            print(' (ignoring _CheckNoPythonCalls.__exit__)')
            return
        self.num_calls += 1

    def __enter__(self):
        self.old_tracefunc = sys.gettrace()
        sys.settrace(self._tracefunc)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.settrace(self.old_tracefunc)
        # Bugfix: only check on a clean exit. If the with-body raised,
        # asserting here could raise as well and mask the original exception.
        if exc_type is None:
            assert self.num_calls == 0
|
def test_native_is_raw_torch_tensor_type():
    """
    Test the native ``is_raw_torch_tensor_type``:
    True for torch.Tensor and subclasses (e.g. Parameter),
    False for anything else (numpy arrays, ints, non-type objects).
    Must run without any Python-level calls (checked via _CheckNoPythonCalls).
    """
    raw_tensor = torch.zeros(2, 3)
    raw_parameter = torch.nn.Parameter(torch.zeros(2, 3))
    numpy_tensor = numpy.zeros((2, 3))
    from returnn.frontend import _native
    mod = _native.get_module()
    # No Python-level code may execute inside this block -- keep the
    # statements as plain native calls (no comprehensions/helpers).
    with _CheckNoPythonCalls():
        assert (mod.is_raw_torch_tensor_type(type(raw_tensor)) is True)
        assert (mod.is_raw_torch_tensor_type(type(raw_parameter)) is True)
        assert (mod.is_raw_torch_tensor_type(type(numpy_tensor)) is False)
        assert (mod.is_raw_torch_tensor_type(type(43)) is False)
        # A non-type argument is handled gracefully (False, not an error).
        assert (mod.is_raw_torch_tensor_type(43) is False)
|
def test_native_get_out_permutation_to_dims():
    """
    Test the native ``Tensor.get_out_permutation_to_dims``:
    returns, for each requested out dim, the index of the matching source
    axis, or -1 when the source tensor does not have that dim (broadcast
    position, as the assertions below show). Requesting dims that drop a
    source dim raises ValueError. Must run without Python-level calls.
    """
    batch_dim = Dim(2, name='batch_dim')
    time_dim = Dim(3, name='time_dim')
    feature_dim = Dim(5, name='feature_dim')
    tensor_f = Tensor(name='x', dims=[feature_dim], dtype='float32')
    tensor_bf = Tensor(name='x', dims=[batch_dim, feature_dim], dtype='float32')
    tensor_bft = Tensor(name='x', dims=[batch_dim, feature_dim, time_dim], dtype='float32')
    # No Python-level code may execute inside this block.
    with _CheckNoPythonCalls():
        assert (tensor_f.get_out_permutation_to_dims([feature_dim]) == [0])
        assert (tensor_f.get_out_permutation_to_dims([batch_dim, feature_dim, time_dim]) == [(- 1), 0, (- 1)])
        assert (tensor_f.get_out_permutation_to_dims([feature_dim, time_dim]) == [0, (- 1)])
        assert (tensor_f.get_out_permutation_to_dims([time_dim, feature_dim]) == [(- 1), 0])
        assert (tensor_f.get_out_permutation_to_dims([batch_dim, time_dim, feature_dim]) == [(- 1), (- 1), 0])
        assert (tensor_bf.get_out_permutation_to_dims([batch_dim, feature_dim]) == [0, 1])
        assert (tensor_bf.get_out_permutation_to_dims([feature_dim, batch_dim]) == [1, 0])
        assert (tensor_bf.get_out_permutation_to_dims([batch_dim, feature_dim, time_dim]) == [0, 1, (- 1)])
        assert (tensor_bf.get_out_permutation_to_dims([feature_dim, time_dim, batch_dim]) == [1, (- 1), 0])
        assert (tensor_bft.get_out_permutation_to_dims([batch_dim, feature_dim, time_dim]) == [0, 1, 2])
        assert (tensor_bft.get_out_permutation_to_dims([feature_dim, batch_dim, time_dim]) == [1, 0, 2])
    # Requesting only [batch_dim] would drop feature_dim -> must raise.
    try:
        tensor_f.get_out_permutation_to_dims([batch_dim])
    except ValueError as exc:
        print('Got expected exc', exc)
    else:
        assert False, 'should have failed'
|
def test_torch_native_setup():
    """The active global backend is the TorchBackend; its basic raw-tensor
    queries work identically via the instance and via the class."""
    from returnn.frontend._backend import global_backend
    from returnn.torch.frontend import TorchBackend
    tensor = Tensor(name='x', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[Dim(3)], dtype='float32')
    assert isinstance(global_backend, TorchBackend)
    raw = tensor.raw_tensor
    # Both the singleton instance and the class itself must behave the same.
    for backend in (global_backend, TorchBackend):
        assert backend.executing_eagerly()
        assert backend.get_dtype_name_raw(raw) == 'float32'
        assert backend.get_ndim_raw(raw) == 1
|
def test_native_torch_raw_backend():
    """
    The backend resolved three ways must be the same object:
    via Tensor._raw_backend, via the Python backend registry, and via the
    native ``get_backend_for_tensor`` (which must not execute Python code).
    """
    tensor = Tensor(name='a', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[Dim(3)], dtype='float32')
    backend1 = tensor._raw_backend
    import returnn.frontend._backend as _backend_api
    backend2 = _backend_api.get_backend_by_raw_tensor_type(type(tensor.raw_tensor))
    from returnn.frontend import _native
    mod = _native.get_module(verbose=True)
    # No Python-level code may execute inside this block.
    with _CheckNoPythonCalls():
        backend3 = mod.get_backend_for_tensor(tensor)
    assert (backend1 is backend2 is backend3)
|
def test_native_torch_raw_backend_raw_dtype():
    """
    The native ``raw_torch_tensor_get_dtype`` returns the RETURNN dtype
    name (a str, e.g. 'float32') for a raw torch tensor, without executing
    any Python-level code.
    """
    raw = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)
    from returnn.frontend import _native
    mod = _native.get_module()
    # No Python-level code may execute inside this block.
    with _CheckNoPythonCalls():
        dtype = mod.raw_torch_tensor_get_dtype(raw)
    assert (isinstance(dtype, str) and (dtype == 'float32'))
|
def test_native_torch_tensor_eq():
    """
    Test the native ``tensor_eq``: elementwise equality of two Tensors
    (broadcasting over dims missing in one operand) or Tensor vs scalar,
    without executing any Python-level code.
    """
    batch_dim = Dim(2, name='batch_dim')
    feature_dim = Dim(3, name='feature_dim')
    tensor_bf = Tensor('tensor', dims=[batch_dim, feature_dim], dtype='float32', raw_tensor=torch.zeros(2, 3))
    tensor_f = Tensor('tensor', dims=[feature_dim], dtype='float32', raw_tensor=torch.arange((- 1), 2, dtype=torch.float32))
    from returnn.frontend import _native
    mod = _native.get_module()
    # No Python-level code may execute inside this block.
    with _CheckNoPythonCalls():
        res1 = mod.tensor_eq(tensor_bf, tensor_bf)  # identical operands
        res2 = mod.tensor_eq(tensor_bf, tensor_f)  # broadcast over batch
        res3 = mod.tensor_eq(tensor_bf, 0.0)  # scalar rhs
    assert (isinstance(res1, Tensor) and isinstance(res1.raw_tensor, torch.Tensor))
    assert (res1.dims == (batch_dim, feature_dim))
    assert (res1.raw_tensor.detach().numpy().tolist() == [[True, True, True], [True, True, True]])
    assert (isinstance(res2, Tensor) and isinstance(res2.raw_tensor, torch.Tensor))
    assert (res2.dims == (batch_dim, feature_dim))
    # tensor_f is [-1, 0, 1]; only the middle entry equals 0.
    assert (res2.raw_tensor.detach().numpy().tolist() == [[False, True, False], [False, True, False]])
    assert (isinstance(res3, Tensor) and isinstance(res3.raw_tensor, torch.Tensor))
    assert (res3.dims == (batch_dim, feature_dim))
    assert (res3.raw_tensor.detach().numpy().tolist() == [[True, True, True], [True, True, True]])
|
def test_native_torch_tensor_eq_op():
    """
    ``Tensor.__eq__`` must be the native ``tensor_eq`` itself, so the
    ``==`` operator runs fully natively (no Python-level calls).
    """
    batch_dim = Dim(2, name='batch_dim')
    feature_dim = Dim(3, name='feature_dim')
    tensor_bf = Tensor('tensor', dims=[batch_dim, feature_dim], dtype='float32', raw_tensor=torch.zeros(2, 3))
    tensor_f = Tensor('tensor', dims=[feature_dim], dtype='float32', raw_tensor=torch.arange((- 1), 2, dtype=torch.float32))
    from returnn.frontend import _native
    mod = _native.get_module()
    # The operator must be bound to the native function directly.
    assert (Tensor.__eq__ is mod.tensor_eq)
    # No Python-level code may execute inside this block.
    with _CheckNoPythonCalls():
        res = (tensor_bf == tensor_f)
    assert (isinstance(res, Tensor) and isinstance(res.raw_tensor, torch.Tensor))
    assert (res.dims == (batch_dim, feature_dim))
    # tensor_f is [-1, 0, 1] compared against zeros, broadcast over batch.
    assert (res.raw_tensor.detach().numpy().tolist() == [[False, True, False], [False, True, False]])
|
def test_native_torch_tensor_neg():
    """
    Test the native ``tensor_neg`` (unary minus): negates elementwise,
    keeps dims, without executing any Python-level code.
    """
    batch_dim = Dim(2, name='batch_dim')
    feature_dim = Dim(3, name='feature_dim')
    tensor = Tensor('tensor', dims=[batch_dim, feature_dim], dtype='float32', raw_tensor=torch.ones(2, 3))
    from returnn.frontend import _native
    mod = _native.get_module()
    # No Python-level code may execute inside this block.
    with _CheckNoPythonCalls():
        res = mod.tensor_neg(tensor)
    assert (isinstance(res, Tensor) and isinstance(res.raw_tensor, torch.Tensor))
    assert (res.dims == (batch_dim, feature_dim))
    assert (res.raw_tensor.detach().numpy().tolist() == [[(- 1.0), (- 1.0), (- 1.0)], [(- 1.0), (- 1.0), (- 1.0)]])
|
def test_native_torch_tensor_sub():
    """
    Test the native ``tensor_sub``: elementwise subtraction of two Tensors
    (broadcasting over dims missing in one operand) or Tensor minus scalar,
    without executing any Python-level code.
    """
    batch_dim = Dim(2, name='batch_dim')
    feature_dim = Dim(3, name='feature_dim')
    tensor_bf = Tensor('tensor', dims=[batch_dim, feature_dim], dtype='float32', raw_tensor=torch.ones(2, 3))
    tensor_f = Tensor('tensor', dims=[feature_dim], dtype='float32', raw_tensor=torch.arange((- 1), 2, dtype=torch.float32))
    from returnn.frontend import _native
    mod = _native.get_module()
    # No Python-level code may execute inside this block.
    with _CheckNoPythonCalls():
        res1 = mod.tensor_sub(tensor_bf, tensor_bf)  # x - x == 0
        res2 = mod.tensor_sub(tensor_bf, tensor_f)  # broadcast over batch
        res3 = mod.tensor_sub(tensor_bf, 3.0)  # scalar rhs
    assert (isinstance(res1, Tensor) and isinstance(res1.raw_tensor, torch.Tensor))
    assert (res1.dims == (batch_dim, feature_dim))
    assert (res1.raw_tensor.detach().numpy().tolist() == [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
    assert (isinstance(res2, Tensor) and isinstance(res2.raw_tensor, torch.Tensor))
    assert (res2.dims == (batch_dim, feature_dim))
    # ones minus [-1, 0, 1] -> [2, 1, 0] per row.
    assert (res2.raw_tensor.detach().numpy().tolist() == [[2.0, 1.0, 0.0], [2.0, 1.0, 0.0]])
    assert (isinstance(res3, Tensor) and isinstance(res3.raw_tensor, torch.Tensor))
    assert (res3.dims == (batch_dim, feature_dim))
    assert (res3.raw_tensor.detach().numpy().tolist() == [[(- 2.0), (- 2.0), (- 2.0)], [(- 2.0), (- 2.0), (- 2.0)]])
|
def test_native_torch_tensor_sub_permute_more_dims():
    """
    Test the native ``tensor_sub`` when the two operands have the same dims
    in different order: the result must align the axes (so x - permuted(x)
    is all zeros), keeping the first operand's dim order.
    """
    batch_dim = Dim(2, name='batch_dim')
    time_dim = Dim(3, name='time_dim')
    feature_dim = Dim(5, name='feature_dim')
    tensor_bft = Tensor('tensor', dims=[batch_dim, feature_dim, time_dim], dtype='int32', raw_tensor=torch.arange(1, (1 + ((2 * 3) * 5)), dtype=torch.int32).reshape(2, 5, 3))
    tensor_tbf = tensor_bft.copy_transpose([time_dim, batch_dim, feature_dim])
    assert (tensor_tbf.dims == (time_dim, batch_dim, feature_dim))
    from returnn.frontend import _native
    mod = _native.get_module()
    # No Python-level code may execute inside this block.
    with _CheckNoPythonCalls():
        res1 = mod.tensor_sub(tensor_bft, tensor_tbf)
    assert (isinstance(res1, Tensor) and isinstance(res1.raw_tensor, torch.Tensor))
    # Result keeps the dim order of the first operand.
    assert (res1.dims == (batch_dim, feature_dim, time_dim))
    # Same data in both operands -> difference is exactly zero everywhere.
    assert all(((0 == v) for v in res1.raw_tensor.detach().numpy().flatten().tolist()))
|
def analyze_dataset(options):
    """
    Iterate over the dataset's generated batches and print statistics:
    per-batch progress with ETA, and totals for num seqs, (padded) frames,
    actually used frames and the used-frame fraction per data key.

    Uses the module globals ``dataset``, ``config``, ``log`` set up by init().

    :param options: argparse.Namespace, with attributes epoch, key, endseq
    """
    print(('Epoch: %i' % options.epoch), file=log.v3)
    print('Dataset keys:', dataset.get_data_keys(), file=log.v3)
    print('Dataset target keys:', dataset.get_target_list(), file=log.v3)
    assert (options.key in dataset.get_data_keys())
    (terminal_width, _) = util.terminal_size()
    # Interactive progress bar only at verbosity 3-4 on a real terminal.
    show_interactive_process_bar = (log.verbose[3] and (not log.verbose[5]) and (terminal_width > 0))
    start_time = time.time()
    num_seqs_stats = Stats()
    if (options.endseq < 0):
        options.endseq = float('inf')
    # Generate batches as for a recurrent net (variable seq lengths, padding),
    # with the same batching options a training run would read from the config.
    recurrent = True
    used_data_keys = dataset.get_data_keys()
    batch_size = config.typed_value('batch_size', 1)
    max_seqs = config.int('max_seqs', (- 1))
    seq_drop = config.float('seq_drop', 0.0)
    max_seq_length = (config.typed_value('max_seq_length', None) or config.float('max_seq_length', 0))
    max_pad_size = config.typed_value('max_pad_size', None)
    batches = dataset.generate_batches(recurrent_net=recurrent, batch_size=batch_size, max_seqs=max_seqs, max_seq_length=max_seq_length, max_pad_size=max_pad_size, seq_drop=seq_drop, used_data_keys=used_data_keys)
    step = 0
    total_num_seqs = 0
    total_num_frames = NumbersDict()  # padded frames (batch max len * num seqs)
    total_num_used_frames = NumbersDict()  # actual frames, without padding
    try:
        while batches.has_more():
            (batch,) = batches.peek_next_n(1)
            assert isinstance(batch, Batch)
            if (batch.start_seq > options.endseq):
                break
            dataset.load_seqs(batch.start_seq, batch.end_seq)
            complete_frac = batches.completed_frac()
            start_elapsed = (time.time() - start_time)
            try:
                num_seqs_s = str(dataset.num_seqs)
            except NotImplementedError:
                # Some datasets only know an estimate, or nothing at all.
                try:
                    num_seqs_s = ('~%i' % dataset.estimated_num_seqs)
                except TypeError:  # estimated_num_seqs is None
                    num_seqs_s = '?'
            progress_prefix = ('%i/%s' % (batch.start_seq, num_seqs_s))
            progress = ('%s (%.02f%%)' % (progress_prefix, (complete_frac * 100)))
            if (complete_frac > 0):
                # Linear extrapolation of the elapsed time for an ETA.
                total_time_estimated = (start_elapsed / complete_frac)
                remaining_estimated = (total_time_estimated - start_elapsed)
                progress += (' (%s)' % hms(remaining_estimated))
            # Padded size: every seq in the batch is padded to the batch max.
            batch_max_time = (NumbersDict.max([seq.frame_length for seq in batch.seqs]) * len(batch.seqs))
            batch_num_used_frames = sum([seq.frame_length for seq in batch.seqs], NumbersDict())
            total_num_seqs += len(batch.seqs)
            num_seqs_stats.collect(numpy.array([len(batch.seqs)]))
            total_num_frames += batch_max_time
            total_num_used_frames += batch_num_used_frames
            print(('%s, batch %i, num seqs %i, frames %s, used %s (%s)' % (progress, step, len(batch.seqs), batch_max_time, batch_num_used_frames, (batch_num_used_frames / batch_max_time))), file=log.v5)
            if show_interactive_process_bar:
                util.progress_bar_with_time(complete_frac, prefix=progress_prefix)
            step += 1
            batches.advance(1)
    finally:
        # Summary is printed even if interrupted (e.g. KeyboardInterrupt).
        print(('Done. Total time %s. More seqs which we did not dump: %s' % (hms((time.time() - start_time)), batches.has_more())), file=log.v2)
        # NOTE(review): this print has no file=log.vN unlike the others -- presumably
        # an oversight, but kept to preserve output behavior; confirm before changing.
        print(('Dataset epoch %i, order %r.' % (dataset.epoch, dataset.seq_ordering)))
        print(('Num batches (steps): %i' % step), file=log.v1)
        print(('Num seqs: %i' % total_num_seqs), file=log.v1)
        num_seqs_stats.dump(stream=log.v1, stream_prefix='Batch num seqs ')
        for key in used_data_keys:
            print(('Data key %r:' % key), file=log.v1)
            print((' Num frames: %s' % total_num_frames[key]), file=log.v1)
            print((' Num used frames: %s' % total_num_used_frames[key]), file=log.v1)
            print((' Fraction used frames: %s' % (total_num_used_frames / total_num_frames)[key]), file=log.v1)
        dataset.finish_epoch()
|
def init(config_str, config_dataset, use_pretrain, epoch, verbosity):
    """
    Initialize RETURNN (config, logging, backend engine) and set the module
    globals ``config`` and ``dataset`` used by analyze_dataset().

    :param str config_str: either filename to config-file, or dict for dataset
    :param str|None config_dataset: if config given, the dataset config key to use, e.g. "dev"
    :param bool use_pretrain: might overwrite config options, or even the dataset
    :param int epoch:
    :param int verbosity: overwrites log_verbosity
    """
    rnn.init_better_exchook()
    rnn.init_thread_join_hack()
    dataset_opts = None
    config_filename = None
    if config_str.strip().startswith('{'):
        # Dataset given directly as a Python dict literal on the command line.
        print(('Using dataset %s.' % config_str))
        # NOTE: eval of a CLI-provided string -- acceptable for a local tool
        # with trusted input, but do not expose this to untrusted callers.
        dataset_opts = eval(config_str.strip())
    elif config_str.endswith('.hdf'):
        # A single HDF file: wrap it in an HDFDataset.
        dataset_opts = {'class': 'HDFDataset', 'files': [config_str]}
        print(('Using dataset %r.' % dataset_opts))
        assert os.path.exists(config_str)
    else:
        config_filename = config_str
        print(('Using config file %r.' % config_filename))
        assert os.path.exists(config_filename)
    rnn.init_config(config_filename=config_filename, default_config={'cache_size': '0'})
    global config
    config = rnn.config
    config.set('log', None)  # log to stdout, not to a file
    config.set('log_verbosity', verbosity)
    rnn.init_log()
    print(('Returnn %s starting up.' % __file__), file=log.v2)
    rnn.returnn_greeting()
    rnn.init_faulthandler()
    util.BackendEngine.select_engine(config=config)
    if (not dataset_opts):
        # No explicit dataset given: take it from the config
        # (either the named one, or "train" by default).
        if config_dataset:
            dataset_opts = ('config:%s' % config_dataset)
        else:
            dataset_opts = 'config:train'
    if use_pretrain:
        from returnn.pretrain import pretrain_from_config
        pretrain = pretrain_from_config(config)
        if pretrain:
            print(('Using pretrain %s, epoch %i' % (pretrain, epoch)), file=log.v2)
            net_dict = pretrain.get_network_json_for_epoch(epoch=epoch)
            if ('#config' in net_dict):
                # Pretrain can overwrite config options per epoch; apply them
                # and log a readable diff for each changed key.
                config_overwrites = net_dict['#config']
                print('Pretrain overwrites these config options:', file=log.v2)
                assert isinstance(config_overwrites, dict)
                for (key, value) in sorted(config_overwrites.items()):
                    assert isinstance(key, str)
                    orig_value = config.typed_dict.get(key, None)
                    if (isinstance(orig_value, dict) and isinstance(value, dict)):
                        diff_str = ('\n' + util.obj_diff_str(orig_value, value))
                    elif isinstance(value, dict):
                        diff_str = ('\n%r ->\n%s' % (orig_value, pformat(value)))
                    else:
                        diff_str = (' %r -> %r' % (orig_value, value))
                    print(('Config key %r for epoch %i:%s' % (key, epoch, diff_str)), file=log.v2)
                    config.set(key, value)
            else:
                print('No config overwrites for this epoch.', file=log.v2)
        else:
            print('No pretraining used.', file=log.v2)
    elif config.typed_dict.get('pretrain', None):
        # Config defines pretrain but the user did not request it.
        print('Not using pretrain.', file=log.v2)
    dataset_default_opts = {}
    Dataset.kwargs_update_from_config(config, dataset_default_opts)
    print('Using dataset:', dataset_opts, file=log.v2)
    global dataset
    dataset = init_dataset(dataset_opts, default_kwargs=dataset_default_opts)
    assert isinstance(dataset, Dataset)
    dataset.init_seq_order(epoch=epoch)
|
def main():
    """
    Main entry: parse command line args, initialize RETURNN and the dataset,
    then run the batch analysis.
    """
    # Fixed typo in description ("Anaylize" -> "Analyze").
    arg_parser = argparse.ArgumentParser(description='Analyze dataset batches.')
    arg_parser.add_argument('returnn_config', help='either filename to config-file, or dict for dataset')
    arg_parser.add_argument('--dataset', help="if given the config, specifies the dataset. e.g. 'dev'")
    arg_parser.add_argument('--epoch', type=int, default=1)
    # Help text previously claimed "default: 10" but the actual default is -1 (no limit).
    arg_parser.add_argument('--endseq', type=int, default=(- 1), help='end seq idx (inclusive) or -1 for all (default: -1)')
    # Help text previously claimed "default: 4" but the actual default is 5.
    arg_parser.add_argument('--verbosity', type=int, default=5, help='overwrites log_verbosity (default: 5)')
    arg_parser.add_argument('--key', default='data', help="data-key, e.g. 'data' or 'classes'. (default: 'data')")
    arg_parser.add_argument('--use_pretrain', action='store_true')
    args = arg_parser.parse_args()
    init(config_str=args.returnn_config, config_dataset=args.dataset, epoch=args.epoch,
         use_pretrain=args.use_pretrain, verbosity=args.verbosity)
    try:
        analyze_dataset(args)
    except KeyboardInterrupt:
        print('KeyboardInterrupt')
        sys.exit(1)
    finally:
        # Always shut down RETURNN cleanly, even on error/interrupt.
        rnn.finalize()
|
class BlissItem:
    """
    Represents one entry (segment) in the Bliss XML.
    """

    # Attribute names used for the repr, in display order.
    _fields = ('segment_name', 'recording_filename', 'start_time', 'end_time', 'orth')

    def __init__(self, segment_name, recording_filename, start_time, end_time, orth):
        """
        :param str segment_name: full hierarchical name, "/"-joined
        :param str recording_filename:
        :param float start_time: in seconds
        :param float end_time: in seconds
        :param str orth: orthography (transcription)
        """
        self.segment_name = segment_name
        self.recording_filename = recording_filename
        self.start_time = start_time
        self.end_time = end_time
        self.orth = orth

    def __repr__(self):
        attrs = ', '.join('%s=%r' % (name, getattr(self, name)) for name in self._fields)
        return 'BlissItem(%s)' % attrs

    @property
    def delta_time(self):
        """
        :return: segment duration (end_time - start_time)
        :rtype: float
        """
        return self.end_time - self.start_time
|
def iter_bliss(filename):
    """
    Iterate over the segments of a Bliss corpus XML file.

    :param str filename: Bliss XML file, optionally gzipped
    :return: yields BlissItem, one per <segment> element
    :rtype: list[BlissItem]
    """
    corpus_file = open(filename, 'rb')
    if filename.endswith('.gz'):
        corpus_file = gzip.GzipFile(fileobj=corpus_file)
    # Incremental parse. We maintain the element/name stacks ourselves so that
    # we can build full hierarchical segment names and free memory as we go.
    context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
    (_, root) = next(context)
    name_tree = [root.attrib['name']]
    elem_tree = [root]
    count_tree = [0]  # per-level child counter, used as name for unnamed elements
    recording_filename = None
    for (event, elem) in context:
        if (elem.tag == 'recording'):
            # Track the currently open <recording>; reset when it closes.
            recording_filename = (elem.attrib['audio'] if (event == 'start') else None)
        if ((event == 'end') and (elem.tag == 'segment')):
            elem_orth = elem.find('orth')
            orth_raw = (elem_orth.text or '')
            orth_split = orth_raw.split()
            orth = ' '.join(orth_split)  # normalize whitespace
            segment_name = '/'.join(name_tree)
            (yield BlissItem(segment_name=segment_name, recording_filename=recording_filename, start_time=float(elem.attrib['start']), end_time=float(elem.attrib['end']), orth=orth))
            # Drop already-processed elements to keep memory usage bounded.
            root.clear()
        if (event == 'start'):
            count_tree[(- 1)] += 1
            count_tree.append(0)
            elem_tree += [elem]
            elem_name = elem.attrib.get('name', None)
            if (elem_name is None):
                # Unnamed element: use its 1-based index within the parent.
                elem_name = str(count_tree[(- 2)])
            assert isinstance(elem_name, str)
            name_tree += [elem_name]
        elif (event == 'end'):
            assert (elem_tree[(- 1)] is elem)
            elem_tree = elem_tree[:(- 1)]
            name_tree = name_tree[:(- 1)]
            count_tree = count_tree[:(- 1)]
|
def main():
    """
    Main entry: write a Python-dict text file mapping segment name to
    segment length in centiseconds (10ms frames), read from Bliss XML files.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument('bliss_filename', nargs='+')
    arg_parser.add_argument('--output', default='/dev/stdout')
    args = arg_parser.parse_args()
    if args.output.endswith('.gz'):
        out = gzip.GzipFile(args.output, mode='wb')
    else:
        out = open(args.output, 'wb')
    out.write(b'{\n')
    for fn in args.bliss_filename:
        for item in iter_bliss(fn):
            assert isinstance(item, BlissItem)
            # Segment duration in 10ms frames.
            num_frames = round(item.delta_time * 100.0)
            out.write(b'%r: %i,\n' % (item.segment_name, num_frames))
    out.write(b'}\n')
    out.close()
|
def main():
    """
    Main entry: print every orthography from a Bliss XML corpus, unmodified.
    """
    parser = ArgumentParser(description='dump orth from Bliss XML file as-is')
    parser.add_argument('xml')
    args = parser.parse_args()

    def print_orth(orth):
        """
        :param str orth:
        """
        print(orth)

    _iter_bliss(filename=args.xml, callback=print_orth)
|
class BlissItem:
    """
    Bliss item: one segment entry from a Bliss corpus XML file.
    """

    # Attribute names used for the repr, in display order.
    _fields = ('segment_name', 'recording_filename', 'start_time', 'end_time', 'orth')

    def __init__(self, segment_name, recording_filename, start_time, end_time, orth):
        """
        :param str segment_name: full hierarchical name, "/"-joined
        :param str recording_filename:
        :param float start_time: in seconds
        :param float end_time: in seconds
        :param str orth: orthography (transcription)
        """
        self.segment_name = segment_name
        self.recording_filename = recording_filename
        self.start_time = start_time
        self.end_time = end_time
        self.orth = orth

    def __repr__(self):
        attrs = ', '.join('%s=%r' % (name, getattr(self, name)) for name in self._fields)
        return 'BlissItem(%s)' % attrs

    @property
    def delta_time(self):
        """
        :return: segment duration (end_time - start_time)
        :rtype: float
        """
        return self.end_time - self.start_time
|
def iter_bliss(filename):
    """
    Iterate over the segments of a Bliss corpus XML file.

    :param str filename: Bliss XML file, optionally gzipped
    :return: yields BlissItem, one per <segment> element
    :rtype: list[BlissItem]
    """
    corpus_file = open(filename, 'rb')
    if filename.endswith('.gz'):
        corpus_file = gzip.GzipFile(fileobj=corpus_file)
    # Incremental parse. We maintain the element/name stacks ourselves so that
    # we can build full hierarchical segment names and free memory as we go.
    context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
    (_, root) = next(context)
    name_tree = [root.attrib['name']]
    elem_tree = [root]
    count_tree = [0]  # per-level child counter, used as name for unnamed elements
    recording_filename = None
    for (event, elem) in context:
        if (elem.tag == 'recording'):
            # Track the currently open <recording>; reset when it closes.
            recording_filename = (elem.attrib['audio'] if (event == 'start') else None)
        if ((event == 'end') and (elem.tag == 'segment')):
            elem_orth = elem.find('orth')
            orth_raw = (elem_orth.text or '')
            orth_split = orth_raw.split()
            orth = ' '.join(orth_split)  # normalize whitespace
            segment_name = '/'.join(name_tree)
            (yield BlissItem(segment_name=segment_name, recording_filename=recording_filename, start_time=float(elem.attrib['start']), end_time=float(elem.attrib['end']), orth=orth))
            # Drop already-processed elements to keep memory usage bounded.
            root.clear()
        if (event == 'start'):
            count_tree[(- 1)] += 1
            count_tree.append(0)
            elem_tree += [elem]
            elem_name = elem.attrib.get('name', None)
            if (elem_name is None):
                # Unnamed element: use its 1-based index within the parent.
                elem_name = str(count_tree[(- 2)])
            assert isinstance(elem_name, str)
            name_tree += [elem_name]
        elif (event == 'end'):
            assert (elem_tree[(- 1)] is elem)
            elem_tree = elem_tree[:(- 1)]
            name_tree = name_tree[:(- 1)]
            count_tree = count_tree[:(- 1)]
|
def main():
    """
    Main entry: read a Bliss corpus XML, optionally filter by a segment subset,
    merge Switchboard A/B channel recordings, sort and/or merge consecutive
    segments, and print the items (or selected attributes via --output_type).
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument('bliss_filename')
    arg_parser.add_argument('--subset_segment_file')
    arg_parser.add_argument('--output_type', default='', help='e.g. segment_name')
    arg_parser.add_argument('--merge_swb_ab', action='store_true')
    arg_parser.add_argument('--sort_by_time', action='store_true')
    arg_parser.add_argument('--merge_segs_up_to_time', type=float)
    args = arg_parser.parse_args()
    subset_segment_list = None
    if args.subset_segment_file:
        subset_segment_list = set(open(args.subset_segment_file).read().splitlines())
    rec_filenames = set()
    items_by_rec = {}  # recording name -> list[BlissItem]
    for bliss_item in iter_bliss(args.bliss_filename):
        if (subset_segment_list and (bliss_item.segment_name not in subset_segment_list)):
            continue
        rec_name = bliss_item.recording_filename
        assert rec_name, ('invalid item %r' % bliss_item)
        if args.merge_swb_ab:
            # Switchboard recordings come in "...A"/"...B" channel pairs; merge them under one key.
            rec_name = os.path.basename(rec_name)
            (rec_name, _) = os.path.splitext(rec_name)
            rec_filenames.add(rec_name)
            assert (rec_name[(- 1)] in 'AB')
            rec_name = rec_name[:(- 1)]
        else:
            rec_filenames.add(rec_name)
        items_by_rec.setdefault(rec_name, []).append(bliss_item)
    assert items_by_rec
    if args.merge_swb_ab:
        if subset_segment_list:
            # With a subset, only keep recordings where both channels are present.
            for key in list(items_by_rec.keys()):
                if (((key + 'A') not in rec_filenames) or ((key + 'B') not in rec_filenames)):
                    del items_by_rec[key]
            assert items_by_rec, ('rec_filenames %r' % (rec_filenames,))
        else:
            for key in items_by_rec.keys():
                assert ((key + 'A') in rec_filenames)
                assert ((key + 'B') in rec_filenames)
    for (key, ls) in items_by_rec.items():
        assert isinstance(ls, list)
        if args.sort_by_time:
            ls.sort(key=(lambda item: item.start_time))
    if args.merge_segs_up_to_time:
        # Greedily merge runs of consecutive segments per recording while the
        # summed duration stays within the limit.
        for (key, ls) in items_by_rec.items():
            i = 0
            while (i < len(ls)):
                j = (i + 1)
                dt = ls[i].delta_time
                while (j < len(ls)):
                    if ((dt + ls[j].delta_time) > args.merge_segs_up_to_time):
                        break
                    dt += ls[j].delta_time
                    j += 1
                if (j > (i + 1)):
                    ls[i:j] = [BlissItem(
                        segment_name=';'.join([item.segment_name for item in ls[i:j]]),
                        recording_filename=ls[i].recording_filename,
                        start_time=0.0, end_time=dt,
                        orth=' '.join([item.orth for item in ls[i:j]]))]
                i += 1
    # Bugfix: ''.split(',') yields [''], so the empty default --output_type never
    # reached the "print(item)" branch and crashed on getattr(item, '').
    # Filter out empty entries so the default prints the full item repr.
    output_types = [t for t in args.output_type.split(',') if t]
    for (key, ls) in items_by_rec.items():
        assert isinstance(ls, list)
        for item in ls:
            assert isinstance(item, BlissItem)
            if (not output_types):
                print(item)
            else:
                # Note: use a distinct loop variable to avoid shadowing `key` above.
                print(' '.join([str(getattr(item, attr)) for attr in output_types]))
|
class BlissItem:
    """
    Bliss item: one segment entry from a Bliss corpus XML file,
    including the (optional) speaker name.
    """

    # Attribute names used for the repr, in display order.
    _fields = ('segment_name', 'recording_filename', 'start_time', 'end_time', 'orth', 'speaker_name')

    def __init__(self, segment_name, recording_filename, start_time, end_time, orth, speaker_name=None):
        """
        :param str segment_name: full hierarchical name, "/"-joined
        :param str recording_filename:
        :param Decimal start_time: in seconds
        :param Decimal end_time: in seconds
        :param str orth: orthography (transcription)
        :param str|None speaker_name:
        """
        self.segment_name = segment_name
        self.recording_filename = recording_filename
        self.start_time = start_time
        self.end_time = end_time
        self.orth = orth
        self.speaker_name = speaker_name

    def __repr__(self):
        attrs = ', '.join('%s=%r' % (name, getattr(self, name)) for name in self._fields)
        return 'BlissItem(%s)' % attrs

    @property
    def delta_time(self):
        """
        :return: segment duration (end_time - start_time); Decimal if the times are Decimal
        :rtype: float
        """
        return self.end_time - self.start_time
|
def iter_bliss(filename):
    """
    Iterate over the segments of a Bliss corpus XML file.
    Times are kept as Decimal for exactness; speaker names are extracted if present.

    :param str filename: Bliss XML file, optionally gzipped
    :return: yields BlissItem, one per <segment> element
    :rtype: list[BlissItem]
    """
    corpus_file = open(filename, 'rb')
    if filename.endswith('.gz'):
        corpus_file = gzip.GzipFile(fileobj=corpus_file)
    # Explicit parser with forced UTF-8 decoding of the byte stream.
    parser = ElementTree.XMLParser(target=ElementTree.TreeBuilder(), encoding='utf-8')
    context = iter(ElementTree.iterparse(corpus_file, parser=parser, events=('start', 'end')))
    (_, root) = next(context)
    name_tree = [root.attrib['name']]
    elem_tree = [root]
    count_tree = [0]  # per-level child counter, used as name for unnamed elements
    recording_filename = None
    for (event, elem) in context:
        if (elem.tag == 'recording'):
            # Track the currently open <recording>; reset when it closes.
            recording_filename = (elem.attrib['audio'] if (event == 'start') else None)
        if ((event == 'end') and (elem.tag == 'segment')):
            elem_orth = elem.find('orth')
            orth_raw = (elem_orth.text or '')
            orth_split = orth_raw.split()
            orth = ' '.join(orth_split)  # normalize whitespace
            elem_speaker = elem.find('speaker')
            if (elem_speaker is not None):
                speaker_name = elem_speaker.attrib['name']
            else:
                speaker_name = None
            segment_name = '/'.join(name_tree)
            (yield BlissItem(segment_name=segment_name, recording_filename=recording_filename, start_time=Decimal(elem.attrib['start']), end_time=Decimal(elem.attrib['end']), orth=orth, speaker_name=speaker_name))
            # Drop already-processed elements to keep memory usage bounded.
            root.clear()
        if (event == 'start'):
            count_tree[(- 1)] += 1
            count_tree.append(0)
            elem_tree += [elem]
            elem_name = elem.attrib.get('name', None)
            if (elem_name is None):
                # Unnamed element: use its 1-based index within the parent.
                elem_name = str(count_tree[(- 2)])
            assert isinstance(elem_name, str)
            name_tree += [elem_name]
        elif (event == 'end'):
            assert (elem_tree[(- 1)] is elem)
            elem_tree = elem_tree[:(- 1)]
            name_tree = name_tree[:(- 1)]
            count_tree = count_tree[:(- 1)]
|
class SprintCacheHandler():
    """
    This is just to apply the same silence trimming on the raw audio samples
    which was applied on the features in the Sprint cache.
    We can reconstruct this information because the Sprint cache also has the exact timing information.
    """

    def __init__(self, opt, bliss_opt, raw_sample_rate, feat_sample_rate):
        """
        :param str opt: either filename or filename pattern (glob) of the Sprint feature cache(s)
        :param str bliss_opt: either filename or filename pattern (glob) of the Bliss XML file(s)
        :param int raw_sample_rate: sample rate of the raw audio
        :param int feat_sample_rate: frame rate of the features in the Sprint cache
        """
        self.sprint_cache = self._load_sprint_cache(opt)
        self.seg_times = self._collect_seg_times_from_bliss(bliss_opt)
        self.raw_sample_rate = raw_sample_rate
        self.feat_sample_rate = feat_sample_rate
        self.pp_counter = 0  # counts feature_post_process() calls

    @staticmethod
    def _load_sprint_cache(opt):
        """
        :param str opt: either filename or filename pattern
        :rtype: SprintCache.FileArchiveBundle|SprintCache.FileArchive
        """
        if ('*' in opt):
            # Pattern: load all matching caches into one bundle.
            sprint_cache_fns = glob(opt)
            assert sprint_cache_fns, ('nothing found under sprint cache pattern %r' % (opt,))
            sprint_cache = returnn.sprint.cache.FileArchiveBundle()
            for fn in sprint_cache_fns:
                print('Load Sprint cache:', fn)
                sprint_cache.add_bundle_or_archive(fn)
        else:
            print('Load Sprint cache:', opt)
            sprint_cache = returnn.sprint.cache.open_file_archive(opt, must_exists=True)
        return sprint_cache

    @staticmethod
    def _collect_seg_times_from_bliss(opt):
        """
        :param str opt: either filename or filename pattern
        :return: segment name -> (start_time, end_time)
        :rtype: dict[str,(Decimal,Decimal)]
        """
        if ('*' in opt):
            items = []
            fns = glob(opt)
            assert fns, ('nothing found under Bliss XML cache pattern %r' % (opt,))
            for fn in fns:
                print('Load Bliss XML:', fn)
                items.extend(iter_bliss(fn))
        else:
            print('Load Bliss XML:', opt)
            items = list(iter_bliss(opt))
        return {seq.segment_name: (seq.start_time, seq.end_time) for seq in items}

    def feature_post_process(self, feature_data, seq_name, **kwargs):
        """
        Cut the raw samples such that they cover exactly the (non-silence) frames
        which are present in the Sprint feature cache for this segment.

        :param numpy.ndarray feature_data: raw samples, shape (time, 1)
        :param str seq_name:
        :return: features
        :rtype: numpy.ndarray
        """
        assert (feature_data.shape[1] == 1)
        self.pp_counter += 1
        assert ((self.raw_sample_rate % self.feat_sample_rate) == 0)
        # Number of raw samples covered by one feature frame.
        num_frames_per_feat = (self.raw_sample_rate // self.feat_sample_rate)
        assert ((num_frames_per_feat % 2) == 0)
        # Allow window positions to deviate by up to half a feature frame.
        allowed_variance_num_frames = (num_frames_per_feat // 2)
        (times, data) = self.sprint_cache.read(seq_name, 'feat')
        assert (len(times) == len(data))
        prev_end_frame = None
        res_feature_data = []
        # Sprint times are absolute within the recording; shift by the segment start.
        seq_time_offset = float(self.seg_times[seq_name][0])
        for ((start_time, end_time), feat) in zip(times, data):
            start_time -= seq_time_offset
            end_time -= seq_time_offset
            # Center the sample window on the middle of the feature frame.
            center_time = ((start_time + end_time) / 2.0)
            start_frame = (int((center_time * self.raw_sample_rate)) - (num_frames_per_feat // 2))
            assert (0 <= start_frame < feature_data.shape[0])
            if (prev_end_frame is not None):
                # Snap to the previous window end if within tolerance, so
                # consecutive windows are contiguous.
                if ((prev_end_frame - allowed_variance_num_frames) <= start_frame <= (prev_end_frame + allowed_variance_num_frames)):
                    start_frame = prev_end_frame
                assert (start_frame >= prev_end_frame)
            end_frame = (start_frame + num_frames_per_feat)
            if (feature_data.shape[0] < end_frame <= (feature_data.shape[0] + allowed_variance_num_frames)):
                # Window slightly exceeds the available audio: zero-pad the remainder.
                res_feature_data.append(feature_data[start_frame:])
                res_feature_data.append(numpy.zeros(((end_frame - feature_data.shape[0]), 1), dtype=feature_data.dtype))
            else:
                assert (end_frame <= feature_data.shape[0])
                res_feature_data.append(feature_data[start_frame:end_frame])
            prev_end_frame = end_frame
        res_feature_data = numpy.concatenate(res_feature_data, axis=0)
        # Sanity check: output covers exactly one window per feature frame.
        assert ((res_feature_data.shape[0] % num_frames_per_feat) == 0)
        assert ((res_feature_data.shape[0] // num_frames_per_feat) == len(data))
        return res_feature_data
|
def longest_common_prefix(strings):
    """
    Return the longest common prefix of all given strings.

    :param list[str]|set[str] strings:
    :rtype: str
    """
    if not strings:
        return ''
    # The lexicographic min and max differ at least as early as any other pair,
    # so it suffices to compare only these two.
    lo, hi = min(strings), max(strings)
    if not lo:
        return ''
    for idx, ch in enumerate(lo):
        if hi[idx] != ch:
            return hi[:idx]
    return lo[:]
|
def longest_common_postfix(strings):
    """
    Return the longest common postfix (suffix) of all given strings.

    :param list[str]|set[str] strings:
    :rtype: str
    """
    # Reverse each string, compute the longest common prefix of the reversed
    # strings, then reverse the result back.
    rev = [s[::-1] for s in strings]
    if not rev:
        return ''
    lo, hi = min(rev), max(rev)
    if not lo:
        return ''
    common = lo
    for idx, ch in enumerate(lo):
        if hi[idx] != ch:
            common = hi[:idx]
            break
    return common[::-1]
|
def hms(s):
    """
    Format a duration in seconds as "h:mm:ss" (fractional seconds are truncated).

    :param float|int s: seconds
    :return: e.g. "1:23:45" (hs:ms:secs). see hms_fraction if you want to get fractional seconds
    :rtype: str
    """
    mins, secs = divmod(s, 60)
    hours, mins = divmod(mins, 60)
    return '%d:%02d:%02d' % (hours, mins, secs)
|
def main():
    """
    Main entry: convert a Bliss corpus into an ogg-zip dataset.
    Each segment is cut from its recording via ffmpeg and stored as an Ogg file;
    optionally the raw audio is first synchronized against a Sprint feature cache
    (silence trimming). All Ogg files plus a metadata list file are packed into
    a zip archive.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument('bliss_filename')
    arg_parser.add_argument('--subset_segment_file')
    arg_parser.add_argument('--no_ogg', help='skip generating ogg files', action='store_true')
    arg_parser.add_argument('--no_conversion', help='skip ffmpeg call, assume audio is correct already', action='store_true')
    arg_parser.add_argument('--no_cleanup', help="don't delete our temp files", action='store_true')
    arg_parser.add_argument('--sprint_cache', help='filename of feature cache for synchronization')
    arg_parser.add_argument('--raw_sample_rate', help='sample rate of audio input', type=int, default=8000)
    arg_parser.add_argument('--feat_sample_rate', help='sample rate of features for sync', type=int, default=100)
    arg_parser.add_argument('--ffmpeg_loglevel', help='loglevel for ffmpeg calls', type=str, default='info')
    arg_parser.add_argument('--ffmpeg_acodec', help='force audio codec for ffmpeg calls', type=str)
    arg_parser.add_argument('--number_of_channels', help='force number of channels for output audio', type=int, default=0)
    arg_parser.add_argument('--output', help='output zip filename (if empty, dummy run)', required=True)
    args = arg_parser.parse_args()
    subset_segment_list = None
    if args.subset_segment_file:
        subset_segment_list = set(open(args.subset_segment_file).read().splitlines())
        assert subset_segment_list
    # Collect all (filtered) segments and the set of their recordings.
    rec_filenames = set()
    seqs = []
    for bliss_item in iter_bliss(args.bliss_filename):
        if (subset_segment_list and (bliss_item.segment_name not in subset_segment_list)):
            continue
        seqs.append(bliss_item)
        rec_filenames.add(bliss_item.recording_filename)
    assert seqs
    if subset_segment_list:
        # Every requested segment must actually exist in the corpus.
        seq_names = set([seq.segment_name for seq in seqs])
        for seq_name in subset_segment_list:
            assert (seq_name in seq_names)
    print('Num seqs:', len(seqs))
    print('Num recordings:', len(rec_filenames))
    # Strip the common directory prefix and common file extension from the
    # recording paths to get short recording names for the zip-internal layout.
    rec_filename_common_prefix = longest_common_prefix(rec_filenames)
    if (not rec_filename_common_prefix.endswith('/')):
        if ('/' in rec_filename_common_prefix):
            rec_filename_common_prefix = rec_filename_common_prefix[:(rec_filename_common_prefix.rfind('/') + 1)]
        else:
            rec_filename_common_prefix = ''
    print('Recordings common dir prefix:', rec_filename_common_prefix)
    rec_filename_common_postfix = longest_common_postfix(rec_filenames)
    if (not rec_filename_common_postfix.startswith('.')):
        if ('.' in rec_filename_common_postfix):
            rec_filename_common_postfix = rec_filename_common_postfix[rec_filename_common_postfix.find('.'):]
        else:
            rec_filename_common_postfix = ''
    print('Recordings common postfix:', rec_filename_common_postfix)
    if args.output:
        zip_filename = args.output
        (name, ext) = os.path.splitext(os.path.basename(zip_filename))
        assert (ext == '.zip')
    else:
        name = 'dummy'
        zip_filename = None
    print('Dataset name:', name)
    sprint_cache_handler = None
    if args.sprint_cache:
        sprint_cache_handler = SprintCacheHandler(opt=args.sprint_cache, bliss_opt=args.bliss_filename, raw_sample_rate=args.raw_sample_rate, feat_sample_rate=args.feat_sample_rate)
    total_duration = Decimal(0)
    total_num_chars = 0
    temp_dir = tempfile.mkdtemp()
    print('Temp dir for data:', temp_dir)
    dest_dirname = ('%s/%s' % (temp_dir, name))
    dest_meta_filename = ('%s/%s.txt' % (temp_dir, name))
    dest_meta_file = open(dest_meta_filename, 'w')
    # Metadata file is a Python list literal, one dict per segment.
    dest_meta_file.write('[\n')
    os.makedirs(dest_dirname, exist_ok=True)
    for seq in seqs:
        rec_filename = seq.recording_filename
        assert os.path.isfile(rec_filename)
        assert ((seq.start_time < seq.end_time) and (seq.delta_time > 0))
        duration = seq.delta_time
        assert (duration > 0)
        total_duration += duration
        assert (rec_filename.startswith(rec_filename_common_prefix) and rec_filename.endswith(rec_filename_common_postfix))
        rec_name = rec_filename[len(rec_filename_common_prefix):(- len(rec_filename_common_postfix))]
        if args.sprint_cache:
            # First cut the segment to a temp wav, then sync its samples against
            # the Sprint feature cache; the synced wav is the ffmpeg/ogg source below.
            wav_tmp_filename = ('%s/%s/%s_%s.wav' % (dest_dirname, rec_name, seq.start_time, seq.end_time))
            os.makedirs(os.path.dirname(wav_tmp_filename), exist_ok=True)
            cmd = ['ffmpeg']
            if args.ffmpeg_acodec:
                cmd += ['-acodec', args.ffmpeg_acodec]
            cmd += ['-i', rec_filename, '-ss', str(seq.start_time), '-t', str(duration)]
            if (args.number_of_channels > 0):
                cmd += ['-ac', str(args.number_of_channels)]
            cmd += [wav_tmp_filename, '-loglevel', args.ffmpeg_loglevel]
            print(('$ %s' % ' '.join(cmd)))
            check_call(cmd)
            import soundfile
            (audio, sample_rate) = soundfile.read(wav_tmp_filename)
            assert (sample_rate == args.raw_sample_rate)
            audio_synced = sprint_cache_handler.feature_post_process(numpy.expand_dims(audio, axis=1), seq.segment_name)
            soundfile.write(wav_tmp_filename, audio_synced, args.raw_sample_rate)
            source_filename = wav_tmp_filename
            start_time = 0  # the temp wav already starts at the segment start
            limit_duration = False
        else:
            soundfile = audio_synced = sample_rate = wav_tmp_filename = None
            source_filename = rec_filename
            start_time = seq.start_time
            limit_duration = True
        dest_filename = ('%s/%s/%s_%s.ogg' % (dest_dirname, rec_name, seq.start_time, seq.end_time))
        os.makedirs(os.path.dirname(dest_filename), exist_ok=True)
        if args.no_ogg:
            print(('no Ogg (%s -> %s)' % (os.path.basename(rec_filename), dest_filename[(len(dest_dirname) + 1):])))
        else:
            if os.path.exists(dest_filename):
                print(('already exists, delete: %s' % os.path.basename(dest_filename)))
                os.remove(dest_filename)
            if args.no_conversion:
                # Just copy the source Ogg; only valid for whole recordings.
                assert source_filename.endswith('.ogg')
                assert ((not start_time) and (not limit_duration)), f'With no_conversion=True, start_time {start_time} or duration {duration} is not supported. Use no_conversion=False, even if the input is already in OGG.'
                print(('skip ffmpeg, copy instead (%s -> %s)' % (os.path.basename(source_filename), dest_filename[(len(dest_dirname) + 1):])))
                shutil.copy(src=source_filename, dst=dest_filename)
            else:
                cmd = ['ffmpeg']
                if args.ffmpeg_acodec:
                    cmd += ['-acodec', args.ffmpeg_acodec]
                cmd += ['-i', source_filename]
                if (args.number_of_channels > 0):
                    cmd += ['-ac', str(args.number_of_channels)]
                if start_time:
                    cmd += ['-ss', str(start_time)]
                if limit_duration:
                    cmd += ['-t', str(duration)]
                cmd += [dest_filename, '-loglevel', args.ffmpeg_loglevel]
                print(('$ %s' % ' '.join(cmd)))
                check_call(cmd)
            if args.sprint_cache:
                # Verify the converted Ogg matches the synced wav, then drop the temp wav.
                (audio_ogg, sample_rate_ogg) = soundfile.read(dest_filename)
                assert (len(audio_synced) == len(audio_ogg)), 'Number of frames in synced wav and converted ogg do not match'
                assert (sample_rate == sample_rate_ogg), 'Sample rates in synced wav and converted ogg do not match'
                os.remove(wav_tmp_filename)
        dest_meta_file.write(("{'text': %r, 'speaker_name': %r, 'file': %r, 'seq_name': %r, 'duration': %s},\n" % (seq.orth, seq.speaker_name, dest_filename[(len(dest_dirname) + 1):], seq.segment_name, duration)))
        total_num_chars += len(seq.orth)
    dest_meta_file.write(']\n')
    dest_meta_file.close()
    print('Total duration:', total_duration, 'secs', ('(%s)' % hms(total_duration)))
    print('Total num chars:', total_num_chars)
    print('Dataset zip filename:', (zip_filename if zip_filename else '(dummy run, no zip file)'))
    if zip_filename:
        print('Zipping...')
        # mode='a': append to an existing zip if present.
        zip_file = zipfile.ZipFile(zip_filename, mode='a', compression=zipfile.ZIP_DEFLATED)
        for (dirpath, dirnames, filenames) in os.walk(temp_dir):
            for name in sorted((dirnames + filenames)):
                path = ('%s/%s' % (dirpath, name))
                assert path.startswith((temp_dir + '/'))
                zip_path = path[(len(temp_dir) + 1):]
                print(' Adding:', zip_path)
                zip_file.write(path, zip_path)
    if (not args.no_cleanup):
        print('Cleaning up...')
        shutil.rmtree(temp_dir)
    else:
        print('Keeping temp dir:', temp_dir)
    print('Finished.')
|
def parse_vocab(filename):
    """
    Can be either pure text file, line-based, or lexicon XML file, or Python vocab dict.

    :param str filename:
    :rtype: list[str]
    """
    if filename.endswith('.gz'):
        import gzip
        raw = gzip.open(filename, 'r').read().decode('utf8')
    else:
        raw = open(filename, 'r').read()

    if raw.startswith('{'):
        # Python dict literal mapping label -> index.
        # NOTE(review): eval of file content -- only use on trusted files.
        vocab_dict = eval(raw)
        assert isinstance(vocab_dict, dict)
        idx_to_label = {i: lbl for (lbl, i) in sorted(vocab_dict.items())}
        num_labels = len(idx_to_label)
        max_label = max(idx_to_label)
        assert 0 == min(idx_to_label)
        if (num_labels - 1) < max_label:
            # Indices are not contiguous; report the gaps before failing.
            print(('Vocab error: not all indices used? max label: %i' % max_label))
            print(('unused labels: %r' % ([i for i in range((max_label + 1)) if (i not in idx_to_label)],)))
            assert ((num_labels - 1) == max_label)
        assert isinstance(idx_to_label[0], str)
        return [lbl for (_, lbl) in sorted(idx_to_label.items())]

    if raw.startswith('<?xml'):
        # Lexicon XML: collect all <orth> entries from every <lemma>.
        from io import StringIO
        labels = []
        context = iter(ElementTree.iterparse(StringIO(raw), events=('start', 'end')))
        (_, root) = next(context)
        for (event, elem) in context:
            if (event == 'end') and (elem.tag == 'lemma'):
                for orth_elem in elem.findall('orth'):
                    labels.append((orth_elem.text or '').strip())
                root.clear()
        return labels

    # Plain text: one label per line.
    return raw.splitlines()
|
def xml_prettify(element, indent='  '):
    """
    Indent an ElementTree in-place (via text/tail) so that serializing it yields readable XML.
    https://stackoverflow.com/a/38574067/133374 (deleted StackOverflow answer)

    :param ElementTree.Element element:
    :param str indent:
    """
    # Breadth-first traversal; pending holds (nesting level, element) pairs.
    pending = [(0, element)]
    while pending:
        level, elem = pending.pop(0)
        child_entries = [(level + 1, child) for child in list(elem)]
        if child_entries:
            # Opening tag gets a newline plus one extra indent level for its first child.
            elem.text = '\n' + indent * (level + 1)
        if pending:
            # Indent the following sibling/next element to its own level.
            elem.tail = '\n' + indent * pending[0][0]
        else:
            elem.tail = '\n' + indent * (level - 1)
        pending[0:0] = child_entries
|
def main():
    """
    Main entry: build a lexicon XML which maps words to BPE symbol sequences,
    using the BPE symbols as the phoneme inventory.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--bpe_vocab', required=True)
    arg_parser.add_argument('--word_vocab', required=True)
    arg_parser.add_argument('--unk')
    arg_parser.add_argument('--skip_special', action='store_true')
    arg_parser.add_argument('--lower_case', action='store_true')
    arg_parser.add_argument('--output')
    args = arg_parser.parse_args()
    bpe_syms = parse_vocab(args.bpe_vocab)
    words = parse_vocab(args.word_vocab)
    print(('BPE symbols: num %i, first %r' % (len(bpe_syms), bpe_syms[0])))
    print(('Words: num %i, first %r' % (len(words), words[0])))
    print('Build BPE prefix tree...')
    bpe = bpe_utils.PrefixTree()
    for bpe_sym in bpe_syms:
        bpe.add(bpe_sym)
    print('Build lexicon...')
    xml = ElementTree.Element('lexicon')
    # Phoneme inventory: one <phoneme> per BPE symbol.
    xml_phone_inventory = ElementTree.SubElement(xml, 'phoneme-inventory')
    for bpe_sym in bpe_syms:
        xml_phone = ElementTree.SubElement(xml_phone_inventory, 'phoneme')
        ElementTree.SubElement(xml_phone, 'symbol').text = bpe_sym
        ElementTree.SubElement(xml_phone, 'variation').text = 'context'
    visited_words = set()

    def visit_word(word):
        """
        Add one <lemma> entry for the word, with one <phon> per BPE decomposition.

        :param str word:
        """
        if word in visited_words:
            return
        visited_words.add(word)
        bpe_sym_seqs = bpe_utils.CharSyncSearch(bpe=bpe, word=word).search()
        if not bpe_sym_seqs:
            print(('no BPE seq found for word %r' % word))
            return
        xml_lemma = ElementTree.SubElement(xml, 'lemma')
        ElementTree.SubElement(xml_lemma, 'orth').text = word
        for bpe_sym_seq in bpe_sym_seqs:
            ElementTree.SubElement(xml_lemma, 'phon').text = ' '.join(bpe_sym_seq)

    for word in words:
        if args.lower_case:
            word = word.lower()
        if not word:
            continue
        if args.skip_special and ((word.startswith('[') and word.endswith(']'))
                                  or (word.startswith('<') and word.endswith('>'))):
            # Special tokens like [NOISE] or <s> get no lemma entry.
            continue
        visit_word(word)
    # Also add BPE symbols which are full words themselves (no merge suffix).
    for bpe_sym in bpe_syms:
        if bpe_sym.endswith(bpe_utils.BpeMergeSymbol):
            continue
        if bpe_sym not in words:
            continue
        visit_word(bpe_sym)
    if args.output:
        xml_prettify(xml)
        xml_str = ElementTree.tostring(xml, encoding='utf-8')
        with open(args.output, 'wb') as f:
            f.write(xml_str)
        print('Wrote XML:', args.output)
    else:
        print('Specify --output to save the XML.')
|
class WerComputeGraph():
    """
    Creates TF computation graph to calculate the WER.
    We accumulate the absolute number of edits and normalize by the accumulated seq lens.
    """

    def __init__(self):
        # Batched string inputs: hypotheses and references, words separated by spaces.
        self.hyps = tf_compat.v1.placeholder(tf.string, [None])
        self.refs = tf_compat.v1.placeholder(tf.string, [None])
        # Per-sequence word edit distance and reference word counts.
        (self.wer, self.ref_num_words) = tf_util.string_words_calc_wer(hyps=self.hyps, refs=self.refs)
        # Running totals across all step() calls, kept in TF variables.
        self.total_wer_var = tf.Variable(initial_value=0, trainable=False, dtype=tf.int64)
        self.total_ref_num_words_var = tf.Variable(initial_value=0, trainable=False, dtype=tf.int64)
        self.update_total_wer = self.total_wer_var.assign_add(tf.reduce_sum(self.wer))
        self.update_ref_num_words = self.total_ref_num_words_var.assign_add(tf.reduce_sum(self.ref_num_words))
        # Overall normalized WER after applying the accumulation updates above.
        self.updated_normalized_wer = (tf.cast(self.update_total_wer, tf.float32) / tf.cast(self.update_ref_num_words, tf.float32))

    def step(self, session, hyps, refs):
        """
        Accumulate one batch and return the updated overall (normalized) WER.

        :param tf.compat.v1.Session session:
        :param list[str] hyps:
        :param list[str] refs:
        :return: updated normalized WER
        :rtype: float
        """
        return session.run(self.updated_normalized_wer, feed_dict={self.hyps: hyps, self.refs: refs})
|
def calc_wer_on_dataset(dataset, refs, options, hyps):
    """
    Iterate over the reference seqs (either from the dataset or from the refs dict),
    look up the corresponding hypothesis per seq tag, and accumulate the WER via the
    module-global ``wer_compute`` graph and TF ``session``.

    :param Dataset|None dataset:
    :param dict[str,str]|None refs: seq tag -> ref string (words delimited by space)
    :param options: argparse.Namespace
    :param dict[str,str] hyps: seq tag -> hyp string (words delimited by space)
    :return: WER
    :rtype: float
    """
    assert (dataset or refs)
    start_time = time.time()
    seq_len_stats = {'refs': Stats(), 'hyps': Stats()}
    seq_idx = options.startseq
    if (options.endseq < 0):
        # -1 means: no upper limit.
        options.endseq = float('inf')
    wer = 1.0
    remaining_hyp_seq_tags = set(hyps.keys())
    interactive = (util.is_tty() and (not log.verbose[5]))
    collected = {'hyps': [], 'refs': []}
    max_num_collected = 1  # batch size for the WER graph updates
    if dataset:
        dataset.init_seq_order(epoch=1)
    else:
        # Sort by ref length for more uniform batches / progress.
        refs = sorted(refs.items(), key=(lambda item: len(item[1])))
    while True:
        if (seq_idx > options.endseq):
            break
        if dataset:
            if (not dataset.is_less_than_num_seqs(seq_idx)):
                break
            dataset.load_seqs(seq_idx, (seq_idx + 1))
            complete_frac = dataset.get_complete_frac(seq_idx)
            seq_tag = dataset.get_tag(seq_idx)
            assert isinstance(seq_tag, str)
            ref = dataset.get_data(seq_idx, options.key)
            if isinstance(ref, numpy.ndarray):
                # Scalar string array -> plain value.
                assert (ref.shape == ())
                ref = ref.flatten()[0]
            if isinstance(ref, bytes):
                ref = ref.decode('utf8')
            assert isinstance(ref, str)
            try:
                num_seqs_s = str(dataset.num_seqs)
            except NotImplementedError:
                # Some datasets only know an estimate, or nothing at all.
                try:
                    num_seqs_s = ('~%i' % dataset.estimated_num_seqs)
                except TypeError:
                    num_seqs_s = '?'
        else:
            if (seq_idx >= len(refs)):
                break
            complete_frac = ((seq_idx + 1) / float(len(refs)))
            (seq_tag, ref) = refs[seq_idx]
            assert isinstance(seq_tag, str)
            assert isinstance(ref, str)
            num_seqs_s = str(len(refs))
        # Progress string incl. remaining-time estimate.
        start_elapsed = (time.time() - start_time)
        progress_prefix = ('%i/%s (WER %.02f%%)' % (seq_idx, num_seqs_s, (wer * 100)))
        progress = ('%s (%.02f%%)' % (progress_prefix, (complete_frac * 100)))
        if (complete_frac > 0):
            total_time_estimated = (start_elapsed / complete_frac)
            remaining_estimated = (total_time_estimated - start_elapsed)
            progress += (' (%s)' % hms(remaining_estimated))
        # Raises KeyError if the hyps do not cover this seq tag.
        remaining_hyp_seq_tags.remove(seq_tag)
        hyp = hyps[seq_tag]
        seq_len_stats['hyps'].collect([len(hyp)])
        seq_len_stats['refs'].collect([len(ref)])
        collected['hyps'].append(hyp)
        collected['refs'].append(ref)
        if (len(collected['hyps']) >= max_num_collected):
            # Flush the batch into the accumulating WER graph.
            wer = wer_compute.step(session, **collected)
            del collected['hyps'][:]
            del collected['refs'][:]
        if interactive:
            util.progress_bar_with_time(complete_frac, prefix=progress_prefix)
        elif log.verbose[5]:
            print(progress_prefix, ('seq tag %r, ref/hyp len %i/%i chars' % (seq_tag, len(ref), len(hyp))))
        seq_idx += 1
    if (len(collected['hyps']) > 0):
        # Flush the final partial batch.
        wer = wer_compute.step(session, **collected)
    print(('Done. Num seqs %i. Total time %s.' % (seq_idx, hms((time.time() - start_time)))), file=log.v1)
    print(('Remaining num hyp seqs %i.' % (len(remaining_hyp_seq_tags),)), file=log.v1)
    if dataset:
        print(('More seqs which we did not dumped: %s.' % dataset.is_less_than_num_seqs(seq_idx)), file=log.v1)
    for key in ['hyps', 'refs']:
        seq_len_stats[key].dump(stream_prefix=('Seq-length %r %r ' % (key, options.key)), stream=log.v2)
    if options.expect_full:
        assert (not remaining_hyp_seq_tags), 'There are still remaining hypotheses.'
    return wer
|
def init(config_filename, log_verbosity):
    """
    Initialize RETURNN (TensorFlow backend) for the calculate-WER task.

    The call order below matters: exception hooks and thread hacks first,
    then config loading, then logging, then the backend engine.

    :param str config_filename: filename to config-file; may be empty/None, then an empty config is used
    :param int log_verbosity: RETURNN log verbosity level (e.g. 4, or 5 for per-seq output)
    """
    rnn.init_better_exchook()  # nicer tracebacks
    rnn.init_thread_join_hack()
    if config_filename:
        print(('Using config file %r.' % config_filename))
        assert os.path.exists(config_filename)
    # Called even when no config file is given (config_filename is falsy then).
    rnn.init_config(config_filename=config_filename, command_line_options=[])
    global config
    config = rnn.config
    # Overwrite config entries specific to this task.
    config.set('task', 'calculate_wer')
    config.set('log', None)
    config.set('log_verbosity', log_verbosity)
    config.set('use_tensorflow', True)
    rnn.init_log()
    print('Returnn calculate-word-error-rate starting up.', file=log.v1)
    rnn.returnn_greeting()
    rnn.init_backend_engine()
    assert util.BackendEngine.is_tensorflow_selected(), 'this is only for TensorFlow'
    rnn.init_faulthandler()
    rnn.print_task_properties()
|
def load_hyps_refs(filename):
    """
    Load a seq-tag -> text dict from a RETURNN search-output dump ("py" format).

    The file contains a Python dict literal, either ``{seq_tag: text}`` or
    ``{seq_tag: [(score, text), ...]}`` (n-best list). In the n-best case,
    only the first (best) entry per seq tag is kept.

    :param str filename: path to the dump; ``.gz`` files are transparently decompressed
    :return: dict of seq_tag -> text
    :rtype: dict[str,str]
    """
    # Fixed: file handles were leaked before; now closed via context managers.
    if filename.endswith('.gz'):
        import gzip
        with gzip.open(filename, 'rt') as in_file:
            content_str = in_file.read()
    else:
        with open(filename) as in_file:
            content_str = in_file.read()
    # SECURITY: eval() executes arbitrary code from the file -- only use with
    # trusted files. Kept as eval (not ast.literal_eval) to stay compatible
    # with existing dumps; presumably they can contain non-literal values -- TODO confirm.
    content = eval(content_str)
    assert isinstance(content, dict)
    assert (len(content) > 0)
    example_hyp = next(iter(content.items()))
    assert isinstance(example_hyp[0], str)  # seq tag
    if isinstance(example_hyp[1], list):
        # n-best list of (score, text) tuples; keep the best text only.
        assert isinstance(example_hyp[1][0][1], str)
        content = {seq_tag: nbest_list[0][1] for (seq_tag, nbest_list) in content.items()}
    else:
        assert isinstance(example_hyp[1], str)
    return content
|
def main(argv):
    """
    Main entry of the calculate-WER tool.

    Loads hypotheses (and references, either from a file or via a dataset),
    computes the WER over the whole set, prints it, and optionally writes it
    to a file.

    :param list[str] argv: sys.argv-like argument list (argv[0] is ignored)
    """
    arg_parser = argparse.ArgumentParser(description='Calculate word-error-rate (WER).')
    arg_parser.add_argument('--config', help="filename to config-file. will use dataset 'eval' from it")
    arg_parser.add_argument('--dataset', help='dataset, overwriting config')
    arg_parser.add_argument('--refs', help='same format as hyps. alternative to providing dataset/config')
    arg_parser.add_argument('--hyps', help='hypotheses, dumped via search in py format')
    arg_parser.add_argument('--startseq', type=int, default=0, help='start seq idx (inclusive) (default: 0)')
    arg_parser.add_argument('--endseq', type=int, default=(- 1), help='end seq idx (inclusive) or -1 (default: -1)')
    arg_parser.add_argument('--key', default='raw', help="data-key, e.g. 'data' or 'classes'. (default: 'raw')")
    arg_parser.add_argument('--verbosity', default=4, type=int, help='5 for all seqs (default: 4)')
    # Fixed: argparse applies %-formatting to help strings, so a bare "%" in
    # "WER%" made `--help` crash ("unsupported format character"); escape as %%.
    arg_parser.add_argument('--out', help='if provided, will write WER%% (as string) to this file')
    arg_parser.add_argument('--expect_full', action='store_true', help='full dataset should be scored')
    args = arg_parser.parse_args(argv[1:])
    assert (args.config or args.dataset or args.refs)
    init(config_filename=args.config, log_verbosity=args.verbosity)
    dataset = None
    refs = None
    # References come either directly from a file, or from a dataset
    # (explicit --dataset, or the "wer_data" config entry).
    if args.refs:
        refs = load_hyps_refs(args.refs)
    elif args.dataset:
        dataset = init_dataset(args.dataset)
    elif (config.value('wer_data', 'eval') in ['train', 'dev', 'eval']):
        # Fixed: the condition checks "wer_data" but the old code then read
        # "search_data"; use the same key consistently.
        dataset = init_dataset(config.opt_typed_value(config.value('wer_data', 'eval')))
    else:
        dataset = init_dataset(config.opt_typed_value('wer_data'))
    hyps = load_hyps_refs(args.hyps)
    global wer_compute
    wer_compute = WerComputeGraph()
    # CPU-only session; the WER graph does not need a GPU.
    with tf_compat.v1.Session(config=tf_compat.v1.ConfigProto(device_count={'GPU': 0})) as _session:
        global session
        session = _session
        session.run(tf_compat.v1.global_variables_initializer())
        try:
            wer = calc_wer_on_dataset(dataset=dataset, refs=refs, options=args, hyps=hyps)
            print(('Final WER: %.02f%%' % (wer * 100)), file=log.v1)
            if args.out:
                with open(args.out, 'w') as output_file:
                    output_file.write(('%.02f\n' % (wer * 100)))
                print(('Wrote WER%% to %r.' % args.out))
        except KeyboardInterrupt:
            print('KeyboardInterrupt')
            sys.exit(1)
        finally:
            rnn.finalize()
|
def main():
    """
    Main entry of the delete-old-models tool.

    Parses command-line options, initializes RETURNN, lets the engine clean up
    old model checkpoints (with confirmation), and exits with code 1 on
    KeyboardInterrupt.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config')
    parser.add_argument('--cwd', help='will change to this dir')
    parser.add_argument('--model', help='model filenames (default: take from config)')
    parser.add_argument('--scores', help='learning_rate_control file, e.g. newbob.data (default: take from config)')
    parser.add_argument('--dry_run', action='store_true')
    opts = parser.parse_args()
    exit_code = 0
    try:
        if opts.cwd:
            os.chdir(opts.cwd)
        init(extra_greeting='Delete old models.', config_filename=(opts.config or None), config_updates={'need_data': False, 'device': 'cpu'})
        from returnn.__main__ import engine, config
        # Command-line options overwrite the corresponding config entries.
        if opts.model:
            config.set('model', opts.model)
        if opts.scores:
            config.set('learning_rate_file', opts.scores)
        if opts.dry_run:
            config.set('dry_run', True)
        engine.cleanup_old_models(ask_for_confirmation=True)
    except KeyboardInterrupt:
        exit_code = 1
        print('KeyboardInterrupt', file=getattr(log, 'v3', sys.stderr))
        if getattr(log, 'verbose', ([False] * 6))[5]:
            sys.excepthook(*sys.exc_info())
    finalize()
    if exit_code:
        sys.exit(exit_code)
|
def found_sub_seq(sub_seq, seq):
    """
    Check whether ``sub_seq`` occurs as a contiguous sub-sequence of ``seq``.

    :param list[str] sub_seq:
    :param list[str] seq:
    :return: True if some slice of seq equals sub_seq.
      An empty sub_seq is always found, also in an empty seq (fixed: the old
      code returned False for found_sub_seq([], [])).
    :rtype: bool
    """
    sub_len = len(sub_seq)
    # +1 so the empty sub_seq matches, and so we stop at the last position
    # where a full-length match is still possible (no pointless short slices).
    for start in range(len(seq) - sub_len + 1):
        if (seq[start:(start + sub_len)] == sub_seq):
            return True
    return False
|
def iter_dataset(dataset, options, callback):
    """
    Iterate over all sequences of the dataset and call ``callback`` once per
    sequence with its frame length and its "orth" targets.

    :type dataset: Dataset.Dataset
    :param options: unused here; kept for interface compatibility
    :param callback: called as ``callback(frame_len=..., orth=...)``
    """
    dataset.init_seq_order(epoch=1)
    assert ('orth' in dataset.get_target_list())
    idx = 0
    while dataset.is_less_than_num_seqs(idx):
        # NOTE(review): if load_seqs treats the end index as exclusive, this
        # loads an empty range -- confirm against the Dataset API.
        dataset.load_seqs(idx, idx)
        n_frames = dataset.get_seq_length(idx)['data']
        targets = dataset.get_targets('orth', idx)
        callback(frame_len=n_frames, orth=targets)
        idx += 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.