code stringlengths 17 6.64M |
|---|
def test_demo_start_subprocess_patched():
    """Spawn a subprocess with the atfork-patch lib preloaded via LD_PRELOAD."""
    from returnn.util.basic import get_patch_atfork_lib
    from subprocess import check_call
    sub_env = os.environ.copy()
    sub_env['LD_PRELOAD'] = get_patch_atfork_lib()
    print('LD_PRELOAD:', get_patch_atfork_lib())
    check_call([sys.executable, __file__, 'patched_check_demo_start_subprocess'], env=sub_env)
|
def test_hdf_dataset_init():
    """hdf_dataset_init() should create/populate the HDF file on disk."""
    # tempfile.mkstemp instead of the deprecated, race-prone tempfile.mktemp.
    fd, hdf_filename = tempfile.mkstemp(suffix='.hdf', prefix='nose-dataset-init')
    os.close(fd)  # only the unique path is needed
    try:
        hdf_dataset_init(hdf_filename)
        assert os.path.exists(hdf_filename)
    finally:
        # Clean up even if the assertion fails.
        os.remove(hdf_filename)
|
def test_hdf_create():
    """Dump a DummyDataset into a freshly created HDF dataset file."""
    # tempfile.mkstemp instead of the deprecated, race-prone tempfile.mktemp.
    fd, hdf_filename = tempfile.mkstemp(suffix='.hdf', prefix='nose-dataset-create')
    os.close(fd)  # only the unique path is needed
    try:
        hdf_dataset = hdf_dataset_init(hdf_filename)
        assert os.path.exists(hdf_filename)
        dataset = DummyDataset(input_dim=2, output_dim=3, num_seqs=4)
        dataset.init_seq_order(epoch=1)
        hdf_dump_from_dataset(dataset, hdf_dataset, DictAsObj(options))
        hdf_close(hdf_dataset)
    finally:
        # Clean up even if dumping fails.
        os.remove(hdf_filename)
|
def test_hdf_create_and_load():
    """Dump a DummyDataset to HDF, then load it back via HDFDataset.add_file."""
    # tempfile.mkstemp instead of the deprecated, race-prone tempfile.mktemp.
    fd, hdf_filename = tempfile.mkstemp(suffix='.hdf', prefix='nose-dataset-load')
    os.close(fd)  # only the unique path is needed
    try:
        hdf_dataset = hdf_dataset_init(hdf_filename)
        assert os.path.exists(hdf_filename)
        dataset = DummyDataset(input_dim=2, output_dim=3, num_seqs=4)
        dataset.init_seq_order(epoch=1)
        hdf_dump_from_dataset(dataset, hdf_dataset, DictAsObj(options))
        hdf_close(hdf_dataset)
        loaded_dataset = HDFDataset()
        loaded_dataset.add_file(hdf_filename)
    finally:
        # Clean up even if dump/load fails.
        os.remove(hdf_filename)
|
def test_hdf_create_unicode_labels():
    """HDF dump must cope with non-ASCII label strings."""
    # tempfile.mkstemp instead of the deprecated, race-prone tempfile.mktemp.
    fd, hdf_filename = tempfile.mkstemp(suffix='.hdf', prefix='nose-dataset-create')
    os.close(fd)  # only the unique path is needed
    try:
        hdf_dataset = hdf_dataset_init(hdf_filename)
        assert os.path.exists(hdf_filename)
        dataset = DummyDataset(input_dim=2, output_dim=3, num_seqs=4)
        assert 'classes' in dataset.get_target_list()
        dataset.labels['classes'] = ['’', 'ä', 'x']
        dataset.init_seq_order(epoch=1)
        hdf_dump_from_dataset(dataset, hdf_dataset, DictAsObj(options))
        hdf_close(hdf_dataset)
    finally:
        # Clean up even if dumping fails.
        os.remove(hdf_filename)
|
def test_pack_padded():
    """Exercise rf.pack_padded over the (batch, time) dims."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tuple[Tensor, Dim]:
            return rf.pack_padded(x, dims=[batch_dim, time_dim], enforce_sorted=False)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, pack_dim = model(extern_data['data'])
        out.mark_as_default_output(shape=(pack_dim, in_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_reshape():
    """Exercise rf.reshape, swapping the order of (time, in) into (in, time)."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.reshape(x, in_dims=(time_dim, in_dim), out_dims=(in_dim, time_dim))

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        reshaped = model(extern_data['data'])
        reshaped.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_expand_dim():
    """Exercise rf.expand_dim with a new static dim."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    expand_dim = Dim(3, name='expand')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.expand_dim(x, expand_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        expanded = model(extern_data['data'])
        expanded.mark_as_default_output(shape=(batch_dim, time_dim, expand_dim, in_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_concat():
    """Exercise rf.concat, concatenating the input with itself along the feature dim."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tuple[Tensor, Dim]:
            return rf.concat((x, in_dim), (x, in_dim))

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, concat_dim = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, concat_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_pad():
    """Exercise rf.pad on both the time and the feature axis."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tuple[Tensor, Tuple[Dim, Dim]]:
            padded, (new_time, new_feat) = rf.pad(
                x, axes=[time_dim, in_dim], padding=[(1, 2), (3, 4)], value=0)
            return padded, (new_time, new_feat)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, (new_time, new_feat) = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, new_time, new_feat))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_pad_time():
    """Exercise rf.pad on the time axis only (left-pad by one frame)."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        # Annotation fixed: only one axis is padded, so only one new dim comes back
        # (the old annotation claimed Tuple[Dim, Dim]).
        def __call__(self, x: Tensor) -> Tuple[Tensor, Tuple[Dim]]:
            padded, (new_time,) = rf.pad(x, axes=[time_dim], padding=[(1, 0)], value=0)
            return padded, (new_time,)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, (new_time,) = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, new_time, in_dim))

    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step)
|
def test_gather():
    """Exercise rf.gather with a scalar index on the time axis."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.gather(x, indices=0, axis=time_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        first_frame = model(extern_data['data'])
        first_frame.mark_as_default_output(shape=(batch_dim, in_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_gather_2d_indices():
    """Exercise rf.gather with a (batch, time) index tensor on the feature axis."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'),
        'classes': Tensor('classes', [batch_dim, time_dim], dtype='int32', sparse_dim=in_dim),
    })

    class _Net(rf.Module):
        def __call__(self, x: Tensor, y: Tensor) -> Tensor:
            return rf.gather(x, indices=y, axis=in_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        gathered = model(extern_data['data'], extern_data['classes'])
        gathered.mark_as_default_output(shape=(batch_dim, time_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_gather_feature_dim():
    """rf.gather on the time axis should keep the marked feature dim."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], feature_dim=in_dim, dtype='float32'),
    })

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.gather(x, indices=0, axis=time_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        gathered = model(extern_data['data'])
        assert gathered.feature_dim == in_dim
        gathered.mark_as_default_output(shape=(batch_dim, in_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_slice():
    """Exercise rf.slice on the time axis (start=1, size=2)."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tuple[Tensor, Dim]:
            sliced, sliced_time = rf.slice(x, axis=time_dim, start=1, size=2)
            return sliced, sliced_time

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, sliced_time = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, sliced_time, in_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_shift_right():
    """Exercise rf.shift_right on a sparse sequence, padding with 0."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim], sparse_dim=in_dim, dtype='int32'),
    })

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.shift_right(x, axis=time_dim, pad_value=0)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        shifted = model(extern_data['data'])
        shifted.mark_as_default_output(shape=(batch_dim, time_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_reverse_sequence():
    """Exercise rf.reverse_sequence along the time axis."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.reverse_sequence(x, axis=time_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        reversed_seq = model(extern_data['data'])
        reversed_seq.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_where():
    """Exercise rf.where with tensors of different dims (broadcasting the false branch)."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({
        'cond': Tensor('cond', [batch_dim, time_dim], dtype='bool'),
        'true': Tensor('true', [batch_dim, time_dim, in_dim], dtype='float32'),
        'false': Tensor('false', [batch_dim, in_dim], dtype='float32'),
    })

    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        selected = rf.where(extern_data['cond'], extern_data['true'], extern_data['false'])
        selected.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    def _make_model(*, epoch, step):
        return rf.Module()

    run_model(extern_data, _make_model, _forward_step)
|
def test_where_int():
    """Exercise rf.where with a plain Python int as the false branch."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({
        'cond': Tensor('cond', [batch_dim, time_dim], dtype='bool'),
        'true': Tensor('true', [batch_dim, time_dim, in_dim], dtype='float32'),
    })

    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        selected = rf.where(extern_data['cond'], extern_data['true'], 0)
        selected.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    def _make_model(*, epoch, step):
        return rf.Module()

    run_model(extern_data, _make_model, _forward_step)
|
def test_copy_masked():
    """Tensor.copy_masked followed by rf.pool1d over the time axis."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    # Annotation fixed: the model factory below builds a plain rf.Module,
    # not rf.Conv1d as the old annotation claimed.
    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        x = extern_data['data']
        x = x.copy_masked(1)
        x, _ = rf.pool1d(x, mode='avg', pool_size=3, strides=1, padding='same', in_spatial_dim=time_dim)
        x.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, (lambda *, epoch, step: rf.Module()), _forward_step)
|
def test_cast_sparse():
    """rf.cast on a sparse argmax result."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    # Annotation fixed: the model factory below builds a plain rf.Module,
    # not rf.Conv1d as the old annotation claimed.
    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        x = rf.reduce_argmax(extern_data['data'], axis=in_dim)
        assert x.sparse_dim == in_dim
        x.mark_as_output('argmax', shape=[batch_dim, time_dim])
        rf.cast(x, 'float32').mark_as_output('float', shape=[batch_dim, time_dim])

    run_model(extern_data, (lambda *, epoch, step: rf.Module()), _forward_step)
|
def test_dot_attention():
    """rf.dot_attention with keys/values moved onto a separate kv axis."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    key_dim = Dim(7, name='key')
    value_dim = Dim(13, name='value')
    extern_data = TensorDict({
        'q': Tensor('q', [batch_dim, time_dim, key_dim], dtype='float32'),
        'k': Tensor('k', [batch_dim, time_dim, key_dim], dtype='float32'),
        'v': Tensor('v', [batch_dim, time_dim, value_dim], dtype='float32', feature_dim_axis=2),
    })

    class _Net(rf.Module):
        def __call__(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
            # Plain string literal; the old f-string had no placeholders.
            kv_axis = Dim(None, name='kv-axis')
            # Keys/values get their own axis so attention reduces over kv_axis, not time.
            k, _ = rf.replace_dim(k, in_dim=time_dim, out_dim=kv_axis)
            v, _ = rf.replace_dim(v, in_dim=time_dim, out_dim=kv_axis)
            return rf.dot_attention(q, k, v, axis=kv_axis, key_dim=key_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(q=extern_data['q'], k=extern_data['k'], v=extern_data['v'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, value_dim))

    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step)
|
def test_self_attention():
    """Exercise rf.SelfAttention over the time axis."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.self_att = rf.SelfAttention(
                in_dim=in_dim,
                proj_dim=Dim(5, name='out'),
                key_dim_total=Dim(21, name='key-dim-total'),
                value_dim_total=Dim(33, name='value-dim-total'),
                num_heads=3)
            self.out_dim = self.self_att.out_dim

        def __call__(self, x: Tensor, *, axis: Dim) -> Tensor:
            """Forward."""
            return self.self_att(x, axis=axis)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'], axis=time_dim)
        out.mark_as_default_output(shape=(batch_dim, time_dim, model.out_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_causal_self_attention():
    """Run rf.CausalSelfAttention one step at a time via rf.scan (TF check disabled)."""
    from returnn.tensor import single_step_dim
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.self_att = rf.CausalSelfAttention(in_dim=in_dim, proj_dim=Dim(5, name='out'), key_dim_total=Dim(21, name='key-dim-total'), value_dim_total=Dim(33, name='value-dim-total'), num_heads=3)
            self.out_dim = self.self_att.out_dim

        def __call__(self, x: Tensor, *, axis: Dim) -> Tensor:
            'forward'

            # Scan body: one single-step attention call, threading the
            # attention state through rf.State.
            def _body(_x: Tensor, _state: rf.State) -> Tuple[(Tensor, rf.State)]:
                (_y, _state.self_att) = self.self_att(_x, axis=single_step_dim, state=_state.self_att)
                return (_y, _state)

            # Scan over the given (time) axis; the initial state comes from
            # default_initial_state for the batch dim.
            (y, _, _) = rf.scan(spatial_dim=axis, xs=x, body=_body, ys=Tensor('y', dims=[batch_dim, self.out_dim], dtype='float32'), initial=rf.State(self_att=self.self_att.default_initial_state(batch_dims=[batch_dim])))
            return y

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'], axis=time_dim)
        out.mark_as_default_output(shape=(batch_dim, time_dim, model.out_dim))

    # test_tensorflow=False: the TF consistency check is skipped for this test.
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, test_tensorflow=False)
|
def test_relative_positional_encoding():
    """Exercise rf.relative_positional_encoding with query == key/value axis."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(8, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor, *, axis: Dim) -> Tuple[Tensor, Dim]:
            return rf.relative_positional_encoding(
                key_value_spatial_dim=axis, query_spatial_dim=axis, feat_dim=in_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        encoding, rel_dim = model(extern_data['data'], axis=time_dim)
        encoding.mark_as_default_output(shape=(rel_dim, in_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_rel_pos_self_attention():
    """Exercise rf.RelPosSelfAttention over the time axis."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(8, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.self_att = rf.RelPosSelfAttention(
                in_dim=in_dim,
                proj_dim=Dim(5, name='out'),
                key_dim_total=Dim(21, name='key-dim-total'),
                value_dim_total=Dim(33, name='value-dim-total'),
                num_heads=3)
            self.out_dim = self.self_att.out_dim

        def __call__(self, x: Tensor, *, axis: Dim) -> Tensor:
            """Forward."""
            return self.self_att(x, axis=axis)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'], axis=time_dim)
        out.mark_as_default_output(shape=(batch_dim, time_dim, model.out_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_sinusoidal_positional_encoding():
    """Compare rf.sinusoidal_positional_encoding against the TF reference implementation."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    feat_dim = Dim(8, name='feat')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, feat_dim], dtype='float32')})

    def _forward_step(**_kwargs):
        out = rf.sinusoidal_positional_encoding(spatial_dim=time_dim, feat_dim=feat_dim)
        out.mark_as_default_output(shape=(time_dim, feat_dim))

    res = run_model(extern_data, (lambda *, epoch, step: rf.Module()), _forward_step)
    from returnn.tf.util import basic as tf_util
    with tf_scope() as session:
        # Reference: TF get_positional_encoding with matching channel count and
        # the actual output length from the run above.
        tf_ref = tf_util.get_positional_encoding(num_channels=feat_dim.dimension, length=res.data['output'].raw_tensor.shape[0])
        tf_ref_v = session.run(tf_ref)
    # Allow small numerical differences between backends.
    np.testing.assert_almost_equal(res.data['output'].raw_tensor, tf_ref_v, decimal=5)
|
def test_CausalSelfAttention():
    """Cross-check rf.CausalSelfAttention against the TF 'self_attention' layer.

    Runs the RF module first, exporting its QKV weights, input data and seq lens,
    then builds an equivalent TF network, loads the same weights and compares
    the outputs numerically.
    """
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    feat_dim = Dim(8, name='feat')
    key_dim = Dim(6, name='key')
    value_dim = Dim(10, name='value')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, feat_dim], dtype='float32')})

    def _forward_step(*, model: rf.CausalSelfAttention, extern_data: TensorDict):
        data = extern_data['data']
        # Export inputs and seq lens so the TF run below can be fed identically.
        data.mark_as_output('data', shape=[batch_dim, time_dim, feat_dim])
        time_dim.dyn_size_ext.mark_as_output('seq_len', shape=[batch_dim])
        (out, _) = model(data, axis=time_dim)
        out.mark_as_default_output(shape=(batch_dim, time_dim, value_dim))
        # Export the combined QKV projection weights for loading into TF.
        model.qkv.weight.mark_as_output('qkv_weight', shape=[feat_dim, ((2 * key_dim) + value_dim)])

    res = run_model(extern_data, (lambda *, epoch, step: rf.CausalSelfAttention(in_dim=feat_dim, proj_dim=None, key_dim_total=key_dim, value_dim_total=value_dim, num_heads=2, with_bias=False)), _forward_step, test_tensorflow=False)
    extern_data.reset_content()
    with tf_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        # Equivalent TF net: causal ('attention_left_only') self-attention layer.
        net_dict = {'self_att': {'class': 'self_attention', 'from': 'data', 'num_heads': 2, 'total_key_dim': key_dim.dimension, 'attention_left_only': True, 'out_dim': value_dim, 'is_output_layer': True}}
        net = TFNetwork(extern_data=ExternData({'data': {'dims': [batch_dim, time_dim, feat_dim], 'time_dim_axis': 1, 'feature_dim_axis': 2, 'dtype': 'float32', 'version': 1}}))
        net.construct_from_dict(net_dict)
        layer = net.get_default_output_layer()
        # Load the RF QKV weights into the TF layer so both compute the same function.
        layer.params['QKV'].load(res.data['qkv_weight'].raw_tensor, session=session)
        out = layer.output.copy_transpose([batch_dim, time_dim, value_dim]).copy_masked(0.0)
        out_tf_v = session.run(out.raw_tensor, feed_dict={net.extern_data.data['data'].placeholder: res.data['data'].raw_tensor, net.extern_data.data['data'].dims[1].dyn_size_ext.raw_tensor: res.data['seq_len'].raw_tensor})
        numpy.testing.assert_almost_equal(res.data['output'].raw_tensor, out_tf_v, decimal=5)
|
def test_linear_direct():
    """Use rf.Linear directly as the model."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim, out_dim = Dim(7, name='in'), Dim(13, name='out')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'),
        'classes': Tensor('classes', [batch_dim, time_dim], dtype='int32', sparse_dim=out_dim),
    })

    def _forward_step(*, model: rf.Linear, extern_data: TensorDict):
        projected = model(extern_data['data'])
        projected.mark_as_default_output()

    def _make_model(*, epoch, step):
        return rf.Linear(in_dim, out_dim)

    run_model(extern_data, _make_model, _forward_step)
|
def test_linear():
    """rf.Linear wrapped inside a custom module."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim, out_dim = Dim(7, name='in'), Dim(13, name='out')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'),
        'classes': Tensor('classes', [batch_dim, time_dim], dtype='int32', sparse_dim=out_dim),
    })

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.linear = rf.Linear(in_dim, out_dim)

        def __call__(self, x: Tensor) -> Tensor:
            return self.linear(x)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        projected = model(extern_data['data'])
        projected.mark_as_default_output()

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_state():
    """tree.map_structure must rebuild rf.LstmState as the same structure type."""
    import tree
    s = rf.LstmState(h=Tensor('h', (), 'float32'), c=Tensor('c', (), 'float32'))
    mapped = tree.map_structure(lambda t: t, s)
    assert isinstance(mapped, rf.LstmState)
    assert mapped is not s  # new container object ...
    assert mapped.h is s.h  # ... but the leaves are passed through unchanged
    assert mapped.c is s.c
|
def test_2layers():
    """Two stacked rf.Linear layers with a ReLU in between."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim, hidden_dim, out_dim = Dim(7, name='in'), Dim(11, name='hidden'), Dim(13, name='out')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'),
        'classes': Tensor('classes', [batch_dim, time_dim], dtype='int32', sparse_dim=out_dim),
    })

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.layer1 = rf.Linear(in_dim, hidden_dim)
            self.layer2 = rf.Linear(hidden_dim, out_dim)

        def __call__(self, x: Tensor) -> Tensor:
            hidden = rf.relu(self.layer1(x))
            return self.layer2(hidden)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output()

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_linear_same_dim():
    """Two rf.Linear layers where the second maps out_dim -> out_dim (same dim on both sides)."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim, out_dim = Dim(7, name='in'), Dim(13, name='out')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'),
        'classes': Tensor('classes', [batch_dim, time_dim], dtype='int32', sparse_dim=out_dim),
    })

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.layer1 = rf.Linear(in_dim, out_dim)
            self.layer2 = rf.Linear(out_dim, out_dim)

        def __call__(self, x: Tensor) -> Tensor:
            hidden = rf.relu(self.layer1(x))
            return self.layer2(hidden)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output()

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_linear_cross_entropy():
    """Cross-entropy loss on logits from a small two-layer net."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim, out_dim = Dim(7, name='in'), Dim(13, name='out')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'),
        'classes': Tensor('classes', [batch_dim, time_dim], dtype='int32', sparse_dim=out_dim),
    })

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.layer1 = rf.Linear(in_dim, out_dim)
            self.layer2 = rf.Linear(out_dim, out_dim)

        def __call__(self, x: Tensor) -> Tensor:
            hidden = rf.relu(self.layer1(x))
            return self.layer2(hidden)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        logits = model(extern_data['data'])
        targets = extern_data['classes']
        loss = rf.cross_entropy(estimated=logits, estimated_type='logits', target=targets, axis=out_dim)
        loss.mark_as_default_output()

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_linear_ctc():
    """CTC loss on logits over classes+blank, with separate input/target time axes."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    target_time_dim = Dim(Tensor('target_time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    hidden_dim = Dim(13, name='hidden')
    out_dim = Dim(11, name='classes')
    out_wb_dim = out_dim + 1  # classes plus the blank label
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'),
        'classes': Tensor('classes', [batch_dim, target_time_dim], dtype='int32', sparse_dim=out_dim),
    })

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.layer1 = rf.Linear(in_dim, hidden_dim)
            self.layer2 = rf.Linear(hidden_dim, out_wb_dim)

        def __call__(self, x: Tensor) -> Tensor:
            hidden = rf.relu(self.layer1(x))
            return self.layer2(hidden)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        logits = model(extern_data['data'])
        targets = extern_data['classes']
        loss = rf.ctc_loss(
            logits=logits,
            targets=targets,
            input_spatial_dim=time_dim,
            targets_spatial_dim=target_time_dim,
            blank_index=out_wb_dim.dimension - 1)
        loss.mark_as_default_output()

    def _make_model(*, epoch, step):
        return _Net()

    # Input sequences must be long enough relative to the targets for CTC.
    run_model(
        extern_data, _make_model, _forward_step,
        dyn_dim_min_sizes={time_dim: 4, target_time_dim: 2},
        dyn_dim_max_sizes={time_dim: 11, target_time_dim: 5})
|
def test_dropout():
    """Exercise rf.dropout with on_forward=True and the default broadcast setting."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.dropout(x, 0.5, axis=rf.dropout_broadcast_default() and in_dim, on_forward=True)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        dropped = model(extern_data['data'])
        dropped.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_dim_value():
    """Multiply up the dim values of all axes into a scalar int64 tensor."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            product = rf.ones((), dtype='int64')
            for dim in x.dims:
                product *= rf.cast(rf.convert_to_tensor(dim.get_dim_value_tensor()), 'int64')
            return product

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        total = model(extern_data['data'])
        total.mark_as_default_output(shape=())

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_dim_mask():
    """Dim.get_mask: caching on the dim, and behavior for a copied dim."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            mask1 = time_dim.get_mask()
            mask1.verify_out_shape({batch_dim, time_dim})
            # A second get_mask() call on the same dim returns the identical object.
            mask2 = time_dim.get_mask()
            assert mask1 is mask2
            time_dim_copy = Dim(None, name='time_copy')
            time_dim_copy.copy_from(time_dim)
            assert time_dim_copy != time_dim
            mask3 = time_dim_copy.get_mask()
            mask3.verify_out_shape({batch_dim, time_dim_copy})
            # Different dim object -> different mask tensor object.
            assert mask1 is not mask3
            if rf.is_backend_raw_tensor_dim_tag_independent():
                # Backends whose raw tensors are dim-tag independent can share
                # the raw tensor between the two masks.
                assert mask1.raw_tensor is mask3.raw_tensor
            return mask1

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        mask = model(extern_data['data'])
        mask.mark_as_default_output(shape=(batch_dim, time_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_param_assign():
    """Parameter.assign must take effect immediately; copies made before keep old values."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.param = rf.Parameter(dims=(), dtype='int32')
            self.param.initial = 2

        def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
            # Snapshot before each assign; the order of operations is the test.
            before = rf.copy(self.param)
            self.param.assign(5)
            after_first = rf.copy(self.param)
            self.param.assign(7)
            return before, after_first, self.param

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        a, b, c = model(extern_data['data'])
        a.mark_as_output('a', shape=())
        b.mark_as_output('b', shape=())
        c.mark_as_output('c', shape=())

    def _make_model(*, epoch, step):
        return _Net()

    out = run_model(extern_data, _make_model, _forward_step)
    assert out['a'].raw_tensor == 2
    assert out['b'].raw_tensor == 5
    assert out['c'].raw_tensor == 7
|
def test_loss_normalized():
    """mark_as_loss with use_normalized_loss and a custom inverse norm factor."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    def _train_step(*, model: rf.Module, extern_data: TensorDict):
        # model is unused here; removed the old pointless bare `model` statement.
        x = extern_data['data']
        loss = rf.reduce_sum(x, axis=in_dim)
        loss.mark_as_loss('loss', use_normalized_loss=True)
        loss_custom_norm = rf.reduce_sum(loss, axis=time_dim)
        loss_custom_norm.mark_as_loss(
            'loss_custom_norm',
            custom_inv_norm_factor=time_dim.get_size_tensor(),
            use_normalized_loss=True)

    run_model_torch_train(extern_data, (lambda *, epoch, step: rf.Module()), _train_step)
|
def test_loss_normalization():
    """Normalization options must not change the raw summed loss / inv-norm-factor stats."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    # Flags are read by _train_step via closure and toggled between runs below.
    use_normalized = False
    use_custom_inv_norm_factor = False

    def _train_step(*, model: rf.Module, extern_data: TensorDict):
        # model is unused here; removed the old pointless bare `model` statement.
        x = extern_data['data']
        loss = rf.reduce_sum(x, axis=in_dim)
        loss.mark_as_loss(
            'loss',
            custom_inv_norm_factor=(time_dim.get_size_tensor() if use_custom_inv_norm_factor else None),
            use_normalized_loss=use_normalized)

    res1 = run_model_torch_train(extern_data, (lambda *, epoch, step: rf.Module()), _train_step)
    res2 = run_model_torch_train(extern_data, (lambda *, epoch, step: rf.Module()), _train_step)
    assert res1 == res2  # identical runs are deterministic
    use_normalized = True
    res3 = run_model_torch_train(extern_data, (lambda *, epoch, step: rf.Module()), _train_step)
    assert (res3['loss:summed'] == res2['loss:summed']) and (res3['loss:inv_norm_factor'] == res2['loss:inv_norm_factor'])
    use_custom_inv_norm_factor = True
    res4 = run_model_torch_train(extern_data, (lambda *, epoch, step: rf.Module()), _train_step)
    assert (res4['loss:summed'] == res2['loss:summed']) and (res4['loss:inv_norm_factor'] == res2['loss:inv_norm_factor'])
|
def test_rf_range_over_dim():
    """rf.range_over_dim with default and explicit float dtype."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        int_range = rf.range_over_dim(time_dim)
        int_range.mark_as_output('range', shape=[time_dim])
        float_range = rf.range_over_dim(time_dim, dtype='float32')
        float_range.mark_as_output('range_float', shape=[time_dim])

    def _make_model(*, epoch, step):
        return rf.Module()

    run_model(extern_data, _make_model, _forward_step)
|
def test_cond():
    """rf.cond with a predicate on the batch dim value, separate params per branch."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    out_dim = Dim(13, name='out')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.linear_true = rf.Linear(in_dim, out_dim)
            self.linear_false = rf.Linear(in_dim, out_dim)

        def __call__(self, x: Tensor) -> Tensor:
            is_even_batch = (batch_dim.get_dim_value_tensor() % 2) == 0
            return rf.cond(
                pred=is_even_batch,
                true_fn=lambda: self.linear_true(x),
                false_fn=lambda: self.linear_false(x))

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, out_dim))

    def _make_model(*, epoch, step):
        return _Net()

    run_model(extern_data, _make_model, _forward_step)
|
def test_cond_via_time_even():
    """rf.cond predicated on even time length; run with both odd and even max sizes."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    out_dim = Dim(13, name='out')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.linear_true = rf.Linear(in_dim, out_dim)
            self.linear_false = rf.Linear(in_dim, out_dim)

        def __call__(self, x: Tensor) -> Tensor:
            is_even_time = (time_dim.get_dim_value_tensor() % 2) == 0
            return rf.cond(
                pred=is_even_time,
                true_fn=lambda: self.linear_true(x),
                false_fn=lambda: self.linear_false(x))

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, out_dim))

    def _make_model(*, epoch, step):
        return _Net()

    # Run once with an odd and once with an even max time size to hit both branches.
    run_model(extern_data, _make_model, _forward_step, dyn_dim_max_sizes={time_dim: 5})
    run_model(extern_data, _make_model, _forward_step, dyn_dim_max_sizes={time_dim: 6})
|
def test_cond_shared_params():
    """rf.cond where both branches use the same rf.Linear parameters."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    out_dim = Dim(13, name='out')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.linear = rf.Linear(in_dim, out_dim)

        def __call__(self, x: Tensor) -> Tensor:
            is_even_time = (time_dim.get_dim_value_tensor() % 2) == 0
            return rf.cond(
                pred=is_even_time,
                true_fn=lambda: self.linear(x),
                false_fn=lambda: self.linear(x * 2.0))

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, out_dim))

    def _make_model(*, epoch, step):
        return _Net()

    # Odd and even max time sizes to exercise both branches.
    run_model(extern_data, _make_model, _forward_step, dyn_dim_max_sizes={time_dim: 5})
    run_model(extern_data, _make_model, _forward_step, dyn_dim_max_sizes={time_dim: 6})
|
def test_cond_twice_shared_params():
    """Test two consecutive :func:`rf.cond` calls sharing the same branch modules."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    out_dim = Dim(13, name='out')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.pre_linear = rf.Linear(in_dim, out_dim)
            # These two linears are used by *both* rf.cond calls below.
            self.linear_true = rf.Linear(out_dim, out_dim)
            self.linear_false = rf.Linear(out_dim, out_dim)
        def __call__(self, x: Tensor) -> Tensor:
            x = self.pre_linear(x)
            # Second cond has the inverted predicate, so each run takes
            # the true-branch of one cond and the false-branch of the other.
            x = rf.cond(pred=((time_dim.get_dim_value_tensor() % 2) == 0), true_fn=(lambda : self.linear_true(x)), false_fn=(lambda : self.linear_false(x)))
            x = rf.cond(pred=((time_dim.get_dim_value_tensor() % 2) == 1), true_fn=(lambda : self.linear_true(x)), false_fn=(lambda : self.linear_false(x)))
            return x
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, out_dim))
    # Run with odd (5) and even (6) max time size to cover both predicate values.
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 5})
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 6})
|
def test_cond_param_assign():
    """Test :func:`rf.cond` with an in-place parameter update in only one branch."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.param = rf.Parameter(dims=(), dtype='int32')
            self.param.initial = 2
        def __call__(self, x: Tensor) -> Tensor:
            # True branch mutates the param (assign_add); false branch is a no-op.
            rf.cond(pred=((time_dim.get_dim_value_tensor() % 2) == 0), true_fn=(lambda : self.param.assign_add(3)), false_fn=(lambda : None))
            return self.param
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=())
    # Max time 5 (odd): false branch, param stays at its initial value 2.
    out1 = run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 5})
    # Max time 6 (even): true branch, param becomes 2 + 3 = 5.
    out2 = run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 6})
    assert (out1['output'].raw_tensor == 2)
    assert (out2['output'].raw_tensor == 5)
|
def test_cond_param_assign2():
    """Test :func:`rf.cond` with a different in-place parameter update in each branch."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.param = rf.Parameter(dims=(), dtype='int32')
            self.param.initial = 2
        def __call__(self, x: Tensor) -> Tensor:
            # Both branches mutate the param, but with different increments.
            rf.cond(pred=((time_dim.get_dim_value_tensor() % 2) == 0), true_fn=(lambda : self.param.assign_add(3)), false_fn=(lambda : self.param.assign_add(7)))
            return self.param
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=())
    # Max time 5 (odd): false branch, param becomes 2 + 7 = 9.
    out1 = run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 5})
    # Max time 6 (even): true branch, param becomes 2 + 3 = 5.
    out2 = run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 6})
    assert (out1['output'].raw_tensor == 9)
    assert (out2['output'].raw_tensor == 5)
|
def test_cond_param_assign3():
    """Test :func:`rf.cond` mixing a param update with a returned value in one branch."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.param = rf.Parameter(dims=(), dtype='int32')
            self.param.initial = 2
        def __call__(self, x: Tensor) -> Tuple[(Tensor, Tensor)]:
            # True branch: performs assign_add(3) as a side effect (tuple trick,
            # taking [-1]) and returns the constant 42.
            # False branch: no side effect, returns param * 3.
            # Also returns the param itself to observe the side effect.
            return (rf.cond(pred=((time_dim.get_dim_value_tensor() % 2) == 0), true_fn=(lambda : (self.param.assign_add(3), rf.convert_to_tensor(42))[(- 1)]), false_fn=(lambda : (self.param * 3))), self.param)
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        (out, param) = model(extern_data['data'])
        out.mark_as_default_output(shape=())
        param.mark_as_output(shape=(), name='param')
    # Max time 5 (odd): false branch -> out = 2 * 3 = 6, param unchanged (2).
    out1 = run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 5})
    # Max time 6 (even): true branch -> out = 42, param = 2 + 3 = 5.
    out2 = run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 6})
    assert ((out1['output'].raw_tensor == 6) and (out1['param'].raw_tensor == 2))
    assert ((out2['output'].raw_tensor == 42) and (out2['param'].raw_tensor == 5))
|
def test_constant_bool():
    """A Python bool passed to :func:`rf.constant` must yield a bool-dtype tensor."""

    class _Net(rf.Module):
        def __call__(self) -> Tuple[Tensor, Dim]:
            feat_dim = Dim(3, name="dim")
            return rf.constant(False, dims=[feat_dim]), feat_dim

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        extern_data  # unused, no extern data needed here
        out, feat_dim = model()
        assert out.dtype == "bool"
        out.mark_as_default_output(shape=[feat_dim])

    run_model(TensorDict(), _get_model, _forward_step)
|
def test_module_list():
    """:class:`rf.ModuleList` holding a stack of linears, applied in order."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.base_dim = Dim(3, name="linear-out")
            out_dims = [self.base_dim + i for i in range(4)]
            src_dims = [in_dim] + out_dims[:-1]
            self.out_dim = out_dims[-1]
            self.ls = rf.ModuleList([rf.Linear(src, dst) for src, dst in zip(src_dims, out_dims)])

        def __call__(self, out: Tensor) -> Tensor:
            """Apply all layers in registration order."""
            for layer in self.ls:
                out = layer(out)
            return out

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, model.out_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_module_slice_set_del():
    """:class:`rf.ModuleList` slice access, slice deletion, and slice assignment."""
    rf.select_backend_torch()
    base_dim = Dim(3, name="linear-out")
    out_dims = [base_dim + i for i in range(4)]
    in_dim = Dim(7, name="in")
    src_dims = [in_dim] + out_dims[:-1]
    layers = rf.ModuleList([rf.Linear(src, dst) for src, dst in zip(src_dims, out_dims)])
    assert len(layers) == 4
    assert [name for name, _ in layers.items()] == ["0", "1", "2", "3"]
    # A full slice returns a fresh ModuleList with the same entries.
    orig_layers = layers[:]
    assert isinstance(orig_layers, rf.ModuleList)
    assert len(orig_layers) == 4
    assert [name for name, _ in orig_layers.items()] == ["0", "1", "2", "3"]
    # Deleting a tail slice keeps the leading entries with their names.
    del layers[2:]
    assert len(layers) == 2
    assert [name for name, _ in layers.items()] == ["0", "1"]
    # Assigning a full slice restores all entries.
    layers[:] = orig_layers
    assert len(layers) == 4
    assert [name for name, _ in layers.items()] == ["0", "1", "2", "3"]
|
def test_sequential_base_case():
    """:class:`rf.Sequential` constructed from a generator of modules."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            feat_dims = [Dim(1, name="feat1"), Dim(2, name="feat2"), Dim(3, name="feat3")]
            src_dims = [in_dim] + feat_dims[:-1]
            self.out_dim = feat_dims[-1]
            self.seq = rf.Sequential(rf.Linear(src, dst) for src, dst in zip(src_dims, feat_dims))

        def __call__(self, data: Tensor) -> Tensor:
            """Apply the whole stack."""
            return self.seq(data)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, model.out_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_sequential_named_case():
    """:class:`rf.Sequential` constructed from an :class:`OrderedDict` with named entries."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            feat_dims = [Dim(1, name="feat1"), Dim(2, name="feat2"), Dim(3, name="feat3")]
            self.out_dim = feat_dims[-1]
            named_layers = OrderedDict()
            named_layers["one"] = rf.Linear(in_dim, feat_dims[0])
            named_layers["two"] = rf.Linear(feat_dims[0], feat_dims[1])
            named_layers["three"] = rf.Linear(feat_dims[1], feat_dims[2])
            self.seq = rf.Sequential(named_layers)

        def __call__(self, data: Tensor) -> Tensor:
            """Apply the whole stack."""
            return self.seq(data)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, model.out_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_parameter_list():
    """:class:`rf.ParameterList` iteration; each parameter is added to the input."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.param_list = rf.ParameterList([rf.Parameter([in_dim]) for _ in range(3)])

        def __call__(self, data: Tensor) -> Tensor:
            """Add every parameter onto the input."""
            for param in self.param_list:
                data = data + param
            return data

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_conv1d():
    """:class:`rf.Conv1d` with stride 3 and 'valid' padding over a dynamic time dim."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    out_dim = Dim(13, name="out")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.conv = rf.Conv1d(in_dim, out_dim, 4, strides=3, padding="valid")

        def __call__(self, x: rf.Tensor) -> Tuple[Tensor, Dim]:
            return self.conv(x, in_spatial_dim=time_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, out_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_functional_conv1d_same_padding():
    """Functional :func:`rf.conv`, stride 1, 'same' padding."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    out_dim = Dim(13, name="out")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor) -> Tuple[Tensor, Dim]:
            filter_size = Dim(4, name="filter_size")
            filters = rf.ones((filter_size, in_dim, out_dim), dtype=x.dtype)
            y, (out_spatial_dim,) = rf.conv(
                x,
                filter=filters,
                in_dim=in_dim,
                out_dim=out_dim,
                in_spatial_dims=[time_dim],
                filter_size=[filter_size],
                strides=1,
                padding="same",
            )
            return y, out_spatial_dim

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, out_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_conv1d_same_padding():
    """:class:`rf.Conv1d` with 'same' padding (default stride 1)."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    out_dim = Dim(13, name="out")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.conv = rf.Conv1d(in_dim, out_dim, 4, padding="same")

        def __call__(self, x: rf.Tensor) -> Tuple[Tensor, Dim]:
            return self.conv(x, in_spatial_dim=time_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, out_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_functional_conv1d_stride_same_padding():
    """Functional :func:`rf.conv` with stride 3 and 'same' padding."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(1, name="in")
    out_dim = Dim(1, name="out")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor) -> Tuple[Tensor, Dim]:
            # Use constant-ones input and filters so the result only depends on padding/stride.
            x = rf.ones(x.dims, dtype=x.dtype)
            filter_size = Dim(4, name="filter_size")
            filters = rf.ones((filter_size, in_dim, out_dim), dtype=x.dtype)
            y, (out_spatial_dim,) = rf.conv(
                x,
                filter=filters,
                in_dim=in_dim,
                out_dim=out_dim,
                in_spatial_dims=[time_dim],
                filter_size=[filter_size],
                strides=3,
                padding="same",
            )
            return y, out_spatial_dim

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, out_dim))

    # Two different max sizes to cover different padding amounts.
    run_model(extern_data, _get_model, _forward_step, dyn_dim_max_sizes={time_dim: 7})
    run_model(extern_data, _get_model, _forward_step, dyn_dim_max_sizes={time_dim: 9})
|
def test_conv1d_stride_same_padding():
    """:class:`rf.Conv1d` with stride 3 and 'same' padding."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    out_dim = Dim(13, name="out")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.conv = rf.Conv1d(in_dim, out_dim, 4, strides=3, padding="same")

        def __call__(self, x: rf.Tensor) -> Tuple[Tensor, Dim]:
            return self.conv(x, in_spatial_dim=time_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, out_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_conv1d_same_out():
    """:class:`rf.Conv1d` with identical in/out dim and 'same' padding keeps the time dim."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.conv = rf.Conv1d(in_dim, in_dim, 4, padding="same")

        def __call__(self, x: rf.Tensor) -> Tensor:
            x, _ = self.conv(x, in_spatial_dim=time_dim)
            return x

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_conv1d_depthwise():
    """Depthwise :class:`rf.Conv1d` (groups == number of input channels)."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    out_dim = Dim(7 * 3, name="out")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.conv = rf.Conv1d(in_dim, out_dim, 4, groups=in_dim.dimension, padding="valid")

        def __call__(self, x: rf.Tensor) -> Tuple[Tensor, Dim]:
            return self.conv(x, in_spatial_dim=time_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, spatial_dim = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, spatial_dim, out_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_maxpool1d_padding_valid():
    """:func:`rf.max_pool1d` with 'valid' padding."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
            return rf.max_pool1d(x, pool_size=3, padding="valid", in_spatial_dim=in_spatial_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, in_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_maxpool1d_padding_same():
    """:func:`rf.max_pool1d` with 'same' padding."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
            return rf.max_pool1d(x, pool_size=3, padding="same", in_spatial_dim=in_spatial_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, in_dim))

    # Two different max sizes to cover different padding amounts.
    run_model(extern_data, _get_model, _forward_step, dyn_dim_max_sizes={time_dim: 7})
    run_model(extern_data, _get_model, _forward_step, dyn_dim_max_sizes={time_dim: 9})
|
def test_maxpool1d_stride_padding_same():
    """:func:`rf.max_pool1d` with stride 3 and 'same' padding."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
            return rf.max_pool1d(x, pool_size=4, strides=3, padding="same", in_spatial_dim=in_spatial_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, in_dim))

    # Two different max sizes to cover different padding amounts.
    run_model(extern_data, _get_model, _forward_step, dyn_dim_max_sizes={time_dim: 7})
    run_model(extern_data, _get_model, _forward_step, dyn_dim_max_sizes={time_dim: 9})
|
def test_maxpool1d_stride_border_cond():
    """:func:`rf.max_pool1d`, pool window larger than stride: very short seqs get 0 frames."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
            return rf.max_pool1d(x, pool_size=6, strides=3, padding="valid", in_spatial_dim=in_spatial_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, in_dim))

    # Force a batch with both long (up to 9) and very short (down to 2) sequences.
    res = run_model(
        extern_data, _get_model, _forward_step, dyn_dim_max_sizes={time_dim: 9}, dyn_dim_min_sizes={time_dim: 2}
    )
    out = res["output"]
    (out_spatial_dim,) = out.get_dyn_size_tags()
    assert isinstance(out_spatial_dim, Dim)
    out_sizes = out_spatial_dim.dyn_size_ext.raw_tensor
    print("out sizes:", out_sizes)
    assert isinstance(out_sizes, numpy.ndarray)
    # Sizes must differ across the batch, and a sequence shorter than the
    # pool window must produce zero output frames.
    assert min(out_sizes) != max(out_sizes)
    assert min(out_sizes) == 0
|
def test_maxpool1d_stride1_padding_same():
    """:func:`rf.max_pool1d` with explicit stride 1 and 'same' padding."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
            return rf.max_pool1d(x, pool_size=3, strides=1, padding="same", in_spatial_dim=in_spatial_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, in_dim))

    # Two different max sizes to cover different padding amounts.
    run_model(extern_data, _get_model, _forward_step, dyn_dim_max_sizes={time_dim: 7})
    run_model(extern_data, _get_model, _forward_step, dyn_dim_max_sizes={time_dim: 9})
|
def test_avgpool1d_stride1_padding_same():
    """:func:`rf.pool1d` in 'avg' mode, stride 1, 'same' padding, static time dim."""
    time_dim = Dim(10, name="time")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
            return rf.pool1d(x, mode="avg", pool_size=3, strides=1, padding="same", in_spatial_dim=in_spatial_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, _ = model(extern_data["data"], in_spatial_dim=time_dim)
        # Stride 1 with 'same' padding keeps the original time dim.
        out.mark_as_default_output(shape=[batch_dim, time_dim])

    run_model(extern_data, _get_model, _forward_step)
|
def test_conformer():
    """Test building and running :class:`ConformerEncoder` with conv subsampling."""
    import resource
    import sys
    # Model setup/serialization here can recurse deeply,
    # so raise the stack and recursion limits first (best-effort).
    try:
        resource.setrlimit(resource.RLIMIT_STACK, ((2 ** 29), (- 1)))
    except Exception as exc:
        # E.g. may not be permitted in some environments; just report and continue.
        print(f'resource.setrlimit {type(exc).__name__}: {exc}')
    sys.setrecursionlimit((10 ** 6))
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    from returnn.frontend.encoder.conformer import ConformerEncoder, ConformerConvSubsample
    def _forward_step(*, model: ConformerEncoder, extern_data: TensorDict):
        (out, out_spatial_dim) = model(extern_data['data'], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, model.out_dim))
    # Small dims / few layers to keep the test fast.
    run_model(extern_data, (lambda *, epoch, step: ConformerEncoder(in_dim, Dim(14, name='out'), ff_dim=Dim(17, name='ff'), input_layer=ConformerConvSubsample(in_dim, out_dims=[Dim(32, name='conv1'), Dim(64, name='conv2')], filter_sizes=[(3, 3), (3, 3)], pool_sizes=[(2, 1), (2, 1)]), num_heads=2, num_layers=2)), _forward_step)
|
def test_scaled_gradient():
    """:func:`rf.scaled_gradient`: identity in the forward pass, scaled gradient backward."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    def _get_model(*, epoch, step):
        return rf.Module()

    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        model  # unused, no params needed
        data = extern_data["data"]
        rf.set_requires_gradient(data)
        out = rf.scaled_gradient(data, scale=-0.5)
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))
        # Check the scaled gradient of a simple sum w.r.t. the input.
        grad = rf.gradient(rf.reduce_sum(out, axis=out.dims, use_mask=False), data)
        grad.mark_as_output("grad")

    run_model(extern_data, _get_model, _forward_step)
|
def test_label_smoothed_log_prob_gradient():
    """Test :func:`rf.label_smoothed_log_prob_gradient` and the resulting CE gradient."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    vocab_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, vocab_dim], dtype='float32', feature_dim=vocab_dim), 'targets': Tensor('targets', [batch_dim, time_dim], dtype='int32', sparse_dim=vocab_dim)})
    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        model  # unused, no params needed
        data = extern_data['data']
        targets = extern_data['targets']
        rf.set_requires_gradient(data)
        log_prob = rf.log_softmax(data, axis=vocab_dim)
        # Forward is identity; only the gradient is label-smoothed (factor 0.1).
        out = rf.label_smoothed_log_prob_gradient(log_prob, 0.1)
        loss = rf.cross_entropy(target=targets, estimated=log_prob, estimated_type='log-probs', axis=vocab_dim)
        out.mark_as_default_output(shape=(batch_dim, time_dim, vocab_dim))
        loss.mark_as_output('loss')
        # Gradient of the summed loss w.r.t. the input logits.
        grad = rf.gradient(rf.reduce_sum(loss, axis=loss.dims), data)
        grad.mark_as_output('grad')
    run_model(extern_data, (lambda *, epoch, step: rf.Module()), _forward_step)
|
def test_while_loop_simple():
    """Test :func:`rf.while_loop` with a single scalar counter as loop state."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        (model, extern_data)  # unused
        # Count from 0 up to the dynamic time-dim size.
        i = rf.while_loop(cond=(lambda i_: (i_ < time_dim.get_dim_value_tensor())), body=(lambda i_: (i_ + 1)), initial=rf.constant(0, dims=()))
        i.mark_as_default_output(shape=())
    run_model(extern_data, (lambda *, epoch, step: rf.Module()), _forward_step)
|
def test_while_loop_two_state():
    """Test :func:`rf.while_loop` with a tuple state (scalar counter, tensor)."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32', feature_dim=in_dim)})
    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        model  # unused
        data = extern_data['data']
        # Two iterations: multiply the data tensor by 0.9 each time.
        (_, out) = rf.while_loop(cond=(lambda s: (s[0] < 2)), body=(lambda s: ((s[0] + 1), (s[1] * 0.9))), initial=(rf.constant(0, dims=()), data))
        # After the loop, the result must not live inside any control-flow context.
        assert (out.control_flow_ctx is None)
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))
    run_model(extern_data, (lambda *, epoch, step: rf.Module()), _forward_step)
|
def test_while_loop():
    """Test :func:`rf.while_loop` with a data-dependent stop condition."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            def _cond(s: Tuple[(Tensor, Tensor)]):
                # State: (time index t, accumulated sum s_).
                (t, s_) = s
                if t.raw_tensor.__class__.__module__.startswith('torch'):
                    # Debug output, only when running eagerly with the torch backend.
                    print('**', t.raw_tensor, rf.reduce_sum(s_, axis=s_.dims).raw_tensor)
                # Stop when the accumulated sum reaches 50 or the time dim is exhausted.
                return rf.logical_and((rf.reduce_sum(s_, axis=s_.dims) < 50), (t < time_dim.get_dim_value_tensor()))
            def _body(s):
                # Accumulate |x[t]| and advance the time index.
                (t, s_) = s
                return ((t + 1), (s_ + rf.abs(rf.gather(x, indices=t, axis=time_dim))))
            (_, final_s) = rf.while_loop(_cond, _body, initial=(rf.zeros((), dtype=rf.get_default_array_index_dtype()), rf.zeros((batch_dim, in_dim))))
            return final_s
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, in_dim))
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, test_tensorflow=False)
|
def test_scan_unknown_len():
    """Test :func:`rf.scan` without a predefined spatial dim (dynamic loop length)."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tuple[(Tensor, Dim)]:
            def _cond(_, s: Tuple[(Tensor, Tensor)]):
                # State: (time index t, accumulated sum s_).
                (t, s_) = s
                if t.raw_tensor.__class__.__module__.startswith('torch'):
                    # Debug output, only when running eagerly with the torch backend.
                    print('**', t.raw_tensor, rf.reduce_sum(s_, axis=in_dim).raw_tensor)
                # Stop per batch entry when the sum reaches 20 or time is exhausted.
                return rf.logical_and((rf.reduce_sum(s_, axis=in_dim) < 20), (t < time_dim.get_dim_value_tensor()))
            def _body(_, s):
                (t, s_) = s
                y_ = (s_ + rf.abs(rf.gather(x, indices=t, axis=time_dim)))
                # Emit y_ per step and carry (t+1, y_) as the next state.
                return (y_, ((t + 1), y_))
            (y, _, out_time_dim) = rf.scan(cond=_cond, body=_body, cond_dims=[batch_dim], initial=(rf.zeros((), dtype=rf.get_default_array_index_dtype()), rf.zeros((batch_dim, in_dim))), ys=Tensor('y', dims=(batch_dim, in_dim), dtype=x.dtype))
            return (y, out_time_dim)
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        (out, out_time_dim) = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, out_time_dim, in_dim))
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, test_tensorflow=False)
|
def test_scan_existing_spatial_dim():
    """Test :func:`rf.scan` over an existing spatial dim (cumulative sum over time)."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            def _body(x_, s):
                # Running sum: emit it per step and also carry it as state.
                y_ = (s + x_)
                return (y_, y_)
            (y, _, _) = rf.scan(spatial_dim=time_dim, body=_body, initial=rf.zeros((batch_dim, in_dim)), xs=x, ys=Tensor('y', dims=(batch_dim, in_dim), dtype=x.dtype))
            return y
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, test_tensorflow=False)
|
def test_scan_changing_dim():
    """Test :func:`rf.scan` where a state dim (beam) changes per iteration (beam-search-like)."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tuple[(Tensor, Dim)]:
            def _body(x_: Tensor, s):
                # State dict carries both the tensor and its current beam dim.
                s_ = s['state']
                beam_in_dim = s['beam_dim']
                y_ = (s_ + x_)
                # Introduce a new beam dim each iteration and reduce away the old one.
                beam_dim = Dim(3, name='beam')
                r = rf.range_over_dim(beam_dim, dtype=x_.dtype)
                r.sparse_dim = None
                y_ = rf.combine_bc(y_, 'mul', r)
                y_ = rf.reduce_mean(y_, axis=beam_in_dim)
                return (y_, {'state': y_, 'beam_dim': beam_dim})
            initial_beam_dim = Dim(1, name='initial-beam')
            # return_tensor_arrays=True because the per-step dims differ,
            # so the ys cannot be stacked into one tensor.
            (y, last_state, _) = rf.scan(spatial_dim=time_dim, body=_body, initial={'state': rf.zeros((batch_dim, initial_beam_dim, in_dim)), 'beam_dim': initial_beam_dim}, xs=x, ys=Tensor('y', dims=(batch_dim, initial_beam_dim, in_dim), dtype=x.dtype), return_tensor_arrays=True)
            final_beam_dim = last_state['beam_dim']
            assert isinstance(y, TensorArray)
            # Only use the last entry; earlier entries have other beam dims.
            last = y[(- 1)]
            return (last, final_beam_dim)
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        (out, beam_dim) = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, beam_dim, in_dim))
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, test_tensorflow=False)
|
def test_neg():
    """Unary negation on a Tensor."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return -x

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_squared_difference():
    """:func:`rf.squared_difference` on two tensors of the same shape."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict(
        {
            "a": Tensor("a", [batch_dim, time_dim, in_dim], dtype="float32"),
            "b": Tensor("b", [batch_dim, time_dim, in_dim], dtype="float32"),
        }
    )

    class _Net(rf.Module):
        def __call__(self, a: Tensor, b: Tensor) -> Tensor:
            return rf.squared_difference(a, b)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["a"], extern_data["b"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_abs_complex():
    """:func:`rf.abs` on complex64 input (magnitude)."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="complex64")})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.abs(x)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_relu():
    """:func:`rf.relu` elementwise activation."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.relu(x)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_batch_norm():
    """:class:`rf.BatchNorm` without sequence masking."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.bn = rf.BatchNorm(in_dim, use_mask=False)

        def __call__(self, out: Tensor) -> Tensor:
            """Apply batch norm."""
            return self.bn(out)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_batch_norm_masking():
    """:class:`rf.BatchNorm` with sequence masking and without running stats."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.bn = rf.BatchNorm(in_dim, use_mask=True, track_running_stats=False)

        def __call__(self, out: Tensor) -> Tensor:
            """Apply masked batch norm."""
            return self.bn(out)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_reduce_max():
    """:func:`rf.reduce_max` over the (static) feature axis."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.reduce_max(x, axis=in_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_reduce_argmax():
    """:func:`rf.reduce_argmax` over the (static) feature axis."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.reduce_argmax(x, axis=in_dim)

    def _get_model(*, epoch, step):
        return _Net()

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data["data"])
        out.mark_as_default_output(shape=(batch_dim, time_dim))

    run_model(extern_data, _get_model, _forward_step)
|
def test_reduce_mean_dyn_time():
    """rf.reduce_mean over a dynamically-sized (per-sequence) time axis."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _MeanNet(rf.Module):
        def __call__(self, source: Tensor) -> Tensor:
            return rf.reduce_mean(source, axis=time_dim)

    def _forward_step(*, model: _MeanNet, extern_data: TensorDict):
        model(extern_data["data"]).mark_as_default_output(shape=(batch_dim, in_dim))

    run_model(extern_data, lambda *, epoch, step: _MeanNet(), _forward_step)
|
def test_reduce_mean_dyn_batch_time():
    """rf.reduce_mean jointly over batch and dynamic time axes."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _MeanNet(rf.Module):
        def __call__(self, source: Tensor) -> Tensor:
            return rf.reduce_mean(source, axis=(batch_dim, time_dim))

    def _forward_step(*, model: _MeanNet, extern_data: TensorDict):
        model(extern_data["data"]).mark_as_default_output(shape=(in_dim,))

    run_model(extern_data, lambda *, epoch, step: _MeanNet(), _forward_step)
|
def test_top_k():
    """rf.top_k with k=2 along the feature axis; checks both values and indices."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    in_dim = Dim(7, name="in")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim, in_dim], dtype="float32")})

    class _TopKNet(rf.Module):
        def __call__(self, source: Tensor) -> Tuple[Tensor, Tensor, Dim]:
            return rf.top_k(source, axis=in_dim, k=2)

    def _forward_step(*, model: _TopKNet, extern_data: TensorDict):
        values, indices, k_dim = model(extern_data["data"])
        values.mark_as_output("values", shape=(batch_dim, time_dim, k_dim))
        indices.mark_as_output("indices", shape=(batch_dim, time_dim, k_dim))

    run_model(extern_data, lambda *, epoch, step: _TopKNet(), _forward_step)
|
def test_top_k_beam_search():
    """Joint top-k over (incoming beam, vocab), as in one beam-search expansion step."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    vocab_dim = Dim(7, name="vocab")
    beam_in_dim = Dim(3, name="beam_in")
    beam_out_dim = Dim(5, name="beam_out")
    extern_data = TensorDict(
        {"log_probs": Tensor("log_probs", [batch_dim, beam_in_dim, time_dim, vocab_dim], dtype="float32")}
    )

    class _SearchNet(rf.Module):
        def __call__(self, scores: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
            # top_k over multiple axes yields one indices tensor per axis.
            values, (idx_beam, idx_vocab), _ = rf.top_k(
                scores, axis=[beam_in_dim, vocab_dim], k_dim=beam_out_dim, k=beam_out_dim.dimension
            )
            return values, idx_beam, idx_vocab

    def _forward_step(*, model: _SearchNet, extern_data: TensorDict):
        log_probs, indices_beam_in, indices_vocab = model(extern_data["log_probs"])
        log_probs.mark_as_output("log_probs", shape=(batch_dim, time_dim, beam_out_dim))
        indices_beam_in.mark_as_output("indices_beam_in", shape=(batch_dim, time_dim, beam_out_dim))
        indices_vocab.mark_as_output("indices_vocab", shape=(batch_dim, time_dim, beam_out_dim))

    run_model(extern_data, lambda *, epoch, step: _SearchNet(), _forward_step)
|
def test_stft():
    """rf.stft on a raw waveform; marks the spectrum as default output."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim], dtype="float32")})

    class _StftNet(rf.Module):
        def __call__(self, source: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim, Dim]:
            # Small frame/FFT sizes keep the test fast.
            return rf.stft(source, in_spatial_dim=in_spatial_dim, frame_step=3, frame_length=5, fft_length=6)

    def _forward_step(*, model: _StftNet, extern_data: TensorDict):
        spectrum, frames_dim, freq_dim = model(extern_data["data"], in_spatial_dim=time_dim)
        spectrum.mark_as_default_output(shape=(batch_dim, frames_dim, freq_dim))

    run_model(extern_data, lambda *, epoch, step: _StftNet(), _forward_step)
|
def test_mel_filterbank():
    """STFT -> power spectrum -> mel filterbank pipeline."""
    time_dim = Dim(Tensor("time", [batch_dim], dtype="int32"))
    feat_dim = Dim(10, name="mel")
    extern_data = TensorDict({"data": Tensor("data", [batch_dim, time_dim], dtype="float32")})

    class _MelNet(rf.Module):
        def __call__(self, source: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
            source, in_spatial_dim, fft_dim = rf.stft(
                source, in_spatial_dim=in_spatial_dim, frame_step=3, frame_length=5, fft_length=6
            )
            source = rf.abs(source) ** 2.0  # power spectrum
            source = rf.audio.mel_filterbank(source, in_dim=fft_dim, out_dim=feat_dim, sampling_rate=16)
            return source, in_spatial_dim

    def _forward_step(*, model: _MelNet, extern_data: TensorDict):
        out, out_spatial_dim = model(extern_data["data"], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, feat_dim))

    run_model(extern_data, lambda *, epoch, step: _MelNet(), _forward_step)
    # NOTE(review): run_model is invoked twice in this test — possibly deliberate
    # (covers repeated model/graph construction) or a copy-paste duplicate; confirm.
    run_model(extern_data, lambda *, epoch, step: _MelNet(), _forward_step)
|
def test_audio_specaugment():
    """rf.audio.specaugment applied unconditionally (only_on_train=False).

    Uses a plain ``rf.Module()`` as the model since specaugment is a pure
    function and needs no parameters.
    """
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict(
        {'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32', feature_dim=in_dim)}
    )

    def _forward_step(*, model: rf.Module, extern_data: TensorDict):
        # `model` is required by the run_model interface but unused here;
        # `del` marks that explicitly (was a bare no-op expression statement).
        del model
        data = extern_data['data']
        out = rf.audio.specaugment(data, spatial_dim=time_dim, feature_dim=in_dim, only_on_train=False)
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, lambda *, epoch, step: rf.Module(), _forward_step)
|
def test_tensor():
    """Basic Tensor construction with dynamic and static dims, plus repr."""
    # Local dims (dimension=None means dynamic size); the Dim(10) gets no name.
    batch = Dim(name="batch", dimension=None)
    time = Dim(name="time", dimension=None)
    feature = Dim(10)
    tensor = Tensor("x", (batch, time, feature), "float32")
    print(tensor)
|
def run(*args):
    """Run the given tool with arguments.

    The very first invocation runs in-process (imports the tool module and
    calls its ``main``), which is faster and easier to debug; every later
    invocation spawns a real subprocess in ``base_dir``.

    :return: decoded stdout+stderr for the subprocess case, ``None`` for the
        in-process case
    :raises CalledProcessError: if the subprocess exits non-zero
    """
    global _run_count
    cmd = list(args)
    print("run:", cmd)
    if _run_count == 0:
        _run_count += 1
        from returnn.util.basic import generic_import_module

        module = generic_import_module(os.path.join(base_dir, cmd[0]))
        module.main(cmd)
        return
    _run_count += 1
    proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, cwd=base_dir)
    stdout, _ = proc.communicate()
    if proc.returncode != 0:
        print("Return code is %i" % proc.returncode)
        print("std out/err:\n---\n%s\n---\n" % stdout.decode("utf8"))
        raise CalledProcessError(cmd=cmd, returncode=proc.returncode, output=stdout)
    return stdout.decode("utf8")
|
def test_compile_tf_graph_basic():
    """Compile the simple rec encoder-decoder config into a graph metatxt file."""
    work_dir = tempfile.mkdtemp()
    config_fn = os.path.join(work_dir, 'returnn.config')
    with open(config_fn, 'wt') as f:
        f.write(rec_encoder_decoder_simple_config)
    run(
        'tools/compile_tf_graph.py',
        '--output_file', os.path.join(work_dir, 'graph.metatxt'),
        config_fn,
    )
|
def test_compile_tf_graph_basic_second_run():
    """Repeat the basic compile test; the second run() goes through a subprocess."""
    test_compile_tf_graph_basic()
|
def test_compile_tf_graph_enc_dec_simple_recurrent_step():
    """Compile the simple enc-dec config with step-by-step recurrence for 'output'."""
    work_dir = tempfile.mkdtemp()
    config_fn = os.path.join(work_dir, 'returnn.config')
    with open(config_fn, 'wt') as f:
        f.write(rec_encoder_decoder_simple_config)
    run(
        'tools/compile_tf_graph.py',
        '--output_file', os.path.join(work_dir, 'graph.metatxt'),
        '--rec_step_by_step', 'output',
        config_fn,
    )
|
def test_compile_tf_graph_enc_dec_att_recurrent_step():
    """Compile the attention enc-dec config with step-by-step recurrence for 'output'."""
    work_dir = tempfile.mkdtemp()
    config_fn = os.path.join(work_dir, 'returnn.config')
    with open(config_fn, 'wt') as f:
        f.write(rec_encoder_decoder_att_config)
    run(
        'tools/compile_tf_graph.py',
        '--output_file', os.path.join(work_dir, 'graph.metatxt'),
        '--rec_step_by_step', 'output',
        config_fn,
    )
|
def test_compile_tf_graph_transducer_time_sync_recurrent_step():
    """Compile the time-sync transducer config with step-by-step recurrence for 'output'."""
    work_dir = tempfile.mkdtemp()
    config_fn = os.path.join(work_dir, 'returnn.config')
    with open(config_fn, 'wt') as f:
        f.write(rec_transducer_time_sync_config)
    run(
        'tools/compile_tf_graph.py',
        '--output_file', os.path.join(work_dir, 'graph.metatxt'),
        '--rec_step_by_step', 'output',
        config_fn,
    )
|
def test_compile_tf_graph_transducer_time_sync_delayed_recurrent_step():
    """Compile the delayed time-sync transducer config with step-by-step recurrence."""
    work_dir = tempfile.mkdtemp()
    config_fn = os.path.join(work_dir, 'returnn.config')
    with open(config_fn, 'wt') as f:
        f.write(rec_transducer_time_sync_delayed_config)
    run(
        'tools/compile_tf_graph.py',
        '--output_file', os.path.join(work_dir, 'graph.metatxt'),
        '--rec_step_by_step', 'output',
        config_fn,
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.