code stringlengths 17 6.64M |
|---|
def run(args, input=None):
    """Run a subprocess, echo its combined output, and return it decoded.

    :param args: argv list (any iterable of str)
    :param bytes|None input: optional bytes fed to the child's stdin
    :return: decoded stdout+stderr of the child
    :rtype: str
    :raises CalledProcessError: if the child exits with a non-zero code
    """
    args = list(args)
    print('run:', args)
    proc = Popen(args, stdout=PIPE, stderr=STDOUT, stdin=PIPE, env=build_env())
    out, _ = proc.communicate(input=input)
    out_text = out.decode('utf8')
    print('Return code is %i' % proc.returncode)
    print('std out/err:\n---\n%s\n---\n' % out_text)
    if proc.returncode != 0:
        raise CalledProcessError(cmd=args, returncode=proc.returncode, output=out)
    return out_text
|
def filter_out(ls):
    """
    Drop known-noise lines (TF core logging, numpy binary-compat warnings,
    TF device listings, macOS memory info) from captured output.

    :param list[str] ls:
    :rtype: list[str]
    """
    if not isinstance(ls, list):
        ls = list(ls)
    kept = []
    idx = 0
    while idx < len(ls):
        line = ls[idx]
        if 'tensorflow/core/' in line or 'tensorflow/stream_executor/' in line:
            idx += 1
            continue
        # a warning line followed by an indented continuation line: skip both
        if idx + 1 < len(ls) and ls[idx + 1].startswith(' '):
            if re.match('.*:\\d+: RuntimeWarning: numpy.*', line) or re.match('.*:\\d+: FutureWarning: .*', line):
                idx += 2
                continue
        if any(msg in line for msg in ('Setup TF inter and intra global thread pools', 'Collecting TensorFlow device list', 'CUDA_VISIBLE_DEVICES is not set')):
            idx += 1
            continue
        if 'Local devices available to TensorFlow:' in line:
            # skip the header, the numbered device lines, and their indented details
            idx += 1
            while idx < len(ls) and re.match('^ {2}\\d+/\\d+: name:.*', ls[idx]):
                idx += 1
            while idx < len(ls) and re.match('^ {4}.*', ls[idx]):
                idx += 1
            continue
        if line.startswith(('systemMemory:', 'maxCacheSize:')):
            idx += 1
            continue
        kept.append(line)
        idx += 1
    return kept
|
def count_start_with(ls, s):
    """
    Count how many strings in `ls` begin with the prefix `s`.

    :param list[str] ls:
    :param str s:
    :rtype: int
    """
    return sum(1 for item in ls if item.startswith(s))
|
def test_filter_out():
    """filter_out drops the numpy warnings (with continuations) from a real captured log."""
    sample = '\n/home/travis/virtualenv/python2.7.14/lib/python2.7/site-packages/scipy/special/__init__.py:640: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._ufuncs import *\n/home/travis/virtualenv/python2.7.14/lib/python2.7/site-packages/h5py/_hl/group.py:22: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from .. import h5g, h5i, h5o, h5r, h5t, h5l, h5p\nRETURNN starting up, version 20180724.141845--git-7865d01, date/time 2018-07-24-13-11-47 (UTC+0000), pid 2196, cwd /home/travis/build/rwth-i6/returnn, Python /home/travis/virtualenv/python2.7.14/bin/python\nfaulthandler import error. No module named faulthandler\nTheano: 0.9.0 (<site-package> in /home/travis/virtualenv/python2.7.14/lib/python2.7/site-packages/theano)\nTask: No-operation\nelapsed: 0:00:00.0001\n'
    lines = filter_out(filter(None, sample.splitlines()))
    pprint(lines)
    assert_equal(len(lines), 5)
|
def test_returnn_startup():
    """RETURNN with the TF backend starts up and prints the expected few lines."""
    output = run([py, __main_entry__, '-x', 'nop', '++use_tensorflow', '1'])
    lines = filter_out(output.splitlines())
    if not (3 <= len(lines) <= 10):
        print('output:\n%s\n\nNum lines: %i' % ('\n'.join(lines), len(lines)))
        raise Exception('unexpected output number of lines')
    assert_equal(count_start_with(lines, 'RETURNN starting up, version '), 1)
    assert_equal(count_start_with(lines, 'TensorFlow: '), 1)
    assert_in('Task: No-operation', lines)
|
def test_returnn_startup_verbose():
    """Verbose startup additionally prints the command line options and a Quitting line."""
    output = run([py, __main_entry__, '-x', 'nop', '++use_tensorflow', '1', '++log_verbosity', '5'])
    lines = filter_out(output.splitlines())
    if not (3 <= len(lines) <= 15):
        print('output:\n%s\n\nNum lines: %i' % ('\n'.join(lines), len(lines)))
        raise Exception('unexpected output number of lines')
    assert_equal(count_start_with(lines, 'RETURNN starting up, version '), 1)
    assert_equal(count_start_with(lines, 'RETURNN command line options: '), 1)
    assert_equal(count_start_with(lines, 'TensorFlow: '), 1)
    assert_in('Task: No-operation', lines)
    assert_in('Quitting', lines)
|
def test_simple_log():
    """Log writes and plain prints end up on stdout in order."""
    src = '\nfrom __future__ import annotations\nprint("hello stdout 1")\nfrom returnn.log import log\nlog.initialize(verbosity=[], logs=[], formatter=[])\nprint("hello stdout 2")\nprint("hello log 1", file=log.v3)\nprint("hello log 2", file=log.v3)\n    '
    output = run([py], input=src.encode('utf8'))
    expected = ['hello stdout 1', 'hello stdout 2', 'hello log 1', 'hello log 2']
    assert_equal(output.splitlines(), expected)
|
def test_StreamIO():
    """io.StringIO: truncate(0) alone keeps the write position; a seek(0) is needed too."""
    import io
    buf = io.StringIO()

    def show():
        print('buf: %r' % buf.getvalue())

    assert_equal(buf.getvalue(), '')
    show()
    buf.write('hello')
    show()
    assert_equal(buf.getvalue(), 'hello')
    buf.truncate(0)  # clears content but leaves the stream position at 5
    show()
    assert_equal(buf.getvalue(), '')
    buf.write('hello')  # writes at position 5 -> zero-padded prefix
    show()
    if PY3:
        assert_equal(buf.getvalue(), '\x00\x00\x00\x00\x00hello')
    buf.truncate(0)
    buf.seek(0)
    show()
    assert_equal(buf.getvalue(), '')
    buf.write('hello')
    show()
    assert_equal(buf.getvalue(), 'hello')
    buf.truncate(0)
    buf.seek(0)
    show()
    assert_equal(buf.getvalue(), '')
|
def _sig_alarm_handler(signum, frame):
raise Exception(f'Alarm (timeout) signal handler')
|
@contextlib.contextmanager
def timeout(seconds=10):
    """
    Arm a SIGALRM watchdog around the context body.

    :param seconds: when the context is not closed within this time, an exception will be raised
    """
    signal.alarm(seconds)  # schedule SIGALRM
    try:
        yield
    finally:
        # always disarm, even when the body raised
        signal.alarm(0)
|
def test_MultiProcDataset_n1_b1_default():
    """MultiProcDataset with a single worker yields the same seqs as the wrapped HDF dataset."""
    hdf_fn = generate_hdf_from_other({'class': 'Task12AXDataset', 'num_seqs': 23})
    base_dict = {'class': 'HDFDataset', 'files': [hdf_fn]}
    reference_seqs = dummy_iter_dataset(init_dataset(base_dict))
    with timeout():
        mp_dataset = MultiProcDataset(dataset=base_dict, num_workers=1, buffer_size=1)
        mp_dataset.initialize()
        mp_seqs = dummy_iter_dataset(mp_dataset)
        compare_dataset_seqs(reference_seqs, mp_seqs)
|
def test_MultiProcDataset_n3_b5_shuffle():
    """Multiple workers with buffering keep the (random) seq order of the wrapped dataset."""
    hdf_fn = generate_hdf_from_other({'class': 'Task12AXDataset', 'num_seqs': 23})
    base_dict = {'class': 'HDFDataset', 'files': [hdf_fn], 'seq_ordering': 'random'}
    reference_seqs = dummy_iter_dataset(init_dataset(base_dict))
    with timeout():
        mp_dataset = MultiProcDataset(dataset=base_dict, num_workers=3, buffer_size=5)
        mp_dataset.initialize()
        mp_seqs = dummy_iter_dataset(mp_dataset)
        compare_dataset_seqs(reference_seqs, mp_seqs)
|
def test_MultiProcDataset_meta():
    """MultiProcDataset wrapping a MetaDataset yields the same seqs as the MetaDataset itself."""
    hdf_fn = generate_hdf_from_other({'class': 'Task12AXDataset', 'num_seqs': 23})
    meta_dict = {
        'class': 'MetaDataset',
        'data_map': {'classes': ('hdf', 'classes'), 'data': ('hdf', 'data')},
        'datasets': {'hdf': {'class': 'HDFDataset', 'files': [hdf_fn]}},
    }
    reference_seqs = dummy_iter_dataset(init_dataset(meta_dict))
    with timeout():
        mp_dataset = MultiProcDataset(dataset=meta_dict, num_workers=1, buffer_size=1)
        mp_dataset.initialize()
        mp_seqs = dummy_iter_dataset(mp_dataset)
        compare_dataset_seqs(reference_seqs, mp_seqs)
|
def test_MultiProcDataset_via_config():
    """MultiProcDataset workers must be able to resolve a dataset class defined only in the config."""
    from io import StringIO
    import textwrap
    from returnn.config import Config, global_config_ctx
    config = Config()
    # The dataset class exists only in the config's namespace (not importable by the workers).
    # NOTE(review): the inline config source below may have lost indentation in this copy — verify it still parses.
    config.load_file(StringIO(textwrap.dedent(' #!returnn.py\n\n import numpy\n from returnn.datasets.map import MapDatasetBase\n\n class MyCustomMapDatasetInConfig(MapDatasetBase):\n def __init__(self):\n super().__init__(data_types={"data": {"shape": (None, 3)}})\n\n def __len__(self):\n return 2\n\n def __getitem__(self, item):\n return {"data": numpy.zeros((5, 3))}\n ')))
    with timeout(), global_config_ctx(config):
        mp_dataset = MultiProcDataset(dataset={'class': 'MapDatasetWrapper', 'map_dataset': config.typed_dict['MyCustomMapDatasetInConfig']}, num_workers=1, buffer_size=1)
        mp_dataset.initialize()
        items = dummy_iter_dataset(mp_dataset)
        # the config-defined dataset declares exactly 2 items
        assert (len(items) == 2)
|
class _MyCustomMapDatasetException(Exception):
    """Marker exception raised by the dummy map datasets in the tests below."""
    pass
|
class _MyCustomMapDatasetThrowingExceptionAtInit(MapDatasetBase):
    """Map dataset which always fails in __init__, to test error propagation from worker init."""
    def __init__(self):
        super().__init__()
        raise _MyCustomMapDatasetException('test exception at init')
|
class _MyCustomMapDatasetThrowingExceptionAtItem(MapDatasetBase):
    """Map dataset which yields one valid item and then fails, to test error propagation from iteration."""
    def __init__(self):
        super().__init__(data_types={'data': {'shape': (None, 3)}})
    def __len__(self):
        return 2
    def __getitem__(self, item):
        # item 0 is fine; any further access raises
        if (item == 0):
            return {'data': numpy.zeros((5, 3))}
        raise _MyCustomMapDatasetException('test exception at getitem')
|
def test_MultiProcDataset_exception_at_init():
    """An exception in the worker dataset's __init__ must propagate through initialize()."""
    with timeout():
        mp_dataset = MultiProcDataset(
            dataset={'class': 'MapDatasetWrapper', 'map_dataset': _MyCustomMapDatasetThrowingExceptionAtInit},
            num_workers=1, buffer_size=1)
        raised = False
        try:
            mp_dataset.initialize()
        except Exception as exc:
            print('Got expected exception:', exc)
            raised = True
        if not raised:
            raise Exception('Expected exception')
|
def test_MultiProcDataset_exception_at_item():
    """An exception raised while fetching an item must propagate through iteration."""
    with timeout():
        mp_dataset = MultiProcDataset(
            dataset={'class': 'MapDatasetWrapper', 'map_dataset': _MyCustomMapDatasetThrowingExceptionAtItem},
            num_workers=1, buffer_size=1)
        mp_dataset.initialize()
        raised = False
        try:
            dummy_iter_dataset(mp_dataset)
        except Exception as exc:
            print('Got expected exception:', exc)
            raised = True
        if not raised:
            raise Exception('Expected exception')
|
class _MyCustomDummyMapDataset(MapDatasetBase):
    """Tiny deterministic map dataset: 2 items of zeros with per-item lengths 5 and 7."""
    def __init__(self):
        super().__init__(data_types={'data': {'shape': (None, 3)}})
    def __len__(self):
        return 2
    def __getitem__(self, item):
        # varying seq length per item, so seq contents can be compared after pickling
        return {'data': numpy.zeros((((item * 2) + 5), 3))}
|
def test_MultiProcDataset_pickle():
    """A MultiProcDataset survives a pickle round-trip and yields identical seqs afterwards."""
    import pickle
    with timeout():
        mp_dataset = MultiProcDataset(
            dataset={'class': 'MapDatasetWrapper', 'map_dataset': _MyCustomDummyMapDataset},
            num_workers=1, buffer_size=1)
        mp_dataset.initialize()
        seqs_before = dummy_iter_dataset(mp_dataset)
        blob = pickle.dumps(mp_dataset)
    with timeout():
        restored = pickle.loads(blob)
        seqs_after = dummy_iter_dataset(restored)
        compare_dataset_seqs(seqs_before, seqs_after)
|
def test_config_net_dict1():
    """Pretrain from config: epoch 1 has hidden_0 only, epoch 2 both; epoch 3 equals epoch 2."""
    config = Config()
    config.update(config_dict)
    config.typed_dict['network'] = net_dict
    pretrain = pretrain_from_config(config)
    assert_equal(pretrain.get_train_num_epochs(), 2)
    jsons = [pretrain.get_network_json_for_epoch(ep) for ep in (1, 2, 3)]
    assert_in('hidden_0', jsons[0])
    assert_not_in('hidden_1', jsons[0])
    assert_in('hidden_0', jsons[1])
    assert_in('hidden_1', jsons[1])
    # past the pretrain schedule the network stays fixed
    assert_equal(jsons[1], jsons[2])
|
def test_config_net_dict2():
    """Pretrain from config with the second network dict spans 3 epochs."""
    config = Config()
    config.update(config_dict)
    config.typed_dict['network'] = net_dict2
    assert_equal(pretrain_from_config(config).get_train_num_epochs(), 3)
|
@contextlib.contextmanager
def make_scope():
    """Yield a fresh TF session bound to its own new graph."""
    graph = tf.Graph()
    with graph.as_default(), tf_compat.v1.Session(graph=graph) as session:
        yield session
|
def build_resnet(conv_time_dim):
    """
    Construct a ResNet-v2-like RETURNN network dict for test_ResNet below.

    :param bool conv_time_dim: True -> convolve over the real time axis (dilated convs,
        shortcut paths cropped in time to stay aligned); False -> convolve over a fixed
        window axis of pre-windowed input.
    :return: tuple of (network dict, context window size in frames)
    :rtype: (dict[str,dict[str]], int)

    NOTE(review): the indentation of this block was mangled in this copy; the nesting
    below is reconstructed from the data flow — verify against the upstream source.
    """
    dropout = 0
    L2 = 0.1
    filter_size = (3, 3)
    context_window = 1
    window = 1
    feature_dim = 64
    channel_num = 3
    num_inputs = ((feature_dim * channel_num) * window)
    num_outputs = 9001
    EpochSplit = 6
    cur_feat_dim = feature_dim
    # layers are appended sequentially; _last tracks the most recently added layer name
    global _last, network
    network = {}
    _last = 'data'

    def add_sequential_layer(name, d, from_=None):
        # register layer dict `d` as `name`, wiring its 'from' to `from_` or the previous layer
        global _last, network
        assert ('from' not in d)
        if (from_ is not None):
            d['from'] = from_
        else:
            d['from'] = [_last]
        assert (name not in network)
        network[name] = d
        _last = name

    def fixed_padding(prefix, kernel_size, data_format, conv_time_dim):
        'Pads the input along the spatial dimensions independently of input size.'
        pad_total = (kernel_size - 1)
        feature_pad_beg = (pad_total // 2)
        feature_pad_end = (pad_total - feature_pad_beg)
        # time axis is never padded; time alignment is handled via explicit crops below
        time_pad_beg = 0
        time_pad_end = 0
        # NOTE(review): pads (end, end) on the feature axis; feature_pad_beg is unused — confirm intended
        return add_sequential_layer(('%s_pad' % prefix), {'class': 'pad', 'axes': ('s:0', 's:1'), 'padding': [(time_pad_beg, time_pad_end), (feature_pad_end, feature_pad_end)]})

    def conv2d_fixed_padding(prefix, filters, kernel_size, strides, dilation_rate, data_format, conv_time_dim, source=None):
        'Strided 2-D convolution with explicit padding.'
        fixed_padding(('%s_pad' % prefix), kernel_size, data_format, conv_time_dim)
        padding = 'VALID'
        # in time-dim mode, stride only the feature axis; time striding is done via slice layers
        strides = ((1, strides) if conv_time_dim else strides)
        filter_size = (kernel_size, kernel_size)
        dilation_rate = ((dilation_rate, 1) if conv_time_dim else (1, 1))
        if (data_format == 'channels_first'):
            NCHW = True
        else:
            NCHW = False
        add_sequential_layer(('%s_conv' % prefix), {'class': 'conv', 'n_out': filters, 'filter_size': filter_size, 'auto_use_channel_first': NCHW, 'strides': strides, 'dilation_rate': dilation_rate, 'padding': padding, 'activation': None, 'with_bias': False, 'dropout': 0, 'forward_weights_init': 'xavier', 'L2': L2}, from_=source)
        return ('%s_conv' % prefix)

    def _building_block_v2(prefix, filters, projection_shortcut, strides, dilation_rate, dilation_rate_multiplier, kernel_size, data_format, conv_time_dim):
        'A single block for ResNet v2, without a bottleneck.\n\n Batch normalization then ReLu then convolution as described by:\n Identity Mappings in Deep Residual Networks\n https://arxiv.org/pdf/1603.05027.pdf\n by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.\n '
        add_sequential_layer(('%s_in' % prefix), {'class': 'copy'})
        add_sequential_layer(('%s_relu1' % prefix), {'class': 'activation', 'activation': 'relu', 'batch_norm': False})
        if conv_time_dim:
            # stride via a slice layer so the dilated conv keeps full time resolution
            conv2d_fixed_padding(prefix=('%s_conv_1' % prefix), filters=filters, kernel_size=kernel_size, strides=1, dilation_rate=dilation_rate, data_format=data_format, conv_time_dim=conv_time_dim)
            add_sequential_layer(('%s_stride' % prefix), {'class': 'slice', 'axis': 's:1', 'slice_step': strides})
            dilation_rate *= dilation_rate_multiplier
        else:
            conv2d_fixed_padding(prefix=('%s_conv_1' % prefix), filters=filters, kernel_size=kernel_size, strides=strides, dilation_rate=dilation_rate, data_format=data_format, conv_time_dim=conv_time_dim)
        add_sequential_layer(('%s_relu2' % prefix), {'class': 'activation', 'activation': 'relu', 'batch_norm': False})
        conv = conv2d_fixed_padding(prefix=('%s_conv_2' % prefix), filters=filters, kernel_size=kernel_size, strides=1, dilation_rate=dilation_rate, data_format=data_format, conv_time_dim=conv_time_dim)
        result = ('%s_conv_2' % prefix)
        # per-conv shrink of the time axis (VALID padding, no time padding)
        crop_lr = (filter_size[0] - 1)
        crop_left = (crop_lr // 2)
        crop_right = (crop_lr - crop_left)
        if conv_time_dim:
            if (dilation_rate_multiplier > 1):
                # dilation changed between the two convs; crop accounts for both rates
                crop = int((crop_left * ((dilation_rate / dilation_rate_multiplier) + dilation_rate)))
            else:
                crop = int(((crop_left * 2) * dilation_rate))
            add_sequential_layer(('%s_crop' % prefix), {'class': 'slice', 'axis': 'T', 'slice_start': crop, 'slice_end': (- crop)}, from_=('%s_relu1' % prefix))
            shortcut = ('%s_crop' % prefix)
            if (projection_shortcut is not None):
                shortcut = projection_shortcut(source=shortcut)
        else:
            crop = crop_left
            add_sequential_layer(('%s_crop_1' % prefix), {'class': 'slice', 'axis': 'T', 'slice_start': crop, 'slice_end': (- crop)}, from_=('%s_relu1' % prefix))
            shortcut = ('%s_crop_1' % prefix)
            if (projection_shortcut is not None):
                shortcut = projection_shortcut(source=shortcut)
            # second crop mirrors the shrink of the second conv
            add_sequential_layer(('%s_crop_2' % prefix), {'class': 'slice', 'axis': 'T', 'slice_start': crop, 'slice_end': (- crop)}, from_=shortcut)
            shortcut = ('%s_crop_2' % prefix)
        add_sequential_layer(('%s_out' % prefix), {'class': 'combine', 'kind': 'add'}, from_=[conv, shortcut])
        return

    def block_layer(prefix, filters, bottleneck, block_fn, blocks, strides, dilation_rate, dilation_rate_multiplier, kernel_size, data_format, conv_time_dim):
        'Creates one layer of blocks for the ResNet model.'
        filters_out = ((filters * 4) if bottleneck else filters)
        if (not conv_time_dim):
            strides = (dilation_rate_multiplier, strides)
        def projection_shortcut(source=None):
            # 1x1 conv to match filter count (and stride) on the shortcut path
            return conv2d_fixed_padding(prefix=('%s_sc' % prefix), filters=filters_out, kernel_size=1, strides=strides, dilation_rate=1, data_format=data_format, conv_time_dim=conv_time_dim, source=source)
        # only the first block of a stage projects/strides; the rest are plain
        block_fn(('%s_0' % prefix), filters, projection_shortcut, strides, dilation_rate, dilation_rate_multiplier, kernel_size, data_format, conv_time_dim)
        dilation_rate *= dilation_rate_multiplier
        for i in range(1, blocks):
            block_fn(('%s_%i' % (prefix, i)), filters, None, 1, dilation_rate, 1, kernel_size, data_format, conv_time_dim)
        return add_sequential_layer(('%s_out' % prefix), {'class': 'copy'})

    # --- overall architecture configuration ---
    resnet_version = 2
    conv_time_dim = conv_time_dim
    bottleneck = False
    num_filters = 64
    first_kernel_size = 5
    kernel_size = 3
    conv_stride = (2 if conv_time_dim else (1, 2))
    first_pool_size = (1, 2)
    first_pool_stride = (1, 1)
    last_pool_size = (2, 2)
    last_pool_stride = ((1, 2) if conv_time_dim else (2, 2))
    block_sizes = [2, 2, 2, 2]
    block_strides = [1, 2, 2, 2]
    block_dilations = [1, 1, 1, 2]
    block_fn = _building_block_v2
    data_format = 'channels_first'
    pre_activation = (resnet_version == 2)
    if (data_format == 'channels_first'):
        NCHW = True
    else:
        NCHW = False
    if conv_time_dim:
        # accumulate the total time-axis reduction to derive the needed context window
        multiplier = (1 if bottleneck else 2)
        building_block_reduction = ((multiplier * 2) * (kernel_size // 2))
        total_reduction = (first_kernel_size - 1)
        dilation_rate_multiplier = 1
        total_reduction += (dilation_rate_multiplier * (first_pool_size[0] - 1))
        for (i, bs) in enumerate(block_sizes):
            total_reduction += ((building_block_reduction / multiplier) * dilation_rate_multiplier)
            dilation_rate_multiplier *= block_dilations[i]
            total_reduction += ((building_block_reduction / multiplier) * dilation_rate_multiplier)
            total_reduction += ((building_block_reduction * (bs - 1)) * dilation_rate_multiplier)
        total_reduction += (dilation_rate_multiplier * (last_pool_size[0] - 1))
        dilation_rate_multiplier *= 2
        print(total_reduction, dilation_rate_multiplier)
        # final fc1 conv (filter_size (3,2)) also consumes time context
        total_reduction += (dilation_rate_multiplier * 2)
        print(total_reduction, dilation_rate_multiplier)
        time_dim_reduction = total_reduction
        context_window = int(((2 * (total_reduction // 2)) + 1))
    else:
        time_dim_reduction = 0
    print('time_dim_reduction: ', time_dim_reduction)
    print('context_window: ', context_window)
    # initial convolution
    conv2d_fixed_padding(prefix='c_init', filters=num_filters, kernel_size=first_kernel_size, strides=conv_stride, dilation_rate=1, data_format=data_format, conv_time_dim=conv_time_dim)
    dilation_rate = 1
    if (resnet_version == 1):
        add_sequential_layer('c_init_relu', {'class': 'activation', 'activation': 'relu', 'batch_norm': False})
    if first_pool_size:
        if conv_time_dim:
            dr = (dilation_rate, 1)
        else:
            dr = (1, 1)
        pad_total = (first_pool_size[1] - 1)
        pad_beg = (pad_total // 2)
        pad_end = (pad_total - pad_beg)
        add_sequential_layer('c_init_pool_pad', {'class': 'pad', 'axes': 's:1', 'padding': (pad_beg, pad_end)})
        add_sequential_layer('c_init_pool', {'class': 'pool', 'mode': 'max', 'padding': 'VALID', 'pool_size': first_pool_size, 'strides': first_pool_stride, 'dilation_rate': dr, 'use_channel_first': NCHW})
    print('dr: ', dilation_rate)
    # the four residual stages
    for (i, num_blocks) in enumerate(block_sizes):
        filters = (num_filters * (2 ** i))
        block_layer(prefix=('c_%i' % i), filters=filters, bottleneck=bottleneck, block_fn=block_fn, blocks=num_blocks, strides=block_strides[i], dilation_rate=dilation_rate, dilation_rate_multiplier=block_dilations[i], kernel_size=kernel_size, data_format=data_format, conv_time_dim=conv_time_dim)
        dilation_rate *= block_dilations[i]
        print('dr: ', dilation_rate)
    if pre_activation:
        add_sequential_layer('c_out_relu', {'class': 'activation', 'activation': 'relu', 'batch_norm': False})
    if last_pool_size:
        pad_total = (last_pool_size[1] - 1)
        pad_beg = (pad_total // 2)
        pad_end = (pad_total - pad_beg)
        add_sequential_layer('c_last_pool_pad', {'class': 'pad', 'axes': 's:1', 'padding': (pad_beg, pad_end)})
        if conv_time_dim:
            dr = (dilation_rate, 1)
            add_sequential_layer('c_last_pool', {'class': 'pool', 'mode': 'max', 'padding': 'VALID', 'pool_size': last_pool_size, 'strides': (1, 1), 'dilation_rate': dr, 'use_channel_first': NCHW})
            add_sequential_layer('c_last_stride', {'class': 'slice', 'axis': 's:1', 'slice_step': last_pool_stride[1]})
            dilation_rate *= 2
        else:
            dr = (1, 1)
            add_sequential_layer('c_last_pool', {'class': 'pool', 'mode': 'max', 'padding': 'VALID', 'pool_size': last_pool_size, 'strides': last_pool_stride, 'dilation_rate': dr, 'use_channel_first': NCHW})
    if conv_time_dim:
        dr = (dilation_rate, 1)
    else:
        dr = (1, 1)
    '\n See https://arxiv.org/pdf/1611.09288.pdf\n Fully connected layers are equivalent to, and can be trivially replaced by,\n convolutional layers with kernel (1×1) (except the first convolution which\n has kernel size matching the output of the conv stack before being flattened\n for the fully connected layers).\n '
    add_sequential_layer('fc1', {'class': 'conv', 'n_out': 2048, 'filter_size': (3, 2), 'auto_use_channel_first': NCHW, 'strides': (1, 1), 'dilation_rate': dr, 'padding': 'VALID', 'activation': None, 'with_bias': False, 'dropout': 0, 'forward_weights_init': 'xavier', 'L2': L2})
    add_sequential_layer('fc2', {'class': 'conv', 'n_out': 2048, 'filter_size': (1, 1), 'auto_use_channel_first': NCHW, 'strides': (1, 1), 'dilation_rate': (1, 1), 'padding': 'VALID', 'activation': None, 'with_bias': False, 'dropout': 0, 'forward_weights_init': 'xavier', 'L2': L2})
    add_sequential_layer('fc3', {'class': 'conv', 'n_out': 2048, 'filter_size': (1, 1), 'auto_use_channel_first': NCHW, 'strides': (1, 1), 'dilation_rate': (1, 1), 'padding': 'VALID', 'activation': None, 'with_bias': False, 'dropout': 0, 'forward_weights_init': 'xavier', 'L2': L2})
    add_sequential_layer('fc4', {'class': 'conv', 'n_out': 1024, 'filter_size': (1, 1), 'auto_use_channel_first': NCHW, 'strides': (1, 1), 'dilation_rate': (1, 1), 'padding': 'VALID', 'activation': None, 'with_bias': False, 'dropout': 0, 'forward_weights_init': 'xavier', 'L2': L2})
    add_sequential_layer('fc5', {'class': 'conv', 'n_out': num_outputs, 'filter_size': (1, 1), 'auto_use_channel_first': NCHW, 'strides': (1, 1), 'dilation_rate': (1, 1), 'padding': 'VALID', 'activation': None, 'with_bias': False, 'dropout': 0, 'forward_weights_init': 'xavier', 'L2': L2})
    add_sequential_layer('merge', {'class': 'merge_dims', 'axes': ('s:0', 's:1')})
    add_sequential_layer('swap', {'class': 'swap_axes', 'axis1': 's:0', 'axis2': 'f'})
    add_sequential_layer('output', {'class': 'activation', 'activation': 'softmax', 'loss': 'ce'})
    return (network, context_window)
|
def test_ResNet():
    """Test to compare Resnet convolving (window x frequency) vs (time x frequency).

    Batch_norm layers are turned off in oder to compare, since the statistics over the
    windowed input data is a bit different from the plain input (when convolving directing
    over the time dim).
    """
    def sliding_window(seq, window_size):
        # build all overlapping windows of `seq`; result shape (len(seq)-window_size+1, window_size, ...)
        import numpy as np
        import copy
        it = iter(seq)
        win = [it.__next__() for cnt in range(window_size)]
        res_arr = []
        res_arr.append(copy.deepcopy(win))
        for e in it:
            win[:(- 1)] = win[1:]
            win[(- 1)] = e
            res_arr.append(copy.deepcopy(win))
        return np.array(res_arr)
    with make_scope() as session:
        import numpy as np
        import math
        from tensorflow.python.client import timeline
        (net_dict_conv_td, window_size) = build_resnet(conv_time_dim=True)
        (net_dict_windowed, _) = build_resnet(conv_time_dim=False)
        # one frame more than the context window -> two output frames from the time-dim net
        time_size = (window_size + 1)
        data_layer_win = Data(name='win', shape=(window_size, 64, 3), dim=3, batch_dim_axis=0, sparse=False)
        data_layer_win.placeholder = tf_compat.v1.placeholder(shape=(None, window_size, 64, 3), dtype=tf.float32)
        data_layer_nowin = Data(name='nowin', shape=(time_size, 64, 3), dim=3, batch_dim_axis=0, time_dim_axis=1, sparse=False)
        data_layer_nowin.placeholder = tf_compat.v1.placeholder(shape=(None, time_size, 64, 3), dtype=tf.float32)
        extern_data_nowin = ExternData()
        extern_data_nowin.data['data'] = data_layer_nowin
        extern_data_win = ExternData()
        extern_data_win.data['data'] = data_layer_win
        net_conv_td = TFNetwork(extern_data=extern_data_nowin)
        net_conv_td.train_flag = True
        net_conv_td.construct_from_dict(net_dict_conv_td)
        net_conv_td.initialize_params(session)
        net_windowed = TFNetwork(extern_data=extern_data_win)
        net_windowed.train_flag = True
        net_windowed.construct_from_dict(net_dict_windowed)
        net_windowed.initialize_params(session)
        data = np.random.rand(time_size, 64, 3)
        # for the windowed net, the windows become the batch dim
        data_win = sliding_window(data, window_size)
        data = np.array([data])
        feed_dict = {data_layer_nowin.placeholder: data, data_layer_win.placeholder: data_win}
        (res1, res2) = session.run([net_conv_td.layers['output'].output.placeholder, net_windowed.layers['output'].output.placeholder], feed_dict=feed_dict)
        print((res1[0][0] - res2[0][0]))
        print((res1[0][1] - res2[1][0]))
        # frame t of the time-dim net must match window t of the windowed net
        assert math.isclose(np.sum((res1[0][0] - res2[0][0])), 0.0, abs_tol=1e-07)
        assert math.isclose(np.sum((res1[0][1] - res2[1][0])), 0.0, abs_tol=1e-07)
|
def generate_batch(seq_idx, dataset):
    """Build a Batch covering exactly the given sequence of `dataset`."""
    batch = Batch()
    length = dataset.get_seq_length(seq_idx)
    batch.add_frames(seq_idx=seq_idx, seq_start_frame=0, length=length)
    return batch
|
def test_read_all():
    """Iterate all seqs of an ExternSprintDataset and check the total seq count."""
    config = Config()
    config.update(dummyconfig_dict)
    print('Create ExternSprintDataset')
    python_exec = util.which('python')
    if python_exec is None:
        raise unittest.SkipTest('python not found')
    num_seqs = 4
    dataset = ExternSprintDataset(
        [python_exec, sprintExecPath],
        '--*.feature-dimension=2 --*.trainer-output-dimension=3 --*.crnn-dataset=DummyDataset(2,3,num_seqs=%i,seq_len=10)' % num_seqs)
    dataset.init_seq_order(epoch=1)
    seq_idx = 0
    while dataset.is_less_than_num_seqs(seq_idx):
        dataset.load_seqs(seq_idx, seq_idx + 1)
        for key in dataset.get_data_keys():
            value = dataset.get_data(seq_idx, key)
            print('seq idx %i, data %r: %r' % (seq_idx, key, value))
        seq_idx += 1
    assert seq_idx == num_seqs
|
def test_assign_dev_data():
    """Batch generation from an ExternSprintDataset yields the requested number of batches."""
    config = Config()
    config.update(dummyconfig_dict)
    print('Create ExternSprintDataset')
    dataset = ExternSprintDataset(
        [sys.executable, sprintExecPath],
        '--*.feature-dimension=2 --*.trainer-output-dimension=3 --*.crnn-dataset=DummyDataset(2,3,num_seqs=4,seq_len=10)')
    dataset.init_seq_order(epoch=1)
    assert_true(dataset.is_less_than_num_seqs(0))
    recurrent = False
    batch_generator = dataset.generate_batches(recurrent_net=recurrent, batch_size=5)
    assert_equal(len(batch_generator.peek_next_n(2)), 2)
|
def test_window():
    """ExternSprintDataset with window=3 stacks neighboring frames, zero-padded at the borders."""
    input_dim = 2
    output_dim = 3
    num_seqs = 2
    seq_len = 5
    window = 3
    common_kwargs = dict(
        sprintTrainerExecPath=[sys.executable, sprintExecPath],
        sprintConfigStr=' '.join([
            ('--*.feature-dimension=%i' % input_dim),
            ('--*.trainer-output-dimension=%i' % output_dim),
            ('--*.crnn-dataset=DummyDataset(input_dim=%i,output_dim=%i,num_seqs=%i,seq_len=%i)'
             % (input_dim, output_dim, num_seqs, seq_len))]))
    ds_plain = ExternSprintDataset(**common_kwargs)
    ds_windowed = ExternSprintDataset(window=window, **common_kwargs)
    try:
        ds_plain.init_seq_order(epoch=1)
        ds_windowed.init_seq_order(epoch=1)
        ds_plain.load_seqs(0, 1)
        ds_windowed.load_seqs(0, 1)
        assert_equal(ds_plain.get_data_dim('data'), input_dim)
        assert_equal(ds_windowed.get_data_dim('data'), input_dim * window)
        data1 = ds_plain.get_data(0, 'data')
        data2 = ds_windowed.get_data(0, 'data')
        assert_equal(data1.shape, (seq_len, input_dim))
        assert_equal(data2.shape, (seq_len, window * input_dim))
        data2a = data2.reshape(seq_len, window, input_dim)
        print('data1:')
        print(data1)
        print('data2:')
        print(data2)
        print('data1[0]:')
        print(data1[0])
        print('data2[0]:')
        print(data2[0])
        print('data2a[0,0]:')
        print(data2a[0, 0])
        # window center at frame t; edges are zero-padded
        assert_equal(list(data2a[0, 0]), [0] * input_dim)
        assert_equal(list(data2a[0, 1]), list(data1[0]))
        assert_equal(list(data2a[0, 2]), list(data1[1]))
        assert_equal(list(data2a[1, 0]), list(data1[0]))
        assert_equal(list(data2a[1, 1]), list(data1[1]))
        assert_equal(list(data2a[1, 2]), list(data1[2]))
        assert_equal(list(data2a[-1, 2]), [0] * input_dim)
    finally:
        ds_plain._exit_handler()
        ds_windowed._exit_handler()
|
def install_sigint_handler():
    """Install a SIGINT handler that prints tracebacks and then escalates."""
    import signal

    def _on_sigint(signum, frame):
        # print where the signal interrupted us
        print('\nSIGINT at:')
        better_exchook.print_tb(tb=frame, file=sys.stdout)
        print('')
        exited_frame = getattr(sys, 'exited_frame', None)
        if exited_frame is not None:
            print('interrupt_main via:')
            better_exchook.print_tb(tb=exited_frame, file=sys.stdout)
            print('')
            sys.exited_frame = None
            raise Exception('Got SIGINT!')
        print('\nno sys.exited_frame\n')
        # fall back to whatever handler was installed before us
        if old_action:
            old_action()
        else:
            raise KeyboardInterrupt

    old_action = signal.signal(signal.SIGINT, _on_sigint)
|
def test_forward():
    """End-to-end: train-init a tiny TF net, save the epoch-1 model, then forward via the Sprint API."""
    # work inside a fresh temp dir so model files do not pollute the cwd
    tmpdir = mkdtemp('returnn-test-sprint')
    olddir = os.getcwd()
    os.chdir(tmpdir)
    from returnn.datasets.generating import DummyDataset
    seq_len = 5
    n_data_dim = 2
    n_classes_dim = 3
    train_data = DummyDataset(input_dim=n_data_dim, output_dim=n_classes_dim, num_seqs=4, seq_len=seq_len)
    train_data.init_seq_order(epoch=1)
    cv_data = DummyDataset(input_dim=n_data_dim, output_dim=n_classes_dim, num_seqs=2, seq_len=seq_len)
    cv_data.init_seq_order(epoch=1)
    config = '\n'.join(['#!rnn.py', 'use_tensorflow = True', ("model = '%s/model'" % tmpdir), ('num_outputs = %i' % n_classes_dim), ('num_inputs = %i' % n_data_dim), 'network = {"output": {"class": "softmax", "loss": "ce", "from": "data:data"}}', 'num_epochs = 2'])
    # 'config_write' creates the model; 'config' additionally loads it (used by SprintAPI below)
    open('config_write', 'w').write(config)
    open('config', 'w').write((config + ("\nload= '%s/model'" % tmpdir)))
    config = Config()
    config.load_file('config_write')
    engine = Engine(config=config)
    engine.init_train_from_config(config=config, train_data=train_data, dev_data=cv_data, eval_data=None)
    engine.epoch = 1
    engine.save_model(engine.get_epoch_model_filename())
    # reset global state so SprintAPI.init below starts from scratch
    Engine._epoch_model = None
    from returnn.util.basic import BackendEngine
    BackendEngine.selected_engine = None
    inputDim = 2
    outputDim = 3
    SprintAPI.init(inputDim=inputDim, outputDim=outputDim, config='action:forward,configfile:config,epoch:1', targetMode='forward-only')
    assert isinstance(SprintAPI.engine, Engine)
    print('used data keys via net:', SprintAPI.engine.network.get_used_data_keys())
    features = numpy.array([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4], [0.4, 0.5]])
    seq_len = features.shape[0]
    # the Sprint API exchanges features as (dim, time); transpose in and out
    posteriors = SprintAPI._forward('segment1', features.T).T
    assert_equal(posteriors.shape, (seq_len, outputDim))
    SprintAPI.exit()
    os.chdir(olddir)
    shutil.rmtree(tmpdir)
|
@contextlib.contextmanager
def make_scope():
    """Yield a TF session that lives in its own freshly created graph."""
    new_graph = tf.Graph()
    with new_graph.as_default(), tf_compat.v1.Session(graph=new_graph) as session:
        yield session
|
class DummyLoss(Loss):
    """Loss that just forwards the scalar value provided by the owning DummyLayer."""
    # this synthetic loss does not consume any targets
    need_target = False
    def get_value(self):
        """:return: the layer's scalar loss value"""
        assert (self.layer and isinstance(self.layer, DummyLayer))
        return self.layer._get_loss_value()
    def get_error(self):
        """:return: None (no error metric for this loss)"""
        return None
|
class DummyLayer(LayerBase):
    """Layer holding a single scalar variable; its loss is loss_value_factor * x."""
    def __init__(self, initial_value=0.0, loss_value_factor=1.0, **kwargs):
        """
        :param float initial_value: initial value of the scalar parameter
        :param float loss_value_factor: factor applied in _get_loss_value
        """
        super(DummyLayer, self).__init__(**kwargs)
        self.loss_value_factor = loss_value_factor
        # the single trainable scalar; also directly used as the layer output
        self.x = self.add_param(tf.Variable(initial_value))
        self.output.placeholder = self.x
    def _get_loss_value(self):
        """:return: loss_value_factor * x (scalar tensor)"""
        return (self.loss_value_factor * self.x)
    @classmethod
    def get_losses(cls, name, network, output, loss=None, reduce_func=None, layer=None, **kwargs):
        """Always attach a DummyLoss; see LayerBase.get_losses for the parameters."""
        assert (not loss)
        loss = DummyLoss(base_network=network)
        return super(DummyLayer, cls).get_losses(name=name, network=network, output=output, loss=loss, reduce_func=reduce_func, layer=layer, **kwargs)
    @classmethod
    def get_out_data_from_opts(cls, name, **kwargs):
        """Output template: a scalar float32 (no batch dim)."""
        from returnn.tf.util.basic import Data
        return Data(name=('%s_output' % name), batch_dim_axis=None, shape=(), dtype='float32')
|
def test_Updater_GradientDescent():
    """One SGD step with lr=1 on loss 3*x starting at x=5 gives x = 5 - 3 = 2."""
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        cfg = Config()
        net = TFNetwork(extern_data=ExternData(), train_flag=True)
        net.add_layer(name='output', layer_class=DummyLayer, initial_value=5.0, loss_value_factor=3.0)
        net.initialize_params(session=session)
        upd = Updater(config=cfg, network=net)
        upd.set_learning_rate(1.0, session=session)
        upd.set_trainable_vars(net.get_trainable_params())
        upd.init_optimizer_vars(session=session)
        session.run(upd.get_optim_op())
        out_value = session.run(net.get_default_output_layer().output.placeholder)
        assert_almost_equal(out_value, 2.0)
|
def test_Updater_CustomUpdate():
    """A CustomUpdate replaces the optimizer step: var 4.0 gets +13 regardless of the lr."""
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        from returnn.tf.util.basic import CustomUpdate
        cfg = Config()
        net = TFNetwork(extern_data=ExternData(), train_flag=True)
        layer = net.add_layer(name='output', layer_class=DummyLayer, initial_value=4.0)
        assert isinstance(layer, DummyLayer)
        net.initialize_params(session=session)

        class _CustomAdd13(CustomUpdate):
            def update_var(self, var):
                # fixed +13 instead of a gradient-based update
                return tf_compat.v1.assign_add(var, 13.0)

        _CustomAdd13().set_on_var(layer.x)
        upd = Updater(config=cfg, network=net)
        upd.set_learning_rate(1000.0, session=session)
        upd.set_trainable_vars(net.get_trainable_params())
        upd.init_optimizer_vars(session=session)
        session.run(upd.get_optim_op())
        assert_almost_equal(session.run(net.get_default_output_layer().output.placeholder), 17.0)
|
def test_add_check_numerics_ops():
    """add_check_numerics_ops must pass on finite tensors and raise on inf/nan."""
    with make_scope() as session:
        x = tf.constant(3.0, name='x')
        y = tf_compat.v1.log((x * 3), name='y')
        assert isinstance(y, tf.Tensor)
        assert_almost_equal(session.run(y), numpy.log(9.0))
        check = add_check_numerics_ops([y])
        # Finite value: the check op must run without error.
        session.run(check)
        z1 = tf_compat.v1.log((x - 3), name='z1')  # log(0) -> -inf
        assert_equal(str(session.run(z1)), '-inf')
        z2 = tf_compat.v1.log((x - 4), name='z2')  # log(-1) -> nan
        assert_equal(str(session.run(z2)), 'nan')
        check1 = add_check_numerics_ops([z1])
        try:
            session.run(check1)
        except tf.errors.InvalidArgumentError as exc:
            print(('Expected exception: %r' % exc))
        else:
            assert False, 'should have raised an exception'
        check2 = add_check_numerics_ops([z2])
        try:
            session.run(check2)
        except tf.errors.InvalidArgumentError as exc:
            print(('Expected exception: %r' % exc))
        else:
            assert False, 'should have raised an exception'
|
def test_grad_add_check_numerics_ops():
    """
    Like test_add_check_numerics_ops, but the non-finite value appears in a
    gradient: y = 1/x has gradient -1/x^2, which is -inf at x = 0.
    """
    with make_scope() as session:
        x = tf.Variable(initial_value=0.0, name='x')
        session.run(x.initializer)
        y = (1.0 / x)
        grad_x = tf.gradients(y, x)[0]
        print('grad_x:', grad_x.eval())
        assert_equal(str(float('-inf')), '-inf')
        assert_equal(str(grad_x.eval()), '-inf')
        # At x = 1 everything is finite; the checked train op must succeed.
        session.run(x.assign(1.0))
        opt = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=1.0)
        train_op = opt.minimize(y, var_list=[x])
        check = add_check_numerics_ops([train_op])
        session.run(check)
        # At x = 0 the gradient is -inf and the check must raise.
        session.run(x.assign(0.0))
        try:
            session.run(check)
        except tf.errors.InvalidArgumentError as exc:
            print(('Expected exception: %r' % exc))
        else:
            assert False, 'should have raised an exception'
|
def test_Updater_add_check_numerics_ops():
    """
    With config flag "debug_add_check_numerics_ops", the Updater itself must
    detect non-finite values: loss log(x) is fine at x=1, but the first SGD
    step drives x to 0 (1 - d(log x)/dx = 1 - 1), where the next step blows up.
    """
    class _Layer(DummyLayer):
        # Loss that becomes non-finite once x reaches 0.
        def _get_loss_value(self):
            return tf_compat.v1.log(self.x)

    from returnn.tf.network import TFNetwork, ExternData
    from returnn.config import Config
    with make_scope() as session:
        config = Config()
        config.set('debug_add_check_numerics_ops', True)
        network = TFNetwork(extern_data=ExternData(), train_flag=True)
        network.add_layer(name='output', layer_class=_Layer, initial_value=1.0)
        network.initialize_params(session=session)
        updater = Updater(config=config, network=network)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        # First step is finite: x goes from 1.0 to 0.0.
        session.run(updater.get_optim_op())
        assert_almost_equal(session.run(network.get_default_output_layer().output.placeholder), 0.0)
        try:
            session.run(updater.get_optim_op())
        except tf.errors.InvalidArgumentError as exc:
            print(('Expected exception: %r' % exc))
        else:
            assert False, 'should have raised an exception'
|
def test_Updater_simple_batch():
    """Smoke test: one optimizer step on a real batch from Task12AXDataset."""
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        from returnn.datasets.generating import Task12AXDataset
        dataset = Task12AXDataset()
        dataset.init_seq_order(epoch=1)
        extern_data = ExternData()
        extern_data.init_from_dataset(dataset)
        config = Config()
        network = TFNetwork(extern_data=extern_data, train_flag=True)
        # Small 2-layer tanh net with a softmax CE output.
        network.construct_from_dict({'layer1': {'class': 'linear', 'activation': 'tanh', 'n_out': 13, 'from': 'data:data'}, 'layer2': {'class': 'linear', 'activation': 'tanh', 'n_out': 13, 'from': ['layer1']}, 'output': {'class': 'softmax', 'loss': 'ce', 'target': 'classes', 'from': ['layer2']}})
        network.initialize_params(session=session)
        updater = Updater(config=config, network=network)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        from returnn.tf.data_pipeline import FeedDictDataProvider
        batches = dataset.generate_batches(recurrent_net=network.recurrent, batch_size=100, max_seqs=10, max_seq_length=sys.maxsize, used_data_keys=network.used_data_keys)
        data_provider = FeedDictDataProvider(extern_data=extern_data, data_keys=network.used_data_keys, dataset=dataset, batches=batches)
        (feed_dict, _) = data_provider.get_feed_dict(single_threaded=True)
        # Just check that the optimizer op runs with a real feed dict.
        session.run(updater.get_optim_op(), feed_dict=feed_dict)
|
def test_Updater_decouple_constraints():
    """
    Smoke test for decoupled constraints: with "decouple_constraints" enabled,
    the L2 terms are applied as a separate (decoupled) step by the Updater.
    """
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        from returnn.datasets.generating import Task12AXDataset
        dataset = Task12AXDataset()
        dataset.init_seq_order(epoch=1)
        extern_data = ExternData()
        extern_data.init_from_dataset(dataset)
        config = Config({'decouple_constraints': True})
        network = TFNetwork(extern_data=extern_data, train_flag=True)
        # Same net as test_Updater_simple_batch, but with L2 on the hidden layers.
        network.construct_from_dict({'layer1': {'class': 'linear', 'activation': 'tanh', 'n_out': 13, 'L2': 0.01, 'from': 'data:data'}, 'layer2': {'class': 'linear', 'activation': 'tanh', 'n_out': 13, 'L2': 0.01, 'from': 'layer1'}, 'output': {'class': 'softmax', 'loss': 'ce', 'target': 'classes', 'from': 'layer2'}})
        network.initialize_params(session=session)
        updater = Updater(config=config, network=network)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        update_op = updater.get_optim_op()
        assert updater.decouple_constraints
        from returnn.tf.data_pipeline import FeedDictDataProvider
        batches = dataset.generate_batches(recurrent_net=network.recurrent, batch_size=100, max_seqs=10, max_seq_length=sys.maxsize, used_data_keys=network.used_data_keys)
        data_provider = FeedDictDataProvider(extern_data=extern_data, data_keys=network.used_data_keys, dataset=dataset, batches=batches)
        (feed_dict, _) = data_provider.get_feed_dict(single_threaded=True)
        session.run(update_op, feed_dict=feed_dict)
|
def test_Updater_decouple_constraints_simple_graph():
    """
    Numeric check of decoupled constraints on a single scalar variable:
    the data is all zeros, so the main loss gradient is 0 and only the
    decoupled L2 decay acts: var -> 1.0 - 2*0.01 (d(0.01*x^2)/dx = 0.02*x).
    """
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        config = Config({'decouple_constraints': True, 'decouple_constraints_factor': 1.0})
        extern_data = ExternData({'data': dict(shape=(), dtype='float32')})
        network = TFNetwork(config=config, extern_data=extern_data, train_flag=True)
        network.construct_from_dict({'var': {'class': 'variable', 'shape': (), 'init': 1.0, 'L2': 0.01}, 'loss': {'class': 'eval', 'from': ['data', 'var'], 'eval': 'source(0, auto_convert=False) * source(1, auto_convert=False)', 'loss': 'as_is'}})
        network.initialize_params(session=session)
        var = network.get_layer('var').output.placeholder
        assert_equal(session.run(var), 1.0)
        updater = Updater(config=config, network=network, initial_learning_rate=1.0)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        update_op = updater.get_optim_op()
        assert updater.decouple_constraints
        tf_util.print_graph_output(update_op)
        # Batch of three zero-valued scalars: zero main-loss gradient.
        session.run(update_op, feed_dict={extern_data.data['data'].placeholder: [0.0, 0.0, 0.0], extern_data.get_batch_info().dim: 3})
        assert_almost_equal(session.run(var), (1.0 - (2.0 * 0.01)))
|
def test_Updater_decouple_constraints_simple_graph_grad_accum():
    """
    Same setup as test_Updater_decouple_constraints_simple_graph, but with
    accum_grad_multiple_step=2: the variable must only change on every
    second step, each time by the decoupled L2 decay 2*0.01*var.
    """
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        config = Config({'decouple_constraints': True, 'decouple_constraints_factor': 1.0, 'accum_grad_multiple_step': 2})
        extern_data = ExternData({'data': dict(shape=(), dtype='float32')})
        network = TFNetwork(config=config, extern_data=extern_data, train_flag=True)
        network.construct_from_dict({'var': {'class': 'variable', 'shape': (), 'init': 1.0, 'L2': 0.01}, 'loss': {'class': 'eval', 'from': ['data', 'var'], 'eval': 'source(0, auto_convert=False) * source(1, auto_convert=False)', 'loss': 'as_is'}})
        network.initialize_params(session=session)
        var = network.get_layer('var').output.placeholder
        assert_equal(session.run(var), 1.0)
        updater = Updater(config=config, network=network, initial_learning_rate=1.0)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        update_op = updater.get_optim_op()
        assert updater.decouple_constraints
        tf_util.print_graph_output(update_op)
        assert_equal(session.run(updater.global_train_step), 0)
        expected_var = 1.0
        for i in range(10):
            print('Run step', i)
            session.run(update_op, feed_dict={extern_data.data['data'].placeholder: [0.0, 0.0, 0.0], extern_data.get_batch_info().dim: 3})
            # The global step counter advances every call, ...
            assert_equal(session.run(updater.global_train_step), (i + 1))
            var_value = session.run(var)
            print('var:', var_value)
            # ... but the actual variable update only happens every 2nd step.
            if ((i % 2) == 1):
                expected_var -= ((2.0 * 0.01) * expected_var)
            assert_almost_equal(var_value, expected_var)
|
def test_Updater_multiple_optimizers():
    """
    Per-layer "updater_opts" with different optimizer classes must yield
    multiple optimizer instances (Adam, Adagrad, plus the default one for
    the remaining params -> 3 in total).
    """
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        from returnn.datasets.generating import Task12AXDataset
        dataset = Task12AXDataset()
        dataset.init_seq_order(epoch=1)
        extern_data = ExternData()
        extern_data.init_from_dataset(dataset)
        config = Config()
        network = TFNetwork(extern_data=extern_data, train_flag=True)
        network.construct_from_dict({'layer1': {'class': 'linear', 'activation': 'tanh', 'n_out': 13, 'from': 'data:data', 'updater_opts': {'optimizer': {'class': 'Adam'}}}, 'layer2': {'class': 'linear', 'activation': 'tanh', 'n_out': 13, 'from': ['layer1'], 'updater_opts': {'optimizer': {'class': 'Adagrad'}}}, 'output': {'class': 'softmax', 'loss': 'ce', 'target': 'classes', 'from': ['layer2']}})
        network.initialize_params(session=session)
        updater = Updater(config=config, network=network)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        optim_op = updater.get_optim_op()
        assert (len(updater.optimizers) == 3)
        from returnn.tf.data_pipeline import FeedDictDataProvider
        batches = dataset.generate_batches(recurrent_net=network.recurrent, batch_size=100, max_seqs=10, max_seq_length=sys.maxsize, used_data_keys=network.used_data_keys)
        data_provider = FeedDictDataProvider(extern_data=extern_data, data_keys=network.used_data_keys, dataset=dataset, batches=batches)
        (feed_dict, _) = data_provider.get_feed_dict(single_threaded=True)
        session.run(optim_op, feed_dict=feed_dict)
|
def test_Updater_multiple_optimizers_and_opts():
    """
    Like test_Updater_multiple_optimizers, but with extra per-layer updater
    options (grad accumulation, lr multiplier, gradient noise) mixed in.
    """
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        from returnn.datasets.generating import Task12AXDataset
        dataset = Task12AXDataset()
        dataset.init_seq_order(epoch=1)
        extern_data = ExternData()
        extern_data.init_from_dataset(dataset)
        config = Config()
        network = TFNetwork(extern_data=extern_data, train_flag=True)
        network.construct_from_dict({'layer1': {'class': 'linear', 'activation': 'tanh', 'n_out': 13, 'from': 'data:data', 'updater_opts': {'optimizer': {'class': 'Adam'}, 'accum_grad_multiple_step': 2}}, 'layer2': {'class': 'linear', 'activation': 'tanh', 'n_out': 13, 'from': ['layer1'], 'updater_opts': {'optimizer': {'class': 'Adagrad', 'learning_rate_multiplier': 3}, 'gradient_noise': 0.1}}, 'output': {'class': 'softmax', 'loss': 'ce', 'target': 'classes', 'from': ['layer2']}})
        network.initialize_params(session=session)
        updater = Updater(config=config, network=network)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        optim_op = updater.get_optim_op()
        # Adam + Adagrad + default optimizer for the output layer.
        assert (len(updater.optimizers) == 3)
        from returnn.tf.data_pipeline import FeedDictDataProvider
        batches = dataset.generate_batches(recurrent_net=network.recurrent, batch_size=100, max_seqs=10, max_seq_length=sys.maxsize, used_data_keys=network.used_data_keys)
        data_provider = FeedDictDataProvider(extern_data=extern_data, data_keys=network.used_data_keys, dataset=dataset, batches=batches)
        (feed_dict, _) = data_provider.get_feed_dict(single_threaded=True)
        session.run(optim_op, feed_dict=feed_dict)
|
def load_data():
    """
    Load dev/eval/train datasets for the current config.

    NOTE(review): ``config`` is not defined here, so it must be a module-level
    global set up elsewhere — confirm. Also this function shadows the
    ``load_data`` name it imports from returnn.__main__; consider renaming.

    :return: (dev_data, eval_data, train_data)
    """
    from returnn.__main__ import load_data
    (dev_data, _) = load_data(config, 0, 'dev', chunking=config.value('chunking', ''), seq_ordering='sorted', shuffle_frames_of_nseqs=0)
    (eval_data, _) = load_data(config, 0, 'eval', chunking=config.value('chunking', ''), seq_ordering='sorted', shuffle_frames_of_nseqs=0)
    (train_data, _) = load_data(config, 0, 'train')
    return (dev_data, eval_data, train_data)
|
def test_determinism_of_vanillalstm():
    """
    Train two independently created engines for one run each and require
    bitwise-identical parameters afterwards (training must be deterministic).
    """
    def create_engine():
        # Build a fresh engine with its own datasets, batches and optimizer state.
        (dev_data, eval_data, train_data) = load_data()
        engine = Engine()
        engine.init_train_from_config(config, train_data, dev_data, eval_data)
        engine.init_train_epoch()
        engine.train_batches = engine.train_data.generate_batches(recurrent_net=engine.network.recurrent, batch_size=engine.batch_size, max_seqs=engine.max_seqs, max_seq_length=engine.max_seq_length, seq_drop=engine.seq_drop, shuffle_batches=engine.shuffle_batches, used_data_keys=engine.network.used_data_keys)
        engine.updater.set_learning_rate(engine.learning_rate, session=engine.tf_session)
        engine.updater.init_optimizer_vars(session=engine.tf_session)
        return engine

    def train_engine_fetch_vars(engine):
        # NOTE(review): get_feed_dict() is called twice and both results are
        # unused; the Runner builds its own feed dicts. Confirm whether these
        # calls are needed (e.g. to advance the data provider) or are dead code.
        data_provider = engine._get_data_provider(dataset=engine.train_data, batches=engine.train_batches, feed_dict=True)
        (feed_dict, _) = data_provider.get_feed_dict(single_threaded=True)
        trainer = Runner(engine=engine, dataset=engine.train_data, batches=engine.train_batches, train=True)
        (feed_dict, _) = data_provider.get_feed_dict(single_threaded=True)
        trainer.run(report_prefix='One Run')
        return [e.eval(engine.tf_session) for e in engine.network.get_params_list()]

    e1 = create_engine()
    r1 = train_engine_fetch_vars(e1)
    e2 = create_engine()
    r2 = train_engine_fetch_vars(e2)
    # Both runs must produce exactly the same parameter values.
    assert_array_equal(r1, r2)
|
def pickle_dumps(obj):
    """Serialize obj to a bytes string via the (TaskSystem) Pickler."""
    buf = BytesIO()
    Pickler(buf).dump(obj)
    return buf.getvalue()
|
def pickle_loads(s):
    """Inverse of pickle_dumps: deserialize bytes via the (TaskSystem) Unpickler."""
    return Unpickler(BytesIO(s)).load()
|
def test_pickle_anon_new_class():
    """A new-style class defined inside a function can be pickled and rebuilt."""
    class Foo(object):
        a = 'class'
        b = 'foo'

        def __init__(self):
            self.a = 'hello'

        def f(self, a):
            return a

    restored = pickle_loads(pickle_dumps(Foo))
    assert inspect.isclass(restored)
    # It is a reconstructed copy, not the same object.
    assert restored is not Foo
    # Class attributes survive the round trip.
    assert restored.a == 'class'
    assert restored.b == 'foo'
    obj = restored()
    # __init__ still runs and shadows the class attribute on the instance.
    assert obj.a == 'hello'
    assert obj.b == 'foo'
    assert obj.f(42) == 42
|
def test_pickle_anon_old_class():
    """Same as test_pickle_anon_new_class, but without an explicit object base."""
    class Foo():
        a = 'class'
        b = 'foo'

        def __init__(self):
            self.a = 'hello'

        def f(self, a):
            return a

    restored = pickle_loads(pickle_dumps(Foo))
    assert inspect.isclass(restored)
    assert restored is not Foo
    # Class attributes survive the round trip.
    assert restored.a == 'class'
    assert restored.b == 'foo'
    obj = restored()
    # __init__ still runs and shadows the class attribute on the instance.
    assert obj.a == 'hello'
    assert obj.b == 'foo'
    assert obj.f(42) == 42
|
def test_pickle_inst_anon_class():
    """An *instance* of a locally defined class can be pickled and rebuilt."""
    class Foo(object):
        a = 'class'
        b = 'foo'

        def __init__(self):
            self.a = 'hello'

        def f(self, a):
            return a

    restored = pickle_loads(pickle_dumps(Foo()))
    # Instance attribute shadows the class attribute; both are accessible.
    assert restored.a == 'hello'
    assert restored.b == 'foo'
    assert restored.f(42) == 42
|
class DemoClass:
    """Minimal module-level class whose bound method gets pickled in test_pickle."""

    def method(self):
        """:return: the constant 42"""
        return 42
|
def test_pickle():
    """A bound method of a module-level class survives a pickle round trip."""
    method_copy = pickle_loads(pickle_dumps(DemoClass().method))
    assert_equal(method_copy(), 42)
|
def pickle_dumps(obj):
    """
    Serialize obj to a bytes string via the (TaskSystem) Pickler.

    NOTE(review): duplicate of the pickle_dumps defined earlier in this file;
    this redefinition shadows the earlier one — consider removing one copy.
    """
    sio = BytesIO()
    p = Pickler(sio)
    p.dump(obj)
    return sio.getvalue()
|
def pickle_loads(s):
    """
    Deserialize bytes via the (TaskSystem) Unpickler.

    NOTE(review): duplicate of the pickle_loads defined earlier in this file;
    this redefinition shadows the earlier one — consider removing one copy.
    """
    p = Unpickler(BytesIO(s))
    return p.load()
|
def find_numpy_shared_by_shmid(shmid):
    """
    Look up the server-side SharedNumpyArray for a shared-memory segment id.

    :param shmid: shared-memory segment id
    :return: matching server instance, or None if not found
    """
    for instance in SharedNumpyArray.ServerInstances:
        # Sanity-check every registered server instance while scanning.
        assert isinstance(instance, SharedNumpyArray)
        assert instance.mem is not None
        assert instance.mem.shmid > 0
        if instance.mem.shmid == shmid:
            return instance
    return None
|
def have_working_shmget():
    """
    Check (and cache in a module global) whether SysV shmget works here.
    The result is reused by the @unittest.skipIf decorators below.

    :rtype: bool
    """
    global _have_working_shmget
    if (_have_working_shmget is None):
        # Probe only once per process.
        _have_working_shmget = SharedMem.is_shmget_functioning()
        print('shmget functioning:', _have_working_shmget)
    return _have_working_shmget
|
@unittest.skipIf((not have_working_shmget()), 'shmget does not work')
def test_shmget_functioning():
    """Sanity check: the probe used by the skip conditions reports success."""
    assert SharedMem.is_shmget_functioning()
|
@unittest.skipIf((not have_working_shmget()), 'shmget does not work')
def test_pickle_numpy():
    """
    Numpy arrays are pickled through shared memory: the unpickled array is a
    client view backed by the same segment the (server) pickler registered.
    """
    m = numpy.random.randn(10, 10)
    p = pickle_dumps(m)
    m2 = pickle_loads(p)
    assert isinstance(m2, numpy.ndarray)
    assert numpy.allclose(m, m2)
    # The unpickled array is backed by a client-side shared segment.
    assert isinstance(m2.base, SharedNumpyArray)
    shared_client = m2.base
    assert (not shared_client.is_server)
    # Find the matching server-side instance via the segment id.
    shared_server = find_numpy_shared_by_shmid(m2.base.mem.shmid)
    assert shared_server.is_server
    assert numpy.allclose(m, shared_server.create_numpy_array())
    assert numpy.allclose(m, shared_client.create_numpy_array())
    assert shared_server.is_in_use()
    assert shared_client.is_in_use()
    # Releasing the client view frees the segment for reuse on the server.
    numpy_set_unused(m2)
    assert (not shared_server.is_in_use())
    assert (shared_client.mem is None)
|
@unittest.skipIf((not have_working_shmget()), 'shmget does not work')
def test_pickle_numpy_scalar():
    """Same as test_pickle_numpy, but for a minimal 1-element array."""
    m = numpy.array([numpy.random.randn()])
    assert isinstance(m, numpy.ndarray)
    assert (m.shape == (1,))
    assert (m.nbytes >= 1)
    p = pickle_dumps(m)
    m2 = pickle_loads(p)
    assert isinstance(m2, numpy.ndarray)
    assert numpy.allclose(m, m2)
    # Even tiny arrays go through the shared-memory path.
    assert isinstance(m2.base, SharedNumpyArray)
    shared_client = m2.base
    assert (not shared_client.is_server)
    shared_server = find_numpy_shared_by_shmid(m2.base.mem.shmid)
    assert shared_server.is_server
    assert numpy.allclose(m, shared_server.create_numpy_array())
    assert numpy.allclose(m, shared_client.create_numpy_array())
    assert shared_server.is_in_use()
    assert shared_client.is_in_use()
    # Releasing the client view frees the segment on the server side.
    numpy_set_unused(m2)
    assert (not shared_server.is_in_use())
    assert (shared_client.mem is None)
|
@unittest.skipIf((not have_working_shmget()), 'shmget does not work')
def test_pickle_gc_aggressive():
    """
    Garbage collection must not release the shared segment while the
    unpickled array is still referenced.
    """
    m = numpy.random.randn(10, 10)
    p = pickle_dumps(m)
    m2 = pickle_loads(p)
    assert isinstance(m2, numpy.ndarray)
    assert numpy.allclose(m, m2)
    assert isinstance(m2.base, SharedNumpyArray)
    print(('refcount: %i' % sys.getrefcount(m2.base)))
    gc.collect()
    gc.collect()
    print(('refcount: %i' % sys.getrefcount(m2.base)))
    # Still referenced via m2, so the segment must still be marked in use.
    assert m2.base.is_in_use()
    # NOTE(review): `server` is never read afterwards — presumably it only
    # exercises the lookup and/or keeps a reference across the final gc; confirm.
    server = find_numpy_shared_by_shmid(m2.base.mem.shmid)
    m2 = None
    gc.collect()
|
@unittest.skipIf((not have_working_shmget()), 'shmget does not work')
def test_pickle_multiple():
    """Repeatedly pickle lists of 1-3 arrays; each must round-trip via shared memory."""
    for i in range(20):
        # NOTE(review): the comprehension variable `i` shadows the outer loop
        # variable; it is unused, so behavior is fine, but renaming would be clearer.
        ms = [numpy.random.randn(10, 10) for i in range(((i % 3) + 1))]
        p = pickle_dumps(ms)
        ms2 = pickle_loads(p)
        assert (len(ms) == len(ms2))
        for (m, m2) in zip(ms, ms2):
            assert numpy.allclose(m, m2)
            assert isinstance(m2.base, SharedNumpyArray)
|
@unittest.skipIf((not have_working_shmget()), 'shmget does not work')
def test_pickle_unpickle_auto_unused():
    """
    Dropping all unpickled references must automatically mark the server-side
    segments as unused, so the number of server instances stays bounded
    across repeated pickle/unpickle cycles.
    """
    old_num_servers = None
    for i in range(10):
        m = numpy.random.randn(((i * 2) + 1), ((i * 3) + 2))
        p = pickle_dumps((m, m, m))
        # Segments are reused, so the server count must not grow per iteration.
        new_num_servers = len(SharedNumpyArray.ServerInstances)
        if (old_num_servers is not None):
            assert (old_num_servers == new_num_servers)
        old_num_servers = new_num_servers
        (m2, m3, m4) = pickle_loads(p)
        assert numpy.allclose(m, m2)
        assert numpy.allclose(m, m3)
        assert numpy.allclose(m, m4)
        assert (not m4.base.is_server)
        # The in-use flag lives in the shared segment itself: writing it on
        # the client must be visible on the server side.
        m4.base._get_in_use_flag_ref().value = 42
        assert (m4.base._get_in_use_flag_ref().value == 42)
        assert (find_numpy_shared_by_shmid(m4.base.mem.shmid)._get_in_use_flag_ref().value == 42)
        assert numpy.allclose(m, m4)
        ss = list([find_numpy_shared_by_shmid(_m.base.mem.shmid) for _m in (m2, m3, m4)])
        # Drop every client reference; GC should trigger the auto-unused logic.
        _m = None
        m2 = m3 = m4 = None
        gc.collect()
        for s in ss:
            assert isinstance(s, SharedNumpyArray)
            assert s.is_server
            assert (not s.is_in_use())
|
def create_vocabulary(text):
    """
    :param str text: any natural text
    :return: mapping of words in the text to ids, as well as the inverse mapping
    :rtype: (dict[str, int], dict[int, str])
    """
    unique_words = set(text.strip().split())
    vocabulary = {}
    inverse_vocabulary = {}
    for index, word in enumerate(unique_words):
        vocabulary[word] = index
        inverse_vocabulary[index] = word
    return vocabulary, inverse_vocabulary
|
def word_ids_to_sentence(word_ids, vocabulary):
    """
    :param list[int] word_ids:
    :param dict[int, str] vocabulary: mapping from word ids to words
    :return: concatenation of all words
    :rtype: str
    """
    return ' '.join(vocabulary[word_id] for word_id in word_ids)
|
def test_translation_dataset():
    """
    Checks whether a dummy translation dataset can be read and whether the
    returned word indices are correct. We create the necessary corpus and
    vocabulary files on the fly (gzipped source, plain target).
    """
    dummy_dataset = tempfile.mkdtemp()
    source_file_name = os.path.join(dummy_dataset, 'source.test.gz')
    target_file_name = os.path.join(dummy_dataset, 'target.test')
    with gzip.open(source_file_name, 'wb') as source_file:
        source_file.write(dummy_source_text.encode('utf-8'))
    with open(target_file_name, 'wb') as target_file:
        target_file.write(dummy_target_text.encode('utf-8'))
    # Run once without and once with an appended end-of-sentence postfix.
    for postfix in ['', ' </S>']:
        # The word "TranslationDatasets" is deliberately not in the target
        # vocabulary, so it must be mapped to the '<UNK>' label.
        dummy_target_text_with_unk = dummy_target_text.replace('TranslationDatasets', '<UNK>')
        (source_vocabulary, inverse_source_vocabulary) = create_vocabulary((dummy_source_text + postfix))
        (target_vocabulary, inverse_target_vocabulary) = create_vocabulary((dummy_target_text_with_unk + postfix))
        source_vocabulary_file_name = os.path.join(dummy_dataset, 'source.vocab.pkl')
        target_vocabulary_file_name = os.path.join(dummy_dataset, 'target.vocab.pkl')
        with open(source_vocabulary_file_name, 'wb') as source_vocabulary_file:
            pickle.dump(source_vocabulary, source_vocabulary_file)
        with open(target_vocabulary_file_name, 'wb') as target_vocabulary_file:
            pickle.dump(target_vocabulary, target_vocabulary_file)
        translation_dataset = TranslationDataset(path=dummy_dataset, file_postfix='test', source_postfix=postfix, target_postfix=postfix, unknown_label={'classes': '<UNK>'})
        translation_dataset.init_seq_order(epoch=1)
        translation_dataset.load_seqs(0, 10)
        num_seqs = len(dummy_source_text.splitlines())
        assert_equal(translation_dataset.num_seqs, num_seqs)
        # Map the returned ids back to words and compare with the input lines.
        for sequence_index in range(num_seqs):
            source_word_ids = translation_dataset.get_data(sequence_index, 'data')
            source_sentence = word_ids_to_sentence(source_word_ids, inverse_source_vocabulary)
            assert_equal(source_sentence, (dummy_source_text.splitlines()[sequence_index] + postfix))
            target_word_ids = translation_dataset.get_data(sequence_index, 'classes')
            target_sentence = word_ids_to_sentence(target_word_ids, inverse_target_vocabulary)
            assert_equal(target_sentence, (dummy_target_text_with_unk.splitlines()[sequence_index] + postfix))
    shutil.rmtree(dummy_dataset)
|
def test_translation_factors_dataset():
    """
    Similar to test_translation_dataset(), but using translation factors:
    each word carries extra '|'-separated factor tokens, each factor with its
    own vocabulary and data key.
    """
    source_text_per_factor = [dummy_source_text_factor_0, dummy_source_text_factor_1]
    target_text_per_factor = [dummy_target_text_factor_0, dummy_target_text_factor_1, dummy_target_text_factor_2]
    source_vocabulary_names = ['source.vocab.pkl', 'source_factor1.vocab.pkl']
    target_vocabulary_names = ['target.vocab.pkl', 'target_factor1.vocab.pkl', 'target_factor2.vocab.pkl']
    source_data_keys = ['data', 'source_factor1']
    target_data_keys = ['classes', 'target_factor1', 'target_factor2']
    dummy_dataset = tempfile.mkdtemp()
    source_file_name = os.path.join(dummy_dataset, 'source.test.gz')
    target_file_name = os.path.join(dummy_dataset, 'target.test')
    with gzip.open(source_file_name, 'wb') as source_file:
        source_file.write(dummy_source_text_factored_format.encode('utf8'))
    with open(target_file_name, 'wb') as target_file:
        target_file.write(dummy_target_text_factored_format.encode('utf8'))
    # Run once without and once with an appended end-of-sentence postfix.
    for postfix in ['', ' </S>']:
        (vocabularies, inverse_vocabularies) = ([], [])
        # One vocabulary per factor, sources first, then targets.
        for dummy_text in (source_text_per_factor + target_text_per_factor):
            (vocabulary, inverse_vocabulary) = create_vocabulary((dummy_text + postfix))
            vocabularies.append(vocabulary)
            inverse_vocabularies.append(inverse_vocabulary)
        vocabulary_names = (source_vocabulary_names + target_vocabulary_names)
        for (index, vocabulary) in enumerate(vocabularies):
            with open(os.path.join(dummy_dataset, vocabulary_names[index]), 'wb') as vocabulary_file:
                pickle.dump(vocabulary, vocabulary_file)
        translation_dataset = TranslationFactorsDataset(path=dummy_dataset, file_postfix='test', factor_separator='|', source_factors=source_data_keys[1:], target_factors=target_data_keys[1:], source_postfix=postfix, target_postfix=postfix)
        translation_dataset.init_seq_order(epoch=1)
        translation_dataset.load_seqs(0, 10)
        num_seqs = len(dummy_target_text_factored_format.splitlines())
        assert_equal(translation_dataset.num_seqs, num_seqs)
        # Check every factor of every sequence against the expected plain text.
        data_keys = (source_data_keys + target_data_keys)
        texts_per_factor = (source_text_per_factor + target_text_per_factor)
        for (index, text) in enumerate(texts_per_factor):
            for sequence_index in range(num_seqs):
                word_ids = translation_dataset.get_data(sequence_index, data_keys[index])
                sentence = word_ids_to_sentence(word_ids, inverse_vocabularies[index])
                assert_equal(sentence, (text.splitlines()[sequence_index] + postfix))
    shutil.rmtree(dummy_dataset)
|
def build_env(env_update=None):
    """
    :param dict[str,str]|None env_update: entries to add to / override in os.environ
    :return: env dict for Popen
    :rtype: dict[str,str]
    """
    env = os.environ.copy()
    env.update(env_update or {})
    return env
|
def run(*args, env_update=None, print_stdout=False):
    """
    Run a subprocess, capturing stdout and stderr combined.

    :param args: command and its arguments
    :param dict[str,str]|None env_update: extra environment entries
    :param bool print_stdout: also print the output on success
    :return: decoded combined stdout/stderr
    :rtype: str
    :raises CalledProcessError: on a non-zero exit code (output attached)
    """
    cmd = list(args)
    print('run:', cmd)
    proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, env=build_env(env_update=env_update))
    raw_out, _ = proc.communicate()
    out = raw_out.decode('utf8')
    if proc.returncode != 0:
        # Always dump the output before raising, for easier debugging.
        print(('Return code is %i' % proc.returncode))
        print(('std out/err:\n---\n%s\n---\n' % out))
        raise CalledProcessError(cmd=cmd, returncode=proc.returncode, output=out)
    if print_stdout:
        print(('std out/err:\n---\n%s\n---\n' % out))
    return out
|
def parse_last_fer(out: str) -> float:
    """
    Extract the frame error rate from the last matching dev-error line.

    :param out: full RETURNN stdout/stderr text
    :return: FER
    """
    # Two line formats are accepted: the per-epoch training summary and the
    # task=eval summary. The last matching line in the output wins.
    epoch_pattern = re.compile('epoch [0-9]+ score: .* dev: .* error ([0-9.]+)\\s?')
    eval_pattern = re.compile('dev: score .* error ([0-9.]+)\\s?')
    parsed_fer = None
    for line in out.splitlines():
        match = epoch_pattern.match(line) or eval_pattern.match(line)
        if match:
            parsed_fer = float(match.group(1))
    err_msg = 'ERROR: No epoch dev errors found in output'
    assert parsed_fer is not None, ('%s.\nOutput:\n\n%s\n\n%s.' % (err_msg, out, err_msg))
    return parsed_fer
|
def run_and_parse_last_fer(*args, **kwargs):
    """Run the given command (see run()) and return the last dev FER from its output."""
    return parse_last_fer(run(*args, **kwargs))
|
def run_config_get_fer(config_filename, *args, env_update=None, log_verbosity=5, print_stdout=False, pre_cleanup=True, post_cleanup=True):
    """
    Run rnn.py with the given config and return the final dev FER.

    :param str config_filename: RETURNN config to run
    :param args: extra command-line arguments passed through to rnn.py
    :param dict[str,str]|None env_update: extra environment entries
    :param int log_verbosity: value for ++log_verbosity
    :param bool print_stdout: forward output to stdout on success
    :param bool pre_cleanup: delete leftover temp model files before the run
    :param bool post_cleanup: delete temp model files after the run
    :rtype: float
    """
    if pre_cleanup:
        cleanup_tmp_models(config_filename)
    fer = run_and_parse_last_fer(py, 'rnn.py', config_filename, '++log_verbosity', str(log_verbosity), *args, env_update=env_update, print_stdout=print_stdout)
    print('FER:', fer)
    if post_cleanup:
        cleanup_tmp_models(config_filename)
    return fer
|
def get_model_filename(config_filename: str) -> str:
    """
    Load the given config and return its 'model' path prefix.

    :param config_filename: existing RETURNN config file
    :return: the configured model path (must be non-empty and under /tmp)
    """
    assert os.path.exists(config_filename)
    from returnn.config import Config
    cfg = Config()
    cfg.load_file(config_filename)
    model_fn = cfg.value('model', '')
    assert model_fn
    # Safety net: cleanup_tmp_models() deletes files matching this prefix,
    # so require that it lives under /tmp.
    assert model_fn.startswith('/tmp/')
    return model_fn
|
def cleanup_tmp_models(config_filename: str):
    """Delete all temp model checkpoint files written by the given config."""
    prefix = get_model_filename(config_filename)
    for filename in glob(prefix + '.*'):
        os.remove(filename)
|
@unittest.skipIf((not tf), 'no TF')
def test_demo_tf_task12ax():
    """Train the native-LSTM 12AX demo config and check the final dev FER."""
    fer = run_config_get_fer('demos/demo-tf-native-lstm.12ax.config', print_stdout=True)
    assert_less(fer, 0.015)
|
@unittest.skipIf((not tf), 'no TF')
def test_demo_tf_task12ax_eval():
    """
    Train 2 epochs, then re-evaluate epoch 2 with task=eval: the FER must be
    reproduced exactly. A copy of the checkpoint under the epoch-3 name must
    yield a different FER (presumably because the dataset sequences depend on
    the epoch number — confirm in Task12AXDataset).
    """
    cfg_filename = 'demos/demo-tf-native-lstm.12ax.config'
    train_dataset_repr = '{"class": "Task12AXDataset", "num_seqs": 10}'
    dev_dataset_repr = '{"class": "Task12AXDataset", "num_seqs": 10}'
    # Train 2 epochs; keep the checkpoints for the eval runs below.
    fer1 = run_config_get_fer(cfg_filename, '++num_epochs', '2', '++train', train_dataset_repr, '++dev', dev_dataset_repr, print_stdout=True, post_cleanup=False)
    fer2 = run_config_get_fer(cfg_filename, '++task', 'eval', '++load_epoch', '2', '++train', 'None', '++dev', dev_dataset_repr, print_stdout=True, pre_cleanup=False, post_cleanup=False)
    assert (fer1 == fer2)
    model_filename = get_model_filename(cfg_filename)
    ep2_files = glob((model_filename + '.002.*'))
    assert ep2_files, f'No model files found for epoch 2, {model_filename}'
    # Fake an epoch-3 checkpoint by copying the epoch-2 files.
    for fn in ep2_files:
        shutil.copy(fn, fn.replace('.002.', '.003.'))
    fer3 = run_config_get_fer(cfg_filename, '++task', 'eval', '++load_epoch', '3', '++train', 'None', '++dev', dev_dataset_repr, print_stdout=True, pre_cleanup=False)
    assert (fer3 != fer2)
|
@unittest.skipIf((not torch), 'no PyTorch')
def test_demo_torch_task12ax():
    """Train the PyTorch demo config and check the final dev FER."""
    cleanup_tmp_models('demos/demo-torch.config')
    out = run(py, 'rnn.py', 'demos/demo-torch.config', print_stdout=True)
    fer = parse_last_fer(out)
    assert_less(fer, 0.02)
|
def _test_torch_export_to_onnx(cfg_filename: str) -> str:
    """
    Executes the demo passed as a parameter and returns the ONNX exported model as a filename.

    :param cfg_filename: Demo filename, either "demos/demo-rf.config" or "demos/demo-torch.config".
    :return: Filename representing the ONNX model location.
    """
    cleanup_tmp_models(cfg_filename)
    # Train a single epoch on CPU to produce a checkpoint.
    out = run(py, 'rnn.py', cfg_filename, '--num_epochs', '1', '--device', 'cpu')
    # The trainer logs the checkpoint path via "Save model under <path>".
    out_pt_model = re.search('\\nSave model under (.*)\\n', out, re.MULTILINE)
    assert out_pt_model, ('Could not find the model filename in the output:\n%s' % out)
    out_pt_model = out_pt_model.group(1)
    print('*** PT model:', out_pt_model)
    out_onnx_model = out_pt_model.replace('.pt', '.onnx')
    run(py, 'tools/torch_export_to_onnx.py', cfg_filename, out_pt_model, out_onnx_model, print_stdout=True)
    return out_onnx_model
|
def _test_torch_onnx_inference_no_seq_lens(out_onnx_model: str):
    """
    Tests the inference of the torch demo with an ONNX model passed as
    parameter. Variant without feeding explicit sequence lengths.
    """
    import onnxruntime as ort
    torch.manual_seed(42)  # deterministic dummy input
    # assumes input layout (batch=3, time=50, feature=9) — TODO confirm vs demo config
    dummy_data = torch.randn([3, 50, 9])
    session = ort.InferenceSession(out_onnx_model)
    outputs_onnx = session.run(None, {'data': dummy_data.numpy()})
    # assumes output order: result, batch size, sequence lengths — TODO confirm export
    print('*** Result:', torch.FloatTensor(outputs_onnx[0]))
    print('*** Batch size:', torch.IntTensor(outputs_onnx[1]))
    print('*** Sequence lengths:', torch.IntTensor(outputs_onnx[2]))
|
def _test_torch_onnx_inference_seq_lens_in_out(out_onnx_model: str):
    """
    Tests the inference of the torch demo with an ONNX model passed as
    parameter. Variant feeding explicit sequence lengths ('data:size1').
    """
    print(out_onnx_model)
    import onnxruntime as ort
    torch.manual_seed(42)  # deterministic dummy input
    # assumes input layout (batch=3, time=50, feature=9) — TODO confirm vs demo config
    dummy_data = torch.randn([3, 50, 9])
    dummy_seq_lens = torch.tensor([27, 50, 43], dtype=torch.int32)
    session = ort.InferenceSession(out_onnx_model)
    outputs_onnx = session.run(None, {'data': dummy_data.numpy(), 'data:size1': dummy_seq_lens.numpy()})
    out_result = torch.FloatTensor(outputs_onnx[0])
    out_seq_lens = torch.IntTensor(outputs_onnx[1])
    print('*** Result:', out_result)
    print('*** Sequence lengths:', out_seq_lens)
    # The demo net maps 9 input features to 2 output classes.
    assert (out_result.shape == torch.Size([3, 50, 2]))
|
@unittest.skipIf((not torch), 'no PyTorch')
def test_demo_torch_export_to_onnx():
    """Export the PyTorch demo to ONNX, then run inference on the result."""
    out_onnx_model = _test_torch_export_to_onnx('demos/demo-torch.config')
    _test_torch_onnx_inference_seq_lens_in_out(out_onnx_model)
|
@unittest.skipIf((not torch), 'no PyTorch')
def test_demo_rf_export_to_onnx():
    """Export the RETURNN-frontend demo to ONNX, then run inference on the result."""
    out_onnx_model = _test_torch_export_to_onnx('demos/demo-rf.config')
    _test_torch_onnx_inference_seq_lens_in_out(out_onnx_model)
|
@unittest.skipIf((not torch), 'no PyTorch')
def test_demo_rf_torch_task12ax():
    """Train the RETURNN-frontend demo with the default (torch) backend."""
    cleanup_tmp_models('demos/demo-rf.config')
    out = run(py, 'rnn.py', 'demos/demo-rf.config', print_stdout=True)
    fer = parse_last_fer(out)
    assert_less(fer, 0.02)
|
@unittest.skipIf((not tf), 'no TF')
def test_demo_rf_tf_task12ax():
    """Train the RETURNN-frontend demo with the tensorflow-net-dict backend."""
    cleanup_tmp_models('demos/demo-rf.config')
    out = run(py, 'rnn.py', 'demos/demo-rf.config', '++backend', 'tensorflow-net-dict', print_stdout=True)
    fer = parse_last_fer(out)
    assert_less(fer, 0.02)
|
def test_demo_iter_dataset_task12ax():
    """Run the iterate-dataset demo script and check it reaches epoch 5."""
    cleanup_tmp_models('demos/demo-tf-vanilla-lstm.12ax.config')
    out = run(py, 'demos/demo-iter-dataset.py', 'demos/demo-tf-vanilla-lstm.12ax.config')
    assert_in('Epoch 5.', out.splitlines())
|
@unittest.skipIf((not tf), 'no TF')
def test_demo_returnn_as_framework():
    """
    Build an sdist of RETURNN, pip-install it, and run the
    use-RETURNN-as-a-framework demo against the installed package.
    """
    print('Prepare.')
    import subprocess
    import shutil
    from glob import glob
    from returnn.util.basic import get_login_username
    # travis_fold markers group the (very verbose) output in CI logs.
    subprocess.check_call(['echo', 'travis_fold:start:test_demo_returnn_as_framework'])
    assert os.path.exists('setup.py')
    # Clean up artifacts from earlier builds/runs.
    if glob('dist/*.tar.gz'):
        for fn in glob('dist/*.tar.gz'):
            os.remove(fn)
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')
    if os.path.exists('docs/crnn'):
        os.remove('docs/crnn')
    if os.path.exists('docs/returnn'):
        os.remove('docs/returnn')
    tmp_model_dir = ('/tmp/%s/returnn-demo-as-framework' % get_login_username())
    if os.path.exists(tmp_model_dir):
        shutil.rmtree(tmp_model_dir, ignore_errors=True)
    print('setup.py sdist, to create package.')
    subprocess.check_call([py, 'setup.py', 'sdist'])
    dist_fns = glob('dist/*.tar.gz')
    assert (len(dist_fns) == 1)
    dist_fn = os.path.abspath(dist_fns[0])
    pip_path = which_pip()
    print('Pip install Returnn.')
    # Outside a virtualenv we must install into the user site-packages.
    in_virtual_env = hasattr(sys, 'real_prefix')
    cmd = [py, pip_path, 'install']
    if (not in_virtual_env):
        cmd += ['--user']
    cmd += ['-v', dist_fn]
    print(('$ %s' % ' '.join(cmd)))
    # cwd='/' so the install cannot pick up the source tree by accident.
    subprocess.check_call(cmd, cwd='/')
    print('Running demo now.')
    subprocess.check_call([py, 'demo-returnn-as-framework.py'], cwd='demos')
    print('Success.')
    subprocess.check_call(['echo', 'travis_fold:end:test_demo_returnn_as_framework'])
|
@unittest.skipIf(not tf, "no TF")
def test_demo_sprint_interface():
    """Run the Sprint-interface demo script from a neutral working directory."""
    import subprocess

    demo_script = os.path.abspath("demos/demo-sprint-interface.py")
    subprocess.check_call(["echo", "travis_fold:start:test_demo_sprint_interface"])
    subprocess.check_call([py, demo_script], cwd="/")
    subprocess.check_call(["echo", "travis_fold:end:test_demo_sprint_interface"])
|
def test_returnn_as_framework_TaskSystem():
    """Run the TaskSystem pickler check via the returnn-as-framework helper script."""
    import subprocess

    script = os.path.abspath("tests/returnn-as-framework.py")
    subprocess.check_call(["echo", "travis_fold:start:test_returnn_as_framework_TaskSystem"])
    subprocess.check_call([py, script, "test_TaskSystem_Pickler()"], cwd="/")
    subprocess.check_call(["echo", "travis_fold:end:test_returnn_as_framework_TaskSystem"])
|
@unittest.skipIf(not tf, "no TF")
def test_returnn_as_framework_old_style_crnn_TFUtil():
    """
    Check that old-style `import crnn.TFUtil` works.

    TFUtil is just an example; any other module would do.
    What matters is the old-style module naming, i.e. the logic in __old_mod_loader__.
    """
    import subprocess

    script = os.path.abspath("tests/returnn-as-framework.py")
    subprocess.check_call(["echo", "travis_fold:start:test_returnn_as_framework_old_style_crnn_TFUtil"])
    subprocess.check_call(
        [py, script, "--old-style", "--returnn-package-name", "crnn", "test_old_style_import_crnn_TFUtil()"],
        cwd="/",
    )
    subprocess.check_call(["echo", "travis_fold:end:test_returnn_as_framework_old_style_crnn_TFUtil"])
|
@unittest.skipIf(not tf, "no TF")
def test_returnn_as_framework_old_style_TFUtil():
    """
    Check that old-style `import TFUtil` works.
    See also :func:`test_returnn_as_framework_old_style_crnn_TFUtil`.
    """
    import subprocess

    script = os.path.abspath("tests/returnn-as-framework.py")
    subprocess.check_call(["echo", "travis_fold:start:test_returnn_as_framework_old_style_TFUtil"])
    subprocess.check_call([py, script, "--old-style", "test_old_style_import_TFUtil()"], cwd="/")
    subprocess.check_call(["echo", "travis_fold:end:test_returnn_as_framework_old_style_TFUtil"])
|
class CLibAtForkDemo:
    """Thin ctypes wrapper around the compiled at-fork demo C library."""

    def __init__(self):
        self._load_lib()

    def _load_lib(self):
        # Compile the demo C code (or reuse a cached build) and load it via ctypes.
        from returnn.util.basic import NativeCodeCompiler

        compiler = NativeCodeCompiler(
            base_name="test_fork_exec",
            code_version=1,
            code=c_code_at_fork_demo,
            is_cpp=False,
            ld_flags=["-lpthread"],
        )
        self._lib = compiler.load_lib_ctypes()
        print("loaded lib:", compiler.get_lib_filename())
        import ctypes

        # Declare the C signatures: the two register functions take no args.
        for func_name in ("register_hello_from_child", "register_hello_from_fork_prepare"):
            func = getattr(self._lib, func_name)
            func.restype = None
            func.argtypes = ()
        self._lib.set_magic_number.restype = None
        self._lib.set_magic_number.argtypes = (ctypes.c_long,)

    def set_magic_number(self, i):
        """:param int i: value the atfork handlers will report"""
        self._lib.set_magic_number(i)

    def register_hello_from_child(self):
        """Register the C atfork child handler."""
        self._lib.register_hello_from_child()

    def register_hello_from_fork_prepare(self):
        """Register the C atfork prepare handler."""
        self._lib.register_hello_from_fork_prepare()
|
def demo_hello_from_fork():
    """
    Fork demo: register the C atfork handlers, set a magic number, then fork.
    Parent and child both print marker lines; run via run_demo_check_output(),
    and test_demo_hello_from_fork() asserts on the collected output.
    """
    print('Hello.')
    # Flush before fork so the buffered "Hello." is not duplicated in the child.
    sys.stdout.flush()
    # Magic number 3 is echoed by the atfork handlers ("... magic number 3.").
    clib_at_fork_demo.set_magic_number(3)
    clib_at_fork_demo.register_hello_from_child()
    clib_at_fork_demo.register_hello_from_fork_prepare()
    pid = os.fork()
    if (pid == 0):
        # Child process: print its marker and exit immediately.
        print('Hello from child after fork.')
        sys.exit()
    print('Hello from parent after fork.')
    os.waitpid(pid, 0)  # reap the child before finishing
    print('Bye.')
|
def demo_start_subprocess():
    """
    Subprocess demo: register the C atfork handlers, then spawn a shell
    subprocess. The tests assert on the printed marker lines.
    """
    from subprocess import check_call

    print("Hello.")
    sys.stdout.flush()
    # Magic number 5 is echoed by the atfork handlers, if they fire at all.
    clib_at_fork_demo.set_magic_number(5)
    clib_at_fork_demo.register_hello_from_child()
    clib_at_fork_demo.register_hello_from_fork_prepare()
    check_call("echo Hello from subprocess.", shell=True)
    print("Bye.")
|
def run_demo_check_output(name):
    """
    Run one of the demo_* functions of this file in a fresh Python process.

    :param str name: e.g. "demo_hello_from_fork"
    :return: lines of stdout of the demo
    :rtype: list[str]
    """
    from subprocess import check_output

    raw = check_output([sys.executable, __file__, name])
    return raw.decode("utf8").splitlines()
|
def filter_demo_output(ls):
    """
    Strip boilerplate from demo output and check the Hello/Bye framing.

    Removes compiler/loader/faulthandler noise, then pops the leading "Hello."
    line (which may be preceded by "Ignoring ... call!" lines) and the trailing
    "Bye." line. The input list is not modified.

    :param list[str] ls:
    :rtype: list[str]
    :raises AssertionError: if the "Hello." / "Bye." framing is missing
    """
    noise_prefixes = ("Executing: ", "Compiler call: ", "loaded lib: ", "dlopen: ")
    ls = [
        l
        for l in ls
        if not l.startswith(noise_prefixes)
        and "installLibSigSegfault" not in l
        and "libSegFault" not in l
        and "faulthandler" not in l
    ]
    found_hello = False
    for i, l in enumerate(ls):
        if l in ["Ignoring pthread_atfork call!", "Ignoring __register_atfork call!"]:
            continue
        # The first non-ignored line must be the Hello marker.
        assert l == "Hello.", "unexpected line before Hello: %r" % (l,)
        ls.pop(i)
        found_hello = True
        break
    assert found_hello, "no Hello: %r" % (ls,)
    # Fixed: also assert non-empty, so an empty tail gives a clear assertion
    # instead of an IndexError from ls[-1].
    assert ls and ls[-1] == "Bye.", "no Bye at end: %r" % (ls,)
    return ls[:-1]
|
def test_demo_hello_from_fork():
    """Check that all expected fork/atfork marker lines appear in the demo output."""
    lines = run_demo_check_output("demo_hello_from_fork")
    pprint(lines)
    lines = filter_demo_output(lines)
    pprint(lines)
    expected = {
        "Hello from child after fork.",
        "Hello from child atfork, magic number 3.",
        "Hello from atfork prepare, magic number 3.",
        "Hello from parent after fork.",
    }
    assert_equal(set(lines), expected)
|
def test_demo_start_subprocess():
    """
    Check the subprocess demo output. Old CPython versions ran the C atfork
    handlers when starting a subprocess; newer ones (and other implementations)
    do not, so the atfork lines are only asserted on old CPython.
    """
    lines = run_demo_check_output("demo_start_subprocess")
    pprint(lines)
    lines = filter_demo_output(lines)
    pprint(lines)
    assert "Hello from subprocess." in lines
    import platform

    impl = platform.python_implementation()
    version = sys.version_info[:3]
    print("Python impl:", impl)
    print("Python version:", version)
    if impl != "CPython":
        old = False
    elif version[0] == 2:
        old = version <= (2, 7, 12)
    else:
        old = version <= (3, 6, 1)
    if old:
        print("atfork handler should have been called.")
        assert "Hello from child atfork, magic number 5." in lines
        assert "Hello from atfork prepare, magic number 5." in lines
    else:
        print("Not checking for atfork handler output.")
|
def patched_check_demo_start_subprocess():
    """
    Like test_demo_start_subprocess(), but asserts that no atfork handlers run.
    Requires that the environment was patched (__RETURNN_ATFORK_PATCHED=1).
    """
    assert_equal(os.environ.get("__RETURNN_ATFORK_PATCHED"), "1")
    lines = run_demo_check_output("demo_start_subprocess")
    pprint(lines)
    lines = filter_demo_output(lines)
    pprint(lines)
    assert "Hello from subprocess." in lines
    ignored_msgs = ("Ignoring pthread_atfork call!", "Ignoring __register_atfork call!")
    lines = [l for l in lines if l not in ignored_msgs]
    pprint(lines)
    assert_equal(lines, ["Hello from subprocess."])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.