code stringlengths 17 6.64M |
|---|
def sort_by_size(file_list, transcription_list, size_list):
    """Reorder the three parallel lists by image size.

    Sorting key is (size[1], size[0]), i.e. the second size component first.
    Returns the three lists in the new, shared order.
    """
    ordered = sorted(
        zip(file_list, transcription_list, size_list),
        key=lambda triple: (triple[2][1], triple[2][0]),
    )
    files, transcriptions, sizes = [], [], []
    for f, t, s in ordered:
        files.append(f)
        transcriptions.append(t)
        sizes.append(s)
    return (files, transcriptions, sizes)
|
def convert(file_list_path, char_list_path, selections, out_file_names, pad_whitespace, dataset_prefix, base_path, compress):
    """For each (selection, out_file_name) pair, write the selected images
    and their transcriptions to an HDF5 file.

    Selections are image ids (file basename without the ``.png`` suffix);
    every requested id must exist in the file list.
    """
    charlist = load_char_list(char_list_path)
    file_list, transcription_list, size_list, n_labels = load_file_list_and_transcriptions_and_sizes_and_n_labels(
        file_list_path, char_list_path, pad_whitespace, base_path)
    file_list, transcription_list, size_list = sort_by_size(file_list, transcription_list, size_list)

    def image_id(path):
        # basename without the '.png' suffix
        return path.split('/')[-1].split('.png')[0]

    known_ids = set(image_id(f) for f in file_list)
    for selection, out_file_name in zip(selections, out_file_names):
        print(out_file_name)
        wanted = set(selection)
        assert wanted.issubset(known_ids)
        picked_files = []
        picked_transcriptions = []
        for f, t in zip(file_list, transcription_list):
            if image_id(f) in wanted:
                picked_files.append(f)
                picked_transcriptions.append(t)
        write_to_hdf(picked_files, picked_transcriptions, charlist, n_labels, out_file_name, dataset_prefix, compress=compress)
|
def get_image_list(train_list_path):
    """Read a list file and return its lines with newline characters removed."""
    with open(train_list_path) as handle:
        return [line.replace('\n', '') for line in handle]
|
def get_train_and_train_valid_lists(train_list_path, blacklist, train_fraction=0.9):
    """Split the image list in *train_list_path* into train and train-valid parts.

    :param str train_list_path: text file with one image id per line
    :param blacklist: iterable of image ids to drop from both parts
    :param float train_fraction: fraction of lines going into the train part
    :return: (train_imgs, train_valid_imgs)
    """
    with open(train_list_path) as f:
        imgs = [line.replace('\n', '') for line in f]
    n_train = int(round(train_fraction * len(imgs)))
    # Build a set once: O(1) membership tests instead of O(len(blacklist)) per image.
    blacklist = set(blacklist)
    train_imgs = _filter_blacklisted(imgs[:n_train], blacklist, 'train')
    train_valid_imgs = _filter_blacklisted(imgs[n_train:], blacklist, 'train_valid')
    return (train_imgs, train_valid_imgs)


def _filter_blacklisted(imgs, blacklist, name):
    """Return *imgs* without blacklisted entries, reporting how many were removed."""
    kept = [s for s in imgs if s not in blacklist]
    n_removed = len(imgs) - len(kept)
    if n_removed:
        print('removed', n_removed, 'blacklisted images from', name)
    return kept
|
def convert_IAM_lines_demo(base_path_imgs, tag, blacklist=()):
    """Convert a small fixed demo subset of IAM lines to features/<tag>/demo.h5.

    :param str base_path_imgs: directory containing the line images
    :param str tag: output subdirectory name under features/
    :param blacklist: accepted for signature compatibility; not used here.
        (Default changed from the mutable ``[]`` to an immutable tuple.)
    """
    base_path_out = 'features/' + tag + '/'
    mkdir_p(base_path_out)
    file_list_path = 'lines.txt'
    char_list_path = 'chars.txt'
    out_file_name_demo = base_path_out + 'demo.h5'
    print('converting IAM_lines to', out_file_name_demo)
    # Fixed demo line ids from the IAM corpus.
    demo_list = ['a01-000u-00', 'a01-007-04', 'a01-007-06']
    convert(file_list_path, char_list_path, [demo_list], [out_file_name_demo],
            pad_whitespace=True, dataset_prefix='trainset', base_path=base_path_imgs, compress=True)
|
def convert_IAM_lines_train(base_path_imgs, tag, blacklist=()):
    """Convert the IAM train split to train.1.h5, train.2.h5 and train_valid.h5.

    The train list is split 90/10 into train / train-valid; the train part is
    then halved into the two train files.

    :param str base_path_imgs: directory containing the line images
    :param str tag: output subdirectory name under features/
    :param blacklist: image ids to exclude from the train/train-valid lists
    """
    base_path_out = 'features/' + tag + '/'
    mkdir_p(base_path_out)
    file_list_path = 'lines.txt'
    char_list_path = 'chars.txt'
    selection_list_path = 'split/train.txt'
    out_file_name_train1 = base_path_out + 'train.1.h5'
    out_file_name_train2 = base_path_out + 'train.2.h5'
    out_file_name_train_valid = base_path_out + 'train_valid.h5'
    print('converting IAM_lines to', out_file_name_train1, 'and', out_file_name_train2)
    train_list, train_valid_list = get_train_and_train_valid_lists(selection_list_path, blacklist, 0.9)
    # Floor division: '/' yields a float under Python 3, which breaks list slicing.
    len1 = len(train_list) // 2
    train_list1 = train_list[:len1]
    train_list2 = train_list[len1:]
    selections = [train_list1, train_list2, train_valid_list]
    out_file_names = [out_file_name_train1, out_file_name_train2, out_file_name_train_valid]
    convert(file_list_path, char_list_path, selections, out_file_names,
            pad_whitespace=True, dataset_prefix='trainset', base_path=base_path_imgs, compress=True)
|
def convert_IAM_lines_valid_test(base_path_imgs, tag):
    """Convert the IAM validation and evaluation splits to valid.h5 / test.h5.

    No transcriptions are available here, so empty target lists are written.

    :param str base_path_imgs: directory containing the line images
    :param str tag: output subdirectory name under features/
    """
    base_path_out = 'features/' + tag + '/'
    mkdir_p(base_path_out)
    char_list_path = 'chars.txt'
    selection_list_path_valid = 'split/valid.txt'
    selection_list_path_test = 'split/eval.txt'
    out_file_name_valid = base_path_out + 'valid.h5'
    out_file_name_test = base_path_out + 'test.h5'
    prefix_valid = 'validationset'
    prefix_test = 'testset'
    charlist = load_char_list(char_list_path)
    n_labels = len(charlist)
    for selection_list_path, out_file_name, prefix in zip(
            [selection_list_path_valid, selection_list_path_test],
            [out_file_name_valid, out_file_name_test],
            [prefix_valid, prefix_test]):
        # 'with' closes the handle deterministically (the original leaked it).
        with open(selection_list_path) as f:
            selection = [x.strip() for x in f]
        imgs = []
        for sel in selection:
            # Each id expands to its numbered line images, e.g. <id>-01.png.
            pattern = base_path_imgs + sel + '-[0-9][0-9].png'
            new_imgs = glob.glob(pattern)
            assert len(new_imgs) > 0, (sel, pattern)
            imgs.extend(new_imgs)
        imgs = sorted(imgs)
        # Independent empty lists, not N references to a single shared list.
        transcriptions = [[] for _ in imgs]
        print('converting IAM_lines to', out_file_name)
        write_to_hdf(imgs, transcriptions, charlist, n_labels, out_file_name, dataset_prefix=prefix, compress=True)
|
def main():
    """Entry point: convert the IAM demo lines using the default paths."""
    base_path_imgs = 'IAM_lines'
    tag = 'raw'
    if not base_path_imgs.endswith('/'):
        base_path_imgs += '/'
    convert_IAM_lines_demo(base_path_imgs, tag)
|
def hdf5_strings(handle, name, data):
    """Write a list of strings as HDF5 dataset *name* under *handle*.

    First tries a fixed-size bytes dtype; on failure (e.g. non-ASCII content)
    falls back to a variable-length string dtype.
    """
    try:
        max_len = max([len(d) for d in data])
        dset = handle.create_dataset(name, (len(data),), dtype='S' + str(max_len))
        dset[...] = data
    except Exception:
        # 'str' replaces the Python-2-only 'unicode' (NameError under Python 3).
        dt = h5py.special_dtype(vlen=str)
        if name in handle:
            # Remove a partially written fixed-size dataset before retrying;
            # unconditional deletion raised KeyError if creation itself failed.
            del handle[name]
        dset = handle.create_dataset(name, (len(data),), dtype=dt)
        dset[...] = data
|
def write_to_hdf(img_list, transcription_list, charlist, out_file_name, dataset_prefix='train'):
    """Write line images and their transcriptions to a RETURNN-style HDF5 file.

    :param img_list: list of 2D numpy arrays with pixel values 0..255
        (assumed single-channel, given inputPattSize=1 — TODO confirm)
    :param transcription_list: label-index sequences, parallel to img_list
    :param charlist: label names
    :param str out_file_name: output .h5 path (overwritten)
    :param str dataset_prefix: prefix for the per-sequence tags
    """
    with h5py.File(out_file_name, 'w') as f:
        f.attrs['inputPattSize'] = 1  # one value per timestep (flattened pixels)
        f.attrs['numDims'] = 1
        f.attrs['numSeqs'] = len(img_list)
        classes = charlist
        inputs = []
        sizes = []
        seq_lengths = []
        targets = []
        for (img, transcription) in zip(img_list, transcription_list):
            targets += transcription
            sizes.append(img.shape)
            # Flatten the image into a (num_pixels, 1) column of timesteps.
            img = img.reshape(img.size, 1)
            inputs.append(img)
            # Per sequence: input length, target length, and a constant 2
            # (presumably the number of size entries per sequence — verify).
            seq_lengths.append([[img.size, len(transcription), 2]])
        inputs = numpy.concatenate(inputs, axis=0)
        # Flattens the (n, 2) array of shapes into a 1D int32 array of length 2n.
        sizes = numpy.concatenate(numpy.array(sizes, dtype='int32'), axis=0)
        seq_lengths = numpy.concatenate(numpy.array(seq_lengths, dtype='int32'), axis=0)
        targets = numpy.array(targets, dtype='int32')
        f.attrs['numTimesteps'] = inputs.shape[0]
        f['inputs'] = (inputs.astype('float32') / 255.0)  # normalize pixels to [0, 1]
        hdf5_strings(f, 'labels', classes)
        f['seqLengths'] = seq_lengths
        seq_tags = [((dataset_prefix + '/') + str(idx)) for idx in range(len(img_list))]
        hdf5_strings(f, 'seqTags', seq_tags)
        f['targets/data/classes'] = targets
        f['targets/data/sizes'] = sizes
        hdf5_strings(f, 'targets/labels/classes', classes)
        hdf5_strings(f, 'targets/labels/sizes', ['foo'])  # placeholder label name
        g = f.create_group('targets/size')
        g.attrs['classes'] = len(classes)
        g.attrs['sizes'] = 2  # two size values per sequence
|
def main():
    """Smoke test: write two dummy images with toy transcriptions to test.h5."""
    char_list = ['a', 'b', 'c', 'd']
    img_list = [
        numpy.zeros((14, 14), dtype='float32'),
        numpy.zeros((12, 12), dtype='float32'),
    ]
    transcription_list = [[0, 1, 2], [2, 0, 1]]
    write_to_hdf(img_list, transcription_list, char_list, 'test.h5')
|
def hdf5_strings(handle, name, data):
    """Write a list of strings as HDF5 dataset *name* under *handle*.

    First tries a fixed-size bytes dtype; on failure (e.g. non-ASCII content)
    falls back to a variable-length string dtype.
    """
    try:
        max_len = max([len(d) for d in data])
        dset = handle.create_dataset(name, (len(data),), dtype='S' + str(max_len))
        dset[...] = data
    except Exception:
        # 'str' replaces the Python-2-only 'unicode' (NameError under Python 3).
        dt = h5py.special_dtype(vlen=str)
        if name in handle:
            # Remove a partially written fixed-size dataset before retrying;
            # unconditional deletion raised KeyError if creation itself failed.
            del handle[name]
        dset = handle.create_dataset(name, (len(data),), dtype=dt)
        dset[...] = data
|
def write_to_hdf(img_list, transcription_list, charlist, out_file_name, dataset_prefix='train'):
    """Write 3-channel line images and transcriptions to a RETURNN-style HDF5 file.

    Variant of the single-channel writer: each timestep is one pixel with
    3 channels (inputPattSize = 3).

    :param img_list: list of (H, W, 3) numpy arrays with values 0..255
    :param transcription_list: label-index sequences, parallel to img_list
    :param charlist: label names
    :param str out_file_name: output .h5 path (overwritten)
    :param str dataset_prefix: prefix for the per-sequence tags
    """
    with h5py.File(out_file_name, 'w') as f:
        f.attrs['inputPattSize'] = 3  # three values (channels) per timestep
        f.attrs['numDims'] = 1
        f.attrs['numSeqs'] = len(img_list)
        classes = charlist
        inputs = []
        sizes = []
        seq_lengths = []
        targets = []
        for (img, transcription) in zip(img_list, transcription_list):
            targets += transcription
            size = img.shape[:2]
            sizes.append(size)
            # Flatten spatial dims; keep the 3 channels per timestep.
            img = img.reshape((size[0] * size[1]), 3)
            inputs.append(img)
            # Per sequence: input length, target length, and a constant 2
            # (presumably the number of size entries per sequence — verify).
            seq_lengths.append([[(size[0] * size[1]), len(transcription), 2]])
        inputs = numpy.concatenate(inputs, axis=0)
        # Flattens the (n, 2) array of shapes into a 1D int32 array of length 2n.
        sizes = numpy.concatenate(numpy.array(sizes, dtype='int32'), axis=0)
        seq_lengths = numpy.concatenate(numpy.array(seq_lengths, dtype='int32'), axis=0)
        targets = numpy.array(targets, dtype='int32')
        f.attrs['numTimesteps'] = inputs.shape[0]
        f['inputs'] = (inputs.astype('float32') / 255.0)  # normalize pixels to [0, 1]
        hdf5_strings(f, 'labels', classes)
        f['seqLengths'] = seq_lengths
        seq_tags = [((dataset_prefix + '/') + str(idx)) for idx in range(len(img_list))]
        hdf5_strings(f, 'seqTags', seq_tags)
        f['targets/data/classes'] = targets
        f['targets/data/sizes'] = sizes
        hdf5_strings(f, 'targets/labels/classes', classes)
        hdf5_strings(f, 'targets/labels/sizes', ['foo'])  # placeholder label name
        g = f.create_group('targets/size')
        g.attrs['classes'] = len(classes)
        g.attrs['sizes'] = 2  # two size values per sequence
|
def main():
    """Smoke test: write two dummy 3-channel images with toy transcriptions to test.h5."""
    char_list = ['a', 'b', 'c', 'd']
    img_list = [
        numpy.zeros((14, 14, 3), dtype='float32'),
        numpy.zeros((12, 12, 3), dtype='float32'),
    ]
    transcription_list = [[0, 1, 2], [2, 0, 1]]
    write_to_hdf(img_list, transcription_list, char_list, 'test.h5')
|
def linkcode_resolve(domain, info):
    """Sphinx linkcode hook: map a documented Python object to its GitHub URL.

    Returns None for non-Python domains or a missing module name; if the
    source location cannot be determined, links to the module file without
    line numbers.
    """
    if (domain != 'py') or (not info['module']):
        return None

    def find_source():
        import inspect
        import os
        obj = sys.modules[info['module']]
        for part in info['fullname'].split('.'):
            obj = getattr(obj, part)
        fn = inspect.getsourcefile(obj)
        fn = os.path.relpath(fn, start='returnn')
        source, lineno = inspect.getsourcelines(obj)
        return fn, lineno, lineno + len(source) - 1

    try:
        filename = '%s#L%d-L%d' % find_source()
    except Exception:
        filename = info['module'].replace('.', '/') + '.py'
    tag = 'master'
    return 'https://github.com/rwth-i6/returnn/blob/%s/returnn/%s' % (tag, filename)
|
def generate():
    """Write optimizer.rst documenting all available optimizer classes."""
    updater._init_optimizer_classes_dict()
    optimizer_dict = updater._OptimizerClassesDict
    # 'with' guarantees the file is closed even if writing raises.
    with open('optimizer.rst', 'w') as rst_file:
        rst_file.write(header_text)
        for optimizer_name, optimizer_class in sorted(optimizer_dict.items()):
            # Names ending in 'optimizer' are skipped — presumably lowercase
            # aliases of the class entries; verify against the updater dict.
            if not optimizer_name.endswith('optimizer'):
                module = optimizer_class.__module__
                class_name = optimizer_class.__name__
                name = class_name[:-len('Optimizer')] if class_name.endswith('Optimizer') else class_name
                rst_file.write('\n')
                rst_file.write('%s\n' % name)
                rst_file.write('%s\n' % ('-' * len(name)))
                rst_file.write('\n')
                rst_file.write('.. autoclass:: %s.%s\n' % (module, class_name))
                rst_file.write(' :members:\n')
                rst_file.write(' :undoc-members:\n')
                rst_file.write('\n')
|
def generate():
    """Write layer_reference/units.rst documenting the available RNN cell classes."""
    RecLayer._create_rnn_cells_dict()
    layer_names = sorted(list(RecLayer._rnn_cells_dict.keys()))
    # 'with' guarantees the file is closed even if writing raises.
    with open('layer_reference/units.rst', 'w') as rst_file:
        rst_file.write(header_text)
        for layer_name in layer_names:
            unit_class = RecLayer.get_rnn_cell_class(layer_name)
            if issubclass(unit_class, RNNCell) or issubclass(unit_class, RecSeqCellOp):
                module = unit_class.__module__
                name = unit_class.__name__
                # Only public classes following the '*Cell' naming convention.
                if name.endswith('Cell') and not name.startswith('_'):
                    rst_file.write('\n')
                    rst_file.write('%s\n' % name)
                    rst_file.write('%s\n' % ('-' * len(name)))
                    rst_file.write('\n')
                    rst_file.write('.. autoclass:: %s.%s\n' % (module, name))
                    rst_file.write(' :members:\n')
                    rst_file.write(' :undoc-members:\n')
                    rst_file.write('\n')
|
def generate():
    """Generate api/*.rst stub files for every module in the returnn package tree."""
    if not os.path.exists('api'):
        os.mkdir('api')

    def makeapi(modname):
        """Write the .rst stub for one module, unless it already exists.

        :param str modname:
        """
        fn = 'api/%s.rst' % (modname[len('returnn.'):] or '___base')
        if os.path.exists(fn):
            return
        target_python_file_path = modname.replace('.', '/')
        if os.path.isfile(target_python_file_path + '.py'):
            github_path = modname.replace('.', '/')
        else:
            # A package: link to its __init__.py instead.
            github_path = os.path.join(modname.replace('.', '/'), '__init__')
        title = ':mod:`%s`' % modname
        # 'with' guarantees the file is closed even if writing raises.
        with open(fn, 'w') as f:
            f.write(':github_url: https://github.com/rwth-i6/returnn/blob/master/%s.py\n\n' % github_path)
            f.write('\n%s\n%s\n\n' % (title, '-' * len(title)))
            f.write('.. automodule:: %s\n\t:members:\n\t:undoc-members:\n\n' % modname)

    def scan_modules(modpath):
        """Recurse: stub for the package at modpath, then subpackages, then modules.

        :param list[str] modpath:
        """
        makeapi('.'.join(modpath))
        path = '/'.join(modpath)
        # First pass: recurse into subpackages (dirs with __init__.py).
        for fn in sorted(os.listdir(path)):
            if not os.path.isdir(os.path.join(path, fn)):
                continue
            if os.path.exists(os.path.join(path, fn, '.git')):
                continue
            if fn == '__pycache__':
                continue
            if not os.path.exists(os.path.join(path, fn, '__init__.py')):
                continue
            scan_modules(modpath + [fn])
        # Second pass: plain .py modules in this package.
        for fn in sorted(os.listdir(path)):
            if os.path.isdir(os.path.join(path, fn)):
                continue
            if not fn.endswith('.py'):
                continue
            if fn == '__init__.py':
                continue
            modname, _ = os.path.splitext(os.path.basename(fn))
            if modname in exclude:
                continue
            makeapi('.'.join(modpath + [modname]))

    scan_modules(['returnn'])
|
def init_config(config_filename=None, command_line_options=(), default_config=None, extra_updates=None):
    """
    Initializes the global config.

    :param str|None config_filename:
    :param list[str]|tuple[str] command_line_options: e.g. ``sys.argv[1:]``
    :param dict[str]|None default_config:
    :param dict[str]|None extra_updates:

    There are multiple sources which are used to init the config:

    * ``config_filename``, and maybe leading items of ``command_line_options``
      interpreted as config filenames
    * other options via ``command_line_options``
    * ``extra_updates``

    Order/priority (later wins):

    * ``extra_updates``
    * options from ``command_line_options``
    * ``config_filename``
    * config filename(s) from ``command_line_options``
    * ``extra_updates``
    * options from ``command_line_options``

    ``extra_updates`` and ``command_line_options`` are applied twice so that
    they are already available while the config file is loaded (which may use
    them via Python code), yet still override anything the config file sets.
    """
    global config
    config = Config()
    config_filenames_by_cmd_line = []
    if command_line_options:
        # Leading non-option args (not starting with '-'/'+') are config filenames.
        i = 0
        for arg in command_line_options:
            if (arg[:1] in '-+'):
                break
            config_filenames_by_cmd_line.append(arg)
            i += 1
        command_line_options = command_line_options[i:]
    # First pass: make defaults/updates/options visible to the loading config.
    if default_config:
        config.update(default_config)
    if extra_updates:
        config.update(extra_updates)
    if command_line_options:
        config.parse_cmd_args(command_line_options)
    if config_filename:
        config.load_file(config_filename)
    for fn in config_filenames_by_cmd_line:
        config.load_file(fn)
    # Second pass: re-apply so they override anything set by the config file(s).
    if extra_updates:
        config.update(extra_updates)
    if command_line_options:
        config.parse_cmd_args(command_line_options)
    if config.bool('EnableAutoNumpySharedMemPickling', False):
        import returnn.util.task_system
        returnn.util.task_system.SharedMemNumpyConfig['enabled'] = True
    if (config.value('task', 'train') == 'server'):
        # The server task fixes dummy input/output dimensions.
        config.set('num_inputs', 2)
        config.set('num_outputs', 1)
    BehaviorVersion.set(config.int('behavior_version', None))
|
def init_log():
    """
    Initializes the global :class:`Log`.
    """
    # Log targets/verbosity come from the already-initialized global config.
    log.init_by_config(config)
|
def get_cache_byte_sizes():
    """
    :rtype: (int,int,int)
    :returns cache size in bytes for (train,dev,eval)
    """
    # User spec like ["4G"] or ["2G", "1G"]; default is one entry from util.
    cache_sizes_user = config.list('cache_size', [('%iG' % util.default_cache_size_in_gbytes())])
    num_datasets = ((1 + config.has('dev')) + config.has('eval'))
    cache_factor = 1.0
    if (len(cache_sizes_user) == 1):
        # One value given: share it across train/dev/eval.
        cache_sizes_user *= 3
        cache_factor /= float(num_datasets)
    elif (len(cache_sizes_user) == 2):
        # Two values given: no cache for eval.
        cache_sizes_user.append('0')
    assert (len(cache_sizes_user) == 3), 'invalid amount of cache sizes specified'
    cache_sizes = []
    for cache_size_user in cache_sizes_user:
        cache_size = (cache_factor * float(cache_size_user.replace('G', '').replace('M', '').replace('K', '')))
        # NOTE(review): sanity check comparing string lengths against
        # len(str(float)) — looks fragile for multi-digit inputs; verify.
        assert ((len(cache_size_user) - len(str(cache_size))) <= 1), 'invalid cache size specified'
        if (cache_size_user.find('G') > 0):
            cache_size *= ((1024 * 1024) * 1024)
        elif (cache_size_user.find('M') > 0):
            cache_size *= (1024 * 1024)
        elif (cache_size_user.find('K') > 0):
            cache_size *= 1024
        # Round up to a whole byte; zero stays zero (cache disabled).
        cache_size = ((int(cache_size) + 1) if (int(cache_size) > 0) else 0)
        cache_sizes.append(cache_size)
    return cache_sizes
|
def load_data(config, cache_byte_size, files_config_key, **kwargs):
    """
    :param Config config:
    :param int cache_byte_size:
    :param str files_config_key: such as "train" or "dev"
    :param kwargs: passed on to init_dataset() or init_dataset_via_str()
    :rtype: (Dataset,int)
    :returns the dataset, and the cache byte size left over if we cache the whole dataset.
    """
    if (not config.bool_or_other(files_config_key, None)):
        # Dataset not configured at all.
        return (None, 0)
    kwargs = kwargs.copy()
    kwargs.setdefault('name', files_config_key)
    if (config.is_typed(files_config_key) and isinstance(config.typed_value(files_config_key), dict)):
        # Dict-valued config entry: treat it as dataset constructor options.
        config_opts = config.typed_value(files_config_key)
        assert isinstance(config_opts, dict)
        kwargs.update(config_opts)
        if ('cache_byte_size' not in config_opts):
            if (kwargs.get('class', None) == 'HDFDataset'):
                kwargs['cache_byte_size'] = cache_byte_size
        Dataset.kwargs_update_from_config(config, kwargs)
        data = init_dataset(kwargs)
    elif (config.is_typed(files_config_key) and callable(config.typed_value(files_config_key))):
        # Callable config entry: let it construct the dataset itself.
        data = init_dataset(config.typed_value(files_config_key), default_kwargs=kwargs)
    else:
        # Plain string config entry (e.g. a file list spec).
        config_str = config.value(files_config_key, '')
        data = init_dataset_via_str(config_str, config=config, cache_byte_size=cache_byte_size, **kwargs)
    cache_leftover = 0
    if isinstance(data, HDFDataset):
        cache_leftover = data.definite_cache_leftover
    return (data, cache_leftover)
|
def init_data():
    """
    Initializes the globals train,dev,eval of type Dataset.
    """
    cache_byte_sizes = get_cache_byte_sizes()
    global train_data, dev_data, eval_data
    (dev_data, extra_cache_bytes_dev) = load_data(config, cache_byte_sizes[1], 'dev', **Dataset.get_default_kwargs_eval(config=config))
    (eval_data, extra_cache_bytes_eval) = load_data(config, cache_byte_sizes[2], 'eval', **Dataset.get_default_kwargs_eval(config=config))
    train_cache_bytes = cache_byte_sizes[0]
    if (train_cache_bytes >= 0):
        # Cache not needed by dev/eval is handed over to train.
        train_cache_bytes += (extra_cache_bytes_dev + extra_cache_bytes_eval)
    (train_data, extra_train) = load_data(config, train_cache_bytes, 'train')
|
def print_task_properties():
    """
    print information about used data
    """
    if train_data:
        print('Train data:', file=log.v2)
        print(' input:', train_data.num_inputs, 'x', train_data.window, file=log.v2)
        print(' output:', train_data.num_outputs, file=log.v2)
        print(' ', (train_data.len_info() or 'no info'), file=log.v2)
    # Dev and eval print the same shape of summary.
    for dataset, label in ((dev_data, 'Dev data:'), (eval_data, 'Eval data:')):
        if dataset:
            print(label, file=log.v2)
            print(' ', (dataset.len_info() or 'no info'), file=log.v2)
|
def init_engine():
    """
    Initializes global ``engine``, for example :class:`returnn.tf.engine.Engine`.
    """
    global engine
    if BackendEngine.is_tensorflow_selected():
        from returnn.tf.engine import Engine
    elif BackendEngine.is_torch_selected():
        from returnn.torch.engine import Engine
    else:
        raise NotImplementedError('Backend engine not implemented')
    engine = Engine(config=config)
|
def returnn_greeting(config_filename=None, command_line_options=None):
    """
    Prints some RETURNN greeting to the log.

    :param str|None config_filename:
    :param list[str]|None command_line_options:
    """
    print(('RETURNN starting up, version %s, date/time %s, pid %i, cwd %s, Python %s' % (util.describe_returnn_version(), time.strftime('%Y-%m-%d-%H-%M-%S (UTC%z)'), os.getpid(), os.getcwd(), sys.executable)), file=log.v3)
    if config_filename:
        print(('RETURNN config: %s' % config_filename), file=log.v4)
        if os.path.islink(config_filename):
            # Show where a symlinked config actually points.
            print(('RETURNN config is symlink to: %s' % os.readlink(config_filename)), file=log.v4)
    if (command_line_options is not None):
        print(('RETURNN command line options: %s' % (command_line_options,)), file=log.v4)
    import socket
    print('Hostname:', socket.gethostname(), file=log.v4)
|
def init_backend_engine():
    """
    Selects the backend engine (TensorFlow, PyTorch, Theano, or whatever)
    and does corresponding initialization and preparation.

    This does not initialize the global ``engine`` object yet.
    See :func:`init_engine` for that.
    """
    if config.value('PYTORCH_CUDA_ALLOC_CONF', None):
        # Export before torch initializes CUDA, otherwise it has no effect.
        value = config.value('PYTORCH_CUDA_ALLOC_CONF', '')
        print(f'Set PYTORCH_CUDA_ALLOC_CONF={value!r}.')
        os.environ['PYTORCH_CUDA_ALLOC_CONF'] = value
    BackendEngine.select_engine(config=config)
    if BackendEngine.is_tensorflow_selected():
        print('TensorFlow:', util.describe_tensorflow_version(), file=log.v3)
        if (util.get_tensorflow_version_tuple()[0] == 0):
            print('Warning: TF <1.0 is not supported and likely broken.', file=log.v2)
        if os.environ.get('TF_DEVICE'):
            # Environment variable overrides the config 'device' option.
            print(('Devices: Use %s via TF_DEVICE instead of %s.' % (os.environ.get('TF_DEVICE'), config.opt_typed_value('device'))), file=log.v4)
            config.set('device', os.environ.get('TF_DEVICE'))
        if config.is_true('use_horovod'):
            import returnn.tf.horovod
            hvd = returnn.tf.horovod.get_ctx(config=config)
            import socket
            if (('gpu' in config.value('device', '')) or os.environ.get('CUDA_VISIBLE_DEVICES', '')):
                # Pin each Horovod rank to its local GPU via the TF session options.
                gpu_opts = config.typed_dict.setdefault('tf_session_opts', {}).setdefault('gpu_options', {})
                assert ('visible_device_list' not in gpu_opts)
                gpu_opts['visible_device_list'] = str(hvd.local_rank())
                print(('Horovod: Hostname %s, pid %i, using GPU %s.' % (socket.gethostname(), os.getpid(), gpu_opts['visible_device_list'])), file=log.v3)
            elif (hvd.rank() == 0):
                print('Horovod: Not using GPU.', file=log.v3)
            if (hvd.rank() == 0):
                print('Horovod: Reduce type:', hvd.get_reduce_type(), file=log.v3)
        from returnn.tf.util import basic as tf_util
        tf_session_opts = config.typed_value('tf_session_opts', {})
        assert isinstance(tf_session_opts, dict)
        # Thread pools must be configured before the first TF session use.
        tf_util.setup_tf_thread_pools(log_file=log.v3, tf_session_opts=tf_session_opts)
        tf_util.print_available_devices(tf_session_opts=tf_session_opts, file=log.v2)
        from returnn.tf.native_op import OpMaker
        OpMaker.log_stream = log.v3
        tf_util.debug_register_better_repr()
        if config.is_true('distributed_tf'):
            import returnn.tf.distributed
            returnn.tf.distributed.init_distributed_tf(config)
    elif BackendEngine.is_torch_selected():
        print('PyTorch:', util.describe_torch_version(), file=log.v3)
        if (config.typed_value('torch_distributed') is not None):
            import socket
            import returnn.torch.distributed
            torch_distributed = returnn.torch.distributed.get_ctx(config=config)
            print(('Torch: Hostname %s, pid %i, using GPU %s.' % (socket.gethostname(), os.getpid(), str(torch_distributed.local_rank()))), file=log.v3)
        from returnn.torch.util import diagnose_gpu
        diagnose_gpu.print_available_devices(file=log.v2)
        if config.is_true('use_lovely_tensors'):
            try:
                import lovely_tensors
                lovely_tensors.monkey_patch()
            except ImportError as exc:
                # Optional dependency; only warn.
                print('Warning: could not import lovely_tensors:', exc, file=log.v3)
    else:
        raise NotImplementedError(f'Backend engine {BackendEngine.get_selected_engine()} not implemented')
|
def init(config_filename=None, command_line_options=(), config_updates=None, extra_greeting=None):
    """
    :param str|None config_filename:
    :param tuple[str]|list[str]|None command_line_options: e.g. sys.argv[1:]
    :param dict[str]|None config_updates: see :func:`init_config`
    :param str|None extra_greeting:
    """
    debug_util.init_better_exchook()
    util.init_thread_join_hack()
    # Config must come first; nearly everything below reads the global config.
    init_config(config_filename=config_filename, command_line_options=command_line_options, extra_updates=config_updates)
    if config.bool('patch_atfork', False):
        from returnn.util.basic import maybe_restart_returnn_with_atfork_patch
        maybe_restart_returnn_with_atfork_patch()
    init_log()
    if extra_greeting:
        print(extra_greeting, file=log.v1)
    returnn_greeting(config_filename=config_filename, command_line_options=command_line_options)
    debug_util.init_faulthandler()
    if config.bool('watch_memory', False):
        from returnn.util.watch_memory import watch_memory
        watch_memory()
    init_backend_engine()
    if config.bool('ipython', False):
        debug_util.init_ipython_kernel()
    if config.typed_value('startup_callback'):
        # User hook, called once config and backend are ready.
        startup_callback = config.typed_value('startup_callback')
        startup_callback(config=config)
    if need_data():
        init_data()
    print_task_properties()
    init_engine()
|
def finalize(error_occurred=False):
    """
    Cleanup at the end.

    :param bool error_occurred:
    """
    print('Quitting', file=getattr(log, 'v4', sys.stderr))
    global quit_returnn
    quit_returnn = True
    # Nonstandard flag set on the sys module; presumably checked elsewhere
    # (e.g. by exit/atexit handling) — TODO confirm.
    sys.exited = True
    if engine:
        if BackendEngine.is_tensorflow_selected():
            engine.finalize(error_occurred=error_occurred)
            if config.is_true('use_horovod'):
                import horovod.tensorflow as hvd
                hvd.shutdown()
        elif BackendEngine.is_torch_selected():
            if (config.typed_value('torch_distributed') is not None):
                from torch.distributed import destroy_process_group
                destroy_process_group()
|
def need_data():
    """
    :return: whether we need to init the data (call :func:`init_data`) for the current task (:func:`execute_main_task`)
    :rtype: bool
    """
    if config.has('need_data') and not config.bool('need_data', True):
        # Explicitly disabled in the config.
        return False
    task = config.value('task', 'train')
    return task not in ('nop', 'nop_init_net_train', 'cleanup_old_models')
|
def execute_main_task():
    """
    Executes the main task (via config ``task`` option).
    """
    from returnn.util.basic import hms_fraction
    start_time = time.time()
    task = config.value('task', 'train')
    if config.is_true('dry_run'):
        print('Dry run, will not save anything.', file=log.v1)
    if (task == 'train'):
        assert (train_data and train_data.have_seqs()), ("no train files specified, check 'train' option: %s" % config.value('train', None))
        engine.init_train_from_config(config, train_data, dev_data, eval_data)
        engine.train()
    elif (task == 'eval'):
        if config.value('load', None):
            # Explicit model file given; do not touch the LR-control scores.
            print('Evaluate model', config.value('load', None), file=log.v2)
            lr_control_update_scores = False
        else:
            epoch = config.int('epoch', (- 1))
            load_epoch = config.int('load_epoch', (- 1))
            if (epoch >= 0):
                assert ((load_epoch < 0) or (load_epoch == epoch)), 'epoch and load_epoch have to match'
                engine.epoch = epoch
                config.set('load_epoch', engine.epoch)
            else:
                assert (load_epoch >= 0), 'specify epoch or load_epoch'
                engine.epoch = load_epoch
            print('Evaluate epoch', engine.epoch, file=log.v2)
            lr_control_update_scores = True
        engine.init_train_from_config(config, train_data, dev_data, eval_data)
        engine.eval_model(output_file=config.value('eval_output_file', None), output_per_seq_file=config.value('eval_output_file_per_seq', None), loss_name=config.value('loss_name', None), output_per_seq_format=config.list('output_per_seq_format', ['score']), output_per_seq_file_format=config.value('output_per_seq_file_format', 'txt'), lr_control_update_scores=lr_control_update_scores)
    elif (task in ['forward', 'hpx']):
        if (config.typed_value('forward_callback') or (not BackendEngine.is_tensorflow_selected())):
            # Generic forward path with a user-supplied callback.
            engine.init_network_from_config(config)
            if (config.value('forward_data', 'eval') in ['train', 'dev', 'eval']):
                data = {'train': train_data, 'dev': dev_data, 'eval': eval_data}[config.value('forward_data', 'eval')]
                assert data, 'set forward_data'
            else:
                data = init_dataset(config.opt_typed_value('forward_data'))
            data.init_seq_order(epoch=(engine.epoch or 1))
            forward_callback = config.typed_value('forward_callback')
            assert forward_callback, 'no forward_callback specified'
            if callable(forward_callback):
                # May be a factory returning the actual callback object.
                forward_callback = forward_callback()
            engine.forward_with_callback(dataset=data, callback=forward_callback)
        else:
            # TF-only path: dump forward output to an HDF file.
            assert BackendEngine.is_tensorflow_selected()
            assert (eval_data is not None), 'no eval data provided'
            combine_labels = config.value('combine_labels', '')
            engine.use_search_flag = config.bool('forward_use_search', False)
            if config.has('epoch'):
                config.set('load_epoch', config.int('epoch', 0))
            engine.init_network_from_config(config)
            eval_data.init_seq_order(epoch=(engine.epoch or 1))
            output_file = config.value('output_file', ('dump-fwd-epoch-%i.hdf' % engine.epoch))
            forward_batch_size = config.int('forward_batch_size', 0)
            if (not forward_batch_size):
                raise Exception('forward_batch_size not set')
            engine.forward_to_hdf(data=eval_data, output_file=output_file, combine_labels=combine_labels, batch_size=forward_batch_size)
    elif (task == 'search'):
        engine.use_search_flag = True
        engine.use_eval_flag = config.bool('search_do_eval', True)
        engine.init_network_from_config(config)
        if (config.value('search_data', 'eval') in ['train', 'dev', 'eval']):
            data = {'train': train_data, 'dev': dev_data, 'eval': eval_data}[config.value('search_data', 'eval')]
            assert data, 'set search_data'
        else:
            data = init_dataset(config.opt_typed_value('search_data'))
        engine.search(data, do_eval=config.bool('search_do_eval', True), output_layer_names=config.typed_value('search_output_layer', 'output'), output_file=config.value('search_output_file', ''), output_file_format=config.value('search_output_file_format', 'txt'))
    elif (task == 'compute_priors'):
        assert (train_data is not None), 'train data for priors should be provided'
        engine.init_network_from_config(config)
        engine.compute_priors(dataset=train_data, config=config)
    elif (task == 'analyze'):
        statistics = config.list('statistics', None)
        engine.init_network_from_config(config)
        engine.analyze(data=(eval_data or dev_data), statistics=statistics)
    elif (task == 'analyze_data'):
        analyze_data(config)
    elif (task == 'hyper_param_tuning'):
        import returnn.tf.hyper_param_tuning
        tuner = returnn.tf.hyper_param_tuning.Optimization(config=config, train_data=train_data)
        tuner.work()
    elif (task == 'cleanup_old_models'):
        engine.cleanup_old_models(ask_for_confirmation=True)
    elif (task == 'search_server'):
        engine.use_search_flag = True
        engine.init_network_from_config(config)
        engine.web_server(port=config.int('web_server_port', 12380))
    elif task.startswith('config:'):
        # Run a callable defined in the config, e.g. task="config:my_func".
        action = config.typed_dict[task[len('config:'):]]
        print(('Task: %r' % action), file=log.v1)
        assert callable(action)
        action()
    elif task.startswith('optional-config:'):
        # Like "config:", but quietly quit if the entry does not exist.
        action = config.typed_dict.get(task[len('optional-config:'):], None)
        if (action is None):
            print(('No task found for %r, so just quitting.' % task), file=log.v1)
        else:
            print(('Task: %r' % action), file=log.v1)
            assert callable(action)
            action()
    elif (task == 'nop'):
        print('Task: No-operation', file=log.v1)
    elif (task == 'nop_init_net_train'):
        print('Task: No-operation, despite initializing the network (for training)', file=log.v1)
        engine.init_train_from_config(config, train_data, dev_data, eval_data)
    elif (task == 'initialize_model'):
        engine.init_train_from_config(config, train_data, dev_data, eval_data)
        engine.save_model(config.value('model', 'dummy'))
    elif (task == 'debug_shell'):
        debug_shell(locals(), globals())
    else:
        raise Exception(('unknown task: %r' % (task,)))
    print(('elapsed: %s' % hms_fraction((time.time() - start_time))), file=log.v3)
|
def analyze_data(config):
    """
    Streams one dataset sequence by sequence and dumps label log-priors plus
    per-feature mean / std-dev statistics to text files.

    :param Config config:
    """
    dss = config.value('analyze_dataset', 'train')
    ds = {'train': train_data, 'dev': dev_data, 'eval': eval_data}[dss]
    epoch = config.int('epoch', 1)
    print('Analyze dataset', dss, 'epoch', epoch, file=log.v1)
    ds.init_seq_order(epoch=epoch)
    stat_prefix = config.value('statistics_save_prefix', 'statistics')
    dtype = config.value('statistics_dtype', 'float64')
    target = config.value('target', 'classes')
    data_key = config.value('data_key', 'data')
    assert ds.is_data_sparse(target), 'need for prior calculation'
    assert (not ds.is_data_sparse(data_key)), 'needed for mean/var estimation'
    from returnn.util.basic import inplace_increment, progress_bar_with_time, NumbersDict
    priors = numpy.zeros((ds.get_data_dim(target),), dtype=dtype)
    mean = numpy.zeros((ds.get_data_dim(data_key),), dtype=dtype)
    mean_sq = numpy.zeros((ds.get_data_dim(data_key),), dtype=dtype)
    total_targets_len = 0
    total_data_len = 0
    seq_idx = 0
    while ds.is_less_than_num_seqs(seq_idx):
        progress_bar_with_time(ds.get_complete_frac(seq_idx))
        ds.load_seqs(seq_idx, (seq_idx + 1))
        targets = ds.get_data(seq_idx, target)
        # Histogram of target labels.
        inplace_increment(priors, targets, 1)
        total_targets_len += targets.shape[0]
        data = ds.get_data(seq_idx, data_key)
        new_total_data_len = (total_data_len + data.shape[0])
        # NOTE(review): incremental update where (1.0 - f) equals
        # data.shape[0]/new_total_data_len, so the *sum* (not the per-seq mean)
        # is weighted by that factor — verify this matches the intended
        # running-mean semantics.
        f = (float(total_data_len) / new_total_data_len)
        mean = ((mean * f) + (numpy.sum(data, axis=0) * (1.0 - f)))
        mean_sq = ((mean_sq * f) + (numpy.sum((data * data), axis=0) * (1.0 - f)))
        total_data_len = new_total_data_len
        seq_idx += 1
    log_priors = numpy.log(priors)
    # Normalize by total target frame count, in log space.
    log_priors -= numpy.log(NumbersDict(ds.get_num_timesteps())[target])
    std_dev = numpy.sqrt((mean_sq - (mean * mean)))
    print(('Finished. %i total target frames, %i total data frames' % (total_targets_len, total_data_len)), file=log.v1)
    priors_fn = (stat_prefix + '.log_priors.txt')
    mean_fn = (stat_prefix + '.mean.txt')
    std_dev_fn = (stat_prefix + '.std_dev.txt')
    print('Dump priors to', priors_fn, file=log.v1)
    numpy.savetxt(priors_fn, log_priors)
    print('Dump mean to', mean_fn, file=log.v1)
    numpy.savetxt(mean_fn, mean)
    print('Dump std dev to', std_dev_fn, file=log.v1)
    numpy.savetxt(std_dev_fn, std_dev)
    print('Done.', file=log.v1)
|
def main(argv=None):
    """
    Main entry point of RETURNN.

    :param list[str]|None argv: ``sys.argv`` by default
    """
    args = sys.argv if argv is None else argv
    exit_code = 0
    try:
        assert len(args) >= 2, 'usage: %s <config>' % args[0]
        init(command_line_options=args[1:])
        execute_main_task()
    except KeyboardInterrupt:
        exit_code = 1
        print('KeyboardInterrupt', file=getattr(log, 'v3', sys.stderr))
        # At the highest verbosity level, also dump the traceback.
        if getattr(log, 'verbose', [False] * 6)[5]:
            sys.excepthook(*sys.exc_info())
    finalize(error_occurred=exit_code != 0)
    if exit_code:
        sys.exit(exit_code)
|
def setup(package_name=__package__, modules=None):
    """
    This does the setup, such that all the modules become available in the `returnn` package.
    It does not import all the modules now, but instead provides them lazily.

    :param str package_name: "returnn" by default
    :param dict[str,types.ModuleType]|None modules: if set, will do ``modules[old_mod_name] = mod``
    """
    for old_mod_name, new_mod_name in sorted(old_to_new_mod_mapping.items()):
        full_mod_name = 'returnn.%s' % new_mod_name
        full_old_mod_name = '%s.%s' % (package_name, old_mod_name)
        # Prefer a cached or already-imported module; otherwise defer via a lazy loader.
        if full_mod_name in _mod_cache:
            mod = _mod_cache[full_mod_name]
        elif full_mod_name in sys.modules:
            mod = sys.modules[full_mod_name]
            _mod_cache[full_mod_name] = mod
        else:
            mod = _LazyLoader(
                full_mod_name=full_mod_name,
                full_old_mod_name=full_old_mod_name,
                old_mod_name=old_mod_name,
                modules=modules,
            )
        # Register under both alias names, but never overwrite an existing entry.
        for alias in (old_mod_name, full_old_mod_name):
            if alias not in sys.modules:
                sys.modules[alias] = mod
        if modules is not None:
            modules[old_mod_name] = mod
|
class _LazyLoader(types.ModuleType):
    """
    Lazily import a module, mainly to avoid pulling in large dependencies.
    Code borrowed from TensorFlow, and simplified, and extended.
    """

    def __init__(self, full_mod_name, **kwargs):
        """
        :param str full_mod_name: e.g. "returnn.tf.engine"
        :param kwargs: stored in ``_lazy_mod_config`` and used by :func:`_load`
            (keys ``old_mod_name``, ``full_old_mod_name``, ``modules``)
        """
        super(_LazyLoader, self).__init__(full_mod_name)
        fn = ('%s/%s.py' % (_base_dir, full_mod_name.replace('.', '/')))
        if (not os.path.exists(fn)):
            # Maybe it is a package, not a plain module file.
            fn = ('%s/%s/__init__.py' % (_base_dir, full_mod_name.replace('.', '/')))
        assert os.path.exists(fn), ('_LazyLoader: mod %r not found in %r' % (full_mod_name, _base_dir))
        self.__file__ = fn
        self._lazy_mod_config = dict(full_mod_name=full_mod_name, **kwargs)

    def _load(self):
        """
        Perform the actual import (cached after the first call),
        and register the module under its old alias names.

        :rtype: types.ModuleType
        """
        full_mod_name = self.__name__
        lazy_mod_config = self._lazy_mod_config
        old_mod_name = lazy_mod_config.get('old_mod_name', None)
        full_old_mod_name = lazy_mod_config.get('full_old_mod_name', None)
        modules = lazy_mod_config.get('modules', None)
        if (full_mod_name in _mod_cache):
            module = _mod_cache[full_mod_name]
        else:
            # Fix: the previous no-op ``try: ... except Exception: raise`` wrapper was removed.
            module = importlib.import_module(full_mod_name)
            _mod_cache[full_mod_name] = module
        if old_mod_name:
            sys.modules[old_mod_name] = module
        if full_old_mod_name:
            sys.modules[full_old_mod_name] = module
        if (modules is not None):
            assert old_mod_name
            modules[old_mod_name] = module
        return module

    def __getattribute__(self, item):
        # '__dict__' access must not fail (e.g. for vars()/inspect), so fall back to {}.
        if (item == '__dict__'):
            try:
                mod = self._load()
            except Exception:
                print(('WARNING: %s cannot be imported, __dict__ not available' % self.__name__))
                return {}
            return getattr(mod, '__dict__')
        return super(_LazyLoader, self).__getattribute__(item)

    def __getattr__(self, item):
        module = self._load()
        return getattr(module, item)

    def __dir__(self):
        module = self._load()
        return dir(module)

    def __setattr__(self, key, value):
        # These internal attributes belong to the loader itself, not to the wrapped module.
        if (key in ['__file__', '_lazy_mod_config']):
            super(_LazyLoader, self).__setattr__(key, value)
            return
        module = self._load()
        setattr(module, key, value)
|
def debug_print_file(fn):
    """
    Print the content of a file (or a directory listing) to stdout, for debugging.

    :param str fn: file or directory path
    """
    print(('%s:' % fn))
    if (not os.path.exists(fn)):
        print('<does not exist>')
        return
    if os.path.isdir(fn):
        print('<dir:>')
        pprint(sorted(os.listdir(fn)))
        return
    # Fix: close the file handle deterministically instead of leaking it.
    with open(fn) as f:
        print(f.read())
|
def parse_pkg_info(fn):
    """
    :param str fn: path to a PKG-INFO file
    :return: dict with info written by distutils. e.g. ``res["Version"]`` is the version.
    :rtype: dict[str,str]
    """
    res = {}
    # Fix: close the file handle deterministically instead of leaking it.
    with open(fn) as f:
        content = f.read()
    for ln in content.splitlines():
        # Skip empty lines and continuation lines (which start with whitespace).
        if ((not ln) or (not ln[:1].strip())):
            continue
        (key, value) = ln.split(': ', 1)
        res[key] = value
    return res
|
def git_head_version(git_dir=_root_dir, long=False):
    """
    :param str git_dir:
    :param bool long: see :func:`get_version_str`
    :rtype: str
    """
    from returnn.util.basic import git_commit_date, git_commit_rev, git_is_dirty
    version = ('1.%s' % git_commit_date(git_dir=git_dir))
    if not long:
        return version
    # Long format appends the revision, plus a dirty marker for local modifications.
    version += ('+git.%s' % git_commit_rev(git_dir=git_dir))
    if git_is_dirty(git_dir=git_dir):
        version += '.dirty'
    return version
|
def get_version_str(verbose=False, verbose_error=False, fallback=None, long=False):
    """
    :param bool verbose: print exactly how we end up with some version
    :param bool verbose_error: print only any potential errors
    :param str|None fallback:
    :param bool long:
        False: Always distutils.version.StrictVersion compatible. just like "1.20190202.154527".
        True: Will also add the revision string, like "1.20180724.141845+git.7865d01".
        The format might change in the future.
        We will keep it `SemVer <https://semver.org/>`__ compatible.
        I.e. the string before the `"+"` will be the short version.
        We always make sure that there is a `"+"` in the string.
    :rtype: str
    """
    # 1) Generated info module inside the package (installed-package case).
    if os.path.exists(('%s/_setup_info_generated.py' % _my_dir)):
        from . import _setup_info_generated as info
        if verbose:
            print(('Found _setup_info_generated.py, long version %r, version %r.' % (info.long_version, info.version)))
        if long:
            assert ('+' in info.long_version)
            return info.long_version
        return info.version
    # 2) Generated info file in the repo root: execute it in a fresh namespace.
    info_in_root_filename = ('%s/_setup_info_generated.py' % _root_dir)
    if os.path.exists(info_in_root_filename):
        # Fix: close the file handle; and use exec (not eval) to run an 'exec'-mode code object.
        with open(info_in_root_filename) as f:
            code = compile(f.read(), info_in_root_filename, 'exec')
        info = {}
        exec(code, info)
        version = info['version']
        long_version = info['long_version']
        if verbose:
            print(('Found %r in root, long version %r, version %r.' % (info_in_root_filename, long_version, version)))
        if long:
            assert ('+' in long_version)
            return long_version
        return version
    # 3) Git checkout.
    if os.path.exists(('%s/.git' % _root_dir)):
        try:
            version = git_head_version(git_dir=_root_dir, long=long)
            if verbose:
                print('Version via Git:', version)
            if long:
                assert ('+' in version)
            return version
        except Exception as exc:
            if (verbose or verbose_error):
                print('Exception while getting Git version:', exc)
                sys.excepthook(*sys.exc_info())
            if (not fallback):
                raise
    # 4) Explicit fallback.
    if fallback:
        if verbose:
            print('Version via fallback:', fallback)
        if long:
            assert ('+' in fallback)
        return fallback
    raise Exception('Cannot get RETURNN version.')
|
class Config():
    """
    Reads in some config file, and provides access to the key/value items.
    We support some simple text-line-based config, JSON, and Python format.
    """

    def __init__(self, items=None):
        """
        :param dict[str]|None items: optional initial typed_dict
        """
        # Untyped (string-list) values, from the simple text-line-based format.
        self.dict = {}
        # Typed values, e.g. from a Python/JSON config, or via set().
        self.typed_dict = {}
        self.network_topology_json = None
        # All config file names which were loaded into this instance.
        self.files = []
        if (items is not None):
            self.typed_dict.update(items)

    def __getstate__(self):
        """
        Custom pickling: only ``typed_dict`` is pickled, with special handling so that
        a self-reference to this Config inside typed_dict round-trips to the same
        (new) Config instance on unpickling, via a reserved pickle memo slot.
        """
        import io
        from pickle import PicklingError
        from returnn.util.task_system import Pickler

        class _CustomPickler(Pickler):
            dispatch = Pickler.dispatch.copy()

            def save_global(self, obj, name=None):
                """save global"""
                module_name = getattr(obj, '__module__', None)
                # Objects defined inside the RETURNN config module cannot be resolved
                # by name on the receiving side, so refuse to pickle them.
                if (module_name == _PyModuleName):
                    raise PicklingError(('Can not pickle %r from RETURNN config' % obj))
                super().save_global(obj, name=name)

            def intellisave_dict(self_, obj):
                """save dict"""
                # The top-level typed_dict must be saved as a plain dict
                # (not via the "intelligent" dict saving).
                if (obj is self.typed_dict):
                    assert (id(obj) not in self_.memo)
                    super().save_dict(obj)
                    return
                super().intellisave_dict(obj)
            dispatch[dict] = intellisave_dict
        buffer = io.BytesIO()
        pickler = _CustomPickler(buffer)
        # Reserve a memo slot for self, so that references to this Config inside
        # typed_dict become back-references instead of recursive pickling.
        memo_idx = len(pickler.memo)
        pickler.memo[id(self)] = (memo_idx, self)
        pickler.dump(self.typed_dict)
        return {'_pid': os.getpid(), '_self_memo_idx': memo_idx, '_typed_dict_pickled': buffer.getvalue(), '_is_global': (self is get_global_config(raise_exception=False))}

    def __setstate__(self, state):
        """
        Counterpart to :func:`__getstate__`.
        """
        import io
        from pickle import _Unpickler
        self.__init__()
        buffer = io.BytesIO(state['_typed_dict_pickled'])
        unpickler = _Unpickler(buffer)
        # Map the reserved memo slot back to this (new) instance.
        unpickler.memo[state['_self_memo_idx']] = self
        self.typed_dict = unpickler.load()
        # If this was the global config in the sending process, make it global here too.
        if (state['_is_global'] and (os.getpid() != state['_pid'])):
            set_global_config(self)
            _global_config_as_py_module_proxy_setup()

    def load_file(self, f):
        """
        Reads the configuration parameters from a file and adds them to the inner set of parameters.

        :param string|io.TextIOBase|io.StringIO f:
        """
        if isinstance(f, str):
            assert os.path.isfile(f), ('config file not found: %r' % f)
            self.files.append(f)
            filename = f
            dirname = (os.path.dirname(filename) or '.')
            content = open(filename).read()
        else:
            filename = '<config string>'
            dirname = None
            content = f.read()
        content = content.strip()
        # Python config: either a shebang line or a .py extension.
        if (content.startswith('#!') or filename.endswith('.py')):
            if (dirname and os.path.exists(f'{dirname}/__init__.py') and filename.endswith('.py')):
                # The config is part of a Python package: import it as a proper module,
                # with the outermost package dir added to sys.path.
                import importlib
                basedir = os.path.abspath(dirname)
                while os.path.exists(f'{basedir}/__init__.py'):
                    basedir = os.path.dirname(basedir)
                if (basedir not in sys.path):
                    sys.path.insert(0, basedir)
                modname = ((os.path.relpath(dirname, basedir).replace('/', '.') + '.') + os.path.basename(filename)[:(- 3)])
                mod = importlib.import_module(modname)
                self.update(vars(mod))
            else:
                # Standalone Python config: exec it with typed_dict as its namespace.
                from returnn.util.basic import custom_exec
                user_ns = self.typed_dict
                user_ns.update({'config': self, '__file__': filename, '__name__': _PyModuleName})
                custom_exec(content, filename, user_ns, user_ns)
                _global_config_as_py_module_proxy_setup()
            return
        if content.startswith('{'):
            # JSON config.
            from returnn.util.basic import load_json
            json_content = load_json(content=content)
            assert isinstance(json_content, dict)
            self.update(json_content)
            return
        # Simple line-based "key value" config format. '#' starts a comment.
        for line in content.splitlines():
            if ('#' in line):
                line = line[:line.index('#')]
            line = line.strip()
            if (not line):
                continue
            line = line.split(None, 1)
            assert (len(line) == 2), ('unable to parse config line: %r' % line)
            self.add_line(key=line[0], value=line[1])

    @classmethod
    def get_config_file_type(cls, f):
        """
        :param str f: file path
        :return: "py", "js" or "txt"
        :rtype: str
        """
        # Sniff the first bytes: shebang -> Python, '{' -> JSON, otherwise plain text.
        with open(f, 'r') as f:
            start = f.read(3)
        if start.startswith('#!'):
            return 'py'
        if start.startswith('{'):
            return 'js'
        return 'txt'

    def parse_cmd_args(self, args):
        """
        :param list[str]|tuple[str] args:
        """
        from optparse import OptionParser
        parser = OptionParser()
        parser.add_option('-a', '--activation', dest='activation', help='[STRING/LIST] Activation functions: logistic, tanh, softsign, relu, identity, zero, one, maxout.')
        parser.add_option('-b', '--batch_size', dest='batch_size', help='[INTEGER/TUPLE] Maximal number of frames per batch (optional: shift of batching window).')
        parser.add_option('-c', '--chunking', dest='chunking', help='[INTEGER/TUPLE] Maximal number of frames per sequence (optional: shift of chunking window).')
        parser.add_option('-d', '--description', dest='description', help='[STRING] Description of experiment.')
        parser.add_option('-e', '--epoch', dest='epoch', help='[INTEGER] Starting epoch.')
        parser.add_option('-E', '--eval', dest='eval', help='[STRING] eval file path')
        parser.add_option('-f', '--gate_factors', dest='gate_factors', help='[none/local/global] Enables pooled (local) or separate (global) coefficients on gates.')
        parser.add_option('-g', '--lreg', dest='lreg', help='[FLOAT] L1 or L2 regularization.')
        parser.add_option('-i', '--save_interval', dest='save_interval', help='[INTEGER] Number of epochs until a new model will be saved.')
        parser.add_option('-j', '--dropout', dest='dropout', help='[FLOAT] Dropout probability (0 to disable).')
        parser.add_option('-k', '--output_file', dest='output_file', help='[STRING] Path to target file for network output.')
        parser.add_option('-l', '--log', dest='log', help='[STRING] Log file path.')
        parser.add_option('-L', '--load', dest='load', help='[STRING] load model file path.')
        parser.add_option('-m', '--momentum', dest='momentum', help='[FLOAT] Momentum term in gradient descent optimization.')
        parser.add_option('-n', '--num_epochs', dest='num_epochs', help='[INTEGER] Number of epochs that should be trained.')
        parser.add_option('-o', '--order', dest='order', help='[default/sorted/random] Ordering of sequences.')
        parser.add_option('-p', '--loss', dest='loss', help='[loglik/sse/ctc] Objective function to be optimized.')
        parser.add_option('-q', '--cache', dest='cache', help='[INTEGER] Cache size in bytes (supports notation for kilo (K), mega (M) and gigabyte (G)).')
        parser.add_option('-r', '--learning_rate', dest='learning_rate', help='[FLOAT] Learning rate in gradient descent optimization.')
        parser.add_option('-s', '--hidden_sizes', dest='hidden_sizes', help='[INTEGER/LIST] Number of units in hidden layers.')
        parser.add_option('-t', '--truncate', dest='truncate', help='[INTEGER] Truncates sequence in BPTT routine after specified number of timesteps (-1 to disable).')
        parser.add_option('-u', '--device', dest='device', help='[STRING/LIST] CPU and GPU devices that should be used (example: gpu0,cpu[1-6] or gpu,cpu*).')
        parser.add_option('-v', '--verbose', dest='log_verbosity', help='[INTEGER] Verbosity level from 0 - 5.')
        parser.add_option('-w', '--window', dest='window', help='[INTEGER] Width of sliding window over sequence.')
        parser.add_option('-x', '--task', dest='task', help='[train/forward/analyze] Task of the current program call.')
        parser.add_option('-y', '--hidden_type', dest='hidden_type', help='[VALUE/LIST] Hidden layer types: forward, recurrent, lstm.')
        parser.add_option('-z', '--max_sequences', dest='max_seqs', help='[INTEGER] Maximal number of sequences per batch.')
        parser.add_option('--config', dest='load_config', help='[STRING] load config')
        (options, args) = parser.parse_args(list(args))
        options = vars(options)
        for opt in options.keys():
            if (options[opt] is not None):
                if (opt == 'load_config'):
                    self.load_file(options[opt])
                else:
                    self.add_line(opt, options[opt])
        # Remaining positional args are expected as (++key, value) pairs.
        assert ((len(args) % 2) == 0), ('expect (++key, value) config tuples in remaining args: %r' % args)
        for i in range(0, len(args), 2):
            (key, value) = args[i:(i + 2)]
            assert (key[0:2] == '++'), ("expect key prefixed with '++' in (%r, %r)" % (key, value))
            # A value starting with "+-" escapes a literal leading "-".
            if (value[:2] == '+-'):
                value = value[1:]
            self.add_line(key=key[2:], value=value)

    def add_line(self, key, value):
        """
        Adds one specific configuration (key,value) pair to the inner set of parameters
        :type key: str
        :type value: str
        """
        if (key in self.typed_dict):
            # The key already has a typed value: try to keep the same type.
            value_type = type(self.typed_dict[key])
            if (value_type == str):
                pass
            else:
                try:
                    # NOTE(review): eval on config text -- only safe with trusted config input.
                    value = eval(value)
                except SyntaxError:
                    from returnn.log import log
                    print(("WARNING: can't evaluate config param %r to previous type: %s. Keeping as string." % (value, value_type)), file=log.v1)
            self.typed_dict[key] = value
            return
        # Untyped: store as list of strings (comma-separated values split up).
        if (value.find(',') > 0):
            value = value.split(',')
        else:
            value = [value]
        if (key == 'include'):
            # Special key: load the referenced config file(s) instead of storing.
            for f in value:
                self.load_file(f)
        else:
            self.dict[key] = value

    def has(self, key):
        """
        Returns whether the given key is present in the inner set of parameters
        :type key: string
        :rtype: boolean
        :returns True if and only if the given key is in the inner set of parameters
        """
        if (key in self.typed_dict):
            return True
        return (key in self.dict)

    def is_typed(self, key):
        """
        :type key: string
        :rtype: boolean
        :returns True if and only if the value of the given key has a specified data type
        """
        return (key in self.typed_dict)

    def is_true(self, key, default=False):
        """
        :param str key:
        :param bool default:
        :return: bool(value) if it is set or default
        :rtype: bool
        """
        if self.is_typed(key):
            return bool(self.typed_dict[key])
        return self.bool(key, default=default)

    def is_of_type(self, key, types):
        """
        :param str key:
        :param type|tuple[type] types: for isinstance() check
        :return: whether is_typed(key) is True and isinstance(value, types) is True
        :rtype: bool
        """
        if (key in self.typed_dict):
            return isinstance(self.typed_dict[key], types)
        return False

    def get_of_type(self, key, types, default=None):
        """
        :param str key:
        :param type|list[type]|T types: for isinstance() check
        :param T|None default:
        :return: if is_of_type(key, types) is True, returns the value, otherwise default
        :rtype: T
        """
        if self.is_of_type(key, types):
            return self.typed_dict[key]
        return default

    def set(self, key, value):
        """
        :type key: str
        :type value: list[str] | str | int | float | bool | dict | None
        """
        self.typed_dict[key] = value

    def update(self, dikt):
        """
        :type dikt: dict
        """
        for (key, value) in dikt.items():
            self.set(key, value)

    def _hack_value_reading_debug(self):
        # Debugging aid: monkey-patches self.value to log every call and its result.
        orig_value_func = self.value

        def wrapped_value_func(*args, **kwargs):
            """
            Wrapped func.
            """
            res = orig_value_func(*args, **kwargs)
            print(('Config.value(%s) -> %r' % (', '.join((list(map(repr, args)) + [('%s=%r' % (k, v)) for (k, v) in kwargs.items()])), res)))
            return res
        setattr(self, 'value', wrapped_value_func)

    def value(self, key, default, index=None, list_join_str=','):
        """
        :type key: str
        :type default: T
        :type index: int | None
        :param str list_join_str:
        :rtype: str | T
        """
        if (key in self.typed_dict):
            ls = self.typed_dict[key]
            if (index is None):
                # Lists/tuples are joined to a single string.
                if isinstance(ls, (list, tuple)):
                    return list_join_str.join([str(v) for v in ls])
                elif (ls is None):
                    return default
                else:
                    return str(ls)
            else:
                return str(ls[index])
        if (key in self.dict):
            ls = self.dict[key]
            if (index is None):
                return list_join_str.join(ls)
            else:
                return ls[index]
        return default

    def typed_value(self, key, default=None, index=None):
        """
        :type key: str
        :type default: T
        :type index: int | None
        :rtype: T | typing.Any
        """
        value = self.typed_dict.get(key, default)
        if (index is not None):
            assert isinstance(index, int)
            if isinstance(value, (list, tuple)):
                value = value[index]
            else:
                # Scalar value: only index 0 is valid.
                assert (index == 0)
        return value

    def opt_typed_value(self, key, default=None):
        """
        :param str key:
        :param T|None default:
        :rtype: T|object|str|None
        """
        if (key in self.typed_dict):
            return self.typed_dict[key]
        return self.value(key, default)

    def int(self, key, default, index=0):
        """
        Parses the value of the given key as integer, returning default if not existent
        :type key: str
        :type default: T
        :type index: int
        :rtype: int | T
        """
        if (key in self.typed_dict):
            value = self.typed_value(key, default=default, index=index)
            if (value is not None):
                assert isinstance(value, int)
            return value
        if (key in self.dict):
            return int(self.value(key, default, index))
        return default

    def bool(self, key, default, index=0):
        """
        Parses the value of the given key as boolean, returning default if not existent
        :type key: str
        :type default: T
        :type index: bool
        :rtype: bool | T
        """
        if (key in self.typed_dict):
            value = self.typed_value(key, default=default, index=index)
            # Accept ints as bools (e.g. 0/1 from the config).
            if isinstance(value, int):
                value = bool(value)
            if (value is not None):
                assert isinstance(value, bool)
            return value
        if (key not in self.dict):
            return default
        v = str(self.value(key, None, index))
        if (not v):
            return default
        from returnn.util.basic import to_bool
        return to_bool(v)

    def bool_or_other(self, key, default, index=0):
        """
        :param str key:
        :param T default:
        :param int index:
        :return: if we have typed value, just as-is. otherwise try to convert to bool. or default if not there.
        :rtype: bool|T|object
        """
        if (key in self.typed_dict):
            return self.typed_value(key, default=default, index=index)
        if (key not in self.dict):
            return default
        v = str(self.value(key, None, index))
        if (not v):
            return default
        from returnn.util.basic import to_bool
        try:
            return to_bool(v)
        except ValueError:
            # Not a bool-like string: return the raw string.
            return v

    def float(self, key, default, index=0):
        """
        Parses the value of the given key as float, returning default if not existent
        :type key: str
        :type default: T
        :type index: int
        :rtype: float | T
        """
        if (key in self.typed_dict):
            value = self.typed_value(key, default=default, index=index)
        else:
            value = self.value(key, default, index)
        if (value is not None):
            if isinstance(value, str):
                value = float(value)
            assert isinstance(value, (int, float))
        return value

    def list(self, key, default=None):
        """
        :type key: str
        :type default: T
        :rtype: list[str] | T
        """
        if (default is None):
            default = []
        if (key in self.typed_dict):
            value = self.typed_value(key, default=default)
            if (value is None):
                return default
            if (not isinstance(value, (tuple, list))):
                value = [value]
            return list(value)
        if (key not in self.dict):
            return default
        return self.dict[key]

    def int_list(self, key, default=None):
        """
        :type key: str
        :type default: T
        :rtype: list[int] | T
        """
        if (default is None):
            default = []
        if (key in self.typed_dict):
            value = self.typed_value(key, default=default)
            if (value is None):
                return default
            if (not isinstance(value, (tuple, list))):
                value = [value]
            for x in value:
                assert isinstance(x, int)
            return list(value)
        return [int(x) for x in self.list(key, default)]

    def float_list(self, key, default=None):
        """
        :type key: str
        :type default: T
        :rtype: list[float] | T
        """
        if (default is None):
            default = []
        if (key in self.typed_dict):
            value = self.typed_value(key, default=default)
            if (value is None):
                return default
            if (not isinstance(value, (tuple, list))):
                value = [value]
            for x in value:
                assert isinstance(x, (float, int))
            return list(value)
        return [float(x) for x in self.list(key, default)]

    def int_pair(self, key, default=None):
        """
        :param str key:
        :param (int,int)|None default:
        :rtype: (int,int)
        """
        if (default is None):
            default = (0, 0)
        if (not self.has(key)):
            return default
        if (key in self.typed_dict):
            value = self.typed_value(key, default=default)
            # A scalar is duplicated into a pair.
            if (not isinstance(value, (tuple, list))):
                value = (value, value)
            assert (len(value) == 2)
            for x in value:
                assert isinstance(x, int)
            return tuple(value)
        # Untyped string form: "a:b" or a single number.
        value = self.value(key, '')
        if (':' in value):
            return (int(value.split(':')[0]), int(value.split(':')[1]))
        else:
            return (int(value), int(value))
|
@contextlib.contextmanager
def global_config_ctx(config: Config):
    """
    sets the config as global config in this context,
    and recovers the original global config afterwards
    """
    global _global_config
    saved = _global_config
    try:
        set_global_config(config)
        yield
    finally:
        # Always restore the previous global config, even on error.
        _global_config = saved
|
def set_global_config(config):
    """
    Will define the global config, returned by :func:`get_global_config`

    :param Config config:
    """
    global _global_config
    # Also attach it to the current TF graph (no-op if TF is not imported).
    _get_or_set_config_via_tf_default_graph(config)
    _global_config = config
|
def get_global_config(*, raise_exception: bool = True, auto_create: bool = False, return_empty_if_none: bool = False):
    """
    :param raise_exception: if no global config is found, raise an exception, otherwise return None
    :param auto_create: if no global config is found, it creates one, registers it as global, and returns it
    :param return_empty_if_none: if no global config is found, it creates one (which is empty) and returns it
    :rtype: Config|None
    """
    # Lookup order: TF graph, module global, __main__, returnn.__main__.
    found = _get_or_set_config_via_tf_default_graph()
    if found:
        return found
    if _global_config:
        return _global_config
    import sys
    main_mod = sys.modules['__main__']
    main_cfg = getattr(main_mod, 'config', None)
    if isinstance(main_cfg, Config):
        return main_cfg
    import returnn.__main__ as rnn
    if isinstance(rnn.config, Config):
        return rnn.config
    # Nothing found: behave according to the flags.
    if auto_create:
        cfg = Config()
        set_global_config(cfg)
        return cfg
    if return_empty_if_none:
        return Config()
    if raise_exception:
        raise Exception('No global config found.')
    return None
|
def _get_or_set_config_via_tf_default_graph(config=None):
    """
    This is done in a safe way, and might just be a no-op.
    When TF is not imported yet, it will just return.

    :param Config|None config: if set, will set it
    :rtype: Config|None
    """
    # Do not trigger a TensorFlow import just for this check.
    if ('tensorflow' not in sys.modules):
        return None
    from returnn.tf.compat import v1 as tf_v1
    graph = tf_v1.get_default_graph()
    attr = '_RETURNN_config_in_graph'
    if config:
        setattr(graph, attr, config)
    return getattr(graph, attr, None)
|
def network_json_from_config(config):
    """
    :param Config config:
    :rtype: dict[str]
    """
    # Guard clause: require a typed `network` entry in the config.
    if not (config.has('network') and config.is_typed('network')):
        raise ValueError('Network is not defined in config. Define `network`.')
    json_content = config.typed_value('network')
    assert isinstance(json_content, dict)
    assert json_content
    return json_content
|
def tf_should_use_gpu(config):
    """
    :param Config config:
    :rtype: bool
    """
    cfg_dev = config.value('device', None)
    if (cfg_dev == 'gpu'):
        return True
    if (cfg_dev == 'cpu'):
        return False
    if cfg_dev:
        # Anything else explicitly set is unsupported here.
        raise ValueError(('Currently unsupported TF device %r specified' % (cfg_dev,)))
    # Device not specified: decide based on GPU availability, and store the decision.
    from returnn.log import log
    from returnn.tf.util.basic import is_gpu_available
    if is_gpu_available():
        print('Device not set explicitly, and we found a GPU, which we will use.', file=log.v2)
        config.set('device', 'gpu')
        return True
    print('Device not set explicitly, and no GPU found.', file=log.v2)
    config.set('device', 'cpu')
    return False
|
def _global_config_as_py_module_proxy_setup():
    # Install the proxy module once; keep any existing entry untouched.
    if (_PyModuleName not in sys.modules):
        sys.modules[_PyModuleName] = _GlobalConfigAsPyModuleProxy(_PyModuleName)
|
class _GlobalConfigAsPyModuleProxy(_types.ModuleType):
    """
    Takes :func:`get_global_config`, and makes its ``typed_dict`` available as module attributes.
    """

    @staticmethod
    def _get_config() -> Optional[Config]:
        """
        :return: config or None if not available anymore
        """
        return get_global_config(raise_exception=False)

    def __getattribute__(self, item):
        if (item == '__dict__'):
            cfg: Optional[Config] = self._get_config()
            if (not cfg):
                return {}
            # Fix: expose the config's typed_dict (not the Config object itself),
            # consistent with the class docstring and the empty-dict fallback above.
            return cfg.typed_dict
        return super().__getattribute__(item)

    def __getattr__(self, item):
        cfg: Optional[Config] = self._get_config()
        if (not cfg):
            raise AttributeError(('config %s not loaded anymore' % self.__name__))
        if (item not in cfg.typed_dict):
            raise AttributeError(('config %s has no attribute %r' % (self.__name__, item)))
        return cfg.typed_dict[item]

    def __dir__(self):
        cfg: Optional[Config] = self._get_config()
        if (not cfg):
            return []
        return sorted(cfg.typed_dict.keys())

    def __setattr__(self, key, value):
        # '__file__' is stored on the module proxy itself; everything else
        # goes into the global config's typed_dict.
        if (key in ['__file__']):
            super().__setattr__(key, value)
            return
        cfg: Optional[Config] = self._get_config()
        if cfg:
            cfg.typed_dict[key] = value
|
class OggZipDataset(CachedDataset2):
"\n Generic dataset which reads a Zip file containing Ogg files for each sequence and a text document.\n The feature extraction settings are determined by the ``audio`` option,\n which is passed to :class:`ExtractAudioFeatures`.\n Does also support Wav files, and might even support other file formats readable by the 'soundfile'\n library (not tested). By setting ``audio`` or ``targets`` to ``None``, the dataset can be used in\n text only or audio only mode. The content of the zip file is:\n\n - a .txt file with the same name as the zipfile, containing a python list of dictionaries\n - a subfolder with the same name as the zipfile, containing the audio files\n\n The dictionaries in the .txt file must be a list of dicts, i.e. have the following structure:\n\n .. code::\n\n [{'text': 'some utterance text', 'duration': 2.3, 'file': 'sequence0.wav'},\n ...]\n\n The dict can optionally also have the entry ``'seq_name': 'arbitrary_sequence_name'``.\n If ``seq_name`` is not included, the seq_tag will be the name of the file.\n ``duration`` is mandatory, as this information is needed for the sequence sorting,\n however, it does not have to match the real duration in any way.\n "
def __init__(self, path, audio, targets, targets_post_process=None, use_cache_manager=False, segment_file=None, zip_audio_files_have_name_as_prefix=True, fixed_random_subset=None, epoch_wise_filter=None, **kwargs):
    """
    :param str|list[str] path: filename to zip
    :param dict[str]|None audio: options for :class:`ExtractAudioFeatures`.
        use {} for default. None means to disable.
    :param Vocabulary|dict[str]|None targets: options for :func:`Vocabulary.create_vocab`
        (e.g. :class:`BytePairEncoding`)
    :param str|list[str]|((str)->str)|None targets_post_process: :func:`get_post_processor_function`,
        applied on orth
    :param bool use_cache_manager: uses :func:`Util.cf`
    :param str|None segment_file: .txt or .gz text file containing sequence tags that will be used as whitelist
    :param bool zip_audio_files_have_name_as_prefix:
    :param float|int|None fixed_random_subset:
        Value in [0,1] to specify the fraction, or integer >=1 which specifies number of seqs.
        If given, will use this random subset. This will be applied initially at loading time,
        i.e. not dependent on the epoch.
        It will use an internally hardcoded fixed random seed, i.e. it's deterministic.
    :param dict|None epoch_wise_filter: see init_seq_order
    """
    import os
    import zipfile
    import returnn.util.basic
    from .meta import EpochWiseFilter
    # Maps name -> path for transcriptions given as separate .txt.gz files.
    self._separate_txt_files = {}
    self._path = path
    self._use_cache_manager = use_cache_manager
    self._zip_files: Optional[List[zipfile.ZipFile]] = None
    # Directory mode: `path` is a directory with a sibling "<path>.txt" transcription file.
    if (isinstance(path, str) and (os.path.splitext(path)[1] != '.zip') and os.path.isdir(path) and os.path.isfile((path + '.txt'))):
        self.paths = [os.path.dirname(path)]
        self._names = [os.path.basename(path)]
        self._zip_files = None
        self._use_zip_files = False
        assert (not use_cache_manager), 'cache manager only for zip file'
    else:
        # Zip mode: one or more .zip files, possibly plus separate .txt.gz transcriptions.
        if (not isinstance(path, (tuple, list))):
            path = [path]
        self.paths = []
        self._names = []
        for path_ in path:
            assert isinstance(path_, str)
            (name, ext) = os.path.splitext(os.path.basename(path_))
            # Handle double extensions like "foo.txt.gz".
            if (('.' in name) and (ext == '.gz')):
                (name, ext) = (name[:name.rindex('.')], (name[name.rindex('.'):] + ext))
            if use_cache_manager:
                path_ = returnn.util.basic.cf(path_)
            if (ext == '.txt.gz'):
                self._separate_txt_files[name] = path_
                continue
            assert (ext == '.zip')
            self.paths.append(path_)
            self._names.append(name)
        self._use_zip_files = True
    # Whitelist of sequence tags, loaded lazily from segment_file (see _read_segment_list).
    self.segments: Optional[typing.Set[str]] = None
    self._segment_file = segment_file
    self.zip_audio_files_have_name_as_prefix = zip_audio_files_have_name_as_prefix
    kwargs.setdefault('name', self._names[0])
    super(OggZipDataset, self).__init__(**kwargs)
    if (targets is None):
        self.targets = None
    elif isinstance(targets, dict):
        self.targets = Vocabulary.create_vocab(**targets)
    else:
        assert isinstance(targets, Vocabulary)
        self.targets = targets
    if self.targets:
        self.labels['classes'] = self.targets.labels
    self.targets_post_process = None
    if targets_post_process:
        if callable(targets_post_process):
            self.targets_post_process = targets_post_process
        else:
            from .lm import get_post_processor_function
            self.targets_post_process = get_post_processor_function(targets_post_process)
    self._audio_random = numpy.random.RandomState(1)
    self._audio = audio
    self.feature_extractor = (ExtractAudioFeatures(random_state=self._audio_random, **audio) if (audio is not None) else None)
    self.num_inputs = (self.feature_extractor.get_feature_dimension() if self.feature_extractor else 0)
    self.num_outputs = {'raw': {'dtype': 'string', 'shape': ()}, 'orth': [256, 1]}
    # NOTE(review): range(255) yields 255 labels while 'orth' declares dim 256 above --
    # looks like an off-by-one; confirm before relying on the orth label list.
    self.labels['orth'] = [chr(i) for i in range(255)]
    if self.targets:
        self.num_outputs['classes'] = [self.targets.num_labels, 1]
    if self.feature_extractor:
        self.num_outputs['data'] = [self.num_inputs, 2]
    else:
        self.num_outputs['data'] = [0, 2]
    # Parsed entries from the transcription .txt file(s); filled by _lazy_init.
    self._data: Optional[List[Dict[(str, Any)]]] = None
    self._fixed_random_subset = fixed_random_subset
    if (epoch_wise_filter is None):
        self.epoch_wise_filter = None
    elif isinstance(epoch_wise_filter, dict):
        self.epoch_wise_filter = EpochWiseFilter(epoch_wise_filter)
    else:
        assert isinstance(epoch_wise_filter, EpochWiseFilter)
        self.epoch_wise_filter = epoch_wise_filter
    self._seq_order = None
def _read(self, filename, zip_index):
    """
    :param str filename: in zip-file
    :param int zip_index: index of the zip file to load, unused when loading without zip
    :rtype: bytes
    """
    import os
    if filename.endswith('.txt'):
        (name, _) = os.path.splitext(filename)
        assert (name == self._names[zip_index])
        if (name in self._separate_txt_files):
            # Transcriptions were given as a separate gzipped text file.
            import gzip
            # Fix: close the gzip handle deterministically instead of leaking it.
            with gzip.open(self._separate_txt_files[name], 'rb') as f:
                return f.read()
    if (self._zip_files is not None):
        return self._zip_files[zip_index].read(filename)
    # Directory mode: read the file relative to the single base path.
    # Fix: close the file handle deterministically instead of leaking it.
    with open(('%s/%s' % (self.paths[0], filename)), 'rb') as f:
        return f.read()
def _collect_data_part(self, zip_index):
    """
    collect all the entries of a single zip-file or txt file
    :param int zip_index: index of the zip-file in self._zip_files, unused when loading without zip
    :return: data entries
    :rtype: list[dict[str]]
    """
    from returnn.util.literal_py_to_pickle import literal_eval
    data = literal_eval(self._read(('%s.txt' % self._names[zip_index]), zip_index))
    assert (data and isinstance(data, list))
    # Sanity-check the schema on the first entry only.
    first_entry = data[0]
    assert isinstance(first_entry, dict)
    assert isinstance(first_entry['text'], str)
    assert isinstance(first_entry['duration'], float)
    if ('file' in first_entry):
        assert isinstance(first_entry['file'], str)
    else:
        assert (not self.feature_extractor), ('%s: feature extraction is enabled, but no audio files are specified' % self)
    # Fix: 'seq_name' is optional per the class docstring, so only validate it when present
    # (previously this assert was unconditional and failed for data without 'seq_name').
    if ('seq_name' in first_entry):
        assert isinstance(first_entry['seq_name'], str)
    for entry in data:
        entry['_zip_file_index'] = zip_index
    if self.segments:
        # Keep only whitelisted sequence tags.
        data[:] = [entry for entry in data if (self._get_tag_from_info_dict(entry) in self.segments)]
    return data
def _lazy_init(self):
    """
    Load all data entries (once); populates ``self._data``.
    """
    if self._data is not None:
        return  # already initialized
    if self._segment_file:
        self._read_segment_list(self._segment_file)
    if self._use_zip_files:
        import zipfile

        self._zip_files = [zipfile.ZipFile(path) for path in self.paths]
        entries = []
        for idx in range(len(self._zip_files)):
            entries += self._collect_data_part(idx)
    else:
        entries = self._collect_data_part(0)
    subset = self._fixed_random_subset
    if subset:
        # a float in (0, 1) is interpreted as a fraction of the data
        if 0 < subset < 1:
            subset = int(len(entries) * subset)
        assert isinstance(subset, int) and subset > 0
        # fixed seed, so the subset is stable across runs
        rnd = numpy.random.RandomState(42)
        rnd.shuffle(entries)
        entries = entries[:subset]
    self._data = entries
def _read_segment_list(self, segment_file):
    """
    Read a list of segment names, in either plain text or gzip,
    and store them as a set in ``self.segments``.

    :param str segment_file:
    """
    if segment_file.endswith('.gz'):
        import gzip

        # gzip yields bytes; decode each line. Context manager closes the handle (was leaked before).
        with gzip.open(segment_file) as f:
            self.segments = set(s.decode() for s in f.read().splitlines())
    else:
        with open(segment_file) as f:
            self.segments = set(f.read().splitlines())
def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
    """
    If random_shuffle_epoch1, for epoch 1 with "random" ordering, we leave the given order as is.
    Otherwise, this is mostly the default behavior.

    :param int|None epoch:
    :param list[str]|None seq_list: List of sequence tags, to set a predefined order.
    :param list[int]|None seq_order: List of corpus sequence indices, to set a predefined order.
    :rtype: bool
    :returns whether the order changed (True is always safe to return)
    """
    super(OggZipDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
    if ((epoch is None) and (seq_list is None) and (seq_order is None)):
        # no epoch given (e.g. called from initialize()): nothing to order yet
        self._num_seqs = 0
        return True
    self._lazy_init()
    # reseed audio augmentation and target (e.g. BPE sampling) RNGs per epoch
    random_seed = self._get_random_seed_for_epoch(epoch=epoch)
    self._audio_random.seed(random_seed)
    if self.targets:
        self.targets.set_random_seed(random_seed)
    def get_seq_len(i):
        """
        Returns the length based on the duration entry of the dataset,
        multiplied by 100 to avoid similar rounded durations.
        It is also used when using the dataset in text-only-mode (`audio` is None).
        :param int i:
        :rtype: int
        """
        return int((self._data[i]['duration'] * 100))
    if (seq_order is not None):
        # explicit corpus indices given by the caller
        self._seq_order = seq_order
    elif (seq_list is not None):
        # map requested tags to corpus indices, keeping the requested order
        seqs = {self._get_tag_from_info_dict(seq): i for (i, seq) in enumerate(self._data) if (self._get_tag_from_info_dict(seq) in seq_list)}
        for seq_tag in seq_list:
            assert (seq_tag in seqs), ('did not found all requested seqs. we have eg: %s' % (self._get_tag_from_info_dict(self._data[0]),))
        self._seq_order = [seqs[seq_tag] for seq_tag in seq_list]
    else:
        # derive the order from seq_ordering, optionally filtered per epoch
        num_seqs = len(self._data)
        self._seq_order = self.get_seq_order_for_epoch(epoch=epoch, num_seqs=num_seqs, get_seq_len=get_seq_len)
        if self.epoch_wise_filter:
            self.epoch_wise_filter.debug_msg_prefix = str(self)
            self._seq_order = self.epoch_wise_filter.filter(epoch=epoch, seq_order=self._seq_order, get_seq_len=get_seq_len)
    self._num_seqs = len(self._seq_order)
    return True
def supports_seq_order_sorting(self) -> bool:
    """Seq lengths (durations) are known up-front, so sorted ordering is supported."""
    return True
def get_current_seq_order(self):
    """
    :return: corpus seq indices of the current epoch, after init_seq_order
    :rtype: list[int]
    """
    order = self._seq_order
    assert order is not None
    return order
def _get_ref_seq_idx(self, seq_idx):
    """
    Map a sorted seq idx of the current epoch to the corpus index.

    :param int seq_idx:
    :return: idx in self._reference_seq_order
    :rtype: int
    """
    order = self._seq_order
    return order[seq_idx]
def have_corpus_seq_idx(self):
    """
    :rtype: bool
    :return: whether get_corpus_seq_idx() is supported (always, here)
    """
    return True
def get_corpus_seq_idx(self, seq_idx):
    """
    :param int seq_idx: sorted seq idx of the current epoch
    :rtype: int
    :return: corpus seq idx (as if seq_ordering were "default")
    """
    return self._get_ref_seq_idx(seq_idx)
@staticmethod
def _get_tag_from_info_dict(info):
    """
    Seq tag for one data entry: prefer 'seq_name', fall back to 'file', else "".

    :param dict[str] info:
    :rtype: str
    """
    for key in ('seq_name', 'file'):
        if key in info:
            return info[key]
    return ''
def get_tag(self, seq_idx):
    """
    :param int seq_idx: sorted seq idx of the current epoch
    :rtype: str
    """
    entry = self._data[self._get_ref_seq_idx(seq_idx)]
    return self._get_tag_from_info_dict(entry)
def get_all_tags(self):
    """
    :return: all seq tags, of the whole dataset, without partition epoch
    :rtype: list[str]
    """
    self._lazy_init()
    return list(map(self._get_tag_from_info_dict, self._data))
def get_total_num_seqs(self):
    """
    :return: total number of seqs, without partition epoch
    :rtype: int
    """
    self._lazy_init()
    return len(self._data)
def get_data_shape(self, key):
    """
    :returns get_data(*, key).shape[1:], i.e. num-frames excluded
    :rtype: list[int]
    """
    if key == 'data' and self.feature_extractor is not None:
        num_channels = self.feature_extractor.num_channels
        if num_channels is not None:
            # multi-channel audio: (time, channels, features)
            return [num_channels, self.feature_extractor.get_feature_dimension()]
    return super(OggZipDataset, self).get_data_shape(key)
def _get_transcription(self, corpus_seq_idx: int):
    """
    :param corpus_seq_idx:
    :return: (targets (e.g. bpe), txt)
    :rtype: (list[int], str)
    """
    raw_txt = self._data[corpus_seq_idx]['text']
    if self.targets:
        processed_txt = raw_txt
        if self.targets_post_process:
            processed_txt = self.targets_post_process(processed_txt)
        target_seq = self.targets.get_seq(processed_txt)
    else:
        # text-only / no-targets mode
        target_seq = []
    return target_seq, raw_txt
def _open_audio_file(self, corpus_seq_idx: int):
    """
    :param corpus_seq_idx:
    :return: io.FileIO
    """
    import io

    entry = self._data[corpus_seq_idx]
    if self.zip_audio_files_have_name_as_prefix:
        # files inside the zip are prefixed with the zip's base name
        audio_fn = '%s/%s' % (self._names[entry['_zip_file_index']], entry['file'])
    else:
        audio_fn = entry['file']
    return io.BytesIO(self._read(audio_fn, entry['_zip_file_index']))
def _collect_single_seq(self, seq_idx):
    """
    :param int seq_idx: sorted seq idx of the current epoch
    :rtype: DatasetSeq
    """
    dataset_seq = self.get_corpus_seq(self._get_ref_seq_idx(seq_idx))
    # get_corpus_seq sets the corpus idx; overwrite with the epoch-local idx
    dataset_seq.seq_idx = seq_idx
    return dataset_seq
def have_get_corpus_seq(self) -> bool:
    """
    :return: whether this dataset supports :func:`get_corpus_seq` (always, here)
    """
    return True
def get_corpus_seq(self, corpus_seq_idx: int) -> DatasetSeq:
    """
    Random access into the corpus: load features and targets for one seq.

    :param corpus_seq_idx:
    :return: seq (note: seq_idx is set to corpus_seq_idx here; callers like
      _collect_single_seq overwrite it with the epoch-local idx)
    """
    self._lazy_init()
    seq_tag = self._get_tag_from_info_dict(self._data[corpus_seq_idx])
    if self.feature_extractor:
        with self._open_audio_file(corpus_seq_idx) as audio_file:
            features = self.feature_extractor.get_audio_features_from_raw_bytes(audio_file, seq_name=seq_tag)
    else:
        # no audio configured: dummy scalar placeholder
        features = numpy.zeros((), dtype=numpy.float32)
    (targets, txt) = self._get_transcription(corpus_seq_idx)
    targets = numpy.array(targets, dtype='int32')
    raw_txt = str_to_numpy_array(txt)
    # 'orth' is the raw text as UTF-8 byte values
    orth = txt.encode('utf8')
    if PY3:
        assert isinstance(orth, bytes)
        orth = list(orth)
    else:
        orth = list(map(ord, orth))
    orth = numpy.array(orth, dtype='uint8')
    return DatasetSeq(features=features, targets={'classes': targets, 'raw': raw_txt, 'orth': orth}, seq_idx=corpus_seq_idx, seq_tag=seq_tag)
|
class Dataset(object):
'\n Base class for any dataset. This defines the dataset API.\n '
@staticmethod
def kwargs_update_from_config(config, kwargs):
    """
    Fill dataset kwargs from the global config, without overriding explicit kwargs.

    :type config: returnn.config.Config
    :type kwargs: dict[str]
    """

    def set_or_remove(key, value):
        """
        Drop ``key`` when it is explicitly set to None; otherwise set it to
        ``value`` if not already present.

        :param str key:
        :param value:
        """
        if key in kwargs and kwargs[key] is None:
            del kwargs[key]
        if value is not None and key not in kwargs:
            kwargs[key] = value

    set_or_remove('window', config.int('window', 0) or None)
    set_or_remove('context_window', config.typed_value('context_window'))
    set_or_remove('chunking', config.opt_typed_value('chunking', None))
    set_or_remove('seq_ordering', config.value('batching', None))
    set_or_remove('shuffle_frames_of_nseqs', config.int('shuffle_frames_of_nseqs', 0) or None)
    set_or_remove('min_chunk_size', config.opt_typed_value('min_chunk_size', 0) or None)
    set_or_remove('chunking_variance', config.float('chunking_variance', 0))
@staticmethod
def get_default_kwargs_eval(config):
    """
    Default dataset kwargs for an eval dataset, derived from the config.

    :param returnn.config.Config config:
    :rtype: dict[str]
    """
    if config.value('on_size_limit', 'ignore') == 'chunk':
        chunking = config.value('batch_size', '0')
    elif config.value('chunking', '0') == '1':
        # special case: chunking == "1" is kept for eval as well
        chunking = '1'
    elif config.bool('chunk_eval', False):
        chunking = config.value('chunking', '0')
    else:
        chunking = '0'
    return dict(chunking=chunking, seq_ordering='sorted', shuffle_frames_of_nseqs=0)
@classmethod
def from_config(cls, config, **kwargs):
    """
    Construct the dataset, merging config-level defaults into the kwargs.

    :type config: returnn.config.Config
    :param dict[str] kwargs: passed on to __init__
    :rtype: Dataset
    """
    cls.kwargs_update_from_config(config, kwargs)
    return cls(**kwargs)
def __init__(self, name=None, window=1, context_window=None, chunking=None, seq_ordering='default', fixed_random_seed=None, random_seed_offset=None, partition_epoch=None, repeat_epoch=None, seq_list_filter_file=None, unique_seq_tags=False, seq_order_seq_lens_file=None, shuffle_frames_of_nseqs=0, min_chunk_size=0, chunking_variance=0, estimated_num_seqs=None):
    """
    :param str name: e.g. "train" or "eval"
    :param int window: features will be of dimension window * feature_dim, as we add a context-window around.
      not all datasets support this option.
    :param None|int|dict|NumbersDict|(dict,dict) context_window: will add this context for each chunk
    :param None|str|int|(int,int)|dict|(dict,dict)|function chunking: "chunk_size:chunk_step"
    :param str seq_ordering: "batching"-option in config. e.g. "default", "sorted" or "random".
      See self.get_seq_order_for_epoch() for more details.
    :param int|None fixed_random_seed: for the shuffling, e.g. for seq_ordering='random'.
      otherwise epoch will be used.
      useful when used as eval dataset.
    :param int|None random_seed_offset: for shuffling, e.g. for seq_ordering='random'.
      ignored when fixed_random_seed is set.
    :param int|None partition_epoch:
    :param int|None repeat_epoch: Repeat the sequences in an epoch this many times. Useful to scale the dataset
      relative to other datasets, e.g. when used in CombinedDataset. Not allowed to be used in combination with
      partition_epoch.
    :param str|None seq_list_filter_file: defines a subset of sequences (by tag) to use
    :param bool unique_seq_tags: uniquify seqs with same seq tags in seq order
    :param str|None seq_order_seq_lens_file: for seq order, use the seq length given by this file
    :param int shuffle_frames_of_nseqs: shuffles the frames. not always supported
    :param int|dict|NumbersDict min_chunk_size: minimum chunk size, used by the chunking logic
    :param int|float chunking_variance: passed on to the chunking logic
      (NOTE(review): presumably randomizes chunk sizes; confirm in the chunking implementation)
    :param None|int estimated_num_seqs: for progress reporting in case the real num_seqs is unknown
    """
    self.name = (name or ('dataset_id%s' % id(self)))
    self.lock = None
    self.rnd_seq_drop = None
    self.num_inputs = 0
    self.num_outputs = None
    self.window = window
    self.seq_ordering = seq_ordering
    self.fixed_random_seed = fixed_random_seed
    if (random_seed_offset is None):
        # default depends on distributed rank (see _get_default_random_seed_offset)
        random_seed_offset = self._get_default_random_seed_offset()
    self.random_seed_offset = random_seed_offset
    self.partition_epoch = (partition_epoch or 1)
    self.repeat_epoch = (repeat_epoch or 1)
    self._seq_list_filter_file = seq_list_filter_file
    self.seq_tags_filter = (set(self._load_seq_list_file(seq_list_filter_file)) if seq_list_filter_file else None)
    self.unique_seq_tags = unique_seq_tags
    self._seq_order_seq_lens_file = seq_order_seq_lens_file
    self._seq_order_seq_lens_by_idx = None
    # see repeat_epoch documentation above
    assert ((self.partition_epoch == 1) or (self.repeat_epoch == 1)), 'Combining partition_epoch and repeat_epoch is prohibited.'
    self.labels = {}
    self.weights = {}
    self._num_timesteps = 0
    self._num_seqs = 0
    self._estimated_num_seqs = estimated_num_seqs
    self.min_chunk_size = NumbersDict(min_chunk_size)
    self.chunking_variance = chunking_variance
    self._chunking = chunking
    (self.chunk_size, self.chunk_step, self.custom_chunking_func) = self._parse_chunking(chunking)
    self._context_window = context_window
    if isinstance(context_window, (tuple, list)):
        # explicit (left, right) context dicts
        assert (len(context_window) == 2)
        for elem in context_window:
            assert isinstance(elem, dict)
        self.ctx_left = NumbersDict(numbers_dict=context_window[0])
        self.ctx_right = NumbersDict(numbers_dict=context_window[1])
    else:
        if (context_window is None):
            context_window = NumbersDict()
        elif isinstance(context_window, int):
            context_window = NumbersDict(numbers_dict={'data': context_window})
        elif isinstance(context_window, dict):
            context_window = NumbersDict(numbers_dict=context_window)
        assert isinstance(context_window, NumbersDict)
        # split the total context symmetrically (right gets the extra frame if odd)
        ctx_total = (NumbersDict.max([context_window, 1]) - 1)
        self.ctx_left = (ctx_total // 2)
        self.ctx_right = (ctx_total - self.ctx_left)
    assert isinstance(self.ctx_left, NumbersDict)
    assert isinstance(self.ctx_right, NumbersDict)
    self.shuffle_frames_of_nseqs = shuffle_frames_of_nseqs
    self.epoch = None
    self.zpad = None
def __repr__(self):
    """Short human-readable description, robust against partially-initialized instances."""
    cls_name = self.__class__.__name__
    name = getattr(self, 'name', '<unknown>')
    epoch = getattr(self, 'epoch', '<unknown>')
    return ('<%s %r epoch=%s>' % (cls_name, name, epoch))
# Constructor arg names to exclude when collecting kwargs in __reduce__ (pickling).
_getnewargs_exclude_attrs = set()
# Constructor arg name -> attribute name, for args stored under a different attribute.
_getnewargs_remap = {}
@staticmethod
def _create_from_reduce(cls, kwargs, state) -> Dataset:
    """
    Unpickle helper (see __reduce__): construct the dataset and restore extra state.

    :param type cls:
    :param dict[str] kwargs:
    :param dict[str] state:
    :rtype: Dataset
    """
    assert issubclass(cls, Dataset)
    dataset = cls(**kwargs)
    assert isinstance(dataset, Dataset)
    for attr, value in state.items():
        setattr(dataset, attr, value)
    return dataset
def __reduce__(self):
    """
    Pickling support: reconstruct the constructor kwargs from attributes
    (looking up ``_arg`` before ``arg``), plus a small amount of extra state.
    """
    import inspect
    kwargs = {}
    # collect __init__ args over the whole MRO, so subclass args are included
    for cls in self.__class__.__mro__:
        if (isinstance(cls, type) and issubclass(cls, Dataset)):
            for arg in inspect.getargs(cls.__init__.__code__).args[1:]:
                if (arg in self._getnewargs_exclude_attrs):
                    continue
                if (arg in self._getnewargs_remap):
                    kwargs[arg] = getattr(self, self._getnewargs_remap[arg])
                elif hasattr(self, ('_' + arg)):
                    # private attribute holds the original constructor value
                    kwargs[arg] = getattr(self, ('_' + arg))
                else:
                    kwargs[arg] = getattr(self, arg)
    state = {attr: getattr(self, attr) for attr in ['epoch', 'zpad']}
    return (Dataset._create_from_reduce, (self.__class__, kwargs, state))
@staticmethod
def _get_default_random_seed_offset():
    """
    Default random seed offset, rank-dependent under distributed training.

    :return: 0 usually
    :rtype: int
    """
    from returnn.config import get_global_config

    config = get_global_config(raise_exception=False)
    if not config:
        return 0
    if config.typed_value('torch_distributed') is not None:
        import returnn.torch.distributed

        # spread seeds far apart per rank
        return returnn.torch.distributed.get_ctx(config=config).rank() * 16127
    if config.is_true('use_horovod'):
        assert config.bool('use_tensorflow', False) or config.value('backend', '').startswith('tensorflow')
        import returnn.tf.horovod

        if returnn.tf.horovod.get_ctx(config=config).is_dataset_distribution_random_seed_offset():
            return returnn.tf.horovod.get_ctx(config=config).rank() * 16127
    return 0
@staticmethod
def _parse_chunking(chunking):
    """
    Parse the user-provided chunking option into normalized form.

    :param None|str|int|(int,int)|dict|(dict,dict)|(NumbersDict,NumbersDict)|function chunking:
      as it comes from the config / from the user
    :return: chunk_size, chunk_step, custom_chunking_func
    :rtype: (NumbersDict|None,NumbersDict|None,function|None)
    """
    if callable(chunking):
        # custom chunking function: sizes are determined by the function itself
        return (None, None, chunking)
    if isinstance(chunking, str):
        # "chunk_size:chunk_step" or just "chunk_size"
        if (':' in chunking):
            chunking = tuple(map(int, chunking.split(':')))
        else:
            chunking = int(chunking)
    if (not isinstance(chunking, (tuple, list))):
        chunking = (chunking, None)
    (chunk_size, chunk_step) = chunking
    if (chunk_size is None):
        chunk_size = 0
    assert isinstance(chunk_size, (int, dict, NumbersDict))
    chunk_size = NumbersDict(chunk_size)
    assert ((chunk_size == 0) or (chunk_size.min_value() > 0)), 'chunk size must not be negative'
    if (chunk_step in (None, 0)):
        # default: non-overlapping chunks
        chunk_step = chunk_size
    assert isinstance(chunk_step, (int, dict, NumbersDict))
    chunk_step = NumbersDict(chunk_step)
    if (chunk_size != 0):
        assert (sorted(chunk_step.keys()) == sorted(chunk_size.keys()))
        assert (chunk_step.max_value() > 0), 'chunking step must be positive (for some key)'
    return (chunk_size, chunk_step, None)
@staticmethod
def _load_seq_list_file(filename, use_cache_manager=False, expect_list=True):
    """
    Load a list of sequence tags from a file (.pkl pickle, .gz gzipped text, or plain text).

    :param str filename:
    :param bool use_cache_manager: resolve the filename via the cache manager first
    :param bool expect_list: assert that a pickled object is a list
    :rtype: list[str]|dict[str,list[str]]
    """
    if use_cache_manager:
        import returnn.util.basic

        filename = returnn.util.basic.cf(filename)
    if filename.endswith('.pkl'):
        import pickle

        # close the file handle deterministically (was leaked before)
        with open(filename, 'rb') as f:
            seq_list = pickle.load(f)
        if expect_list:
            assert isinstance(seq_list, list)
    elif filename.endswith('.gz'):
        import gzip

        with gzip.open(filename, 'rt') as f:
            seq_list = f.read().splitlines()
    else:
        with open(filename) as f:
            seq_list = f.read().splitlines()
    return seq_list
def _sliding_window(self, xr):
    """
    Add a context window around each frame; output dim = num_inputs * window.

    :type xr: numpy.ndarray
    :rtype: numpy.ndarray
    """
    from numpy.lib.stride_tricks import as_strided
    # pad window//2 zero-frames (self.zpad) on both sides, then build
    # overlapping views via stride tricks (no copy until the final reshape)
    x = numpy.concatenate([self.zpad, xr, self.zpad])
    return as_strided(x, shape=(((x.shape[0] - self.window) + 1), 1, self.window, self.num_inputs), strides=((x.strides[0], (x.strides[1] * self.num_inputs)) + x.strides)).reshape((xr.shape[0], (self.num_inputs * self.window)))
def is_cached(self, start, end):
    """
    :param int start: like in load_seqs(), sorted seq idx
    :param int end: like in load_seqs(), sorted seq idx
    :rtype: bool
    :returns whether we have the full range (start,end) of sorted seq idx.
    """
    # only the empty range is trivially cached; the base class has no cache
    if start != end:
        assert start < end
        return False
    return True
def get_seq_length(self, seq_idx: int) -> NumbersDict:
    """
    Abstract; must be implemented by subclasses.

    :param seq_idx:
    :returns the len of the input features and the len of the target sequence.
    """
    raise NotImplementedError
def get_estimated_seq_length(self, seq_idx):
    """
    In contrast to self.get_seq_length(),
    this method is designed to work for sequences that have not been loaded yet
    via self.load_seqs().
    Used by meta-datasets for sequence ordering.
    Currently we only provide one number, i.e. do not give different
    estimates for the different data keys (as in get_seq_length()).
    It is up to the dataset what this number represents
    and how it is computed.

    :param int seq_idx: for current epoch, not the corpus seq idx
    :rtype: int
    :returns sequence length estimate (for sorting)
    """
    # optional API; subclasses may implement it
    raise OptionalNotImplementedError
def get_num_timesteps(self):
    """
    :rtype: int
    :return: total num timesteps (must have been counted already)
    """
    total = self._num_timesteps
    assert total > 0
    return total
def load_seqs(self, start, end):
    """
    Load data sequences, such that self.get_data() & friends can return the data.

    :param int start: start sorted seq idx, inclusive
    :param int end: end sorted seq idx, exclusive
    """
    assert (start >= 0)
    assert (start <= end)
    if self.is_cached(start, end):
        return
    if (self.shuffle_frames_of_nseqs > 0):
        # load aligned N-seq groups and shuffle frames within each group
        (start, end) = self._get_load_seqs_superset(start, end)
        self._load_seqs(start, end)
        while (start < end):
            self._shuffle_frames_in_seqs(start, (start + self.shuffle_frames_of_nseqs))
            start += self.shuffle_frames_of_nseqs
    else:
        self._load_seqs(start, end)
def _get_load_seqs_superset(self, start, end):
    """
    :type start: int
    :type end: int
    :returns the superset (start,end) of seqs to be loaded.
    For shuffle_frames_of_nseqs > 0, we always load N seqs at once
    and shuffle all their frames,
    thus start/end will be aligned to self.shuffle_frames_of_nseqs.
    """
    assert start <= end
    assert start < self.num_seqs
    m = self.shuffle_frames_of_nseqs
    if m > 0:
        # round start down / end up to multiples of m
        start -= start % m
        end += -end % m
    return start, end
def _shuffle_frames_in_seqs(self, start, end):
    """
    Shuffle the frames across the seqs [start, end). Optional API; not all datasets support it.

    :param int start:
    :param int end:
    """
    raise OptionalNotImplementedError
def _load_seqs(self, start, end):
    """
    Load data sequences.
    If end > num_seqs, will not load them.

    :param int start: inclusive seq idx start
    :param int end: exclusive seq idx end. can be more than num_seqs
    """
    raise NotImplementedError
def _get_seq_order_seq_lens_by_idx(self, seq_idx):
    """
    Seq length for ordering, read from ``self._seq_order_seq_lens_file``
    (a Python dict literal: seq tag -> length), lazily loaded and cached.

    :param int seq_idx:
    :rtype: int
    """
    if not self._seq_order_seq_lens_by_idx:
        assert self._seq_order_seq_lens_file
        if self._seq_order_seq_lens_file.endswith('.gz'):
            import gzip

            # close the handles deterministically (were leaked before)
            with gzip.GzipFile(self._seq_order_seq_lens_file, 'rb') as f:
                raw = f.read()
        else:
            with open(self._seq_order_seq_lens_file, 'rb') as f:
                raw = f.read()
        # SECURITY NOTE: eval executes arbitrary code from the file; only use
        # trusted seq-lens files. Kept (instead of ast.literal_eval) for
        # backward compat with files that are not strict literals.
        seq_lens = eval(raw)
        assert isinstance(seq_lens, dict)
        all_tags = self.get_all_tags()
        self._seq_order_seq_lens_by_idx = [seq_lens[tag] for tag in all_tags]
    return self._seq_order_seq_lens_by_idx[seq_idx]
def get_seq_order_for_epoch(self, epoch, num_seqs, get_seq_len=None):
    """
    Returns the order of the given epoch.
    This is mostly a static method, except that is depends on the configured type of ordering,
    such as 'default' (= as-is), 'sorted' or 'random'. 'sorted' also uses the sequence length.

    :param int|None epoch: for 'random', this determines the random seed
    :param int num_seqs:
    :param ((int) -> int)|None get_seq_len: function (originalSeqIdx: int) -> int
    :return: the order for the given epoch. such that seq_idx -> underlying idx
    :rtype: typing.Sequence[int]
    """
    if (epoch is None):
        # no epoch -> empty order (e.g. initialization without data)
        return []
    partition_epoch = (self.partition_epoch or 1)
    repeat_epoch = (self.repeat_epoch or 1)
    assert (num_seqs > 0)
    assert (num_seqs == int(num_seqs))
    num_seqs = int(num_seqs)
    if self._seq_order_seq_lens_file:
        # override seq lens with the ones from the file
        get_seq_len = self._get_seq_order_seq_lens_by_idx
    if (self.seq_ordering == 'default'):
        # as-is
        seq_index = range(num_seqs)
    elif self.seq_ordering.startswith('default_every_n:'):
        # e.g. n=2: [0, n, 1, n+1, 2, n+2, ...] interleaving pattern
        (_, num) = self.seq_ordering.split(':')
        num = int(num)
        seq_index = numpy.arange((num_seqs // num), dtype='int64').repeat(num)
        for i in range(1, num):
            seq_index[i::num] += (i * (num_seqs // num))
    elif (self.seq_ordering == 'reverse'):
        seq_index = range((num_seqs - 1), (- 1), (- 1))
    elif (self.seq_ordering in ['sorted', 'sorted_reverse']):
        # sort by seq length (stable, so equal lengths keep corpus order)
        assert get_seq_len
        reverse = ((- 1) if (self.seq_ordering == 'sorted_reverse') else 1)
        seq_lens = [(reverse * get_seq_len(i)) for i in range(num_seqs)]
        seq_index = numpy.argsort(seq_lens, kind='stable')
    elif self.seq_ordering.startswith('random'):
        # "random" or "random:<nth>": keep the same seed for nth subsequent epochs
        tmp = self.seq_ordering.split(':')
        nth = (int(tmp[1]) if (len(tmp) > 1) else 1)
        rnd_seed = self._get_random_seed_for_epoch(epoch=epoch, num_epochs_fixed=nth)
        random_generator = numpy.random.RandomState(rnd_seed)
        seq_index = random_generator.permutation(num_seqs)
    elif self.seq_ordering.startswith('sort_bin_shuffle'):
        # Shuffle seqs, sort by length, split into bins, shuffle the bin order.
        # Variant "sort_bin_shuffle_x2" also shuffles within each bin.
        assert get_seq_len
        tmp = self.seq_ordering.split(':')[1:]
        if (len(tmp) <= 1):
            nth = 1
        else:
            nth = int(tmp[1])
        rnd_seed = self._get_random_seed_for_epoch(epoch=epoch, num_epochs_fixed=nth)
        random_generator = numpy.random.RandomState(rnd_seed)
        seq_index = random_generator.permutation(num_seqs).tolist()
        seq_index.sort(key=get_seq_len)
        # bin count: explicit int, or ".N" meaning ~N seqs per bin
        if (len(tmp) == 0):
            bins = 2
        elif tmp[0].startswith('.'):
            bins = max((num_seqs // int(tmp[0][1:])), 2)
        else:
            bins = int(tmp[0])
        bin_ids = random_generator.permutation(bins)
        out_index = []
        for i in bin_ids:
            if (i == (bins - 1)):
                part = seq_index[((i * len(seq_index)) // bins):][:]
            else:
                part = seq_index[((i * len(seq_index)) // bins):(((i + 1) * len(seq_index)) // bins)][:]
            if self.seq_ordering.startswith('sort_bin_shuffle_x2'):
                random_generator.shuffle(part)
            out_index.append(part)
        seq_index = numpy.concatenate(out_index)
    elif self.seq_ordering.startswith('laplace'):
        # Shuffle, split into bins, sort each bin by length with alternating
        # direction, giving a zig-zag ("laplace") length profile.
        assert get_seq_len
        tmp = self.seq_ordering.split(':')[1:]
        if (len(tmp) == 0):
            bins = 2
        elif tmp[0].startswith('.'):
            bins = max((num_seqs // int(tmp[0][1:])), 2)
        else:
            bins = int(tmp[0])
        if (len(tmp) <= 1):
            nth = 1
        else:
            nth = int(tmp[1])
        rnd_seed = self._get_random_seed_for_epoch(epoch=epoch, num_epochs_fixed=nth)
        random_generator = numpy.random.RandomState(rnd_seed)
        seq_index = random_generator.permutation(num_seqs)
        out_index = []
        for i in range(bins):
            if (i == (bins - 1)):
                part = seq_index[((i * len(seq_index)) // bins):].tolist()
            else:
                part = seq_index[((i * len(seq_index)) // bins):(((i + 1) * len(seq_index)) // bins)].tolist()
            part.sort(key=get_seq_len, reverse=((i % 2) == 1))
            out_index += part
        seq_index = out_index
    else:
        assert False, ('invalid batching specified: ' + self.seq_ordering)
    if self.unique_seq_tags:
        # keep only the first occurrence of each seq tag
        all_seq_tags = self.get_all_tags()
        used_seq_tags = set()
        seq_index = [i for i in seq_index if ((all_seq_tags[i] not in used_seq_tags), used_seq_tags.add(all_seq_tags[i]))[0]]
    if (partition_epoch > 1):
        seq_index = self._apply_partition_epoch(seq_index, partition_epoch, epoch)
    if (repeat_epoch > 1):
        seq_index = (list(seq_index) * repeat_epoch)
    if (self.seq_tags_filter is not None):
        # apply seq_list_filter_file subset
        assert len(seq_index)
        all_seq_tags = self.get_all_tags()
        assert (len(all_seq_tags) == num_seqs == self.get_total_num_seqs()), ('%r vs %r vs %r' % (len(all_seq_tags), num_seqs, self.get_total_num_seqs()))
        old_seq_index = seq_index
        seq_index = [i for i in seq_index if (all_seq_tags[i] in self.seq_tags_filter)]
        assert seq_index, ('%s: empty after applying seq_list_filter_file. Example filter tags: %r, used tags: %r' % (self, sorted(self.seq_tags_filter)[:3], [all_seq_tags[i] for i in old_seq_index[:3]]))
    return seq_index
@classmethod
def _apply_partition_epoch(cls, seq_index, partition_epoch, epoch):
    """
    :param typing.Sequence[int] seq_index: full list of ordered sequence indices
    :param int partition_epoch: number of partitions seq_index should be split into
    :param int|None epoch: current epoch
    :return: partition of seq_index for current epoch
    :rtype: typing.Sequence[int]
    """
    total = len(seq_index)
    current_partition = ((epoch or 1) - 1) % partition_epoch
    base_size = total // partition_epoch
    remainder = total % partition_epoch
    # the first `remainder` partitions get one extra seq
    partition_sizes = [base_size + 1] * remainder + [base_size] * (partition_epoch - remainder)
    assert sum(partition_sizes) == total and len(partition_sizes) == partition_epoch
    # cumulative offsets [0, s0, s0+s1, ...]
    offsets = [0]
    for size in partition_sizes:
        offsets.append(offsets[-1] + size)
    assert len(offsets) == partition_epoch + 1
    part = seq_index[offsets[current_partition]:offsets[current_partition + 1]]
    assert len(part) == partition_sizes[current_partition]
    return part
def _get_random_seed_for_epoch(self, epoch, num_epochs_fixed=1):
    """
    :param int|None epoch:
    :param int num_epochs_fixed: keep random seed fixed for n subsequent epochs
    :rtype: int
    """
    if self.fixed_random_seed is not None:
        return self.fixed_random_seed
    seed = epoch or 1
    partition_epoch = self.partition_epoch or 1
    if partition_epoch > 1:
        # all partitions of one full epoch share the same seed
        seed = (seed - 1) // partition_epoch + 1
    if num_epochs_fixed > 1:
        seed = (seed - 1) // num_epochs_fixed + 1
    return seed + self.random_seed_offset
def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
    """
    Called when we start a new epoch, or at initialization; resets the seq list.

    :type epoch: int|None
    :param list[str]|None seq_list: List of sequence tags, to set a predefined order.
    :param list[int]|None seq_order: List of corpus sequence indices, to set a predefined order. Only possible
      if the dataset has such indices (see self.have_corpus_seq_idx()).
    :rtype: bool
    :returns whether the order changed (True is always safe to return)
    """
    self.epoch = epoch
    # fresh RNG for (optional) sequence dropping, seeded per epoch
    self.rnd_seq_drop = Random(self._get_random_seed_for_epoch(epoch=epoch))
    return False
def finish_epoch(self):
    """
    This would get called at the end of the epoch (currently optional only).
    After this, further calls to :func:`get_data` or :func:`load_seqs` are invalid,
    until a new call to :func:`init_seq_order` follows.
    """
    # invalidate the current epoch
    self.epoch = None
def get_current_seq_order(self):
    """
    :return: many datasets use self.get_seq_order_for_epoch. this function would return the current seq order
      for the current epoch, after self.init_seq_order was called.
      Not all datasets implement this.
    :rtype: typing.Sequence[int]
    """
    raise OptionalNotImplementedError
def supports_seq_order_sorting(self) -> bool:
    """
    :return: whether "sorted" or "sorted_reverse" is supported for seq_ordering
      (False by default; subclasses that know seq lengths up-front override this)
    """
    return False
def _base_init(self):
    """
    Common initialization: sanity checks and the zero-padding buffer for windowing.
    """
    self.zpad = None
    assert self.num_outputs
    if not self.num_inputs:
        # dense input may still be declared via num_outputs["data"]; windowing needs num_inputs
        assert not self.window or self.window in (0, 1) or 'data' in self.num_outputs
        return
    assert self.num_inputs > 0
    assert self.window > 0
    # force an odd window size so it is centered around each frame
    if int(self.window) % 2 == 0:
        self.window += 1
    if self.window > 1:
        self.zpad = numpy.zeros((int(self.window) // 2, self.num_inputs), dtype=numpy.float32)
def initialize(self):
    """
    Does the main initialization before it can be used.
    This needs to be called before self.load_seqs() can be used.
    """
    self._base_init()
    self.init_seq_order()
def get_times(self, sorted_seq_idx):
    """
    Optional API; not all datasets implement it.

    :param int sorted_seq_idx:
    """
    raise OptionalNotImplementedError
def get_data(self, seq_idx, key) -> numpy.ndarray:
    """
    :param int seq_idx: sorted seq idx
    :param str key: data-key, e.g. "data" or "classes"
    :return: features or targets: format 2d (time,feature) (float)
    """
    if key != 'data':
        return self.get_targets(key, seq_idx)
    return self.get_input_data(seq_idx)
def get_input_data(self, sorted_seq_idx):
    """
    Abstract; must be implemented by subclasses.

    :type sorted_seq_idx: int
    :rtype: numpy.ndarray
    :returns features: format 2d (time,feature) (float)
    """
    raise NotImplementedError
def get_targets(self, target, sorted_seq_idx):
    """
    :param str target: data key
    :type sorted_seq_idx: int
    :rtype: numpy.ndarray
    :returns targets: format 1d (time) (int: idx of output-feature)
    """
    # by default, targets are just another data key
    return self.get_data(sorted_seq_idx, target)
def get_data_slice(self, seq_idx, key, start_frame, end_frame):
    """
    :param int seq_idx:
    :param str key:
    :param int start_frame:
    :param int end_frame:
    :return: x[start_frame:end_frame], with x = get_data(seq_idx, key)
    :rtype: numpy.ndarray
    """
    if '[sparse:' in key and (start_frame > 0 or end_frame < self.get_seq_length(seq_idx)[key]):
        # sparse data needs slicing via the time-index component
        return self._get_data_slice_sparse(seq_idx, key, start_frame, end_frame)
    return self.get_data(seq_idx, key)[start_frame:end_frame]
def _get_data_slice_sparse(self, seq_idx, key, start_frame, end_frame):
    """
    Slice sparse (COO) data by time frames.
    Key format: "<prefix>[sparse:coo:2:<component>]", where component 0 is the time index.

    :param int seq_idx:
    :param str key:
    :param int start_frame:
    :param int end_frame:
    :rtype: numpy.ndarray
    """
    key_prefix = key[:key.index('[')]
    sparse_info = key[(key.index('[') + 1):key.index(']')].split(':')
    assert (len(sparse_info) == 4)
    assert (tuple(sparse_info[0:3]) == ('sparse', 'coo', '2'))
    # component 0 holds the (sorted) time indices
    s0 = self.get_data(seq_idx, ('%s[sparse:coo:2:0]' % key_prefix))
    assert (s0 is not None)
    from returnn.native_op import sparse_splice_offset_numpy
    # map frame range -> entry range within the sparse arrays
    s0_start = sparse_splice_offset_numpy(s0, start_frame)
    s0_end = sparse_splice_offset_numpy(s0, end_frame)
    if (sparse_info[(- 1)] == '0'):
        # time component itself: shift so the slice starts at frame 0
        return (s0[s0_start:s0_end] - start_frame)
    else:
        data = self.get_data(seq_idx, key)
        return data[s0_start:s0_end]
def get_tag(self, sorted_seq_idx):
    """
    :param int sorted_seq_idx:
    :rtype: str
    :return: fallback tag; subclasses usually provide real tags
    """
    return ('seq-%i' % sorted_seq_idx)
def get_all_tags(self):
    """
    :return: list of all seq tags, of the whole dataset, without partition epoch.
      Note that this is not possible with all datasets.
    :rtype: list[str]
    """
    raise OptionalNotImplementedError(f'{self} get_all_tags not implemented')
def get_total_num_seqs(self) -> int:
    """
    :return: total number of seqs, without partition epoch.
      Should be the same as len(self.get_all_tags()).
      Note that this is not possible with all datasets.
    """
    raise OptionalNotImplementedError(f'{self} get_total_num_seqs not implemented')
def have_corpus_seq_idx(self):
    """
    :rtype: bool
    :return: whether you can call self.get_corpus_seq_idx() (False by default)
    """
    return False
def get_corpus_seq_idx(self, seq_idx):
    """
    :param int seq_idx: sorted sequence index from the current epoch, depending on seq_ordering
    :return: the sequence index as-is in the original corpus (as if you would have sorting="default").
      only defined if self.have_corpus_seq_idx()
    :rtype: int
    """
    if (self.seq_ordering == 'default') and (self.partition_epoch == 1):
        # identity mapping: no reordering and no partitioning
        return seq_idx
    assert self.have_corpus_seq_idx()
    # bugfix: was ``raise NotImplemented``, which raises a TypeError
    # (NotImplemented is a sentinel value, not an exception class)
    raise NotImplementedError
def have_get_corpus_seq(self) -> bool:
    """
    :return: whether you can call :func:`get_corpus_seq` (False by default)
    """
    return False
def get_corpus_seq(self, corpus_seq_idx: int) -> DatasetSeq:
    """
    This function allows random access directly into the corpus.
    Only implement this if such random access is possible in a reasonable efficient way.
    This allows to write map-style wrapper datasets around such RETURNN datasets.

    :param corpus_seq_idx: corresponds to output of :func:`get_corpus_seq_idx`
    :return: data
    """
    raise OptionalNotImplementedError
@classmethod
def generic_complete_frac(cls, seq_idx, num_seqs):
    """
    :param int seq_idx: idx
    :param int|None num_seqs: None if not available
    :return: fraction (float in [0,1], always > 0) of how far we have advanced
        for this seq in the dataset. Informational only; not necessarily exact.
    """
    if not num_seqs:
        # total count unknown: approach 1.0 very quickly, but stay > 0
        import math

        return max(1e-10, 1.0 - math.exp(-seq_idx * 1000))
    return min(float(seq_idx + 1) / num_seqs, 1.0)
def get_complete_frac(self, seq_idx):
    """
    :param int seq_idx:
    :return: fraction (float in [0,1], always > 0) of how far we have advanced
        for this seq in the dataset. Informational only; not necessarily exact.
    :rtype: float
    """
    num_seqs = None
    try:
        num_seqs = self.num_seqs
    except Exception:
        # exact count not available; fall back to the estimate, if any
        try:
            num_seqs = self.estimated_num_seqs
        except Exception:
            pass
    return self.generic_complete_frac(seq_idx, num_seqs)
@property
def num_seqs(self) -> int:
    """
    :return: number of seqs for the current epoch; must be provided by subclasses
    """
    raise NotImplementedError
@property
def estimated_num_seqs(self):
    """
    :return: estimated num seqs; does not have to be exact
    :rtype: int|None
    """
    try:
        # prefer the exact count when it is known
        return self.num_seqs
    except Exception:
        pass
    # fall back to the explicitly set estimate; may be None
    return self._estimated_num_seqs
def get_data_keys(self):
    """
    :return: all available data keys (for get_data and all other functions)
    :rtype: list[str]
    """
    keys = ['data']
    keys += self.get_target_list()
    return keys
def get_target_list(self):
    """
    :return: subset of :func:`get_data_keys`; target keys are usually not available during inference
    :rtype: list[str]
    """
    return ['classes']
def get_data_dim(self, key):
    """
    :param str key: e.g. "data" or "classes"
    :return: number of classes, no matter if sparse or not
    :rtype: int
    """
    if key in self.num_outputs:
        return self.num_outputs[key][0]
    if key == 'data' and self.window > 1:
        # windowed input: each frame carries `window` stacked input frames
        assert self.num_inputs
        return self.num_inputs * self.window
    return 1  # unknown
def get_data_dtype(self, key):
    """
    :param str key: e.g. "data" or "classes"
    :return: dtype as str, e.g. "int32" or "float32"
    :rtype: str
    """
    # sparse data are class indices (int32); dense data are float features
    return 'int32' if self.is_data_sparse(key) else 'float32'
def is_data_sparse(self, key):
    """
    :param str key: e.g. "data" or "classes"
    :return: whether the data is sparse
    :rtype: bool
    """
    if key in self.num_outputs:
        # ndim <= 1 implies sparse (class indices without a feature axis)
        return self.num_outputs[key][1] <= 1
    # convention: "data" is dense input, any other unknown key is a sparse target
    return key != 'data'
def get_data_shape(self, key: str) -> List[int]:
    """
    :returns: ``get_data(*, key).shape[1:]``, i.e. the per-frame shape with the
        num-frames axis excluded. Dynamic axes are None; the dense feature dim
        (if any) is the last entry.
    """
    if key in self.num_outputs:
        ndim_wo_time = self.num_outputs[key][1] - 1
        if ndim_wo_time < 1:
            return []  # sparse/scalar entries: no further axes
        shape = [None] * ndim_wo_time
        if not self.is_data_sparse(key):
            shape[-1] = self.get_data_dim(key)  # feature dim is static
        return shape
    if self.is_data_sparse(key):
        return []
    return [self.get_data_dim(key)]
def have_seqs(self) -> bool:
    """
    :return: whether num_seqs > 0
    """
    try:
        return self.get_total_num_seqs() > 0
    except NotImplementedError:
        pass  # total count not available; fall back to checking the current epoch
    if self.epoch is not None:
        return self.is_less_than_num_seqs(0)
    raise NotImplementedError(f'{self} have_seqs() is not implemented (and neither get_total_num_seqs())')
def len_info(self):
    """
    :rtype: str
    :returns: a string to present the user as information about our len.
        Depending on our implementation, we can give some more or some less information.
    """
    seqs_part = 'sequences: %s' % try_run(self.get_total_num_seqs, default='unknown')
    frames_part = 'frames: %s' % try_run(self.get_num_timesteps, default='unknown')
    return ', '.join([self.__class__.__name__, seqs_part, frames_part])
def is_less_than_num_seqs(self, n):
    """
    :type n: int
    :rtype: bool
    :returns: whether n < num_seqs. In case num_seqs is not known in advance, it will wait
        until it knows that n is behind the end or that we have the seq.
    """
    return n < self.num_seqs
def can_serialize_data(self, key):
    """
    :param str key: e.g. "classes"
    :rtype: bool
    """
    # serialization needs a label list for this key
    return key in self.labels
def serialize_data(self, key, data):
    """
    Readable string for the given label indices, via the corresponding vocabulary.
    In case you have a :class:`Vocabulary`, just use :func:`Vocabulary.get_seq_labels`.

    :param str key: e.g. "classes". self.labels[key] should be set
    :param numpy.ndarray data: 0D or 1D
    :rtype: str
    """
    vocab = Vocabulary.create_vocab_from_labels(self.labels[key])
    if data.ndim == 0:
        # a single label index
        return vocab.labels[data]
    return vocab.get_seq_labels(data)
def iterate_seqs(self, recurrent_net=True, used_data_keys=None):
    """
    Takes chunking into consideration.

    :param bool recurrent_net: whether the order of frames matter
    :param set(str)|None used_data_keys:
    :return: generator which yields tuples (seq index, seq start, seq end)
    :rtype: list[(int,NumbersDict,NumbersDict)]
    """
    if self.custom_chunking_func:
        # Random kwarg name forces the custom func to accept **kwargs (fwd-compat check).
        sentinel_kw = {('__fwd_compatible_random_arg_%i' % int((random() * 100))): None}
        for (seq_idx, t_start, t_end) in self.custom_chunking_func(dataset=self, seq_idx_start=0, recurrent_net=recurrent_net, used_data_keys=used_data_keys, **sentinel_kw):
            (yield (seq_idx, t_start, t_end))
        return
    chunk_size = self.chunk_size
    chunk_step = self.chunk_step
    if (not recurrent_net):
        if chunk_size:
            # Chunking only makes sense when frame order matters; disable it.
            print(('Non-recurrent network, chunk size %s:%s ignored' % (chunk_size, chunk_step)), file=log.v4)
            chunk_size = 0
    chunk_size = NumbersDict(chunk_size)
    chunk_step = NumbersDict(chunk_step)
    # Keep the configured values; chunk_size/chunk_step may get rescaled per seq below.
    chunk_size_orig = chunk_size.copy()
    chunk_step_orig = chunk_step.copy()
    s = 0
    while self.is_less_than_num_seqs(s):
        length = self.get_seq_length(s)
        if (chunk_size == 0):
            # no chunking: yield the whole seq
            (yield (s, NumbersDict.constant_like(0, numbers_dict=length), length))
        else:
            default_key = 'data'
            if (used_data_keys is not None):
                # restrict to the keys actually used
                length = NumbersDict({k: length[k] for k in used_data_keys})
                if (default_key not in used_data_keys):
                    default_key = sorted(used_data_keys)[0]
                if (chunk_step[default_key] == 0):
                    # default key needs a nonzero chunk step; pick another key which has one
                    assert (chunk_step.max_value() > 0)
                    default_key = [key for key in sorted(used_data_keys) if (chunk_step[key] > 0)][0]
                if (self.chunking_variance > 0):
                    # randomly scale down chunk size/step for this seq
                    chunking_variance = (1.0 - (self.rnd_seq_drop.random() * self.chunking_variance))
                    for k in used_data_keys:
                        chunk_size[k] = max(int((chunk_size_orig[k] * chunking_variance)), 1)
                        chunk_step[k] = max(int((chunk_step_orig[k] * chunking_variance)), 1)
                    # keep the chunk sizes of all keys in their original integer ratio
                    smallest_key = [k for k in sorted(used_data_keys, key=(lambda key: (chunk_step_orig[key], key))) if (chunk_step_orig[k] > 0)][0]
                    for k in used_data_keys:
                        if (chunk_size_orig[k] > chunk_size_orig[smallest_key]):
                            if ((chunk_size_orig[k] % chunk_size_orig[smallest_key]) == 0):
                                ratio = (chunk_size_orig[k] // chunk_size_orig[smallest_key])
                                chunk_size[k] = (chunk_size[smallest_key] * ratio)
                                chunk_step[k] = (chunk_step[smallest_key] * ratio)
            assert (chunk_step[default_key] > 0)
            t = NumbersDict.constant_like(0, numbers_dict=length)
            # Keys whose seqs have length <= 1 are treated as broadcasting:
            # their full (trivial) seq is repeated for every chunk.
            keys_with_full_seqs = []
            for key in length.keys():
                if (chunk_step[key] == chunk_step[default_key]):
                    if (length[key] == length[default_key]):
                        continue  # ok, same length as default key
                if (length[key] <= 1):
                    # special broadcasting case, see above
                    keys_with_full_seqs.append(key)
                    continue
                if (chunk_step[key] == chunk_step[default_key]):
                    raise Exception(('Chunking with multiple data-keys of different length: %r' % length))
                else:
                    # Different chunk steps: check that both keys yield the same number of chunks.
                    limit = (self.min_chunk_size[key] or 1)
                    limit_default = (self.min_chunk_size[default_key] or 1)
                    nr_of_chunks = (((length[key] - limit) // chunk_step[key]) + 1)
                    nr_of_chunks_default = (((length[default_key] - limit_default) // chunk_step[default_key]) + 1)
                    assert (nr_of_chunks == nr_of_chunks_default), ('%s: iterate seqs with chunking: length %r, chunk size/step %r/%r (min %r), key %r (default %r)' % (self, length, chunk_size, chunk_step, self.min_chunk_size, key, default_key))
            while (length[default_key] > t[default_key]):
                chunk_start = NumbersDict(t)
                chunk_end = NumbersDict.min([(t + chunk_size), length])
                for key in keys_with_full_seqs:
                    # broadcasting keys: always cover the full (trivial) seq
                    chunk_start[key] = 0
                    chunk_end[key] = length[key]
                if (length.value is None):
                    chunk_start.value = None
                    chunk_end.value = None
                (yield (s, chunk_start, chunk_end))
                t += chunk_step
                # stop when any remainder would fall below the min chunk size
                if any([((length[key] - t[key]) <= self.min_chunk_size[key]) for key in length.keys()]):
                    break
        s += 1
def get_start_end_frames_full_seq(self, seq_idx):
    """
    :param int seq_idx:
    :return: (start,end) frame, taking context_window into account
    :rtype: (NumbersDict,NumbersDict)
    """
    end = self.get_seq_length(seq_idx)
    start = NumbersDict.constant_like(0, numbers_dict=end)
    # widen by the configured left/right context window
    return (start - self.ctx_left, end + self.ctx_right)
def sample(self, seq_idx):
    """
    :param int seq_idx:
    :rtype: bool
    """
    weight = self.weights.get(seq_idx)
    if weight is None:
        return True  # no weight entry: always keep the seq
    return weight[0] >= weight[1]
def _generate_batches(self, recurrent_net, batch_size, max_seqs=(- 1), max_seq_length=sys.maxsize, max_pad_size=None, min_seq_length=0, pruning=0.0, seq_drop=0.0, max_total_num_seqs=(- 1), used_data_keys=None):
    """
    Generator over :class:`Batch` objects, packing (chunked) seqs into batches.

    :param bool recurrent_net: If True, the batch might have a batch seq dimension > 1.
        Otherwise, the batch seq dimension is always 1 and multiple seqs will be concatenated.
    :param int|dict[str,int]|NumbersDict batch_size: Max number of frames in one batch.
    :param int|dict[str,int]|NumbersDict max_pad_size: Max number of zero-padded frames in one batch.
    :param int max_seqs: Max number of seqs per batch.
    :param int max_total_num_seqs:
    :param int|dict[str,int]|NumbersDict max_seq_length:
    :param set(str)|None used_data_keys:
    """
    if (not batch_size):
        raise Exception('batch_size must be set and be greater than 0')
    batch_size = NumbersDict(batch_size)
    assert (not batch_size.any_compare(NumbersDict(0), (lambda a, b: (a <= b))))
    max_pad_size = NumbersDict(max_pad_size)
    # normalize the limit options (-1 / falsy mean "unlimited")
    if (max_seqs == (- 1)):
        max_seqs = float('inf')
    if (not max_seq_length):
        max_seq_length = sys.maxsize
    if (isinstance(max_seq_length, int) and (max_seq_length < 0)):
        # negative int: interpret as a limit on the "classes" stream only
        max_seq_length = {'classes': (- max_seq_length)}
    max_seq_length = NumbersDict(max_seq_length)
    min_seq_length = NumbersDict(min_seq_length)
    assert (max_seqs > 0)
    assert (seq_drop <= 1.0)
    if ((not max_total_num_seqs) or (max_total_num_seqs < 0)):
        max_total_num_seqs = float('inf')
    batch = Batch()
    total_num_seqs = 0
    last_seq_idx = (- 1)
    # pruning: re-randomize the per-seq sampling thresholds around the average weight
    avg_weight = (sum([v[0] for v in self.weights.values()]) / (len(self.weights.keys()) or 1))
    for idx in self.weights:
        self.weights[idx][1] = ((random() * avg_weight) * pruning)
        self.weights[idx][0] *= (1.0 + pruning)
    for (seq_idx, t_start, t_end) in self.iterate_seqs(recurrent_net=recurrent_net, used_data_keys=used_data_keys):
        if (not self.sample(seq_idx)):
            continue  # pruned away by the sampling weights
        if (total_num_seqs > max_total_num_seqs):
            break
        # widen by the context window
        t_start -= self.ctx_left
        t_end += self.ctx_right
        if recurrent_net:
            length = (t_end - t_start)
            if length.any_compare(max_seq_length, (lambda a, b: (a > b))):
                continue  # too long
            if length.any_compare(min_seq_length, (lambda a, b: (a < b))):
                continue  # too short
            if length.any_compare(batch_size, (lambda a, b: (a > b))):
                print(('warning: sequence length (%r) larger than limit (%r)' % (length, batch_size)), file=log.v4)
            if (self.rnd_seq_drop.random() < seq_drop):
                continue  # randomly dropped
            # (dt, ds): resulting (max frames per slice, num slices) if we add this seq
            (dt, ds) = batch.try_sequence_as_slice(length)
            if (batch.num_slices >= 1):
                # flush the current batch if adding would exceed any limit
                if (dt * ds).any_compare(batch_size, (lambda a, b: (a > b))):
                    (yield batch)
                    batch = Batch()
                elif (ds > max_seqs):
                    (yield batch)
                    batch = Batch()
                elif (((dt * ds) - batch.get_total_num_frames()) - length).any_compare(max_pad_size, (lambda a, b: (a > b))):
                    # too much zero-padding would be needed
                    (yield batch)
                    batch = Batch()
            batch.add_sequence_as_slice(seq_idx=seq_idx, seq_start_frame=t_start, length=length)
        else:
            # non-recurrent: concatenate frames into a single slice, splitting as needed
            while (t_start.max_value() < t_end.max_value()):
                length = (t_end - t_start)
                num_frames = NumbersDict.min([length, (batch_size.copy_like(length) - batch.get_all_slices_num_frames().copy_like(length))])
                assert (num_frames.max_value() > 0)
                batch.add_frames(seq_idx=seq_idx, seq_start_frame=t_start, length=num_frames)
                if (batch.get_all_slices_num_frames().any_compare(batch_size, (lambda a, b: (a >= b))) or (batch.get_num_seqs() > max_seqs)):
                    (yield batch)
                    batch = Batch()
                t_start += num_frames
        if (seq_idx != last_seq_idx):
            # count each seq only once (chunking can yield a seq multiple times)
            last_seq_idx = seq_idx
            total_num_seqs += 1
    if (batch.get_all_slices_num_frames().max_value() > 0):
        # flush the final partial batch
        (yield batch)
def batch_set_generator_cache_whole_epoch(self):
    """
    The BatchSetGenerator can cache the list of batches which we generated across epochs.
    See self.generate_batches() and self._generate_batches().
    In many cases, the dataset does not support this, and in that case,
    it is not needed to enable this cache and waste memory.
    Caching it together with option shuffle_batches could also mean that
    there will be self.load_seqs() calls with non-monotonic seq-idxs.
    The only dataset currently which enables this is CachedDataset and thus HDFDataset.

    :return: whether we should enable this cache
    :rtype: bool
    """
    return False  # disabled by default; see docstring
def generate_batches(self, shuffle_batches=False, **kwargs):
    """
    :param bool shuffle_batches:
    :param kwargs: will be passed to :func:`_generate_batches`
    :rtype: BatchSetGenerator
    """
    batch_gen = self._generate_batches(**kwargs)
    return BatchSetGenerator(
        dataset=self,
        generator=batch_gen,
        shuffle_batches=shuffle_batches,
        cache_whole_epoch=self.batch_set_generator_cache_whole_epoch(),
    )
@classmethod
def index_shape_for_batches(cls, batches, data_key='data'):
    """
    :param list[EngineBatch.Batch] batches:
    :param str data_key:
    :return: shape as (time, batch)
    :rtype: (int, int)
    """
    max_time = 0
    total_slices = 0
    for batch in batches:
        max_time = max(max_time, batch.max_num_frames_per_slice[data_key])
        total_slices += batch.num_slices
    return (max_time, total_slices)
|
class DatasetSeq:
    """
    Encapsulates all data for one sequence.
    """

    def __init__(self, seq_idx, features, targets=None, seq_tag=None):
        """
        :param int seq_idx: sorted seq idx in the Dataset
        :param numpy.ndarray|dict[str,numpy.ndarray] features: format 2d (time,feature) (float)
        :param dict[str,numpy.ndarray]|numpy.ndarray|None targets: name -> format 1d (time) (idx of output-feature)
        :param str seq_tag: sequence name / tag
        """
        assert isinstance(seq_idx, (int, numpy.integer))
        self.seq_idx = int(seq_idx)
        self.seq_tag = seq_tag or ('seq-%i' % seq_idx)
        # normalize features to a dict
        if not isinstance(features, dict):
            assert isinstance(features, numpy.ndarray)
            features = {'data': features}
        # normalize targets to a dict, then merge them into features
        if targets is None:
            targets = {}
        elif isinstance(targets, numpy.ndarray):
            targets = {'classes': targets}
        assert isinstance(targets, dict)
        features.update(targets)
        targets = None
        assert isinstance(features, dict) and (targets is None)
        for arr in features.values():
            assert isinstance(arr, numpy.ndarray)
        self.features = features

    @property
    def num_frames(self):
        """
        :rtype: NumbersDict
        """
        counts = {}
        for key, arr in self.features.items():
            counts[key] = arr.shape[0] if arr.ndim >= 1 else 1
        return NumbersDict(counts)

    def get_data(self, key):
        """
        :param str key:
        :rtype: numpy.ndarray
        """
        return self.features[key]

    def get_data_keys(self):
        """
        :rtype: set[str]
        """
        return self.features.keys()

    def __repr__(self):
        # "DataCache" is the legacy name used in this repr, kept as-is
        return '<DataCache seq_idx=%i>' % self.seq_idx
|
def get_dataset_class(name: Union[str, Type[Dataset]]) -> Optional[Type[Dataset]]:
    """
    Resolve a dataset class by name, lazily filling the global registry
    from the known returnn.datasets.* modules on first use.

    :param str|type name:
    """
    if isinstance(name, type):
        assert issubclass(name, Dataset)
        return name
    if not _dataset_classes:
        # first call: scan all known dataset modules and register every Dataset subclass
        from importlib import import_module

        mod_names = ['hdf', 'sprint', 'generating', 'numpy_dump', 'meta', 'lm', 'stereo', 'raw_wav', 'map', 'multi_proc']
        for mod_name in mod_names:
            mod = import_module('returnn.datasets.%s' % mod_name)
            for name_, clazz in vars(mod).items():
                if name_ in _dataset_classes:
                    continue  # first registration wins
                if not isinstance(clazz, type) or not issubclass(clazz, Dataset):
                    continue
                _dataset_classes[name_] = clazz
    return _dataset_classes.get(name, None)
|
def init_dataset(kwargs, extra_kwargs=None, default_kwargs=None):
    """
    Main entry to create and initialize a :class:`Dataset` from a flexible description.

    :param dict[str]|str|(()->dict[str])|Dataset kwargs:
    :param dict[str]|None extra_kwargs:
    :param dict[str]|None default_kwargs:
    :rtype: Dataset
    """
    assert kwargs
    if isinstance(kwargs, Dataset):
        # already constructed: just make sure it is initialized
        data = kwargs
        data.initialize()
        return data
    if callable(kwargs):
        # factory function: call it and recurse on the result
        return init_dataset(kwargs(), extra_kwargs=extra_kwargs, default_kwargs=default_kwargs)
    if isinstance(kwargs, str):
        if kwargs.startswith('{'):
            # dict literal given as a string; note: eval on the (trusted) config string
            kwargs = eval(kwargs)
        elif kwargs.startswith('config:'):
            # evaluate the expression in the global config namespace and recurse
            from returnn.config import get_global_config
            config = get_global_config()
            data = eval(kwargs[len('config:'):], config.typed_dict, config.typed_dict)
            return init_dataset(data, extra_kwargs=extra_kwargs, default_kwargs=default_kwargs)
        else:
            # legacy string format (e.g. HDF file list or "Class:...")
            config_str = kwargs
            kwargs = {}
            if default_kwargs:
                kwargs.update(default_kwargs)
            if extra_kwargs:
                kwargs.update(extra_kwargs)
            return init_dataset_via_str(config_str=config_str, **kwargs)
    assert isinstance(kwargs, dict)
    kwargs = kwargs.copy()
    assert ('class' in kwargs)
    clazz_name = kwargs.pop('class')
    clazz = get_dataset_class(clazz_name)
    if (not clazz):
        raise Exception(('Dataset class %r not found' % clazz_name))
    if default_kwargs:
        # defaults must not override explicitly given options
        for (key, value) in default_kwargs.items():
            kwargs.setdefault(key, value)
    if extra_kwargs:
        kwargs.update(extra_kwargs)
    obj = clazz(**kwargs)
    assert isinstance(obj, Dataset)
    obj.initialize()
    return obj
|
def init_dataset_via_str(config_str, config=None, cache_byte_size=None, **kwargs):
    """
    Legacy string-based dataset construction.

    :param str config_str: hdf-files, or "LmDataset:..." or so
    :param returnn.config.Config|None config: optional, only for "sprint:..."
    :param int|None cache_byte_size: optional, only for HDFDataset
    :rtype: Dataset
    """
    kwargs = kwargs.copy()
    if (('window' not in kwargs) and config and config.has('window')):
        kwargs['window'] = config.int('window', 1)
    from returnn.datasets.hdf import HDFDataset
    if config_str.startswith('sprint:'):
        # Sprint/RASR trainer as external data source
        kwargs['sprintConfigStr'] = config_str[len('sprint:'):]
        assert config, "need config for dataset in 'sprint:...' format. or use 'ExternSprintDataset:...' instead"
        sprint_trainer_exec_path = config.value('sprint_trainer_exec_path', None)
        assert sprint_trainer_exec_path, 'specify sprint_trainer_exec_path in config'
        kwargs['sprintTrainerExecPath'] = sprint_trainer_exec_path
        from returnn.datasets.sprint import ExternSprintDataset
        cls = ExternSprintDataset
    elif config_str.startswith('config:'):
        # expression evaluated in the config namespace; recurse via init_dataset
        from returnn.config import get_global_config
        if (not config):
            config = get_global_config()
        data = eval(config_str[len('config:'):], config.typed_dict, config.typed_dict)
        return init_dataset(data, extra_kwargs=kwargs)
    elif (':' in config_str):
        # "ClassName:opt1=...,opt2=..." format; note: eval on the (trusted) config string
        kwargs.update(eval(('dict(%s)' % config_str[(config_str.find(':') + 1):])))
        class_name = config_str[:config_str.find(':')]
        cls = get_dataset_class(class_name)
    else:
        # plain (comma-separated) HDF file list
        if (cache_byte_size is not None):
            kwargs['cache_byte_size'] = cache_byte_size
        cls = HDFDataset
    if config:
        data = cls.from_config(config, **kwargs)
    else:
        data = cls(**kwargs)
    if isinstance(data, HDFDataset):
        # register each file from the comma-separated list
        for f in config_str.split(','):
            if f:
                assert os.path.exists(f)
                data.add_file(f)
    data.initialize()
    return data
|
def convert_data_dims(data_dims, leave_dict_as_is=False):
    """
    This converts what we called num_outputs originally,
    from the various formats which were allowed in the past
    (just an int, or dict[str,int]) into the format which we currently expect.
    In all cases, the output will be a new copy of the dict.

    :param int|dict[str,int|(int,int)|dict] data_dims: what we called num_outputs originally
    :param bool leave_dict_as_is:
    :rtype: dict[str,(int,int)|dict]
    :returns: dict data-key -> (data-dimension, len(shape) (1 ==> sparse))
        (or potentially data-key -> dict, if leave_dict_as_is is True; for TensorFlow)
    """
    if isinstance(data_dims, int):
        # bare int: interpret as the number of classes
        data_dims = {'classes': data_dims}
    assert isinstance(data_dims, dict)
    out = {}
    for key, value in data_dims.items():
        if isinstance(value, int):
            # plain int: dense 2D for "data", sparse 1D otherwise
            value = (value, 2 if key == 'data' else 1)
        if leave_dict_as_is and isinstance(value, dict):
            out[key] = value
            continue
        assert isinstance(value, (tuple, list))
        value = tuple(value)
        assert len(value) == 2
        assert isinstance(value[0], int)
        assert isinstance(value[1], int)
        assert value[1] >= 1
        out[key] = value
    return out
|
def shapes_for_batches(batches: Sequence[Batch], *, data_keys: Sequence[str], dataset: Optional[Dataset]=None, extern_data: Optional[TensorDict], enforce_min_len1: bool=False) -> Optional[Dict[(str, List[int])]]:
    """
    Computes, per data key, the padded shape which covers all the given batches.

    :param batches:
    :param data_keys:
    :param dataset: only used in the initial sanity check here
    :param extern_data: detailed data description
        (NOTE(review): despite being Optional, it is accessed unconditionally below
        when there is at least one slice -- confirm whether a dataset-only path is missing)
    :param enforce_min_len1: pad each dynamic axis to at least 1 frame
    :return: data key -> full shape (batch and time axes filled in),
        or None if the batches contain no slices
    """
    assert (dataset or extern_data)
    all_data_keys = set(data_keys)
    # shape[0]: per-key max frames per slice; shape[1]: total number of slices (batch dim)
    shape = [NumbersDict(0), 0]
    for batch in batches:
        shape = [NumbersDict.max([shape[0], batch.max_num_frames_per_slice]), (shape[1] + batch.num_slices)]
    if (shape[1] == 0):
        return None  # no slices at all
    assert (shape[0].max_value() > 0)
    if ((not extern_data) or enforce_min_len1):
        # ensure at least one frame per key
        for k in all_data_keys:
            shape[0][k] = max(shape[0][k], 1)
    d = {}
    for k in all_data_keys:
        data = extern_data.data[k]
        data_shape = list(data.batch_shape)
        assert data.have_batch_axis()
        data_shape[data.batch_dim_axis] = shape[1]
        dyn_axes = data.get_dynamic_axes()
        if dyn_axes:
            assert (len(dyn_axes) == 1), f'data {data} has multiple dynamic axes, not supported currently'
            data_shape[dyn_axes[0]] = shape[0][k]
        assert all([(n is not None) for n in data_shape]), f'data {data} leaves shape {data_shape} partially undefined'
        d[k] = data_shape
    return d
|
def set_config_extern_data_from_dataset(config, dataset):
    """
    Derives the "extern_data" config option from the given dataset's data keys.

    :param returnn.config.Config config:
    :param Dataset dataset:
    """
    from returnn.tf.network import _data_kwargs_from_dataset_key

    extern_data = {}
    for key in dataset.get_data_keys():
        extern_data[key] = _data_kwargs_from_dataset_key(dataset=dataset, key=key)
    config.set('extern_data', extern_data)
|
class BundleFile(object):
    """Holds paths to HDF dataset files."""

    def __init__(self, filePath):
        """Reads paths to HDF dataset files from a bundle file.

        A bundle file simply contains one HDF dataset file path per line, e.g.::

            /data/data_tr05_real_1_100.hdf
            /data/data_tr05_real_2_100.hdf

        :type filePath: str
        :param filePath: path to a bundle file which contains paths to HDF
            dataset files. One path per line.
        """
        self._filePath = filePath
        self._datasetFilesPaths = []  # list[str]
        self._readDatasetFilesPaths()

    def _readDatasetFilesPaths(self):
        """Reads paths to HDF dataset files from a bundle file."""
        with open(self._filePath, 'r') as bundleFile:
            # Fix: materialize as a list. Previously a lazy `filter` iterator was
            # stored, so `numberOfDatasetFiles` (len()) raised TypeError and any
            # second iteration saw an exhausted iterator.
            self._datasetFilesPaths = [line.strip() for line in bundleFile if line.strip()]

    @property
    def datasetFilePaths(self):
        """Paths to HDF dataset files.

        :rtype: list of str
        :return: Paths to HDF dataset files.
        """
        return self._datasetFilesPaths

    @property
    def numberOfDatasetFiles(self):
        """Number of HDF dataset files.

        :rtype: int
        :return: Number of HDF dataset files.
        """
        return len(self._datasetFilesPaths)
|
class CachedDataset(Dataset):
'\n Base class for datasets with caching. This is only used for the :class:`HDFDataset`.\n Also see :class:`CachedDataset2`.\n '
def __init__(self, cache_byte_size=0, **kwargs):
    """
    :param int cache_byte_size: cache size in bytes; 0 disables caching,
        -1 means (effectively) unlimited
    """
    super(CachedDataset, self).__init__(**kwargs)
    self._cache_byte_size = cache_byte_size
    self.cache_byte_size_total_limit = cache_byte_size
    if (cache_byte_size == (- 1)):
        # unlimited: cache everything at the start (bounded by 1 TiB)
        self.cache_byte_size_limit_at_start = (1024 ** 4)
    elif (cache_byte_size == 0):
        self.cache_byte_size_limit_at_start = 0
    else:
        # split the budget: ~2/3 for the start-cache, the remainder for dynamic caching
        self.cache_byte_size_limit_at_start = max(((cache_byte_size * 2) // 3), 1)
        self.cache_byte_size_total_limit = max((cache_byte_size - self.cache_byte_size_limit_at_start), 1)
    self.num_seqs_cached_at_start = 0
    self.cached_bytes_at_start = 0
    self.nbytes = 0  # bytes per frame; computed in initialize()
    self.start_cache_initialized = False
    self.definite_cache_leftover = 0
    self.cache_num_frames_free = 0  # remaining dynamic-cache capacity, in frames
    self.preload_set = set([])
    self.preload_end = 0
    self.alloc_intervals = None  # list of (start, end, numpy data) for cached seq ranges
    self._seq_start = []  # per sorted seq idx: cumulative start frame (per stream)
    self._seq_index = []  # sorted seq idx -> corpus seq idx
    self._seq_index_inv = {}  # corpus seq idx -> sorted seq idx (built lazily)
    self._index_map = range(len(self._seq_index))
    self._tag_idx = {}  # tag -> corpus seq idx (built lazily)
    self.targets = {}
    self.target_keys = []
    self.timestamps = None
def initialize(self):
    """
    Initialization. Computes the per-frame byte size and reports
    the start-cache fill state to the log.
    """
    super(CachedDataset, self).initialize()
    if (self.cache_byte_size_limit_at_start > 0):
        # bytes per frame: float32 items for the (windowed) input features plus two extra columns
        self.nbytes = (numpy.array([], dtype=numpy.float32).itemsize * (((self.num_inputs * self.window) + 1) + 1))
        temp_cache_size_bytes = max(0, self.cache_byte_size_total_limit)
        # if the whole epoch fits into the start-cache, the dynamic budget is pure leftover
        self.definite_cache_leftover = (temp_cache_size_bytes if (self.num_seqs_cached_at_start == self.num_seqs) else 0)
        self.cache_num_frames_free = (temp_cache_size_bytes // self.nbytes)
        print(('cached %i seqs' % self.num_seqs_cached_at_start), ('%s GB' % (self.cached_bytes_at_start / float(((1024 * 1024) * 1024)))), (('(fully loaded, %s GB left over)' if self.definite_cache_leftover else '(%s GB free)') % max((temp_cache_size_bytes / float(((1024 * 1024) * 1024))), 0)), file=log.v4)
def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
    """
    :type epoch: int|None
    :param list[str]|None seq_list: List of sequence tags, to set a predefined order.
    :param list[int]|None seq_order: List of corpus sequence indices, to set a predefined order.
    :return: whether the seq order changed (callers can use this to decide on reloading)
    :rtype: bool

    Initialize lists:
      self.seq_index  # sorted seq idx
    """
    super(CachedDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
    if (seq_order is not None):
        seq_index = seq_order
    elif (seq_list is not None):
        # map the given tags to corpus seq indices
        self._update_tag_idx()
        seq_index = [self._tag_idx[tag] for tag in seq_list]
    else:
        # derive the order from the configured seq_ordering, keyed by seq length
        seq_index = self.get_seq_order_for_epoch(epoch, self._num_seqs, (lambda s: self._get_seq_length_by_real_idx(s)[0]))
    old_index_map = self._index_map[:]
    self._index_map = range(len(seq_index))
    if (isinstance(seq_index, numpy.ndarray) or isinstance(self._seq_index, numpy.ndarray)):
        seq_index_unchanged = numpy.array_equal(self._seq_index, seq_index)
    else:
        seq_index_unchanged = (self._seq_index == seq_index)
    if (seq_index_unchanged and self.start_cache_initialized):
        return False  # nothing to do
    if (epoch is not None):
        print(('Reinitialize dataset seq order for epoch %i.' % epoch), file=log.v4)
    if ((self.cache_byte_size_limit_at_start == 0) or (self.num_seqs_cached_at_start != len(seq_index)) or (not self.start_cache_initialized)):
        # no start-cache, or it does not cover the whole epoch: rebuild all cache structures
        self._seq_index = seq_index
        self._seq_index_inv = {}
        self._init_seq_starts()
        self._init_alloc_intervals()
        self._init_start_cache()
        self.start_cache_initialized = True
    else:
        # whole epoch is cached: keep the cached data and only remap the order via _index_map
        if (not self._seq_index_inv):
            self._seq_index_inv = dict(zip(self._seq_index, range(len(self._seq_index))))
        self._index_map = [self._seq_index_inv[i] for i in seq_index]
        if (self._index_map == old_index_map):
            return False
    return True
def supports_seq_order_sorting(self) -> bool:
    """
    :return: True; this dataset supports sorted seq ordering
    """
    return True
def get_current_seq_order(self):
    """
    :return: current seq order (corpus seq indices), as set up by :func:`init_seq_order`
    """
    # Only valid without the start-cache; with it, the order is tracked via
    # self._index_map instead (see init_seq_order).
    assert self.cache_byte_size_limit_at_start == 0
    return self._seq_index
def _get_tag_by_real_idx(self, real_idx):
    """
    :param int real_idx: corpus seq index
    :return: seq tag; must be provided by subclasses
    :rtype: str
    """
    raise NotImplementedError
def _update_tag_idx(self):
if self._tag_idx:
return
for i in range(self._num_seqs):
self._tag_idx[self._get_tag_by_real_idx(i)] = i
def batch_set_generator_cache_whole_epoch(self):
    """
    :return: True; the cached dataset enables caching the generated batches across epochs
    :rtype: bool
    """
    return True
def _init_alloc_intervals(self):
    """
    Resets self.alloc_intervals to two empty sentinel intervals, at seq 0 and at
    num_seqs, between which cached data blocks get inserted later.
    """
    if (self.cache_byte_size_limit_at_start == 0):
        return  # caching disabled
    if (self.epoch is None):
        return  # no epoch set yet
    assert (self.num_seqs > 0)
    assert (self.num_inputs > 0)
    assert (self.window > 0)
    self.preload_set = set([])
    # two zero-length sentinel intervals with empty (1-frame placeholder) data buffers
    self.alloc_intervals = [(0, 0, numpy.zeros(([1] + self.get_data_shape('data')), dtype=self.get_data_dtype('data'))), (self.num_seqs, self.num_seqs, numpy.zeros(([1] + self.get_data_shape('data')), dtype=self.get_data_dtype('data')))]
def _init_seq_starts(self):
    """
    Recomputes self._seq_start: the cumulative start frames (per stream) for each
    sorted seq idx, following the current self._seq_index order.
    """
    if (self.cache_byte_size_limit_at_start == 0):
        return  # caching disabled
    # keep element 0 as a zero vector of the right shape/type
    self._seq_start = [(self._seq_start[0] * 0)]
    for i in range(self.num_seqs):
        ids = self._seq_index[i]
        self._seq_start.append((self._seq_start[(- 1)] + self._get_seq_length_by_real_idx(ids)))
def _init_start_cache(self):
if (self.cache_byte_size_limit_at_start == 0):
return
if (not self.alloc_intervals):
return
if (not self.nbytes):
return
if (not self.epoch):
return
num_cached = 0
cached_bytes = 0
for i in range(self.num_seqs):
if (i == num_cached):
nbytes = (self.get_seq_length_nd(i)[0] * self.nbytes)
if (self.cache_byte_size_limit_at_start >= (cached_bytes + nbytes)):
num_cached = (i + 1)
cached_bytes += nbytes
self.num_seqs_cached_at_start = num_cached
self.cached_bytes_at_start = cached_bytes
if (num_cached > 0):
self.preload_end = num_cached
if (sys.version_info >= (3, 0)):
threading.Thread(target=self._preload_seqs, args=(0, num_cached), daemon=True).start()
else:
threading.Thread(target=self._preload_seqs, args=(0, num_cached)).start()
def load_seqs(self, start, end):
    """
    Load data sequences.
    As a side effect, will modify / fill-up:
      self.alloc_intervals
      self.targets
    This does some extra logic for the cache and calls self._load_seqs()
    for the real loading.

    :param int start: start sorted seq idx
    :param int end: end sorted seq idx
    """
    assert (start >= 0)
    assert (start <= end)
    if self.is_cached(start, end, blocking=True):
        return  # already in the cache
    if (self.cache_byte_size_limit_at_start > 0):
        self._load_seqs_with_cache(start, end)
        # wait (blocking) until the requested range is cached; return value is unused by callers
        return self.is_cached(start, end, blocking=True)
    super(CachedDataset, self).load_seqs(start, end)
def _load_seqs(self, start, end):
    """
    Does the real loading of seqs [start,end); must be provided by subclasses.

    :param int start: start sorted seq idx
    :param int end: end sorted seq idx
    """
    raise NotImplementedError
def _load_seqs_with_cache(self, start, end, clear=True):
    """
    Makes room in the dynamic cache and preloads seqs in a background thread.

    :param int start: start sorted seq idx
    :param int end: end sorted seq idx
    :param bool clear: if True, flush the whole cache first and preload as many
        seqs from `start` as fit; if False, only free exactly what [start,end) needs
    """
    if (not clear):
        # free only the frames needed for [start,end)
        num_needed_cache_frames = (self.get_seq_start(end)[0] - self.get_seq_start(start)[0])
        if (self.cache_num_frames_free < num_needed_cache_frames):
            self.cache_num_frames_free += self.delete((num_needed_cache_frames - self.cache_num_frames_free))
            gc.collect()
        self.cache_num_frames_free -= num_needed_cache_frames
        threading.Thread(target=self._preload_seqs, args=(start, end)).start()
    else:
        # flush everything, then extend `end` while more seqs still fit into the cache
        self.cache_num_frames_free += self.delete(None)
        gc.collect()
        while (end < self.num_seqs):
            num_needed_cache_frames = self.get_seq_length_nd(end)[0]
            if ((self.cache_num_frames_free - num_needed_cache_frames) < 0):
                break
            self.cache_num_frames_free -= num_needed_cache_frames
            end += 1
        self.preload_end = end
        threading.Thread(target=self._preload_seqs, args=(start, end)).start()
def _preload_seqs(self, start, end):
    """
    Background loader: loads seqs [start,end) via the base implementation.
    Started as a thread from :func:`_init_start_cache` / :func:`_load_seqs_with_cache`.

    :param int start: start sorted seq idx
    :param int end: end sorted seq idx
    """
    print('Preloading cache from', start, 'to', end, file=log.v4)
    super(CachedDataset, self).load_seqs(start, end)
    # reset the preload marker back to the start-cache boundary
    self.preload_end = self.num_seqs_cached_at_start
def _shuffle_frames_in_seqs(self, start, end):
'\n :type start: int\n :type end: int\n '
assert (start < end)
assert self.is_cached(start, end)
alloc_idx = self.alloc_interval_index(start)
(alloc_start, alloc_end, alloc_data) = self.alloc_intervals[alloc_idx]
assert (start >= alloc_start)
assert (end <= alloc_end)
rnd = numpy.random.RandomState(start)
num_frames = (self._seq_start[end][0] - self._seq_start[start][0])
assert (num_frames > 0)
perm = rnd.permutation(num_frames)
alloc_offset = (self._seq_start[start][0] - self._seq_start[alloc_start][0])
assert ((alloc_offset + num_frames) <= alloc_data.shape[0])
data = alloc_data[alloc_offset:(alloc_offset + num_frames)]
alloc_data[alloc_offset:(alloc_offset + num_frames)] = data[perm]
for k in self.targets:
idx = (self.target_keys.index(k) + 1)
targets = self.targets[k][self._seq_start[idx]:(self._seq_start[start][idx] + num_frames)]
self.targets[k][self._seq_start[start][idx]:((self._seq_start[start][idx] + self._seq_start[end][idx]) - self._seq_start[start][idx])] = targets[perm]
def _set_alloc_intervals_data(self, idc, data):
    """
    Copies raw feature data for one seq into its place inside the cache buffer.

    :param int idc: index of sorted seq idx
    :param numpy.ndarray data: raw data
    """
    idi = self.alloc_interval_index(idc)
    assert (idi >= 0)
    # frame offset of this seq within the containing alloc interval's buffer
    o = (self._seq_start[idc][0] - self._seq_start[self.alloc_intervals[idi][0]][0])
    l = data.shape[0]
    x = data
    if (self.window > 1):
        # apply the context window (see self._sliding_window)
        x = self._sliding_window(x)
    self.alloc_intervals[idi][2][o:(o + l)] = x
def alloc_interval_index(self, ids):
    """
    Binary search for the allocation interval which contains the given sorted seq idx.

    :param int ids: sorted seq idx
    :return: index in self.alloc_intervals, or -1 if not covered by any interval
    :rtype: int
    """
    lo = 0
    hi = len(self.alloc_intervals)
    while lo < hi:
        mid = (lo + hi) // 2
        interval_start, interval_end, _ = self.alloc_intervals[mid]
        if interval_start <= ids:
            if ids < interval_end:
                # found: ids lies within [interval_start, interval_end)
                return mid
            # ids lies to the right of this interval
            if lo == mid:
                return -1
            lo = mid
        else:
            # ids lies to the left of this interval
            if hi == mid:
                return -1
            hi = mid
    return -1
def _insert_alloc_interval(self, pos, value, merge=False):
    """
    Insert np.zeros into self.alloc_intervals.

    :param int pos: idx in self.alloc_intervals
    :param (int,int) value: (start,end) like in load_seqs(), sorted seq idx
    :param bool merge: merge the new zeros into an adjacent existing interval instead of inserting a new entry
    :rtype: int
    :return: growth of len(self.alloc_intervals) (0 if merged or empty, 1 if a new entry was inserted)
    """
    if (value[0] == value[1]):
        # empty range, nothing to do
        return 0
    ci = self.alloc_intervals[pos][1]  # end of the interval at pos
    ni = self.alloc_intervals[(pos + 1)][0]  # start of the next interval
    xc = self.alloc_intervals[pos][2]  # data buffer of the interval at pos
    xn = self.alloc_intervals[(pos + 1)][2]  # data buffer of the next interval
    if ((value[0] == ci) and (value[1] == ni) and merge):
        # new range exactly fills the gap: merge both neighbors and the gap into one interval
        nj = self.alloc_intervals[pos][0]
        nk = self.alloc_intervals[(pos + 1)][1]
        del self.alloc_intervals[pos]
        del self.alloc_intervals[pos]
        # NOTE(review): the zeros size here is self._seq_start[ni][0]; the other branches use
        # a difference of two seq starts — confirm this is intended and not missing "- self._seq_start[ci][0]".
        self.alloc_intervals.insert(pos, (nj, nk, numpy.concatenate([xc, numpy.zeros(([self._seq_start[ni][0]] + self.get_data_shape('data')), dtype=self.get_data_dtype('data')), xn])))
        return 0
    elif ((value[0] == ci) and merge):
        # new range extends the interval at pos to the right
        nj = self.alloc_intervals[pos][0]
        del self.alloc_intervals[pos]
        self.alloc_intervals.insert(pos, (nj, value[1], numpy.concatenate([xc, numpy.zeros(([(self._seq_start[value[1]][0] - self._seq_start[ci][0])] + self.get_data_shape('data')), dtype=self.get_data_dtype('data'))])))
        return 0
    elif ((value[1] == ni) and merge):
        # new range extends the next interval to the left
        # NOTE(review): this concatenates xc (buffer of the interval at pos), not xn (buffer of
        # the interval being replaced) — looks suspicious; verify against usage (merge is never
        # passed as True by _modify_alloc_intervals, so these merge branches appear unused here).
        nk = self.alloc_intervals[(pos + 1)][1]
        del self.alloc_intervals[(pos + 1)]
        self.alloc_intervals.insert((pos + 1), (value[0], nk, numpy.concatenate([numpy.zeros(([(self._seq_start[ni][0] - self._seq_start[value[0]][0])] + self.get_data_shape('data')), dtype=self.get_data_dtype('data')), xc])))
        return 0
    else:
        # no merge: insert a fresh zero-initialized interval after pos
        self.alloc_intervals.insert((pos + 1), (value + (numpy.zeros(([(self._seq_start[value[1]][0] - self._seq_start[value[0]][0])] + self.get_data_shape('data')), dtype=self.get_data_dtype('data')),)))
        return 1
def _remove_alloc_interval(self, pos, value):
    """
    Remove data from self.alloc_intervals.

    :param int pos: idx in self.alloc_intervals
    :param (int,int) value: (start,end) like in load_seqs(), sorted seq idx
    :rtype: int
    :return: change of len(self.alloc_intervals) (-1 fully removed, 0 trimmed, 1 split into two)
    """
    (ci, ni, xi) = self.alloc_intervals[pos]
    if ((value[0] == ci) and (value[1] == ni)):
        # exact match: drop the whole interval
        del self.alloc_intervals[pos]
        return (- 1)
    elif (value[0] == ci):
        # trim from the left
        self.alloc_intervals.insert(pos, (value[1], ni, xi[(self._seq_start[value[1]][0] - self._seq_start[ci][0]):]))
        del self.alloc_intervals[(pos + 1)]
        return 0
    elif (value[1] == ni):
        # trim from the right
        self.alloc_intervals.insert(pos, (ci, value[0], xi[:(self._seq_start[value[0]][0] - self._seq_start[ci][0])]))
        del self.alloc_intervals[(pos + 1)]
        return 0
    else:
        # remove from the middle: split into two intervals (right part first, then left part)
        self.alloc_intervals.insert(pos, (value[1], ni, xi[(self._seq_start[value[1]][0] - self._seq_start[ci][0]):]))
        self.alloc_intervals.insert(pos, (ci, value[0], xi[:(self._seq_start[value[0]][0] - self._seq_start[ci][0])]))
        del self.alloc_intervals[(pos + 2)]
        return 1
def _modify_alloc_intervals(self, start, end, invert):
    """
    Inserts/removes sorted seq idx range (start,end).

    :param int start: like in load_seqs(), sorted seq idx
    :param int end: like in load_seqs(), sorted seq idx
    :param bool invert: True->insert, False->remove
    :rtype: list[int]
    :return selection list, modified sorted seq idx in self.alloc_intervals
    """
    if (end is None):
        end = (start + 1)
    if (start == end):
        return
    assert (start < end)
    i = 0
    selection = []
    modify = (self._insert_alloc_interval if invert else self._remove_alloc_interval)
    # For insert (invert=True) we walk the gaps between consecutive intervals,
    # for remove (invert=False) we walk the intervals themselves; in both cases
    # [ci,ni) below is the currently considered range.
    while (i < (len(self.alloc_intervals) - invert)):
        ni = self.alloc_intervals[(i + invert)][(1 - invert)]
        ci = self.alloc_intervals[i][invert]
        assert (ci <= ni)
        # flag[0]: start falls inside [ci,ni); flag[1]: end falls inside (ci,ni];
        # flag[2]: (start,end) lies completely outside [ci,ni)
        flag = ((ci <= start < ni), (ci < end <= ni), (((ci < start) and (ni <= start)) or ((ci >= end) and (ni > end))))
        if ((not flag[0]) and (not flag[1])):
            if (not flag[2]):
                # the requested range fully covers [ci,ni)
                selection.extend(range(ci, ni))
                i += modify(i, (ci, ni))
        elif flag[1]:
            # the requested range ends within [ci,ni): handle the tail and stop
            v = ((start if flag[0] else ci), end)
            selection.extend(range(v[0], v[1]))
            i += modify(i, v)
            break
        elif flag[0]:
            # the requested range starts within [ci,ni)
            selection.extend(range(start, ni))
            i += modify(i, (start, ni))
        i += 1
    # keep empty sentinel intervals at both ends of the list
    if (self.alloc_intervals[0][0] != 0):
        self.alloc_intervals.insert(0, (0, 0, numpy.zeros(([1] + self.get_data_shape('data')), dtype=self.get_data_dtype('data'))))
    if (self.alloc_intervals[(- 1)][1] != self.num_seqs):
        self.alloc_intervals.append((self.num_seqs, self.num_seqs, numpy.zeros(([1] + self.get_data_shape('data')), dtype=self.get_data_dtype('data'))))
    return selection
def insert_alloc_interval(self, start, end=None):
    """
    Insert a zero-initialized cache range for sorted seq idx [start,end)
    (end defaults to start+1).

    :param int start: sorted seq idx
    :param int|None end: sorted seq idx, exclusive
    :rtype: list[int]
    """
    return self._modify_alloc_intervals(start, end, True)
def remove_alloc_interval(self, start, end=None):
    """
    Remove the cached range for sorted seq idx [start,end)
    (end defaults to start+1).

    :param int start: sorted seq idx
    :param int|None end: sorted seq idx, exclusive
    :rtype: list[int]
    """
    return self._modify_alloc_intervals(start, end, False)
def delete(self, nframes):
    """
    Delete cached data (outside the persistent start-cache) to free frames.

    :param int|None nframes: how much frames to delete max.
      Note that this limit is not strict. We can end up
      deleting more than nframes.
    :return: number of frames deleted
    :rtype: int
    """
    if nframes is not None:
        if nframes == 0:
            return 0
        assert nframes > 0
    num_deleted = 0
    idx = 0
    while (not nframes or num_deleted < nframes) and idx < len(self.alloc_intervals):
        interval = self.alloc_intervals[idx]
        # only touch non-empty intervals which extend beyond the persistent start-cache
        if interval[1] > self.num_seqs_cached_at_start and interval[0] < interval[1]:
            removed = self.remove_alloc_interval(max(interval[0], self.num_seqs_cached_at_start), interval[1])
            self.preload_set -= set(removed)
            num_deleted += sum([self._get_seq_length_by_real_idx(self._seq_index[j])[0] for j in removed])
        else:
            idx += 1
    return num_deleted
@property
def num_seqs(self):
    """
    :return: number of sequences in the current epoch selection
    :rtype: int
    """
    return len(self._index_map)
def is_cached(self, start, end, blocking=False):
    """
    :param int start: like in load_seqs(), sorted seq idx
    :param int end: like in load_seqs(), sorted seq idx (exclusive)
    :param bool blocking: if the range is within the scheduled preload range, wait for it
    :rtype: bool
    :returns whether we have the full range (start,end) of sorted seq idx
      cached in self.alloc_intervals (end is exclusive).
    """
    if self.cache_byte_size_total_limit == 0:
        # caching is disabled
        return False
    if start == end:
        # the empty range is trivially cached
        return True
    assert start < end
    needed = set(range(start, end))
    if blocking and end <= self.preload_end:
        # the range is being preloaded in the background; wait until it arrives
        while not needed.issubset(self.preload_set):
            time.sleep(0.2)
        return True
    return needed.issubset(self.preload_set)
def _get_seq_length_by_real_idx(self, real_seq_idx):
    """
    To be implemented by subclasses.

    :param int real_seq_idx:
    :returns length of the sequence with index 'real_seq_idx',
      one entry per data stream (data + targets)
    :rtype: numpy.ndarray
    """
    raise NotImplementedError
def get_seq_length_nd(self, sorted_seq_idx):
    """
    :type sorted_seq_idx: int
    :rtype: numpy.ndarray
    """
    # map sorted seq idx -> real (corpus) seq idx, then look up its lengths
    real_seq_idx = self._seq_index[self._index_map[sorted_seq_idx]]
    return self._get_seq_length_by_real_idx(real_seq_idx)
def get_seq_length(self, seq_idx):
    """
    :param int seq_idx: sorted seq idx
    :rtype: NumbersDict
    """
    lengths = self.get_seq_length_nd(seq_idx)
    res = {}
    # lengths[0] is the input data length iff we have dense inputs;
    # the remaining entries belong to the target streams, in target_keys order
    offset = 1 if self.num_inputs > 0 else 0
    if offset:
        res['data'] = lengths[0]
    res.update(zip(self.target_keys, lengths[offset:]))
    return NumbersDict(res)
def get_seq_start(self, sorted_seq_idx):
    """
    :type sorted_seq_idx: int
    :return: frame start offsets for this seq, one entry per data stream
    :rtype: (int,int)
    """
    return self._seq_start[sorted_seq_idx]
def get_times(self, sorted_seq_idx):
    """
    :param int sorted_seq_idx:
    :return: slice of self.timestamps covering the frames of this seq
    """
    seq_start = self.get_seq_start(sorted_seq_idx)[0]
    seq_len = self.get_seq_length_nd(sorted_seq_idx)[0]
    return self.timestamps[seq_start:(seq_start + seq_len)]
def get_input_data(self, sorted_seq_idx):
    """
    :param int sorted_seq_idx:
    :return: the cached input features of this seq
    :rtype: numpy.ndarray
    """
    seq_idx = self._index_map[sorted_seq_idx]
    interval_idx = self.alloc_interval_index(seq_idx)
    assert interval_idx >= 0, 'failed to get data for seq %i' % sorted_seq_idx
    alloc_start_seq, _, alloc_data = self.alloc_intervals[interval_idx]
    # frame offset of this seq within the interval's data buffer
    offset = self.get_seq_start(seq_idx)[0] - self.get_seq_start(alloc_start_seq)[0]
    assert offset >= 0
    num_frames = self.get_seq_length_nd(sorted_seq_idx)[0]
    assert alloc_data.shape[0] >= offset + num_frames
    return alloc_data[offset:offset + num_frames]
def get_data_dim(self, key):
    """
    :param str key: e.g. "data" or "classes"
    :return: feature dimension for this data key
    :rtype: int
    """
    if key != 'data' or self.num_inputs <= 0:
        return self.num_outputs[key][0]
    # dense input data: the dim is multiplied by the sliding-window size
    return self.num_inputs * self.window
def get_targets(self, target, sorted_seq_idx):
    """
    :param str target: target data key
    :param int sorted_seq_idx:
    :return: cached target values for this seq
    """
    seq_idx = self._index_map[sorted_seq_idx]
    # position of this target stream in the length/start arrays (0 is input data)
    idx = (self.target_keys.index(target) + 1)
    seq_start = self.get_seq_start(seq_idx)[idx]
    seq_len = self.get_seq_length_nd(sorted_seq_idx)[idx]
    return self.targets[target][seq_start:(seq_start + seq_len)]
def get_target_list(self):
    """
    :return: all target data keys
    :rtype: list[str]
    """
    return [key for key in self.targets]
def get_tag(self, sorted_seq_idx):
    """
    To be implemented by subclasses.

    :param int sorted_seq_idx:
    :rtype: str
    """
    raise NotImplementedError
def have_corpus_seq_idx(self):
    """
    :return: whether get_corpus_seq_idx() is defined for this dataset
    :rtype: bool
    """
    return True
def get_corpus_seq_idx(self, seq_idx):
    """
    :param int seq_idx: sorted sequence index from the current epoch, depending on seq_ordering
    :return: the sequence index as-is in the original corpus. only defined if self.have_corpus_seq_idx()
    :rtype: int
    """
    return self._seq_index[self._index_map[seq_idx]]
|
class CachedDataset2(Dataset):
    """
    Somewhat like CachedDataset, but different.
    Simpler in some sense. And more generic. Caching might be worse.

    If you derive from this class:
    - you must override `_collect_single_seq`
    - you must set `num_inputs` (dense-dim of "data" key) and `num_outputs` (dict key -> dim, ndim-1)
    - you should set `labels`
    - handle seq ordering by overriding `init_seq_order`
    - you can set `_estimated_num_seqs`
    - you can set `_num_seqs` or `_num_timesteps` if you know them in advance
    """

    def __init__(self, **kwargs):
        super(CachedDataset2, self).__init__(**kwargs)
        self._num_timesteps = None  # total num frames, if known in advance
        self.epoch = None
        self.reached_final_seq = False
        self.added_data = []  # currently loaded seqs (DatasetSeq), ordered by seq idx
        self.expected_load_seq_start = 0  # lowest seq idx which load_seqs() may still be called with
        self._num_timesteps_accumulated = 0

    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        :param int|None epoch:
        :param list[str]|None seq_list: List of sequence tags, to set a predefined order.
        :param list[int]|None seq_order: List of corpus sequence indices, to set a predefined order. Only possible
          if the dataset has such indices (see self.have_corpus_seq_idx()).
        :rtype: bool
        :returns whether the order changed (True is always safe to return)

        This is called when we start a new epoch, or at initialization.
        Call this when you reset the seq list.
        """
        super(CachedDataset2, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
        self.expected_load_seq_start = 0
        self.reached_final_seq = False
        self.added_data = []
        self._num_timesteps_accumulated = 0
        self._num_seqs = None
        self.epoch = epoch
        return True

    def _cleanup_old_seqs(self, seq_idx_end):
        """
        Drop all cached seqs with seq idx below seq_idx_end.

        :param int seq_idx_end:
        """
        i = 0
        while (i < len(self.added_data)):
            if (self.added_data[i].seq_idx >= seq_idx_end):
                break
            i += 1
        del self.added_data[:i]

    def _get_seq(self, seq_idx):
        """
        :param int seq_idx:
        :rtype: DatasetSeq|None
        """
        for data in self.added_data:
            if (data.seq_idx == seq_idx):
                return data
        return None

    def is_cached(self, start, end):
        """
        :param int start:
        :param int end:
        :rtype: bool
        """
        # we have no persistent cache here
        return False

    @property
    def num_seqs(self):
        """
        :rtype: int
        """
        if (self._num_seqs is not None):
            return self._num_seqs
        if (self.epoch is None):
            # not initialized yet
            return 0
        raise NotImplementedError

    def _load_seqs(self, start, end):
        """
        :param int start: inclusive seq idx start
        :param int end: exclusive seq idx end. can be more than num_seqs
          If end > num_seqs, will not load them.
        """
        assert (start >= self.expected_load_seq_start)
        if (start > self.expected_load_seq_start):
            # cleanup everything below start
            self._cleanup_old_seqs(start)
            self.expected_load_seq_start = start
        if self.added_data:
            # don't collect seqs we already have
            start = max((self.added_data[(- 1)].seq_idx + 1), start)
        seqs = [self._collect_single_seq(seq_idx=seq_idx) for seq_idx in range(start, end)]
        seqs = list(filter(None, seqs))  # filter out None (i.e. seqs beyond num_seqs)
        self._num_timesteps_accumulated += sum([seq.num_frames for seq in seqs])
        self.added_data += seqs

    def is_less_than_num_seqs(self, n):
        """
        :param int n:
        :rtype: int
        """
        if (n < self.expected_load_seq_start):
            return True
        if (self.epoch is None):
            return False
        try:
            # this works if num_seqs is already known
            return super(CachedDataset2, self).is_less_than_num_seqs(n)
        except Exception:
            # num_seqs not known yet; try to load up to seq n and see whether it exists
            assert (n >= self.expected_load_seq_start)
            self._load_seqs(self.expected_load_seq_start, (n + 1))
            if (self._get_seq(n) is not None):
                return True
            assert self.added_data, 'Not a single seq was loaded?'
            # loading stopped early, so now we know the real num_seqs
            self._num_seqs = (self.added_data[(- 1)].seq_idx + 1)
            assert (n >= self._num_seqs)
            self.reached_final_seq = True
            return False

    def _collect_single_seq(self, seq_idx: int) -> Optional[DatasetSeq]:
        """
        To be implemented by subclasses.

        :param seq_idx:
        :returns DatasetSeq or None if seq_idx >= num_seqs.
        """
        raise NotImplementedError

    def get_num_timesteps(self):
        """
        :rtype: int
        """
        if (self._num_timesteps is not None):
            return self._num_timesteps
        else:
            # only exact once we have seen all seqs
            assert self.reached_final_seq
            return self._num_timesteps_accumulated

    def _load_something(self):
        # ensure at least one seq is loaded, so that we can inspect its data below
        if self.added_data:
            return
        self.load_seqs(self.expected_load_seq_start, (self.expected_load_seq_start + 1))

    def get_seq_length(self, sorted_seq_idx):
        """
        :type sorted_seq_idx: int
        :rtype: returnn.util.NumbersDict
        """
        assert (sorted_seq_idx >= self.expected_load_seq_start)
        self.load_seqs(self.expected_load_seq_start, (sorted_seq_idx + 1))
        return self._get_seq(sorted_seq_idx).num_frames

    def get_data(self, seq_idx, key):
        """
        :param int seq_idx:
        :param str key:
        :rtype: numpy.ndarray
        """
        return self._get_seq(seq_idx).features[key]

    def get_input_data(self, seq_idx):
        """
        :param int seq_idx:
        :rtype: numpy.ndarray
        """
        return self.get_data(seq_idx, 'data')

    def get_targets(self, target, seq_idx):
        """
        :param str target:
        :param int seq_idx:
        :rtype: numpy.ndarray
        """
        return self.get_data(seq_idx, target)

    def get_tag(self, sorted_seq_idx):
        """
        :param int sorted_seq_idx:
        :rtype: str
        """
        self.load_seqs(self.expected_load_seq_start, (sorted_seq_idx + 1))
        return self._get_seq(sorted_seq_idx).seq_tag

    def get_data_keys(self):
        """
        :rtype: list[str]
        """
        self._load_something()
        return sorted(self.added_data[0].get_data_keys())

    def get_target_list(self):
        """
        Target data keys are usually not available during inference.
        Overwrite this if your dataset is more custom.
        """
        keys = list(self.get_data_keys())
        if ('data' in keys):
            keys.remove('data')
        return keys

    def is_data_sparse(self, key):
        """
        :param str key: e.g. "data" or "classes"
        :rtype: bool
        """
        if (key in self.num_outputs):
            return (self.num_outputs[key][1] == 1)
        self._load_something()
        # heuristic: sparse data is 1-dim (class indices), dense data has a feature axis
        return (len(self.added_data[0].features[key].shape) == 1)

    def get_data_dim(self, key):
        """
        :param str key: e.g. "data" or "classes"
        :rtype: int
        :return: number of classes, no matter if sparse or not
        """
        if (key in self.num_outputs):
            d = self.num_outputs[key][0]
            if (self.added_data and (not self.is_data_sparse(key))):
                # sanity check against the actual data
                assert (self.added_data[0].get_data(key).shape[1] == d)
            return d
        self._load_something()
        if (len(self.added_data[0].get_data(key).shape) == 1):
            # sparse data: fall back to the generic implementation
            return super(CachedDataset2, self).get_data_dim(key)
        assert (len(self.added_data[0].get_data(key).shape) == 2)
        return self.added_data[0].get_data(key).shape[1]

    def get_data_dtype(self, key):
        """
        :param str key:
        :rtype: str
        """
        self._load_something()
        return str(self.added_data[0].get_data(key).dtype)
|
class SingleStreamPipeDataset(CachedDataset2):
    """
    Producer: Gets data from somewhere / an external source, running in some thread.
    Consumer: The thread / code which calls load_seqs and get_data here.
    """

    def __init__(self, dim, ndim, sparse=False, dtype='float32'):
        """
        :param int dim:
        :param int ndim:
        :param bool sparse:
        :param str dtype:
        """
        super(SingleStreamPipeDataset, self).__init__()
        self.num_inputs = dim
        self.num_outputs = {'data': [dim, ndim]}
        self.sparse = sparse
        self.dtype = dtype
        # guards producer_seq_idx / producer_data / producer_finished,
        # which are shared between producer and consumer threads
        self.condition = Condition()
        self.producer_seq_idx = 0
        self.producer_data = []
        self.producer_finished = False

    def is_data_sparse(self, key):
        """
        :param str key:
        :rtype: bool
        """
        return self.sparse

    def get_data_dtype(self, key):
        """
        :param str key:
        :rtype: str
        """
        return self.dtype

    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        :param int epoch:
        :param list[str]|None seq_list: not supported here
        :param list[int]|None seq_order: not supported here
        :rtype: bool
        """
        assert ((not seq_list) and (not seq_order))
        super(SingleStreamPipeDataset, self).init_seq_order(epoch=epoch)
        with self.condition:
            # reset the producer state for the new epoch
            self.producer_seq_idx = 0
            self.producer_data.clear()
            self.producer_finished = False
        return True

    def producer_add_data(self, data, seq_tag=None):
        """
        Called by the producer thread to feed in the next sequence.

        :param numpy.ndarray data:
        :param str|None seq_tag: auto-generated from the seq idx if not given
        """
        with self.condition:
            if (seq_tag is None):
                seq_tag = ('seq-%i' % self.producer_seq_idx)
            seq = DatasetSeq(features=data, seq_idx=self.producer_seq_idx, seq_tag=seq_tag)
            self.producer_seq_idx += 1
            self.producer_data.append(seq)
            # wake up a consumer waiting in _collect_single_seq
            self.condition.notify()

    def producer_set_finished(self):
        """
        Mark finished.
        """
        with self.condition:
            self.producer_finished = True
            self.condition.notify()

    def _collect_single_seq(self, seq_idx):
        """
        Blocks until the producer has delivered the seq (or has finished).

        :type seq_idx: int
        :rtype: DatasetSeq | None
        :returns DatasetSeq or None if seq_idx >= num_seqs.
        """
        with self.condition:
            while True:
                if self.producer_data:
                    seq = self.producer_data.pop(0)
                    assert isinstance(seq, DatasetSeq)
                    # seqs must be consumed strictly in producer order
                    assert (seq.seq_idx == seq_idx)
                    return seq
                if self.producer_finished:
                    return None
                self.condition.wait()
|
class LmDataset(CachedDataset2):
    """
    Dataset useful for language modeling.
    It creates index sequences for either words, characters or other orthographics symbols based on a vocabulary.
    Can also perform internal word to phoneme conversion with a lexicon file.
    Reads simple txt files or bliss xml files (also gzipped).
    """

    def __init__(self, corpus_file, skip_empty_lines=True, orth_symbols_file=None, orth_symbols_map_file=None,
                 orth_replace_map_file=None, word_based=False, word_end_symbol=None, seq_end_symbol='[END]',
                 unknown_symbol='[UNKNOWN]', parse_orth_opts=None, phone_info=None, add_random_phone_seqs=0,
                 auto_replace_unknown_symbol=False, log_auto_replace_unknown_symbols=10, log_skipped_seqs=10,
                 error_on_invalid_seq=True, add_delayed_seq_data=False, delayed_seq_data_start_symbol='[START]',
                 **kwargs):
        """
        To use the LmDataset with words or characters,
        either ``orth_symbols_file`` or ``orth_symbols_map_file`` has to be specified (both is not possible).
        If words should be used, set ``word_based`` to True.

        The LmDataset also supports the conversion of words to phonemes with the help of the
        :class:`LmDataset.PhoneSeqGenerator` class. To enable this mode, the input parameters to
        :class:`LmDataset.PhoneSeqGenerator` have to be provided as dict in ``phone_info``.
        As a lexicon file has to be specified in this dict, ``orth_symbols_file`` and ``orth_symbols_map_file``
        are not used in this case.

        The LmDataset does not work without providing a vocabulary with any of the above mentioned ways.

        After initialization, the corpus is represented by self.orths (as a list of sequences).
        The vocabulary is given by self.orth_symbols and self.orth_symbols_map gives the corresponding
        mapping from symbol to integer index (in case ``phone_info`` is not set).

        :param str|()->str|list[str]|()->list[str] corpus_file: Bliss XML or line-based txt.
          optionally can be gzip.
        :param bool skip_empty_lines: for line-based txt
        :param str|()->str|None orth_symbols_file: a text file containing a list of orthography symbols
        :param str|()->str|None orth_symbols_map_file: either a list of orth symbols, each line: "<symbol> <index>",
          a python dict with {"<symbol>": <index>, ...} or a pickled dictionary
        :param str|()->str|None orth_replace_map_file: JSON file with replacement dict for orth symbols.
        :param bool word_based: whether to parse single words, or otherwise will be character based.
        :param str|None word_end_symbol: If provided and if word_based is False (character based modeling),
          token to be used to represent word ends.
        :param str|None seq_end_symbol: what to add at the end, if given.
          will be set as postfix=[seq_end_symbol] or postfix=[] for parse_orth_opts.
        :param str|None unknown_symbol: token to represent unknown words.
        :param dict[str]|None parse_orth_opts: kwargs for parse_orthography().
        :param dict|None phone_info: A dict containing parameters including a lexicon file for
          :class:`LmDataset.PhoneSeqGenerator`.
        :param int add_random_phone_seqs: will add random seqs with the same len as the real seq as additional data.
        :param bool|int log_auto_replace_unknown_symbols: write about auto-replacements with unknown symbol.
          if this is an int, it will only log the first N replacements, and then keep quiet.
        :param bool|int log_skipped_seqs: write about skipped seqs to logging, due to missing lexicon entry or so.
          if this is an int, it will only log the first N entries, and then keep quiet.
        :param bool error_on_invalid_seq: if there is a seq we would have to skip, error.
        :param bool add_delayed_seq_data: will add another data-key "delayed" which will have the sequence
          delayed_seq_data_start_symbol + original_sequence[:-1].
        :param str delayed_seq_data_start_symbol: used for add_delayed_seq_data.
        """
        super(LmDataset, self).__init__(**kwargs)
        if callable(corpus_file):
            corpus_file = corpus_file()
        if callable(orth_symbols_file):
            orth_symbols_file = orth_symbols_file()
        if callable(orth_symbols_map_file):
            orth_symbols_map_file = orth_symbols_map_file()
        if callable(orth_replace_map_file):
            orth_replace_map_file = orth_replace_map_file()
        print('LmDataset, loading file', corpus_file, file=log.v4)
        self.word_based = word_based
        self.word_end_symbol = word_end_symbol
        self.seq_end_symbol = seq_end_symbol
        self.unknown_symbol = unknown_symbol
        self.parse_orth_opts = parse_orth_opts or {}
        self.parse_orth_opts.setdefault('word_based', self.word_based)
        if self.word_end_symbol and not self.word_based:
            # character-based modeling with explicit word-end tokens
            self.parse_orth_opts.setdefault(
                'postfix',
                [self.word_end_symbol, self.seq_end_symbol] if self.seq_end_symbol is not None
                else [self.word_end_symbol])
        else:
            self.parse_orth_opts.setdefault(
                'postfix', [self.seq_end_symbol] if self.seq_end_symbol is not None else [])
        # Exactly one vocabulary source is used: orth_symbols_file, orth_symbols_map_file or phone_info.
        # Bugfix: this must be a single if/elif chain. Previously the orth_symbols_map_file case
        # started a new `if`, so providing only orth_symbols_file fell through into the final
        # else branch and failed its `assert not orth_symbols_file`.
        if orth_symbols_file:
            assert not phone_info
            assert not orth_symbols_map_file
            # bugfix: close the file again (the handle was leaked before)
            with open(orth_symbols_file) as f:
                orth_symbols = f.read().splitlines()
            self.orth_symbols_map = {sym: i for (i, sym) in enumerate(orth_symbols)}
            self.orth_symbols = orth_symbols
            self.labels['data'] = orth_symbols
            self.seq_gen = None
        elif orth_symbols_map_file and orth_symbols_map_file.endswith('.pkl'):
            import pickle
            with open(orth_symbols_map_file, 'rb') as f:
                self.orth_symbols_map = pickle.load(f)
            self.orth_symbols = self.orth_symbols_map.keys()
            reverse_map = {i: sym for (sym, i) in sorted(self.orth_symbols_map.items())}
            self.labels['data'] = [sym for (i, sym) in sorted(reverse_map.items())]
            self.seq_gen = None
        elif orth_symbols_map_file:
            assert not phone_info
            with open(orth_symbols_map_file, 'r') as f:
                # heuristic: check whether the file looks like a python dict literal
                test_string = f.read(1024).replace(' ', '').replace('\n', '')
                match = re.search('^{["\'].+["\']:[0-9]+,', test_string)
                f.seek(0)
                if match is not None:
                    d = literal_eval(f.read())
                    orth_symbols_imap_list = [(int(v), k) for (k, v) in d.items()]
                    orth_symbols_imap_list.sort()
                else:
                    # line-based "<symbol> <index>" format
                    orth_symbols_imap_list = [
                        (int(b), a) for (a, b) in [line.split(None, 1) for line in f.read().splitlines()]]
                    orth_symbols_imap_list.sort()
            assert orth_symbols_imap_list[0][0] == 0
            self.orth_symbols_map = {sym: i for (i, sym) in orth_symbols_imap_list}
            self.orth_symbols = [sym for (i, sym) in orth_symbols_imap_list]
            reverse_map = {i: sym for (i, sym) in orth_symbols_imap_list}
            self.labels['data'] = [sym for (i, sym) in sorted(reverse_map.items())]
            self.seq_gen = None
        else:
            assert not orth_symbols_file
            assert isinstance(phone_info, dict)
            self.seq_gen = PhoneSeqGenerator(**phone_info)
            self.orth_symbols = None
            self.labels['data'] = self.seq_gen.get_class_labels()
        if orth_replace_map_file:
            orth_replace_map = load_json(filename=orth_replace_map_file)
            assert isinstance(orth_replace_map, dict)
            self.orth_replace_map = {
                key: parse_orthography_into_symbols(v, word_based=self.word_based)
                for (key, v) in orth_replace_map.items()}
            if self.orth_replace_map:
                if len(self.orth_replace_map) <= 5:
                    print(' orth_replace_map: %r' % self.orth_replace_map, file=log.v5)
                else:
                    print(' orth_replace_map: %i entries' % len(self.orth_replace_map), file=log.v5)
        else:
            self.orth_replace_map = {}
        if word_end_symbol and not word_based:
            # replace all spaces by the word-end symbol
            self.orth_replace_map[' '] = [word_end_symbol]
        num_labels = len(self.labels['data'])
        use_uint_types = False
        if BackendEngine.is_tensorflow_selected():
            use_uint_types = True
        # pick the smallest integer dtype which can hold all label indices
        if num_labels <= 2 ** 7:
            self.dtype = 'int8'
        elif num_labels <= 2 ** 8 and use_uint_types:
            self.dtype = 'uint8'
        elif num_labels <= 2 ** 31:
            self.dtype = 'int32'
        elif num_labels <= 2 ** 32 and use_uint_types:
            self.dtype = 'uint32'
        elif num_labels <= 2 ** 61:
            self.dtype = 'int64'
        elif num_labels <= 2 ** 62 and use_uint_types:
            self.dtype = 'uint64'
        else:
            raise Exception('cannot handle so much labels: %i' % num_labels)
        self.num_outputs = {'data': [num_labels, 1]}
        self.num_inputs = num_labels
        self.seq_order = None
        self._tag_prefix = 'line-'
        self.auto_replace_unknown_symbol = auto_replace_unknown_symbol
        self.log_auto_replace_unknown_symbols = log_auto_replace_unknown_symbols
        self.log_skipped_seqs = log_skipped_seqs
        self.error_on_invalid_seq = error_on_invalid_seq
        self.add_random_phone_seqs = add_random_phone_seqs
        for i in range(add_random_phone_seqs):
            self.num_outputs['random%i' % i] = self.num_outputs['data']
        self.add_delayed_seq_data = add_delayed_seq_data
        self.delayed_seq_data_start_symbol = delayed_seq_data_start_symbol
        if add_delayed_seq_data:
            self.num_outputs['delayed'] = self.num_outputs['data']
            self.labels['delayed'] = self.labels['data']
        if isinstance(corpus_file, list):
            self.orths = []
            for file_name in corpus_file:
                self.orths += read_corpus(file_name, skip_empty_lines=skip_empty_lines)
        else:
            self.orths = read_corpus(corpus_file, skip_empty_lines=skip_empty_lines)
        self._estimated_num_seqs = len(self.orths) // self.partition_epoch
        print(' done, loaded %i sequences' % len(self.orths), file=log.v4)
        self.next_orth_idx = 0
        self.next_seq_idx = 0
        self.num_skipped = 0
        self.num_unknown = 0

    def get_data_keys(self):
        """
        :rtype: list[str]
        """
        return sorted(self.num_outputs.keys())

    def get_target_list(self):
        """
        Unfortunately, the logic is swapped around for this dataset.
        "data" is the original data, which is usually the target,
        and you would use "delayed" as inputs.

        :rtype: list[str]
        """
        return ['data']

    def get_data_dtype(self, key):
        """
        :param str key:
        :rtype: str
        """
        return self.dtype

    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        If random_shuffle_epoch1, for epoch 1 with "random" ordering, we leave the given order as is.
        Otherwise, this is mostly the default behavior.

        :param int|None epoch:
        :param list[str]|None seq_list: List of sequence tags, to set a predefined order.
        :param list[int]|None seq_order: List of corpus sequence indices, to set a predefined order.
        :rtype: bool
        :returns whether the order changed (True is always safe to return)
        """
        if seq_list and not self.error_on_invalid_seq:
            print('Setting error_on_invalid_seq to True since a seq_list is given. '
                  'Please activate auto_replace_unknown_symbol if you want to prevent invalid sequences!',
                  file=log.v4)
            self.error_on_invalid_seq = True
        super(LmDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
        if seq_order is not None:
            self.seq_order = seq_order
        elif seq_list is not None:
            # seq tags are "<prefix><corpus idx>", so strip the prefix
            self.seq_order = [int(s[len(self._tag_prefix):]) for s in seq_list]
        else:
            self.seq_order = self.get_seq_order_for_epoch(
                epoch=epoch, num_seqs=len(self.orths), get_seq_len=lambda i: len(self.orths[i]))
        self.next_orth_idx = 0
        self.next_seq_idx = 0
        self.num_skipped = 0
        self.num_unknown = 0
        if self.seq_gen:
            self.seq_gen.random_seed(self._get_random_seed_for_epoch(epoch))
        return True

    def supports_seq_order_sorting(self) -> bool:
        """supports sorting"""
        return True

    def get_total_num_seqs(self) -> int:
        """total num seqs"""
        return len(self.orths)

    def _reduce_log_skipped_seqs(self):
        # count down the log budget for skipped-seq messages; silence once exhausted
        if isinstance(self.log_skipped_seqs, bool):
            return
        assert isinstance(self.log_skipped_seqs, int)
        assert self.log_skipped_seqs >= 1
        self.log_skipped_seqs -= 1
        if not self.log_skipped_seqs:
            print('LmDataset: will stop logging about skipped sequences now', file=log.v4)

    def _reduce_log_auto_replace_unknown_symbols(self):
        # count down the log budget for unknown-symbol messages; silence once exhausted
        if isinstance(self.log_auto_replace_unknown_symbols, bool):
            return
        assert isinstance(self.log_auto_replace_unknown_symbols, int)
        assert self.log_auto_replace_unknown_symbols >= 1
        self.log_auto_replace_unknown_symbols -= 1
        if not self.log_auto_replace_unknown_symbols:
            print('LmDataset: will stop logging about auto-replace with unknown symbol now', file=log.v4)

    def _collect_single_seq(self, seq_idx):
        """
        :type seq_idx: int
        :rtype: DatasetSeq | None
        :returns DatasetSeq or None if seq_idx >= num_seqs.
        """
        while True:
            if self.next_orth_idx >= len(self.seq_order):
                assert self.next_seq_idx <= seq_idx, 'We expect that we iterate through all seqs.'
                if self.num_skipped > 0:
                    print('LmDataset: reached end, skipped %i sequences' % self.num_skipped)
                return None
            assert self.next_seq_idx == seq_idx, 'We expect that we iterate through all seqs.'
            true_idx = self.seq_order[self.next_orth_idx]
            orth = self.orths[true_idx]
            seq_tag = self._tag_prefix + str(true_idx)
            self.next_orth_idx += 1
            if orth == '</s>':
                continue  # special sentence-end symbol; ignore
            if self.seq_gen:
                # phoneme-based mode via PhoneSeqGenerator
                try:
                    phones = self.seq_gen.generate_seq(orth)
                except KeyError as e:
                    if self.log_skipped_seqs:
                        print('LmDataset: skipping sequence %r because of missing lexicon entry: %s' % (orth, e),
                              file=log.v4)
                        self._reduce_log_skipped_seqs()
                    if self.error_on_invalid_seq:
                        raise Exception('LmDataset: invalid seq %r, missing lexicon entry %r' % (orth, e))
                    self.num_skipped += 1
                    continue
                data = self.seq_gen.seq_to_class_idxs(phones, dtype=self.dtype)
            elif self.orth_symbols:
                # orthography-based mode (words or characters)
                orth_syms = parse_orthography(orth, **self.parse_orth_opts)
                while True:
                    orth_syms = sum([self.orth_replace_map.get(s, [s]) for s in orth_syms], [])
                    i = 0
                    # collapse consecutive space symbols into a single one
                    space_symbol = self.word_end_symbol if self.word_end_symbol and not self.word_based else ' '
                    while i < len(orth_syms) - 1:
                        if orth_syms[i:i + 2] == [space_symbol, space_symbol]:
                            orth_syms[i:i + 2] = [space_symbol]
                        else:
                            i += 1
                    if self.auto_replace_unknown_symbol:
                        try:
                            list(map(self.orth_symbols_map.__getitem__, orth_syms))
                        except KeyError as e:
                            if sys.version_info >= (3, 0):
                                orth_sym = e.args[0]
                            else:
                                orth_sym = e.message
                            if self.log_auto_replace_unknown_symbols:
                                print('LmDataset: unknown orth symbol %r, adding to orth_replace_map as %r'
                                      % (orth_sym, self.unknown_symbol), file=log.v3)
                                self._reduce_log_auto_replace_unknown_symbols()
                            self.orth_replace_map[orth_sym] = (
                                [self.unknown_symbol] if self.unknown_symbol is not None else [])
                            continue  # redo the mapping with the updated orth_replace_map
                    break
                self.num_unknown += orth_syms.count(self.unknown_symbol)
                if self.word_based:
                    orth_debug_str = repr(orth_syms)
                else:
                    orth_debug_str = repr(''.join(orth_syms))
                try:
                    data = numpy.array(list(map(self.orth_symbols_map.__getitem__, orth_syms)), dtype=self.dtype)
                except KeyError as e:
                    if self.log_skipped_seqs:
                        print('LmDataset: skipping sequence %s because of missing orth symbol: %s'
                              % (orth_debug_str, e), file=log.v4)
                        self._reduce_log_skipped_seqs()
                    if self.error_on_invalid_seq:
                        raise Exception('LmDataset: invalid seq %s, missing orth symbol %s' % (orth_debug_str, e))
                    self.num_skipped += 1
                    continue
            else:
                assert False  # cannot happen; one of the vocab sources is always set in __init__
            targets = {}
            for i in range(self.add_random_phone_seqs):
                assert self.seq_gen  # random phone seqs are only implemented for the phoneme mode
                phones = self.seq_gen.generate_garbage_seq(target_len=data.shape[0])
                targets['random%i' % i] = self.seq_gen.seq_to_class_idxs(phones, dtype=self.dtype)
            if self.add_delayed_seq_data:
                # "delayed" = start symbol + data shifted right by one
                targets['delayed'] = numpy.concatenate(
                    ([self.orth_symbols_map[self.delayed_seq_data_start_symbol]], data[:-1])).astype(self.dtype)
                assert targets['delayed'].shape == data.shape
            self.next_seq_idx = seq_idx + 1
            return DatasetSeq(seq_idx=seq_idx, features=data, targets=targets, seq_tag=seq_tag)
|
def _is_bliss(filename):
    """
    Check whether the given file looks like a Bliss XML corpus,
    i.e. whether its XML root element is <corpus>.

    :param str filename: XML or text file, optionally gzipped
    :rtype: bool
    """
    try:
        # bugfix: use a context manager so the file handle is closed again
        # (it was leaked before, also on parse errors)
        with open(filename, 'rb') as raw_file:
            corpus_file = raw_file
            if filename.endswith('.gz'):
                corpus_file = gzip.GzipFile(fileobj=raw_file)
            context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
            (_, root) = next(context)  # start event of the root element
            assert isinstance(root, ElementTree.Element)
            return (root.tag == 'corpus')
    except IOError:  # e.g. file does not exist
        pass
    except ElementTree.ParseError:  # no valid XML
        pass
    return False
|
def _iter_bliss(filename, callback):
'\n :param str filename:\n :param (str)->None callback:\n '
corpus_file = open(filename, 'rb')
if filename.endswith('.gz'):
corpus_file = gzip.GzipFile(fileobj=corpus_file)
def getelements(tag):
'\n Yield *tag* elements from *filename_or_file* xml incrementally.\n\n :param str tag:\n '
context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
(_, root) = next(context)
tree_ = [root]
for (event, elem_) in context:
if (event == 'start'):
tree_ += [elem_]
elif (event == 'end'):
assert (tree_[(- 1)] is elem_)
tree_ = tree_[:(- 1)]
if ((event == 'end') and (elem_.tag == tag)):
(yield (tree_, elem_))
root.clear()
for (tree, elem) in getelements('segment'):
elem_orth = elem.find('orth')
orth_raw = (elem_orth.text or '')
orth_split = orth_raw.split()
orth = ' '.join(orth_split)
callback(orth)
|
def _iter_txt(filename, callback, skip_empty_lines=True):
'\n :param str filename:\n :param (str)->None callback:\n :param bool skip_empty_lines:\n '
f = open(filename, 'rb')
if filename.endswith('.gz'):
f = gzip.GzipFile(fileobj=f)
for line in f:
try:
line = line.decode('utf8')
except UnicodeDecodeError:
line = line.decode('latin_1')
line = line.strip()
if (skip_empty_lines and (not line)):
continue
callback(line)
|
def iter_corpus(filename, callback, skip_empty_lines=True):
    """
    Iterate over the orthographies of a corpus file, dispatching on its
    format: Bliss XML or line-based plain text.

    :param str filename:
    :param ((str)->None) callback: called once per orthography/line
    :param bool skip_empty_lines: only relevant for line-based text
    """
    if not _is_bliss(filename):
        _iter_txt(filename=filename, callback=callback, skip_empty_lines=skip_empty_lines)
    else:
        _iter_bliss(filename=filename, callback=callback)
|
def read_corpus(filename, skip_empty_lines=True):
    """
    Read a whole corpus file into memory.

    :param str filename: either Bliss XML or line-based text
    :param bool skip_empty_lines: in case of line-based text, skip empty lines
    :return: list of orthographies
    :rtype: list[str]
    """
    collected = []
    iter_corpus(filename=filename, callback=collected.append, skip_empty_lines=skip_empty_lines)
    return collected
|
class AllophoneState:
    """
    Represents one allophone (phone with context) state (number, boundary).
    In Sprint, see AllophoneStateAlphabet::index().
    """

    id = None  # center phone (str)
    context_history = ()  # tuple of phones to the left, innermost (closest) first
    context_future = ()  # tuple of phones to the right, innermost (closest) first
    boundary = 0  # bit flags: 1 -> word-initial ("@i"), 2 -> word-final ("@f")
    state = None  # HMM state index within the allophone (int), or None
    _attrs = ['id', 'context_history', 'context_future', 'boundary', 'state']

    def __init__(self, id=None, state=None):
        """
        :param str id: phone
        :param int|None state:
        """
        self.id = id
        self.state = state

    def format(self):
        """
        :return: Sprint-style allophone-state string, e.g. "b{a+c}@i.1"
        :rtype: str
        """
        s = '%s{%s+%s}' % (self.id, '-'.join(self.context_history) or '#', '-'.join(self.context_future) or '#')
        if self.boundary & 1:
            s += '@i'
        if self.boundary & 2:
            s += '@f'
        if self.state is not None:
            s += '.%i' % self.state
        return s

    def __repr__(self):
        return self.format()

    def copy(self):
        """
        :return: shallow copy of self
        :rtype: AllophoneState
        """
        a = AllophoneState(id=self.id, state=self.state)
        for attr in self._attrs:
            if getattr(self, attr):  # only copy non-default (truthy) attributes
                setattr(a, attr, getattr(self, attr))
        return a

    def mark_initial(self):
        """Add the word-initial flag to self.boundary."""
        self.boundary = self.boundary | 1

    def mark_final(self):
        """Add the word-final flag to self.boundary."""
        self.boundary = self.boundary | 2

    def phoneme(self, ctx_offset, out_of_context_id=None):
        """
        See Sprint ContextPhonology::PhonemeInContext::phoneme().

        :param int ctx_offset: 0 for center, >0 for future, <0 for history
        :param str|None out_of_context_id: what to return outside of our context
        :return: phone-id at the offset
        :rtype: str
        """
        if ctx_offset == 0:
            return self.id
        if ctx_offset > 0:
            idx = ctx_offset - 1
            if idx >= len(self.context_future):
                return out_of_context_id
            return self.context_future[idx]
        # ctx_offset < 0
        idx = -ctx_offset - 1
        if idx >= len(self.context_history):
            return out_of_context_id
        return self.context_history[idx]

    def set_phoneme(self, ctx_offset, phone_id):
        """
        :param int ctx_offset: 0 for center, >0 for future, <0 for history.
            Context positions must be set in inner-out order
            (offset 1 before 2, -1 before -2, ...).
        :param str phone_id:
        """
        if ctx_offset == 0:
            self.id = phone_id
        elif ctx_offset > 0:
            idx = ctx_offset - 1
            assert idx == len(self.context_future)
            self.context_future = self.context_future + (phone_id,)
        else:
            idx = -ctx_offset - 1
            assert idx == len(self.context_history)
            self.context_history = self.context_history + (phone_id,)

    def phone_idx(self, ctx_offset, phone_idxs):
        """
        :param int ctx_offset: see self.phoneme()
        :param dict[str,int] phone_idxs:
        :return: phone index shifted by 1; 0 is reserved for out-of-context
        :rtype: int
        """
        phone = self.phoneme(ctx_offset=ctx_offset)
        if phone is None:
            return 0  # out-of-context class
        return phone_idxs[phone] + 1

    def index(self, phone_idxs, num_states=3, context_length=1):
        """
        Dense encoding of this allophone state as a single integer.
        See self.from_index() for the inverse function.
        And see Sprint NoStateTyingDense::classify().

        :param dict[str,int] phone_idxs: phone -> index (0-based)
        :param int num_states: how much state per allophone
        :param int context_length: how much left/right context
        :rtype: int
        """
        assert max(len(self.context_history), len(self.context_future)) <= context_length
        assert 0 <= self.boundary < 4  # 4 boundary classes
        assert 0 <= self.state < num_states
        num_phones = max(phone_idxs.values()) + 1
        num_phone_classes = num_phones + 1  # +1 for the out-of-context class (0)
        result = 0
        # Position sequence: 0, -1, 1, -2, 2, ...
        # The first position (the center phone) ends up in the most significant
        # base-num_phone_classes digit.
        for i in range(2 * context_length + 1):
            pos = i // 2
            if i % 2 == 1:
                pos = -pos - 1
            result *= num_phone_classes
            result += self.phone_idx(ctx_offset=pos, phone_idxs=phone_idxs)
        result *= num_states
        result += self.state
        result *= 4  # num boundary classes
        result += self.boundary
        return result

    @classmethod
    def from_index(cls, index, phone_ids, num_states=3, context_length=1):
        """
        Inverse of self.index() (Sprint NoStateTyingDense encoding).
        Out-of-context positions are decoded as the empty string "".

        Note that there is also AllophoneStateAlphabet::allophoneState,
        via Am/ClassicStateModel.cc, which unfortunately uses a different
        encoding. See :func:`from_classic_index`.

        :param int index:
        :param dict[int,str] phone_ids: reverse-map from self.index(). idx -> id
        :param int num_states: how much state per allophone
        :param int context_length: how much left/right context
        :rtype: AllophoneState
        """
        num_phones = max(phone_ids.keys()) + 1
        num_phone_classes = num_phones + 1
        code = index
        result = AllophoneState()
        result.boundary = code % 4
        code //= 4
        result.state = code % num_states
        code //= num_states
        # The phone digits come out least-significant first, i.e. in *reverse*
        # of the encoding order used in index(). The previous implementation
        # assigned them in extraction order, which swapped the positions and
        # thus did not invert index(); collect and reverse them first.
        digits = []
        for _ in range(2 * context_length + 1):
            digits.append(code % num_phone_classes)
            code //= num_phone_classes
        digits.reverse()  # now in encoding order: pos 0, -1, 1, -2, 2, ...
        for i in range(2 * context_length + 1):
            pos = i // 2
            if i % 2 == 1:
                pos = -pos - 1
            phone_idx = digits[i]
            result.set_phoneme(ctx_offset=pos, phone_id=phone_ids[phone_idx - 1] if phone_idx else '')
        return result

    @classmethod
    def from_classic_index(cls, index, allophones, max_states=6):
        """
        Via Sprint C++ Archiver.cc:getStateInfo(): the HMM state number is
        encoded by repeatedly adding (1 << 26) to the allophone (emission) index.

        :param int index:
        :param dict[int,AllophoneState] allophones: emission idx -> allophone
        :param int max_states: max number of HMM states per allophone
        :rtype: AllophoneState
        """
        emission = index
        state = 0
        while state < max_states:
            if emission >= (1 << 26):
                emission -= 1 << 26
                state += 1
            else:
                break
        a = allophones[emission].copy()
        a.state = state
        return a

    def __hash__(self):
        return hash(tuple(getattr(self, a) for a in self._attrs))

    def __eq__(self, other):
        # Return NotImplemented for foreign types so that e.g. comparing to a
        # str yields False instead of raising AttributeError.
        if not isinstance(other, AllophoneState):
            return NotImplemented
        return all(getattr(self, a) == getattr(other, a) for a in self._attrs)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
|
class Lexicon():
    """
    Lexicon. Map of words to phoneme sequences (can have multiple pronunciations).
    Parsed from a (possibly gzipped) lexicon XML file with a phoneme inventory
    and lemma entries.
    """
    def __init__(self, filename):
        """
        :param str filename: lexicon XML file, optionally gzipped
        """
        print('Loading lexicon', filename, file=log.v4)
        lex_file = open(filename, 'rb')
        if filename.endswith('.gz'):
            # Transparently decompress gzipped lexicon files.
            lex_file = gzip.GzipFile(fileobj=lex_file)
        self.phoneme_list = []  # phoneme symbols, in inventory order
        self.phonemes = {}  # symbol -> {"index": int, "symbol": str, "variation": str}
        self.lemmas = {}  # orth -> {"orth": str, "phons": list of {"phon": str, "score": float}}
        # Incremental (streaming) XML parse, so large lexica do not need to fit
        # into memory as a full tree.
        context = iter(ElementTree.iterparse(lex_file, events=('start', 'end')))
        (_, root) = next(context)
        tree = [root]  # stack of currently open elements
        for (event, elem) in context:
            if (event == 'start'):
                tree += [elem]
            elif (event == 'end'):
                assert (tree[(- 1)] is elem)
                tree = tree[:(- 1)]
                if (elem.tag == 'phoneme'):
                    symbol = elem.find('symbol').text.strip()
                    # NOTE(review): "unicode" is presumably provided by a Py2/3
                    # compat import elsewhere in this file -- confirm.
                    assert isinstance(symbol, (str, unicode))
                    if (elem.find('variation') is not None):
                        variation = elem.find('variation').text.strip()
                    else:
                        variation = 'context'  # default: context-dependent phoneme
                    assert (symbol not in self.phonemes)
                    assert (variation in ['context', 'none'])
                    self.phoneme_list.append(symbol)
                    self.phonemes[symbol] = {'index': len(self.phonemes), 'symbol': symbol, 'variation': variation}
                    root.clear()  # free memory of the processed subtree
                elif (elem.tag == 'phoneme-inventory'):
                    print(('Finished phoneme inventory, %i phonemes' % len(self.phonemes)), file=log.v4)
                    root.clear()
                elif (elem.tag == 'lemma'):
                    # A lemma may have multiple orth variants and multiple
                    # pronunciations ("phon" entries) with scores.
                    for orth_elem in elem.findall('orth'):
                        orth = (orth_elem.text or '').strip()
                        phons = [{'phon': e.text.strip(), 'score': float(e.attrib.get('score', 0))} for e in elem.findall('phon')]
                        assert (orth not in self.lemmas)
                        self.lemmas[orth] = {'orth': orth, 'phons': phons}
                    root.clear()
        print(('Finished whole lexicon, %i lemmas' % len(self.lemmas)), file=log.v4)
|
class StateTying:
    """
    Clustering of (allophone) states into classes.
    Loaded from a state-tying file where each line has the form
    ``<allophone-state-string> <class-index>``.
    """

    def __init__(self, state_tying_file):
        """
        :param str state_tying_file: filename of the state-tying table
        :raises AssertionError: on duplicate allophone entries, or if the class
            indices are not the contiguous range 0..num_classes-1
        """
        self.allo_map = {}  # allophone-state str -> class idx
        self.class_map = {}  # class idx -> set of allophone-state strs
        # Context manager closes the file (the previous version relied on GC).
        with open(state_tying_file) as f:
            lines = f.read().splitlines()
        for line in lines:
            (allo_str, class_idx_str) = line.split()
            class_idx = int(class_idx_str)
            assert allo_str not in self.allo_map, 'duplicate allophone-state entry %r' % allo_str
            self.allo_map[allo_str] = class_idx
            self.class_map.setdefault(class_idx, set()).add(allo_str)
        min_class_idx = min(self.class_map.keys())
        max_class_idx = max(self.class_map.keys())
        assert min_class_idx == 0
        assert max_class_idx == len(self.class_map) - 1, 'some classes are not represented'
        self.num_classes = len(self.class_map)
|
class PhoneSeqGenerator():
    """
    Generates phone sequences.
    Given orthography, looks up pronunciations in a lexicon and produces
    allophone-state sequences with randomly inserted silence and random
    state repetitions (controlled by the probabilities below).
    """
    def __init__(self, lexicon_file, allo_num_states=3, allo_context_len=1, state_tying_file=None, add_silence_beginning=0.1, add_silence_between_words=0.1, add_silence_end=0.1, repetition=0.9, silence_repetition=0.95):
        """
        :param str lexicon_file: lexicon XML file
        :param int allo_num_states: how much HMM states per allophone (all but silence)
        :param int allo_context_len: how much context to store left and right. 1 -> triphone
        :param str|None state_tying_file: for state-tying, if you want that
        :param float add_silence_beginning: prob of adding silence at beginning
        :param float add_silence_between_words: prob of adding silence between words
        :param float add_silence_end: prob of adding silence at end
        :param float repetition: prob of repeating an allophone
        :param float silence_repetition: prob of repeating the silence allophone
        """
        self.lexicon = Lexicon(lexicon_file)
        # Phoneme symbols ordered by their inventory index.
        self.phonemes = sorted(self.lexicon.phonemes.keys(), key=(lambda s: self.lexicon.phonemes[s]['index']))
        self.rnd = Random(0)  # deterministic by default; see random_seed()
        self.allo_num_states = allo_num_states
        self.allo_context_len = allo_context_len
        self.add_silence_beginning = add_silence_beginning
        self.add_silence_between_words = add_silence_between_words
        self.add_silence_end = add_silence_end
        self.repetition = repetition
        self.silence_repetition = silence_repetition
        # The lexicon is expected to contain a special "[SILENCE]" lemma.
        self.si_lemma = self.lexicon.lemmas['[SILENCE]']
        self.si_phone = self.si_lemma['phons'][0]['phon']
        if state_tying_file:
            self.state_tying = StateTying(state_tying_file)
        else:
            self.state_tying = None
    def random_seed(self, seed):
        """
        Reseed the internal RNG (used by all random decisions below).

        :param int seed:
        """
        self.rnd.seed(seed)
    def get_class_labels(self):
        """
        :return: output class labels: tied-state class names if state-tying is
            used, otherwise the plain phoneme symbols
        :rtype: list[str]
        """
        if self.state_tying:
            return ['|'.join(sorted(self.state_tying.class_map[i])) for i in range(self.state_tying.num_classes)]
        else:
            return self.phonemes
    def seq_to_class_idxs(self, phones, dtype=None):
        """
        :param list[AllophoneState] phones: list of allophone states
        :param str dtype: eg "int32"
        :rtype: numpy.ndarray
        :returns 1D numpy array with the indices
        """
        if (dtype is None):
            dtype = 'int32'
        if self.state_tying:
            # Map via the formatted allophone-state string.
            return numpy.array([self.state_tying.allo_map[a.format()] for a in phones], dtype=dtype)
        else:
            # No state-tying: just the phoneme inventory index.
            return numpy.array([self.lexicon.phonemes[p.id]['index'] for p in phones], dtype=dtype)
    def _iter_orth(self, orth):
        """
        Yield the lemmas for the given orthography, with randomly inserted
        silence lemmas at the beginning / between words / at the end.
        Unknown words are retried after splitting at "/" or "-".

        :param str orth:
        :rtype: typing.Iterator[typing.Dict[str]]
        """
        if (self.rnd.random() < self.add_silence_beginning):
            (yield self.si_lemma)
        symbols = list(orth.split())
        i = 0
        while (i < len(symbols)):
            symbol = symbols[i]
            try:
                lemma = self.lexicon.lemmas[symbol]
            except KeyError:
                # Not in the lexicon: try to split compound tokens in place
                # and retry with the parts.
                if ('/' in symbol):
                    symbols[i:(i + 1)] = symbol.split('/')
                    continue
                if ('-' in symbol):
                    symbols[i:(i + 1)] = symbol.split('-')
                    continue
                raise
            i += 1
            (yield lemma)
            if (i < len(symbols)):
                if (self.rnd.random() < self.add_silence_between_words):
                    (yield self.si_lemma)
        if (self.rnd.random() < self.add_silence_end):
            (yield self.si_lemma)
    def orth_to_phones(self, orth):
        """
        :param str orth:
        :return: space-separated pronunciations (one randomly chosen per lemma)
        :rtype: str
        """
        phones = []
        for lemma in self._iter_orth(orth):
            phon = self.rnd.choice(lemma['phons'])
            phones += [phon['phon']]
        return ' '.join(phones)
    def _phones_to_allos(self, phones):
        # Wrap each raw phone symbol into a context-free AllophoneState.
        for p in phones:
            a = AllophoneState()
            a.id = p
            (yield a)
    def _random_allo_silence(self, phone=None):
        # Yield one or more silence allophone states (geometric number of
        # repetitions, controlled by silence_repetition).
        if (phone is None):
            phone = self.si_phone
        while True:
            a = AllophoneState()
            a.id = phone
            a.mark_initial()
            a.mark_final()
            a.state = 0  # silence has only one state
            (yield a)
            if (self.rnd.random() >= self.silence_repetition):
                break
    def _allos_add_states(self, allos):
        # Expand each allophone into its HMM states, with random repetitions
        # per state (controlled by repetition); silence is handled specially.
        for _a in allos:
            if (_a.id == self.si_phone):
                for a in self._random_allo_silence(_a.id):
                    (yield a)
            else:
                for state in range(self.allo_num_states):
                    while True:
                        a = AllophoneState()
                        a.id = _a.id
                        a.context_history = _a.context_history
                        a.context_future = _a.context_future
                        a.boundary = _a.boundary
                        a.state = state
                        (yield a)
                        if (self.rnd.random() >= self.repetition):
                            break
    def _allos_set_context(self, allos):
        """
        Fill in left/right phone context (up to allo_context_len) in place.
        Context is reset at phonemes with variation "none" (e.g. silence).

        :param list[AllophoneState] allos:
        """
        if (self.allo_context_len == 0):
            return
        # Forward pass: set history context.
        ctx = []
        for a in allos:
            if (self.lexicon.phonemes[a.id]['variation'] == 'context'):
                a.context_history = tuple(ctx)
                ctx += [a.id]
                ctx = ctx[(- self.allo_context_len):]
            else:
                ctx = []
        # Backward pass: set future context.
        ctx = []
        for a in reversed(allos):
            if (self.lexicon.phonemes[a.id]['variation'] == 'context'):
                a.context_future = tuple(reversed(ctx))
                ctx += [a.id]
                ctx = ctx[(- self.allo_context_len):]
            else:
                ctx = []
    def generate_seq(self, orth):
        """
        :param str orth: orthography as a str. orth.split() should give words in the lexicon
        :rtype: list[AllophoneState]
        :returns allophone state list. those will have repetitions etc
        """
        allos = []
        for lemma in self._iter_orth(orth):
            phon = self.rnd.choice(lemma['phons'])
            l_allos = list(self._phones_to_allos(phon['phon'].split()))
            l_allos[0].mark_initial()
            l_allos[(- 1)].mark_final()
            allos += l_allos
        self._allos_set_context(allos)
        allos = list(self._allos_add_states(allos))
        return allos
    def _random_phone_seq(self, prob_add=0.8):
        # Yield a geometric-length sequence of uniformly random phonemes.
        while True:
            (yield self.rnd.choice(self.phonemes))
            if (self.rnd.random() >= prob_add):
                break
    def _random_allo_seq(self, prob_word_add=0.8):
        # Build a random "word" sequence of allophones with context and states.
        allos = []
        while True:
            phones = self._random_phone_seq()
            w_allos = list(self._phones_to_allos(phones))
            w_allos[0].mark_initial()
            w_allos[(- 1)].mark_final()
            allos += w_allos
            if (self.rnd.random() >= prob_word_add):
                break
        self._allos_set_context(allos)
        return list(self._allos_add_states(allos))
    def generate_garbage_seq(self, target_len):
        """
        :param int target_len: len of the returned seq
        :rtype: list[AllophoneState]
        :returns allophone state list. those will have repetitions etc.
        It will randomly generate a sequence of phonemes and transform that
        into a list of allophones in a similar way than generate_seq().
        """
        allos = []
        while True:
            allos += self._random_allo_seq()
            # Add some silence so that left/right context is correct for further allophones.
            allos += list(self._random_allo_silence())
            if (len(allos) >= target_len):
                allos = allos[:target_len]  # truncate to the exact target length
                break
        return allos
|
class TranslationDataset(CachedDataset2):
    """
    Based on the conventions by our team for translation datasets.
    It gets a directory and expects these files:

    - source.dev(.gz)
    - source.train(.gz)
    - source.vocab.pkl
    - target.dev(.gz)
    - target.train(.gz)
    - target.vocab.pkl

    The convention is to use "dev" and "train" as ``file_postfix`` for the dev and train set respectively, but any
    file_postfix can be used. The target file and vocabulary do not have to exists when setting ``source_only``.
    It is also automatically checked if a gzip version of the file exists.

    To follow the RETURNN conventions on data input and output, the source text is mapped to the "data" key,
    and the target text to the "classes" data key. Both are index sequences.

    The corpus files are read by a background thread (see _thread_main);
    readers block in _get_data()/_get_data_len() until the data is available.
    """
    source_file_prefix = 'source'
    target_file_prefix = 'target'
    main_source_data_key = 'data'
    main_target_data_key = 'classes'
    def __init__(self, path, file_postfix, source_postfix='', target_postfix='', source_only=False, search_without_reference=False, unknown_label=None, seq_list_file=None, use_cache_manager=False, **kwargs):
        """
        :param str path: the directory containing the files
        :param str file_postfix: e.g. "train" or "dev".
            it will then search for "source." + postfix and "target." + postfix.
        :param bool random_shuffle_epoch1: if True, will also randomly shuffle epoch 1. see self.init_seq_order().
            (passed on via **kwargs to the base class)
        :param str source_postfix: will concat this at the end of the source.
        :param str target_postfix: will concat this at the end of the target.
            You might want to add some sentence-end symbol.
        :param bool source_only: if targets are not available
        :param bool search_without_reference:
        :param str|dict[str,str]|None unknown_label: Label to replace out-of-vocabulary words with, e.g. "<UNK>".
            If not given, will not replace unknowns but throw an error. Can also be a dict data_key -> unknown_label
            to configure for each data key separately (default for each key is None).
        :param str seq_list_file: filename. line-separated list of line numbers defining fixed sequence order.
            multiple occurrences supported, thus allows for repeating examples while loading only once.
        :param bool use_cache_manager: uses :func:`Util.cf` for files
        """
        super(TranslationDataset, self).__init__(**kwargs)
        assert os.path.isdir(path)
        self.path = path
        self.file_postfix = file_postfix
        self.source_only = source_only
        self.search_without_reference = search_without_reference
        self._source_postfix = source_postfix
        self._target_postfix = target_postfix
        self._seq_list_file = seq_list_file
        # Optional fixed sequence order (corpus line numbers), possibly with repetitions.
        self.seq_list = ([int(n) for n in open(seq_list_file).read().splitlines()] if seq_list_file else None)
        self._add_postfix = {self.source_file_prefix: source_postfix, self.target_file_prefix: target_postfix}
        self._use_cache_manager = use_cache_manager
        from threading import Lock, Thread
        self._lock = Lock()  # protects self._data and self._data_len
        self._main_data_key_map = {self.source_file_prefix: self.main_source_data_key}
        if (not source_only):
            self._main_data_key_map[self.target_file_prefix] = self.main_target_data_key
        # Skip reading the target file when searching without a reference.
        self._files_to_read = [prefix for prefix in self._main_data_key_map.keys() if (not ((prefix == self.target_file_prefix) and search_without_reference))]
        self._data_files = {prefix: self._get_data_file(prefix) for prefix in self._files_to_read}
        self._data_keys = (self._source_data_keys + self._target_data_keys)
        self._data = {data_key: [] for data_key in self._data_keys}  # data_key -> list of numpy arrays, filled by the reader thread
        self._data_len = None  # total num seqs; set by the reader thread once known
        self._vocabs = self._get_vocabs()
        self.num_outputs = {k: [(max(self._vocabs[k].values()) + 1), 1] for k in self._vocabs.keys()}  # all sparse
        assert all([(v1 <= (2 ** 31)) for (k, (v1, v2)) in self.num_outputs.items()])  # we use int32
        self.num_inputs = self.num_outputs[self.main_source_data_key][0]
        self._reversed_vocabs = {k: self._reverse_vocab(k) for k in self._vocabs.keys()}
        self.labels = {k: self._get_label_list(k) for k in self._vocabs.keys()}
        # Normalize unknown_label to a dict data_key -> label-or-None.
        if (not isinstance(unknown_label, dict)):
            assert isinstance(unknown_label, (str, type(None)))
            unknown_label = {data_key: unknown_label for data_key in self._data_keys}
        for data_key in self._data_keys:
            unknown_label.setdefault(data_key, None)
        self._unknown_label = unknown_label
        self._seq_order = None  # type: typing.Optional[typing.Sequence[int]]  # seq_idx -> line_nr
        self._tag_prefix = 'line-'  # sequence tag is "line-" + line number
        # Background thread which reads and numericalizes the corpus files.
        self._thread = Thread(name=('%r reader' % self), target=self._thread_main)
        self._thread.daemon = True
        self._thread.start()
    @property
    def _source_data_keys(self):
        # Data keys filled from the source file (overridden in subclasses).
        return [self.main_source_data_key]
    @property
    def _target_data_keys(self):
        # Data keys filled from the target file (empty if source_only).
        if self.source_only:
            return []
        else:
            return [self.main_target_data_key]
    def _extend_data(self, file_prefix, data_strs):
        """
        Numericalize the given lines and append them to self._data.

        :param str file_prefix: prefix of the corpus file, "source" or "target"
        :param list[bytes] data_strs: lines of text read from the corpus file
        """
        data_key = (self.main_source_data_key if (file_prefix == self.source_file_prefix) else self.main_target_data_key)
        data = [self._words_to_numpy(data_key, (s.decode('utf8').strip() + self._add_postfix[file_prefix]).split()) for s in data_strs]
        with self._lock:
            self._data[data_key].extend(data)
    def _thread_main(self):
        # Background reader: first counts the total number of lines (to set
        # self._data_len), then re-reads all corpus files chunk-wise and
        # numericalizes the lines via _extend_data().
        from returnn.util.basic import interrupt_main
        try:
            import returnn.util.better_exchook
            returnn.util.better_exchook.install()
            from returnn.util.basic import AsyncThreadRun  # NOTE(review): unused here
            # First iterate once through the file to get the data length (line count).
            data_len = 0
            while True:
                ls = self._data_files[self.source_file_prefix].readlines((10 ** 4))
                data_len += len(ls)
                if (not ls):
                    break
            with self._lock:
                self._data_len = data_len
            self._data_files[self.source_file_prefix].seek(0, os.SEEK_SET)  # we will read it again below
            # Now, read and use the vocab for a compact representation in memory.
            files_to_read = list(self._files_to_read)
            while True:
                for file_prefix in files_to_read:
                    data_strs = self._data_files[file_prefix].readlines((10 ** 6))
                    if (not data_strs):
                        # File exhausted: must have produced exactly data_len seqs.
                        assert (len(self._data[self._main_data_key_map[file_prefix]]) == self._data_len)
                        files_to_read.remove(file_prefix)
                        continue
                    assert ((len(self._data[self._main_data_key_map[file_prefix]]) + len(data_strs)) <= self._data_len)
                    self._extend_data(file_prefix, data_strs)
                if (not files_to_read):
                    break
            for (file_prefix, file_handle) in list(self._data_files.items()):
                file_handle.close()
                self._data_files[file_prefix] = None
        except Exception:
            sys.excepthook(*sys.exc_info())
            interrupt_main()  # propagate failure to the main thread
    def _transform_filename(self, filename):
        """
        :param str filename:
        :return: maybe transformed filename, e.g. via cache manager
        :rtype: str
        """
        if self._use_cache_manager:
            from returnn.util.basic import cf
            filename = cf(filename)
        return filename
    def _get_data_file(self, prefix):
        """
        Open the corpus file for the given prefix, also trying a ".gz" variant.

        :param str prefix: e.g. "source" or "target"
        :return: opened binary file handle
        :rtype: io.FileIO
        """
        import os
        filename = ('%s/%s.%s' % (self.path, prefix, self.file_postfix))
        if os.path.exists(filename):
            return open(self._transform_filename(filename), 'rb')
        if os.path.exists((filename + '.gz')):
            import gzip
            return gzip.GzipFile(self._transform_filename((filename + '.gz')), 'rb')
        raise Exception(('Data file not found: %r (.gz)?' % filename))
    def _get_vocabs(self):
        """
        :return: vocabularies for main data keys ("data" and "classes") as a dict data_key -> vocabulary
        :rtype: dict[str,dict[str,int]]
        """
        return {data_key: self._get_vocab(prefix) for (prefix, data_key) in self._main_data_key_map.items()}
    def _get_vocab(self, prefix):
        """
        Load the pickled vocabulary for the given file prefix.

        :param str prefix: e.g. "source" or "target"
        :rtype: dict[str,int]
        """
        import os
        filename = ('%s/%s.vocab.pkl' % (self.path, prefix))
        if (not os.path.exists(filename)):
            raise Exception(('Vocab file not found: %r' % filename))
        import pickle
        vocab = pickle.load(open(self._transform_filename(filename), 'rb'))
        assert isinstance(vocab, dict)
        return vocab
    def _reverse_vocab(self, data_key):
        """
        Note that there might be multiple items in the vocabulary (e.g. "<S>" and "</S>")
        which map to the same label index.
        We sort the list by lexical order
        and the last entry for a particular label index is used ("<S>" in that example).

        :param str data_key: e.g. "data" or "classes"
        :rtype: dict[int,str]
        """
        return {v: k for (k, v) in sorted(self._vocabs[data_key].items())}
    def _get_label_list(self, data_key):
        """
        :param str data_key: e.g. "data" or "classes"
        :return: list of len num labels
        :rtype: list[str]
        """
        reversed_vocab = self._reversed_vocabs[data_key]
        assert isinstance(reversed_vocab, dict)
        num_labels = self.num_outputs[data_key][0]
        return list(map(reversed_vocab.__getitem__, range(num_labels)))
    def _words_to_numpy(self, data_key, words):
        """
        Map words to label indices, replacing OOV words by the unknown label
        if configured (otherwise raising).

        :param str data_key: e.g. "data" or "classes"
        :param list[str] words:
        :rtype: numpy.ndarray
        """
        vocab = self._vocabs[data_key]
        if (self._unknown_label[data_key] is None):
            try:
                words_idxs = list(map(vocab.__getitem__, words))
            except KeyError as e:
                raise Exception(('Can not handle unknown token without unknown_label: %s (%s)' % (str(e), bytes(str(e), 'utf-8'))))
        else:
            unknown_label_id = vocab[self._unknown_label[data_key]]
            words_idxs = [vocab.get(w, unknown_label_id) for w in words]
        return numpy.array(words_idxs, dtype=numpy.int32)
    def _get_data(self, key, line_nr):
        """
        Get one sequence; blocks (polling) until the reader thread has loaded it.

        :param str key: "data" or "classes"
        :param int line_nr:
        :return: 1D array
        :rtype: numpy.ndarray
        """
        import time
        last_print_time = 0
        last_print_len = None
        while True:
            with self._lock:
                if (self._data_len is not None):
                    assert (line_nr <= self._data_len)
                cur_len = len(self._data[key])
                if (line_nr < cur_len):
                    return self._data[key][line_nr]
            # Not loaded yet: report progress at most every 10 seconds.
            if ((cur_len != last_print_len) and ((time.time() - last_print_time) > 10)):
                print(('%r: waiting for %r, line %i (%i loaded so far)...' % (self, key, line_nr, cur_len)), file=log.v3)
                last_print_len = cur_len
                last_print_time = time.time()
            time.sleep(1)
    def _get_data_len(self):
        """
        Blocks (polling) until the reader thread has determined the corpus size.

        :return: num seqs of the whole underlying data
        :rtype: int
        """
        import time
        t = 0
        while True:
            with self._lock:
                if (self._data_len is not None):
                    return self._data_len
            if (t == 0):
                print(('%r: waiting for data length info...' % (self,)), file=log.v3)
            time.sleep(1)
            t += 1
    def have_corpus_seq_idx(self):
        """
        :rtype: bool
        """
        return True
    def get_all_tags(self):
        """
        :rtype: list[str]
        """
        return [(self._tag_prefix + str(line_nr)) for line_nr in range(len(self._data[self.main_source_data_key]))]
    def get_corpus_seq_idx(self, seq_idx):
        """
        :param int seq_idx:
        :rtype: int
        """
        if (self._seq_order is None):
            return None
        return self._seq_order[seq_idx]
    def is_data_sparse(self, key):
        """
        :param str key:
        :rtype: bool
        """
        return True  # all our data is sparse (word indices)
    def get_data_dtype(self, key):
        """
        :param str key:
        :rtype: str
        """
        return 'int32'
    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        If random_shuffle_epoch1, for epoch 1 with "random" ordering, we leave the given order as is.
        Otherwise, this is mostly the default behavior.

        :param int|None epoch:
        :param list[str]|None seq_list: List of sequence tags, to set a predefined order.
        :param list[int]|None seq_order: List of corpus sequence indices, to set a predefined order.
        :rtype: bool
        :returns whether the order changed (True is always safe to return)
        """
        super(TranslationDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
        if ((seq_list is None) and self.seq_list):
            seq_list = self.seq_list  # fixed order from seq_list_file
        if (seq_order is not None):
            self._seq_order = seq_order
        elif (seq_list is not None):
            # Tags have the form "line-<nr>"; strip the prefix to get line numbers.
            self._seq_order = [int(s[len(self._tag_prefix):]) for s in seq_list]
        else:
            num_seqs = self._get_data_len()
            self._seq_order = self.get_seq_order_for_epoch(epoch=epoch, num_seqs=num_seqs, get_seq_len=(lambda i: len(self._get_data(key=self.main_source_data_key, line_nr=i))))
        self._num_seqs = len(self._seq_order)
        return True
    def supports_seq_order_sorting(self) -> bool:
        """supports sorting"""
        return True
    def get_estimated_seq_length(self, seq_idx):
        """
        :param int seq_idx: for current epoch, not the corpus seq idx
        :rtype: int
        :returns sequence length of main source data key ("data"), used for sequence sorting
        """
        corpus_seq_idx = self.get_corpus_seq_idx(seq_idx)
        assert (corpus_seq_idx is not None)
        return len(self._get_data(key=self.main_source_data_key, line_nr=corpus_seq_idx))
    def _collect_single_seq(self, seq_idx):
        # Assemble one DatasetSeq; blocks until the reader thread has the data.
        if (seq_idx >= self._num_seqs):
            return None
        line_nr = self._seq_order[seq_idx]
        data_keys = (self._source_data_keys if self.search_without_reference else self._data_keys)
        features = {data_key: self._get_data(key=data_key, line_nr=line_nr) for data_key in data_keys}
        assert all([(data is not None) for data in features.values()])
        return DatasetSeq(seq_idx=seq_idx, seq_tag=(self._tag_prefix + str(line_nr)), features=features)
|
class TranslationFactorsDataset(TranslationDataset):
'\n Extends TranslationDataset with support for translation factors,\n see https://workshop2016.iwslt.org/downloads/IWSLT_2016_paper_2.pdf, https://arxiv.org/abs/1910.03912.\n\n Each word in the source and/or target corpus is represented by a tuple of tokens ("factors"). The number of factors\n must be the same for each word in the corpus. The format used is simply the concatenation of all factors\n separated by a special character (see the \'factor_separator\' parameter).\n\n Example: "this|u is|l example|u 1.|l"\n Here, the factor indicates the casing (u for upper-case, l for lower-case).\n\n In addition to the files expected by TranslationDataset we require a vocabulary for all factors.\n The input sequence will be available in the network for each factor separately via the given data key\n (see the \'source_factors\' parameter).\n '
def __init__(self, source_factors=None, target_factors=None, factor_separator='|', **kwargs):
'\n :param list[str]|None source_factors: Data keys for the source factors (excluding first factor, which is always\n called \'data\'). Words in source file have to have that many factors. Also, a vocabulary\n "<factor_data_key>.vocab.pkl" has to exist for each factor.\n :param list[str]|None target_factors: analogous to source_factors. Excluding first factor, which is always\n called \'classes\'.\n :param str factor_separator: string to separate factors of the words. E.g. if "|", words are expected to be\n of format "<factor_0>|<factor_1>|...".\n :param None|str source_postfix: See TranslationDataset. Note here, that we apply it to all factors.\n :param None|str target_postfix: Same as above.\n '
if isinstance(source_factors, str):
source_factors = [source_factors]
if isinstance(target_factors, str):
target_factors = [target_factors]
self._source_factors = source_factors
self._target_factors = target_factors
self._factor_separator = factor_separator
super(TranslationFactorsDataset, self).__init__(**kwargs)
@property
def _source_data_keys(self):
return ([self.main_source_data_key] + (self._source_factors or []))
@property
def _target_data_keys(self):
if self.source_only:
return []
else:
return ([self.main_target_data_key] + (self._target_factors or []))
def _get_vocabs(self):
'\n :return: vocabularies for all factors as a dict data_key -> vocabulary\n :rtype: dict[str,dict[str,int]]\n '
vocabs = super(TranslationFactorsDataset, self)._get_vocabs()
vocabs.update({data_key: self._get_vocab(data_key) for data_key in (self._source_data_keys[1:] + self._target_data_keys[1:])})
return vocabs
def _extend_data(self, file_prefix, data_strs):
'\n Similar to the base class method, but handles several data streams read from one string.\n\n :param str file_prefix: prefix of the corpus file, "source" or "target"\n :param list[bytes] data_strs: lines of text read from the corpus file\n '
if (file_prefix == self.source_file_prefix):
data_keys = self._source_data_keys
else:
assert (file_prefix == self.target_file_prefix)
data_keys = self._target_data_keys
data = [self._factored_words_to_numpy(data_keys, s.decode('utf8').strip().split(), self._add_postfix[file_prefix]) for s in data_strs]
data = zip(*data)
with self._lock:
for (i, data_) in enumerate(data):
self._data[data_keys[i]].extend(data_)
def _factored_words_to_numpy(self, data_keys, words, postfix):
    """
    Creates a list of words for each factor separately
    and converts to numpy by calling self._words_to_numpy() for each.

    :param list[str] data_keys: data keys corresponding to the factors present for each word
    :param list[str] words: list of factored words of the form "<factor_0>|<factor_1>|..."
    :param str postfix:
    :return: numpy word indices for each data key
    :rtype: list[numpy.ndarray]
    """
    if not words:
        per_factor = [[] for _ in data_keys]
    elif len(data_keys) > 1:
        split_words = [word.split(self._factor_separator) for word in words]
        assert all((len(factors) == len(data_keys)) for factors in split_words), \
            ('All words must have all factors. Expected: ' + self._factor_separator.join(data_keys))
        # Transpose: per-word factor lists -> per-factor word lists.
        per_factor = [list(column) for column in zip(*split_words)]
    else:
        per_factor = [words]
    numpy_data = []
    for key, factor_words in zip(data_keys, per_factor):
        if postfix:
            # Append the postfix token to every factor stream.
            factor_words = factor_words + [postfix.strip()]
        numpy_data.append(self._words_to_numpy(key, factor_words))
    return numpy_data
|
class ConfusionNetworkDataset(TranslationDataset):
    """
    This dataset allows for multiple (weighted) options for each word in the source sequence.
    In particular, it can be used to represent confusion networks.
    Two matrices (of dimension source length x max_density) will be provided as input to the network,
    one containing the word ids ("sparse_inputs") and one containing the weights ("sparse_weights").
    The matrices are read from the following input format (example):

    "__ALT__ we're|0.999659__were|0.000341148 a|0.977656__EPS|0.0223441 social|1.0 species|1.0"

    Input positions are separated by a space,
    different word options at one positions are separated by two underscores.
    Each word option has a weight appended to it, separated by "|".
    If "__ALT__" is missing, the line is interpreted as a regular plain text sentence.
    For this, all weights are set to 1.0 and only one word option is used at each position.
    Epsilon arcs of confusion networks can be represented by a special token (e.g. "EPS"), which has to be
    added to the source vocabulary.

    Via "seq_list_file" (see TranslationDataset) it is possible to give an explicit order of training examples.
    This can e.g. be used to repeat the confusion net part of the training data without loading it several times.
    """
    # Overrides the base class' main source data key ("data"):
    # here the main source stream is the (time x density) matrix of word ids.
    main_source_data_key = 'sparse_inputs'

    def __init__(self, max_density=20, **kwargs):
        """
        :param str path: the directory containing the files
        :param str file_postfix: e.g. "train" or "dev".
            It will then search for "source." + postfix and "target." + postfix.
        :param bool random_shuffle_epoch1: if True, will also randomly shuffle epoch 1. see self.init_seq_order().
        :param None|str source_postfix: will concat this at the end of the source.
        :param None|str target_postfix: will concat this at the end of the target.
            You might want to add some sentence-end symbol.
        :param bool source_only: if targets are not available
        :param str|None unknown_label: "UNK" or so. if not given, then will not replace unknowns but throw an error
        :param int max_density: the density of the confusion network: max number of arcs per slot
        """
        self.density = max_density
        super(ConfusionNetworkDataset, self).__init__(**kwargs)
        # The base class only initializes streams for its own data keys;
        # add the weights stream if it does not exist yet.
        if ('sparse_weights' not in self._data.keys()):
            self._data['sparse_weights'] = []

    def get_data_keys(self):
        """
        :rtype: list[str]
        """
        # NOTE(review): 'classes' is always listed here, even if source_only is set -- confirm intended.
        return ['sparse_inputs', 'sparse_weights', 'classes']

    def is_data_sparse(self, key):
        """
        :param str key:
        :rtype: bool
        """
        # Weights are dense floats; word ids ('sparse_inputs') and targets ('classes') are sparse indices.
        if (key == 'sparse_weights'):
            return False
        return True

    def get_data_dtype(self, key):
        """
        :param str key:
        :rtype: str
        """
        if (key == 'sparse_weights'):
            return 'float32'
        return 'int32'

    def get_data_shape(self, key):
        """
        :param str key:
        :return: shape without the time axis, i.e. [density] for the two matrix streams
        :rtype: list[int]
        """
        if (key in ['sparse_inputs', 'sparse_weights']):
            return [self.density]
        return []

    def _load_single_confusion_net(self, words, vocab, postfix, key):
        """
        Converts one confusion-network line (list of slots) into the id and weight matrices.

        :param list[str] words: slots of the form "<arc_0>|<weight_0>__<arc_1>|<weight_1>__..."
        :param dict[str,int] vocab:
        :param str postfix:
        :param str key:
        :return: (word id matrix, weight matrix), each of shape (len(words) [+ 1 for postfix], density)
        :rtype: (numpy.ndarray, numpy.ndarray)
        """
        unknown_label_id = vocab[self._unknown_label[key]]
        # If the postfix token is known in the vocab, reserve one extra time frame for it;
        # an unknown postfix is silently dropped.
        offset = 0
        postfix_index = None
        if (postfix is not None):
            postfix_index = vocab.get(postfix, unknown_label_id)
            if (postfix_index != unknown_label_id):
                offset = 1
        words_idxs = numpy.zeros(shape=((len(words) + offset), self.density), dtype=numpy.int32)
        words_confs = numpy.zeros(shape=((len(words) + offset), self.density), dtype=numpy.float32)
        for n in range(len(words)):
            arcs = words[n].split('__')
            # Keep at most self.density arcs per slot; unused entries stay zero.
            for k in range(min(self.density, len(arcs))):
                (arc, conf) = arcs[k].split('|')
                words_idxs[n][k] = vocab.get(arc, unknown_label_id)
                words_confs[n][k] = float(conf)
        if (offset != 0):
            # Append the postfix token as a single arc with weight 1.
            words_idxs[len(words)][0] = postfix_index
            words_confs[len(words)][0] = 1
        return (words_idxs, words_confs)

    def _data_str_to_sparse_inputs(self, data_key, s, postfix=None):
        """
        :param str data_key:
        :param str s: one corpus line, either confusion-net format (leading "__ALT__") or plain text
        :param str postfix:
        :return: (word ids, weights); weights are None for plain-text lines
            (the dense matrices are then created lazily in _collect_single_seq)
        :rtype: (numpy.ndarray, numpy.ndarray|None)
        """
        vocab = self._vocabs[data_key]
        words = s.split()
        if (words and (words[0] == '__ALT__')):
            words.pop(0)
            return self._load_single_confusion_net(words, vocab, postfix, data_key)
        if (postfix is not None):
            words.append(postfix.strip())
        unknown_label_id = vocab[self._unknown_label[data_key]]
        words_idxs = numpy.array([vocab.get(w, unknown_label_id) for w in words], dtype=numpy.int32)
        words_confs = None
        return (words_idxs, words_confs)

    def _extend_data(self, file_prefix, data_strs):
        """
        :param str file_prefix: "source" or "target"
        :param list[bytes] data_strs: array of input for the key
        """
        if (file_prefix == self.source_file_prefix):
            key = self.main_source_data_key
            idx_data = []
            conf_data = []
            for s in data_strs:
                (words_idxs, words_confs) = self._data_str_to_sparse_inputs(data_key=key, s=s.decode('utf8').strip(), postfix=self._add_postfix[file_prefix])
                idx_data.append(words_idxs)
                conf_data.append(words_confs)
            with self._lock:
                self._data[key].extend(idx_data)
                self._data['sparse_weights'].extend(conf_data)
        else:
            # Target side is plain text, handled like in the base class.
            key = self.main_target_data_key
            data = [self._words_to_numpy(data_key=key, words=(s.decode('utf8').strip() + self._add_postfix[file_prefix]).split()) for s in data_strs]
            with self._lock:
                self._data[key].extend(data)

    def _collect_single_seq(self, seq_idx):
        """
        :param int seq_idx: sorted seq idx
        :rtype: DatasetSeq|None
        """
        if (seq_idx >= self._num_seqs):
            return None
        line_nr = self._seq_order[seq_idx]
        features = {key: self._get_data(key=key, line_nr=line_nr) for key in self.get_data_keys()}
        if (features['sparse_weights'] is None):
            # Plain-text line: expand the 1-d id sequence into the dense
            # (time x density) matrices, with a single arc of weight 1 per slot.
            seq = features[self.main_source_data_key]
            features[self.main_source_data_key] = numpy.zeros(shape=(len(seq), self.density), dtype=numpy.int32)
            features['sparse_weights'] = numpy.zeros(shape=(len(seq), self.density), dtype=numpy.float32)
            for n in range(len(seq)):
                features[self.main_source_data_key][n][0] = seq[n]
                features['sparse_weights'][n][0] = 1
        return DatasetSeq(seq_idx=seq_idx, seq_tag=(self._tag_prefix + str(line_nr)), features=features, targets=None)
|
def expand_abbreviations(text):
    """
    Applies every (pattern, replacement) pair from the module-level ``_abbreviations`` table.

    :param str text:
    :rtype: str
    """
    for pattern, replacement in _abbreviations:
        text = re.sub(pattern, replacement, text)
    return text
|
def lowercase(text):
    """
    Lower-cases the whole string.

    :param str text:
    :rtype: str
    """
    lowered = text.lower()
    return lowered
|
def lowercase_keep_special(text):
    """
    Lower-cases all whitespace-separated tokens, except special tokens
    of the form "[...]" or "<...>", which are kept as-is.

    :param str text:
    :rtype: str
    """
    def _lower_token(m):
        # m.group(0) is the token (incl. its leading whitespace/anchor).
        return m.group(0).lower()
    return re.sub('(\\s|^)(?!(\\[\\S*])|(<\\S*>))\\S+(?=\\s|$)', _lower_token, text)
|
def collapse_whitespace(text):
    """
    Collapses runs of whitespace (per the module-level ``_whitespace_re``)
    into single spaces and strips leading/trailing whitespace.

    :param str text:
    :rtype: str
    """
    return re.sub(_whitespace_re, ' ', text).strip()
|
def convert_to_ascii(text):
    """
    Transliterates the text to plain ASCII via the (optional) ``unidecode`` package.

    :param str text:
    :rtype: str
    """
    # Local import: unidecode is an optional third-party dependency,
    # only needed when this cleaner is actually used.
    from unidecode import unidecode
    return unidecode(text)
|
def basic_cleaners(text):
    """
    Basic pipeline that lowercases and collapses whitespace without transliteration.

    :param str text:
    :rtype: str
    """
    for step in (lowercase, collapse_whitespace):
        text = step(text)
    return text
|
def transliteration_cleaners(text):
    """
    Pipeline for non-English text that transliterates to ASCII.

    :param str text:
    :rtype: str
    """
    for step in (convert_to_ascii, lowercase, collapse_whitespace):
        text = step(text)
    return text
|
def english_cleaners(text):
    """
    Pipeline for English text, including number and abbreviation expansion.

    :param str text:
    :rtype: str
    """
    # ASCII -> lowercase -> numbers -> abbreviations -> whitespace cleanup.
    return collapse_whitespace(
        expand_abbreviations(
            normalize_numbers(
                lowercase(convert_to_ascii(text)),
                with_spacing=True)))
|
def english_cleaners_keep_special(text):
    """
    Pipeline for English text, including number and abbreviation expansion,
    which keeps special tokens like "[...]" / "<...>" untouched by lower-casing.

    :param str text:
    :rtype: str
    """
    return collapse_whitespace(
        expand_abbreviations(
            normalize_numbers(
                lowercase_keep_special(convert_to_ascii(text)),
                with_spacing=True)))
|
def get_remove_chars(chars):
    """
    :param str|list[str] chars: characters to be replaced by a space
    :return: function which removes the given characters from a string
    :rtype: (str)->str
    """
    def remove_chars(text):
        """
        :param str text:
        :rtype: str
        """
        cleaned = text
        for ch in chars:
            cleaned = cleaned.replace(ch, ' ')
        # Replacement may have introduced double spaces; collapse them again.
        return collapse_whitespace(cleaned)
    return remove_chars
|
def get_replace(old, new):
    """
    :param str old:
    :param str new:
    :return: function which replaces all occurrences of ``old`` by ``new``
    :rtype: (str)->str
    """
    def replace(text):
        """
        :param str text:
        :rtype: str
        """
        return text.replace(old, new)
    return replace
|
def _get_inflect():
    """
    Lazily creates and caches a single ``inflect`` engine in the module-level ``_inflect``.

    :rtype: inflect.engine
    """
    global _inflect
    if not _inflect:
        # Local import: inflect is an optional third-party dependency.
        import inflect
        _inflect = inflect.engine()
    return _inflect
|
def _remove_commas(m):
'\n :param typing.Match m:\n :rtype: str\n '
return m.group(1).replace(',', '')
|
def _expand_decimal_point(m):
'\n :param typing.Match m:\n :rtype: str\n '
return m.group(1).replace('.', ' point ')
|
def _expand_dollars(m):
'\n :param typing.Match m:\n :rtype: str\n '
match = m.group(1)
parts = match.split('.')
if (len(parts) > 2):
return (match + ' dollars')
dollars = (int(parts[0]) if parts[0] else 0)
cents = (int(parts[1]) if ((len(parts) > 1) and parts[1]) else 0)
if (dollars and cents):
dollar_unit = ('dollar' if (dollars == 1) else 'dollars')
cent_unit = ('cent' if (cents == 1) else 'cents')
return ('%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit))
elif dollars:
dollar_unit = ('dollar' if (dollars == 1) else 'dollars')
return ('%s %s' % (dollars, dollar_unit))
elif cents:
cent_unit = ('cent' if (cents == 1) else 'cents')
return ('%s %s' % (cents, cent_unit))
else:
return 'zero dollars'
|
def _expand_ordinal(m):
    """
    Spells out an ordinal like "3rd" via the inflect engine.

    :param typing.Match m:
    :rtype: str
    """
    engine = _get_inflect()
    return engine.number_to_words(m.group(0))
|
def _expand_number(m):
    """
    Spells out a matched number, with special handling for digit sequences
    with leading zeros and for years (1000 < n < 3000).

    :param typing.Match m:
    :rtype: str
    """
    num_s = m.group(0).strip()
    if '.' in num_s:
        return _get_inflect().number_to_words(num_s, andword='')
    num = int(num_s)
    if num_s.startswith('0') or num in {747}:
        # Leading zero (e.g. "007") or special cases like "747": read digit by digit.
        digit_names = {
            '0': 'zero', '1': 'one', '2': 'two', '3': 'three', '4': 'four',
            '5': 'five', '6': 'six', '7': 'seven', '8': 'eight', '9': 'nine'}
        return ' '.join([digit_names.get(c, c) for c in num_s])
    if not (1000 < num < 3000):
        return _get_inflect().number_to_words(num, andword='')
    # Year-like numbers:
    if num == 2000:
        return 'two thousand'
    if 2000 < num < 2010:
        return ('two thousand ' + _get_inflect().number_to_words((num % 100)))
    if (num % 100) == 0:
        return (_get_inflect().number_to_words((num // 100)) + ' hundred')
    # E.g. 1984 -> "nineteen eighty-four", 1906 -> "nineteen oh six".
    return _get_inflect().number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
|
def _expand_number_with_spacing(m):
    """
    Like :func:`_expand_number`, but pads the result with single spaces.

    :param typing.Match m:
    :rtype: str
    """
    expanded = _expand_number(m)
    return (' %s ' % expanded)
|
def normalize_numbers(text, with_spacing=False):
    """
    Expands commas, currency, decimals, ordinals and plain numbers into words.

    :param str text:
    :param bool with_spacing: surround expanded plain numbers with spaces
    :rtype: str
    """
    number_repl = _expand_number_with_spacing if with_spacing else _expand_number
    # Order matters: separators/currency/decimals first, plain numbers last.
    for pattern, replacement in (
            (_comma_number_re, _remove_commas),
            (_pounds_re, '\\1 pounds'),
            (_dollars_re, _expand_dollars),
            (_decimal_number_re, _expand_decimal_point),
            (_ordinal_re, _expand_ordinal),
            (_number_re, number_repl)):
        text = re.sub(pattern, replacement, text)
    return text
|
def _dummy_identity_pp(text):
    """
    Identity post-processor, used when no post-processing options are given.

    :param str text:
    :rtype: str
    """
    return text
|
def get_post_processor_function(opts):
    """
    You might want to use :mod:`inflect` or :mod:`unidecode`
    for some normalization / cleanup.
    This function can be used to get such functions.

    :param str|list[str] opts: e.g. "english_cleaners", or "get_remove_chars(',/')"
    :return: function
    :rtype: (str)->str
    """
    if not opts:
        return _dummy_identity_pp
    if callable(opts):
        # Sanity-check that the given function maps str -> str.
        sample = opts('test')
        assert isinstance(sample, str), ('%r does not seem as a valid function str->str' % (opts,))
        return opts
    if isinstance(opts, str):
        # A plain name is looked up in this module; anything with "(" or ","
        # is evaluated as a Python expression (trusted config input).
        if ('(' in opts) or (',' in opts):
            func = eval(opts)
        else:
            func = globals()[opts]
        if isinstance(func, (tuple, list)):
            func = get_post_processor_function(func)
        assert callable(func)
        sample = func('test')
        assert isinstance(sample, str), ('%r does not seem as a valid function str->str' % (opts,))
        return func
    assert isinstance(opts, (tuple, list))
    if len(opts) == 1:
        return get_post_processor_function(opts[0])
    chain = [get_post_processor_function(pp) for pp in opts]
    def chained_post_processors(text):
        """
        :param str text:
        :rtype: str
        """
        for func in chain:
            text = func(text)
        return text
    return chained_post_processors
|
def _main():
    """
    Command-line demo: either dumps/post-processes a text corpus (or a literal string),
    or constructs an LmDataset from a Python dict literal and iterates through it,
    printing loading progress.
    """
    from returnn.util import better_exchook
    better_exchook.install()
    from argparse import ArgumentParser
    arg_parser = ArgumentParser()
    arg_parser.add_argument('lm_dataset', help=('Python eval string, should eval to dict' + ', or otherwise filename, and will just dump'))
    arg_parser.add_argument('--post_processor', nargs='*')
    args = arg_parser.parse_args()
    # If the argument does not look like a dict literal, treat it as a corpus
    # file name (or a literal sentence) and just dump the text.
    if (not args.lm_dataset.startswith('{')):
        callback = print
        if args.post_processor:
            pp = get_post_processor_function(args.post_processor)
            def callback(text):
                """
                :param str text: one line of text; printed after post-processing
                """
                print(pp(text))
        if os.path.isfile(args.lm_dataset):
            iter_corpus(args.lm_dataset, callback)
        else:
            callback(args.lm_dataset)
        sys.exit(0)
    # Otherwise: the argument is LmDataset kwargs; iterate through the dataset.
    log.initialize(verbosity=[5])
    print('LmDataset demo startup')
    # NOTE(review): eval on a command-line string is intentional here (demo/debug tool);
    # do not feed untrusted input.
    kwargs = eval(args.lm_dataset)
    assert isinstance(kwargs, dict), ('arg should be str of dict: %s' % args.lm_dataset)
    print(('Creating LmDataset with kwargs=%r ...' % kwargs))
    dataset = LmDataset(**kwargs)
    print('init_seq_order ...')
    dataset.init_seq_order(epoch=1)
    seq_idx = 0
    last_log_time = time.time()
    print('start iterating through seqs ...')
    while dataset.is_less_than_num_seqs(seq_idx):
        if (seq_idx == 0):
            print(('load_seqs with seq_idx=%i ....' % seq_idx))
        dataset.load_seqs(seq_idx, (seq_idx + 1))
        # Progress log at most every 2 seconds.
        if ((time.time() - last_log_time) > 2.0):
            last_log_time = time.time()
            print(('Loading %s progress, %i/%i (%.0f%%) seqs loaded (%.0f%% skipped), (%.0f%% unknown) total syms %i ...' % (dataset.__class__.__name__, dataset.next_orth_idx, dataset.estimated_num_seqs, ((100.0 * dataset.next_orth_idx) / dataset.estimated_num_seqs), ((100.0 * dataset.num_skipped) / (dataset.next_orth_idx or 1)), ((100.0 * dataset.num_unknown) / dataset._num_timesteps_accumulated['data']), dataset._num_timesteps_accumulated['data'])))
        seq_idx += 1
    print(('finished iterating, num seqs: %i' % seq_idx))
    print('dataset len:', dataset.len_info())
|
class MapDatasetBase(object):
    """
    Template for user-side datasets with random-access (map-style) data loading:
    entries can be accessed in arbitrary order via __getitem__.
    For global sorting, the length information needs to be known beforehand, see get_seq_len.
    """

    def __init__(self, data_types=None):
        """
        :param dict[str,dict] data_types: data_key -> constructor parameters of Data object,
            for all data streams the dataset provides (inputs and targets).
            E.g. {'data': {'dim': 1000, 'sparse': True, ...}, 'classes': ...}.
        """
        self.data_types = (data_types or {})

    def __len__(self):
        """
        :return: total number of sequences in the dataset
        :rtype: int
        """
        raise NotImplementedError

    def __getitem__(self, seq_idx):
        """
        Loads the actual data of one sequence. The access order can be arbitrary.

        :param int seq_idx:
        :return: The content of a single dataset entry
        :rtype: dict[str,numpy.array]
        """
        raise NotImplementedError

    def get_seq_len(self, seq_idx):
        """
        Optional. Provides the sequence length for the `seq_ordering` parameter.
        If not specified, only a limited set of ordering options is available.

        :param int seq_idx:
        :return: sequence length
        :rtype: int
        """
        raise OptionalNotImplementedError

    def get_seq_tag(self, seq_idx):
        """
        :param int seq_idx:
        :return: tag for the sequence of the given index, default is 'seq-{seq_idx}'.
        :rtype: str
        """
        return ('seq-%i' % seq_idx)

    def get_seq_order(self, epoch=None):
        """
        Optional. Override to implement a dataset-specific sequence order for a given epoch.
        The number of sequences can be less than the total number.
        This will override the effects of `partition_epoch` and `seq_ordering` when using MapDatasetWrapper.

        :param int epoch:
        :return: sequence order (list of sequence indices)
        :rtype: list[int]
        """
        raise OptionalNotImplementedError
|
class MapDatasetWrapper(CachedDataset2):
    """
    Takes a MapDataset and turns it into a returnn.datasets.Dataset by providing the required class methods.
    """

    def __init__(self, map_dataset, **kwargs):
        """
        :param MapDatasetBase|function map_dataset: the MapDataset to be wrapped,
            or a callable (factory) which returns it
        """
        super(MapDatasetWrapper, self).__init__(**kwargs)
        if callable(map_dataset):
            # Allow lazy construction via a factory function.
            map_dataset = map_dataset()
        assert isinstance(map_dataset, MapDatasetBase)
        self._dataset = map_dataset
        # Set in init_seq_order(); maps sorted seq idx -> corpus seq idx.
        self._seq_order = None
        assert map_dataset.data_types, f'{self}: map_dataset {map_dataset} needs to provide data_types'
        self.num_outputs = {key: _get_num_outputs_entry(key, opts) for (key, opts) in map_dataset.data_types.items()}

    @property
    def map_dataset(self) -> MapDatasetBase:
        """
        :return: the wrapped MapDataset
        """
        return self._dataset

    @property
    def num_seqs(self):
        """
        :return: number of sequences in the current epoch
        :rtype: int
        """
        if (self._seq_order is None):
            raise NotImplementedError("'num_seqs' is only known after calling init_seq_order().")
        return len(self._seq_order)

    def get_total_num_seqs(self) -> int:
        """
        :return: total number of seqs
        """
        return len(self._dataset)

    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        :param int|None epoch:
        :param list[str]|None seq_list: List of sequence tags, to set a predefined order.
        :param list[int]|None seq_order: List of corpus sequence indices, to set a predefined order.
        :rtype: bool
        :returns: whether the order changed (True is always safe to return)
        """
        super(MapDatasetWrapper, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
        if (seq_list is not None):
            assert (seq_order is None)
            # Map tags back to corpus indices. Builds the full mapping, O(total num seqs).
            tag_to_idx = {self._dataset.get_seq_tag(corpus_seq_idx): corpus_seq_idx for corpus_seq_idx in range(len(self._dataset))}
            seq_order = [tag_to_idx[tag] for tag in seq_list]
        if (seq_order is not None):
            self._seq_order = seq_order
        else:
            try:
                # A dataset-defined order takes precedence over seq_ordering/partition_epoch.
                self._seq_order = self._dataset.get_seq_order(epoch=epoch)
            except OptionalNotImplementedError:
                try:
                    self._seq_order = self.get_seq_order_for_epoch(epoch=epoch, num_seqs=len(self._dataset), get_seq_len=self._dataset.get_seq_len)
                except OptionalNotImplementedError:
                    # Without sequence lengths, only orderings that do not need them work.
                    assert (self.seq_ordering in ['default', 'reverse', 'random']), f'{self}: dataset {self._dataset}.get_seq_len is not implemented, seq_ordering {self.seq_ordering!r} is not supported'
                    self._seq_order = self.get_seq_order_for_epoch(epoch=epoch, num_seqs=len(self._dataset), get_seq_len=None)
        return True

    def supports_seq_order_sorting(self) -> bool:
        """
        :return: whether sorting by seq length is supported, i.e. whether get_seq_len is implemented
        """
        try:
            self._dataset.get_seq_len(0)
            return True
        except OptionalNotImplementedError:
            return False

    def _collect_single_seq(self, seq_idx):
        """
        :param int seq_idx: sorted seq idx
        :rtype: DatasetSeq
        """
        corpus_seq_idx = self.get_corpus_seq_idx(seq_idx)
        return DatasetSeq(seq_idx, features=self._dataset[corpus_seq_idx], seq_tag=self._dataset.get_seq_tag(corpus_seq_idx))

    def get_current_seq_order(self):
        """
        :rtype: typing.Sequence[int]
        """
        assert (self._seq_order is not None)
        return self._seq_order

    def get_tag(self, sorted_seq_idx):
        """
        :param int sorted_seq_idx:
        :return: tag of the sequence at the given sorted index
        :rtype: str
        """
        seq_tag = self._dataset.get_seq_tag(self.get_corpus_seq_idx(sorted_seq_idx))
        return seq_tag

    def get_all_tags(self) -> List[str]:
        """
        :return: list of all tags
        """
        return [self._dataset.get_seq_tag(i) for i in range(len(self._dataset))]

    def get_corpus_seq_idx(self, sorted_seq_idx):
        """
        :param int sorted_seq_idx:
        :return: corpus_seq_idx
        :rtype: int
        """
        return self._seq_order[sorted_seq_idx]

    def have_corpus_seq_idx(self):
        """
        :rtype: bool
        :return: whether you can call self.get_corpus_seq_idx()
        """
        return True

    def get_data_keys(self) -> List[str]:
        """
        :return: data keys
        """
        return list(self._dataset.data_types.keys())

    def get_data_dim(self, key):
        """
        :param str key: e.g. "data" or "classes"
        :return: number of classes, no matter if sparse or not
        :rtype: int
        """
        if (key in self._dataset.data_types):
            return self._dataset.data_types[key].get('dim', 1)
        return 1

    def get_data_dtype(self, key):
        """
        :param str key: e.g. "data" or "classes"
        :return: dtype as str, e.g. "int32" or "float32"
        :rtype: str
        """
        if ((key in self._dataset.data_types) and ('dtype' in self._dataset.data_types[key])):
            return self._dataset.data_types[key]['dtype']
        # Default: int32 for sparse (label indices), float32 for dense data.
        if self.is_data_sparse(key):
            return 'int32'
        return 'float32'

    def is_data_sparse(self, key):
        """
        :param str key: e.g. "data" or "classes"
        :return: whether the data is sparse
        :rtype: bool
        """
        if (key in self._dataset.data_types):
            return self._dataset.data_types[key].get('sparse', False)
        return False

    def get_data_shape(self, key):
        """
        :param str key:
        :returns: get_data(*, key).shape[1:], i.e. num-frames excluded
        :rtype: list[int]
        """
        if (key in self._dataset.data_types):
            if ('shape' in self._dataset.data_types[key].keys()):
                if (self._dataset.data_types[key]['shape'][0] is None):
                    # First axis is the (variable) time axis, which is excluded here.
                    return self._dataset.data_types[key]['shape'][1:]
                else:
                    assert False, 'data shape has no time axis, calling get_data_shape is not possible'
        if self.is_data_sparse(key):
            return []
        return [self.get_data_dim(key)]
|
class FromListDataset(MapDatasetBase):
    """
    Simple implementation of a MapDataset where all data is given in a list.
    """

    def __init__(self, data_list, sort_data_key=None, **kwargs):
        """
        :param list[dict[str,numpy.ndarray]] data_list: sequence data as a dict data_key -> data, one dict per sequence.
        :param str sort_data_key: sequence length will be determined from the data of this data_key.
        """
        self._data_list = data_list
        self._sort_data_key = sort_data_key
        super(FromListDataset, self).__init__(**kwargs)

    def __len__(self):
        """
        :return: total number of sequences in the dataset
        :rtype: int
        """
        return len(self._data_list)

    def __getitem__(self, seq_idx):
        """
        :param int seq_idx:
        :return: The content of a single dataset entry
        :rtype: dict[str,numpy.array]
        """
        entry = self._data_list[seq_idx]
        return entry

    def get_seq_len(self, seq_idx):
        """
        :param int seq_idx:
        :return: length of data for 'sort_data_key'
        :rtype: int
        """
        assert self._sort_data_key, "Specify which data key should be used for sequence sorting via 'sort_data_key'."
        entry = self._data_list[seq_idx]
        return len(entry[self._sort_data_key])
|
def _get_num_outputs_entry(name: str, opts: Dict[(str, Any)]) -> Tuple[(int, int)]:
    """
    :param name: data key
    :param opts: data opts from data_types in MapDatasetBase
    :return: num_outputs entry: (num-classes, len(shape))

    This is maybe optional at some point when we remove num_outputs in Dataset.
    """
    from returnn.tensor import Tensor
    tensor = Tensor(name, **opts)
    # Fall back to the last shape dim when no (truthy) dim is set.
    num_classes = tensor.dim or (tensor.shape[-1] if tensor.shape else 0)
    return (num_classes, len(tensor.shape))
|
# NOTE: the following lines are non-code residue from a web scrape
# (dataset-viewer boilerplate), preserved here as comments so the file stays valid:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.