def get_wav_time_len(filename):
'\n :param str filename:\n :rtype: float\n '
f = wave.open(filename)
num_frames = f.getnframes()
frame_rate = f.getframerate()
f.close()
return (num_frames / float(frame_rate))
|
def iter_bliss(filename, options, callback):
corpus_file = open(filename, 'rb')
if filename.endswith('.gz'):
corpus_file = gzip.GzipFile(fileobj=corpus_file)
def getelements(tag):
'Yield *tag* elements from *filename_or_file* xml incrementally.'
context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
(_, root) = next(context)
tree = [root]
for (event, elem) in context:
if (event == 'start'):
tree += [elem]
elif (event == 'end'):
assert (tree[(- 1)] is elem)
tree = tree[:(- 1)]
if ((event == 'end') and (elem.tag == tag)):
(yield (tree, elem))
root.clear()
time_via_wav = False
for (tree, elem) in getelements('segment'):
if options.collect_time:
start = float(elem.attrib.get('start', 0))
if ('end' in elem.attrib):
end = float(elem.attrib['end'])
else:
if (not time_via_wav):
time_via_wav = True
print('Time will be read from WAV recordings. Can be slow. Maybe use `--collect_time 0`.', file=log.v3)
rec_elem = tree[(- 1)]
assert (rec_elem.tag == 'recording')
wav_filename = rec_elem.attrib['audio']
end = get_wav_time_len(wav_filename)
assert (end > start)
frame_len = ((end - start) * (1000.0 / options.frame_time))
else:
frame_len = 0
elem_orth = elem.find('orth')
orth_raw = (elem_orth.text or '')
orth_split = orth_raw.split()
orth = ' '.join(orth_split)
callback(frame_len=frame_len, orth=orth)
|
def iter_txt(filename, options, callback):
f = open(filename, 'rb')
if filename.endswith('.gz'):
f = gzip.GzipFile(fileobj=f)
if options.collect_time:
print('No time-info in txt.', file=log.v3)
options.collect_time = False
for line in f:
line = line.strip()
if (not line):
continue
callback(frame_len=0, orth=line)
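# Illustrative sketch only: both iter_bliss and iter_txt above drive a callback with the
# signature callback(frame_len=..., orth=...), once per segment/line. The helper below is
# hypothetical (not part of this tool) and just prints what the iterators deliver.
def example_print_segment(frame_len, orth):
    """Print one whitespace-normalized orthography and its frame length (0 if unknown)."""
    print('%8.1f frames: %r' % (frame_len, orth))
# e.g.: iter_txt('corpus.txt.gz', options=args, callback=example_print_segment)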
|
def collect_stats(options, iter_corpus):
'\n :param options: argparse.Namespace\n '
orth_symbols_filename = options.output
if orth_symbols_filename:
assert (not os.path.exists(orth_symbols_filename))
class Stats():
count = 0
process_last_time = time.time()
total_frame_len = 0
total_orth_len = 0
orth_syms_set = set()
if options.add_numbers:
Stats.orth_syms_set.update(map(chr, list(range(ord('0'), (ord('9') + 1)))))
if options.add_lower_alphabet:
Stats.orth_syms_set.update(map(chr, list(range(ord('a'), (ord('z') + 1)))))
if options.add_upper_alphabet:
Stats.orth_syms_set.update(map(chr, list(range(ord('A'), (ord('Z') + 1)))))
def cb(frame_len, orth):
if (frame_len >= options.max_seq_frame_len):
return
orth_syms = parse_orthography(orth)
if (len(orth_syms) >= options.max_seq_orth_len):
return
Stats.count += 1
Stats.total_frame_len += frame_len
if options.dump_orth_syms:
print('Orth:', ''.join(orth_syms), file=log.v3)
if options.filter_orth_sym:
if (options.filter_orth_sym in orth_syms):
print('Found orth:', ''.join(orth_syms), file=log.v3)
if options.filter_orth_syms_seq:
filter_seq = parse_orthography_into_symbols(options.filter_orth_syms_seq)
if found_sub_seq(filter_seq, orth_syms):
print('Found orth:', ''.join(orth_syms), file=log.v3)
Stats.orth_syms_set.update(orth_syms)
Stats.total_orth_len += len(orth_syms)
if ((time.time() - Stats.process_last_time) > 2):
Stats.process_last_time = time.time()
if options.collect_time:
print('Collect process, total frame len so far:', hms((Stats.total_frame_len * (options.frame_time / 1000.0))), file=log.v3)
else:
print('Collect process, total orth len so far:', human_size(Stats.total_orth_len), file=log.v3)
iter_corpus(cb)
if options.remove_symbols:
filter_syms = parse_orthography_into_symbols(options.remove_symbols)
Stats.orth_syms_set -= set(filter_syms)
if options.collect_time:
print('Total frame len:', Stats.total_frame_len, 'time:', hms((Stats.total_frame_len * (options.frame_time / 1000.0))), file=log.v3)
else:
print('No time stats (--collect_time False).', file=log.v3)
print('Total orth len:', Stats.total_orth_len, ('(%s)' % human_size(Stats.total_orth_len)), end=' ', file=log.v3)
if options.collect_time:
print('fraction:', (float(Stats.total_orth_len) / Stats.total_frame_len), file=log.v3)
else:
print('', file=log.v3)
print('Average orth len:', (float(Stats.total_orth_len) / Stats.count), file=log.v3)
print('Num symbols:', len(Stats.orth_syms_set), file=log.v3)
if orth_symbols_filename:
orth_syms_file = open(orth_symbols_filename, 'wb')
for orth_sym in sorted(Stats.orth_syms_set):
orth_syms_file.write((b'%s\n' % unicode(orth_sym).encode('utf8')))
orth_syms_file.close()
print('Wrote orthography symbols to', orth_symbols_filename, file=log.v3)
else:
print('Provide --output to save the symbols.', file=log.v3)
|
def init(config_filename=None):
rnn.init_better_exchook()
rnn.init_thread_join_hack()
if config_filename:
rnn.init_config(config_filename, command_line_options=[])
rnn.init_log()
else:
log.initialize()
print('RETURNN collect-orth-symbols starting up.', file=log.v3)
rnn.init_faulthandler()
if config_filename:
rnn.init_data()
rnn.print_task_properties()
|
def is_bliss(filename):
try:
corpus_file = open(filename, 'rb')
if filename.endswith('.gz'):
corpus_file = gzip.GzipFile(fileobj=corpus_file)
context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
(_, root) = next(context)
return True
except IOError:
pass
except ElementTree.ParseError:
pass
return False
|
def is_crnn_config(filename):
if filename.endswith('.gz'):
return False
try:
config = Config()
config.load_file(filename)
return True
except Exception:
pass
return False
|
def main(argv):
argparser = argparse.ArgumentParser(description='Collect orth symbols.')
argparser.add_argument('input', help='RETURNN config, Corpus Bliss XML or just txt-data')
argparser.add_argument('--frame_time', type=int, default=10, help='time (in ms) per frame. not needed for Corpus Bliss XML')
argparser.add_argument('--collect_time', type=int, default=True, help='collect time info. can be slow in some cases')
argparser.add_argument('--dump_orth_syms', action='store_true', help='dump all orthographies')
argparser.add_argument('--filter_orth_sym', help='dump orthographies which match this filter')
argparser.add_argument('--filter_orth_syms_seq', help='dump orthographies which match this filter')
argparser.add_argument('--max_seq_frame_len', type=int, default=float('inf'), help='collect only orthographies <= this max frame len')
argparser.add_argument('--max_seq_orth_len', type=int, default=float('inf'), help='collect only orthographies <= this max orth len')
argparser.add_argument('--add_numbers', type=int, default=True, help='add chars 0-9 to orth symbols')
argparser.add_argument('--add_lower_alphabet', type=int, default=True, help='add chars a-z to orth symbols')
argparser.add_argument('--add_upper_alphabet', type=int, default=True, help='add chars A-Z to orth symbols')
argparser.add_argument('--remove_symbols', default='(){}$', help='remove these chars from orth symbols')
argparser.add_argument('--output', help="where to store the symbols (default: don't store)")
args = argparser.parse_args(argv[1:])
bliss_filename = None
crnn_config_filename = None
txt_filename = None
if is_bliss(args.input):
bliss_filename = args.input
elif is_crnn_config(args.input):
crnn_config_filename = args.input
else:
txt_filename = args.input
init(config_filename=crnn_config_filename)
if bliss_filename:
iter_corpus = (lambda cb: iter_bliss(bliss_filename, options=args, callback=cb))
elif txt_filename:
iter_corpus = (lambda cb: iter_txt(txt_filename, options=args, callback=cb))
else:
iter_corpus = (lambda cb: iter_dataset(rnn.train_data, options=args, callback=cb))
collect_stats(args, iter_corpus)
if crnn_config_filename:
rnn.finalize()
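# Usage sketch (illustrative; the script name is assumed from the startup message above,
# corpus/config/output paths are placeholders):
#   python collect-orth-symbols.py corpus.xml.gz --output orth-symbols.txt
#   python collect-orth-symbols.py my-setup.config --collect_time 0 --dump_orth_syms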
|
def iter_dataset(dataset, callback):
'\n :param Dataset.Dataset dataset:\n :param (*)->None callback:\n '
dataset.init_seq_order(epoch=1)
assert ('orth' in dataset.get_target_list())
seq_idx = 0
while dataset.is_less_than_num_seqs(seq_idx):
dataset.load_seqs(seq_idx, seq_idx + 1)
orth = dataset.get_targets('orth', seq_idx)
callback(orth=orth)
seq_idx += 1
|
def iter_bliss(filename, callback):
'\n Iterate through a Sprint Bliss XML file.\n\n :param str filename:\n :param callback:\n '
corpus_file = open(filename, 'rb')
if filename.endswith('.gz'):
corpus_file = gzip.GzipFile(fileobj=corpus_file)
def get_elements(tag):
'Yield *tag* elements from *filename_or_file* xml incrementally.'
context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
(_, root) = next(context)
tree = [root]
for (event, elem) in context:
if (event == 'start'):
tree += [elem]
elif (event == 'end'):
assert (tree[(- 1)] is elem)
tree = tree[:(- 1)]
if ((event == 'end') and (elem.tag == tag)):
(yield (tree, elem))
root.clear()
for (tree, elem) in get_elements('segment'):
elem_orth = elem.find('orth')
orth_raw = (elem_orth.text or '')
orth_split = orth_raw.split()
orth = ' '.join(orth_split)
callback(orth=orth)
|
def iter_txt(filename, callback):
'\n Iterate through pure text file.\n\n :param str filename:\n :param callback:\n '
f = open(filename, 'rb')
if filename.endswith('.gz'):
f = gzip.GzipFile(fileobj=f)
for line in f:
line = line.strip()
if (not line):
continue
callback(orth=line)
|
class CollectCorpusStats():
'\n Collect stats.\n '
def __init__(self, options, iter_corpus):
'\n :param options: argparse.Namespace\n :param iter_corpus:\n '
self.options = options
self.seq_count = 0
self.words = set()
self.total_word_len = 0
self.process_last_time = time.time()
iter_corpus(self._callback)
print('Total word len:', self.total_word_len, ('(%s)' % human_size(self.total_word_len)), file=log.v3)
print('Average orth len:', (float(self.total_word_len) / self.seq_count), file=log.v3)
print('Num word symbols:', len(self.words), file=log.v3)
def _callback(self, orth):
'\n :param str orth:\n '
orth_words = parse_orthography(orth, prefix=[], postfix=[], word_based=True)
self.seq_count += 1
if self.options.dump_orth:
print('Orth:', orth_words, file=log.v3)
self.words.update(orth_words)
self.total_word_len += len(orth_words)
if ((time.time() - self.process_last_time) > 2):
self.process_last_time = time.time()
print('Collect process, total word len so far:', human_size(self.total_word_len), file=log.v3)
|
def init(config_filename=None):
'\n :param str config_filename:\n '
rnn.init_better_exchook()
rnn.init_thread_join_hack()
if config_filename:
rnn.init_config(config_filename, command_line_options=[])
rnn.init_log()
else:
log.initialize()
print('Returnn collect-words starting up.', file=log.v3)
rnn.init_faulthandler()
if config_filename:
rnn.init_data()
rnn.print_task_properties()
|
def is_bliss(filename):
'\n :param str filename:\n :rtype: bool\n '
try:
corpus_file = open(filename, 'rb')
if filename.endswith('.gz'):
corpus_file = gzip.GzipFile(fileobj=corpus_file)
context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
(_, root) = next(context)
return True
except IOError:
pass
except ElementTree.ParseError:
pass
return False
|
def is_returnn_config(filename):
'\n :param str filename:\n :rtype: bool\n '
if filename.endswith('.gz'):
return False
try:
config = Config()
config.load_file(filename)
return True
except Exception:
pass
return False
|
def main(argv):
'\n Main entry.\n '
arg_parser = argparse.ArgumentParser(description='Collect orth symbols.')
arg_parser.add_argument('input', help='RETURNN config, Corpus Bliss XML or just txt-data')
arg_parser.add_argument('--dump_orth', action='store_true')
arg_parser.add_argument('--lexicon')
args = arg_parser.parse_args(argv[1:])
bliss_filename = None
crnn_config_filename = None
txt_filename = None
if is_bliss(args.input):
bliss_filename = args.input
print('Read Bliss corpus:', bliss_filename)
elif is_returnn_config(args.input):
crnn_config_filename = args.input
print('Read corpus from RETURNN config:', crnn_config_filename)
else:
txt_filename = args.input
print('Read corpus from txt-file:', txt_filename)
init(config_filename=crnn_config_filename)
if bliss_filename:
def _iter_corpus(cb):
return iter_bliss(bliss_filename, callback=cb)
elif txt_filename:
def _iter_corpus(cb):
return iter_txt(txt_filename, callback=cb)
else:
def _iter_corpus(cb):
return iter_dataset(rnn.train_data, callback=cb)
corpus_stats = CollectCorpusStats(args, _iter_corpus)
if args.lexicon:
print('Lexicon:', args.lexicon)
lexicon = Lexicon(args.lexicon)
print('Words not in lexicon:')
c = 0
for w in sorted(corpus_stats.words):
if (w not in lexicon.lemmas):
print(w)
c += 1
print(('Count: %i (%f%%)' % (c, ((100.0 * float(c)) / len(corpus_stats.words)))))
else:
print('No lexicon provided (--lexicon).')
if crnn_config_filename:
rnn.finalize()
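# Usage sketch (illustrative; the script name is assumed from the startup message above,
# file names are placeholders):
#   python collect-words.py corpus.xml.gz --lexicon lexicon.xml.gz
#   python collect-words.py text-corpus.txt.gz --dump_orth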
|
def init(config_filename, log_verbosity):
'\n :param str config_filename: filename to config-file\n :param int log_verbosity:\n '
rnn.init_better_exchook()
rnn.init_thread_join_hack()
if config_filename:
print(('Using config file %r.' % config_filename))
assert os.path.exists(config_filename)
rnn.init_config(config_filename=config_filename, command_line_options=[])
global config
config = rnn.config
config.set('log', None)
config.set('log_verbosity', log_verbosity)
config.set('use_tensorflow', True)
rnn.init_log()
print('Returnn compile-native-op starting up.', file=log.v1)
rnn.returnn_greeting()
rnn.init_backend_engine()
assert util.BackendEngine.is_tensorflow_selected(), 'this is only for TensorFlow'
rnn.init_faulthandler()
if ('network' in config.typed_dict):
print('Loading network')
from returnn.tf.network import TFNetwork
network = TFNetwork(name='', config=config, rnd_seed=1, train_flag=False, eval_flag=True, search_flag=False)
network.construct_from_dict(config.typed_dict['network'])
|
def main(argv):
'\n Main entry.\n '
from returnn.tf.util.basic import CudaEnv, OpCodeCompiler
CudaEnv.verbose_find_cuda = True
OpCodeCompiler.CollectedCompilers = []
argparser = argparse.ArgumentParser(description='Compile some op')
argparser.add_argument('--config', help='filename to config-file')
argparser.add_argument('--native_op', help="op name. e.g. 'LstmGenericBase'")
argparser.add_argument('--blas_lib', default=None, help='specify which blas lib to use (path to .so or file name to search for)')
argparser.add_argument('--search_for_numpy_blas', dest='search_for_numpy_blas', action='store_true', help="search for blas inside numpy's .libs folder")
argparser.add_argument('--no_search_for_numpy_blas', dest='search_for_numpy_blas', action='store_false', help="do not search for blas inside numpy's .libs folder")
argparser.add_argument('--verbosity', default=4, type=int, help='5 for all seqs (default: 4)')
argparser.add_argument('--output_file', help='if given, will write the list of libs to this file')
args = argparser.parse_args(argv[1:])
init(config_filename=args.config, log_verbosity=args.verbosity)
import returnn.native_op as native_op
from returnn.tf.native_op import make_op, OpMaker
if args.native_op:
print(('Loading native op %r' % args.native_op))
op_gen = getattr(native_op, args.native_op)
assert issubclass(op_gen, native_op.NativeOpGenBase)
make_op(op_gen, compiler_opts={'verbose': True}, search_for_numpy_blas=args.search_for_numpy_blas, blas_lib=args.blas_lib)
libs = []
if (OpMaker.with_cuda and OpMaker.tf_blas_gemm_workaround):
print('CUDA BLAS lib:', OpMaker.cuda_blas_gemm_so_filename())
libs.append(OpMaker.cuda_blas_gemm_so_filename())
elif (OpMaker.with_cuda is False):
print('No CUDA.')
for compiler in OpCodeCompiler.CollectedCompilers:
assert isinstance(compiler, OpCodeCompiler)
print(compiler)
libs.append(compiler._so_filename)
if libs:
print('libs:')
for fn in libs:
print(fn)
else:
print('no libs compiled. use --native_op or --config')
if args.output_file:
with open(args.output_file, 'w') as f:
for fn in libs:
f.write((fn + '\n'))
print('Wrote lib list to file:', args.output_file)
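# Usage sketch (illustrative; the script name is assumed from the startup message above,
# the op name is the one from the --native_op help text, paths are placeholders):
#   python compile_native_op.py --native_op LstmGenericBase --output_file /tmp/native_op_libs.txt
#   python compile_native_op.py --config my-setup.config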
|
def init(config_filename, log_verbosity, device):
'\n :param str config_filename: filename to config-file\n :param int log_verbosity:\n :param str device:\n '
rnn.init_better_exchook()
rnn.init_thread_join_hack()
print(('Using config file %r.' % config_filename))
assert os.path.exists(config_filename)
rnn.init_config(config_filename=config_filename, extra_updates={'use_tensorflow': True, 'log': None, 'log_verbosity': log_verbosity, 'task': __file__, 'device': device})
global config
config = rnn.config
rnn.init_log()
print('Returnn compile-tf-graph starting up.', file=log.v1)
rnn.returnn_greeting()
rnn.init_backend_engine()
assert util.BackendEngine.is_tensorflow_selected(), 'this is only for TensorFlow'
rnn.init_faulthandler()
|
def create_graph(train_flag, eval_flag, search_flag, net_dict):
'\n :param bool train_flag:\n :param bool eval_flag:\n :param bool search_flag:\n :param dict[str,dict[str]] net_dict:\n :return: adds to the current graph, and then returns the network\n :rtype: returnn.tf.network.TFNetwork\n '
print(('Loading network, train flag %s, eval flag %s, search flag %s' % (train_flag, eval_flag, search_flag)))
from returnn.tf.engine import Engine
from returnn.tf.network import TFNetwork
(network, updater) = Engine.create_network(config=config, rnd_seed=1, train_flag=train_flag, eval_flag=eval_flag, search_flag=search_flag, net_dict=net_dict)
assert isinstance(network, TFNetwork)
return network
|
@contextlib.contextmanager
def helper_variable_scope():
'\n :return: separate scope from the current name scope, such that variables are not treated as model params\n :rtype: tf.VariableScope\n '
with tf_util.reuse_name_scope('IO', absolute=True) as scope:
(yield scope)
|
class SubnetworkRecCellSingleStep(_SubnetworkRecCell):
"""
Adapts :class:`_SubnetworkRecCell` such that we execute only a single step.
Used by :class:`RecStepByStepLayer`. See :class:`RecStepByStepLayer` for further documentation.
"""
def __init__(self, **kwargs):
self._parent_layers = {}
self._parent_dim_tags = {}
self._parent_replace_deps = []
super(SubnetworkRecCellSingleStep, self).__init__(**kwargs)
extern_data_copy = ExternData()
extern_data_copy.data.update({k: v.copy_template() for (k, v) in self.net.extern_data.data.items()})
self.net_delayed_update = TFNetwork(name=('%s(delayed-update)' % self.net.name), extern_data=extern_data_copy, train_flag=self.net.train_flag, search_flag=self.net.search_flag, eval_flag=False, inside_rec_time_dim=self.time_dim_tag, control_flow_ctx=self.net.control_flow_ctx, absolute_name_prefix=self.net.get_absolute_name_prefix(), parent_net=self.parent_net)
self.net_delayed_update.is_root_in_ctx = True
self.delayed_state_update_op = None
self.state_update_op = None
def _maybe_delay_tiled(self, state_var, output):
'\n :param RecStepByStepLayer.StateVar state_var:\n :param Data output:\n :rtype: tf.Tensor\n '
assert isinstance(state_var, RecStepByStepLayer.StateVar)
assert isinstance(output, Data)
rec_layer = self.parent_rec_layer
assert isinstance(rec_layer, RecStepByStepLayer)
if (rec_layer.construction_state == rec_layer.ConstructionState.GetSources):
self._parent_replace_deps.append((state_var, output))
x = state_var.read()
elif (rec_layer.construction_state == rec_layer.ConstructionState.Init):
self._parent_replace_deps.append((state_var, output))
with tf.control_dependencies([state_var.init_op()]):
x = tf.identity(output.placeholder, name=('state_var_before_loop_depend_on_init__%s' % tf_util.get_valid_scope_name_from_str(state_var.name)))
elif (rec_layer.construction_state == rec_layer.ConstructionState.InLoop):
x = self._tiled(output, state_var.read())
else:
raise ValueError(('unexpected construction state %r' % rec_layer.construction_state))
dim_tag = Dim.get_tag_from_size_tensor(output.placeholder)
if dim_tag:
dim_tag.set_tag_on_size_tensor(x, same_as_before=True)
output.placeholder = x
return x
def _tiled(self, output, x):
'\n :param Data output:\n :param tf.Tensor x:\n :rtype: tf.Tensor\n '
rec_layer = self.parent_rec_layer
assert isinstance(rec_layer, RecStepByStepLayer)
return tf_util.tile_transposed(x, axis=output.batch_dim_axis, multiples=rec_layer.get_parent_tile_multiples())
def get_parent_dim_tag(self, dim_tag):
'\n :param Dim dim_tag:\n :rtype: Dim\n '
if (dim_tag.dimension is not None):
return dim_tag
if dim_tag.is_batch_dim():
return dim_tag
if (dim_tag in self._parent_dim_tags):
return self._parent_dim_tags[dim_tag]
rec_layer = self.parent_rec_layer
assert isinstance(rec_layer, RecStepByStepLayer)
state_var = rec_layer.create_state_var(name=('base_size_%s' % tf_util.get_valid_scope_name_from_str(dim_tag.description)), initial_value=dim_tag.dyn_size)
dim_tag_dyn_size_ext = dim_tag.dyn_size_ext.copy()
with tf_util.same_control_flow_ctx(dim_tag_dyn_size_ext.placeholder), tf_util.reuse_name_scope('', absolute=True):
dim_tag_dyn_size_ext.placeholder = tf.identity(dim_tag_dyn_size_ext.placeholder, name=(dim_tag_dyn_size_ext.placeholder.op.name + '_copy_new_dim_tag'))
dim_tag_ = Dim(kind=dim_tag.kind, description=(dim_tag.description + '_base_state_var'), dimension=None, dyn_size_ext=dim_tag_dyn_size_ext, auto_generated=dim_tag.auto_generated, batch=dim_tag.batch)
dim_tag_.set_tag_on_size_tensor(dim_tag_dyn_size_ext.placeholder)
self._maybe_delay_tiled(state_var, dim_tag_.dyn_size_ext)
self._parent_dim_tags[dim_tag] = dim_tag_
return dim_tag_
def _get_parent_layer(self, layer_name):
'\n :param str layer_name: without "base:" prefix\n :rtype: WrappedInternalLayer\n '
if (layer_name in self._parent_layers):
return self._parent_layers[layer_name]
layer = super(SubnetworkRecCellSingleStep, self)._get_parent_layer(layer_name)
rec_layer = self.parent_rec_layer
if (rec_layer is None):
return layer
assert isinstance(rec_layer, RecStepByStepLayer)
output = layer.output.copy()
new_dim_tags = []
for (i, dim_tag) in enumerate(output.dim_tags):
new_dim_tags.append(self.get_parent_dim_tag(dim_tag))
output = output.copy_template_new_dim_tags(new_dim_tags=new_dim_tags, keep_special_axes=True)
output.placeholder = layer.output.placeholder
state_var = rec_layer.create_state_var(name=('base_value_%s' % layer_name), initial_value=output.placeholder, data_shape=output)
self._maybe_delay_tiled(state_var, output)
layer = WrappedInternalLayer(name=layer_name, network=self.parent_net, output=output, base_layer=layer, debug_type_name='rec-single-step')
self._parent_layers[layer_name] = layer
return layer
def get_sources(self, sources):
'\n :param list[str] sources:\n :rtype: list[WrappedInternalLayer]\n '
return [self._get_parent_layer(layer_name) for layer_name in sources]
def _set_construction_state_in_loop(self):
rec_layer = self.parent_rec_layer
assert isinstance(rec_layer, RecStepByStepLayer)
rec_layer.set_construction_state_in_loop()
for args in self._parent_replace_deps:
self._maybe_delay_tiled(*args)
def _while_loop(self, cond, body, loop_vars, shape_invariants):
"""
:param function cond:
:param function body:
:param T loop_vars:
:param S shape_invariants:
:rtype: T

def body(i, net_vars, acc_tas, seq_len_info=None)
  tf.Tensor i: loop counter, scalar
  net_vars: the accumulator values. see also self.get_init_loop_vars()
  list[tf.TensorArray] acc_tas: the output accumulator TensorArray
  (tf.Tensor,tf.Tensor)|None seq_len_info: tuple (end_flag, seq_len)
  return: [i + 1, a_flat, tas]: the updated counter + new accumulator values + updated TensorArrays
  rtype (tf.Tensor, object, list[tf.TensorArray])

def cond(i, net_vars, acc_tas, seq_len_info=None)
  ...
  return: tf.Tensor bool, True or False
"""
cell = self
rec_layer = self.parent_rec_layer
assert isinstance(rec_layer, RecStepByStepLayer)
self.net_delayed_update.parent_layer = rec_layer
if (len(loop_vars) == 3):
(i, net_vars, acc_tas) = loop_vars
seq_len_info = None
else:
(i, net_vars, acc_tas, seq_len_info) = loop_vars
seq_len_info_ = rec_layer.create_state_vars_recursive(('end_flag', 'dyn_seq_len'), seq_len_info)
seq_len_info = nest.map_structure((lambda state_var: state_var.read()), seq_len_info_)
initial_i = i
i = rec_layer.create_state_var('i', initial_i).read()
layers_with_state = set(self._initial_outputs.keys()).union(self._initial_extra_outputs.keys())
layers_cur_iteration = set()
layers_delayed = set()
layer_deps_by_layer = {}
layers_delayed_prev_deps = set()
choice_layers = set()
for layer_name in layers_with_state:
template_layer = self.layer_data_templates[layer_name]
queue = [template_layer]
visited = set()
prev_frame_deps = set()
choice_deps = set()
source_deps = set()
while queue:
cur = queue.pop(0)
if (cur in visited):
continue
visited.add(cur)
if (cur.layer_class_type is ChoiceStateVarLayer):
choice_deps.add(cur.name)
continue
if (cur.layer_class_type is SourceLayer):
source_deps.add(cur.name)
continue
for dep in cur.cur_frame_dependencies:
if (dep in visited):
continue
if (self.net not in dep.network.get_network_hierarchy()):
continue
assert isinstance(dep, _TemplateLayer)
queue.append(dep)
prev_frame_deps.update([dep.name for dep in cur.prev_frame_dependencies])
layer_deps_by_layer[layer_name] = prev_frame_deps
if (not choice_deps):
layers_cur_iteration.add(layer_name)
else:
print(('Delayed: Layer %r depends on choices %r, deps on prev frame %r' % (layer_name, choice_deps, prev_frame_deps)), file=log.v4)
if source_deps:
raise NotImplementedError(('Delayed layers with source dependencies (%s) not supported yet' % source_deps))
layers_delayed.add(layer_name)
layers_delayed_prev_deps.update(prev_frame_deps)
choice_layers.update(choice_deps)
for choice_layer in choice_layers:
assert (choice_layer not in layers_cur_iteration)
layers_delayed.add(choice_layer)
rec_layer.add_stochastic_var(choice_layer)
(init_outputs_flat, init_extra_flat) = net_vars
assert (len(init_outputs_flat) == len(self._initial_outputs))
assert (len(init_extra_flat) == len(self._initial_extra_outputs))
init_outputs = {k: v for (k, v) in zip(sorted(self._initial_outputs.keys()), init_outputs_flat)}
init_extra = {k: util.dict_zip(sorted(self._initial_extra_outputs[k]), v) for (k, v) in zip(sorted(self._initial_extra_outputs), init_extra_flat)}
class _LayerStateHelper():
def __init__(self, layer_name, prefix):
'\n :param str layer_name:\n :param str prefix:\n '
self.layer_name = layer_name
self.prefix = prefix
initial_values = {}
data_shapes = {}
if (layer_name in init_outputs):
initial_values['output'] = init_outputs[layer_name]
data_shapes['output'] = cell.layer_data_templates[layer_name].output
if (layer_name in init_extra):
initial_values['extra'] = init_extra[layer_name]
data_shapes['extra'] = nest.map_structure((lambda s: None), init_extra[layer_name])
assert isinstance(rec_layer, RecStepByStepLayer)
self.state_vars = rec_layer.create_state_vars_recursive(name_prefix=('%s/%s' % (prefix, layer_name)), initial_values=initial_values, data_shapes=data_shapes)
self._reads_once = None
def reset_reads_once(self):
'\n Next get_reads_once will create new op.\n '
self._reads_once = None
def get_reads_once(self):
'\n :return: same structure as state vars with the actual reads, type tf.Tensor.\n makes sure they are created once\n '
if (self._reads_once is not None):
return self._reads_once
self._reads_once = self.reads()
return self._reads_once
def reads(self):
'\n :return: same structure as state vars with the actual reads, type tf.Tensor.\n '
return nest.map_structure((lambda state_var: state_var.read()), self.state_vars)
def assigns_flat(self, state):
'\n :param state: same structure as state vars\n :return: list of tf.Assign ops\n :rtype: list[tf.Operation]\n '
def _map(state_var, state_value):
assert isinstance(state_var, RecStepByStepLayer.StateVar)
assert isinstance(state_value, tf.Tensor)
return state_var.assign(state_value)
assert isinstance(rec_layer, RecStepByStepLayer)
nest.assert_same_structure(state, self.state_vars)
return nest.flatten(nest.map_structure(_map, self.state_vars, state))
@staticmethod
def get_from_net(net, layer_name, prev=False):
'\n :param TFNetwork net:\n :param str layer_name:\n :param bool prev:\n :return: same structure as state vars, type tf.Tensor\n '
layer = net.layers[(('prev:' + layer_name) if prev else layer_name)]
out = {}
if (layer_name in init_outputs):
out['output'] = layer.output.placeholder
if (layer_name in init_extra):
out['extra'] = layer.rec_vars_outputs
return out
with tf.name_scope('state_delayed'):
layers_prev_prev = {}
for layer_name in layers_delayed.union(layers_delayed_prev_deps):
layers_prev_prev[layer_name] = _LayerStateHelper(layer_name, 'state_delayed')
with tf.name_scope('state'):
layers_prev = {}
for layer_name in layers_cur_iteration:
layers_prev[layer_name] = _LayerStateHelper(layer_name, 'state')
self._set_construction_state_in_loop()
for layer_name in layers_cur_iteration:
layers_prev[layer_name].get_reads_once()
for layer_name in layers_delayed.union(layers_delayed_prev_deps):
layers_prev_prev[layer_name].get_reads_once()
with tf.name_scope('delayed_state_update'):
def _delayed_state_update():
self._construct_custom(net=self.net_delayed_update, prev_state={layer_name: layers_prev_prev[layer_name].get_reads_once() for layer_name in layers_delayed.union(layers_delayed_prev_deps)}, cur_state={layer_name: layers_prev[layer_name].get_reads_once() for layer_name in layers_cur_iteration}, needed_outputs=layers_delayed)
control_deps = []
for layer_name in layers_delayed.union(layers_delayed_prev_deps):
control_deps += nest.flatten(layers_prev_prev[layer_name].get_reads_once())
for layer_name in choice_layers:
control_deps.append(self.net_delayed_update.layers[layer_name].output.placeholder)
with tf.control_dependencies(control_deps):
ops = []
for layer_name in layers_delayed.union(layers_delayed_prev_deps):
if (layer_name in layers_cur_iteration):
state = layers_prev[layer_name].get_reads_once()
else:
assert (layer_name in layers_delayed)
state = _LayerStateHelper.get_from_net(self.net_delayed_update, layer_name)
ops += layers_prev_prev[layer_name].assigns_flat(state)
return tf.group(*ops)
self.delayed_state_update_op = tf.cond(tf.greater(i, initial_i), _delayed_state_update, tf.no_op)
for v in layers_prev_prev.values():
v.reset_reads_once()
v.get_reads_once()
outputs_flat = []
for layer_name in sorted(self._initial_outputs):
if (layer_name in layers_cur_iteration):
outputs_flat.append(layers_prev[layer_name].get_reads_once()['output'])
else:
assert (layer_name in layers_delayed)
outputs_flat.append(layers_prev_prev[layer_name].get_reads_once()['output'])
extra_flat = []
for (layer_name, v) in sorted(self._initial_extra_outputs.items()):
if (layer_name in layers_cur_iteration):
state = layers_prev[layer_name].get_reads_once()['extra']
else:
assert (layer_name in layers_delayed)
state = layers_prev_prev[layer_name].get_reads_once()['extra']
assert (isinstance(v, dict) and isinstance(state, dict))
assert (set(state.keys()) == set(v.keys()))
extra_flat.append(util.sorted_values_from_dict(state))
net_vars = (outputs_flat, extra_flat)
state_update_ops = []
with tf.name_scope('cond'):
s = rec_layer.create_state_var('cond', tf.constant(True))
state_update_ops.append(s.assign(cond(i, net_vars, acc_tas, seq_len_info, allow_inf_max_len=True)))
with tf.name_scope('body'):
res = body(i, net_vars, acc_tas, seq_len_info)
assert (len(res) == len(loop_vars))
if (len(res) == 3):
(i, net_vars, acc_tas) = res
seq_len_info = None
else:
(i, net_vars, acc_tas, seq_len_info) = res
if seq_len_info:
state_update_ops += rec_layer.assign_state_vars_recursive_flatten(('end_flag', 'dyn_seq_len'), seq_len_info)
if rec_layer.rec_step_by_step_opts['update_i_in_graph']:
state_update_ops.append(rec_layer.state_vars['i'].assign(i))
for layer_name in layers_cur_iteration:
state = _LayerStateHelper.get_from_net(self.net, layer_name)
state_update_ops += layers_prev[layer_name].assigns_flat(state)
self.state_update_op = tf.group(*state_update_ops)
return res
def _construct_custom(self, net, prev_state, cur_state, needed_outputs):
"""
This is a simplified version of _SubnetworkRecCell._construct without search logic.

:param TFNetwork net:
:param prev_state:
:param cur_state:
:param needed_outputs:
"""
assert isinstance(net, TFNetwork)
prev_layers = {}
def _add_predefined_layer(layer_name, state_dict, prev):
assert isinstance(state_dict, dict)
assert set(state_dict.keys()).issubset({'output', 'extra'})
try:
layer = self.layer_data_templates[layer_name].copy_as_prev_time_frame(prev_output=state_dict.get('output', None), rec_vars_prev_outputs=state_dict.get('extra', None))
except Exception as exc:
self._handle_construct_exception(description=('in-loop init of prev layer %r' % layer_name), exception=exc)
raise
layer.network = net
if prev:
prev_layers[layer_name] = layer
net.layers[(('prev:%s' % layer_name) if prev else layer_name)] = layer
for (name, state_dict) in prev_state.items():
_add_predefined_layer(name, state_dict, prev=True)
for (name, state_dict) in cur_state.items():
_add_predefined_layer(name, state_dict, prev=False)
def get_layer(name):
'\n :param str name: layer name\n :rtype: LayerBase\n '
if name.startswith('prev:'):
return prev_layers[name[len('prev:'):]]
if name.startswith('base:'):
return self._get_parent_layer(name[len('base:'):])
try:
layer = net.construct_layer(self.net_dict, name=name, get_layer=get_layer)
if (name == 'end'):
assert (layer.output.shape == ()), ("%s: 'end' layer %r unexpected shape" % (self.parent_rec_layer, layer))
prev_end_layer = net.layers['prev:end']
choices = layer.get_search_choices()
if choices:
prev_end_layer = choices.translate_to_this_search_beam(prev_end_layer)
with tf.name_scope('end_flag'):
layer.output.placeholder = tf.logical_or(prev_end_layer.output.placeholder, layer.output.placeholder)
return layer
except Exception as exc:
self._handle_construct_exception(description=('in-loop construction of layer %r' % name), exception=exc)
raise
for layer_name in sorted(needed_outputs):
get_layer(layer_name)
def _construct(self, prev_outputs, prev_extra, i, data=None, inputs_moved_out_tas=None, needed_outputs=('output',)):
"""
This is called from within the body of the while loop
(`tf.while_loop` in :class:`RecLayer` but here only a single step)
to construct the subnetwork, which is performed step by step.

:param dict[str,tf.Tensor] prev_outputs:
:param dict[str,dict[str,tf.Tensor]] prev_extra:
:param tf.Tensor i: loop counter. scalar, int32, current step (time)
:param dict[str,tf.Tensor] data: via tensor arrays
:param dict[str,tf.TensorArray]|None inputs_moved_out_tas:
:param set[str] needed_outputs:
"""
assert (data is not None)
for (key, value) in list(data.items()):
if (key != 'source'):
continue
assert (key in self.net.extern_data.data)
data_ = self.net.extern_data.data[key]
data[key] = self._tiled(data_, value)
all_dyn_parent_dim_tags = set()
for layer in self.parent_net.get_root_network().get_all_layers_deep():
if (self.net in layer.network.get_network_hierarchy()):
continue
all_dyn_parent_dim_tags.update([dim for dim in layer.output.dim_tags if ((dim.dimension is None) and (not dim.is_batch_dim()))])
for layer in self.layer_data_templates.values():
assert isinstance(layer, _TemplateLayer)
dim_tags = list(layer.output.dim_tags)
for (_i, tag) in enumerate(dim_tags):
if (tag in all_dyn_parent_dim_tags):
dim_tags[_i] = self.get_parent_dim_tag(tag)
layer.output = layer.output.copy_template_new_dim_tags(dim_tags)
layer.kwargs['output'] = layer.output
super(SubnetworkRecCellSingleStep, self)._construct(prev_outputs=prev_outputs, prev_extra=prev_extra, i=i, data=data, inputs_moved_out_tas=inputs_moved_out_tas, needed_outputs=needed_outputs)
|
class RecStepByStepLayer(RecLayer):
"""
Represents a single step of :class:`RecLayer`.
The purpose is to execute a single step only.
This also takes care of all needed state, and stochastic (maybe latent) variables (via :class:`ChoiceLayer`).
All the state is kept in *state variables*, such that you can avoid feeding/fetching.
Instead, any decoder implementation using this must explicitly assign the state variables.
Stochastic variables (:class:`ChoiceLayer`) are breakpoints where an external application
can implement custom logic to select hypotheses.
So this can be used to implement beam search, or to implement a custom decoder in an external application.
RASR (Sprint) is one such example which makes use of this for decoding.

The necessary meta information about the list of state vars,
ops for specific calculation steps, etc.
is stored in TF graph collections and also in a JSON file.
RASR currently only uses the TF graph collections.

There are different kinds of state vars:

- Base state vars (encoder or so), which depend only on the input data and are not updated by the decoder loop.
- decoder_input_vars: Stochastic choice state vars, updated by the decoder loop.
- decoder_output_vars: Stochastic scores state vars, updated by the decoder loop.
- (Deterministic) loop state vars, updated by the decoder loop.

RASR does the following logic (currently only a single stochastic var supported)::

    # initial state vars
    encode_ops(input_placeholders=...)  # -> assign (base and loop) state vars

    # state_vars are only the loop state vars here, not the base state vars.
    # Store initial loop state values.
    # Note that this is not really necessary always (including the logic described here)
    # because we anyway then assign them again in the first iteration of the decoder loop
    # and never need them otherwise again.
    state_vars.readout()

    # Main decoder loop
    while seq_not_ended(...):

        for hyp in current_hyps:  # (in practice, this is partially batched)

            state_vars.assign(...)  # for current hyp (might be skipped in first decoder loop iteration)
            decoder_input_vars.assign(...)  # for current hyp, the previous choice

            update_ops(decoder_input_vars)  # -> assign/update potentially some loop state vars

            # Assume only a single decode_ops here (single ChoiceLayer).
            # decoder_input_vars contains prev choices
            decode_ops(state_vars, decoder_input_vars)  # -> assign decoder_output_vars (scores)

            # note that decoder_input_vars for current hyp is still the previous (!) choice

            post_update_ops(state_vars, decoder_input_vars)  # -> assign new state_vars

            state_vars.readout()  # readout and store them for this hyp

        < select new hyps and prune some away, based on scores >

Because RASR is the main application, we adopt the recurrence logic of the RecLayer to be compatible with RASR.

Note that the update_ops, decode_ops and post_update_ops all depend on the previous state vars (obviously),
or maybe decode_ops would depend on some updated state vars, updated by update_ops,
or maybe post_update_ops would depend on some updated state vars, updated by decode_ops or update_ops.
The logic flow is as outlined above.
And all ops depend on the previous (!) choice vars and not on the current choice vars.
This is in contrast to RETURNN, where the final loop state vars are potentially based on the current choice var.

One way to solve this is by delaying the state vars by one iteration.
So when "prev state vars" would normally refer to "prev:layer" (also including hidden state),
now it would refer to "prev:prev:layer".
Then for the decode_ops, we need to make sure first that we transfer "prev:prev:layer" to "prev:layer"
and then we can calculate it as normal.
Even when we merge the post_update_ops with the decode_ops, in many models
this still requires doing a lot of (most of) the computation redundantly twice,
because for computing the decode_ops, we do most of the same computation which we would do for computing
"layer" based on "prev:layer", but "layer" is not used here because we delayed the state vars by one iteration.

We might keep all state vars twice, once for the current iteration (when possible),
and once for the delayed iteration (when needed).
But this is bad because this would cause a lot of overhead when the decoder needs to copy around the state vars.
This is the case for RASR, which handles a high number of hypotheses, where the state vars are stored in CPU memory.

As mentioned before, decode_ops shares a lot of the computation with updating "layer" based on "prev:layer".
Or said differently, the calculation of "layer" is often not dependent on the current choice var.
When we can figure out the dependencies exactly, we can know which state vars can stay in the current iteration,
and which need to be delayed.
There are potentially also some state vars which we need to keep both for the current and previous iteration
in order to be able to compute the delayed update when this depends on it.

So, for the loop state vars, we have two cases:

- It refers to the current iteration.
- It refers to the previous iteration.

For some layer (which is part of the state because it is accessed via "prev:layer" or has hidden state),
we can have three cases:

- Loop state vars referring to the current iteration.
- Loop state vars referring to the previous iteration.
- Loop state vars for both the current and previous iteration.

The update of delayed state vars must take extra care in the first frame.
It is initialized with the initial state (the layers' initial_output and initial hidden state),
and in the first frame, it would just not update this.
Only from the second frame on, it is delayed.

How do we implement this logic here?

First we aggressively search for layers which can stay in the current iteration,
i.e. which do not depend on any of the :class:`ChoiceLayer`s.
The update calculation from "prev:layer" to "layer" would be done jointly with decode_ops
to avoid redundant computation.
This should be the first decode_ops if there are multiple.
This makes the post_update_ops obsolete.

For delayed state vars, before we do anything else,
in the beginning of the iteration, we need to update "prev:prev:layer" to "prev:layer",
or also set "prev:layer" based on the prev choices (e.g. "prev:output").
For this, we need to know which dependencies on the state are needed for "layer" ("prev:layer"),
and then all such dependencies need to be stored as delayed state vars as well.
Then everything else in the iteration can be done as usual.
This first step will be the update_ops.

So first we need to figure out which state vars are needed of what kind.
Then we must construct the update_ops for one part of the layers.
Then we must construct the decode_ops (including merged post_update_ops) for another set of layers,
and there can be overlap both in the inputs and outputs,
so these must be two separate constructions.

Note that RASR only needs to know about the loop state vars, not the base state vars,
and the stochastic state vars are also handled separately.
RASR will re-assign and readout the loop state vars in every loop iteration.

---

If you want to implement beam search in an external application which uses the compiled graph,
you would compile the graph with search_flag disabled (such that RETURNN does not do any related logic),
enable this recurrent step-by-step compilation, and then do the following TF session runs:

* Call the initializers of all the state variables, which also includes everything from the base (e.g. encoder),
  while feeding in the placeholders for the input features.
  This is "init_op" in the info json, "encode_ops" in the TF graph collections.
  All further session runs should not need any feed values. All needed state should be in state vars.
  This will init all state vars, except stochastic_var_*.
* For each decoder step:

  * For each stochastic variable (both latent variables and observable variables),
    in the order as in "stochastic_var_order" in the info json:

    - This calculation depends on all state vars
      (except stochastic_var_scores_*, and only the dependent other stochastic_var_choice_*).
      I.e. all those state vars must be assigned accordingly.
    - Calculate `"stochastic_var_scores_%s" % name` (which are the probabilities in +log space).
      This is "calc_scores_op" in the info json, "decode_ops" in the TF graph collections.
    - Do a choice, build up a beam of hypotheses.
    - Set the stochastic state var `"stochastic_var_choice_%s" % name` to the selected values (label indices).
    - If the beam has multiple items, i.e. the batch dimension changed, you must make sure
      that all further used state variables will also have the same batch dim.

  * Do a single session run for the next values of these state vars:
    "i", "end_flag" (if existing), "dyn_seq_len" (if existing), "state_*" (multiple vars).
    These are also the state vars which will get updated in every further recurrent step.
    The "base_*" state vars are always kept (although you might need to update the batch dim),
    and the "stochastic_var_*" state vars have the special logic for the stochastic variables.
    This is "next_step_op" in the info json.

Thus, the info json should contain the following:

* "state_vars": dict[str, dict[str, Any]]. name -> dict with:

  * "var_op": str
  * "shape": tuple[int|None,...]. including batch dim. always batch-major, or scalar.
  * "dtype": str

  The state vars usually are:

  * "i"
  * "end_flag" (if existing)
  * "dyn_seq_len" (if existing)
  * "state_*" (multiple vars)
  * "base_*" (multiple vars). set only once via init_op, maybe then batch-tiled, but otherwise not changed for one seq.
  * "stochastic_var_*" (multiple vars)

  However, no specific assumptions should be made on any of these.
  It should not matter to the decoder what we have here.
* "stochastic_vars": dict[str,dict[str, str]]. name -> dict with:

  * "calc_scores_op": str. op to calculate the scores state var
  * "scores_state_var": str. == "stochastic_var_scores_%s" % name
  * "choice_state_var": str. == "stochastic_var_choice_%s" % name

* "stochastic_var_order": list[str]. the order of the stochastic vars.

* "init_op": str. op. the initializer for all state vars, including encoder.
* "next_step_op": str. op. the update op for all state vars.

---

This layer class derives from :class:`RecLayer` and adopts the logic to be able
to construct the mentioned ops for step-by-step execution.

The final ops are constructed in :func:`post_compile`
and put into corresponding TF graph collections and the info json.

However, the main construction logic happens before, in :func:`__init__`,
and then further in :func:`SubnetworkRecCellSingleStep._while_loop`.

---

See https://github.com/rwth-i6/returnn/pull/874 for some discussion.
"""
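# A minimal driving sketch (comments only, assumptions marked): the collection names
# ('encode_ops', 'update_ops', 'decode_ops') and the state-var naming scheme below are the
# ones produced by post_compile(); the feed values, the single stochastic variable named
# 'output' and the session setup are hypothetical examples of an external decoder, not part of this code.
#
#   session.run(tf_compat.v1.get_collection('encode_ops'), feed_dict={...})  # init all state vars from the input
#   while not finished:                                        # external stopping criterion / hypothesis handling
#       session.run(tf_compat.v1.get_collection('update_ops'))    # delayed state update ("prev:prev" -> "prev")
#       session.run(tf_compat.v1.get_collection('decode_ops'))    # fill stochastic_var_scores_* (and update state)
#       scores_op = info['state_vars']['stochastic_var_scores_output']['var_op']  # from the info json
#       scores = session.run(session.graph.get_tensor_by_name(scores_op + ':0'))
#       # ...select hypotheses externally, then assign the chosen labels to the
#       # 'stochastic_var_choice_output' state var before the next iteration.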
layer_class = 'rec_step_by_step'
SubnetworkRecCell = SubnetworkRecCellSingleStep
class ConstructionState():
'construction states'
GetSources = Entity('get_sources')
Init = Entity('init')
InLoop = Entity('in_body')
@classmethod
def prepare_compile(cls, rec_layer_name, net_dict, opts):
'\n :param str rec_layer_name:\n :param dict[str,dict[str]] net_dict:\n :param dict[str] opts:\n :return: nothing, will prepare globally, and modify net_dict in place\n '
register_layer_class(RecStepByStepLayer)
register_layer_class(ChoiceStateVarLayer)
assert (rec_layer_name in net_dict)
rec_layer_dict = net_dict[rec_layer_name]
assert (rec_layer_dict['class'] == 'rec')
assert isinstance(rec_layer_dict['unit'], dict)
for (key, layer_dict) in list(rec_layer_dict['unit'].items()):
assert isinstance(layer_dict, dict)
if (layer_dict['class'] == 'choice'):
layer_dict['class'] = ChoiceStateVarLayer.layer_class
rec_layer_dict['class'] = RecStepByStepLayer.layer_class
rec_layer_dict['rec_step_by_step_opts'] = opts
@classmethod
def post_compile(cls, rec_layer_name, network, output_file_name=None):
'\n :param str rec_layer_name:\n :param TFNetwork network:\n :param str|None output_file_name: via command line --rec_step_by_step_output_file\n '
assert (rec_layer_name in network.layers)
rec_layer = network.layers[rec_layer_name]
assert isinstance(rec_layer, RecStepByStepLayer)
cell = rec_layer.cell
assert isinstance(cell, SubnetworkRecCellSingleStep)
rec_sub_net = cell.net
assert isinstance(rec_sub_net, TFNetwork)
info = {'state_vars': {}, 'stochastic_var_order': [], 'stochastic_vars': {}}
print('State vars:')
for (name, var) in sorted(rec_layer.state_vars.items()):
assert isinstance(name, str)
assert isinstance(var, RecStepByStepLayer.StateVar)
print((' %s: %r, shape %s, dtype %s' % (name, var.var.op.name, var.var.shape, var.var.dtype.base_dtype.name)))
info['state_vars'][name] = {'var_op': var.var.op.name, 'shape': [(int(d) if (d is not None) else None) for d in var.var_data_shape.batch_shape], 'dtype': var.var.dtype.base_dtype.name}
global_vars_coll = tf_compat.v1.get_collection_ref('global_vars')
encode_ops_coll = tf_compat.v1.get_collection_ref('encode_ops')
init_ops = []
for (name, var) in sorted(rec_layer.state_vars.items()):
assert isinstance(name, str)
assert isinstance(var, RecStepByStepLayer.StateVar)
if (not name.startswith('stochastic_var_')):
init_ops.append(var.init_op())
init_op = tf.group(*init_ops, name='rec_step_by_step_init_op')
info['init_op'] = init_op.name
encode_ops_coll.append(init_op)
decode_ops_coll = tf_compat.v1.get_collection_ref('decode_ops')
decoder_output_vars_coll = tf_compat.v1.get_collection_ref('decoder_output_vars')
decoder_input_vars_coll = tf_compat.v1.get_collection_ref('decoder_input_vars')
print('Stochastic vars, and their order:')
if rec_layer.reverse_stochastic_var_order:
stochastic_var_order = rec_layer.stochastic_var_order[::(- 1)]
else:
stochastic_var_order = rec_layer.stochastic_var_order
for name in stochastic_var_order:
print((' %s' % name))
info['stochastic_var_order'].append(name)
choice_layer = rec_sub_net.layers[name]
assert isinstance(choice_layer, ChoiceStateVarLayer)
if (name == rec_layer.stochastic_var_order[(- 1)]):
assert choice_layer.score_dependent
if choice_layer.score_dependent:
calc_scores_op = rec_layer.state_vars[('stochastic_var_scores_%s' % name)].final_op()
info['stochastic_vars'][name] = {'calc_scores_op': calc_scores_op.name, 'scores_state_var': ('stochastic_var_scores_%s' % name), 'choice_state_var': ('stochastic_var_choice_%s' % name)}
if (name == rec_layer.stochastic_var_order[(- 1)]):
calc_scores_op = tf.group(calc_scores_op, cell.state_update_op)
decode_ops_coll.append(calc_scores_op)
decoder_output_vars_coll.append(rec_layer.state_vars[('stochastic_var_scores_%s' % name)].var)
else:
info['stochastic_vars'][name] = {'choice_state_var': ('stochastic_var_choice_%s' % name)}
decoder_input_vars_coll.append(rec_layer.state_vars[('stochastic_var_choice_%s' % name)].var)
update_ops_coll = tf_compat.v1.get_collection_ref('update_ops')
update_ops_coll.append(cell.delayed_state_update_op)
tf_compat.v1.get_collection_ref('post_update_ops')
state_vars_coll = tf_compat.v1.get_collection_ref(CollectionKeys.STATE_VARS)
for (name, var) in sorted(rec_layer.state_vars.items()):
assert isinstance(name, str)
assert isinstance(var, RecStepByStepLayer.StateVar)
if ((not name.startswith('stochastic_var_')) and (not name.startswith('base_')) and (name != 'cond')):
if var.orig_data_shape.have_batch_axis():
state_vars_coll.append(var.var)
else:
global_vars_coll.append(var.var)
import json
info_str = json.dumps(info, sort_keys=True, indent=2)
if (not output_file_name):
print('No rec-step-by-step output file name specified, not storing this info.')
print('JSON:')
print(info_str)
else:
with open(output_file_name, 'w') as f:
f.write(info_str)
print('Stored rec-step-by-step info JSON in file:', output_file_name)
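# For orientation, a hypothetical (abridged) example of the info JSON written above;
# op names, the stochastic variable name 'output' and the score dimension are made up:
#
#   {
#     "init_op": "rec_step_by_step_init_op",
#     "state_vars": {
#       "i": {"var_op": "IO/i", "shape": [], "dtype": "int32"},
#       "stochastic_var_scores_output": {
#         "var_op": "IO/stochastic_var_scores_output", "shape": [null, 10025], "dtype": "float32"}
#     },
#     "stochastic_var_order": ["output"],
#     "stochastic_vars": {
#       "output": {"calc_scores_op": "...",
#                  "scores_state_var": "stochastic_var_scores_output",
#                  "choice_state_var": "stochastic_var_choice_output"}
#     }
#   }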
class StateVar():
"""
Represents a state variable, i.e. either a state, a choice, or encoder state, etc.
"""
def __init__(self, parent, name, initial_value, data_shape):
"""
:param RecStepByStepLayer parent:
:param str name:
:param tf.Tensor|None initial_value:
  initial_value might have dim 1 in variable dimensions (which are not the batch-dim-axis),
  see get_rec_initial_output, which should be fine for broadcasting.
:param Data data_shape:
  Describes the shape of initial_value, and also what we store as self.orig_data_shape,
  and what we return by self.read().
  If it is not a scalar, and batch-dim-axis > 0, the created variable will still be in batch-major.
"""
self.parent = parent
self.name = name
self.orig_data_shape = data_shape
self.var_data_shape = (data_shape.copy_as_batch_major() if (data_shape.batch_dim_axis is not None) else data_shape)
del data_shape
self.orig_initial_value = initial_value
if ((initial_value is not None) and (self.orig_data_shape.batch_dim_axis not in (0, None))):
x = self.orig_data_shape.copy()
x.placeholder = initial_value
x = x.copy_compatible_to(self.var_data_shape)
initial_value = x.placeholder
self.var_initial_value = initial_value
del initial_value
zero_initializer = tf_util.zeros_dyn_shape(shape=self.var_data_shape.batch_shape, dtype=self.var_data_shape.dtype)
with helper_variable_scope():
self.var = tf_compat.v1.get_variable(name=tf_util.get_valid_scope_name_from_str(name), initializer=zero_initializer, validate_shape=False)
self.var.set_shape(self.var_data_shape.batch_shape)
self._init_op = None
print(('New state var %r: %s, shape %s' % (name, self.var, self.var_data_shape)))
self.final_value = None
def __repr__(self):
return ('<StateVar %r, shape %r, initial %r>' % (self.name, self.var_data_shape, self.orig_initial_value))
def set_final_value(self, final_value):
'\n :param tf.Tensor final_value:\n '
assert (self.final_value is None)
assert isinstance(final_value, tf.Tensor)
self.final_value = final_value
def read(self):
'\n :return: tensor in the format of self.orig_data_shape\n :rtype: tf.Tensor\n '
value = self.var.read_value()
if (self.orig_data_shape.batch_dim_axis in (0, None)):
return value
x = self.var_data_shape.copy()
x.placeholder = value
x = x.copy_compatible_to(self.orig_data_shape)
return x.placeholder
def init_op(self):
'\n :return: op which assigns self.var_initial_value to self.var\n :rtype: tf.Operation\n '
if self._init_op:
return self._init_op
assert (self.var_initial_value is not None)
self._init_op = tf_compat.v1.assign(self.var, self.var_initial_value, name=('init_state_var_%s' % self.name)).op
return self._init_op
def final_op(self):
'\n :return: op which does self.var.assign(self.final_value) (final value maybe converted)\n :rtype: tf.Operation\n '
assert (self.final_value is not None)
return self.assign(self.final_value)
def assign(self, value):
'\n :return: op which does self.var.assign(value) (value maybe converted)\n :rtype: tf.Operation\n '
from returnn.tf.util.basic import find_ops_path_output_to_input
feed_tensors = []
for data in self.parent.network.extern_data.data.values():
feed_tensors.append(data.placeholder)
feed_tensors.extend(data.size_placeholder.values())
path = find_ops_path_output_to_input(fetches=value, tensors=feed_tensors)
assert (not path), ('There should be no path from extern data to %s final op value, but there is:\n%s' % (self, '\n'.join(map(repr, path))))
if (self.orig_data_shape.batch_dim_axis not in (0, None)):
x = self.orig_data_shape.copy()
x.placeholder = value
x = x.copy_compatible_to(self.var_data_shape)
value = x.placeholder
return tf_compat.v1.assign(self.var, value, name=('final_state_var_%s' % self.name)).op
def __init__(self, _orig_sources, sources, network, name, output, unit, axis, rec_step_by_step_opts, reverse_stochastic_var_order=False, **kwargs):
'\n :param str|list[str]|None _orig_sources:\n :param list[LayerBase] sources:\n :param returnn.tf.network.TFNetwork network:\n :param str name:\n :param Data output:\n :param SubnetworkRecCellSingleStep unit:\n :param Dim axis:\n :param dict[str] rec_step_by_step_opts:\n :param bool reverse_stochastic_var_order:\n '
assert isinstance(unit, SubnetworkRecCellSingleStep)
kwargs = kwargs.copy()
kwargs['optimize_move_layers_out'] = False
self.state_vars = {}
self.stochastic_var_order = []
self.stochastic_vars = {}
self._parent_tile_multiples = None
self.construction_state = self.ConstructionState.GetSources
self.reverse_stochastic_var_order = reverse_stochastic_var_order
self.rec_step_by_step_opts = rec_step_by_step_opts
self.network = network
self.name = name
self.output = output
unit.set_parent_layer(self)
if _orig_sources:
if isinstance(_orig_sources, (list, tuple)):
assert (len(sources) == len(_orig_sources))
else:
assert isinstance(_orig_sources, str)
assert (len(sources) == 1)
_orig_sources = [_orig_sources]
assert (unit.parent_rec_layer is self)
sources = unit.get_sources(_orig_sources)
else:
assert (not sources)
assert isinstance(axis, Dim)
if (axis and sources and sources[0].base_layer.output.have_dim_tag(axis)):
base_axis = axis
axis = unit.get_parent_dim_tag(base_axis)
unit.time_dim_tag = axis
unit._time_dim_tags.add(axis)
unit.net._inside_rec_time_dim = axis
if output.have_dim_tag(base_axis):
output = output.copy_template_replace_dim_tag(axis=output.get_axis_from_description(base_axis), new_dim_tag=axis)
self.construction_state = self.ConstructionState.Init
if self.have_base_state_vars():
self._set_global_batch_dim(self.get_batch_dim_from_base_state_var())
super(RecStepByStepLayer, self).__init__(sources=sources, network=network, name=name, output=output, unit=unit, axis=axis, **kwargs)
def set_construction_state_in_loop(self):
'\n Set that we entered the body.\n '
self.construction_state = self.ConstructionState.InLoop
self._set_global_batch_dim(self.get_batch_dim_from_loop_state_var())
def _set_global_batch_dim(self, batch_dim):
'\n :param tf.Tensor batch_dim:\n '
self.network.get_global_batch_info().dim = batch_dim
def get_parent_tile_multiples(self):
'\n :rtype: tf.Tensor\n '
if (self._parent_tile_multiples is not None):
return self._parent_tile_multiples
with tf_util.reuse_name_scope('parent_tile_multiples', absolute=True):
base_batch_dim = self.get_batch_dim_from_base_state_var()
loop_batch_dim = self.get_batch_dim_from_loop_state_var()
with tf.control_dependencies([tf.Assert(tf.equal((loop_batch_dim % base_batch_dim), 0), ['loop_batch_dim', loop_batch_dim, 'base_batch_dim', base_batch_dim])]):
self._parent_tile_multiples = (loop_batch_dim // base_batch_dim)
return self._parent_tile_multiples
def create_state_var(self, name, initial_value=None, data_shape=None):
'\n A state var is a variable where the initial value is given by the encoder, or a constant,\n and the final value is determined by one step of this rec layer (usually called the decoder).\n\n :param str name:\n :param tf.Tensor|None initial_value: assumes batch-major, if data_shape is not given\n :param Data|None data_shape:\n :rtype: RecStepByStepLayer.StateVar\n '
assert (name not in self.state_vars)
assert (data_shape or (initial_value is not None))
if data_shape:
assert isinstance(data_shape, Data)
if (data_shape.have_batch_axis() and (initial_value is not None)):
assert (initial_value.shape.dims[data_shape.batch_dim_axis].value is None)
elif (initial_value.shape.ndims == 0):
data_shape = Data(name=name, batch_dim_axis=None, shape=(), dtype=initial_value.dtype.name)
else:
assert (initial_value.shape.dims[0].value is None)
data_shape = Data(name=name, batch_dim_axis=0, shape=initial_value.shape.as_list()[1:], dtype=initial_value.dtype.name)
if (initial_value is not None):
initial_value.set_shape(data_shape.batch_shape)
var = self.StateVar(parent=self, name=name, initial_value=initial_value, data_shape=data_shape)
self.state_vars[name] = var
return var
def set_state_var_final_value(self, name, final_value):
'\n :param str name:\n :param tf.Tensor final_value:\n '
self.state_vars[name].set_final_value(final_value)
def create_state_vars_recursive(self, name_prefix, initial_values, data_shapes=None):
'\n :param str|tuple[str] name_prefix: single or same structure as initial_values\n :param T initial_values:\n :param data_shapes: same structure as initial_values or None, but values are of instance :class:`Data`\n :return: same as initial_values, but the state vars\n '
def _map_state_vars(path, name, initial_value, data_shape):
assert isinstance(initial_value, tf.Tensor)
assert ((data_shape is None) or isinstance(data_shape, Data))
if (not name):
name = '/'.join(map(str, ((name_prefix,) + path)))
return self.create_state_var(name=name, initial_value=initial_value, data_shape=data_shape)
if isinstance(name_prefix, str):
name_per_entry = nest.map_structure((lambda v: None), initial_values)
else:
name_per_entry = name_prefix
if (data_shapes is None):
data_shapes = nest.map_structure((lambda v: None), initial_values)
nest.assert_same_structure(initial_values, name_per_entry)
nest.assert_same_structure(initial_values, data_shapes)
return nest.map_structure_with_tuple_paths(_map_state_vars, name_per_entry, initial_values, data_shapes)
def assign_state_vars_recursive_flatten(self, name_prefix, values):
'\n :param str|tuple[str] name_prefix:\n :param T values:\n :rtype: list[tf.Operation]\n '
def _map_state_vars(path, name, value):
assert isinstance(value, tf.Tensor)
if (not name):
name = '/'.join(map(str, ((name_prefix,) + path)))
return self.state_vars[name].assign(value)
if isinstance(name_prefix, str):
name_per_entry = nest.map_structure((lambda v: None), values)
else:
name_per_entry = name_prefix
nest.assert_same_structure(values, name_per_entry)
return nest.flatten(nest.map_structure_with_tuple_paths(_map_state_vars, name_per_entry, values))
def have_base_state_vars(self):
'\n :rtype: bool\n '
for (name, _) in sorted(self.state_vars.items()):
assert isinstance(name, str)
if name.startswith('base_'):
return True
return False
def get_batch_dim_from_base_state_var(self):
'\n :return: batch-dim, from some (any) base state var, scalar, int32\n :rtype: tf.Tensor|int\n '
for (name, v) in sorted(self.state_vars.items()):
assert isinstance(name, str)
assert isinstance(v, RecStepByStepLayer.StateVar)
if name.startswith('base_'):
if v.var_data_shape.have_batch_axis():
with tf_util.reuse_name_scope(('batch_dim_from_base_state_var_%s' % v.name), absolute=True):
return tf.shape(v.var.value())[v.var_data_shape.batch_dim_axis]
raise Exception(('None of the base state vars have a batch-dim: %s' % self.state_vars))
def get_batch_dim_from_loop_state_var(self):
'\n :return: batch-dim, from some (any) loop state var, scalar, int32\n :rtype: tf.Tensor|int\n '
for (name, v) in sorted(self.state_vars.items()):
assert isinstance(v, RecStepByStepLayer.StateVar)
if ((not name.startswith('base_')) and (not name.startswith('stochastic_var_'))):
if (v.var_data_shape.batch_dim_axis is not None):
with tf_util.reuse_name_scope(('batch_dim_from_state_%s' % v.name), absolute=True):
return tf.shape(v.var.value())[v.var_data_shape.batch_dim_axis]
raise Exception(('None of the loop state vars have a batch-dim: %s' % self.state_vars))
class StochasticVar():
'\n Manages a stochastic variable, which corresponds to a :class:`ChoiceLayer`.\n '
def __init__(self, parent_rec_layer, layer_template):
'\n :param RecStepByStepLayer parent_rec_layer:\n :param _TemplateLayer layer_template:\n '
self.parent_rec_layer = parent_rec_layer
self.name = layer_template.name
self.choice_layer_opts = layer_template.kwargs
self.score_dependent = self.choice_layer_opts.get('score_dependent', True)
self.score_state_var = None
if self.score_dependent:
sources = layer_template.kwargs['sources']
assert (len(sources) == 1)
(source_template,) = sources
self.score_state_var = self.parent_rec_layer.create_state_var(name=('stochastic_var_scores_%s' % self.name), data_shape=source_template.output)
self.choice_state_var = self.parent_rec_layer.create_state_var(name=('stochastic_var_choice_%s' % self.name), data_shape=layer_template.output)
def assign_score(self, source):
'\n :param LayerBase source:\n '
assert self.score_dependent
assert (source.output.is_batch_major and (len(source.output.shape) == 1))
scores_in = source.output.placeholder
input_type = self.choice_layer_opts.get('input_type', 'prob')
if (input_type == 'prob'):
if source.output_before_activation:
scores_in = source.output_before_activation.get_log_output()
else:
from returnn.tf.util.basic import safe_log
scores_in = safe_log(scores_in)
elif (input_type == 'log_prob'):
pass
else:
raise ValueError(('Unhandled input_type %r' % (input_type,)))
self.score_state_var.set_final_value(final_value=scores_in)
def get_choice(self):
'\n :return: the choice value\n :rtype: tf.Tensor\n '
return self.choice_state_var.read()
def add_stochastic_var(self, name):
'\n :param str name:\n :rtype: RecStepByStepLayer.StochasticVar\n '
assert (name not in self.stochastic_vars)
cell = self.cell
assert isinstance(cell, SubnetworkRecCellSingleStep)
self.stochastic_vars[name] = self.StochasticVar(self, cell.layer_data_templates[name])
return self.stochastic_vars[name]
@classmethod
def transform_config_dict(cls, d, network, get_layer):
'\n :param dict[str] d: will modify inplace\n :param returnn.tf.network.TFNetwork network:\n :param ((str) -> LayerBase) get_layer: function to get or construct another layer\n '
d['_orig_sources'] = d.get('from', 'data')
super(RecStepByStepLayer, cls).transform_config_dict(d, network=network, get_layer=get_layer)
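# Added illustrative sketch (not part of RecStepByStepLayer): it shows the state-var lifecycle
# in isolation, assuming TF1-style graph mode and the `tf`/`tf_compat` imports used above.
# A state var is (1) initialized via an explicit assign op (like StateVar.init_op), (2) read
# inside one decoder step (like StateVar.read), and (3) re-assigned with the step's result
# (like StateVar.final_op), so repeated session.run() calls advance the recurrence one step.
def _state_var_lifecycle_sketch():
    graph = tf.Graph()
    with graph.as_default():
        var = tf_compat.v1.Variable(0.0, trainable=False, name='sketch_state')
        init_op = tf_compat.v1.assign(var, 0.0).op
        step_value = var.read_value() + 1.0  # stands in for one decoder step
        update_op = tf_compat.v1.assign(var, step_value).op
        with tf_compat.v1.Session(graph=graph) as session:
            session.run(init_op)
            for _ in range(3):
                session.run(update_op)
            assert session.run(var.read_value()) == 3.0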
|
class ChoiceStateVarLayer(LayerBase):
"\n Like :class:`ChoiceLayer`, but we don't do the search/choice ourselves,\n instead we store the scores in a variable, and the final result is another variable,\n which is expected to be set externally.\n This is expected to be used together with :class:`RecStepByStepLayer`.\n "
layer_class = 'choice_state_var'
def __init__(self, beam_size, search=NotSpecified, input_type='prob', prob_scale=1.0, base_beam_score_scale=1.0, random_sample_scale=0.0, length_normalization=True, custom_score_combine=None, source_beam_sizes=None, scheduled_sampling=False, cheating=False, explicit_search_sources=None, score_dependent=True, **kwargs):
super(ChoiceStateVarLayer, self).__init__(**kwargs)
rec_layer = self.network.parent_layer
assert isinstance(rec_layer, RecStepByStepLayer)
self.stochastic_var = rec_layer.stochastic_vars[self.name]
cell = rec_layer.cell
assert isinstance(cell, SubnetworkRecCellSingleStep)
assert (self.network in {cell.net, cell.net_delayed_update})
if (self.network == cell.net):
rec_layer.stochastic_var_order.append(self.name)
self.score_dependent = score_dependent
if self.score_dependent:
assert (len(self.sources) == 1)
(source,) = self.sources
self.stochastic_var.assign_score(source)
self.output.placeholder = self.stochastic_var.get_choice()
@classmethod
def transform_config_dict(cls, d, network, get_layer):
'\n :param dict[str] d: will modify inplace\n :param returnn.tf.network.TFNetwork network:\n :param ((str) -> LayerBase) get_layer: function to get or construct another layer\n '
assert (d.get('from', NotSpecified) is not NotSpecified), "specify 'from' explicitly for choice layer"
if (not isinstance(d['from'], (tuple, list))):
d['from'] = [d['from']]
if (d.get('target', NotSpecified) is not None):
assert ('target' in d), ("%s: specify 'target' explicitly" % (cls.__name__,))
if isinstance(d['target'], str):
d['target'] = [d['target']]
assert isinstance(d['target'], list)
assert (len(d['target']) == len(d['from']))
if d.get('explicit_search_source'):
assert ('explicit_search_sources' not in d)
d['explicit_search_sources'] = [get_layer(d.pop('explicit_search_source'))]
elif d.get('explicit_search_sources'):
assert isinstance(d['explicit_search_sources'], (list, tuple))
d['explicit_search_sources'] = [get_layer(name) for name in d['explicit_search_sources']]
parent_rec_layer = network.parent_layer
if parent_rec_layer:
assert isinstance(parent_rec_layer, RecStepByStepLayer)
cell = parent_rec_layer.cell
assert isinstance(cell, SubnetworkRecCellSingleStep)
assert (network in {cell.net, cell.net_delayed_update})
if (network == cell.net_delayed_update):
d['from'] = ()
d['score_dependent'] = False
super(ChoiceStateVarLayer, cls).transform_config_dict(d, network=network, get_layer=get_layer)
@classmethod
def get_out_data_from_opts(cls, name, sources, target, network, **kwargs):
'\n :param str name:\n :param list[LayerBase] sources:\n :param str target:\n :param returnn.tf.network.TFNetwork network:\n :rtype: Data\n '
target = (target[0] if isinstance(target, list) else target)
if target:
out_data = cls._static_get_target_value(target=target, network=network, mark_data_key_as_used=False).copy_template(name=('%s_output' % name))
out_data.available_for_inference = True
else:
out_data = sources[0].output.copy_template().copy_as_batch_major()
dim_tags = list(out_data.dim_tags)
del dim_tags[out_data.feature_dim_axis]
out_data = Data(name=('%s_output' % name), dim_tags=dim_tags, sparse=True, dim=out_data.dim, batch=(out_data.batch.copy_set_beam(None) if out_data.batch else network.get_global_batch_info()))
return out_data
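# Added note (illustrative, simplified): the intended runtime protocol with these layers is
# roughly: (1) run the init ops of all state vars (encoder outputs, step counter, ...);
# (2) run one decoder step and fetch the 'stochastic_var_scores_<name>' variable written by
# assign_score(); (3) perform the search / sampling externally and assign the chosen labels to
# 'stochastic_var_choice_<name>'; (4) run the remaining state-update (final-value assign) ops
# and continue with (2). The exact op/variable names to fetch depend on the compiled graph and
# the JSON meta info written by the compile tool below.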
|
def main(argv):
'\n Main entry.\n '
argparser = argparse.ArgumentParser(description='Compile some op')
argparser.add_argument('config', help='filename to config-file')
argparser.add_argument('--epoch', type=int, default=None, help='specific epoch to construct, use for dynamic network definitions')
argparser.add_argument('--train', type=int, default=0, help='0 disable (default), 1 enable, -1 dynamic')
argparser.add_argument('--eval', type=int, default=0, help='calculate losses. 0 disable (default), 1 enable')
argparser.add_argument('--search', type=int, default=0, help='beam search. 0 disable (default), 1 enable')
argparser.add_argument('--device', default='cpu', help="'cpu' (default) or 'gpu'. optimizes the graph for this device")
argparser.add_argument('--verbosity', default=4, type=int, help='5 for all seqs (default: 4)')
argparser.add_argument('--summaries_tensor_name', help='create Tensor for tf.compat.v1.summary.merge_all()')
argparser.add_argument('--rec_step_by_step', help="make step-by-step graph for this rec layer (eg. 'output')")
argparser.add_argument('--rec_step_by_step_output_file', help='store meta info for rec_step_by_step (JSON)')
argparser.add_argument('--output_file', help='allowed extensions: pb, pbtxt, meta, metatxt, logdir')
argparser.add_argument('--output_file_model_params_list', help='line-based, names of model params')
argparser.add_argument('--output_file_state_vars_list', help='line-based, name of state vars')
argparser.add_argument('--update_i_in_graph', action='store_true', help='whether to update i in the graph')
args = argparser.parse_args(argv[1:])
assert ((args.train in [0, 1, (- 1)]) and (args.eval in [0, 1]) and (args.search in [0, 1]))
init(config_filename=args.config, log_verbosity=args.verbosity, device=args.device)
if (args.epoch and ('get_network' in config.typed_dict)):
net_dict = config.typed_dict['get_network'](epoch=args.epoch)
elif (args.epoch and ('pretrain' in config.typed_dict)):
raise NotImplementedError('Compiling a network at a specific epoch using pre-train logic is not implemented yet')
else:
assert ('network' in config.typed_dict)
net_dict = config.typed_dict['network']
assert isinstance(net_dict, dict), ('network should return dict but is %s' % type(net_dict))
if args.rec_step_by_step:
RecStepByStepLayer.prepare_compile(rec_layer_name=args.rec_step_by_step, net_dict=net_dict, opts={'update_i_in_graph': args.update_i_in_graph})
with tf.Graph().as_default() as graph:
assert isinstance(graph, tf.Graph)
print('Create graph...')
tf_compat.v1.set_random_seed(42)
if (args.train < 0):
from returnn.tf.util.basic import get_global_train_flag_placeholder
train_flag = get_global_train_flag_placeholder()
else:
train_flag = bool(args.train)
eval_flag = bool(args.eval)
search_flag = bool(args.search)
network = create_graph(train_flag=train_flag, eval_flag=eval_flag, search_flag=search_flag, net_dict=net_dict)
if args.rec_step_by_step:
RecStepByStepLayer.post_compile(rec_layer_name=args.rec_step_by_step, network=network, output_file_name=args.rec_step_by_step_output_file)
from returnn.tf.layers.base import LayerBase
for layer in network.layers.values():
assert isinstance(layer, LayerBase)
if (layer.output.time_dim_axis is None):
continue
if (layer.output.batch_dim_axis is None):
continue
with tf_util.reuse_name_scope(layer.get_absolute_name_scope_prefix()[:(- 1)], absolute=True):
out = layer.output.copy_as_batch_major()
if out.have_feature_axis():
out = out.copy_with_feature_last()
tf.identity(out.placeholder, name='output_batch_major')
tf.group(*network.get_post_control_dependencies(), name='post_control_dependencies')
tf_compat.v1.get_collection_ref(CollectionKeys.RETURNN_LAYERS).clear()
if args.summaries_tensor_name:
summaries_tensor = tf_compat.v1.summary.merge_all()
assert isinstance(summaries_tensor, tf.Tensor), 'no summaries in the graph?'
tf.identity(summaries_tensor, name=args.summaries_tensor_name)
if (args.output_file and (os.path.splitext(args.output_file)[1] in ['.meta', '.metatxt'])):
saver = tf_compat.v1.train.Saver(var_list=network.get_saveable_params_list(), max_to_keep=((2 ** 31) - 1))
graph_def = saver.export_meta_graph()
else:
graph_def = graph.as_graph_def(add_shapes=True)
print('Graph collection keys:', graph.get_all_collection_keys())
print('Graph num operations:', len(graph.get_operations()))
print('Graph def size:', util.human_bytes_size(graph_def.ByteSize()))
if args.output_file:
filename = args.output_file
(_, ext) = os.path.splitext(filename)
if (ext == '.logdir'):
print('Write TF events to logdir:', filename)
writer = tf_compat.v1.summary.FileWriter(logdir=filename)
writer.add_graph(graph)
writer.flush()
else:
assert (ext in ['.pb', '.pbtxt', '.meta', '.metatxt']), ('filename %r extension invalid' % filename)
print('Write graph to file:', filename)
graph_io.write_graph(graph_def, logdir=os.path.dirname(filename), name=os.path.basename(filename), as_text=ext.endswith('txt'))
else:
print('Use --output_file if you want to store the graph.')
if args.output_file_model_params_list:
print('Write model param list to:', args.output_file_model_params_list)
with open(args.output_file_model_params_list, 'w') as f:
for param in network.get_params_list():
assert (param.name[(- 2):] == ':0')
f.write(('%s\n' % param.name[:(- 2)]))
if args.output_file_state_vars_list:
print('Write state var list to:', args.output_file_state_vars_list)
with open(args.output_file_state_vars_list, 'w') as f:
for param in tf_compat.v1.get_collection(CollectionKeys.STATE_VARS):
assert (param.name[(- 2):] == ':0')
f.write(('%s\n' % param.name[:(- 2)]))
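# Added usage sketch; the script name, config path, layer name and output file names are
# hypothetical. It builds a step-by-step decoder graph for the rec layer 'output' and stores
# the graph together with the JSON meta info describing its state vars and stochastic vars.
def _compile_rec_step_by_step_example():
    main(['compile_tf_graph.py', 'my_config.config',
          '--rec_step_by_step', 'output',
          '--rec_step_by_step_output_file', '/tmp/rec_step_by_step.json',
          '--output_file', '/tmp/graph.meta'])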
|
def init(config_filename, log_verbosity, remaining_args=()):
'\n :param str config_filename: filename to config-file\n :param int log_verbosity:\n :param list[str] remaining_args:\n '
rnn.init_better_exchook()
rnn.init_thread_join_hack()
print(('Using config file %r.' % config_filename))
assert os.path.exists(config_filename)
rnn.init_config(config_filename=config_filename, command_line_options=remaining_args, extra_updates={'use_tensorflow': True, 'log': None, 'log_verbosity': log_verbosity, 'task': 'search'}, default_config={'debug_print_layer_output_template': True})
global config
config = rnn.config
rnn.init_log()
print(('Returnn %s starting up.' % os.path.basename(__file__)), file=log.v1)
rnn.returnn_greeting()
rnn.init_backend_engine()
assert util.BackendEngine.is_tensorflow_selected(), 'this is only for TensorFlow'
rnn.init_faulthandler()
better_exchook.replace_traceback_format_tb()
|
def prepare_compile(rec_layer_name, net_dict, cheating, dump_att_weights, hdf_filename, possible_labels):
'\n :param str rec_layer_name:\n :param dict[str] net_dict: modify inplace\n :param bool cheating:\n :param bool dump_att_weights:\n :param str hdf_filename:\n :param dict[str,list[str]] possible_labels:\n '
assert isinstance(net_dict, dict)
assert (rec_layer_name in net_dict)
rec_layer_dict = net_dict[rec_layer_name]
assert (rec_layer_dict['class'] == 'rec')
rec_layer_dict['include_eos'] = True
rec_unit = rec_layer_dict['unit']
assert isinstance(rec_unit, dict)
relevant_layer_names = []
target = None
for (name, layer_desc) in sorted(rec_unit.items()):
assert isinstance(name, str)
if name.startswith('#'):
continue
assert isinstance(layer_desc, dict)
assert ('class' in layer_desc)
class_name = layer_desc['class']
assert isinstance(class_name, str)
if (dump_att_weights and (class_name == 'softmax_over_spatial')):
print(('Dump softmax_over_spatial layer %r.' % name))
rec_unit[('_%s_spatial_sm_value' % name)] = {'class': 'copy', 'from': name, 'is_output_layer': True}
relevant_layer_names.append(('_%s_spatial_sm_value' % name))
continue
if (class_name != 'choice'):
continue
if (cheating and layer_desc['target']):
print(('Enable cheating for layer %r with target %r.' % (name, layer_desc['target'])))
layer_desc['cheating'] = True
if (name == 'output'):
target = layer_desc['target']
rec_unit[('_%s_value' % name)] = {'class': 'copy', 'from': name}
rec_unit[('_%s_src_beams' % name)] = {'class': 'choice_get_src_beams', 'from': name}
rec_unit[('_%s_beam_scores' % name)] = {'class': 'choice_get_beam_scores', 'from': name}
for name_ in [('_%s_value' % name), ('_%s_src_beams' % name), ('_%s_beam_scores' % name)]:
rec_unit[name_]['is_output_layer'] = True
relevant_layer_names.append(name_)
rec_unit[('%s_raw' % name_)] = {'class': 'decide_keep_beam', 'from': name_, 'is_output_layer': True}
relevant_layer_names.append(('%s_raw' % name_))
print('Collected layers:')
pprint(relevant_layer_names)
for (i, name) in enumerate(list(relevant_layer_names)):
full_name = ('%s/%s' % (rec_layer_name, name))
if name.endswith('_raw'):
relevant_layer_names[i] = full_name
else:
net_dict[('%s_%s_final' % (rec_layer_name, name))] = {'class': 'decide_keep_beam', 'from': full_name}
relevant_layer_names[i] = ('%s_%s_final' % (rec_layer_name, name))
net_dict[('%s__final_beam_scores_' % rec_layer_name)] = {'class': 'choice_get_beam_scores', 'from': rec_layer_name}
net_dict[('%s__final_beam_scores' % rec_layer_name)] = {'class': 'decide_keep_beam', 'from': ('%s__final_beam_scores_' % rec_layer_name)}
relevant_layer_names.append(('%s__final_beam_scores' % rec_layer_name))
net_dict[('%s_final_decided_' % rec_layer_name)] = {'class': 'decide', 'from': rec_layer_name}
net_dict[('%s_final_decided' % rec_layer_name)] = {'class': 'decide_keep_beam', 'from': ('%s_final_decided_' % rec_layer_name)}
if (target and (target in possible_labels)):
print(('Using labels from target %r.' % target))
net_dict['debug_search_dump'] = {'class': 'hdf_dump', 'filename': hdf_filename, 'from': ('%s_final_decided' % rec_layer_name), 'extra': {name.replace('/', '_'): name for name in relevant_layer_names}, 'labels': possible_labels.get(target, None), 'is_output_layer': True, 'dump_whole_batches': True}
|
def main(argv):
'\n Main entry.\n '
arg_parser = argparse.ArgumentParser(description='Dump search scores and other info to HDF file.')
arg_parser.add_argument('config', help='filename to config-file')
arg_parser.add_argument('--dataset', default='config:train')
arg_parser.add_argument('--epoch', type=int, default=(- 1), help='-1 for last epoch')
arg_parser.add_argument('--output_file', help='hdf', required=True)
arg_parser.add_argument('--rec_layer_name', default='output')
arg_parser.add_argument('--cheating', action='store_true', help='add ground truth to the beam')
arg_parser.add_argument('--att_weights', action='store_true', help='dump all softmax_over_spatial layers')
arg_parser.add_argument('--verbosity', default=4, type=int, help='5 for all seqs (default: 4)')
arg_parser.add_argument('--seq_list', nargs='+', help='use only these seqs')
(args, remaining_args) = arg_parser.parse_known_args(argv[1:])
init(config_filename=args.config, log_verbosity=args.verbosity, remaining_args=remaining_args)
dataset = init_dataset(args.dataset)
print('Dataset:')
pprint(dataset)
if args.seq_list:
dataset.seq_tags_filter = set(args.seq_list)
dataset.partition_epoch = 1
if isinstance(dataset, MetaDataset):
for sub_dataset in dataset.datasets.values():
sub_dataset.seq_tags_filter = set(args.seq_list)
sub_dataset.partition_epoch = 1
dataset.finish_epoch()
if (dataset.seq_tags_filter is not None):
print('Using sequences:')
pprint(dataset.seq_tags_filter)
if (args.epoch >= 1):
config.set('load_epoch', args.epoch)
def net_dict_post_proc(net_dict):
'\n :param dict[str] net_dict:\n :return: net_dict\n :rtype: dict[str]\n '
prepare_compile(rec_layer_name=args.rec_layer_name, net_dict=net_dict, cheating=args.cheating, dump_att_weights=args.att_weights, hdf_filename=args.output_file, possible_labels=dataset.labels)
return net_dict
engine = Engine(config=config)
engine.use_search_flag = True
engine.init_network_from_config(config, net_dict_post_proc=net_dict_post_proc)
engine.search(dataset, do_eval=config.bool('search_do_eval', True), output_layer_names=args.rec_layer_name)
engine.finalize()
print('Search finished.')
assert os.path.exists(args.output_file), 'hdf file not dumped?'
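# Added usage sketch; the script and file names are hypothetical:
#   python3 dump_search_scores.py my_config.config --dataset config:dev \
#     --output_file /tmp/search_scores.hdf --att_weights
# This runs beam search on the given dataset and dumps beam scores (and, with --att_weights,
# all softmax_over_spatial outputs) into the HDF file.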
|
def get_raw_strings(dataset, options):
'\n :param Dataset dataset:\n :param options: argparse.Namespace\n :return: list of (seq tag, string)\n :rtype: list[(str,str)]\n '
refs = []
start_time = time.time()
seq_len_stats = Stats()
seq_idx = options.startseq
if (options.endseq < 0):
options.endseq = float('inf')
interactive = (util.is_tty() and (not log.verbose[5]))
print(('Iterating over %r.' % dataset), file=log.v2)
while (dataset.is_less_than_num_seqs(seq_idx) and (seq_idx <= options.endseq)):
dataset.load_seqs(seq_idx, (seq_idx + 1))
complete_frac = dataset.get_complete_frac(seq_idx)
start_elapsed = (time.time() - start_time)
try:
num_seqs_s = str(dataset.num_seqs)
except NotImplementedError:
try:
num_seqs_s = ('~%i' % dataset.estimated_num_seqs)
except TypeError:
num_seqs_s = '?'
progress_prefix = ('%i/%s' % (seq_idx, num_seqs_s))
progress = ('%s (%.02f%%)' % (progress_prefix, (complete_frac * 100)))
if (complete_frac > 0):
total_time_estimated = (start_elapsed / complete_frac)
remaining_estimated = (total_time_estimated - start_elapsed)
progress += (' (%s)' % hms(remaining_estimated))
seq_tag = dataset.get_tag(seq_idx)
assert isinstance(seq_tag, str)
ref = dataset.get_data(seq_idx, options.key)
if isinstance(ref, numpy.ndarray):
assert ((ref.shape == ()) or ((ref.ndim == 1) and (ref.dtype == numpy.uint8)))
if (ref.shape == ()):
ref = ref.flatten()[0]
else:
ref = ref.tobytes()
if isinstance(ref, bytes):
ref = ref.decode('utf8')
assert isinstance(ref, str)
seq_len_stats.collect([len(ref)])
refs.append((seq_tag, ref))
if interactive:
util.progress_bar_with_time(complete_frac, prefix=progress_prefix)
elif log.verbose[5]:
print(progress_prefix, ('seq tag %r, ref len %i chars' % (seq_tag, len(ref))))
seq_idx += 1
print(('Done. Num seqs %i. Total time %s.' % (seq_idx, hms((time.time() - start_time)))), file=log.v1)
print(('More seqs which we did not dump: %s.' % (dataset.is_less_than_num_seqs(seq_idx),)), file=log.v1)
seq_len_stats.dump(stream_prefix=('Seq-length %r ' % (options.key,)), stream=log.v2)
return refs
|
def init(config_filename, log_verbosity):
'\n :param str config_filename: filename to config-file\n :param int log_verbosity:\n '
rnn.init_better_exchook()
rnn.init_thread_join_hack()
if config_filename:
print(('Using config file %r.' % config_filename))
assert os.path.exists(config_filename)
rnn.init_config(config_filename=config_filename, command_line_options=[])
global config
config = rnn.config
config.set('task', 'dump')
config.set('log', None)
config.set('log_verbosity', log_verbosity)
rnn.init_log()
print('Returnn dump-dataset-raw-strings starting up.', file=log.v1)
rnn.returnn_greeting()
rnn.init_faulthandler()
|
def generic_open(filename, mode='r'):
'\n :param str filename:\n :param str mode: text mode by default\n :rtype: typing.TextIO|typing.BinaryIO\n '
if filename.endswith('.gz'):
import gzip
if ('b' not in mode):
mode += 't'
return gzip.open(filename, mode)
return open(filename, mode)
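# Added usage sketch (the path is hypothetical): generic_open falls back to gzip in text mode
# for '.gz' files, so plain and compressed files can be read with the same call.
def _generic_open_example():
    import gzip
    with gzip.open('/tmp/example.txt.gz', 'wt') as f:
        f.write('hello\n')
    with generic_open('/tmp/example.txt.gz') as f:
        assert f.read() == 'hello\n'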
|
def main(argv):
'\n Main entry.\n '
arg_parser = argparse.ArgumentParser(description='Dump raw strings from dataset. Same format as in search.')
arg_parser.add_argument('--config', help="filename to config-file. will use dataset 'eval' from it")
arg_parser.add_argument('--dataset', help='dataset, overwriting config')
arg_parser.add_argument('--startseq', type=int, default=0, help='start seq idx (inclusive) (default: 0)')
arg_parser.add_argument('--endseq', type=int, default=(- 1), help='end seq idx (inclusive) or -1 (default: -1)')
arg_parser.add_argument('--key', default='raw', help="data-key, e.g. 'data' or 'classes'. (default: 'raw')")
arg_parser.add_argument('--verbosity', default=4, type=int, help='5 for all seqs (default: 4)')
arg_parser.add_argument('--out', required=True, help='out-file. py-format as in task=search')
args = arg_parser.parse_args(argv[1:])
assert (args.config or args.dataset)
init(config_filename=args.config, log_verbosity=args.verbosity)
if args.dataset:
dataset = init_dataset(args.dataset)
elif (config.value('dump_data', 'eval') in ['train', 'dev', 'eval']):
dataset = init_dataset(config.opt_typed_value(config.value('search_data', 'eval')))
else:
dataset = init_dataset(config.opt_typed_value('wer_data'))
dataset.init_seq_order(epoch=1)
try:
with generic_open(args.out, 'w') as output_file:
refs = get_raw_strings(dataset=dataset, options=args)
output_file.write('{\n')
for (seq_tag, ref) in refs:
output_file.write(('%r: %r,\n' % (seq_tag, ref)))
output_file.write('}\n')
print(('Done. Wrote to %r.' % args.out))
except KeyboardInterrupt:
print('KeyboardInterrupt')
sys.exit(1)
finally:
rnn.finalize()
|
def plot(m):
'\n :param numpy.ndarray m:\n '
print(('Plotting matrix of shape %s.' % (m.shape,)))
from matplotlib.pyplot import matshow, show
matshow(m.transpose())
show()
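# Added usage sketch (assumes numpy is imported as in the surrounding tools): plot() expects a
# 2D numpy array, e.g. attention weights, and shows its transpose in a matplotlib window.
def _plot_example():
    plot(numpy.random.rand(20, 10))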
|
def dump_dataset(options):
'\n :param options: argparse.Namespace\n '
print(('Epoch: %i' % options.epoch), file=log.v3)
seq_list = None
if options.seqtags:
seq_list = options.seqtags.split(',')
dataset.init_seq_order(epoch=options.epoch, seq_list=seq_list)
print('Dataset keys:', dataset.get_data_keys(), file=log.v3)
print('Dataset target keys:', dataset.get_target_list(), file=log.v3)
assert (options.key in dataset.get_data_keys())
if options.get_num_seqs:
print('Get num seqs.')
print(('estimated_num_seqs: %r' % dataset.estimated_num_seqs))
try:
print(('num_seqs: %r' % dataset.num_seqs))
except Exception as exc:
print(('num_seqs exception %r, which is valid, so we count.' % exc))
seq_idx = 0
if dataset.get_target_list():
default_target = dataset.get_target_list()[0]
else:
default_target = None
while dataset.is_less_than_num_seqs(seq_idx):
dataset.load_seqs(seq_idx, (seq_idx + 1))
if ((seq_idx % 10000) == 0):
if default_target:
targets = dataset.get_targets(default_target, seq_idx)
postfix = (' (targets = %r...)' % (targets[:10],))
else:
postfix = ''
print(('%i ...%s' % (seq_idx, postfix)))
seq_idx += 1
print(('accumulated num seqs: %i' % seq_idx))
print('Done.')
return
dump_file = None
if (options.type == 'numpy'):
print(('Dump files: %r*%r' % (options.dump_prefix, options.dump_postfix)), file=log.v3)
elif (options.type == 'stdout'):
print('Dump to stdout', file=log.v3)
if (options.stdout_limit is not None):
util.set_pretty_print_default_limit(options.stdout_limit)
numpy.set_printoptions(threshold=(sys.maxsize if (options.stdout_limit == float('inf')) else int(options.stdout_limit)))
if options.stdout_as_bytes:
util.set_pretty_print_as_bytes(options.stdout_as_bytes)
elif (options.type == 'print_tag'):
print('Dump seq tag to stdout', file=log.v3)
elif (options.type == 'dump_tag'):
dump_file = open(('%sseq-tags.txt' % options.dump_prefix), 'w')
print(('Dump seq tag to file: %s' % (dump_file.name,)), file=log.v3)
elif (options.type == 'dump_seq_len'):
dump_file = open(('%sseq-lens.txt' % options.dump_prefix), 'w')
print(('Dump seq lens to file: %s' % (dump_file.name,)), file=log.v3)
dump_file.write('{\n')
elif (options.type == 'print_shape'):
print('Dump shape to stdout', file=log.v3)
elif (options.type == 'plot'):
print('Plot.', file=log.v3)
elif (options.type == 'interactive'):
print('Interactive debug shell.', file=log.v3)
elif (options.type == 'null'):
if options.dump_stats:
print('No dump (except stats).')
else:
print('No dump.')
else:
raise Exception(('unknown dump option type %r' % options.type))
start_time = time.time()
stats = (Stats() if (options.stats or options.dump_stats) else None)
seq_len_stats = {key: Stats() for key in dataset.get_data_keys()}
seq_idx = options.startseq
if (options.endseq < 0):
options.endseq = float('inf')
while (dataset.is_less_than_num_seqs(seq_idx) and (seq_idx <= options.endseq)):
dataset.load_seqs(seq_idx, (seq_idx + 1))
complete_frac = dataset.get_complete_frac(seq_idx)
start_elapsed = (time.time() - start_time)
try:
num_seqs_s = str(dataset.num_seqs)
except NotImplementedError:
try:
num_seqs_s = ('~%i' % dataset.estimated_num_seqs)
except TypeError:
num_seqs_s = '?'
progress_prefix = ('%i/%s' % (seq_idx, num_seqs_s))
progress = ('%s (%.02f%%)' % (progress_prefix, (complete_frac * 100)))
data = None
if (complete_frac > 0):
total_time_estimated = (start_elapsed / complete_frac)
remaining_estimated = (total_time_estimated - start_elapsed)
progress += (' (%s)' % hms(remaining_estimated))
if (options.type == 'print_tag'):
print(('seq %s tag:' % (progress if log.verbose[2] else progress_prefix)), dataset.get_tag(seq_idx))
elif (options.type == 'dump_tag'):
print(('seq %s tag:' % (progress if log.verbose[2] else progress_prefix)), dataset.get_tag(seq_idx))
dump_file.write(('%s\n' % dataset.get_tag(seq_idx)))
elif (options.type == 'dump_seq_len'):
seq_len = dataset.get_seq_length(seq_idx)[options.key]
print(('seq %s tag:' % (progress if log.verbose[2] else progress_prefix)), dataset.get_tag(seq_idx), ('%r len:' % options.key), seq_len)
dump_file.write(('%r: %r,\n' % (dataset.get_tag(seq_idx), seq_len)))
else:
data = dataset.get_data(seq_idx, options.key)
if (options.type == 'numpy'):
numpy.savetxt(('%s%i.data%s' % (options.dump_prefix, seq_idx, options.dump_postfix)), data)
elif (options.type == 'stdout'):
print(('seq %s tag:' % progress), dataset.get_tag(seq_idx))
print(('seq %s data:' % progress), pretty_print(data))
elif (options.type == 'print_shape'):
print(('seq %s data shape:' % progress), data.shape)
elif (options.type == 'plot'):
plot(data)
for target in dataset.get_target_list():
targets = dataset.get_targets(target, seq_idx)
if (options.type == 'numpy'):
numpy.savetxt(('%s%i.targets.%s%s' % (options.dump_prefix, seq_idx, target, options.dump_postfix)), targets, fmt='%i')
elif (options.type == 'stdout'):
extra = ''
if ((target in dataset.labels) and (len(dataset.labels[target]) > 1)):
assert dataset.can_serialize_data(target)
extra += (' (%r)' % dataset.serialize_data(key=target, data=targets))
print(('seq %i target %r: %s%s' % (seq_idx, target, pretty_print(targets), extra)))
elif (options.type == 'print_shape'):
print(('seq %i target %r shape:' % (seq_idx, target)), targets.shape)
if (options.type == 'interactive'):
from returnn.util.debug import debug_shell
debug_shell(locals())
seq_len = dataset.get_seq_length(seq_idx)
for key in dataset.get_data_keys():
seq_len_stats[key].collect([seq_len[key]])
if stats:
stats.collect(data)
if (options.type == 'null'):
util.progress_bar_with_time(complete_frac, prefix=progress_prefix)
seq_idx += 1
print(('Done. Total time %s. More seqs which we did not dump: %s' % (hms_fraction((time.time() - start_time)), dataset.is_less_than_num_seqs(seq_idx))), file=log.v2)
for key in dataset.get_data_keys():
seq_len_stats[key].dump(stream_prefix=('Seq-length %r ' % key), stream=log.v2)
if stats:
stats.dump(output_file_prefix=options.dump_stats, stream_prefix=('Data %r ' % options.key), stream=log.v1)
if (options.type == 'dump_seq_len'):
dump_file.write('}\n')
if dump_file:
print('Dumped to file:', dump_file.name, file=log.v2)
dump_file.close()
|
def init(config_str, config_dataset, verbosity):
'\n :param str config_str: either filename to config-file, or dict for dataset\n :param str|None config_dataset:\n :param int verbosity:\n '
global dataset
rnn.init_better_exchook()
rnn.init_thread_join_hack()
dataset_dict = None
config_filename = None
if config_str.strip().startswith('{'):
print(('Using dataset %s.' % config_str))
dataset_dict = eval(config_str.strip())
elif config_str.endswith('.hdf'):
dataset_dict = {'class': 'HDFDataset', 'files': [config_str]}
print(('Using dataset %r.' % dataset_dict))
assert os.path.exists(config_str)
else:
config_filename = config_str
print(('Using config file %r.' % config_filename))
assert os.path.exists(config_filename)
rnn.init_config(config_filename=config_filename, default_config={'cache_size': '0'})
global config
config = rnn.config
config.set('log', None)
config.set('log_verbosity', verbosity)
if dataset_dict:
assert (not config_dataset)
dataset = init_dataset(dataset_dict)
elif (config_dataset and (config_dataset != 'train')):
print(('Use dataset %r from config.' % config_dataset))
dataset = init_dataset(('config:%s' % config_dataset))
else:
print('Use train dataset from config.')
assert config.value('train', None)
dataset = init_dataset('config:train')
rnn.init_log()
print('Returnn dump-dataset starting up.', file=log.v2)
rnn.returnn_greeting()
rnn.init_faulthandler()
print('Dataset:', file=log.v2)
print(' input:', dataset.num_inputs, 'x', dataset.window, file=log.v2)
print(' output:', dataset.num_outputs, file=log.v2)
print(' ', (dataset.len_info() or 'no info'), file=log.v2)
|
def main():
'\n Main entry.\n '
argparser = argparse.ArgumentParser(description='Dump something from dataset.')
argparser.add_argument('returnn_config', help='either filename to config-file, or dict for dataset')
argparser.add_argument('--dataset', help="if given the config, specifies the dataset. e.g. 'dev'")
argparser.add_argument('--epoch', type=int, default=1)
argparser.add_argument('--startseq', type=int, default=0, help='start seq idx (inclusive) (default: 0)')
argparser.add_argument('--endseq', type=int, default=None, help='end seq idx (inclusive) or -1 for all (default: 10, or -1 if --seqtags is given)')
argparser.add_argument('--seqtags', type=str, default=None, help='comma-separated list of seq-tags to dump')
argparser.add_argument('--get_num_seqs', action='store_true')
argparser.add_argument('--type', default='stdout', help="'numpy', 'stdout', 'print_tag', 'dump_tag', 'dump_seq_len', 'print_shape', 'plot', 'interactive', 'null' (default 'stdout')")
argparser.add_argument('--stdout_limit', type=float, default=None, help='e.g. inf to disable')
argparser.add_argument('--stdout_as_bytes', action='store_true')
argparser.add_argument('--verbosity', type=int, default=4, help='overwrites log_verbosity (default: 4)')
argparser.add_argument('--dump_prefix', default='/tmp/returnn.dump-dataset.')
argparser.add_argument('--dump_postfix', default='.txt.gz')
argparser.add_argument('--key', default='data', help="data-key, e.g. 'data' or 'classes'. (default: 'data')")
argparser.add_argument('--stats', action='store_true', help='calculate mean/stddev stats')
argparser.add_argument('--dump_stats', help='file-prefix to dump stats to')
args = argparser.parse_args()
if (args.endseq is None):
args.endseq = (10 if (not args.seqtags) else (- 1))
init(config_str=args.returnn_config, config_dataset=args.dataset, verbosity=args.verbosity)
try:
dump_dataset(args)
except KeyboardInterrupt:
print('KeyboardInterrupt')
sys.exit(1)
finally:
rnn.finalize()
|
def dump(dataset, options):
'\n :type dataset: Dataset.Dataset\n :param options: argparse.Namespace\n '
print(('Epoch: %i' % options.epoch), file=log.v3)
dataset.init_seq_order(options.epoch)
output_dict = {}
for (name, layer) in rnn.engine.network.layers.items():
output_dict[('%s:out' % name)] = layer.output.placeholder
for (i, v) in layer.output.size_placeholder.items():
output_dict[('%s:shape(%i)' % (name, layer.output.get_batch_axis(i)))] = v
seq_idx = options.startseq
if (options.endseq < 0):
options.endseq = float('inf')
while (dataset.is_less_than_num_seqs(seq_idx) and (seq_idx <= options.endseq)):
print(('Seq idx: %i' % (seq_idx,)), file=log.v3)
out = rnn.engine.run_single(dataset=dataset, seq_idx=seq_idx, output_dict=output_dict)
for (name, v) in sorted(out.items()):
print((' %s: %s' % (name, pretty_print(v))))
seq_idx += 1
print(('Done. More seqs which we did not dump: %s' % dataset.is_less_than_num_seqs(seq_idx)), file=log.v1)
|
def init(config_filename, command_line_options):
'\n :param str config_filename:\n :param list[str] command_line_options:\n '
rnn.init(config_filename=config_filename, command_line_options=command_line_options, config_updates={'log': None}, extra_greeting='RETURNN dump-forward starting up.')
rnn.engine.init_train_from_config(config=rnn.config, train_data=rnn.train_data)
|
def main(argv):
'\n Main entry.\n '
arg_parser = argparse.ArgumentParser(description='Forward something and dump it.')
arg_parser.add_argument('returnn_config')
arg_parser.add_argument('--epoch', type=int, default=1)
arg_parser.add_argument('--startseq', type=int, default=0, help='start seq idx (inclusive) (default: 0)')
arg_parser.add_argument('--endseq', type=int, default=10, help='end seq idx (inclusive) or -1 (default: 10)')
args = arg_parser.parse_args(argv[1:])
init(config_filename=args.returnn_config, command_line_options=[])
dump(rnn.train_data, args)
rnn.finalize()
|
def init(config_filename, command_line_options):
'\n :param str config_filename:\n :param list[str] command_line_options:\n '
rnn.init_better_exchook()
rnn.init_config(config_filename, command_line_options)
global config
config = rnn.config
config.set('log', [])
rnn.init_log()
print('RETURNN dump-dataset starting up.', file=log.v3)
|
def main(argv):
'\n Main entry.\n '
arg_parser = argparse.ArgumentParser(description='Dump network as JSON.')
arg_parser.add_argument('returnn_config_file')
arg_parser.add_argument('--epoch', default=1, type=int)
arg_parser.add_argument('--out', default='/dev/stdout')
args = arg_parser.parse_args(argv[1:])
init(config_filename=args.returnn_config_file, command_line_options=[])
pretrain = pretrain_from_config(config)
if pretrain:
json_data = pretrain.get_network_json_for_epoch(args.epoch)
else:
json_data = network_json_from_config(config)
f = open(args.out, 'w')
print(json.dumps(json_data, indent=2, sort_keys=True), file=f)
f.close()
rnn.finalize()
|
def main():
'\n Main entry.\n '
arg_parser = ArgumentParser()
arg_parser.add_argument('file')
args = arg_parser.parse_args()
try:
o = pickle.load(open(args.file, 'rb'))
print(better_repr(o))
except BrokenPipeError:
print('BrokenPipeError', file=sys.stderr)
sys.exit(1)
|
def get_segment_name(tree):
'\n :param list tree: list of enclosing XML elements\n :rtype: str\n '
def _m(x):
if ('name' in x.attrib):
return x.attrib['name']
if (x.tag == 'segment'):
return '1'
assert False, ('unknown name: %r, %r' % (x, vars(x)))
return '/'.join(map(_m, tree))
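# Added usage sketch (assumes ElementTree is imported as used below): the tree is the list of
# enclosing XML elements; unnamed <segment> elements default to the name '1'.
def _get_segment_name_example():
    corpus = ElementTree.Element('corpus', name='train')
    recording = ElementTree.Element('recording', name='rec1')
    segment = ElementTree.Element('segment')
    assert get_segment_name([corpus, recording, segment]) == 'train/rec1/1'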
|
def iter_bliss_orth(filename):
'\n :param str filename:\n :return: yields (segment name, orth)\n '
corpus_file = open(filename, 'rb')
if filename.endswith('.gz'):
corpus_file = gzip.GzipFile(fileobj=corpus_file)
def getelements(tag):
'Yield *tag* elements from *filename_or_file* xml incrementally.'
context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
(_, root) = next(context)
tree = [root]
for (event, elem) in context:
if (event == 'start'):
tree += [elem]
elif (event == 'end'):
assert (tree[(- 1)] is elem)
tree = tree[:(- 1)]
if ((event == 'end') and (elem.tag == tag)):
(yield (tree, elem))
for (tree, elem) in getelements('segment'):
elem_orth = elem.find('orth')
orth_raw = (elem_orth.text or '')
orth_split = orth_raw.split()
orth = ' '.join(orth_split)
(yield (get_segment_name((tree + [elem])), orth))
|
def iter_dataset_targets(dataset):
'\n :type dataset: Dataset.Dataset\n '
dataset.init_seq_order(epoch=1)
seq_idx = 0
while dataset.is_less_than_num_seqs(seq_idx):
dataset.load_seqs(seq_idx, (seq_idx + 1))
segment_name = dataset.get_tag(seq_idx)
targets = dataset.get_targets('classes', seq_idx)
assert (targets.ndim == 1)
targets = targets.astype('int32')
(yield (segment_name, targets))
seq_idx += 1
|
class OrthHandler():
'\n Orthography handler.\n '
allo_add_all = False
def __init__(self, lexicon, si_label=None, allo_num_states=3, allo_context_len=1, allow_ci_in_words=True):
'\n :param Lexicon lexicon:\n :param int si_label:\n :param int allo_num_states:\n :param int allo_context_len:\n :param bool allow_ci_in_words:\n '
self.lexicon = lexicon
self.phonemes = sorted(self.lexicon.phonemes.keys(), key=(lambda s: self.lexicon.phonemes[s]['index']))
self.word_boundary_phones = {(- 1): set(), 1: set()}
self.phon_to_possible_ctx_via_lex = {(- 1): {}, 1: {}}
for lemma in self.lexicon.lemmas.values():
for pron in lemma['phons']:
phons = pron['phon'].split()
assert phons
self.word_boundary_phones[(- 1)].add(phons[0])
self.word_boundary_phones[1].add(phons[(- 1)])
for i in range(len(phons)):
ps = [(phons[(i + j)] if (0 <= (i + j) < len(phons)) else '') for j in [(- 1), 0, 1]]
self.phon_to_possible_ctx_via_lex[1].setdefault(ps[1], set()).add(ps[2])
self.phon_to_possible_ctx_via_lex[(- 1)].setdefault(ps[1], set()).add(ps[0])
for phone in self.lexicon.phoneme_list:
if ('' in self.phon_to_possible_ctx_via_lex[(- 1)][phone]):
self.phon_to_possible_ctx_via_lex[(- 1)][phone].update(self.word_boundary_phones[1])
if ('' in self.phon_to_possible_ctx_via_lex[1][phone]):
self.phon_to_possible_ctx_via_lex[1][phone].update(self.word_boundary_phones[(- 1)])
if allow_ci_in_words:
for phone in self.lexicon.phoneme_list:
self.phon_to_possible_ctx_via_lex[(- 1)][phone].add('')
self.phon_to_possible_ctx_via_lex[1][phone].add('')
self.si_lemma = self.lexicon.lemmas['[SILENCE]']
self.si_phone = self.si_lemma['phons'][0]['phon']
self.si_label = si_label
self.allo_num_states = allo_num_states
self.allo_context_len = allo_context_len
def expected_num_labels_for_monophone_state_tying(self):
'\n Silence has 1 state, all others have allo_num_states.\n\n :rtype: int\n '
num_phones = len(self.lexicon.phonemes)
return (((num_phones - 1) * self.allo_num_states) + 1)
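# Example (added): with 46 phonemes including silence and allo_num_states=3 this gives
# (46 - 1) * 3 + 1 = 136 labels.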
def iter_orth(self, orth):
'\n :param str orth:\n :return: yields lemmas\n '
symbols = list(orth.split())
i = 0
while (i < len(symbols)):
symbol = symbols[i]
try:
lemma = self.lexicon.lemmas[symbol]
except KeyError:
if ('/' in symbol):
symbols[i:(i + 1)] = symbol.split('/')
continue
if ('-' in symbol):
symbols[i:(i + 1)] = symbol.split('-')
continue
raise
i += 1
(yield lemma)
def _iter_possible_ctx(self, phon_id, direction):
'\n :param str phon_id: e.g. "aa", "aw", "uh", "z", etc.\n :param int direction: 1 or -1\n :rtype: list[tuple[str]]\n '
if (self.lexicon.phonemes[phon_id]['variation'] == 'none'):
return [()]
if self.allo_add_all:
res = [()]
res += [(p,) for p in sorted(self.lexicon.phonemes.keys()) if (self.lexicon.phonemes[p]['variation'] == 'context')]
return res
return [((p,) if p else ()) for p in sorted(self.phon_to_possible_ctx_via_lex[direction][phon_id])]
def num_states_for_phone(self, phon_id):
'\n :param str phon_id:\n :return: number of allophone states for this phone\n :rtype: int\n '
if (phon_id == self.si_phone):
return 1
return self.allo_num_states
def all_allophone_variations(self, phon, states=None, all_boundary_variations=False):
"\n :param str phon:\n :param None|list[int] states: which states to yield for this phone\n :param bool all_boundary_variations:\n :return: yields AllophoneState's\n :rtype: list[AllophoneState]\n "
if (states is None):
states = range(self.num_states_for_phone(phon))
if all_boundary_variations:
boundary_variations = [0, 1, 2, 3]
else:
boundary_variations = [0]
for left_ctx in self._iter_possible_ctx(phon, (- 1)):
for right_ctx in self._iter_possible_ctx(phon, 1):
for state in states:
for boundary in boundary_variations:
a = AllophoneState()
a.id = phon
a.context_history = left_ctx
a.context_future = right_ctx
a.state = state
a.boundary = boundary
if (not all_boundary_variations):
if (not left_ctx):
a.mark_initial()
if (not right_ctx):
a.mark_final()
(yield a)
def _phones_to_allos(self, phones):
for p in phones:
a = AllophoneState()
a.id = p
(yield a)
def _allos_set_context(self, allos):
if (self.allo_context_len == 0):
return
ctx = []
for a in allos:
if (self.lexicon.phonemes[a.id]['variation'] == 'context'):
a.context_history = tuple(ctx)
ctx += [a.id]
ctx = ctx[(- self.allo_context_len):]
else:
ctx = []
ctx = []
for a in reversed(allos):
if (self.lexicon.phonemes[a.id]['variation'] == 'context'):
a.context_future = tuple(reversed(ctx))
ctx += [a.id]
ctx = ctx[(- self.allo_context_len):]
else:
ctx = []
def _allos_add_states(self, allos):
for _a in allos:
if (_a.id == self.si_phone):
(yield _a)
else:
for state in range(self.allo_num_states):
a = AllophoneState()
a.id = _a.id
a.context_history = _a.context_history
a.context_future = _a.context_future
a.boundary = _a.boundary
a.state = state
(yield a)
def orth_to_allophone_states(self, orth):
'\n :param str orth: orthography as a str. orth.split() should give words in the lexicon\n :rtype: list[AllophoneState]\n :returns allophone state list. those will have repetitions etc\n '
allos = []
for lemma in self.iter_orth(orth):
assert (len(lemma['phons']) == 1), 'TODO: only a single pronunciation per lemma is supported here'
phon = lemma['phons'][0]
l_allos = list(self._phones_to_allos(phon['phon'].split()))
l_allos[0].mark_initial()
l_allos[(- 1)].mark_final()
allos += l_allos
self._allos_set_context(allos)
allos = list(self._allos_add_states(allos))
return allos
|
def main():
'\n Main entry.\n '
arg_parser = ArgumentParser()
arg_parser.add_argument('--action')
arg_parser.add_argument('--print_seq', action='store_true')
arg_parser.add_argument('--print_allos', action='store_true')
arg_parser.add_argument('--print_targets', action='store_true')
arg_parser.add_argument('--dataset')
arg_parser.add_argument('--corpus')
arg_parser.add_argument('--lexicon', help='filename')
arg_parser.add_argument('--silence', type=int, help='index')
arg_parser.add_argument('--context', default=1, type=int)
arg_parser.add_argument('--hmm_states', default=3, type=int)
arg_parser.add_argument('--state_tying_type', help="'monophone' or 'full'")
arg_parser.add_argument('--state_tying_output', help='filename')
arg_parser.add_argument('--allo_add_all', action='store_true')
args = arg_parser.parse_args()
dataset = (init_dataset(args.dataset) if args.dataset else None)
corpus = (dict(iter_bliss_orth(filename=args.corpus)) if args.corpus else None)
lexicon = (Lexicon(filename=args.lexicon) if args.lexicon else None)
silence_label = args.silence
if (args.action == 'show_corpus'):
pprint(corpus)
return
print(('Num phones: %i' % len(lexicon.phonemes)), file=log.v1)
print(('Phones: %r' % sorted(lexicon.phonemes.keys())), file=log.v1)
orth_handler = OrthHandler(lexicon=lexicon, allo_context_len=args.context, allo_num_states=args.hmm_states)
map_idx_to_allo = defaultdict(set)
map_allo_to_idx = {}
if args.allo_add_all:
orth_handler.allo_add_all = True
print(('Num HMM states: %i' % orth_handler.allo_num_states), file=log.v1)
if (args.state_tying_type == 'monophone'):
print('Monophone state tying.', file=log.v1)
num_labels = orth_handler.expected_num_labels_for_monophone_state_tying()
all_label_idx_are_used = True
elif (args.state_tying_type == 'full'):
print('Full state tying.', file=log.v1)
phone_idxs = {k: (i + 1) for (i, k) in enumerate(lexicon.phoneme_list)}
for phon in lexicon.phoneme_list:
for allo in orth_handler.all_allophone_variations(phon, all_boundary_variations=True):
allo_idx = allo.index(phone_idxs=phone_idxs, num_states=orth_handler.allo_num_states, context_length=orth_handler.allo_context_len)
map_idx_to_allo[allo_idx].add(allo)
num_labels = (max(map_idx_to_allo.keys()) + 1)
all_label_idx_are_used = False
else:
raise Exception(('invalid state tying type %r' % args.state_tying_type))
print(('Num labels: %i' % num_labels), file=log.v1)
if dataset:
count = 0
for (segment_name, targets) in iter_dataset_targets(dataset):
count += 1
if ((silence_label is None) or (count == 1)):
likely_silence_label = collections.Counter(targets).most_common(1)[0][0]
if (silence_label is None):
silence_label = likely_silence_label
if (silence_label != likely_silence_label):
print(('warning: silence %i but likely %i' % (silence_label, likely_silence_label)), file=log.v2)
print(('Silence label: %i' % silence_label), file=log.v1)
orth_handler.si_label = silence_label
for allo in orth_handler.all_allophone_variations(orth_handler.si_phone):
map_idx_to_allo[silence_label].add(allo)
map_allo_to_idx[allo] = silence_label
assert (segment_name in corpus)
orth = corpus[segment_name]
allo_states = orth_handler.orth_to_allophone_states(orth=orth)
if args.print_seq:
print(('%r %r' % (segment_name, orth)))
if args.print_allos:
print((' allophone state seq: %r' % allo_states))
tgt_seq = [t for t in uniq(targets) if (t != silence_label)]
if args.print_targets:
print((' target seq: %r' % (tgt_seq,)))
assert (len(allo_states) == len(tgt_seq)), 'length mismatch between allophone states and target seq; check --hmm_states / --context'
for (allo, t) in zip(allo_states, tgt_seq):
allo.boundary = 0
allos = map_idx_to_allo[t]
if (allo in map_allo_to_idx):
assert (allo in allos), 'bad mapping'
else:
assert (allo not in allos)
allos.add(allo)
map_allo_to_idx[allo] = t
if (len(map_idx_to_allo) >= num_labels):
assert (len(map_idx_to_allo) == num_labels)
assert (0 in map_idx_to_allo)
assert ((num_labels - 1) in map_idx_to_allo)
print(('Finished with uniq mapping after %i sequences.' % count), file=log.v1)
break
if ((count % 100) == 0):
print(('Have indices: %i (num labels: %i)' % (len(map_idx_to_allo), num_labels)), file=log.v1)
print(('Finished. Have indices: %i (num labels: %i)' % (len(map_idx_to_allo), num_labels)), file=log.v1)
if (len(map_idx_to_allo) < num_labels):
found = []
not_found = []
for p in sorted(lexicon.phonemes.keys()):
allo = AllophoneState(p, state=0)
if (allo in map_allo_to_idx):
found.append(p)
else:
not_found.append(p)
print(('Phonemes found: %r' % found))
print(('Phonemes not found: %r' % not_found))
if args.state_tying_output:
assert (not os.path.exists(args.state_tying_output))
if all_label_idx_are_used:
assert (len(map_idx_to_allo) == num_labels)
assert (0 in map_idx_to_allo)
assert ((num_labels - 1) in map_idx_to_allo)
f = open(args.state_tying_output, 'w')
for (i, allos) in sorted(map_idx_to_allo.items()):
for allo in allos:
f.write(('%s %i\n' % (allo.format(), i)))
f.close()
print(('Wrote state tying to %r.' % args.state_tying_output), file=log.v1)
print('The end.')
|
def main():
'\n Main entry point.\n '
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--config', help='RETURNN config')
arg_parser.add_argument('--learning-rate-file', help='The learning rate file contains scores / errors per epoch.')
arg_parser.add_argument('--key', help="key to use, e.g. 'dev_error'")
arg_parser.add_argument('--n', type=int, default=5, help='print best N epochs')
args = arg_parser.parse_args()
if (bool(args.config) == bool(args.learning_rate_file)):
print('Error: provide either --config or --learning-rate-file')
arg_parser.print_help()
sys.exit(1)
if args.config:
config = Config()
config.load_file(args.config)
lr = LearningRateControl.load_initial_from_config(config)
elif args.learning_rate_file:
lr = LearningRateControl(default_learning_rate=1, filename=args.learning_rate_file)
else:
assert False, ('should not get here with %r' % args)
epochs = sorted(lr.epoch_data.keys())
if (not epochs):
print('Error: no epochs found')
sys.exit(1)
print('Loaded epochs', epochs[0], '..', epochs[(- 1)])
if args.key:
key = args.key
print(('Using key %s' % key))
else:
last_epoch_with_error_info = None
for ep in reversed(epochs):
if lr.epoch_data[ep].error:
last_epoch_with_error_info = ep
break
if (last_epoch_with_error_info is None):
print('Error: no scores/errors found')
sys.exit(1)
key = lr.get_error_key(last_epoch_with_error_info)
print(('Using key %s (auto via epoch %i)' % (key, last_epoch_with_error_info)))
epochs_ = []
missing_epochs = []
for ep in epochs:
errors = lr.epoch_data[ep].error
if (key in errors):
epochs_.append((errors[key], ep))
else:
missing_epochs.append(ep)
if (len(epochs_) == len(epochs)):
print('All epochs have the key.')
else:
print('Epochs missing the key:', missing_epochs)
assert epochs_
epochs_.sort()
for (value, ep) in epochs_[:args.n]:
errors = lr.epoch_data[ep].error
print(', '.join(([('Epoch %i' % ep), ('%r %r' % (key, value))] + [('%r %r' % (k, v)) for (k, v) in errors.items() if (k != key)])))
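# Added usage sketch; the script and file names are hypothetical:
#   python3 get_best_epoch.py --learning-rate-file newbob.data --key dev_error --n 5
# This prints the 5 epochs with the smallest 'dev_error' together with their other
# scores/errors from the learning-rate file.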
|
def hdf_dataset_init(file_name):
'\n :param str file_name: filename of hdf dataset file in the filesystem\n :rtype: hdf_dataset_mod.HDFDatasetWriter\n '
return hdf_dataset_mod.HDFDatasetWriter(filename=file_name)
|
def hdf_dump_from_dataset(dataset, hdf_dataset, parser_args):
'\n :param Dataset dataset: could be any dataset implemented as child of Dataset\n :param hdf_dataset_mod.HDFDatasetWriter hdf_dataset:\n :param parser_args: argparse object from main()\n '
hdf_dataset.dump_from_dataset(dataset=dataset, epoch=parser_args.epoch, start_seq=parser_args.start_seq, end_seq=parser_args.end_seq, use_progress_bar=True)
|
def hdf_close(hdf_dataset):
'\n :param HDFDataset.HDFDatasetWriter hdf_dataset: to close\n '
hdf_dataset.close()
|
def init(config_filename, cmd_line_opts, dataset_config_str):
'\n :param str config_filename: global config for CRNN\n :param list[str] cmd_line_opts: options for init_config method\n :param str dataset_config_str: dataset via init_dataset_via_str()\n '
rnn.init_better_exchook()
rnn.init_thread_join_hack()
if config_filename:
rnn.init_config(config_filename, cmd_line_opts)
rnn.init_log()
else:
log.initialize(verbosity=[5])
print('Returnn hdf_dump starting up.', file=log.v3)
rnn.init_faulthandler()
if config_filename:
rnn.init_data()
rnn.print_task_properties()
assert isinstance(rnn.train_data, Dataset)
dataset = rnn.train_data
else:
assert dataset_config_str
dataset = init_dataset(dataset_config_str)
print('Source dataset:', dataset.len_info(), file=log.v3)
return dataset
|
def _is_crnn_config(filename):
'\n :param str filename:\n :rtype: bool\n '
if filename.endswith('.gz'):
return False
if filename.endswith('.config'):
return True
try:
config = Config()
config.load_file(filename)
return True
except Exception:
pass
return False
|
def main(argv):
'\n Main entry.\n '
parser = argparse.ArgumentParser(description='Dump dataset or subset of dataset into external HDF dataset')
parser.add_argument('config_file_or_dataset', type=str, help='Config file for RETURNN, or directly the dataset init string')
parser.add_argument('hdf_filename', type=str, help='File name of the HDF dataset, which will be created')
parser.add_argument('--start_seq', type=int, default=0, help='Start sequence index of the dataset to dump')
parser.add_argument('--end_seq', type=int, default=float('inf'), help='End sequence index of the dataset to dump')
parser.add_argument('--epoch', type=int, default=1, help='Optional start epoch for initialization')
args = parser.parse_args(argv[1:])
returnn_config = None
dataset_config_str = None
if _is_crnn_config(args.config_file_or_dataset):
returnn_config = args.config_file_or_dataset
else:
dataset_config_str = args.config_file_or_dataset
dataset = init(config_filename=returnn_config, cmd_line_opts=[], dataset_config_str=dataset_config_str)
hdf_dataset = hdf_dataset_init(args.hdf_filename)
hdf_dump_from_dataset(dataset, hdf_dataset, args)
hdf_close(hdf_dataset)
rnn.finalize()
|
def checkpoint_exists(path):
'\n :param str path:\n :rtype: bool\n '
return (tf_compat.v1.gfile.Exists(path) or tf_compat.v1.gfile.Exists((path + '.meta')) or tf_compat.v1.gfile.Exists((path + '.index')))
|
def main(_):
'\n Main entry.\n '
_logger = logging.getLogger('tensorflow')
_logger.setLevel('INFO')
tf_compat.v1.logging.info(('%s startup. TF version: %s' % (__file__, tf.__version__)))
if FLAGS.checkpoints:
checkpoints = [c.strip() for c in FLAGS.checkpoints.split(',')]
checkpoints = [c for c in checkpoints if c]
if (not checkpoints):
raise ValueError('No checkpoints provided for averaging.')
if FLAGS.prefix:
checkpoints = [(FLAGS.prefix + c) for c in checkpoints]
else:
assert (FLAGS.num_last_checkpoints >= 1), 'Must average at least one model'
assert FLAGS.prefix, 'Prefix must be provided when averaging last N checkpoints'
checkpoint_state = tf.train.get_checkpoint_state(os.path.dirname(FLAGS.prefix))
checkpoints = checkpoint_state.all_model_checkpoint_paths[(- FLAGS.num_last_checkpoints):]
checkpoints = [c for c in checkpoints if checkpoint_exists(c)]
if (not checkpoints):
if FLAGS.checkpoints:
raise ValueError(('None of the provided checkpoints exist. %s' % FLAGS.checkpoints))
else:
raise ValueError(('Could not find checkpoints at %s' % os.path.dirname(FLAGS.prefix)))
tf_compat.v1.logging.info('Reading variables and averaging checkpoints:')
for c in checkpoints:
tf_compat.v1.logging.info('%s ', c)
var_list = tf.train.list_variables(checkpoints[0])
(var_values, var_dtypes) = ({}, {})
for (name, shape) in var_list:
var_values[name] = numpy.zeros(shape)
for checkpoint in checkpoints:
reader = tf.train.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
if (not isinstance(tensor, numpy.ndarray)):
tensor = numpy.array(tensor)
assert isinstance(tensor, numpy.ndarray)
var_dtypes[name] = tensor.dtype
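# Integer tensors (e.g. the global step) are not averaged: the value from the last checkpoint wins. Float tensors are summed here and divided by the checkpoint count below.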
if numpy.issubdtype(tensor.dtype, numpy.integer):
var_values[name] = tensor
else:
var_values[name] += tensor
tf_compat.v1.logging.info('Read from checkpoint %s', checkpoint)
for name in var_values:
if (not numpy.issubdtype(var_dtypes[name], numpy.integer)):
var_values[name] /= len(checkpoints)
with tf_compat.v1.variable_scope(tf_compat.v1.get_variable_scope(), reuse=tf_compat.v1.AUTO_REUSE):
tf_vars = [tf_compat.v1.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v]) for v in var_values]
placeholders = [tf_compat.v1.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf_compat.v1.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
saver = tf_compat.v1.train.Saver(tf_compat.v1.all_variables())
with tf_compat.v1.Session() as sess:
sess.run(tf_compat.v1.global_variables_initializer())
for (p, assign_op, (name, value)) in zip(placeholders, assign_ops, var_values.items()):
sess.run(assign_op, {p: value})
saver.save(sess, FLAGS.output_path)
tf_compat.v1.logging.info('Averaged checkpoints saved in %s', FLAGS.output_path)
|
def print_tensor(v):
'\n :param numpy.ndarray v:\n '
print(v)
mean = numpy.mean(v)
print('mean:', mean)
print('stddev:', numpy.sqrt(numpy.mean(numpy.square((v - mean)))))
print('rms:', numpy.sqrt(numpy.mean(numpy.square(v))))
print('min:', numpy.min(v))
print('max:', numpy.max(v))
|
def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors):
'Prints tensors in a checkpoint file.\n\n If no `tensor_name` is provided, prints the tensor names and shapes\n in the checkpoint file.\n\n If `tensor_name` is provided, prints the content of the tensor.\n\n Args:\n file_name: Name of the checkpoint file.\n tensor_name: Name of the tensor in the checkpoint file to print.\n all_tensors: Boolean indicating whether to print all tensors.\n '
try:
reader = tf_compat.v1.train.NewCheckpointReader(file_name)
if all_tensors:
var_to_shape_map = reader.get_variable_to_shape_map()
for key in sorted(var_to_shape_map):
print('tensor_name: ', key)
print(reader.get_tensor(key))
elif (not tensor_name):
print(reader.debug_string().decode('utf-8'))
else:
print('tensor_name: ', tensor_name)
v = reader.get_tensor(tensor_name)
print_tensor(v)
if (tensor_name.endswith('/Adam') and reader.has_tensor((tensor_name + '_1'))):
print('Guessing Adam m/v')
v2 = reader.get_tensor((tensor_name + '_1'))
eps = 1e-08
print(('Adam update (m / (eps + sqrt(v))) with eps=%r:' % eps))
print_tensor((v / (eps + numpy.sqrt(v2))))
except Exception as e:
print(str(e))
if ('corrupted compressed block contents' in str(e)):
print("It's likely that your checkpoint file has been compressed with SNAPPY.")
if (('Data loss' in str(e)) and any(((ext in file_name) for ext in ['.index', '.meta', '.data']))):
proposed_file = '.'.join(file_name.split('.')[0:(- 1)])
v2_file_error_template = "\nIt's likely that this is a V2 checkpoint and you need to provide the filename\n*prefix*. Try removing the '.' and extension. Try:\ninspect checkpoint --file_name = {}"
print(v2_file_error_template.format(proposed_file))
|
def parse_numpy_printoption(kv_str):
"Sets a single numpy printoption from a string of the form 'x=y'.\n\n See documentation on numpy.set_printoptions() for details about what values\n x and y can take. x can be any option listed there other than 'formatter'.\n\n Args:\n kv_str: A string of the form 'x=y', such as 'threshold=100000'\n\n Raises:\n argparse.ArgumentTypeError: If the string couldn't be used to set any\n nump printoption.\n "
k_v_str = kv_str.split('=', 1)
if ((len(k_v_str) != 2) or (not k_v_str[0])):
raise argparse.ArgumentTypeError(("'%s' is not in the form k=v." % kv_str))
(k, v_str) = k_v_str
printoptions = numpy.get_printoptions()
if (k not in printoptions):
raise argparse.ArgumentTypeError(("'%s' is not a valid printoption." % k))
v_type = type(printoptions[k])
if (printoptions[k] is None):
raise argparse.ArgumentTypeError(("Setting '%s' from the command line is not supported." % k))
try:
v = (v_type(v_str) if (v_type is not bool) else flags.BooleanParser().parse(v_str))
except ValueError as e:
raise argparse.ArgumentTypeError(str(e))
numpy.set_printoptions(**{k: v})
|
def main(unused_argv):
'\n Main entry:\n '
if (not FLAGS.file_name):
print('Usage: inspect_checkpoint --file_name=checkpoint_file_name [--tensor_name=tensor_to_print]')
sys.exit(1)
else:
print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name, FLAGS.all_tensors)
|
def main():
'\n Main entry.\n '
argparser = ArgumentParser()
argparser.add_argument('file', help='e.g. events.out.tfevents...')
argparser.add_argument('--tag', default='objective/loss', help="default is 'objective/loss'")
args = argparser.parse_args()
print(('file: %s' % args.file))
print(('tag: %s' % args.tag))
for e in tf_compat.v1.train.summary_iterator(args.file):
for v in e.summary.value:
if (v.tag == args.tag):
print(('step %i: %r' % (e.step, v.simple_value)))
print('done')
|
def main():
'main entry'
print(f'RETURNN {os.path.basename(__file__)} -- average PyTorch model checkpoints')
print('PyTorch version:', torch.__version__)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--checkpoints', nargs='+', required=True, help='comma-separated (or multiple provided) input checkpoints')
arg_parser.add_argument('--separator', default=',', help='custom separator for --checkpoints, can also disable it')
arg_parser.add_argument('--prefix', default='', help='add this as a prefix to the input checkpoints')
arg_parser.add_argument('--postfix', default='', help="add this as a postfix to the input checkpoints (e.g. '.pt')")
arg_parser.add_argument('--output_path', required=True, help='output checkpoint')
args = arg_parser.parse_args()
in_ckpts = []
def _add_in_ckpt(name: str):
in_ckpt__ = ((args.prefix + name) + args.postfix)
if (not os.path.exists(in_ckpt__)):
raise Exception(f'input checkpoint not found: {in_ckpt__!r}, prefix {args.prefix!r}, postfix {args.postfix!r}, name {name!r}')
print('in ckpt:', in_ckpt__)
in_ckpts.append(in_ckpt__)
for in_ckpt in args.checkpoints:
in_ckpt: str
if args.separator:
for in_ckpt_ in in_ckpt.split(args.separator):
_add_in_ckpt(in_ckpt_)
else:
_add_in_ckpt(in_ckpt)
print('out ckpt:', args.output_path)
merge_checkpoints(in_ckpts=in_ckpts, out_ckpt=args.output_path)
print('Done.')
|
def merge_checkpoints(in_ckpts: Sequence[str], out_ckpt: str, extra_state: Optional[Dict[(str, Any)]]=None):
'\n Merge checkpoints\n '
out_model_state: Dict[(str, torch.Tensor)] = {}
out_model_state_num: Dict[(str, int)] = defaultdict(int)
out_state: Dict[(str, Any)] = {'model': out_model_state, 'merged_epochs': [], 'merged_steps': []}
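# Model parameters are summed over all checkpoints and divided by their per-key count at the end (simple average).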
for in_ckpt in in_ckpts:
print('read ckpt:', in_ckpt)
in_state = torch.load(in_ckpt, map_location=torch.device('cpu'), mmap=True)
assert isinstance(in_state, dict)
assert (('model' in in_state) and isinstance(in_state['model'], dict))
covered_keys = {'model'}
for (k, v) in in_state['model'].items():
k: str
v: torch.Tensor
if (k in out_model_state):
out_model_state[k] += v
else:
out_model_state[k] = v
out_model_state_num[k] += 1
for k in ['epoch', 'step']:
covered_keys.add(k)
if (k in in_state):
if (k in out_state):
out_state[k] = max(out_state[k], in_state[k])
else:
out_state[k] = in_state[k]
for k in ['merged_epochs', 'merged_steps']:
covered_keys.add(k)
if (k in in_state):
out_state[k].extend(in_state[k])
out_state['merged_epochs'].append(in_state.get('epoch'))
out_state['merged_steps'].append(in_state.get('step'))
for (k, v) in in_state.items():
if (k in covered_keys):
continue
if (k not in out_state):
out_state[k] = v
for k in out_model_state:
out_model_state[k] /= out_model_state_num[k]
if extra_state:
out_state.update(extra_state)
print('save ckpt:', out_ckpt)
torch.save(out_state, out_ckpt)
|
def init(config_filename: str, checkpoint: str, log_verbosity: int, device: str):
'\n :param config_filename: Filename to config file.\n :param checkpoint: Filename to the trained model.\n :param log_verbosity: 5 for all seqs (default: 4)\n :param device:\n '
assert os.path.exists(checkpoint), "The specified checkpoint doesn't exist."
rnn.init_better_exchook()
rnn.init_thread_join_hack()
assert os.path.exists(config_filename), "The specified config doesn't exist."
print(('Using config file %r.' % config_filename))
rnn.init_config(config_filename=config_filename, extra_updates={'log': None, 'log_verbosity': log_verbosity, 'task': __file__, 'device': device})
global config
config = rnn.config
rnn.init_log()
print('RETURNN frontend module to ONNX conversion.', file=log.v1)
rnn.returnn_greeting()
config.typed_dict.setdefault('backend', 'torch')
rnn.init_backend_engine()
assert util.BackendEngine.is_torch_selected(), 'For now only the torch backend is supported.'
rnn.init_faulthandler()
|
class ForwardModulePT(torch.nn.Module):
"\n Wrapper of a PyTorch module that's meant to call forward_step from the config when called.\n "
def __init__(self, pt_module: torch.nn.Module, forward_step: Callable, extern_data: TensorDict):
'\n :param pt_module: RF module as obtained from the config.\n :param forward_step: forward_step function as obtained from the config.\n :param extern_data:\n '
super().__init__()
self.model = pt_module
self.forward_step_func = forward_step
self.extern_data = extern_data
def __call__(self, data: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
'\n Wrapper to forward_step from the config.\n '
extern_data = self.extern_data.copy_template()
extern_data.assign_from_raw_tensor_dict_(data, with_scalar_dyn_sizes=False, duplicate_dims_are_excluded=True)
self.forward_step_func(model=self.model, extern_data=extern_data)
_check_matching_outputs()
return rf.get_run_ctx().outputs.as_raw_tensor_dict(include_scalar_dyn_sizes=False)
|
class ForwardModuleRF(_RFModuleAsPTModule):
"\n Wrapper of a RETURNN frontend module that's meant to call forward_step from the config when called.\n "
def __init__(self, rf_module: rf.Module, forward_step: Callable, extern_data: TensorDict):
'\n :param rf_module: RF module as obtained from the config.\n :param forward_step: forward_step function as obtained from the config.\n :param extern_data:\n '
super().__init__(rf_module)
self.forward_step_func = forward_step
self.extern_data = extern_data
def __call__(self, data: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
'\n Wrapper to forward_step from the config.\n '
extern_data = self.extern_data.copy_template()
extern_data.assign_from_raw_tensor_dict_(data, with_scalar_dyn_sizes=False, duplicate_dims_are_excluded=True)
self.forward_step_func(model=self.rf_module, extern_data=extern_data)
_check_matching_outputs()
return rf.get_run_ctx().outputs.as_raw_tensor_dict(include_scalar_dyn_sizes=False)
|
def _check_matching_outputs():
rf.get_run_ctx().check_outputs_complete()
model_outputs_raw_keys = set(_get_model_outputs_raw_keys())
outputs_raw = rf.get_run_ctx().outputs.as_raw_tensor_dict(include_scalar_dyn_sizes=False)
outputs_raw_keys = set(outputs_raw.keys())
assert (model_outputs_raw_keys == outputs_raw_keys), f'''Model outputs raw keys and output raw keys from forward_step don't match.
Model outputs raw keys: {sorted(model_outputs_raw_keys)}
Output raw keys: {sorted(outputs_raw_keys)}'''
assert all(((v is not None) for (k, v) in outputs_raw.items())), f'''Output raw keys from forward_step contain None values.
Output raw keys with None: {list((k for (k, v) in outputs_raw.items() if (v is None)))}'''
|
def _get_model_outputs_raw_keys():
model_outputs = rf.get_run_ctx().expected_outputs
model_outputs_raw_keys = []
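# Each dynamic dim adds an extra '<key>:size<i>' raw output carrying its sequence lengths.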
for (k, v) in model_outputs.data.items():
model_outputs_raw_keys.append(k)
for (i, dim) in enumerate(v.dims):
if (dim.dyn_size_ext and dim.dyn_size_ext.dims):
model_outputs_raw_keys.append(f'{k}:size{i}')
return model_outputs_raw_keys
|
def main():
'\n Main entry point\n '
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('config', type=str, help='Filename to config file. Must have `get_model()` and `forward_step()`. Can optionally have `export()`.')
parser.add_argument('checkpoint', type=str, help='Checkpoint to RF module, considering the backend.')
parser.add_argument('out_onnx_filename', type=str, help='Filename of the final ONNX model.')
parser.add_argument('--verbosity', default=4, type=int, help='5 for all seqs (default: 4)')
parser.add_argument('--device', type=str, default='cpu', help="'cpu' (default) or 'gpu'.")
args = parser.parse_args()
init(config_filename=args.config, checkpoint=args.checkpoint, log_verbosity=args.verbosity, device=args.device)
model_outputs_dict = config.typed_value('model_outputs')
assert (model_outputs_dict is not None), 'The specified config needs to have explicit model outputs. Please define `model_outputs` in your config.'
model_outputs = TensorDict()
model_outputs.update(model_outputs_dict, auto_convert=True)
loaded_checkpoint = torch.load(args.checkpoint, map_location=torch.device(args.device))
epoch = loaded_checkpoint['epoch']
step = loaded_checkpoint['step']
rf.init_forward_step_run_ctx(expected_outputs=model_outputs, step=step, epoch=epoch)
rf.set_random_seed(42)
get_model_func = config.typed_value('get_model')
assert get_model_func, "get_model() isn't specified in the config passed as a parameter."
sentinel_kw = {('__fwd_compatible_random_arg_%i' % int((random() * 100))): None}
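# The random sentinel kwarg checks that get_model() tolerates unknown keyword args (forward compatibility).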
model = get_model_func(epoch=epoch, step=step, **sentinel_kw)
is_rf_module = isinstance(model, rf.Module)
is_pt_module = isinstance(model, torch.nn.Module)
assert (is_rf_module or is_pt_module), "The module returned by get_model() isn't a returnn.frontend.Module or a torch.nn.Module."
export_func = (config.typed_value('export') or torch.onnx.export)
forward_step_func = config.typed_value('forward_step')
assert (forward_step_func is not None), 'forward_step() must be defined in the config.'
extern_data_dict = config.typed_value('extern_data')
extern_data = TensorDict()
extern_data.update(extern_data_dict, auto_convert=True)
extern_data.reset_content()
for (k, v) in list(extern_data.data.items()):
if (not v.available_for_inference):
del extern_data.data[k]
tensor_dict_fill_random_numpy_(extern_data)
tensor_dict_numpy_to_torch_(extern_data)
extern_data_raw = extern_data.as_raw_tensor_dict(include_scalar_dyn_sizes=False, exclude_duplicate_dims=True)
model_outputs_raw_keys = _get_model_outputs_raw_keys()
if is_pt_module:
model.load_state_dict(loaded_checkpoint['model'])
model.eval()
pt_model_fwd = ForwardModulePT(model, forward_step_func, extern_data)
elif is_rf_module:
pt_model_fwd = ForwardModuleRF(model, forward_step_func, extern_data)
pt_model_fwd.load_state_dict(loaded_checkpoint['model'])
pt_model_fwd.eval()
else:
assert False, 'PT/RF module?'
dynamic_axes = {}
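# Mark every dynamic dim of inputs and outputs (and of their size tensors) as a dynamic ONNX axis.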
for (k, v) in (list(extern_data.data.items()) + list(model_outputs.data.items())):
dynamic_axes[k] = {i: dim.name for (i, dim) in enumerate(v.dims) if dim.is_dynamic()}
for (i, dim) in enumerate(v.dims):
if (dim.dyn_size_ext and (dim.dyn_size_ext.dims == ())):
continue
if dim.dyn_size_ext:
dynamic_axes[f'{k}:size{i}'] = {j: dim_.name for (j, dim_) in enumerate(dim.dyn_size_ext.dims) if dim_.is_dynamic()}
print('*** Input names:', list(extern_data_raw.keys()))
print('*** Output names:', model_outputs_raw_keys)
print('*** Dynamic axes:', dynamic_axes)
export_func(pt_model_fwd, (extern_data_raw, {}), f=args.out_onnx_filename, verbose=True, input_names=list(extern_data_raw.keys()), output_names=model_outputs_raw_keys, dynamic_axes=dynamic_axes)
|
def main():
'main'
numpy.set_printoptions(precision=4, linewidth=80)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('checkpoint')
arg_parser.add_argument('--key', type=str, default='', help='Name of the tensor or object to inspect. If not given, list them all (but without values, unless --all_tensors).')
arg_parser.add_argument('--all_tensors', action='store_true', help='If True, print the values of all the tensors.')
arg_parser.add_argument('--stats_only', action='store_true', help='with --all_tensors or --key, print only stats of tensor, not all values')
arg_parser.add_argument('--stats', action='store_true', help='with --key, just like --stats_only, otherwise like --all_tensors --stats_only')
arg_parser.add_argument('--interesting_exclude', help='list of tensors to exclude, separated by comma, matching part of name')
arg_parser.add_argument('--printoptions', nargs='*', type=parse_numpy_printoption, help="Argument for numpy.set_printoptions(), in the form 'k=v'.")
arg_parser.add_argument('--device', default='cpu')
arg_parser.add_argument('--mmap', action='store_true')
args = arg_parser.parse_args()
kwargs = {}
if args.mmap:
kwargs['mmap'] = True
state = torch.load(args.checkpoint, map_location=args.device, **kwargs)
if args.key:
assert isinstance(state, dict)
if ((args.key not in state) and ('model' in state)):
state = state['model']
obj = state[args.key]
print(f'{args.key}:')
print_object(obj, stats_only=(args.stats_only or args.stats))
else:
ctx = PrintCtx(exclude=(args.interesting_exclude.split(',') if args.interesting_exclude else []))
print_object(state, print_all_tensors=(args.all_tensors or args.stats), stats_only=(args.stats_only or args.stats), ctx=ctx)
ctx.report()
|
def print_object(obj: Any, *, print_all_tensors: bool=False, stats_only: bool=False, prefix: str='', ctx: Optional[PrintCtx]=None, ctx_name: Optional[str]=None):
'print object'
if isinstance(obj, (dict, list, tuple)):
for (k, v) in (obj.items() if isinstance(obj, dict) else enumerate(obj)):
_print_key_value(k, v, print_all_tensors=print_all_tensors, stats_only=stats_only, prefix=prefix, ctx=ctx, ctx_name=(f'{ctx_name}.{k}' if ctx_name else f'{k}'))
elif isinstance(obj, (numpy.ndarray, torch.Tensor)):
print_tensor(obj, stats_only=stats_only, prefix=prefix, ctx=ctx, ctx_name=ctx_name)
else:
print(f'{prefix}({type(obj)}) {obj}')
|
def _print_key_value(k: Any, v: Union[(numpy.ndarray, torch.Tensor)], *, print_all_tensors: bool=False, stats_only: bool=False, prefix: str='', ctx: PrintCtx, ctx_name: str):
if isinstance(v, numpy.ndarray):
v = torch.tensor(v)
if isinstance(v, torch.Tensor):
if ((v.numel() <= 1) and (v.device.type != 'meta')):
print(f'{prefix}{k}: {v.dtype} {_format_shape(v.shape)} {_r(v)}')
else:
print(f'{prefix}{k}: {v.dtype} {_format_shape(v.shape)}')
if print_all_tensors:
print_tensor(v, with_type_and_shape=False, stats_only=stats_only, prefix=(prefix + ' '), ctx=ctx, ctx_name=ctx_name)
elif isinstance(v, (dict, list, tuple)):
print(f'{prefix}{k}: ({type(v).__name__})')
print_object(v, print_all_tensors=print_all_tensors, stats_only=stats_only, prefix=(prefix + ' '), ctx=ctx, ctx_name=ctx_name)
else:
print(f'{prefix}{k}: ({type(v).__name__}) {v}')
|
def print_tensor(v: Union[(numpy.ndarray, torch.Tensor)], *, prefix: str='', with_type_and_shape: bool=True, stats_only: bool=False, ctx: Optional[PrintCtx]=None, ctx_name: Optional[str]=None):
'print tensor'
if isinstance(v, numpy.ndarray):
v = torch.tensor(v)
assert isinstance(v, torch.Tensor)
if with_type_and_shape:
print(f'{prefix}{v.dtype}, {_format_shape(v.shape)}')
if (not stats_only):
print(v.detach().cpu().numpy())
n = v.numel()
if (n > 1):
(v_, _) = v.flatten().sort()
if v.is_floating_point():
mean = torch.mean(v)
max_abs = max(torch.abs(v_[[0, (- 1)]]))
print(f'{prefix}mean, stddev, max abs: {_r(mean)}, {_r(torch.sqrt(torch.mean(torch.square((v - mean)))))}, {_r(max_abs)}')
if (ctx and ctx_name):
ctx.visit_tensor(name=ctx_name, tensor=v, max_abs=max_abs)
print(f"{prefix}min, p05, p50, p95, max: {_r(v_[0])}, {', '.join((_r(v_[int((n * q))]) for q in (0.05, 0.5, 0.95)))}, {_r(v_[(- 1)])}")
|
def _format_shape(shape: Tuple[(int, ...)]) -> str:
return ('[%s]' % ','.join(map(str, shape)))
|
def _r(num: Union[(torch.Tensor, float)]) -> str:
return numpy.array2string((num.detach().cpu().numpy() if isinstance(num, torch.Tensor) else num))
|
class PrintCtx():
'print ctx, maybe collect interesting global info'
def __init__(self, *, exclude: List[str]):
self.interesting: Dict[(str, Tuple[(float, str, torch.Tensor)])] = {}
self.exclude = exclude
def visit_tensor(self, *, name: str, tensor: torch.Tensor, max_abs: float):
'visit'
for exclude_name in self.exclude:
if (exclude_name in name):
return
if (tensor.ndim > 0):
key = 'largest max abs'
if ((key not in self.interesting) or (self.interesting[key][0] < max_abs)):
self.interesting[key] = (max_abs, name, tensor)
key = 'smallest max abs'
if ((key not in self.interesting) or (self.interesting[key][0] > max_abs)):
self.interesting[key] = (max_abs, name, tensor)
def report(self):
'report'
if (not self.interesting):
return
print('Collected interesting tensors:')
for (k, (v, name, tensor)) in self.interesting.items():
print(f'{k} {_r(v)}: {name}: {tensor.dtype} {_format_shape(tensor.shape)}')
print_tensor(tensor, with_type_and_shape=False, stats_only=True, prefix=' ')
|
def parse_numpy_printoption(kv_str):
"Sets a single numpy printoption from a string of the form 'x=y'.\n\n See documentation on numpy.set_printoptions() for details about what values\n x and y can take. x can be any option listed there other than 'formatter'.\n\n Args:\n kv_str: A string of the form 'x=y', such as 'threshold=100000'\n\n Raises:\n argparse.ArgumentTypeError: If the string couldn't be used to set any\n nump printoption.\n "
k_v_str = kv_str.split('=', 1)
if ((len(k_v_str) != 2) or (not k_v_str[0])):
raise argparse.ArgumentTypeError(("'%s' is not in the form k=v." % kv_str))
(k, v_str) = k_v_str
printoptions = numpy.get_printoptions()
if (k not in printoptions):
raise argparse.ArgumentTypeError(("'%s' is not a valid printoption." % k))
v_type = type(printoptions[k])
if (printoptions[k] is None):
raise argparse.ArgumentTypeError(("Setting '%s' from the command line is not supported." % k))
try:
v = (v_type(v_str) if (v_type is not bool) else _to_bool(v_str))
except ValueError as e:
raise argparse.ArgumentTypeError(str(e))
numpy.set_printoptions(**{k: v})
|
def _to_bool(s: str) -> bool:
'\n :param s: str to be converted to bool, e.g. "1", "0", "true", "false"\n :return: boolean value, or fallback\n '
s = s.lower()
if (s in ['1', 'true', 'yes', 'y']):
return True
if (s in ['0', 'false', 'no', 'n']):
return False
raise ValueError(f'invalid bool: {s!r}')
|
def main():
'main'
print(f'{os.path.basename(__file__)}: {__doc__.strip()}')
numpy.set_printoptions(precision=4, linewidth=80)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('returnn_config')
arg_parser.add_argument('--cwd')
arg_parser.add_argument('--key', type=str, default='', help='Name of the tensor or object to inspect')
arg_parser.add_argument('--all_tensors', action='store_true', help='If True, print the values of all the tensors.')
arg_parser.add_argument('--stats_only', action='store_true')
arg_parser.add_argument('--printoptions', nargs='*', type=parse_numpy_printoption, help="Argument for numpy.set_printoptions(), in the form 'k=v'.")
arg_parser.add_argument('--device', default='cpu')
args = arg_parser.parse_args()
if args.cwd:
print('* Change working dir:', args.cwd)
os.chdir(args.cwd)
log.initialize(verbosity=[5])
config = Config()
print('* Load config:', args.returnn_config)
config.load_file(args.returnn_config)
set_global_config(config)
for k in ['train', 'dev', 'eval', 'eval_datasets', 'torch_amp', 'grad_scaler', 'torch_distributed', 'learning_rate_control', 'learning_rate_file']:
config.typed_dict.pop(k, None)
config.set('device', args.device)
print('* Setup RETURNN engine')
engine = Engine(config=config)
print('* Load model and optimizer state')
engine.init_train_from_config()
model = engine.get_pt_model()
assert (model is not None), 'No model loaded?'
opt = engine.get_pt_optimizer()
assert (opt is not None), 'No optimizer loaded?'
print('* Loaded.')
if args.key:
obj = model.get_parameter(args.key)
print(f'{args.key}:')
print_object(obj, stats_only=args.stats_only)
print('Optimizer state:')
if (obj in opt.state):
print_object(opt.state[obj], stats_only=args.stats_only)
else:
print('(None)')
else:
for (name, param) in model.named_parameters():
_print_key_value(name, param, print_all_tensors=args.all_tensors, stats_only=args.stats_only)
if (param in opt.state):
print(' Optimizer state:')
print_object(opt.state[param], prefix=' ', print_all_tensors=args.all_tensors, stats_only=args.stats_only)
else:
print(' Optimizer state: (None)')
|
def syn_shuffle(lst0, lst1, lst2, lst3):
lst = list(zip(lst0, lst1, lst2, lst3))
random.shuffle(lst)
(lst0, lst1, lst2, lst3) = zip(*lst)
return (lst0, lst1, lst2, lst3)
|
class MVTecDataset(Dataset):
def __init__(self, root, transform, gt_transform, phase, category, split_ratio=0.8):
self.phase = phase
if (self.phase in ('train', 'eval')):
self.img_path = os.path.join(root, category, 'train')
else:
self.img_path = os.path.join(root, category, 'test')
self.gt_path = os.path.join(root, category, 'ground_truth')
self.split_ratio = split_ratio
self.transform = transform
self.gt_transform = gt_transform
assert os.path.isdir(os.path.join(root, category)), 'Error MVTecDataset category:{}'.format(category)
(self.img_paths, self.gt_paths, self.labels, self.types) = self.load_dataset()
def load_dataset(self):
img_tot_paths = []
gt_tot_paths = []
tot_labels = []
tot_types = []
defect_types = os.listdir(self.img_path)
for defect_type in defect_types:
if (defect_type == 'good'):
img_paths = glob.glob((os.path.join(self.img_path, defect_type) + '/*.png'))
img_tot_paths.extend(img_paths)
gt_tot_paths.extend(([0] * len(img_paths)))
tot_labels.extend(([0] * len(img_paths)))
tot_types.extend((['good'] * len(img_paths)))
else:
img_paths = glob.glob((os.path.join(self.img_path, defect_type) + '/*.png'))
gt_paths = glob.glob((os.path.join(self.gt_path, defect_type) + '/*.png'))
img_paths.sort()
gt_paths.sort()
img_tot_paths.extend(img_paths)
if (len(gt_paths) == 0):
gt_paths = ([0] * len(img_paths))
gt_tot_paths.extend(gt_paths)
tot_labels.extend(([1] * len(img_paths)))
tot_types.extend(([defect_type] * len(img_paths)))
train_len = int((len(img_tot_paths) * self.split_ratio))
(img_tot_paths, gt_tot_paths, tot_labels, tot_types) = syn_shuffle(img_tot_paths, gt_tot_paths, tot_labels, tot_types)
if (self.phase == 'train'):
img_tot_paths = img_tot_paths[:train_len]
gt_tot_paths = gt_tot_paths[:train_len]
tot_labels = tot_labels[:train_len]
tot_types = tot_types[:train_len]
elif (self.phase == 'eval'):
img_tot_paths = img_tot_paths[train_len:]
gt_tot_paths = gt_tot_paths[train_len:]
tot_labels = tot_labels[train_len:]
tot_types = tot_types[train_len:]
return (img_tot_paths, gt_tot_paths, tot_labels, tot_types)
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
(img_path, gt, label, img_type) = (self.img_paths[idx], self.gt_paths[idx], self.labels[idx], self.types[idx])
img = Image.open(img_path).convert('RGB')
origin = img
img = self.transform(img)
if (gt == 0):
gt = torch.zeros([1, img.size()[(- 2)], img.size()[(- 1)]])
else:
gt = Image.open(gt)
gt = self.gt_transform(gt)
assert (img.size()[1:] == gt.size()[1:]), 'image.size != gt.size !!!'
return {'origin': np.array(origin), 'image': img, 'gt': gt, 'label': label, 'name': os.path.basename(img_path[:(- 4)]), 'type': img_type}
|
class MVTecLOCODataset(Dataset):
def __init__(self, root, transform, gt_transform, phase, category, split_ratio=None):
self.phase = phase
if (phase == 'train'):
self.img_path = os.path.join(root, category, 'train')
elif (phase == 'eval'):
self.img_path = os.path.join(root, category, 'validation')
else:
self.img_path = os.path.join(root, category, 'test')
self.gt_path = os.path.join(root, category, 'ground_truth')
self.transform = transform
self.gt_transform = gt_transform
assert os.path.isdir(os.path.join(root, category)), 'Error MVTecLOCODataset category:{}'.format(category)
(self.img_paths, self.gt_paths, self.labels, self.types) = self.load_dataset()
def load_dataset(self):
img_tot_paths = []
gt_tot_paths = []
tot_labels = []
tot_types = []
defect_types = os.listdir(self.img_path)
for defect_type in defect_types:
if (defect_type == 'good'):
img_paths = glob.glob((os.path.join(self.img_path, defect_type) + '/*.png'))
img_tot_paths.extend(img_paths)
gt_tot_paths.extend(([0] * len(img_paths)))
tot_labels.extend(([0] * len(img_paths)))
tot_types.extend((['good'] * len(img_paths)))
else:
img_paths = glob.glob((os.path.join(self.img_path, defect_type) + '/*.png'))
gt_paths = glob.glob((os.path.join(self.gt_path, defect_type) + '/*'))
gt_paths = [g for g in gt_paths if os.path.isdir(g)]
img_paths.sort()
gt_paths.sort()
img_tot_paths.extend(img_paths)
if (len(gt_paths) == 0):
gt_paths = ([0] * len(img_paths))
gt_tot_paths.extend(gt_paths)
tot_labels.extend(([1] * len(img_paths)))
tot_types.extend(([defect_type] * len(img_paths)))
assert (len(img_tot_paths) == len(gt_tot_paths)), 'Something wrong with test and ground truth pair!'
return (img_tot_paths, gt_tot_paths, tot_labels, tot_types)
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
(img_path, gt, label, img_type) = (self.img_paths[idx], self.gt_paths[idx], self.labels[idx], self.types[idx])
img = Image.open(img_path).convert('RGB')
origin = img
img = self.transform(img)
if (gt == 0):
gt = torch.zeros([1, img.size()[(- 2)], img.size()[(- 1)]])
else:
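# MVTec LOCO provides a directory of binary masks per defect image; OR-merge them into one ground-truth mask.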
names = os.listdir(gt)
ims = [cv2.imread(os.path.join(gt, name), cv2.IMREAD_GRAYSCALE) for name in names]
ims = [im for im in ims if isinstance(im, np.ndarray)]
imzeros = np.zeros_like(ims[0])
for im in ims:
imzeros[(im == 255)] = 255
gt = Image.fromarray(imzeros)
gt = self.gt_transform(gt)
assert (img.size()[1:] == gt.size()[1:]), 'image.size != gt.size !!!'
return {'origin': np.array(origin), 'image': img, 'gt': gt, 'label': label, 'name': os.path.basename(img_path[:(- 4)]), 'type': img_type}
|
class VisaDataset(Dataset):
def __init__(self, root, transform, gt_transform, phase, category=None, split_ratio=0.8):
self.phase = phase
self.root = root
self.category = category
self.transform = transform
self.gt_transform = gt_transform
self.split_ratio = split_ratio
self.split_file = (root + '/split_csv/1cls.csv')
assert os.path.isfile(self.split_file), 'Error VisA dataset'
assert os.path.isdir(os.path.join(self.root, category)), 'Error VisA dataset category:{}'.format(category)
(self.img_paths, self.gt_paths, self.labels, self.types) = self.load_dataset()
def load_dataset(self):
img_tot_paths = []
gt_tot_paths = []
tot_labels = []
tot_types = []
with open(self.split_file, 'r') as file:
csvreader = csv.reader(file)
next(csvreader)
for row in csvreader:
(category, split, label, image_path, mask_path) = row
image_name = image_path.split('/')[(- 1)]
mask_name = mask_path.split('/')[(- 1)]
if ((split == 'train') and (self.phase == 'eval')):
split = 'eval'
if ((self.phase == split) and (self.category == category)):
img_src_path = os.path.join(self.root, image_path)
if (label == 'normal'):
gt_src_path = 0
index = 0
types = 'good'
else:
index = 1
types = 'bad'
gt_src_path = os.path.join(self.root, mask_path)
img_tot_paths.append(img_src_path)
gt_tot_paths.append(gt_src_path)
tot_labels.append(index)
tot_types.append(types)
train_len = int((len(img_tot_paths) * self.split_ratio))
(img_tot_paths, gt_tot_paths, tot_labels, tot_types) = syn_shuffle(img_tot_paths, gt_tot_paths, tot_labels, tot_types)
if (self.phase == 'train'):
img_tot_paths = img_tot_paths[:train_len]
gt_tot_paths = gt_tot_paths[:train_len]
tot_labels = tot_labels[:train_len]
tot_types = tot_types[:train_len]
elif (self.phase == 'eval'):
img_tot_paths = img_tot_paths[train_len:]
gt_tot_paths = gt_tot_paths[train_len:]
tot_labels = tot_labels[train_len:]
tot_types = tot_types[train_len:]
return (img_tot_paths, gt_tot_paths, tot_labels, tot_types)
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
(img_path, gt, label, img_type) = (self.img_paths[idx], self.gt_paths[idx], self.labels[idx], self.types[idx])
img = Image.open(img_path).convert('RGB')
origin = img
img = self.transform(img)
if (gt == 0):
gt = torch.zeros([1, img.size()[(- 2)], img.size()[(- 1)]])
else:
gt = Image.open(gt)
gt = self.gt_transform(gt)
assert (img.size()[1:] == gt.size()[1:]), 'image.size != gt.size !!!'
return {'origin': np.array(origin), 'image': img, 'gt': gt, 'label': label, 'name': os.path.basename(img_path[:(- 4)]), 'type': img_type}
|
class ImageNetDataset(Dataset):
def __init__(self, imagenet_dir, transform=None):
super().__init__()
self.imagenet_dir = imagenet_dir
self.transform = transform
self.dataset = ImageFolder(self.imagenet_dir, transform=self.transform)
def __len__(self):
return 1000
def __getitem__(self, idx):
return self.dataset[idx][0]
|
def load_infinite(loader):
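# Yield batches indefinitely: restart the DataLoader iterator whenever it is exhausted.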
iterator = iter(loader)
while True:
try:
(yield next(iterator))
except StopIteration:
iterator = iter(loader)
|
def get_AD_dataset(type, root, transform, gt_transform=None, phase='train', category=None, split_ratio=1):
if (type == 'VisA'):
return VisaDataset(root, transform, gt_transform, phase, category, split_ratio=split_ratio)
elif (type == 'MVTec'):
return MVTecDataset(root, transform, gt_transform, phase, category, split_ratio=split_ratio)
elif (type == 'MVTecLoco'):
return MVTecLOCODataset(root, transform, gt_transform, phase, category)
elif (type == 'ImageNet'):
return ImageNetDataset(root, transform)
else:
raise NotImplementedError
|
def cp(src_dir, dst_dir, filename, optional=False):
src_fn = ((src_dir + '/') + filename)
dst_fn = ((dst_dir + '/') + filename)
if (not os.path.exists(src_fn)):
print(('%r does not exist' % src_fn))
assert optional
return
try:
os.makedirs(os.path.dirname(dst_fn))
except os.error:
pass
print(('copy (%s) %s' % (dst_dir, filename)))
shutil.copyfile(src_fn, dst_fn)
|
def main():
for (corpus_src, corpus_dst, experiments) in [(swb_src_base_dir, swb_dst_base_dir, swb_experiments)]:
for fn in base_files:
cp(src_dir=corpus_src, dst_dir=corpus_dst, filename=fn)
for setup_name in experiments:
cp(src_dir=corpus_src, dst_dir=corpus_dst, filename=('config-train/%s.config' % setup_name))
cp(src_dir=corpus_src, dst_dir=corpus_dst, filename=('scores/%s.recog.wers.txt' % setup_name), optional=True)
cp(src_dir=corpus_src, dst_dir=corpus_dst, filename=('scores/%s.train.info.txt' % setup_name))
|
def cf(filename):
'Cache manager'
if (filename in _cf_cache):
return _cf_cache[filename]
cached_fn = check_output(['cf', filename]).strip().decode('utf8')
assert os.path.exists(cached_fn)
_cf_cache[filename] = cached_fn
return cached_fn
|
def get_sprint_dataset(data):
assert (data in ['train', 'cv'])
epochSplit = {'train': EpochSplit, 'cv': 1}
files = {}
files['config'] = 'config/training.config'
files['corpus'] = commonfiles['corpus']
files['segments'] = ('dependencies/seg_%s' % {'train': 'train', 'cv': 'cv_head3000'}[data])
files['features'] = commonfiles['features']
files['lexicon'] = commonfiles['lexicon']
files['alignment'] = commonfiles['alignment']
files['cart'] = commonfiles['cart']
for (k, v) in sorted(files.items()):
assert os.path.exists(v), ('%s %r does not exist' % (k, v))
estimated_num_seqs = {'train': 227047, 'cv': 3000}
args = [('--config=' + files['config']), (lambda : ('--*.corpus.file=' + cf(files['corpus']))), (lambda : ('--*.corpus.segments.file=' + cf(files['segments']))), '--*.corpus.segment-order-sort-by-time-length=true', '--*.state-tying.type=cart', (lambda : ('--*.state-tying.file=' + cf(files['cart']))), ('--*.trainer-output-dimension=%i' % num_outputs), (lambda : ('--*.lexicon.file=' + cf(files['lexicon']))), (lambda : ('--*.alignment-cache-path=' + cf(files['alignment']))), (lambda : ('--*.feature-cache-path=' + cf(files['features']))), '--*.log-channel.file=/dev/null', '--*.window-size=1', ('--*.trainer-output-dimension=%i' % num_outputs)]
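# Lambdas keep the cf() cache-manager lookups lazy (presumably resolved only once the Sprint dataset is started).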
return {'class': 'ExternSprintDataset', 'sprintTrainerExecPath': 'sprint-executables/nn-trainer', 'sprintConfigStr': args, 'partitionEpoch': epochSplit[data], 'estimated_num_seqs': (estimated_num_seqs[data] // (epochSplit[data] or 1))}
|
def parse_tdp_config(s):
s = s.replace(' ', '').replace('\t', '')
return [('--*.tdp.%s' % l.strip()) for l in s.splitlines() if l.strip()]
|
def get_sprint_error_signal_proc_args():
files = commonfiles.copy()
for (k, v) in sorted(files.items()):
assert os.path.exists(v), ('%s %r does not exist' % (k, v))
return (['--config=config/ctc.train.config', '--action=python-control', '--python-control-loop-type=python-control-loop', '--*.python-segment-order=false', '--*.extract-features=false', (lambda : ('--*.corpus.file=' + cf(files['corpus']))), '--*.state-tying.type=cart', (lambda : ('--*.state-tying.file=' + cf(files['cart']))), (lambda : ('--*.lexicon.file=' + cf(files['lexicon']))), '--*.feature-cache-path=should-not-be-needed', '--*.alignment-cache-path=should-not-be-needed', '--*.prior-file=dependencies/prior-fixed-f32.xml', '--*.lexicon.normalize-pronunciation=true', '--*.transducer-builder-filter-out-invalid-allophones=true', '--*.fix-allophone-context-at-word-boundaries=true', '--*.allow-for-silence-repetitions=false', '--*.normalize-lemma-sequence-scores=true', ('--*.number-of-classes=%i' % num_outputs), '--*.log-channel.file=/dev/null'] + parse_tdp_config(('\n*.loop = %(loop)f\n*.forward = %(forward)f\n*.skip = infinity\n*.exit = %(forward)f\nentry-m1.forward = 0\nentry-m2.forward = 0\nentry-m1.loop = infinity\nentry-m2.loop = infinity\nsilence.loop = %(sloop)f\nsilence.forward = %(sforward)f\nsilence.skip = infinity\nsilence.exit = %(sforward)f\n' % {'loop': (- numpy.log(0.5)), 'forward': (- numpy.log(0.5)), 'sloop': (- numpy.log(0.5)), 'sforward': (- numpy.log(0.5))})))
|
def check_valid_prior(filename):
from Util import load_txt_vector
v = load_txt_vector(filename)
v = numpy.array(v)
assert (v.ndim == 1)
assert all((v < 0.0)), 'log space assumed'
v = numpy.exp(v)
tot = numpy.sum(v)
assert numpy.isclose(tot, 1.0, atol=0.0001)
|
class Globals():
engine = None
config = None
dataset = None
setup_name = None
setup_dir = None
epoch = None
@classmethod
def get_output_prefix(cls):
return ('fullsum-scores/out.%s.ep%03i.' % (cls.setup_name, cls.epoch))
@classmethod
def get_softmax_prior_filename(cls):
return (cls.get_output_prefix() + 'softmax-prior.txt')
@classmethod
def get_fullsum_scores_filename(cls, prior, am_scale, prior_scale, tdp_scale):
'\n :param str prior: e.g. "none", "softmax", "fixed"\n :param float am_scale:\n :param float prior_scale:\n :param float tdp_scale:\n '
return (cls.get_output_prefix() + ('fullsum-scores.prior_%s.am_scale_%f.prior_scale_%f.tdp_scale_%f.txt' % (prior, am_scale, prior_scale, tdp_scale)))
|
def get_softmax_prior():
fn = Globals.get_softmax_prior_filename()
if os.path.exists(fn):
print('Existing softmax prior:', fn)
return fn
print('Calculate softmax prior and save to:', fn)
Globals.config.set('output_file', fn)
Globals.engine.compute_priors(dataset=Globals.dataset, config=Globals.config)
return fn
|
def calc_fullsum_scores(meta):
from returnn.Util import better_repr
fn = Globals.get_fullsum_scores_filename(**meta)
if os.path.exists(fn):
print('Existing fullsum scores filename:', fn)
print(('content:\n%s\n' % open(fn).read()))
return fn
assert ('output_fullsum' in Globals.engine.network.layers)
analyzer = Globals.engine.analyze(data=Globals.dataset, statistics=None)
print('fullsum score:', analyzer.score['cost:output_fullsum'])
print('Write all to:', fn)
with open(fn, 'w') as f:
f.write(better_repr({'scores': analyzer.score, 'errors': analyzer.error, 'stats': analyzer.stats, 'num_frames': analyzer.num_frames_accumulated}))
return fn
|
def main():
argparser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
argparser.add_argument('--model', required=True, help='or config, or setup')
argparser.add_argument('--epoch', required=True, type=int)
argparser.add_argument('--prior', help='none, fixed, softmax (default: none)')
argparser.add_argument('--prior_scale', type=float, default=1.0)
argparser.add_argument('--am_scale', type=float, default=1.0)
argparser.add_argument('--tdp_scale', type=float, default=1.0)
args = argparser.parse_args()
cfg_fn = args.model
if ('/' not in cfg_fn):
cfg_fn = ('config-train/%s.config' % cfg_fn)
assert os.path.exists(cfg_fn)
setup_name = os.path.splitext(os.path.basename(cfg_fn))[0]
setup_dir = ('data-train/%s' % setup_name)
assert os.path.exists(setup_dir)
Globals.setup_name = setup_name
Globals.setup_dir = setup_dir
Globals.epoch = args.epoch
config_update['epoch'] = args.epoch
config_update['load_epoch'] = args.epoch
config_update['model'] = ('%s/net-model/network' % setup_dir)
from returnn import rnn
rnn.init(configFilename=cfg_fn, config_updates=config_update, extra_greeting='calc full sum score.')
Globals.engine = rnn.engine
Globals.config = rnn.config
Globals.dataset = rnn.dev_data
assert (Globals.engine and Globals.config and Globals.dataset)
Globals.engine.init_train_from_config(config=Globals.config, dev_data=Globals.dataset)
softmax_prior = get_softmax_prior()
prior = (args.prior or 'none')
if (prior == 'none'):
prior_filename = None
elif (prior == 'softmax'):
prior_filename = softmax_prior
elif (prior == 'fixed'):
prior_filename = 'dependencies/prior-fixed-f32.xml'
else:
raise Exception(('invalid prior %r' % prior))
print('using prior:', prior)
if prior_filename:
assert os.path.exists(prior_filename)
check_valid_prior(prior_filename)
print('Do the stuff...')
print('Reinit dataset.')
Globals.dataset.init_seq_order(epoch=args.epoch)
network_update['out_fullsum_scores']['eval_locals']['am_scale'] = args.am_scale
network_update['out_fullsum_scores']['eval_locals']['prior_scale'] = args.prior_scale
network_update['out_fullsum_bw']['tdp_scale'] = args.tdp_scale
if prior_filename:
network_update['out_fullsum_prior']['init'] = ('load_txt_file(%r)' % prior_filename)
else:
network_update['out_fullsum_prior']['init'] = 0
from copy import deepcopy
Globals.config.typed_dict['network'] = deepcopy(Globals.config.typed_dict['network'])
Globals.config.typed_dict['network'].update(network_update)
from returnn.Pretrain import pretrain_from_config
pretrain = pretrain_from_config(Globals.config)
if pretrain:
new_network_desc = pretrain.get_network_json_for_epoch(Globals.epoch)
else:
new_network_desc = Globals.config.typed_dict['network']
assert ('output_fullsum' in new_network_desc)
print('Init new network.')
Globals.engine.maybe_init_new_network(new_network_desc)
print('Calc scores.')
calc_fullsum_scores(meta=dict(prior=prior, prior_scale=args.prior_scale, am_scale=args.am_scale, tdp_scale=args.tdp_scale))
rnn.finalize()
print('Bye.')
|
class Settings():
recog_metric_name = 'WER'
recog_score_lower_is_better = True
|
def run(args, **kwargs):
import subprocess
kwargs = kwargs.copy()
print(('$ %s' % ' '.join(args)), {k: (v if (k != 'input') else '...') for (k, v) in kwargs.items()})
try:
subprocess.run(args, **kwargs, check=True)
except KeyboardInterrupt:
print('KeyboardInterrupt')
sys.exit(1)
|
def qsub_name_from_args(args):
return ('qsub_' + '_'.join(args).replace('./', '').replace('/', '').replace(' ', ''))
|